diff --git a/README.md b/README.md
index 8ca04f1..4404558 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ You can then run `helm search repo devops-ia` to see the charts.
 | [default-resources](./charts/default-resources) | A Helm chart for Default Resources |
 | [ecr-registry](./charts/ecr-registry) | CronJob to update Amazon Elastic Container Registry credentials |
 | [helm-release-cleaner](./charts/helm-release-cleaner) | A Helm chart for Helm Charts to clean up the releases installed in the declared namespaces |
+| [kafka-cruise-control](./charts/kafka-cruise-control) | A Helm chart to deploy Kafka Cruise Control |
 | [opencti](./charts/opencti) | A Helm chart to deploy open cyber threat intelligence platform. **This chart was moved to .** |
 | [prometheus-prefect-exporter](./charts/prometheus-prefect-exporter) | A Helm chart to deploy Prometheus Prefect Exporter. **This chart was moved to .** |
 | [replika](./charts/replika) | A Kubernetes operator to replicate resources across namespaces |
diff --git a/charts/kafka-cruise-control/.helmignore b/charts/kafka-cruise-control/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/kafka-cruise-control/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/kafka-cruise-control/Chart.yaml b/charts/kafka-cruise-control/Chart.yaml
new file mode 100644
index 0000000..cc6eef3
--- /dev/null
+++ b/charts/kafka-cruise-control/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: kafka-cruise-control
+description: A Helm chart to deploy Kafka Cruise Control
+type: application
+maintainers:
+  - name: ialejandro
+    email: hello@ialejandro.rocks
+    url: https://ialejandro.rocks
+sources:
+  - https://github.com/devops-ia/kafka-cruise-control
+  - https://github.com/linkedin/cruise-control
+version: 1.0.0
+appVersion: "jdk11-cc2.5.138"
+home: https://github.com/devops-ia/helm-charts/tree/main/charts/kafka-cruise-control
+keywords:
+  - kafka
+  - cruise-control
diff --git a/charts/kafka-cruise-control/README.md b/charts/kafka-cruise-control/README.md
new file mode 100644
index 0000000..33f97dc
--- /dev/null
+++ b/charts/kafka-cruise-control/README.md
@@ -0,0 +1,136 @@
+# kafka-cruise-control
+
+A Helm chart to deploy Kafka Cruise Control
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| ialejandro | | |
+
+## Prerequisites
+
+* Helm 3+
+
+## Add repository
+
+```console
+helm repo add devops-ia https://devops-ia.github.io/helm-charts
+helm repo update
+```
+
+## Install Helm chart
+
+```console
+helm install [RELEASE_NAME] devops-ia/kafka-cruise-control
+```
+
+This installs all the Kubernetes components associated with the chart and creates the release.
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Helm chart
+
+```console
+# Helm
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
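+
+If the release was installed in a dedicated namespace, pass that namespace explicitly (a standard Helm flag; `[RELEASE_NAME]` and `[NAMESPACE]` are placeholders):
+
+```console
+helm uninstall [RELEASE_NAME] --namespace [NAMESPACE]
+```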
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Configuration + +See [Customizing the chart before installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with comments: + +```console +helm show values devops-ia/kafka-cruise-control +``` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment | +| autoscaling | object | `{"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80}` | Autoscaling with CPU or memory utilization percentage | +| config | object | `{"anomaly.detection.goals":["com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal"],"anomaly.detection.interval.ms":"10000","anomaly.notifier.class":"com.linkedin.kafka.cruisecontrol.detector.notifier.SelfHealingNotifier","bootstrap.servers":"localhost:9092","broker.metric.sample.store.topic":"__KafkaCruiseControlModelTrainingSamples","broker.metrics.window.ms":300000,"broker.sample.store.topic.partition.count":8,"capacity.config.file":"config/capacityCores.json","client.id":"kafka-cruise-control","cluster.configs.file":"config/clusterConfigs.json","completed.cruise.control.admin.user.task.retention.time.ms":604800000,"completed.cruise.control.monitor.user.task.retention.time.ms":86400000,"completed.kafka.admin.user.task.retention.time.ms":604800000,"completed.kafka.monitor.user.task.retention.time.ms":86400000,"completed.user.task.retention.time.ms":86400000,"connections.max.idle.ms":540000,"cpu.balance.threshold":1.1,"cpu.capacity.threshold":0.7,"cpu.low.utilization.threshold":0,"default.goals":["com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderBytesInDistributionGoal"],"default.replica.movement.strategies":["com.linkedin.kafka.cruisecontrol.executor.strategy.BaseReplicaMovementStrategy"],"demotion.history.retention.time.ms":1209600000,"disk.b
alance.threshold":1.1,"disk.capacity.threshold":0.8,"disk.low.utilization.threshold":0,"execution.progress.check.interval.ms":10000,"goals":["com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderBytesInDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerDiskUsageDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerEvenRackAwareGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.PreferredLeaderElectionGoal"],"hard.goals":["com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal"],"intra.broker.goals":["com.linkedin.kafka.cruisecontrol.analyzer.goals.IntraBrokerDiskCapacityGoal","com.linkedin.kafka.cruisecontrol.analyzer.goals.IntraBrokerDiskUsageDistributionGoal"],"max.active.user.tasks":5,"max.cached.completed.cruise.control.admin.user.tasks":30,"max.cached.completed.cruise.control.monitor.user.tasks":20,"max.cached.completed.kafka.admin.user.tasks":30,"max.cached.completed.kafka.monitor.user.tasks":20,"max.cached.completed.user.tasks":25,"max.num.cluster.partition.movements":1250,"max.replicas.per.broker":10000,"metric.anomaly.analyzer.metrics":["BROKER_PRODUCE_LOCAL_TIME_MS_50TH","BROKER_PRODUCE_LOCAL_TIME_MS_999TH","BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_50TH","BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_999TH","BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_50TH","BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_999TH","BROKER_LOG_FLUSH_TIME_MS_50TH","BROKER_LOG_FLUSH_TIME_MS_999TH"],"metric.anomaly.detection.interval.ms":120000,"metric.anomaly.finder.class":"com.linkedin.kafka.cruisecontrol.detector.KafkaMetricAnomalyFinder","metric.anomaly.percentile.lower.threshold":10,"metric.anomaly.percentile.upper.threshold":90,"metric.sampler.class":"com.linkedin.kafka.cruisecontrol.monitor.s
ampling.prometheus.PrometheusMetricSampler","metric.sampler.partition.assignor.class":"com.linkedin.kafka.cruisecontrol.monitor.sampling.DefaultMetricSamplerPartitionAssignor","metric.sampling.interval.ms":120000,"min.samples.per.broker.metrics.window":1,"min.samples.per.partition.metrics.window":1,"min.valid.partition.ratio":0.95,"network.inbound.balance.threshold":1.1,"network.inbound.capacity.threshold":0.8,"network.inbound.low.utilization.threshold":0,"network.outbound.balance.threshold":1.1,"network.outbound.capacity.threshold":0.8,"network.outbound.low.utilization.threshold":0,"num.broker.metrics.windows":20,"num.concurrent.intra.broker.partition.movements":2,"num.concurrent.leader.movements":1000,"num.concurrent.partition.movements.per.broker":5,"num.partition.metrics.windows":5,"num.proposal.precompute.threads":1,"num.sample.loading.threads":8,"partition.metric.sample.store.topic":"__KafkaCruiseControlPartitionMetricSamples","partition.metrics.window.ms":300000,"partition.sample.store.topic.partition.count":8,"prometheus.server.endpoint":"prometheus.prometheus:9090","proposal.expiration.ms":60000,"removal.history.retention.time.ms":1209600000,"replica.count.balance.threshold":1.1,"replica.movement.strategies":["com.linkedin.kafka.cruisecontrol.executor.strategy.PostponeUrpReplicaMovementStrategy","com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeLargeReplicaMovementStrategy","com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeSmallReplicaMovementStrategy","com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeMinIsrWithOfflineReplicasStrategy","com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeOneAboveMinIsrWithOfflineReplicasStrategy","com.linkedin.kafka.cruisecontrol.executor.strategy.BaseReplicaMovementStrategy"],"sample.store.class":"com.linkedin.kafka.cruisecontrol.monitor.sampling.KafkaSampleStore","sample.store.topic.replication.factor":2,"sampling.allow.cpu.capacity.estimation":true,"self.healing.disk.failure.enabled":false,"self.healing.enabled":false,"self.healing.exclude.recently.demoted.brokers":true,"self.healing.exclude.recently.removed.brokers":true,"self.healing.goal.violation.enabled":false,"self.healing.maintenance.event.enabled":false,"self.healing.metric.anomaly.enabled":false,"self.healing.topic.anomaly.enabled":false,"topic.anomaly.finder.class":"com.linkedin.kafka.cruisecontrol.detector.TopicReplicationFactorAnomalyFinder","topic.config.provider.class":"com.linkedin.kafka.cruisecontrol.config.KafkaAdminTopicConfigProvider","topics.excluded.from.partition.movement":"__consumer_offsets.*|__amazon_msk_canary.*|__amazon_msk_connect.*|__KafkaCruiseControl.*","two.step.purgatory.max.requests":25,"two.step.purgatory.retention.time.ms":1209600000,"two.step.verification.enabled":false,"vertx.enabled":false,"webserver.accesslog.enabled":true,"webserver.api.urlprefix":"/kafkacruisecontrol/*","webserver.http.address":"0.0.0.0","webserver.http.cors.enabled":false,"webserver.http.port":9090,"webserver.request.maxBlockTimeMs":10000,"webserver.session.maxExpiryTimeMs":60000,"webserver.session.path":"/","webserver.ui.diskpath":"./cruise-control-ui/dist/","webserver.ui.urlprefix":"/*","zookeeper.security.enabled":false}` | kafka-cruise-control service configuration ref: https://github.com/linkedin/cruise-control/wiki/Configurations | +| fullnameOverride | string | `""` | String to fully override kafka-cruise-control.fullname template | +| image | object | 
`{"pullPolicy":"IfNotPresent","repository":"https://hub.docker.com/r/devopsiaci/cruise-control","tag":""}` | Image registry | +| imagePullSecrets | list | `[]` | Global Docker registry secret names as an array | +| ingress | object | `{"annotations":{},"className":"","enabled":false,"hosts":[{"host":"chart-example.local","paths":[{"path":"/","pathType":"ImplementationSpecific"}]}],"tls":[]}` | Ingress configuration to expose app | +| kafkaCluster | object | `{"networkIn":1000,"networkOut":1000,"numCores":2,"storage":1024}` | Cluster configuration | +| livenessProbe | object | `{"httpGet":{"path":"/","port":"http"}}` | Liveness probe configuration to check if the application is running | +| log4j."appender.console.layout.pattern" | string | `"[%d] %p %m (%c)%n"` | | +| log4j."appender.console.layout.type" | string | `"PatternLayout"` | | +| log4j."appender.console.name" | string | `"STDOUT"` | | +| log4j."appender.console.type" | string | `"Console"` | | +| log4j."appender.kafkaCruiseControlAppender.fileName" | string | `"${filename}/kafkacruisecontrol.log"` | | +| log4j."appender.kafkaCruiseControlAppender.filePattern" | string | `"${filename}/kafkacruisecontrol.log.%d{yyyy-MM-dd-HH}"` | | +| log4j."appender.kafkaCruiseControlAppender.layout.pattern" | string | `"[%d] %p %m (%c)%n"` | | +| log4j."appender.kafkaCruiseControlAppender.layout.type" | string | `"PatternLayout"` | | +| log4j."appender.kafkaCruiseControlAppender.name" | string | `"kafkaCruiseControlFile"` | | +| log4j."appender.kafkaCruiseControlAppender.policies.time.interval" | int | `1` | | +| log4j."appender.kafkaCruiseControlAppender.policies.time.type" | string | `"TimeBasedTriggeringPolicy"` | | +| log4j."appender.kafkaCruiseControlAppender.policies.type" | string | `"Policies"` | | +| log4j."appender.kafkaCruiseControlAppender.type" | string | `"RollingFile"` | | +| log4j."appender.operationAppender.fileName" | string | `"${filename}/kafkacruisecontrol-operation.log"` | | +| log4j."appender.operationAppender.filePattern" | string | `"${filename}/kafkacruisecontrol-operation.log.%d{yyyy-MM-dd}"` | | +| log4j."appender.operationAppender.layout.pattern" | string | `"[%d] %p [%c] %m %n"` | | +| log4j."appender.operationAppender.layout.type" | string | `"PatternLayout"` | | +| log4j."appender.operationAppender.name" | string | `"operationFile"` | | +| log4j."appender.operationAppender.policies.time.interval" | int | `1` | | +| log4j."appender.operationAppender.policies.time.type" | string | `"TimeBasedTriggeringPolicy"` | | +| log4j."appender.operationAppender.policies.type" | string | `"Policies"` | | +| log4j."appender.operationAppender.type" | string | `"RollingFile"` | | +| log4j."appender.requestAppender.fileName" | string | `"${filename}/kafkacruisecontrol-request.log"` | | +| log4j."appender.requestAppender.filePattern" | string | `"${filename}/kafkacruisecontrol-request.log.%d{yyyy-MM-dd-HH}"` | | +| log4j."appender.requestAppender.layout.pattern" | string | `"[%d] %p %m (%c)%n"` | | +| log4j."appender.requestAppender.layout.type" | string | `"PatternLayout"` | | +| log4j."appender.requestAppender.name" | string | `"requestFile"` | | +| log4j."appender.requestAppender.policies.time.interval" | int | `1` | | +| log4j."appender.requestAppender.policies.time.type" | string | `"TimeBasedTriggeringPolicy"` | | +| log4j."appender.requestAppender.policies.type" | string | `"Policies"` | | +| log4j."appender.requestAppender.type" | string | `"RollingFile"` | | +| 
log4j."logger.CruiseControlPublicAccessLogger.appenderRef.requestAppender.ref" | string | `"requestFile"` | | +| log4j."logger.CruiseControlPublicAccessLogger.level" | string | `"info"` | | +| log4j."logger.CruiseControlPublicAccessLogger.name" | string | `"CruiseControlPublicAccessLogger"` | | +| log4j."logger.cruisecontrol.appenderRef.kafkaCruiseControlAppender.ref" | string | `"kafkaCruiseControlFile"` | | +| log4j."logger.cruisecontrol.level" | string | `"info"` | | +| log4j."logger.cruisecontrol.name" | string | `"com.linkedin.kafka.cruisecontrol"` | | +| log4j."logger.detector.appenderRef.kafkaCruiseControlAppender.ref" | string | `"kafkaCruiseControlFile"` | | +| log4j."logger.detector.level" | string | `"info"` | | +| log4j."logger.detector.name" | string | `"com.linkedin.kafka.cruisecontrol.detector"` | | +| log4j."logger.operationLogger.appenderRef.operationAppender.ref" | string | `"operationFile"` | | +| log4j."logger.operationLogger.level" | string | `"info"` | | +| log4j."logger.operationLogger.name" | string | `"operationLogger"` | | +| log4j."property.filename" | string | `"./logs"` | | +| log4j."rootLogger.appenderRef.console.ref" | string | `"STDOUT"` | | +| log4j."rootLogger.appenderRef.kafkaCruiseControlAppender.ref" | string | `"kafkaCruiseControlFile"` | | +| log4j."rootLogger.appenderRefs" | string | `"console, kafkaCruiseControlAppender"` | | +| log4j."rootLogger.level" | string | `"INFO"` | | +| log4j.appenders | string | `"console, kafkaCruiseControlAppender, operationAppender, requestAppender"` | | +| nameOverride | string | `""` | String to partially override kafka-cruise-control.fullname template (will maintain the release name) | +| nodeSelector | object | `{}` | Node labels for pod assignment | +| podAnnotations | object | `{}` | Pod annotations | +| podLabels | object | `{}` | Pod labels | +| podSecurityContext | object | `{}` | To specify security settings for a Pod | +| readinessProbe | object | `{"httpGet":{"path":"/","port":"http"}}` | Readiness probe configuration to check if the application is ready to accept traffic | +| replicaCount | int | `1` | Number of replicas | +| resources | object | `{}` | The resources limits and requested | +| securityContext | object | `{}` | Defines privilege and access control settings for a Pod or Container | +| service | object | `{"port":80,"targetPort":9090,"type":"ClusterIP"}` | Kubernetes servide to expose Pod | +| service.port | int | `80` | Kubernetes Service port | +| service.targetPort | int | `9090` | Pod expose port | +| service.type | string | `"ClusterIP"` | Kubernetes Service type. Allowed values: NodePort, LoadBalancer or ClusterIP | +| serviceAccount | object | `{"annotations":{},"automount":false,"create":true,"name":""}` | Enable creation of ServiceAccount | +| testConnection | bool | `false` | Enable livenessProbe and readinessProbe | +| tolerations | list | `[]` | Tolerations for pod assignment | +| volumeMounts | list | `[]` | | +| volumes | list | `[]` | | diff --git a/charts/kafka-cruise-control/README.md.gotmpl b/charts/kafka-cruise-control/README.md.gotmpl new file mode 100644 index 0000000..c107285 --- /dev/null +++ b/charts/kafka-cruise-control/README.md.gotmpl @@ -0,0 +1,51 @@ +# kafka-cruise-control + +# {{ template "chart.name" . }} + +{{ template "chart.description" . }} + +{{ template "chart.maintainersSection" . }} + +## Prerequisites + +* Helm 3+ + +{{ template "chart.requirementsSection" . 
+
+## Add repository
+
+```console
+helm repo add devops-ia https://devops-ia.github.io/helm-charts
+helm repo update
+```
+
+## Install Helm chart
+
+```console
+helm install [RELEASE_NAME] devops-ia/{{ template "chart.name" . }}
+```
+
+This installs all the Kubernetes components associated with the chart and creates the release.
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Helm chart
+
+```console
+# Helm
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Configuration
+
+See [Customizing the chart before installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with comments:
+
+```console
+helm show values devops-ia/{{ template "chart.name" . }}
+```
+
+{{ template "chart.valuesSection" . }}
diff --git a/charts/kafka-cruise-control/templates/NOTES.txt b/charts/kafka-cruise-control/templates/NOTES.txt
new file mode 100644
index 0000000..d8ea4d8
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-cruise-control.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-cruise-control.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-cruise-control.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-cruise-control.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/charts/kafka-cruise-control/templates/_helpers.tpl b/charts/kafka-cruise-control/templates/_helpers.tpl
new file mode 100644
index 0000000..1ffd4f2
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kafka-cruise-control.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kafka-cruise-control.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kafka-cruise-control.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kafka-cruise-control.labels" -}} +helm.sh/chart: {{ include "kafka-cruise-control.chart" . }} +{{ include "kafka-cruise-control.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kafka-cruise-control.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kafka-cruise-control.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kafka-cruise-control.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "kafka-cruise-control.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/kafka-cruise-control/templates/configmap.yaml b/charts/kafka-cruise-control/templates/configmap.yaml new file mode 100644 index 0000000..c394a04 --- /dev/null +++ b/charts/kafka-cruise-control/templates/configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka-cruise-control.fullname" . }}-config + labels: + {{- include "kafka-cruise-control.labels" . | nindent 4 }} +data: + cruise-control.properties: | + {{- range $key, $value := .Values.config }} + {{- $valueStr := printf "%v" $value }} + {{- if and (regexMatch "^[0-9]+$" $valueStr) (lt (len $valueStr) 10) }} + {{ $key }}={{ $valueStr }} + {{- else if regexMatch "^[0-9]+\\.[0-9]+$" $valueStr }} + {{ $key }}={{ printf "%.1f" $value }} + {{- else if eq (typeOf $value) "float64" }} + {{ $key }}={{ $value | int64 }} + {{- else if eq (typeOf $value) "[]interface {}" }} + {{ $key }}={{ $value | join "," }} + {{- else }} + {{ $key }}={{ $value }} + {{- end }} + {{- end }} + capacityCores.json: | + { + "brokerCapacities":[ + { + "brokerId": "-1", + "capacity": { + "DISK": {{ .Values.kafkaCluster.storage | int64 | quote }}, + "CPU": {"num.cores": {{ .Values.kafkaCluster.numCores | quote }}}, + "NW_IN": {{ .Values.kafkaCluster.networkIn | int64 | quote }}, + "NW_OUT": {{ .Values.kafkaCluster.networkOut | default .Values.kafkaCluster.networkIn | int64 | quote }} + }, + "doc": "This is the default capacity. Capacity unit used for disk is in MB, cpu is in number of cores, network throughput is in KB." 
+        }
+      ]
+    }
+  log4j.properties: |
+    {{- range $key, $value := .Values.log4j }}
+    {{ $key }}={{ $value }}
+    {{- end }}
diff --git a/charts/kafka-cruise-control/templates/deployment.yaml b/charts/kafka-cruise-control/templates/deployment.yaml
new file mode 100644
index 0000000..82f096b
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/deployment.yaml
@@ -0,0 +1,83 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "kafka-cruise-control.fullname" . }}
+  labels:
+    {{- include "kafka-cruise-control.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "kafka-cruise-control.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      labels:
+        {{- include "kafka-cruise-control.labels" . | nindent 8 }}
+        {{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "kafka-cruise-control.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.targetPort | default .Values.service.port }}
+              protocol: TCP
+          {{- if .Values.testConnection }}
+          livenessProbe:
+            {{- toYaml .Values.livenessProbe | nindent 12 }}
+          readinessProbe:
+            {{- toYaml .Values.readinessProbe | nindent 12 }}
+          {{- end }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /cruise-control/config/cruisecontrol.properties
+              subPath: cruise-control.properties
+            - name: config-volume
+              mountPath: /cruise-control/config/capacityCores.json
+              subPath: capacityCores.json
+            - name: config-volume
+              mountPath: /cruise-control/config/log4j.properties
+              subPath: log4j.properties
+            {{- with .Values.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ include "kafka-cruise-control.fullname" . }}-config
+        {{- with .Values.volumes }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/charts/kafka-cruise-control/templates/hpa.yaml b/charts/kafka-cruise-control/templates/hpa.yaml
new file mode 100644
index 0000000..0d7e34a
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/hpa.yaml
@@ -0,0 +1,32 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "kafka-cruise-control.fullname" . }}
+  labels:
+    {{- include "kafka-cruise-control.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kafka-cruise-control.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/kafka-cruise-control/templates/ingress.yaml b/charts/kafka-cruise-control/templates/ingress.yaml new file mode 100644 index 0000000..c6634d2 --- /dev/null +++ b/charts/kafka-cruise-control/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "kafka-cruise-control.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "kafka-cruise-control.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/kafka-cruise-control/templates/service.yaml b/charts/kafka-cruise-control/templates/service.yaml new file mode 100644 index 0000000..2dc9f6d --- /dev/null +++ b/charts/kafka-cruise-control/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kafka-cruise-control.fullname" . }} + labels: + {{- include "kafka-cruise-control.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort | default .Values.service.port }} + protocol: TCP + name: http + selector: + {{- include "kafka-cruise-control.selectorLabels" . 
| nindent 4 }}
diff --git a/charts/kafka-cruise-control/templates/serviceaccount.yaml b/charts/kafka-cruise-control/templates/serviceaccount.yaml
new file mode 100644
index 0000000..0ae8da6
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "kafka-cruise-control.serviceAccountName" . }}
+  labels:
+    {{- include "kafka-cruise-control.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
diff --git a/charts/kafka-cruise-control/templates/tests/test-connection.yaml b/charts/kafka-cruise-control/templates/tests/test-connection.yaml
new file mode 100644
index 0000000..ebff63d
--- /dev/null
+++ b/charts/kafka-cruise-control/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "kafka-cruise-control.fullname" . }}-test-connection"
+  labels:
+    {{- include "kafka-cruise-control.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "kafka-cruise-control.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
diff --git a/charts/kafka-cruise-control/values.yaml b/charts/kafka-cruise-control/values.yaml
new file mode 100644
index 0000000..8029077
--- /dev/null
+++ b/charts/kafka-cruise-control/values.yaml
@@ -0,0 +1,400 @@
+# -- Number of replicas
+replicaCount: 1
+
+# -- Image registry
+image:
+  # Repository:
+  repository: devopsiaci/cruise-control
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+
+# -- String to partially override kafka-cruise-control.fullname template (will maintain the release name)
+nameOverride: ""
+
+# -- String to fully override kafka-cruise-control.fullname template
+fullnameOverride: ""
+
+# -- Global Docker registry secret names as an array
+imagePullSecrets: []
+
+# -- Enable creation of ServiceAccount
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+  # Specifies if you don't want the kubelet to automatically mount
+  # a ServiceAccount's API credentials
+  automount: false
+
+# -- Pod annotations
+podAnnotations: {}
+
+# -- Pod labels
+podLabels: {}
+
+# -- To specify security settings for a Pod
+podSecurityContext: {}
+  # fsGroup: 2000
+
+# -- Defines privilege and access control settings for a Pod or Container
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+# -- Kubernetes service to expose Pod
+service:
+  # -- Kubernetes Service type. Allowed values: NodePort, LoadBalancer or ClusterIP
+  type: ClusterIP
+  # -- Kubernetes Service port
+  port: 80
+  # -- Port exposed by the Pod
+  targetPort: 9090
+
+# -- Ingress configuration to expose app
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+# -- The resource limits and requests
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# -- Enable livenessProbe and readinessProbe
+testConnection: false
+
+# -- Liveness probe configuration to check if the application is running
+livenessProbe:
+  httpGet:
+    path: /
+    port: http
+
+# -- Readiness probe configuration to check if the application is ready to accept traffic
+readinessProbe:
+  httpGet:
+    path: /
+    port: http
+
+# -- Autoscaling with CPU or memory utilization percentage
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+# -- Additional volumes on the output Deployment definition
+volumes: []
+# - name: foo
+#   secret:
+#     secretName: mysecret
+#     optional: false
+
+# -- Additional volumeMounts on the output Deployment definition
+volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +# -- Node labels for pod assignment +nodeSelector: {} + +# -- Tolerations for pod assignment +tolerations: [] + +# -- Affinity for pod assignment +affinity: {} + +# -- Cluster configuration +kafkaCluster: + storage: 1024 + numCores: 2 + networkIn: 1000 + networkOut: 1000 + +# -- kafka-cruise-control service configuration +# ref: https://github.com/linkedin/cruise-control/wiki/Configurations +config: + # METADATA CLIENT + bootstrap.servers: "localhost:9092" + client.id: "kafka-cruise-control" + connections.max.idle.ms: 540000 + # logdir.response.timeout.ms: 10000 + # metadata.max.age.ms: 300000 + # receive.buffer.bytes: 131072 + # reconnect.backoff.ms: 50 + # request.timeout.ms: 30000 + # send.buffer.bytes: 131072 + + # LOAD MONITOR + broker.metric.sample.store.topic: "__KafkaCruiseControlModelTrainingSamples" + broker.metrics.window.ms: 300000 + broker.sample.store.topic.partition.count: 8 + capacity.config.file: config/capacityCores.json + metric.sampler.class: com.linkedin.kafka.cruisecontrol.monitor.sampling.prometheus.PrometheusMetricSampler + metric.sampler.partition.assignor.class: "com.linkedin.kafka.cruisecontrol.monitor.sampling.DefaultMetricSamplerPartitionAssignor" + metric.sampling.interval.ms: 120000 + min.samples.per.broker.metrics.window: 1 + min.samples.per.partition.metrics.window: 1 + num.broker.metrics.windows: 20 + num.partition.metrics.windows: 5 + num.sample.loading.threads: 8 + partition.metric.sample.store.topic: "__KafkaCruiseControlPartitionMetricSamples" + partition.metrics.window.ms: 300000 + partition.sample.store.topic.partition.count: 8 + prometheus.server.endpoint: prometheus.prometheus:9090 + sample.store.class: "com.linkedin.kafka.cruisecontrol.monitor.sampling.KafkaSampleStore" + sample.store.topic.replication.factor: 2 + sampling.allow.cpu.capacity.estimation: true + + # ANALYZER + cpu.balance.threshold: 1.1 + cpu.capacity.threshold: 0.7 + cpu.low.utilization.threshold: 0.0 + default.goals: + - com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderBytesInDistributionGoal + disk.balance.threshold: 1.1 + disk.capacity.threshold: 0.8 + disk.low.utilization.threshold: 0.0 + #goal.balancedness.priority.weight: + #goal.balancedness.strictness.weight: + goals: + - com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal + - 
com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.PotentialNwOutGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.LeaderBytesInDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerDiskUsageDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.kafkaassigner.KafkaAssignerEvenRackAwareGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.PreferredLeaderElectionGoal + hard.goals: + - com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.TopicReplicaDistributionGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal + intra.broker.goals: + - com.linkedin.kafka.cruisecontrol.analyzer.goals.IntraBrokerDiskCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.IntraBrokerDiskUsageDistributionGoal + max.replicas.per.broker: 10000 + metric.anomaly.percentile.lower.threshold: 10.0 + metric.anomaly.percentile.upper.threshold: 90.0 + min.valid.partition.ratio: 0.95 + network.inbound.balance.threshold: 1.1 + network.inbound.capacity.threshold: 0.8 + network.inbound.low.utilization.threshold: 0.0 + network.outbound.balance.threshold: 1.1 + network.outbound.capacity.threshold: 0.8 + network.outbound.low.utilization.threshold: 0.0 + num.proposal.precompute.threads: 1 + proposal.expiration.ms: 60000 + replica.count.balance.threshold: 1.1 + topics.excluded.from.partition.movement: __consumer_offsets.*|__amazon_msk_canary.*|__amazon_msk_connect.*|__KafkaCruiseControl.* + + # EXECUTOR + #default.replication.throttle: + execution.progress.check.interval.ms: 10000 + max.num.cluster.partition.movements: 1250 + num.concurrent.intra.broker.partition.movements: 2 + num.concurrent.leader.movements: 1000 + num.concurrent.partition.movements.per.broker: 5 + zookeeper.security.enabled: false + replica.movement.strategies: + - com.linkedin.kafka.cruisecontrol.executor.strategy.PostponeUrpReplicaMovementStrategy + - 
com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeLargeReplicaMovementStrategy + - com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeSmallReplicaMovementStrategy + - com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeMinIsrWithOfflineReplicasStrategy + - com.linkedin.kafka.cruisecontrol.executor.strategy.PrioritizeOneAboveMinIsrWithOfflineReplicasStrategy + - com.linkedin.kafka.cruisecontrol.executor.strategy.BaseReplicaMovementStrategy + default.replica.movement.strategies: + - com.linkedin.kafka.cruisecontrol.executor.strategy.BaseReplicaMovementStrategy + + # ANOMALY DETECTOR + anomaly.detection.goals: + - com.linkedin.kafka.cruisecontrol.analyzer.goals.RackAwareGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.MinTopicLeadersPerBrokerGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal + - com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal + anomaly.detection.interval.ms: "10000" + anomaly.notifier.class: com.linkedin.kafka.cruisecontrol.detector.notifier.SelfHealingNotifier + cluster.configs.file: "config/clusterConfigs.json" + completed.cruise.control.admin.user.task.retention.time.ms: 604800000 + completed.cruise.control.monitor.user.task.retention.time.ms: 86400000 + completed.kafka.admin.user.task.retention.time.ms: 604800000 + completed.kafka.monitor.user.task.retention.time.ms: 86400000 + completed.user.task.retention.time.ms: 86400000 + demotion.history.retention.time.ms: 1209600000 + #goal.violation.distribution.threshold.multiplier: 2.50 + max.active.user.tasks: 5 + max.cached.completed.cruise.control.admin.user.tasks: 30 + max.cached.completed.cruise.control.monitor.user.tasks: 20 + max.cached.completed.kafka.admin.user.tasks: 30 + max.cached.completed.kafka.monitor.user.tasks: 20 + max.cached.completed.user.tasks: 25 + metric.anomaly.analyzer.metrics: + - BROKER_PRODUCE_LOCAL_TIME_MS_50TH + - BROKER_PRODUCE_LOCAL_TIME_MS_999TH + - BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_50TH + - BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_999TH + - BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_50TH + - BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_999TH + - BROKER_LOG_FLUSH_TIME_MS_50TH + - BROKER_LOG_FLUSH_TIME_MS_999TH + metric.anomaly.detection.interval.ms: 120000 + metric.anomaly.finder.class: com.linkedin.kafka.cruisecontrol.detector.KafkaMetricAnomalyFinder + removal.history.retention.time.ms: 1209600000 + #self.healing.broker.failure.enabled: true + self.healing.enabled: false + self.healing.exclude.recently.demoted.brokers: true + self.healing.exclude.recently.removed.brokers: true + self.healing.disk.failure.enabled: false + self.healing.goal.violation.enabled: false + self.healing.maintenance.event.enabled: false + self.healing.metric.anomaly.enabled: false + self.healing.topic.anomaly.enabled: false + topic.anomaly.finder.class: com.linkedin.kafka.cruisecontrol.detector.TopicReplicationFactorAnomalyFinder + topic.config.provider.class: com.linkedin.kafka.cruisecontrol.config.KafkaAdminTopicConfigProvider + + # WEBSERVER + webserver.accesslog.enabled: true + webserver.api.urlprefix: /kafkacruisecontrol/* + webserver.http.address: 0.0.0.0 + webserver.http.cors.enabled: false + webserver.http.port: 9090 + webserver.request.maxBlockTimeMs: 10000 + webserver.session.maxExpiryTimeMs: 60000 + webserver.session.path: / + 
webserver.ui.diskpath: ./cruise-control-ui/dist/ + webserver.ui.urlprefix: /* + + # SERVLET + two.step.purgatory.max.requests: 25 + two.step.purgatory.retention.time.ms: 1209600000 + two.step.verification.enabled: false + vertx.enabled: false + +# kafka-cruise-control log4j configuration +log4j: + rootLogger.level: INFO + appenders: console, kafkaCruiseControlAppender, operationAppender, requestAppender + + property.filename: ./logs + + appender.console.type: Console + appender.console.name: STDOUT + appender.console.layout.type: PatternLayout + appender.console.layout.pattern: "[%d] %p %m (%c)%n" + + appender.kafkaCruiseControlAppender.type: RollingFile + appender.kafkaCruiseControlAppender.name: kafkaCruiseControlFile + appender.kafkaCruiseControlAppender.fileName: ${filename}/kafkacruisecontrol.log + appender.kafkaCruiseControlAppender.filePattern: ${filename}/kafkacruisecontrol.log.%d{yyyy-MM-dd-HH} + appender.kafkaCruiseControlAppender.layout.type: PatternLayout + appender.kafkaCruiseControlAppender.layout.pattern: "[%d] %p %m (%c)%n" + appender.kafkaCruiseControlAppender.policies.type: Policies + appender.kafkaCruiseControlAppender.policies.time.type: TimeBasedTriggeringPolicy + appender.kafkaCruiseControlAppender.policies.time.interval: 1 + + appender.operationAppender.type: RollingFile + appender.operationAppender.name: operationFile + appender.operationAppender.fileName: ${filename}/kafkacruisecontrol-operation.log + appender.operationAppender.filePattern: ${filename}/kafkacruisecontrol-operation.log.%d{yyyy-MM-dd} + appender.operationAppender.layout.type: PatternLayout + appender.operationAppender.layout.pattern: "[%d] %p [%c] %m %n" + appender.operationAppender.policies.type: Policies + appender.operationAppender.policies.time.type: TimeBasedTriggeringPolicy + appender.operationAppender.policies.time.interval: 1 + + appender.requestAppender.type: RollingFile + appender.requestAppender.name: requestFile + appender.requestAppender.fileName: ${filename}/kafkacruisecontrol-request.log + appender.requestAppender.filePattern: ${filename}/kafkacruisecontrol-request.log.%d{yyyy-MM-dd-HH} + appender.requestAppender.layout.type: PatternLayout + appender.requestAppender.layout.pattern: "[%d] %p %m (%c)%n" + appender.requestAppender.policies.type: Policies + appender.requestAppender.policies.time.type: TimeBasedTriggeringPolicy + appender.requestAppender.policies.time.interval: 1 + + # Loggers + logger.cruisecontrol.name: com.linkedin.kafka.cruisecontrol + logger.cruisecontrol.level: info + logger.cruisecontrol.appenderRef.kafkaCruiseControlAppender.ref: kafkaCruiseControlFile + + logger.detector.name: com.linkedin.kafka.cruisecontrol.detector + logger.detector.level: info + logger.detector.appenderRef.kafkaCruiseControlAppender.ref: kafkaCruiseControlFile + + logger.operationLogger.name: operationLogger + logger.operationLogger.level: info + logger.operationLogger.appenderRef.operationAppender.ref: operationFile + + logger.CruiseControlPublicAccessLogger.name: CruiseControlPublicAccessLogger + logger.CruiseControlPublicAccessLogger.level: info + logger.CruiseControlPublicAccessLogger.appenderRef.requestAppender.ref: requestFile + + rootLogger.appenderRefs: console, kafkaCruiseControlAppender + rootLogger.appenderRef.console.ref: STDOUT + rootLogger.appenderRef.kafkaCruiseControlAppender.ref: kafkaCruiseControlFile
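
As a usage sketch (the override file, endpoints, sizes, and release name below are illustrative, not shipped with the chart): point `config."bootstrap.servers"` and `config."prometheus.server.endpoint"` at the target cluster, and size `kafkaCluster` per broker; in the rendered capacityCores.json, DISK is in MB, CPU is a core count, and NW_IN/NW_OUT are in KB:

```console
# Hypothetical override file; adjust endpoints and capacities to your cluster.
cat > my-values.yaml <<'EOF'
config:
  bootstrap.servers: "kafka-0.kafka:9092,kafka-1.kafka:9092"
  prometheus.server.endpoint: "prometheus-server.monitoring:9090"
kafkaCluster:
  storage: 4096     # DISK capacity per broker, MB
  numCores: 4       # CPU cores per broker
  networkIn: 2000   # NW_IN, KB
  networkOut: 2000  # NW_OUT, KB
EOF

helm install cruise-control devops-ia/kafka-cruise-control -f my-values.yaml
```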