diff --git a/charts/retool/.helmignore b/.helmignore similarity index 100% rename from charts/retool/.helmignore rename to .helmignore diff --git a/charts/retool-temporal-services/Chart.yaml b/charts/retool-temporal-services/Chart.yaml new file mode 100644 index 00000000..6d72e54d --- /dev/null +++ b/charts/retool-temporal-services/Chart.yaml @@ -0,0 +1,55 @@ +apiVersion: v2 + +name: retool-temporal-services + +description: A Helm chart for Temporal server for Retool Workflows. + +home: https://temporal.io/ + +keywords: + - temporal + - workflow + - orchestration + +maintainers: + - name: Retool Engineering + email: engineering+helm@retool.com + +sources: + - https://github.com/temporalio/temporal + +dependencies: + - name: cassandra + version: "0.14.3" + repository: https://charts.helm.sh/incubator + condition: cassandra.enabled + - name: prometheus + repository: https://prometheus-community.github.io/helm-charts + version: 15.1.3 + condition: prometheus.enabled + - name: elasticsearch + repository: https://helm.elastic.co + version: 7.16.3 + condition: elasticsearch.enabled + - name: grafana + repository: https://grafana.github.io/helm-charts + version: 5.0.10 + condition: grafana.enabled + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 1.1.2 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. +appVersion: 1.18.5 diff --git a/charts/retool-temporal-services/LICENSE b/charts/retool-temporal-services/LICENSE new file mode 100644 index 00000000..20a609ec --- /dev/null +++ b/charts/retool-temporal-services/LICENSE @@ -0,0 +1,23 @@ +The MIT License + +Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. + +Copyright (c) 2020 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/charts/retool-temporal-services/README.md b/charts/retool-temporal-services/README.md new file mode 100644 index 00000000..5d71adde --- /dev/null +++ b/charts/retool-temporal-services/README.md @@ -0,0 +1,549 @@ +# Temporal Helm Chart +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts?ref=badge_shield) + +Temporal is a distributed, scalable, durable, and highly available orchestration engine designed to execute asynchronous long-running business logic in a resilient way. + +This repo contains a basic V3 [Helm](https://helm.sh) chart that deploys Temporal to a Kubernetes cluster. The dependencies that are bundled with this solution by default offer an easy way to experiment with Temporal software. This Helm chart can also be used to install just the Temporal server, configured to connect to dependencies (such as a Cassandra, MySQL, or PostgreSQL database) that you may already have available in your environment. + +**We do not recommend using Helm for managing Temporal deployments in production**. Rather, we recommend it for templating/generating manifests for Temporal's internal services only. [See our recent discussion on this topic](https://docs.temporal.io/blog/temporal-and-kubernetes/). + +This Helm Chart code is tested by a dedicated test pipeline. It is also used extensively by other Temporal pipelines for testing various aspects of Temporal systems. Our test pipeline currently uses Helm 3.1.1. + +# Install Temporal service on a Kubernetes cluster + +## Prerequisites + +This sequence assumes +* that your system is configured to access a kubernetes cluster (e. g. 
[AWS EKS](https://aws.amazon.com/eks/), [kind](https://kind.sigs.k8s.io/), or [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)), and +* that your machine has + - [AWS CLI V2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html), + - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), and + - [Helm v3](https://helm.sh) + installed and able to access your cluster. + +## Download Helm Chart Dependencies + +Download Helm dependencies: + +```bash +~/temporal-helm$ helm dependencies update +``` + +## Install Temporal with Helm Chart + +Temporal can be configured to run with various dependencies. The default "Batteries Included" Helm Chart configuration deploys and configures the following components: + +* Cassandra +* ElasticSearch +* Prometheus +* Grafana + +The sections that follow describe various deployment configurations, from a minimal one-replica installation using included dependencies, to a replicated deployment on existing infrastructure. + +### Minimal installation with required dependencies only + +To install Temporal in a limited but working and self-contained configuration (one replica of Cassandra and each of Temporal's services, no metrics or ElasticSearch), you can run the following command + +``` +~/temporal-helm$ helm install \ + --set server.replicaCount=1 \ + --set cassandra.config.cluster_size=1 \ + --set prometheus.enabled=false \ + --set grafana.enabled=false \ + --set elasticsearch.enabled=false \ + temporaltest . --timeout 15m +``` + +This configuration consumes limited resources and it is useful for small scale tests (such as using minikube). 
+ +Below is an example of an environment installed in this configuration: + +``` +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +temporaltest-admintools-6cdf56b869-xdxz2 1/1 Running 0 11m +temporaltest-cassandra-0 1/1 Running 0 11m +temporaltest-frontend-5d5b6d9c59-v9g5j 1/1 Running 2 11m +temporaltest-history-64b9ddbc4b-bwk6j 1/1 Running 2 11m +temporaltest-matching-c8887ddc4-jnzg2 1/1 Running 2 11m +temporaltest-metrics-server-7fbbf65cff-rp2ks 1/1 Running 0 11m +temporaltest-web-77f68bff76-ndkzf 1/1 Running 0 11m +temporaltest-worker-7c9d68f4cf-8tzfw 1/1 Running 2 11m +``` + +### Install with required and optional dependencies + +This method requires a three node kubernetes cluster to successfully bring up all the dependencies. + +By default, Temporal Helm Chart configures Temporal to run with a three node Cassandra cluster (for persistence) and Elasticsearch (for "visibility" features), Prometheus, and Grafana. By default, Temporal Helm Chart installs all dependencies, out of the box. + +To install Temporal with all of its dependencies run this command: + +```bash +~/temporal-helm$ helm install temporaltest . --timeout 900s +``` + +To use your own instance of ElasticSearch, MySQL, PostgreSQL, or Cassandra, please read the "Bring Your Own" sections below. + +Other components (Prometheus, Grafana) can be omitted from the installation by setting their corresponding `enable` flag to `false`: + +```bash +~/temporal-helm$ helm install \ + --set prometheus.enabled=false \ + --set grafana.enabled=false \ + temporaltest . --timeout 900s +``` + +### Install with sidecar containers + +You may need to provide your own sidecar containers. + +To do so, you may look at the example for Google's `cloud sql proxy` in the `values/values.cloudsqlproxy.yaml` and pass that file to `helm install`. + +Example: + +```bash +~/temporal-helm$ helm install -f values/values.cloudsqlproxy.yaml temporaltest . 
--timeout 900s +``` + +### Install with your own ElasticSearch + +You might already be operating an instance of ElasticSearch that you want to use with Temporal. + +To do so, fill in the relevant configuration values in `values.elasticsearch.yaml`, and pass the file to 'helm install'. + +Example: + +```bash +~/temporal-helm$ helm install -f values/values.elasticsearch.yaml temporaltest . --timeout 900s +``` + +### Install with your own MySQL + +You might already be operating a MySQL instance that you want to use with Temporal. + +In this case, create and configure temporal databases on your MySQL host with `temporal-sql-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo. + +Here are examples of commands you can use to create and initialize the databases: + +```bash +# in https://github.com/temporalio/temporal git repo dir +export SQL_PLUGIN=mysql +export SQL_HOST=mysql_host +export SQL_PORT=3306 +export SQL_USER=mysql_user +export SQL_PASSWORD=mysql_password + +./temporal-sql-tool create-database -database temporal +SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0 +SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/mysql/v57/temporal/versioned + +./temporal-sql-tool create-database -database temporal_visibility +SQL_DATABASE=temporal_visibility ./temporal-sql-tool setup-schema -v 0.0 +SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/mysql/v57/visibility/versioned +``` + +Once you initialized the two databases, fill in the configuration values in `values/values.mysql.yaml`, and run + +```bash +# in https://github.com/temporalio/helm-charts git repo dir +helm install -f values/values.mysql.yaml temporaltest . 
--timeout 900s +``` + +Alternatively, instead of modifying `values/values.mysql.yaml`, you can supply those values in your command line: + +```bash +# in https://github.com/temporalio/helm-charts git repo dir +helm install -f values/values.mysql.yaml temporaltest \ + --set elasticsearch.enabled=false \ + --set server.config.persistence.default.sql.user=mysql_user \ + --set server.config.persistence.default.sql.password=mysql_password \ + --set server.config.persistence.visibility.sql.user=mysql_user \ + --set server.config.persistence.visibility.sql.password=mysql_password \ + --set server.config.persistence.default.sql.host=mysql_host \ + --set server.config.persistence.visibility.sql.host=mysql_host . --timeout 900s +``` +*NOTE:* For MYSQL <5.7.20 (e.g AWS Aurora MySQL) use `values/values.aurora-mysql.yaml` + +### Install with your own PostgreSQL + +You might already be operating a PostgreSQL instance that you want to use with Temporal. + +In this case, create and configure temporal databases on your PostgreSQL host with `temporal-sql-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo. 
+ +Here are examples of commands you can use to create and initialize the databases: + +```bash +# in https://github.com/temporalio/temporal git repo dir +export SQL_PLUGIN=postgres +export SQL_HOST=postgresql_host +export SQL_PORT=5432 +export SQL_USER=postgresql_user +export SQL_PASSWORD=postgresql_password + +./temporal-sql-tool create-database -database temporal +SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0 +SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/postgresql/v96/temporal/versioned + +./temporal-sql-tool create-database -database temporal_visibility +SQL_DATABASE=temporal_visibility ./temporal-sql-tool setup-schema -v 0.0 +SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/postgresql/v96/visibility/versioned +``` + +Once you initialized the two databases, fill in the configuration values in `values/values.postgresql.yaml`, and run + +```bash +# in https://github.com/temporalio/helm-charts git repo dir +helm install -f values/values.postgresql.yaml temporaltest . --timeout 900s +``` + +Alternatively, instead of modifying `values/values.postgresql.yaml`, you can supply those values in your command line: + +```bash +# in https://github.com/temporalio/helm-charts git repo dir +helm install -f values/values.postgresql.yaml temporaltest \ + --set elasticsearch.enabled=false \ + --set server.config.persistence.default.sql.user=postgresql_user \ + --set server.config.persistence.default.sql.password=postgresql_password \ + --set server.config.persistence.visibility.sql.user=postgresql_user \ + --set server.config.persistence.visibility.sql.password=postgresql_password \ + --set server.config.persistence.default.sql.host=postgresql_host \ + --set server.config.persistence.visibility.sql.host=postgresql_host . --timeout 900s +``` + +### Install with your own Cassandra + +You might already be operating a Cassandra instance that you want to use with Temporal. 
+ +In this case, create and setup keyspaces in your Cassandra instance with `temporal-cassandra-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo. + + +Here are examples of commands you can use to create and initialize the keyspaces: + +```bash +# in https://github.com/temporalio/temporal git repo dir +export CASSANDRA_HOST=cassandra_host +export CASSANDRA_PORT=9042 +export CASSANDRA_USER=cassandra_user +export CASSANDRA_PASSWORD=cassandra_user_password + +./temporal-cassandra-tool create-Keyspace -k temporal +CASSANDRA_KEYSPACE=temporal ./temporal-cassandra-tool setup-schema -v 0.0 +CASSANDRA_KEYSPACE=temporal ./temporal-cassandra-tool update -schema-dir schema/cassandra/temporal/versioned + +./temporal-cassandra-tool create-Keyspace -k temporal_visibility +CASSANDRA_KEYSPACE=temporal_visibility ./temporal-cassandra-tool setup-schema  -v 0.0 +CASSANDRA_KEYSPACE=temporal_visibility ./temporal-cassandra-tool update -schema-dir schema/cassandra/visibility/versioned +``` + +Once you initialized the two keyspaces, fill in the configuration values in `values/values.cassandra.yaml`, and run + +```bash +~/temporal-helm$ helm install -f values/values.cassandra.yaml temporaltest . --timeout 900s +``` + +### Install and configure Temporal + +If a live application environment already uses systems that Temporal can use as dependencies, then those systems can continue to be used. This Helm chart can install the minimal pieces of Temporal such that it can then be configured to use those systems as its dependencies. + +The example below demonstrates a few things: + +1. How to set values via the command line rather than the environment. +2. How to configure a database (shows Cassandra, but MySQL works the same way) +3. How to enable TLS for the database connection. +4. 
How to enable Auth for the Web UI + +```bash +helm install temporaltest \ + -f values/values.cassandra.yaml \ + -f values/values.elasticsearch.yaml \ + --set grafana.enabled=false \ + --set prometheus.enabled=false \ + --set server.replicaCount=5 \ + --set server.config.persistence.default.cassandra.hosts=cassandra.data.host.example \ + --set server.config.persistence.default.cassandra.user=cassandra_user \ + --set server.config.persistence.default.cassandra.password=cassandra_user_password \ + --set server.config.persistence.default.cassandra.tls.caData=$(base64 --wrap=0 cassandra.ca.pem) \ + --set server.config.persistence.default.cassandra.tls.enabled=true \ + --set server.config.persistence.default.cassandra.replicationFactor=3 \ + --set server.config.persistence.default.cassandra.keyspace=temporal \ + --set server.config.persistence.visibility.cassandra.hosts=cassandra.vis.host.example \ + --set server.config.persistence.visibility.cassandra.user=cassandra_user_vis \ + --set server.config.persistence.visibility.cassandra.password=cassandra_user_vis_password \ + --set server.config.persistence.visibility.cassandra.tls.caData=$(base64 --wrap=0 cassandra.ca.pem) \ + --set server.config.persistence.visibility.cassandra.tls.enabled=true \ + --set server.config.persistence.visibility.cassandra.replicationFactor=3 \ + --set server.config.persistence.visibility.cassandra.keyspace=temporal_visibility \ + . \ + --timeout 15m \ + --wait +``` + +## Play With It + +### Exploring Your Cluster + +You can use your favorite kubernetes tools ([k9s](https://github.com/derailed/k9s), [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), etc.) to interact with your cluster. + +```bash +$ kubectl get svc +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +... 
+temporaltest-admintools ClusterIP 172.20.237.59 22/TCP 15m +temporaltest-frontend-headless ClusterIP None 7233/TCP,9090/TCP 15m +temporaltest-history-headless ClusterIP None 7234/TCP,9090/TCP 15m +temporaltest-matching-headless ClusterIP None 7235/TCP,9090/TCP 15m +temporaltest-worker-headless ClusterIP None 7239/TCP,9090/TCP 15m +... +``` + +``` +$ kubectl get pods +... +temporaltest-admintools-7b6c599855-8bk4x 1/1 Running 0 25m +temporaltest-frontend-54d94fdcc4-bx89b 1/1 Running 2 25m +temporaltest-history-86d8d7869-lzb6f 1/1 Running 2 25m +temporaltest-matching-6c7d6d7489-kj5pj 1/1 Running 3 25m +temporaltest-worker-769b996fd-qmvbw 1/1 Running 2 25m +... +``` + +### Running Temporal CLI From the Admin Tools Container + +You can also shell into `admin-tools` container via [k9s](https://github.com/derailed/k9s) or by running + +``` +$ kubectl exec -it services/temporaltest-admintools /bin/bash +bash-5.0# +``` + +and run Temporal CLI from there: + +``` +bash-5.0# tctl namespace list +Name: temporal-system +Id: 32049b68-7872-4094-8e63-d0dd59896a83 +Description: Temporal internal system namespace +OwnerEmail: temporal-core@temporal.io +NamespaceData: map[string]string(nil) +Status: Registered +RetentionInDays: 7 +EmitMetrics: true +ActiveClusterName: active +Clusters: active +HistoryArchivalStatus: Disabled +VisibilityArchivalStatus: Disabled +Bad binaries to reset: ++-----------------+----------+------------+--------+ +| BINARY CHECKSUM | OPERATOR | START TIME | REASON | ++-----------------+----------+------------+--------+ ++-----------------+----------+------------+--------+ +``` + +``` +bash-5.0# tctl --namespace nonesuch namespace desc +Error: Namespace nonesuch does not exist. +Error Details: Namespace nonesuch does not exist. +``` +``` +bash-5.0# tctl --namespace nonesuch namespace re +Namespace nonesuch successfully registered. 
+``` +``` +bash-5.0# tctl --namespace nonesuch namespace desc +Name: nonesuch +UUID: 465bb575-8c01-43f8-a67d-d676e1ae5eae +Description: +OwnerEmail: +NamespaceData: map[string]string(nil) +Status: Registered +RetentionInDays: 3 +EmitMetrics: false +ActiveClusterName: active +Clusters: active +HistoryArchivalStatus: ArchivalStatusDisabled +VisibilityArchivalStatus: ArchivalStatusDisabled +Bad binaries to reset: ++-----------------+----------+------------+--------+ +| BINARY CHECKSUM | OPERATOR | START TIME | REASON | ++-----------------+----------+------------+--------+ ++-----------------+----------+------------+--------+ +``` + +### Forwarding Your Machine's Local Port to Temporal FrontEnd + +You can also expose your instance's front end port on your local machine: + +``` +$ kubectl port-forward services/temporaltest-frontend-headless 7233:7233 +Forwarding from 127.0.0.1:7233 -> 7233 +Forwarding from [::1]:7233 -> 7233 +``` + +and, from a separate window, use the local port to access the service from your application or Temporal samples. + +### Forwarding Your Machine's Local Port to Temporal Web UI + +Similarly to how you accessed Temporal front end via kubernetes port forwarding, you can access your Temporal instance's web user interface. + +To do so, forward your machine's local port to the Web service in your Temporal installation + +``` +$ kubectl port-forward services/temporaltest-web 8080:8080 +Forwarding from 127.0.0.1:8080 -> 8080 +Forwarding from [::1]:8080 -> 8080 +``` + +and navigate to http://127.0.0.1:8080 in your browser. + + +### Exploring Metrics via Grafana + +By default, the full "Batteries Included" configuration comes with a few Grafana dashboards. + +To access those dashboards, follow the following steps: + +1. 
Extract Grafana's `admin` password from your installation: + +``` +$ kubectl get secret --namespace default temporaltest-grafana -o jsonpath="{.data.admin-password}" | base64 --decode + +t7EqZQpiB6BztZV321dEDppXbeisdpiEAMgnu6yy% +``` + +2. Set up port forwarding, so you can access Grafana from your host: + +``` +$ kubectl port-forward services/temporaltest-grafana 8081:80 +Forwarding from 127.0.0.1:8081 -> 3000 +Forwarding from [::1]:8081 -> 3000 +... +``` + +3. Navigate to the forwarded Grafana port in your browser (http://localhost:8081/), log in as `admin` (using the password from step 1), and click on the "Home" button (upper left corner) to see available dashboards. + +### Updating Dynamic Configs +By default, dynamic config is empty. If you want to override some properties for your cluster, you should: +1. Create a yaml file with your config (for example dc.yaml). +2. Populate it with some values under server.dynamicConfig prefix (use the sample provided at `values/values.dynamic_config.yaml` as a starting point) +3. Install your helm configuration: +```bash +$ helm install -f values/values.dynamic_config.yaml temporaltest . --timeout 900s +``` +Note that if you already have a running cluster you can use the "helm upgrade" command to change dynamic config values: +```bash +$ helm upgrade -f values/values.dynamic_config.yaml temporaltest . --timeout 900s +``` + +WARNING: The "helm upgrade" approach will trigger a rolling upgrade of all the pods. + +If a rolling upgrade is not desirable, you can also generate the ConfigMap file explicitly and then apply it using the following command: + +```bash +$ kubectl apply -f dynamicconfigmap.yaml +``` +You can use helm upgrade with the "--dry-run" option to generate the content for the dynamicconfigmap.yaml. 
+ +The dynamic-config ConfigMap is referenced as a mounted volume within the Temporal Containers, so any applied change will be automatically picked up by all pods within a few minutes without the need for pod recycling. See the Kubernetes documentation (https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#mounted-configmaps-are-updated-automatically) for more details on how this works. + +### Updating Temporal Web Config +The config file `server/config.yml` for the Temporal Web UI is referenced as a mounted volume within the Temporal Web UI Container and can be populated by inserting values in the `web.config` section in the `values.yml`. For the possible configuration options, check (https://github.com/temporalio/web#configuring-authentication-optional) + +## Uninstalling + +Note: in this example chart, uninstalling a Temporal instance also removes all the data that might have been created during its lifetime. + +```bash +~/temporal-helm $ helm uninstall temporaltest +``` + +## Upgrading + +To upgrade your cluster, upgrade your database schema (if the release includes schema changes), and then use the `helm upgrade` command to perform a rolling upgrade of your installation. + +Note: +* Not supported: running newer binaries with an older schema. +* Supported: downgrading binaries – running older binaries with a newer schema. + +Example: + +### Upgrade Schema + +Here are examples of commands you can use to upgrade the "default" and "visibility" schemas in your "bring your own" Cassandra database. + +Upgrade default schema: + +``` +temporal_v1.2.1 $ temporal-cassandra-tool \ + --tls \ + --tls-ca-file ... 
\ + --user cassandra-user \ + --password cassandra-password \ + --endpoint cassandra.example.com \ + --keyspace temporal_visibility \ + --timeout 120 \ + update \ + --schema-dir ./schema/cassandra/visibility/versioned +``` + +To upgrade your MySQL database, please use `temporal-sql-tool` tool instead of `temporal-cassandra-tool`. + +### Upgrade Temporal Instance's Docker Images + +Here is an example of a `helm upgrade` command that can be used to upgrade a cluster: + +``` +helm-charts $ helm \ + upgrade \ + temporaltest \ + -f values/values.cassandra.yaml \ + --set elasticsearch.enabled=true \ + --set server.replicaCount=8 \ + --set server.config.persistence.default.cassandra.hosts='{c1.example.com,c2.example.com,c3.example.com}' \ + --set server.config.persistence.default.cassandra.user=cassandra-user \ + --set server.config.persistence.default.cassandra.password=cassandra-password \ + --set server.config.persistence.default.cassandra.tls.caData=... \ + --set server.config.persistence.default.cassandra.tls.enabled=true \ + --set server.config.persistence.default.cassandra.replicationFactor=3 \ + --set server.config.persistence.default.cassandra.keyspace=temporal \ + --set server.config.persistence.visibility.cassandra.hosts='{c1.example.com,c2.example.com,c3.example.com}' \ + --set server.config.persistence.visibility.cassandra.user=cassandra-user \ + --set server.config.persistence.visibility.cassandra.password=cassandra-password \ + --set server.config.persistence.visibility.cassandra.tls.caData=... 
\ + --set server.config.persistence.visibility.cassandra.tls.enabled=true \ + --set server.config.persistence.visibility.cassandra.replicationFactor=3 \ + --set server.config.persistence.visibility.cassandra.keyspace=temporal_visibility \ + --set server.image.tag=1.2.1 \ + --set server.image.repository=temporalio/server \ + --set admintools.image.tag=1.2.1 \ + --set admintools.image.repository=temporalio/admin-tools \ + --set web.image.tag=1.1.1 \ + --set web.image.repository=temporalio/web \ + . \ + --wait \ + --timeout 15m +``` + + +# Acknowledgements + +Many thanks to [Banzai Cloud](https://github.com/banzaicloud) whose [Cadence Helm Charts](https://github.com/banzaicloud/banzai-charts/tree/master/cadence) heavily inspired this work. + + +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts?ref=badge_large) diff --git a/charts/retool-temporal-services/templates/NOTES.txt b/charts/retool-temporal-services/templates/NOTES.txt new file mode 100644 index 00000000..96b50698 --- /dev/null +++ b/charts/retool-temporal-services/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that Temporal has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/charts/retool-temporal-services/templates/_helpers.tpl b/charts/retool-temporal-services/templates/_helpers.tpl new file mode 100644 index 00000000..6a0666ab --- /dev/null +++ b/charts/retool-temporal-services/templates/_helpers.tpl @@ -0,0 +1,380 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "temporal.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "temporal.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-temporal" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-temporal" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "temporal.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "temporal.serviceAccountName" -}} +{{ default (include "temporal.fullname" .) .Values.serviceAccount.name }} +{{- end -}} + +{{/* +Define the service account as needed +*/}} +{{- define "temporal.serviceAccount" -}} +{{- if .Values.serviceAccount.create -}} +serviceAccountName: {{ include "temporal.serviceAccountName" . }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified component name from the full app name and a component name. +We truncate the full name at 63 - 1 (last dash) - len(component name) chars because some Kubernetes name fields are limited to this (by the DNS naming spec) +and we want to make sure that the component is included in the name. +*/}} +{{- define "temporal.componentname" -}} +{{- $global := index . 0 -}} +{{- $component := index . 1 | trimPrefix "-" -}} +{{- printf "%s-%s" (include "temporal.fullname" $global | trunc (sub 62 (len $component) | int) | trimSuffix "-" ) $component | trimSuffix "-" -}} +{{- end -}} + +{{/* +Call nested templates. 
+Source: https://stackoverflow.com/a/52024583/3027614 +*/}} +{{- define "call-nested" }} +{{- $dot := index . 0 }} +{{- $subchart := index . 1 }} +{{- $template := index . 2 }} +{{- include $template (dict "Chart" (dict "Name" $subchart) "Values" (index $dot.Values $subchart) "Release" $dot.Release "Capabilities" $dot.Capabilities) }} +{{- end }} + +{{- define "temporal.frontend.grpcPort" -}} +{{- if $.Values.server.frontend.service.port -}} +{{- $.Values.server.frontend.service.port -}} +{{- else -}} +{{- 7233 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.frontend.membershipPort" -}} +{{- if $.Values.server.frontend.service.membershipPort -}} +{{- $.Values.server.frontend.service.membershipPort -}} +{{- else -}} +{{- 6933 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.frontend.serviceName" -}} +{{- print "temporal.api.workflowservice.v1.WorkflowService" -}} +{{- end -}} + +{{- define "temporal.history.grpcPort" -}} +{{- if $.Values.server.history.service.port -}} +{{- $.Values.server.history.service.port -}} +{{- else -}} +{{- 7234 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.history.membershipPort" -}} +{{- if $.Values.server.history.service.membershipPort -}} +{{- $.Values.server.history.service.membershipPort -}} +{{- else -}} +{{- 6934 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.history.serviceName" -}} +{{- print "temporal.api.workflowservice.v1.HistoryService" -}} +{{- end -}} + +{{- define "temporal.matching.grpcPort" -}} +{{- if $.Values.server.matching.service.port -}} +{{- $.Values.server.matching.service.port -}} +{{- else -}} +{{- 7235 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.matching.membershipPort" -}} +{{- if $.Values.server.matching.service.membershipPort -}} +{{- $.Values.server.matching.service.membershipPort -}} +{{- else -}} +{{- 6935 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.matching.serviceName" -}} +{{- print "temporal.api.workflowservice.v1.MatchingService" -}} +{{- end -}} + +{{- 
define "temporal.worker.grpcPort" -}} +{{- if $.Values.server.worker.service.port -}} +{{- $.Values.server.worker.service.port -}} +{{- else -}} +{{- 7239 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.worker.membershipPort" -}} +{{- if $.Values.server.worker.service.membershipPort -}} +{{- $.Values.server.worker.service.membershipPort -}} +{{- else -}} +{{- 6939 -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.schema" -}} +{{- if eq . "default" -}} +{{- print "temporal" -}} +{{- else -}} +{{- print . -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.driver" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.driver -}} +{{- $storeConfig.driver -}} +{{- else if $global.Values.cassandra.enabled -}} +{{- print "cassandra" -}} +{{- else if $global.Values.mysql.enabled -}} +{{- print "sql" -}} +{{- else if $global.Values.postgresql.enabled -}} +{{- print "sql" -}} +{{- else -}} +{{- required (printf "Please specify persistence driver for %s store" $store) $storeConfig.driver -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.cassandra.hosts" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.cassandra.hosts -}} +{{- $storeConfig.cassandra.hosts | join "," -}} +{{- else if and $global.Values.cassandra.enabled (eq (include "temporal.persistence.driver" (list $global $store)) "cassandra") -}} +{{- include "cassandra.hosts" $global -}} +{{- else -}} +{{- required (printf "Please specify cassandra hosts for %s store" $store) $storeConfig.cassandra.hosts -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.cassandra.port" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.cassandra.port -}} +{{- $storeConfig.cassandra.port -}} +{{- else if and $global.Values.cassandra.enabled (eq (include "temporal.persistence.driver" (list $global $store)) "cassandra") -}} +{{- $global.Values.cassandra.config.ports.cql -}} +{{- else -}} +{{- required (printf "Please specify cassandra port for %s store" $store) $storeConfig.cassandra.port -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.cassandra.secretName" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.cassandra.existingSecret -}} +{{- $storeConfig.cassandra.existingSecret -}} +{{- else if $storeConfig.cassandra.password -}} +{{- include "temporal.componentname" (list $global (printf "%s-store" $store)) -}} +{{- else -}} +{{/* Cassandra password is optional, but we will create an empty secret for it */}} +{{- include "temporal.componentname" (list $global (printf "%s-store" $store)) -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.cassandra.secretKey" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{/* Cassandra password is optional, but we will create an empty secret for it */}} +{{- print "password" -}} +{{- end -}} + +{{- define "temporal.persistence.sql.driver" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.driver -}} +{{- $storeConfig.sql.driver -}} +{{- else if $global.Values.mysql.enabled -}} +{{- print "mysql" -}} +{{- else if $global.Values.postgresql.enabled -}} +{{- print "postgres" -}} +{{- else -}} +{{- required (printf "Please specify sql driver for %s store" $store) $storeConfig.sql.host -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.host" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.host -}} +{{- $storeConfig.sql.host -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- include "mysql.host" $global -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- include "postgresql.host" $global -}} +{{- else -}} +{{- required (printf "Please specify sql host for %s store" $store) $storeConfig.sql.host -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.port" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.port -}} +{{- $storeConfig.sql.port -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- $global.Values.mysql.service.port -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- $global.Values.postgresql.service.port -}} +{{- else -}} +{{- required (printf "Please specify sql port for %s store" $store) $storeConfig.sql.port -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.user" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.user -}} +{{- $storeConfig.sql.user -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- $global.Values.mysql.mysqlUser -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- $global.Values.postgresql.postgresqlUser -}} +{{- else -}} +{{- required (printf "Please specify sql user for %s store" $store) $storeConfig.sql.user -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.password" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.password -}} +{{- $storeConfig.sql.password -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- if or $global.Values.schema.setup.enabled $global.Values.schema.update.enabled -}} +{{- required "Please specify password for MySQL chart" $global.Values.mysql.mysqlPassword -}} +{{- else -}} +{{- $global.Values.mysql.mysqlPassword -}} +{{- end -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- if or $global.Values.schema.setup.enabled $global.Values.schema.update.enabled -}} +{{- required "Please specify password for PostgreSQL chart" $global.Values.postgresql.postgresqlPassword -}} +{{- else -}} +{{- $global.Values.postgresql.postgresqlPassword -}} +{{- end -}} +{{- else -}} +{{- required (printf "Please specify sql password for %s store" $store) $storeConfig.sql.password -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.secretName" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if $storeConfig.sql.existingSecret -}} +{{- $storeConfig.sql.existingSecret -}} +{{- else if $storeConfig.sql.password -}} +{{- include "temporal.componentname" (list $global (printf "%s-store" $store)) -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- include "call-nested" (list $global "mysql" "mysql.secretName") -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- include "call-nested" (list $global "postgresql" "postgresql.secretName") -}} +{{- else -}} +{{- required (printf "Please specify sql password or existing secret for %s store" $store) $storeConfig.sql.existingSecret -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.sql.secretKey" -}} +{{- $global := index . 0 -}} +{{- $store := index . 
1 -}} +{{- $storeConfig := index $global.Values.server.config.persistence $store -}} +{{- if and $storeConfig.sql.existingSecret $storeConfig.sql.secretKey -}} +{{- $storeConfig.sql.secretKey -}} +{{- else if or $storeConfig.sql.existingSecret $storeConfig.sql.password -}} +{{- print "password" -}} +{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}} +{{- print "mysql-password" -}} +{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}} +{{- print "postgresql-password" -}} +{{- else -}} +{{- fail (printf "Please specify sql password or existing secret for %s store" $store) -}} +{{- end -}} +{{- end -}} + +{{- define "temporal.persistence.secretName" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- include (printf "temporal.persistence.%s.secretName" (include "temporal.persistence.driver" (list $global $store))) (list $global $store) -}} +{{- end -}} + +{{- define "temporal.persistence.secretKey" -}} +{{- $global := index . 0 -}} +{{- $store := index . 1 -}} +{{- include (printf "temporal.persistence.%s.secretKey" (include "temporal.persistence.driver" (list $global $store))) (list $global $store) -}} +{{- end -}} + +{{/* +All Cassandra hosts. +*/}} +{{- define "cassandra.hosts" -}} +{{- range $i := (until (int .Values.cassandra.config.cluster_size)) }} +{{- $cassandraName := include "call-nested" (list $ "cassandra" "cassandra.fullname") -}} +{{- printf "%s.%s.svc.cluster.local," $cassandraName $.Release.Namespace -}} +{{- end }} +{{- end -}} + +{{/* +The first Cassandra host in the stateful set. +*/}} +{{- define "cassandra.host" -}} +{{- $cassandraName := include "call-nested" (list . 
"cassandra" "cassandra.fullname") -}} +{{- printf "%s.%s.svc.cluster.local" $cassandraName .Release.Namespace -}} +{{- end -}} + +{{/* +Based on Bitnami charts method +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/retool-temporal-services/templates/admintools-deployment.yaml b/charts/retool-temporal-services/templates/admintools-deployment.yaml new file mode 100644 index 00000000..fd6c791c --- /dev/null +++ b/charts/retool-temporal-services/templates/admintools-deployment.yaml @@ -0,0 +1,78 @@ +{{- if $.Values.admintools.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "temporal.componentname" (list . "admintools") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: admintools + app.kubernetes.io/part-of: {{ .Chart.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: admintools + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: admintools + app.kubernetes.io/part-of: {{ .Chart.Name }} + {{- with $.Values.admintools.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with $.Values.admintools.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{ include "temporal.serviceAccount" . }} + containers: + - name: admin-tools + {{- if and .Values.admintools.image.repository .Values.admintools.image.tag }} + image: "{{ .Values.admintools.image.repository }}:{{ .Values.admintools.image.tag }}" + imagePullPolicy: {{ .Values.admintools.image.pullPolicy }} + {{- else }} + image: {{ printf "%s/temporal-admin-tools" .Values.imagesDir | .Files.Get }} + {{- end }} + ports: + - name: http + containerPort: 22 + protocol: TCP + env: + - name: TEMPORAL_CLI_ADDRESS + value: {{ include "temporal.fullname" . }}-frontend:{{ include "temporal.frontend.grpcPort" . }} + livenessProbe: + exec: + command: + - ls + - / + initialDelaySeconds: 5 + periodSeconds: 5 + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/admintools-service.yaml b/charts/retool-temporal-services/templates/admintools-service.yaml new file mode 100644 index 00000000..25ffa349 --- /dev/null +++ b/charts/retool-temporal-services/templates/admintools-service.yaml @@ -0,0 +1,26 @@ +{{- if $.Values.admintools.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "temporal.componentname" (list . "admintools") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: admintools + app.kubernetes.io/part-of: {{ .Chart.Name }} +spec: + type: ClusterIP + ports: + - port: 22 + targetPort: 22 + protocol: TCP + name: ssh + + selector: + app.kubernetes.io/name: {{ include "temporal.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: admintools +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-configmap.yaml b/charts/retool-temporal-services/templates/server-configmap.yaml new file mode 100644 index 00000000..514841a4 --- /dev/null +++ b/charts/retool-temporal-services/templates/server-configmap.yaml @@ -0,0 +1,175 @@ +{{- if $.Values.server.enabled }} +{{- range $service := (list "frontend" "history" "matching" "worker") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ include "temporal.componentname" (list $ $service) }}-config" + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include "temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} +data: + config_template.yaml: |- + log: + stdout: true + level: {{ $.Values.server.config.logLevel | quote }} + + persistence: + defaultStore: {{ $.Values.server.config.persistence.defaultStore }} + visibilityStore: visibility + {{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }} + advancedVisibilityStore: es-visibility + {{- end }} + numHistoryShards: {{ $.Values.server.config.numHistoryShards }} + datastores: + {{- with $.Values.server.config.persistence.additionalStores }} + {{- toYaml . | nindent 8 }} + {{- end }} + default: + {{- if eq (include "temporal.persistence.driver" (list $ "default")) "cassandra" }} + cassandra: + hosts: "{{ include "temporal.persistence.cassandra.hosts" (list $ "default") }}" + port: {{ include "temporal.persistence.cassandra.port" (list $ "default") }} + password: "{{ `{{ .Env.TEMPORAL_STORE_PASSWORD }}` }}" + {{- with (omit $.Values.server.config.persistence.default.cassandra "hosts" "port" "password" "existingSecret") }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- end }} + {{- if $.Values.server.config.persistence.faultinjection}} + {{- if $.Values.server.config.persistence.faultinjection.rate }} + faultInjection: + rate: {{ $.Values.server.config.persistence.faultinjection.rate }} + {{- end }} + {{- end }} + {{- if eq (include "temporal.persistence.driver" (list $ "default")) "sql" }} + sql: + pluginName: "{{ include "temporal.persistence.sql.driver" (list $ "default") }}" + driverName: "{{ include "temporal.persistence.sql.driver" (list $ "default") }}" + databaseName: "{{ $.Values.server.config.persistence.default.sql.database }}" + connectAddr: "{{ include "temporal.persistence.sql.host" (list $ "default") }}:{{ include "temporal.persistence.sql.port" (list $ "default") }}" + connectProtocol: "tcp" + user: {{ include "temporal.persistence.sql.user" (list $ "default") }} + password: "{{ `{{ .Env.TEMPORAL_STORE_PASSWORD }}` }}" + {{- with (omit $.Values.server.config.persistence.default.sql "driver" "driverName" "host" "port" "connectAddr" "connectProtocol" "database" "databaseName" "user" "password" "existingSecret") }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + visibility: + {{- if eq (include "temporal.persistence.driver" (list $ "visibility")) "cassandra" }} + cassandra: + hosts: "{{ include "temporal.persistence.cassandra.hosts" (list $ "visibility") }}" + port: {{ include "temporal.persistence.cassandra.port" (list $ "visibility") }} + password: "{{ `{{ .Env.TEMPORAL_VISIBILITY_STORE_PASSWORD }}` }}" + {{- with (omit $.Values.server.config.persistence.visibility.cassandra "hosts" "port" "password" "existingSecret") }} + {{- toYaml . 
 | nindent 12 }}
+        {{- end }}
+        {{- end }}
+      {{- if eq (include "temporal.persistence.driver" (list $ "visibility")) "sql" }}
+        sql:
+          pluginName: "{{ include "temporal.persistence.sql.driver" (list $ "visibility") }}"
+          driverName: "{{ include "temporal.persistence.sql.driver" (list $ "visibility") }}"
+          databaseName: "{{ $.Values.server.config.persistence.visibility.sql.database }}"
+          connectAddr: "{{ include "temporal.persistence.sql.host" (list $ "visibility") }}:{{ include "temporal.persistence.sql.port" (list $ "visibility") }}"
+          connectProtocol: "tcp"
+          user: "{{ include "temporal.persistence.sql.user" (list $ "visibility") }}"
+          password: "{{ `{{ .Env.TEMPORAL_VISIBILITY_STORE_PASSWORD }}` }}"
+          {{- with (omit $.Values.server.config.persistence.visibility.sql "driver" "driverName" "host" "port" "connectAddr" "connectProtocol" "database" "databaseName" "user" "password" "existingSecret") }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
+      {{- end }}
+
+      {{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }}
+      es-visibility:
+        elasticsearch:
+          version: "{{ $.Values.elasticsearch.version }}"
+          url:
+            scheme: "{{ $.Values.elasticsearch.scheme }}"
+            host: "{{ $.Values.elasticsearch.host }}:{{ $.Values.elasticsearch.port }}"
+          username: "{{ $.Values.elasticsearch.username }}"
+          password: "{{ `{{ default .Env.ES_PWD $.Values.elasticsearch.password }}` }}"
+          logLevel: "{{ $.Values.elasticsearch.logLevel }}"
+          indices:
+            visibility: "{{ $.Values.elasticsearch.visibilityIndex }}"
+      {{- end }}
+
+    global:
+      membership:
+        name: temporal
+        maxJoinDuration: 30s
+        broadcastAddress: {{ `{{ default .Env.POD_IP "0.0.0.0" }}` }}
+
+      pprof:
+        port: 7936
+
+      metrics:
+        tags:
+          type: {{ $service }}
+        prometheus:
+          timerType: histogram
+          listenAddress: "0.0.0.0:9090"
+
+
+    services:
+      frontend:
+        rpc:
+          grpcPort: {{ include "temporal.frontend.grpcPort" $ }}
+          membershipPort: {{ include "temporal.frontend.membershipPort" $ }}
+          bindOnIP: "0.0.0.0"
+
+      history:
+        
rpc: + grpcPort: {{ include "temporal.history.grpcPort" $ }} + membershipPort: {{ include "temporal.history.membershipPort" $ }} + bindOnIP: "0.0.0.0" + + matching: + rpc: + grpcPort: {{ include "temporal.matching.grpcPort" $ }} + membershipPort: {{ include "temporal.matching.membershipPort" $ }} + bindOnIP: "0.0.0.0" + + worker: + rpc: + grpcPort: {{ include "temporal.worker.grpcPort" $ }} + membershipPort: {{ include "temporal.worker.membershipPort" $ }} + bindOnIP: "0.0.0.0" + + {{- if $.Values.server.config.clusterMetadata }} + clusterMetadata: + {{- with $.Values.server.config.clusterMetadata }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else }} + clusterMetadata: + enableGlobalDomain: false + failoverVersionIncrement: 10 + masterClusterName: "active" + currentClusterName: "active" + clusterInformation: + active: + enabled: true + initialFailoverVersion: 1 + rpcName: "temporal-frontend" + rpcAddress: "127.0.0.1:7933" + {{- end }} + + dcRedirectionPolicy: + policy: "noop" + toDC: "" + + archival: + status: "disabled" + + publicClient: + hostPort: "{{ include "temporal.componentname" (list $ "frontend") }}:{{ $.Values.server.frontend.service.port }}" + + dynamicConfigClient: + filepath: "/etc/temporal/dynamic_config/dynamic_config.yaml" + pollInterval: "10s" +--- + +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-deployment.yaml b/charts/retool-temporal-services/templates/server-deployment.yaml new file mode 100644 index 00000000..565d71d5 --- /dev/null +++ b/charts/retool-temporal-services/templates/server-deployment.yaml @@ -0,0 +1,255 @@ +{{- if $.Values.server.enabled }} +{{- range $service := (list "frontend" "history" "matching" "worker") }} +{{- $serviceValues := index $.Values.server $service -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "temporal.componentname" (list $ $service) }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include 
"temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: {{ $service }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} +spec: + replicas: {{ default $.Values.server.replicaCount $serviceValues.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/component: {{ $service }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include "temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: {{ $service }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} + {{- with (default $.Values.server.podLabels $serviceValues.podLabels) }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/server-configmap.yaml") $ | sha256sum }} + {{- if (default $.Values.server.metrics.annotations.enabled $serviceValues.metrics.annotations.enabled) }} + prometheus.io/job: {{ $.Chart.Name }}-{{ $service }} + prometheus.io/scrape: 'true' + prometheus.io/port: '9090' + {{- end }} + {{- with (default $.Values.server.podAnnotations $serviceValues.podAnnotations) }} + {{- toYaml . 
 | nindent 8 }}
+      {{- end }}
+    spec:
+      {{ include "temporal.serviceAccount" $ }}
+      {{- if semverCompare ">=1.13.0" $.Chart.AppVersion}}
+      securityContext:
+        fsGroup: 1000 #temporal group
+        runAsUser: 1000 #temporal user
+      {{- end }}
+      {{- if or $.Values.cassandra.enabled (or $.Values.elasticsearch.enabled $.Values.elasticsearch.external)}}
+      initContainers:
+        {{- if and (ne $service "frontend") $.Values.server.autosetup }}
+        - name: check-schema-setup
+          image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
+          imagePullPolicy: "{{ $.Values.admintools.image.pullPolicy }}"
+          command: ['sh', '-c', 'until tctl cluster health | grep "SERVING"; do echo waiting for temporal server to be serving; sleep 1; done;']
+          env:
+            - name: TEMPORAL_CLI_ADDRESS
+              value: "{{ include "temporal.fullname" $ }}-frontend:{{ include "temporal.frontend.grpcPort" $ }}"
+        {{- end }}
+        {{- if $.Values.cassandra.enabled }}
+        - name: check-cassandra-service
+          image: busybox
+          command: ['sh', '-c', 'until nslookup {{ include "cassandra.host" $ }}; do echo waiting for cassandra service; sleep 1; done;']
+        - name: check-cassandra
+          image: "{{ $.Values.cassandra.image.repo }}:{{ $.Values.cassandra.image.tag }}"
+          imagePullPolicy: {{ $.Values.cassandra.image.pullPolicy }}
+          command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ $.Values.cassandra.config.ports.cql }} -e "SHOW VERSION"; do echo waiting for cassandra to start; sleep 1; done;']
+        - name: check-cassandra-temporal-schema
+          image: "{{ $.Values.cassandra.image.repo }}:{{ $.Values.cassandra.image.tag }}"
+          imagePullPolicy: {{ $.Values.cassandra.image.pullPolicy }}
+          command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ $.Values.cassandra.config.ports.cql }} -e "SELECT keyspace_name FROM system_schema.keyspaces" | grep {{ $.Values.server.config.persistence.default.cassandra.keyspace }}$; do echo waiting for default keyspace to become ready; sleep 1; done;']
+        - name: 
check-cassandra-visibility-schema + image: "{{ $.Values.cassandra.image.repo }}:{{ $.Values.cassandra.image.tag }}" + imagePullPolicy: {{ $.Values.cassandra.image.pullPolicy }} + command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ $.Values.cassandra.config.ports.cql }} -e "SELECT keyspace_name FROM system_schema.keyspaces" | grep {{ $.Values.server.config.persistence.visibility.cassandra.keyspace }}$; do echo waiting for visibility keyspace to become ready; sleep 1; done;'] + {{- end }} + {{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }} + - name: check-elasticsearch-index + image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}" + imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }} + command: ['sh', '-c', 'until curl --silent --fail --user $(ES_USER):$(ES_PWD) {{ $.Values.elasticsearch.scheme }}://{{ $.Values.elasticsearch.host }}:{{ $.Values.elasticsearch.port }}/{{ $.Values.elasticsearch.visibilityIndex }} 2>&1 > /dev/null; do echo waiting for elasticsearch index to become ready; sleep 1; done;'] + env: + - name: ES_USER + value: "{{ $.Values.elasticsearch.username }}" + - name: ES_PWD + {{- if ($.Values.elasticsearch).existingSecret }} + valueFrom: + secretKeyRef: + name: "{{ $.Values.elasticsearch.existingSecret }}" + key: "{{ default "password" $.Values.elasticsearch.secretKey }}" + {{- else }} + value: "{{ $.Values.elasticsearch.password }}" + {{- end }} + {{- end }} + {{- end }} + containers: + - name: {{ $.Chart.Name }}-{{ $service }} + image: "{{ $.Values.server.image.repository }}:{{ $.Values.server.image.tag }}" + imagePullPolicy: {{ $.Values.server.image.pullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SKIP_ADD_CUSTOM_SEARCH_ATTRIBUTES + value: 'true' + - name: ENABLE_ES + value: "{{ or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }}" + - name: ES_SEEDS + value: "{{ $.Values.elasticsearch.host }}" + - 
name: ES_PORT + value: "{{ $.Values.elasticsearch.port }}" + - name: ES_VERSION + value: "{{ $.Values.elasticsearch.version }}" + - name: ES_SCHEME + value: "{{ $.Values.elasticsearch.scheme }}" + - name: ES_VIS_INDEX + value: "{{ $.Values.elasticsearch.visibilityIndex }}" + - name: ES_USER + value: "{{ $.Values.elasticsearch.username }}" + - name: ES_PWD + {{- if ($.Values.elasticsearch).existingSecret }} + valueFrom: + secretKeyRef: + name: "{{ $.Values.elasticsearch.existingSecret }}" + key: "{{ default "password" $.Values.elasticsearch.secretKey }}" + {{- else }} + value: "{{ $.Values.elasticsearch.password }}" + {{- end }} + - name: SERVICES + value: {{ $service }} + - name: DEFAULT_NAMESPACE + value: workflows + - name: DB + value: postgresql + {{- if $.Values.server.config.persistence.default.sql.database }} + - name: DBNAME + value: "{{ $.Values.server.config.persistence.default.sql.database }}" + {{- end }} + {{- if $.Values.server.config.persistence.visibility.sql.database }} + - name: VISIBILITY_DBNAME + value: "{{ $.Values.server.config.persistence.visibility.sql.database }}" + {{- end }} + {{- if ($.Values.server.config.persistence.default.sql.tls).enabled }} + - name: SQL_TLS_ENABLED + value: "true" + - name: SQL_TLS + value: "true" + {{- end }} + {{- if and ($.Values.server.config.persistence.default.sql.tls).enabled (not ($.Values.server.config.persistence.default.sql.tls).enableHostVerification) }} + - name: SQL_TLS_DISABLE_HOST_VERIFICATION + value: "true" + {{- end }} + - name: POSTGRES_HOST + value: "{{ $.Values.server.config.persistence.default.sql.host }}" + - name: POSTGRES_PORT + value: "{{ $.Values.server.config.persistence.default.sql.port }}" + - name: POSTGRES_USER + value: "{{ $.Values.server.config.persistence.default.sql.user }}" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "temporal.persistence.secretName" (list $ "default") }} + key: {{ include "temporal.persistence.secretKey" (list $ "default") }} + 
{{- if ne $service "frontend"}} + - name: TEMPORAL_CLI_ADDRESS + value: {{ include "temporal.fullname" $ }}-frontend:{{ include "temporal.frontend.grpcPort" $ }} + {{- end }} + - name: TEMPORAL_STORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "temporal.persistence.secretName" (list $ "default") }} + key: {{ include "temporal.persistence.secretKey" (list $ "default") }} + - name: TEMPORAL_VISIBILITY_STORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "temporal.persistence.secretName" (list $ "visibility") }} + key: {{ include "temporal.persistence.secretKey" (list $ "visibility") }} + {{- if $.Values.server.versionCheckDisabled }} + - name: TEMPORAL_VERSION_CHECK_DISABLED + value: "1" + {{- end }} + ports: + - name: rpc + containerPort: {{ include (printf "temporal.%s.grpcPort" $service) $ }} + protocol: TCP + - name: metrics + containerPort: 9090 + protocol: TCP + {{- if and (ne $service "worker") $.Values.server.useLegacyHealthProbe }} + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:{{ include (printf "temporal.%s.grpcPort" $service) $ }}", "-service={{ include (printf "temporal.%s.serviceName" $service) $ }}"] + initialDelaySeconds: 5 + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:{{ include (printf "temporal.%s.grpcPort" $service) $ }}", "-service={{ include (printf "temporal.%s.serviceName" $service) $ }}"] + initialDelaySeconds: 5 + {{- else if and (ne $service "worker") (not $.Values.server.useLegacyHealthProbe) }} + livenessProbe: + initialDelaySeconds: 150 + tcpSocket: + port: rpc + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/temporal/config/config_template.yaml + subPath: config_template.yaml + - name: dynamic-config + mountPath: /etc/temporal/dynamic_config + {{- if $.Values.server.additionalVolumeMounts }} + {{- toYaml $.Values.server.additionalVolumeMounts | nindent 12}} + {{- end }} + resources: + {{- toYaml (default $.Values.server.resources 
$serviceValues.resources) | nindent 12 }} + {{- if $.Values.server.sidecarContainers }} + {{- toYaml $.Values.server.sidecarContainers | nindent 8 }} + {{- end }} + + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: "{{ include "temporal.componentname" (list $ $service) }}-config" + - name: dynamic-config + configMap: + name: "{{ include "temporal.fullname" $ }}-dynamic-config" + items: + - key: dynamic_config.yaml + path: dynamic_config.yaml + {{- if $.Values.server.additionalVolumes }} + {{- toYaml $.Values.server.additionalVolumes | nindent 8}} + {{- end }} + {{- with (default $.Values.server.nodeSelector $serviceValues.nodeSelector) }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with (default $.Values.server.affinity $serviceValues.affinity) }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with (default $.Values.server.tolerations $serviceValues.tolerations) }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +--- +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-dynamicconfigmap.yaml b/charts/retool-temporal-services/templates/server-dynamicconfigmap.yaml new file mode 100644 index 00000000..95ba8f9c --- /dev/null +++ b/charts/retool-temporal-services/templates/server-dynamicconfigmap.yaml @@ -0,0 +1,18 @@ +{{- if $.Values.server.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ include "temporal.fullname" . }}-dynamic-config" + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/part-of: {{ .Chart.Name }} +data: + dynamic_config.yaml: |- + {{- if $.Values.server.dynamicConfig }} + {{- toYaml .Values.server.dynamicConfig | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-job.yaml b/charts/retool-temporal-services/templates/server-job.yaml new file mode 100644 index 00000000..1482cc3e --- /dev/null +++ b/charts/retool-temporal-services/templates/server-job.yaml @@ -0,0 +1,303 @@ +{{- if $.Values.server.enabled }} +{{- if .Values.schema.setup.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "temporal.componentname" (list . "schema-setup") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ .Chart.Name }} + annotations: + {{- if .Values.cassandra.enabled }} + "helm.sh/hook": post-install + {{- else }} + "helm.sh/hook": pre-install + {{- end }} + "helm.sh/hook-weight": "0" + {{- if not .Values.debug }} + "helm.sh/hook-delete-policy": hook-succeeded,hook-failed + {{- end }} +spec: + backoffLimit: {{ .Values.schema.setup.backoffLimit }} + template: + metadata: + name: {{ include "temporal.componentname" (list . "schema-setup") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ .Chart.Name }} + spec: + {{ include "temporal.serviceAccount" . }} + restartPolicy: "OnFailure" + initContainers: + {{- if or .Values.cassandra.enabled (eq (include "temporal.persistence.driver" (list $ "default")) "cassandra") (eq (include "temporal.persistence.driver" (list $ "visibility")) "cassandra") }} + {{- if .Values.cassandra.enabled }} + - name: check-cassandra-service + image: busybox + command: ['sh', '-c', 'until nslookup {{ include "cassandra.host" $ }}; do echo waiting for cassandra service; sleep 1; done;'] + - name: check-cassandra + image: "{{ .Values.cassandra.image.repo }}:{{ .Values.cassandra.image.tag }}" + imagePullPolicy: {{ .Values.cassandra.image.pullPolicy }} + command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ .Values.cassandra.config.ports.cql }} -e "SHOW VERSION"; do echo waiting for cassandra to start; sleep 1; done;'] + {{- end }} + {{- range $store := (list "default" "visibility") }} + {{- $storeConfig := index $.Values.server.config.persistence $store }} + {{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }} + - name: create-{{ $store }}-store + image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}" + imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }} + command: ['sh', '-c', 'temporal-cassandra-tool create -k {{ $storeConfig.cassandra.keyspace }} --replication-factor {{ $storeConfig.cassandra.replicationFactor }}'] + env: + - name: CASSANDRA_HOST + value: {{ first (splitList "," (include "temporal.persistence.cassandra.hosts" (list $ $store))) }} + - name: CASSANDRA_PORT + value: {{ include "temporal.persistence.cassandra.port" (list $ $store) | quote }} + - name: CASSANDRA_KEYSPACE + value: {{ 
$storeConfig.cassandra.keyspace }} + {{- if $storeConfig.cassandra.user }} + - name: CASSANDRA_USER + value: {{ $storeConfig.cassandra.user }} + {{- end }} + {{- if (or $storeConfig.cassandra.password $storeConfig.cassandra.existingSecret) }} + - name: CASSANDRA_PASSWORD + {{- if $storeConfig.cassandra.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ include "temporal.persistence.secretName" (list $ $store) }} + key: {{ include "temporal.persistence.secretKey" (list $ $store) }} + {{- else }} + value: {{ $storeConfig.cassandra.password }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- else }} + [] + {{- end }} + containers: + {{- range $store := (list "default" "visibility") }} + {{- $storeConfig := index $.Values.server.config.persistence $store }} + - name: {{ $store }}-schema + image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}" + imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }} + command: ["temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool", "setup-schema", "-v", "0.0"] + env: + {{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }} + - name: CASSANDRA_HOST + value: {{ first (splitList "," (include "temporal.persistence.cassandra.hosts" (list $ $store))) }} + - name: CASSANDRA_PORT + value: {{ include "temporal.persistence.cassandra.port" (list $ $store) | quote }} + - name: CASSANDRA_KEYSPACE + value: {{ $storeConfig.cassandra.keyspace }} + {{- if $storeConfig.cassandra.user }} + - name: CASSANDRA_USER + value: {{ $storeConfig.cassandra.user }} + {{- end }} + {{- if (or $storeConfig.cassandra.password $storeConfig.cassandra.existingSecret) }} + - name: CASSANDRA_PASSWORD + {{- if $storeConfig.cassandra.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ include "temporal.persistence.secretName" (list $ $store) }} + key: {{ include "temporal.persistence.secretKey" (list $ $store) }} + {{- else }} + value: {{ $storeConfig.cassandra.password 
}}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with $.Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $.Values.server.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.server.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.server.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+---
+{{- end }}
+{{- if .Values.schema.update.enabled }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "temporal.componentname" (list . "schema-update") }}
+ labels:
+ app.kubernetes.io/name: {{ include "temporal.name" . }}
+ helm.sh/chart: {{ include "temporal.chart" . }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }}
+ app.kubernetes.io/component: database
+ app.kubernetes.io/part-of: {{ .Chart.Name }}
+ annotations:
+ {{- if .Values.cassandra.enabled }}
+ "helm.sh/hook": post-install,pre-upgrade
+ {{- else }}
+ "helm.sh/hook": pre-install,pre-upgrade
+ {{- end }}
+ "helm.sh/hook-weight": "1"
+ {{- if not .Values.debug }}
+ "helm.sh/hook-delete-policy": hook-succeeded,hook-failed
+ {{- end }}
+spec:
+ backoffLimit: {{ .Values.schema.update.backoffLimit }}
+ template:
+ metadata:
+ name: {{ include "temporal.componentname" (list . "schema-update") }}
+ labels:
+ app.kubernetes.io/name: {{ include "temporal.name" . }}
+ helm.sh/chart: {{ include "temporal.chart" . }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }}
+ app.kubernetes.io/component: database
+ app.kubernetes.io/part-of: {{ .Chart.Name }}
+ spec:
+ {{ include "temporal.serviceAccount" . 
}} + restartPolicy: "OnFailure" + initContainers: + {{- if .Values.cassandra.enabled }} + - name: check-cassandra-service + image: busybox + command: ['sh', '-c', 'until nslookup {{ include "cassandra.host" $ }}; do echo waiting for cassandra service; sleep 1; done;'] + - name: check-cassandra + image: "{{ .Values.cassandra.image.repo }}:{{ .Values.cassandra.image.tag }}" + imagePullPolicy: {{ .Values.cassandra.image.pullPolicy }} + command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ .Values.cassandra.config.ports.cql }} -e "SHOW VERSION"; do echo waiting for cassandra to start; sleep 1; done;'] + {{- else }} + [] + {{- end }} + containers: + {{- range $store := (list "default" "visibility") }} + {{- $storeConfig := index $.Values.server.config.persistence $store }} + - name: {{ $store }}-schema + image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}" + imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }} + {{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }} + command: ['sh', '-c', 'temporal-cassandra-tool update-schema -d /etc/temporal/schema/cassandra/{{ include "temporal.persistence.schema" $store }}/versioned'] + {{- end }} + env: + {{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }} + - name: CASSANDRA_HOST + value: {{ first (splitList "," (include "temporal.persistence.cassandra.hosts" (list $ $store))) }} + - name: CASSANDRA_PORT + value: {{ include "temporal.persistence.cassandra.port" (list $ $store) | quote }} + - name: CASSANDRA_KEYSPACE + value: {{ $storeConfig.cassandra.keyspace }} + {{- if $storeConfig.cassandra.user }} + - name: CASSANDRA_USER + value: {{ $storeConfig.cassandra.user }} + {{- end }} + {{- if (or $storeConfig.cassandra.password $storeConfig.cassandra.existingSecret) }} + - name: CASSANDRA_PASSWORD + {{- if $storeConfig.cassandra.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ include 
"temporal.persistence.secretName" (list $ $store) }} + key: {{ include "temporal.persistence.secretKey" (list $ $store) }} + {{- else }} + value: {{ $storeConfig.cassandra.password }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with (default $.Values.admintools.nodeSelector) }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +--- +{{- end }} +{{- if and $.Values.elasticsearch.autosetup (or $.Values.elasticsearch.enabled $.Values.elasticsearch.external) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "temporal.componentname" (list . "es-index-setup") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: database + app.kubernetes.io/part-of: {{ .Chart.Name }} + annotations: + {{- if .Values.elasticsearch.external }} + "helm.sh/hook": pre-install + {{- else }} + "helm.sh/hook": post-install + {{- end }} + "helm.sh/hook-weight": "0" + {{- if not .Values.debug }} + "helm.sh/hook-delete-policy": hook-succeeded,hook-failed + {{- end }} +spec: + backoffLimit: {{ .Values.schema.setup.backoffLimit }} + template: + metadata: + name: {{ include "temporal.componentname" (list . "es-index-setup") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . 
}}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }}
+ app.kubernetes.io/component: database
+ app.kubernetes.io/part-of: {{ .Chart.Name }}
+ spec:
+ {{ include "temporal.serviceAccount" . }}
+ restartPolicy: "OnFailure"
+ initContainers:
+ - name: check-elasticsearch-service
+ image: busybox
+ command: ['sh', '-c', 'until nslookup {{ .Values.elasticsearch.host }}; do echo waiting for elasticsearch service; sleep 1; done;']
+ - name: check-elasticsearch
+ image: "{{ .Values.admintools.image.repository }}:{{ .Values.admintools.image.tag }}"
+ imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
+ command: ['sh', '-c', 'until curl --silent --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }} > /dev/null 2>&1; do echo waiting for elasticsearch to start; sleep 1; done;']
+ containers:
+ - name: create-elasticsearch-index
+ image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
+ imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
+ command: ['sh', '-c']
+ args:
+ - 'curl -X PUT --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/_template/temporal_visibility_v1_template -H "Content-Type: application/json" --data-binary "@schema/elasticsearch/visibility/index_template_{{ .Values.elasticsearch.version }}.json" 2>&1 &&
+ curl -X PUT --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/{{ .Values.elasticsearch.visibilityIndex }} 2>&1'
+ {{- with $.Values.admintools.nodeSelector }}
+ 
nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.admintools.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-secret.yaml b/charts/retool-temporal-services/templates/server-secret.yaml new file mode 100644 index 00000000..5d8c026e --- /dev/null +++ b/charts/retool-temporal-services/templates/server-secret.yaml @@ -0,0 +1,28 @@ +{{- if $.Values.server.enabled }} +{{- range $store := (list "default" "visibility") }} +{{- $storeConfig := index $.Values.server.config.persistence $store }} +{{- $driverConfig := index $storeConfig (include "temporal.persistence.driver" (list $ $store)) }} +{{- $secretName := include "temporal.componentname" (list $ (printf "%s-store" $store)) }} +{{- if and (not $driverConfig.existingSecret) (eq (include "temporal.persistence.secretName" (list $ $store)) $secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include "temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} +type: Opaque +data: + {{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }} + password: {{ $storeConfig.cassandra.password | b64enc | quote }} + {{- else if eq (include "temporal.persistence.driver" (list $ $store)) "sql" }} + password: {{ include "temporal.persistence.sql.password" (list $ $store) | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-service-monitor.yaml 
b/charts/retool-temporal-services/templates/server-service-monitor.yaml new file mode 100644 index 00000000..73909009 --- /dev/null +++ b/charts/retool-temporal-services/templates/server-service-monitor.yaml @@ -0,0 +1,41 @@ +{{- if $.Values.server.enabled }} +{{- range $service := (list "frontend" "matching" "history" "worker") }} +{{- $serviceValues := index $.Values.server $service -}} +{{- if (default $.Values.server.metrics.serviceMonitor.enabled $serviceValues.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "temporal.componentname" (list $ $service) }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include "temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: {{ $service }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} + {{- with (default $.Values.server.metrics.serviceMonitor.additionalLabels $serviceValues.metrics.serviceMonitor.additionalLabels) }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ default $.Values.server.metrics.serviceMonitor.interval $serviceValues.metrics.serviceMonitor.interval }} + {{- with (default $.Values.server.metrics.serviceMonitor.metricRelabelings $serviceValues.metrics.serviceMonitor.metricRelabelings) }} + metricRelabelings: + {{- toYaml . 
| nindent 4 }} + {{- end }} + jobLabel: {{ include "temporal.componentname" (list $ $service) }} + namespaceSelector: + matchNames: + - "{{ $.Release.Namespace }}" + selector: + matchLabels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/component: {{ $service }} + app.kubernetes.io/headless: 'true' +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/server-service.yaml b/charts/retool-temporal-services/templates/server-service.yaml new file mode 100644 index 00000000..28cb0294 --- /dev/null +++ b/charts/retool-temporal-services/templates/server-service.yaml @@ -0,0 +1,79 @@ +{{- if $.Values.server.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "temporal.componentname" (list . "frontend") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: frontend + app.kubernetes.io/part-of: {{ .Chart.Name }} + {{- if .Values.server.frontend.service.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.server.frontend.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.server.frontend.service.type }} + ports: + - port: {{ .Values.server.frontend.service.port }} + targetPort: rpc + protocol: TCP + name: grpc-rpc + {{- if hasKey .Values.server.frontend.service "nodePort" }} + nodePort: {{ .Values.server.frontend.service.nodePort }} + {{- end }} + selector: + app.kubernetes.io/name: {{ include "temporal.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: frontend + +--- +{{- range $service := (list "frontend" "matching" "history" "worker") }} +{{- $serviceValues := index $.Values.server $service -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "temporal.componentname" (list $ (printf "%s-headless" $service)) }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + helm.sh/chart: {{ include "temporal.chart" $ }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: {{ $service }} + app.kubernetes.io/part-of: {{ $.Chart.Name }} + app.kubernetes.io/headless: 'true' + prometheus.io/job: {{ $.Chart.Name }}-{{ $service }} + prometheus.io/scrape: 'true' + prometheus.io/scheme: http + prometheus.io/port: "9090" + + annotations: + # Use this annotation in addition to the actual field below because the + # annotation will stop being respected soon but the field is broken in + # some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ $serviceValues.service.port }} + targetPort: rpc + protocol: TCP + name: grpc-rpc + - port: 9090 + targetPort: metrics + protocol: TCP + name: metrics + selector: + app.kubernetes.io/name: {{ include "temporal.name" $ }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/component: {{ $service }} + +--- +{{- end }} +{{- end }} diff --git a/charts/retool-temporal-services/templates/serviceaccount.yaml b/charts/retool-temporal-services/templates/serviceaccount.yaml new file mode 100644 index 00000000..f44e51db --- /dev/null +++ b/charts/retool-temporal-services/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if 
.Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "temporal.serviceAccountName" . }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/part-of: {{ .Chart.Name }} + annotations: + helm.sh/hook: pre-install + helm.sh/hook-weight: "-10" + {{- with .Values.serviceAccount.extraAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/charts/retool-temporal-services/templates/web-configmap.yaml b/charts/retool-temporal-services/templates/web-configmap.yaml new file mode 100644 index 00000000..b5f05091 --- /dev/null +++ b/charts/retool-temporal-services/templates/web-configmap.yaml @@ -0,0 +1,18 @@ +{{- if .Values.web.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "temporal.componentname" (list . "web") }}-config + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: web + app.kubernetes.io/part-of: {{ .Chart.Name }} +data: + config.yml: | + {{- toYaml .Values.web.config | nindent 4 }} + +{{- end }} diff --git a/charts/retool-temporal-services/templates/web-deployment.yaml b/charts/retool-temporal-services/templates/web-deployment.yaml new file mode 100644 index 00000000..f2ffc83a --- /dev/null +++ b/charts/retool-temporal-services/templates/web-deployment.yaml @@ -0,0 +1,73 @@ +{{- if .Values.web.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "temporal.componentname" (list . 
"web") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: web + app.kubernetes.io/part-of: {{ .Chart.Name }} +spec: + replicas: {{ .Values.web.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: web + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: web + app.kubernetes.io/part-of: {{ .Chart.Name }} + {{- with .Values.web.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.web.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{ include "temporal.serviceAccount" . }} + volumes: + - name: {{ .Chart.Name }}-web-config + configMap: + name: {{ include "temporal.componentname" (list . "web") }}-config + containers: + - name: {{ .Chart.Name }}-web + {{- if and .Values.web.image.repository .Values.web.image.tag }} + image: "{{ .Values.web.image.repository }}:{{ .Values.web.image.tag }}" + imagePullPolicy: {{ .Values.web.image.pullPolicy }} + {{- else }} + image: {{ printf "%s/temporal-web" .Values.imagesDir | .Files.Get }} + {{- end }} + env: + - name: TEMPORAL_ADDRESS + value: "{{ include "temporal.fullname" . 
}}-frontend.{{ .Release.Namespace }}.svc:{{ .Values.server.frontend.service.port }}" + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + {{- toYaml .Values.web.resources | nindent 12 }} + {{- with .Values.web.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.web.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.web.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} diff --git a/charts/retool-temporal-services/templates/web-ingress.yaml b/charts/retool-temporal-services/templates/web-ingress.yaml new file mode 100644 index 00000000..68c52cd4 --- /dev/null +++ b/charts/retool-temporal-services/templates/web-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.web.ingress.enabled -}} + {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }} +apiVersion: networking.k8s.io/v1 + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 + {{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" }} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ include "temporal.componentname" (list . "web") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: web + app.kubernetes.io/part-of: {{ .Chart.Name }} +{{- with .Values.web.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- with .Values.web.ingress.className }} + ingressClassName: {{ . | quote }} + {{- end }} + {{- if .Values.web.ingress.tls }} + tls: + {{- range .Values.web.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.web.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: + - path: /{{ rest $url | join "/" }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }} + pathType: Prefix + backend: + service: + name: {{ include "temporal.fullname" $ }}-web + port: + number: {{ $.Values.web.service.port }} + {{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} + backend: + serviceName: {{ include "temporal.fullname" $ }}-web + servicePort: {{ $.Values.web.service.port }} + {{- end }} + {{- end}} + {{- end }} diff --git a/charts/retool-temporal-services/templates/web-service.yaml b/charts/retool-temporal-services/templates/web-service.yaml new file mode 100644 index 00000000..72534777 --- /dev/null +++ b/charts/retool-temporal-services/templates/web-service.yaml @@ -0,0 +1,35 @@ +{{- if .Values.web.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "temporal.componentname" (list . "web") }} + labels: + app.kubernetes.io/name: {{ include "temporal.name" . }} + helm.sh/chart: {{ include "temporal.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: web + app.kubernetes.io/part-of: {{ .Chart.Name }} +{{- with .Values.web.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- with .Values.web.service.loadBalancerIP }} + loadBalancerIP: {{.}} + {{- end }} + type: {{ .Values.web.service.type }} + ports: + - port: {{ .Values.web.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if hasKey .Values.web.service "nodePort" }} + nodePort: {{ .Values.web.service.nodePort }} + {{- end }} + selector: + app.kubernetes.io/name: {{ include "temporal.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: web + {{- end }} diff --git a/charts/retool-temporal-services/values.yaml b/charts/retool-temporal-services/values.yaml new file mode 100644 index 00000000..1178d2e6 --- /dev/null +++ b/charts/retool-temporal-services/values.yaml @@ -0,0 +1,405 @@ +nameOverride: "retool-wf-temporal" +# fullnameOverride: "" + +# Chart debug mode +# (eg. disable helm hook delete policy) +debug: false + +# Custom Service account management +serviceAccount: + # Whether to create service account or not + create: false + + # Name of the service account, default: temporal.fullname + name: + + # extraAnnotations would let users add additional annotations + extraAnnotations: + +server: + enabled: true + sidecarContainers: + # automatically sets up schema for temporal + # this requires using our custom image (tryretool/one-offs:retool-temporal) + # if disabled, can use temporalio/server:1.18.5 image for faster startup + autosetup: true + image: + repository: tryretool/one-offs + tag: retool-temporal-1.1.3 + pullPolicy: IfNotPresent + + # Global default settings (can be overridden per service) + replicaCount: 1 + metrics: + # Annotate pods directly with Prometheus annotations. + # Use this if you installed Prometheus from a Helm chart. + annotations: + enabled: true + # Enable Prometheus ServiceMonitor + # Use this if you installed the Prometheus Operator (https://github.com/coreos/prometheus-operator). 
+ serviceMonitor:
+ enabled: false
+ interval: 30s
+ # Set additional labels to all the ServiceMonitor resources
+ additionalLabels: {}
+ # label1: value1
+ # label2: value2
+ # Set Prometheus metric_relabel_configs via ServiceMonitor
+ # Use metricRelabelings to adjust metric and label names as needed
+ metricRelabelings: []
+ # - action: replace
+ # sourceLabels:
+ # - exported_namespace
+ # targetLabel: temporal_namespace
+ # - action: replace
+ # regex: service_errors_(.+)
+ # replacement: ${1}
+ # sourceLabels:
+ # - __name__
+ # targetLabel: temporal_error_kind
+ # - action: replace
+ # regex: service_errors_.+
+ # replacement: temporal_service_errors
+ # sourceLabels:
+ # - __name__
+ # targetLabel: __name__
+ prometheus:
+ timerType: histogram
+ podAnnotations: {}
+ podLabels: {}
+ resources:
+ {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ additionalVolumes: []
+ additionalVolumeMounts: []
+
+ config:
+ logLevel: "debug,info"
+
+ # IMPORTANT: This value cannot be changed, once it's set. 
+ numHistoryShards: 256 + + persistence: + defaultStore: default + additionalStores: {} + + default: + driver: "sql" + + cassandra: + hosts: [] + # port: 9042 + keyspace: "temporal" + user: "user" + password: "password" + existingSecret: "" + replicationFactor: 1 + consistency: + default: + consistency: "local_quorum" + serialConsistency: "local_serial" + # datacenter: "us-east-1a" + # maxQPS: 1000 + # maxConns: 2 + + sql: + driver: "postgres" + host: _HOST_ + port: 5432 + database: temporal + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + + visibility: + driver: "sql" + + cassandra: + hosts: [] + # port: 9042 + keyspace: "temporal_visibility" + user: "user" + password: "password" + existingSecret: "" + # datacenter: "us-east-1a" + # maxQPS: 1000 + # maxConns: 2 + replicationFactor: 1 + consistency: + default: + consistency: "local_quorum" + serialConsistency: "local_serial" + + sql: + driver: "postgres" + host: _HOST_ + port: 5432 + database: temporal_visibility + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + + frontend: + # replicaCount: 1 + service: + annotations: {} # Evaluated as template + type: ClusterIP + port: 7233 + metrics: + annotations: + enabled: true + serviceMonitor: {} + # enabled: false + prometheus: {} + # timerType: histogram + podAnnotations: {} + podLabels: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + + history: + # replicaCount: 1 + service: + # type: ClusterIP + port: 7234 + metrics: + annotations: + enabled: true + serviceMonitor: {} + # enabled: false + prometheus: {} + # timerType: histogram + podAnnotations: {} + podLabels: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + + matching: + # replicaCount: 1 + service: + # type: ClusterIP + port: 7235 + metrics: + annotations: + enabled: false + serviceMonitor: {} + # enabled: false + prometheus: {} + # timerType: histogram + podAnnotations: {} + podLabels: {} + resources: 
{} + nodeSelector: {} + tolerations: [] + affinity: {} + + worker: + # replicaCount: 1 + service: + # type: ClusterIP + port: 7239 + metrics: + annotations: + enabled: true + serviceMonitor: {} + # enabled: false + prometheus: {} + # timerType: histogram + podAnnotations: {} + podLabels: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + +admintools: + enabled: false + image: + repository: temporalio/admin-tools + tag: 1.18.5 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + port: 22 + annotations: {} + podLabels: {} + podAnnotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + +web: + enabled: false + config: + # server/config.yml file content + auth: + enabled: false + routing: + default_to_namespace: workflows # internal use only + issue_report_link: https://github.com/temporalio/web/issues/new/choose # set this field if you need to direct people to internal support forums + + + replicaCount: 1 + + image: + repository: tryretool/one-offs + tag: retool-temporal-ui-1.0.0 + pullPolicy: IfNotPresent + + service: + # set type to NodePort if access to web needs access from outside the cluster + # for more info see https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: ClusterIP + port: 8080 + annotations: {} + # loadBalancerIP: + + ingress: + enabled: false + # className: + annotations: {} + # kubernetes.io/ingress.class: traefik + # ingress.kubernetes.io/ssl-redirect: "false" + # traefik.frontend.rule.type: PathPrefix + hosts: + - "/" + # - "domain.com/xyz" + # - "domain.com" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + podAnnotations: {} + podLabels: {} + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +schema: + setup: + enabled: false + backoffLimit: 100 + update: + enabled: false + backoffLimit: 100 + +elasticsearch: + enabled: false + replicas: 3 + persistence: + enabled: false + imageTag: 7.16.2 + host: elasticsearch-master-headless + scheme: http + port: 9200 + version: "v7" + logLevel: "error" + username: "" + password: "" + visibilityIndex: "temporal_visibility_v1_dev" + +prometheus: + enabled: false + nodeExporter: + enabled: false + +grafana: + enabled: false + replicas: 1 + testFramework: + enabled: false + rbac: + create: false + pspEnabled: false + namespaced: true + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default + datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: TemporalMetrics + type: prometheus + url: http://{{ .Release.Name }}-prometheus-server + access: proxy + isDefault: true + dashboards: + default: + server-general-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/server/server-general.json + datasource: TemporalMetrics + sdk-general-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/sdk/sdk-general.json + datasource: TemporalMetrics + misc-advanced-visibility-specific-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/advanced-visibility-specific.json + datasource: TemporalMetrics + misc-clustermonitoring-kubernetes-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/clustermonitoring-kubernetes.json + datasource: TemporalMetrics + 
misc-frontend-service-specific-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/frontend-service-specific.json + datasource: TemporalMetrics + misc-history-service-specific-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/history-service-specific.json + datasource: TemporalMetrics + misc-matching-service-specific-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/matching-service-specific.json + datasource: TemporalMetrics + misc-worker-service-specific-github: + url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/worker-service-specific.json + datasource: TemporalMetrics + +postgresql: + enabled: false + +cassandra: + enabled: false + +mysql: + enabled: false diff --git a/charts/retool-temporal-services/values/values.antiaffinity.yaml b/charts/retool-temporal-services/values/values.antiaffinity.yaml new file mode 100644 index 00000000..29cd8779 --- /dev/null +++ b/charts/retool-temporal-services/values/values.antiaffinity.yaml @@ -0,0 +1,228 @@ +cassandra: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - frontend + - history + - matching + - worker + topologyKey: kubernetes.io/hostname + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - elasticsearch-master + topologyKey: kubernetes.io/hostname + - weight: 5 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - prometheus + topologyKey: kubernetes.io/hostname + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - grafana + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app 
+ operator: In + values: + - cassandra + topologyKey: kubernetes.io/hostname + +server: + frontend: + affinity: + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - history + - matching + - worker + topologyKey: kubernetes.io/hostname + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cassandra + topologyKey: kubernetes.io/hostname + - weight: 75 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - elasticsearch-master + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - frontend + topologyKey: kubernetes.io/hostname + + history: + affinity: + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - frontend + - matching + - worker + topologyKey: kubernetes.io/hostname + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cassandra + topologyKey: kubernetes.io/hostname + - weight: 75 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - elasticsearch-master + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - history + topologyKey: kubernetes.io/hostname + + matching: + affinity: + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 
50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - frontend + - history + - worker + topologyKey: kubernetes.io/hostname + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cassandra + topologyKey: kubernetes.io/hostname + - weight: 75 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - elasticsearch-master + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - matching + topologyKey: kubernetes.io/hostname + + worker: + affinity: + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - frontend + - matching + - history + topologyKey: kubernetes.io/hostname + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cassandra + topologyKey: kubernetes.io/hostname + - weight: 75 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - elasticsearch-master + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - worker + topologyKey: kubernetes.io/hostname diff --git a/charts/retool-temporal-services/values/values.aurora-mysql.yaml b/charts/retool-temporal-services/values/values.aurora-mysql.yaml new file mode 100644 index 00000000..21b61b66 --- /dev/null +++ b/charts/retool-temporal-services/values/values.aurora-mysql.yaml @@ -0,0 +1,47 @@ 
+server: + config: + persistence: + default: + driver: "sql" + + sql: + driver: "mysql" + host: _HOST_ + port: 3306 + database: temporal + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + connectAttributes: + tx_isolation: 'READ-COMMITTED' + + visibility: + driver: "sql" + + sql: + driver: "mysql" + host: _HOST_ + port: 3306 + database: temporal_visibility + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + connectAttributes: + tx_isolation: 'READ-COMMITTED' + +cassandra: + enabled: false + +mysql: + enabled: true + +postgresql: + enabled: false + +schema: + setup: + enabled: false + update: + enabled: false diff --git a/charts/retool-temporal-services/values/values.cassandra.yaml b/charts/retool-temporal-services/values/values.cassandra.yaml new file mode 100644 index 00000000..af622ca7 --- /dev/null +++ b/charts/retool-temporal-services/values/values.cassandra.yaml @@ -0,0 +1,53 @@ +server: + config: + logLevel: "debug,info" + + numHistoryShards: 512 + + persistence: + default: + driver: "cassandra" + + cassandra: + hosts: ["cassandra.default.svc.cluster.local"] + port: 9042 + keyspace: temporal + user: "user" + password: "password" + existingSecret: "" + replicationFactor: 1 + consistency: + default: + consistency: "local_quorum" + serialConsistency: "local_serial" + + visibility: + driver: "cassandra" + + cassandra: + hosts: ["cassandra.default.svc.cluster.local"] + port: 9042 + keyspace: temporal_visibility + user: "user" + password: "password" + existingSecret: "" + replicationFactor: 1 + consistency: + default: + consistency: "local_quorum" + serialConsistency: "local_serial" + +cassandra: + enabled: false + +mysql: + enabled: false + +postgresql: + enabled: false + +schema: + setup: + enabled: false + update: + enabled: false diff --git a/charts/retool-temporal-services/values/values.cloudsqlproxy.yaml b/charts/retool-temporal-services/values/values.cloudsqlproxy.yaml new file mode 100644 index 
00000000..967437df --- /dev/null +++ b/charts/retool-temporal-services/values/values.cloudsqlproxy.yaml @@ -0,0 +1,20 @@ +server: + sidecarContainers: + - name: cloud-sql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - "/cloud_sql_proxy" + - "-ip_address_types=PRIVATE" + - "-instances=_PROJECTNAME_:_REGION_:_INSTANCENAME_=tcp:5432" + - "-credential_file=/etc/google-cloud-key/key.json" + securityContext: + runAsNonRoot: true + volumeMounts: + - name: google-cloud-key + mountPath: /etc/google-cloud-key + readOnly: true + + additionalVolumes: + - name: google-cloud-key + secret: + secretName: cloud-sql-proxy-sa diff --git a/charts/retool-temporal-services/values/values.dynamic_config.yaml b/charts/retool-temporal-services/values/values.dynamic_config.yaml new file mode 100644 index 00000000..df797cd3 --- /dev/null +++ b/charts/retool-temporal-services/values/values.dynamic_config.yaml @@ -0,0 +1,8 @@ +server: + dynamicConfig: + matching.numTaskqueueReadPartitions: + - value: 5 + constraints: {} + matching.numTaskqueueWritePartitions: + - value: 5 + constraints: {} diff --git a/charts/retool-temporal-services/values/values.elasticsearch.yaml b/charts/retool-temporal-services/values/values.elasticsearch.yaml new file mode 100644 index 00000000..bd16922b --- /dev/null +++ b/charts/retool-temporal-services/values/values.elasticsearch.yaml @@ -0,0 +1,8 @@ +elasticsearch: + enabled: false + external: true + host: "elasticsearch-master-headless" + port: "9200" + version: "v7" + scheme: "http" + logLevel: "error" diff --git a/charts/retool-temporal-services/values/values.mysql.yaml b/charts/retool-temporal-services/values/values.mysql.yaml new file mode 100644 index 00000000..8d228269 --- /dev/null +++ b/charts/retool-temporal-services/values/values.mysql.yaml @@ -0,0 +1,43 @@ +server: + config: + persistence: + default: + driver: "sql" + + sql: + driver: "mysql" + host: _HOST_ + port: 3306 + database: temporal + user: _USERNAME_ + password: _PASSWORD_ + 
maxConns: 20 + maxConnLifetime: "1h" + + visibility: + driver: "sql" + + sql: + driver: "mysql" + host: _HOST_ + port: 3306 + database: temporal_visibility + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + +cassandra: + enabled: false + +mysql: + enabled: true + +postgresql: + enabled: false + +schema: + setup: + enabled: false + update: + enabled: false diff --git a/charts/retool-temporal-services/values/values.ndc.yaml b/charts/retool-temporal-services/values/values.ndc.yaml new file mode 100644 index 00000000..70d6758e --- /dev/null +++ b/charts/retool-temporal-services/values/values.ndc.yaml @@ -0,0 +1,34 @@ +server: + config: + dcRedirectionPolicy: + policy: "selected-apis-forwarding" + toDC: "" + + clusterMetadata: + enableGlobalNamespace: true + replicationConsumer: + type: rpc + failoverVersionIncrement: 100 + masterClusterName: cluster_a + currentClusterName: # + # clusterInformation: + # : + # enabled: true + # initialFailoverVersion: + # rpcName: "frontend" + # rpcAddress: + # cluster_a: + # enabled: true + # initialFailoverVersion: 1 + # rpcName: "frontend" + # rpcAddress: "localhost:7233" + # cluster_b: + # enabled: true + # initialFailoverVersion: 2 + # rpcName: "frontend" + # rpcAddress: "localhost:8233" + # cluster_c: + # enabled: false + # initialFailoverVersion: 3 + # rpcName: "frontend" + # rpcAddress: "localhost:9233" diff --git a/charts/retool-temporal-services/values/values.postgresql.yaml b/charts/retool-temporal-services/values/values.postgresql.yaml new file mode 100644 index 00000000..2460e1a9 --- /dev/null +++ b/charts/retool-temporal-services/values/values.postgresql.yaml @@ -0,0 +1,43 @@ +server: + config: + persistence: + default: + driver: "sql" + + sql: + driver: "postgres" + host: _HOST_ + port: 5432 + database: temporal + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + + visibility: + driver: "sql" + + sql: + driver: "postgres" + host: _HOST_ + port: 5432 + database: 
temporal_visibility + user: _USERNAME_ + password: _PASSWORD_ + maxConns: 20 + maxConnLifetime: "1h" + +cassandra: + enabled: false + +mysql: + enabled: false + +postgresql: + enabled: true + +schema: + setup: + enabled: false + update: + enabled: false diff --git a/charts/retool-temporal-services/values/values.resources.yaml b/charts/retool-temporal-services/values/values.resources.yaml new file mode 100644 index 00000000..8d0a9aad --- /dev/null +++ b/charts/retool-temporal-services/values/values.resources.yaml @@ -0,0 +1,24 @@ +server: + frontend: + resources: + requests: + cpu: 100m + memory: 512Mi + + history: + resources: + requests: + cpu: 100m + memory: 512Mi + + matching: + resources: + requests: + cpu: 100m + memory: 512Mi + + worker: + resources: + requests: + cpu: 100m + memory: 512Mi diff --git a/charts/retool/Chart.yaml b/charts/retool/Chart.yaml index ff991315..08ed994e 100644 --- a/charts/retool/Chart.yaml +++ b/charts/retool/Chart.yaml @@ -11,3 +11,6 @@ dependencies: version: 12.1.5 repository: https://charts.bitnami.com/bitnami condition: postgresql.enabled + - name: retool-temporal-services + version: 1.1.2 + condition: retool-temporal-services.enabled,workflows.enabled diff --git a/charts/retool/templates/_helpers.tpl b/charts/retool/templates/_helpers.tpl index 3c18d28b..eb133b8e 100644 --- a/charts/retool/templates/_helpers.tpl +++ b/charts/retool/templates/_helpers.tpl @@ -122,3 +122,43 @@ Set postgresql user {{- .Values.config.postgresql.user | quote -}} {{- end -}} {{- end -}} + +{{/* +Set Temporal frontend host +*/}} +{{- define "retool.temporal.host" -}} +{{- if (.Values.workflows.temporal).enabled -}} +{{- .Values.workflows.temporal.host | quote -}} +{{- else -}} +{{- printf "%s-%s" (include "temporal.fullname" (index .Subcharts "retool-temporal-services")) "frontend" -}} +{{- end -}} +{{- end -}} + +{{/* +Set Temporal frontend port +*/}} +{{- define "retool.temporal.port" -}} +{{- if (.Values.workflows.temporal).enabled -}} +{{- 
.Values.workflows.temporal.port | quote -}} +{{- else -}} +{{- "7233" | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Set Temporal namespace +*/}} +{{- define "retool.temporal.namespace" -}} +{{- if (.Values.workflows.temporal).enabled -}} +{{- .Values.workflows.temporal.namespace | quote -}} +{{- else -}} +{{- "workflows" | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Set code executor service name +*/}} +{{- define "retool.codeExecutor.name" -}} +{{ template "retool.fullname" . }}-code-executor +{{- end -}} \ No newline at end of file diff --git a/charts/retool/templates/deployment_backend.yaml b/charts/retool/templates/deployment_backend.yaml index 3560ddcc..89a7e1f0 100644 --- a/charts/retool/templates/deployment_backend.yaml +++ b/charts/retool/templates/deployment_backend.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ template "retool.fullname" . }} + name: {{ template "retool.fullname" . }}-main-backend labels: {{- include "retool.labels" . | nindent 4 }} {{- if .Values.deployment.annotations }} @@ -60,6 +60,9 @@ spec: {{- if gt (int (toString (.Values.replicaCount))) 1 }} - name: SERVICE_TYPE value: MAIN_BACKEND,DB_CONNECTOR,DB_SSH_CONNECTOR + {{- else }} + - name: SERVICE_TYPE + value: MAIN_BACKEND,DB_CONNECTOR,DB_SSH_CONNECTOR,JOBS_RUNNER {{- end }} - name: CLIENT_ID value: {{ default "" .Values.config.auth.google.clientId }} @@ -77,6 +80,44 @@ spec: value: {{ template "retool.postgresql.user" . }} - name: POSTGRES_SSL_ENABLED value: {{ template "retool.postgresql.ssl_enabled" . }} + {{- if .Values.config.dbConnectorTimeout }} + - name: DBCONNECTOR_QUERY_TIMEOUT_MS + value: {{ .Values.config.dbConnectorTimeout | quote }} + {{- end }} + {{- if and (.Values.workflows.enabled) (or (index .Values "retool-temporal-services" "enabled") ((.Values.workflows.temporal).enabled)) }} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_HOST + value: {{ template "retool.temporal.host" . 
}} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_PORT + value: {{ template "retool.temporal.port" . }} + - name: WORKFLOW_TEMPORAL_CLUSTER_NAMESPACE + value: {{ template "retool.temporal.namespace" . }} + {{- end }} + {{- if (.Values.workflows.enabled) }} + - name: WORKFLOW_BACKEND_HOST + value: http://{{ template "retool.fullname" . }}-workflow-backend + {{- end }} + {{- if (.Values.workflows.temporal).sslEnabled }} + - name: WORKFLOW_TEMPORAL_TLS_ENABLED + value: "true" + {{- if (and (.Values.workflows.temporal).sslCert (.Values.workflows.temporal).sslKey) }} + - name: WORKFLOW_TEMPORAL_TLS_CRT + value: {{ .Values.workflows.temporal.sslCert }} + - name: WORKFLOW_TEMPORAL_TLS_KEY + valueFrom: + secretKeyRef: + {{- if (.Values.workflows.temporal).sslKeySecretName }} + name: {{ .Values.workflows.temporal.sslKeySecretName }} + key: {{ .Values.workflows.temporal.sslKeySecretKey | default "temporal-tls-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: "temporal-tls-key" + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.codeExecutor.enabled }} + - name: CODE_EXECUTOR_INGRESS_DOMAIN + value: http://{{ template "retool.codeExecutor.name" . 
}} + {{- end }} {{- if and (not .Values.externalSecrets.enabled) (not .Values.externalSecrets.externalSecretsOperator.enabled) }} - name: LICENSE_KEY valueFrom: @@ -158,6 +199,10 @@ spec: envFrom: - secretRef: name: {{ .Values.externalSecrets.name }} + {{- range .Values.externalSecrets.secrets }} + - secretRef: + name: {{ .name }} + {{- end }} {{- end }} {{- if .Values.externalSecrets.externalSecretsOperator.enabled }} envFrom: @@ -257,7 +302,7 @@ spec: {{- end }} --- {{- if .Values.podDisruptionBudget }} -{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} apiVersion: policy/v1 {{- else -}} apiVersion: policy/v1beta1 diff --git a/charts/retool/templates/deployment_code_executor.yaml b/charts/retool/templates/deployment_code_executor.yaml new file mode 100644 index 00000000..7f52e795 --- /dev/null +++ b/charts/retool/templates/deployment_code_executor.yaml @@ -0,0 +1,125 @@ +{{- if .Values.codeExecutor.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "retool.codeExecutor.name" . }} + labels: + retoolService: {{ template "retool.codeExecutor.name" . }} +{{- include "retool.labels" . | nindent 4 }} +{{- if .Values.deployment.annotations }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.codeExecutor.replicaCount }} + selector: + matchLabels: + retoolService: {{ template "retool.codeExecutor.name" . }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: + prometheus.io/job: {{ template "retool.codeExecutor.name" . 
}} + prometheus.io/scrape: 'true' + prometheus.io/port: '9090' +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.codeExecutor.annotations }} +{{ toYaml .Values.codeExecutor.annotations | indent 8 }} +{{- end }} + labels: + retoolService: {{ template "retool.codeExecutor.name" . }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.codeExecutor.labels }} +{{ toYaml .Values.codeExecutor.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "retool.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if .Values.initContainers }} + initContainers: +{{- range $key, $value := .Values.initContainers }} + - name: "{{ $key }}" +{{ toYaml $value | indent 8 }} +{{- end }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.codeExecutor.image.repository }}:{{ required "Please set a value for .Values.codeExecutor.image.tag" .Values.codeExecutor.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + privileged: true + env: + - name: NODE_ENV + value: production + - name: NODE_OPTIONS + value: {{(.Values.codeExecutor.config).nodeOptions | default "--max_old_space_size=1024" }} + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range .Values.environmentSecrets }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretKeyRef.name }} + key: {{ .secretKeyRef.key }} + {{- end }} + {{- with .Values.environmentVariables }} +{{ toYaml . | indent 10 }} + {{- end }} + ports: + - containerPort: 3004 + name: {{ template "retool.name" . 
}} + protocol: TCP + - containerPort: 9090 + name: metrics + protocol: TCP +{{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.livenessProbe.path }} + port: 3004 + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} +{{- end }} +{{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.readinessProbe.path }} + port: 3004 + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} +{{- end }} + resources: +{{ toYaml .Values.codeExecutor.resources | indent 10 }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "retool.codeExecutor.name" . }} +spec: + selector: + retoolService: {{ template "retool.codeExecutor.name" . }} + ports: + - protocol: TCP + port: 80 + targetPort: 3004 + name: {{ template "retool.name" . 
}} + - protocol: TCP + port: 9090 + targetPort: metrics + name: metrics +{{- end }} diff --git a/charts/retool/templates/deployment_jobs.yaml b/charts/retool/templates/deployment_jobs.yaml index 54b366ce..de9d12b9 100644 --- a/charts/retool/templates/deployment_jobs.yaml +++ b/charts/retool/templates/deployment_jobs.yaml @@ -153,6 +153,10 @@ spec: envFrom: - secretRef: name: {{ .Values.externalSecrets.name }} + {{- range .Values.externalSecrets.secrets }} + - secretRef: + name: {{ .name }} + {{- end }} {{- end }} {{- if .Values.externalSecrets.externalSecretsOperator.enabled }} envFrom: diff --git a/charts/retool/templates/deployment_workflows.yaml b/charts/retool/templates/deployment_workflows.yaml new file mode 100644 index 00000000..ecad55bb --- /dev/null +++ b/charts/retool/templates/deployment_workflows.yaml @@ -0,0 +1,310 @@ +{{- if .Values.workflows.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "retool.fullname" . }}-workflow-backend + labels: + retoolService: {{ template "retool.fullname" . }}-workflow-backend +{{- include "retool.labels" . | nindent 4 }} +{{- if .Values.deployment.annotations }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} +{{- end }} +spec: + replicas: 1 + selector: + matchLabels: + retoolService: {{ template "retool.fullname" . }}-workflow-backend + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.backend.annotations }} +{{ toYaml .Values.backend.annotations | indent 8 }} +{{- end }} + labels: + retoolService: {{ template "retool.fullname" . }}-workflow-backend +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.workflows.labels }} +{{ toYaml .Values.workflows.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "retool.serviceAccountName" . 
}} + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if .Values.initContainers }} + initContainers: +{{- range $key, $value := .Values.initContainers }} + - name: "{{ $key }}" +{{ toYaml $value | indent 8 }} +{{- end }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ required "Please set a value for .Values.image.tag" .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - bash + - -c + - chmod -R +x ./docker_scripts; sync; ./docker_scripts/wait-for-it.sh -t 0 {{ template "retool.postgresql.host" . }}:{{ template "retool.postgresql.port" . }}; ./docker_scripts/start_api.sh + {{- if .Values.commandline.args }} +{{ toYaml .Values.commandline.args | indent 10 }} + {{- end }} + env: + - name: NODE_ENV + value: production + - name: SERVICE_TYPE + value: DB_CONNECTOR,DB_SSH_CONNECTOR,WORKFLOW_BACKEND + - name: DBCONNECTOR_POSTGRES_POOL_MAX_SIZE + value: "100" + - name: DBCONNECTOR_QUERY_TIMEOUT_MS + {{- if .Values.workflows.dbConnectorTimeout }} + value: {{ .Values.workflows.dbConnectorTimeout | quote}} + {{- else if .Values.config.dbConnectorTimeout }} + value: {{ .Values.config.dbConnectorTimeout | quote}} + {{- else }} + value: "5400000" + {{- end }} + - name: DISABLE_DATABASE_MIGRATIONS + value: "true" + {{- if or (index .Values "retool-temporal-services" "enabled") ((.Values.workflows.temporal).enabled) }} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_HOST + value: {{ template "retool.temporal.host" . }} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_PORT + value: {{ template "retool.temporal.port" . }} + - name: WORKFLOW_TEMPORAL_CLUSTER_NAMESPACE + value: {{ template "retool.temporal.namespace" . 
}} + {{- end }} + {{- if (.Values.workflows.temporal).sslEnabled }} + - name: WORKFLOW_TEMPORAL_TLS_ENABLED + value: "true" + {{- if (and (.Values.workflows.temporal).sslCert (.Values.workflows.temporal).sslKey) }} + - name: WORKFLOW_TEMPORAL_TLS_CRT + value: {{ .Values.workflows.temporal.sslCert }} + - name: WORKFLOW_TEMPORAL_TLS_KEY + valueFrom: + secretKeyRef: + {{- if (.Values.workflows.temporal).sslKeySecretName }} + name: {{ .Values.workflows.temporal.sslKeySecretName }} + key: {{ .Values.workflows.temporal.sslKeySecretKey | default "temporal-tls-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: "temporal-tls-key" + {{- end }} + {{- end }} + {{- end }} + - name: CLIENT_ID + value: {{ default "" .Values.config.auth.google.clientId }} + - name: COOKIE_INSECURE + value: {{ .Values.config.useInsecureCookies | quote }} + - name: RESTRICTED_DOMAIN + value: {{ default "" .Values.config.auth.google.domain }} + - name: POSTGRES_HOST + value: {{ template "retool.postgresql.host" . }} + - name: POSTGRES_PORT + value: {{ template "retool.postgresql.port" . }} + - name: POSTGRES_DB + value: {{ template "retool.postgresql.db" . }} + - name: POSTGRES_USER + value: {{ template "retool.postgresql.user" . }} + - name: POSTGRES_SSL_ENABLED + value: {{ template "retool.postgresql.ssl_enabled" . }} + - name: WORKFLOW_BACKEND_HOST + value: http://{{ template "retool.fullname" . }}-workflow-backend + {{- if .Values.codeExecutor.enabled }} + - name: CODE_EXECUTOR_INGRESS_DOMAIN + value: http://{{ template "retool.codeExecutor.name" . }} + {{- end }} + {{- if and (not .Values.externalSecrets.enabled) (not .Values.externalSecrets.externalSecretsOperator.enabled) }} + - name: LICENSE_KEY + valueFrom: + secretKeyRef: + {{- if .Values.config.licenseKeySecretName }} + name: {{ .Values.config.licenseKeySecretName }} + key: {{ .Values.config.licenseKeySecretKey | default "license-key" }} + {{- else }} + name: {{ template "retool.fullname" . 
}} + key: license-key + {{- end }} + - name: JWT_SECRET + valueFrom: + secretKeyRef: + {{- if .Values.config.jwtSecretSecretName }} + name: {{ .Values.config.jwtSecretSecretName }} + key: {{ .Values.config.jwtSecretSecretKey | default "jwt-secret" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: jwt-secret + {{- end }} + - name: ENCRYPTION_KEY + valueFrom: + secretKeyRef: + {{- if .Values.config.encryptionKeySecretName }} + name: {{ .Values.config.encryptionKeySecretName }} + key: {{ .Values.config.encryptionKeySecretKey | default "encryption-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: encryption-key + {{- end }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.postgresql.enabled }} + name: {{ template "retool.postgresql.fullname" . }} + key: postgres-password + {{- else }} + {{- if .Values.config.postgresql.passwordSecretName }} + name: {{ .Values.config.postgresql.passwordSecretName }} + key: {{ .Values.config.postgresql.passwordSecretKey | default "postgresql-password" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: postgresql-password + {{- end }} + {{- end }} + - name: CLIENT_SECRET + valueFrom: + secretKeyRef: + {{- if .Values.config.auth.google.clientSecretSecretName }} + name: {{ .Values.config.auth.google.clientSecretSecretName }} + key: {{ .Values.config.auth.google.clientSecretSecretKey | default "google-client-secret" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: google-client-secret + {{- end }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range .Values.environmentSecrets }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretKeyRef.name }} + key: {{ .secretKeyRef.key }} + {{- end }} + {{- with .Values.environmentVariables }} +{{ toYaml . 
| indent 10 }} + {{- end }} + {{- if .Values.externalSecrets.enabled }} + envFrom: + - secretRef: + name: {{ .Values.externalSecrets.name }} + {{- range .Values.externalSecrets.secrets }} + - secretRef: + name: {{ .name }} + {{- end }} + {{- end }} + {{- if .Values.externalSecrets.externalSecretsOperator.enabled }} + envFrom: + {{- range .Values.externalSecrets.externalSecretsOperator.secretRef }} + - secretRef: + name: {{ .name }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ .Values.service.internalPort }} + name: {{ template "retool.name" . }} + protocol: TCP +{{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.livenessProbe.path }} + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} +{{- end }} +{{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.readinessProbe.path }} + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + {{- range $configFile := (keys .Values.files) }} + - name: {{ template "retool.name" $ }} + mountPath: "/usr/share/retool/config/{{ $configFile }}" + subPath: {{ $configFile }} + {{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} +{{- end }} +{{- with .Values.extraContainers }} +{{ tpl . 
$ | indent 6 }} +{{- end }} +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} +{{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} + volumes: +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8 }} +{{- end }} +--- +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ template "retool.fullname" . }} +spec: + {{ toYaml .Values.podDisruptionBudget }} + selector: + matchLabels: + retoolService: {{ template "retool.fullname" . }}-workflow-backend + {{- include "retool.selectorLabels" . | nindent 6 }} +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "retool.fullname" . }}-workflow-backend +spec: + selector: + retoolService: {{ template "retool.fullname" . 
}}-workflow-backend + ports: + - protocol: TCP + port: 80 + targetPort: {{ .Values.service.internalPort }} +{{- end }} diff --git a/charts/retool/templates/deployment_workflows_worker.yaml b/charts/retool/templates/deployment_workflows_worker.yaml new file mode 100644 index 00000000..0380adff --- /dev/null +++ b/charts/retool/templates/deployment_workflows_worker.yaml @@ -0,0 +1,342 @@ +{{- if .Values.workflows.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "retool.fullname" . }}-workflow-worker + labels: + retoolService: {{ template "retool.fullname" . }}-workflow-worker +{{- include "retool.labels" . | nindent 4 }} +{{- if .Values.deployment.annotations }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.workflows.replicaCount }} + selector: + matchLabels: + retoolService: {{ template "retool.fullname" . }}-workflow-worker + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: + prometheus.io/job: {{ template "retool.fullname" . }}-workflow-worker + prometheus.io/scrape: 'true' + prometheus.io/port: '9090' +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} +{{- if .Values.backend.annotations }} +{{ toYaml .Values.backend.annotations | indent 8 }} +{{- end }} +{{- if .Values.workflows.annotations }} +{{ toYaml .Values.workflows.annotations | indent 8 }} +{{- end }} + labels: + retoolService: {{ template "retool.fullname" . }}-workflow-worker +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.workflows.labels }} +{{ toYaml .Values.workflows.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "retool.serviceAccountName" . 
}} + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if .Values.initContainers }} + initContainers: +{{- range $key, $value := .Values.initContainers }} + - name: "{{ $key }}" +{{ toYaml $value | indent 8 }} +{{- end }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ required "Please set a value for .Values.image.tag" .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - bash + - -c + - chmod -R +x ./docker_scripts; sync; ./docker_scripts/wait-for-it.sh -t 0 {{ template "retool.postgresql.host" . }}:{{ template "retool.postgresql.port" . }}; ./docker_scripts/start_api.sh + {{- if .Values.commandline.args }} +{{ toYaml .Values.commandline.args | indent 10 }} + {{- end }} + env: + - name: NODE_ENV + value: production + - name: NODE_OPTIONS + value: {{(.Values.workflows.config).nodeOptions | default "--max_old_space_size=1024" }} + - name: SERVICE_TYPE + value: WORKFLOW_TEMPORAL_WORKER + - name: DBCONNECTOR_POSTGRES_POOL_MAX_SIZE + value: "100" + - name: DBCONNECTOR_QUERY_TIMEOUT_MS + {{- if .Values.workflows.dbConnectorTimeout }} + value: {{ .Values.workflows.dbConnectorTimeout | quote}} + {{- else if .Values.config.dbConnectorTimeout }} + value: {{ .Values.config.dbConnectorTimeout | quote}} + {{- else }} + value: "5400000" + {{- end }} + - name: DISABLE_DATABASE_MIGRATIONS + value: "true" + {{- if or (index .Values "retool-temporal-services-helm" "enabled") (.Values.workflows.temporal.enabled) }} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_HOST + value: {{ template "retool.temporal.host" . }} + - name: WORKFLOW_TEMPORAL_CLUSTER_FRONTEND_PORT + value: {{ template "retool.temporal.port" . }} + - name: WORKFLOW_TEMPORAL_CLUSTER_NAMESPACE + value: {{ template "retool.temporal.namespace" . 
}} + {{- end }} + {{- if (.Values.workflows.temporal).sslEnabled }} + - name: WORKFLOW_TEMPORAL_TLS_ENABLED + value: "true" + {{- if (and (.Values.workflows.temporal).sslCert (.Values.workflows.temporal).sslKey) }} + - name: WORKFLOW_TEMPORAL_TLS_CRT + value: {{ .Values.workflows.temporal.sslCert }} + - name: WORKFLOW_TEMPORAL_TLS_KEY + valueFrom: + secretKeyRef: + {{- if (.Values.workflows.temporal).sslKeySecretName }} + name: {{ .Values.workflows.temporal.sslKeySecretName }} + key: {{ .Values.workflows.temporal.sslKeySecretKey | default "temporal-tls-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: "temporal-tls-key" + {{- end }} + {{- end }} + {{- end }} + - name: WORKFLOW_WORKER_HEALTHCHECK_PORT + value: "3005" + - name: WORKFLOW_BACKEND_HOST + value: http://{{ template "retool.fullname" . }}-workflow-backend + - name: CLIENT_ID + value: {{ default "" .Values.config.auth.google.clientId }} + - name: COOKIE_INSECURE + value: {{ .Values.config.useInsecureCookies | quote }} + - name: RESTRICTED_DOMAIN + value: {{ default "" .Values.config.auth.google.domain }} + - name: POSTGRES_HOST + value: {{ template "retool.postgresql.host" . }} + - name: POSTGRES_PORT + value: {{ template "retool.postgresql.port" . }} + - name: POSTGRES_DB + value: {{ template "retool.postgresql.db" . }} + - name: POSTGRES_USER + value: {{ template "retool.postgresql.user" . }} + - name: POSTGRES_SSL_ENABLED + value: {{ template "retool.postgresql.ssl_enabled" . }} + {{- if .Values.codeExecutor.enabled }} + - name: CODE_EXECUTOR_INGRESS_DOMAIN + value: http://{{ template "retool.codeExecutor.name" . 
}} + {{- end }} + {{- if and (((.Values.workflows.config).otelCollector).enabled) (((.Values.workflows.config).otelCollector).endpoint) }} + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{ ((.Values.workflows.config).otelCollector).endpoint }} + {{- else if ((.Values.workflows.config).otelCollector).enabled }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://$(HOST_IP):4317" + {{- end }} + {{- if and (not .Values.externalSecrets.enabled) (not .Values.externalSecrets.externalSecretsOperator.enabled) }} + - name: LICENSE_KEY + valueFrom: + secretKeyRef: + {{- if .Values.config.licenseKeySecretName }} + name: {{ .Values.config.licenseKeySecretName }} + key: {{ .Values.config.licenseKeySecretKey | default "license-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: license-key + {{- end }} + - name: JWT_SECRET + valueFrom: + secretKeyRef: + {{- if .Values.config.jwtSecretSecretName }} + name: {{ .Values.config.jwtSecretSecretName }} + key: {{ .Values.config.jwtSecretSecretKey | default "jwt-secret" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: jwt-secret + {{- end }} + - name: ENCRYPTION_KEY + valueFrom: + secretKeyRef: + {{- if .Values.config.encryptionKeySecretName }} + name: {{ .Values.config.encryptionKeySecretName }} + key: {{ .Values.config.encryptionKeySecretKey | default "encryption-key" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: encryption-key + {{- end }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.postgresql.enabled }} + name: {{ template "retool.postgresql.fullname" . }} + key: postgres-password + {{- else }} + {{- if .Values.config.postgresql.passwordSecretName }} + name: {{ .Values.config.postgresql.passwordSecretName }} + key: {{ .Values.config.postgresql.passwordSecretKey | default "postgresql-password" }} + {{- else }} + name: {{ template "retool.fullname" . 
}} + key: postgresql-password + {{- end }} + {{- end }} + - name: CLIENT_SECRET + valueFrom: + secretKeyRef: + {{- if .Values.config.auth.google.clientSecretSecretName }} + name: {{ .Values.config.auth.google.clientSecretSecretName }} + key: {{ .Values.config.auth.google.clientSecretSecretKey | default "google-client-secret" }} + {{- else }} + name: {{ template "retool.fullname" . }} + key: google-client-secret + {{- end }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range .Values.environmentSecrets }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretKeyRef.name }} + key: {{ .secretKeyRef.key }} + {{- end }} + {{- with .Values.environmentVariables }} +{{ toYaml . | indent 10 }} + {{- end }} + {{- with .Values.workflows.config.environmentVariables }} +{{ toYaml . | indent 10 }} + {{- end }} + {{- if .Values.externalSecrets.enabled }} + envFrom: + - secretRef: + name: {{ .Values.externalSecrets.name }} + {{- range .Values.externalSecrets.secrets }} + - secretRef: + name: {{ .name }} + {{- end }} + {{- end }} + {{- if .Values.externalSecrets.externalSecretsOperator.enabled }} + envFrom: + {{- range .Values.externalSecrets.externalSecretsOperator.secretRef }} + - secretRef: + name: {{ .name }} + {{- end }} + {{- end }} + ports: + - containerPort: 3005 + name: {{ template "retool.name" . 
}} + protocol: TCP + - containerPort: 9090 + name: metrics + protocol: TCP + +{{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.livenessProbe.path }} + port: 3005 + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} +{{- end }} +{{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.readinessProbe.path }} + port: 3005 + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} +{{- end }} + resources: +{{ toYaml .Values.workflows.resources | indent 10 }} + volumeMounts: + {{- range $configFile := (keys .Values.files) }} + - name: {{ template "retool.name" $ }} + mountPath: "/usr/share/retool/config/{{ $configFile }}" + subPath: {{ $configFile }} + {{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} +{{- end }} +{{- with .Values.extraContainers }} +{{ tpl . 
$ | indent 6 }} +{{- end }} +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} +{{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} + volumes: +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8 }} +{{- end }} +--- +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ template "retool.fullname" . }} +spec: + {{ toYaml .Values.podDisruptionBudget }} + selector: + matchLabels: + {{- include "retool.selectorLabels" . | nindent 6 }} +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "retool.fullname" . }}-workflow-worker +spec: + selector: + retoolService: {{ template "retool.fullname" . }}-workflow-worker + ports: + - protocol: TCP + port: 3005 + targetPort: 3005 + name: {{ template "retool.name" . }} + - protocol: TCP + port: 9090 + targetPort: metrics + name: metrics +{{- end }} diff --git a/charts/retool/templates/ingress.yaml b/charts/retool/templates/ingress.yaml index bf4e7c7d..dca69f18 100644 --- a/charts/retool/templates/ingress.yaml +++ b/charts/retool/templates/ingress.yaml @@ -2,9 +2,9 @@ {{- $fullName := include "retool.fullname" . 
-}} {{- $svcPort := .Values.service.externalPort -}} {{- $pathType := .Values.ingress.pathType -}} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.Version -}} apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.Version -}} apiVersion: networking.k8s.io/v1beta1 {{- else -}} apiVersion: extensions/v1beta1 @@ -22,7 +22,7 @@ metadata: {{- end }} name: {{ template "retool.fullname" . }} spec: - {{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + {{- if and .Values.ingress.ingressClassName (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.Version) }} ingressClassName: {{ .Values.ingress.ingressClassName }} {{- end }} rules: @@ -31,11 +31,11 @@ spec: http: paths: - path: - {{- if and $pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + {{- if and $pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.Version) }} pathType: {{ $pathType }} {{- end }} backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.Version }} service: name: {{ $fullName }} port: @@ -51,11 +51,11 @@ spec: paths: {{- range .paths }} - path: {{ .path }} - {{- if and $pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + {{- if and $pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.Version) }} pathType: {{ $pathType }} {{- end }} backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.Version }} service: name: {{ $fullName }} port: diff --git a/values.yaml b/values.yaml index 83ba9d76..ba791976 100644 --- a/values.yaml +++ b/values.yaml @@ -8,6 +8,8 @@ config: # licenseKeySecretKey is the 
key in the k8s secret, default: license-key # licenseKeySecretKey: useInsecureCookies: false + # Timeout for queries, in ms. + # dbConnectorTimeout: 120000 auth: google: clientId: @@ -91,6 +93,10 @@ externalSecrets: # This mode only allows a single secret name to be provided. enabled: false name: retool-config + # Array of secrets to be used as env variables. (Optional) + secrets: [] + # - name: retool-config + # - name: retool-db # Support for External Secrets Operator: https://github.com/external-secrets/external-secrets externalSecretsOperator: enabled: false @@ -143,11 +149,22 @@ ingress: # - retool.example.com # servicePort: service-port pathType: ImplementationSpecific + # For supporting other ingress controllers that require customizing the .backend.service.name and .backend.service.port.name, + # like AWS ALB extraPaths allows that customization, and it takes precedence in the list of paths for the host, + # this is in order to allow a rule like ssl-redirect from port 80-->443 to be first (otherwise there wouldn't be a redirect) + # extraPaths: + # - path: /* + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + # pathType: ImplementationSpecific postgresql: # We highly recommend you do NOT use this subchart as is to run Postgres in a container # for your production instance of Retool; it is a default. Please use a managed Postgres, - # or self-host more permanantly. Use enabled: false and set in config above to do so. + # or self-host more permanently. Use enabled: false and set in config above to do so. enabled: true ssl_enabled: false auth: @@ -279,6 +296,216 @@ backend: # Labels for backend pods labels: {} +workflows: + enabled: true + + # A replicaCount of 1 will launch 6 pods -- 1 workflow backend, 1 workflow worker, and 4 pods that make up the executor temporal cluster + # Scaling this number will increase the number of workflow workers, e.g.
a replicaCount of 4 + # will launch 9 pods -- 1 workflow backend, 4 workflow workers, and 4 for temporal cluster + + # ADVANCED: The temporal cluster can be scaled separately in the subchart (charts/retool-temporal-services/values.yaml) + # If your needs require scaling temporal, reach out to us for guidance -- it is likely the bottleneck is DB or worker replicaCount + replicaCount: 1 + + # Timeout for queries, in ms. This will set the timeout for workflows-related pods only + # If this value is not set but config.dbConnectorTimeout is, we will set workflows pod timeouts + # to .Values.config.dbConnectorTimeout + # dbConnectorTimeout: 120000 + + # Annotations for workflows worker pods + annotations: {} + + # Labels for workflows worker pods + labels: {} + + # IMPORTANT: Incompatible with retool-temporal-services-helm subchart + # This allows configuring a Retool Workflows deployment that uses your own Temporal cluster + # instead of deploying a new one. Set enabled to true and add the config variables. + # NOTE: Temporal Frontend with required TLS or mTLS not currently supported + temporal: + # set enabled to true if using a pre-existing temporal cluster + enabled: false + # discoverable service name for the temporal frontend service + # host: + # discoverable service port for the temporal frontend service + # port: + # temporal namespace to use for Temporal Workflows related to Retool + # namespace: + # whether to use TLS/SSL when connecting to Temporal + # sslEnabled: false + # base64 encoded string of TLS/SSL client certificate to use for mTLS + # sslCert: + # base64 encoded string of TLS/SSL client certificate secret key to use for mTLS + # sslKey: + # recommended alternative to sslKey. Name and key of k8s secret containing base64 encoded sslKey + # sslKeySecretName: + # sslKeySecretKey: + + # Config for workflows worker pods. Node heap size limits can be overridden here + # otelCollector can be set to an OpenTelemetry Collector in your k8s cluster.
This will configure Temporal metrics collection which + # provides observability into Workflows worker performance, particularly useful in high QPS use-cases + # environmentVariables will only be set on the workflows worker + # only change the CONCURRENT_*_LIMIT values if you have higher load use-cases and deploy + # codeExecutor. Otherwise, the worker may OOM if the Workflows blocks use too much memory. + config: {} + # config: { + # nodeOptions: --max_old_space_size=1024 + # otelCollector: { + # enabled: true + # endpoint: http://$(HOST_IP):4317 + # } + # environmentVariables: [] + # - name: WORKFLOW_TEMPORAL_CONCURRENT_TASKS_LIMIT + # value: "100" + # - name: WORKFLOW_TEMPORAL_CONCURRENT_ACTIVITIES_LIMIT + # value: "100" + # } + + # Resources for the workflow worker - these are sane inputs that bias towards stability + # Can adjust but may see OOM errors if memory too low for heavy workflow load + resources: + limits: + cpu: 2000m + memory: 8192Mi + requests: + cpu: 1000m + memory: 2048Mi + +codeExecutor: + # Enable this for Python support and running code more securely within a separate + # sandboxed environment + enabled: false + + replicaCount: 1 + + # Annotations for code executor pods + annotations: {} + + # Labels for code executor pods + labels: {} + + # Config for code executor. Node heap size limits can be overridden here + config: {} + # config: { + # nodeOptions: --max_old_space_size=1024 + # } + + # Resources for the code executor. Most common issues will be seen with CPU usage as this will + # most likely be CPU bound. Adjust the CPU if latency increases under load.
+ resources: + limits: + cpu: 2000m + memory: 2048Mi + requests: + cpu: 1000m + memory: 1024Mi + + image: + repository: tryretool/code-executor-service + tag: + pullPolicy: IfNotPresent + +retool-temporal-services-helm: + # Disable this if using your own Temporal Cluster + enabled: false + server: + # Defines image to be used for temporal server + image: + repository: tryretool/one-offs + tag: retool-temporal-1.1.3 + pullPolicy: IfNotPresent + # this configures grpc_health_probe (https://github.com/grpc-ecosystem/grpc-health-probe) + # for healthchecks instead of native k8s. + # Set this to true if deploying in a k8s cluster on version <1.24 + useLegacyHealthProbe: false + config: + # the below values specify the database for temporal internals and workflow state + # both can point to the same db, and even the same as retool main above, although throughput + # will be limited. We strongly suggest using two total DBs: one for retool-main and one + # for default and visibility below + persistence: + default: + sql: + # host: + # port: + # the dbname used for temporal + # database: temporal + # user: + # password: + # existingSecret is the name of the secret where password is stored + # existingSecret: + # secretKey is the key in the k8s secret + # secretKey: + # options for SSL connections to database + # tls: + # enabled: true + # caFile: + # certFile: + # keyFile: + # enableHostVerification: false + # serverName: + visibility: + sql: + # host: + # port: + # the dbname used for temporal visibility + # database: temporal_visibility + # user: + # password: + # existingSecret is the name of the secret where password is stored + # existingSecret: + # secretKey is the key in the k8s secret + # secretKey: + # options for SSL connections to database + # tls: + # enabled: true + # caFile: + # certFile: + # keyFile: + # enableHostVerification: false + # serverName: + + # use-cases with very high throughput demands (>10k workflow blocks/sec) can modify + # below value to be
higher, such as 512 or 1024 + numHistoryShards: 128 + + # define resources for each temporal service -- these are sane starting points that allow + # for scaling to ~3 workflow workers without hitting bottlenecks + resources: + limits: + cpu: 500m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + # example of setting service-specific resources, here we increase memory limit for history server + history: + resources: + limits: + cpu: 500m + memory: 2Gi + requests: + cpu: 100m + memory: 128Mi + + # Setting this value to true will spin up the temporal web UI, admintools, + # along with prometheus and grafana clusters for debugging performance + # TODO: define this in _helpers.tpl + # visibilityDebugMode: false + # DEBUGGING: below pods can be used for debugging and watching metrics + # launches prometheus pods and listeners on temporal services and worker + prometheus: + enabled: false + # launches a grafana pod with helpful workflows executor metrics and graphs + grafana: + enabled: false + # launches the temporal web UI, which allows you to see which workflows are currently running + web: + enabled: false + # launches the temporal admintools pod, which allows you to manage your cluster, e.g. terminate workflows + admintools: + enabled: false + persistentVolumeClaim: # set to true to use pvc enabled: false