diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md
index 1f83353..b02539f 100644
--- a/charts/memgraph-high-availability/README.md
+++ b/charts/memgraph-high-availability/README.md
@@ -20,62 +20,55 @@ Or you can modify a `values.yaml` file and override the desired values:
 helm install memgraph/memgraph-high-availability -f values.yaml
 ```
 
-## Running the Memgraph HA Helm Chart locally
-
-To run Memgraph HA Helm Chart locally, affinity needs to be disabled because the cluster will be running on a single node.
-
-To disable the affinity, run the following command with the specified set of flags:
-
-```
-helm install memgraph/memgraph-high-availability --set memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=,memgraph.env.MEMGRAPH_ORGANIZATION_NAME=,memgraph.affinity.enabled=false
-```
-
-The affinity is disabled either by running the command above, or by modifying the `values.yaml` file.
-
 ## Configuration Options
 
 The following table lists the configurable parameters of the Memgraph chart and their default values.
-
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` |
-| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version. | `2.22.0` |
-| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` |
-| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` |
-| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` |
-| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` |
-| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` |
-| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` |
-| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` |
-| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` |
-| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` |
-| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` |
-| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` |
-| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` |
-| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` |
-| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` |
-| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` |
-| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` |
-| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to unique node | `true` |
-| `memgraph.externalAccess.serviceType` | NodePort or LoadBalancer. Use LoadBalancer for Cloud production deployment and NodePort for local testing | `LoadBalancer` |
-| `memgraph.ports.boltPort` | Bolt port used on coordinator and data instances. | `7687` |
-| `memgraph.ports.managementPort` | Management port used on coordinator and data instances. | `10000` |
-| `memgraph.ports.replicationPort` | Replication port used on data instances. | `20000` |
-| `memgraph.ports.coordinatorPort` | Coordinator port used on coordinators. | `12000` |
-| `data` | Configuration for data instances | See `data` section |
-| `coordinators` | Configuration for coordinator instances | See `coordinators` section |
-| `sysctlInitContainer.enabled` | Enable the init container to set sysctl parameters | `true` |
-| `sysctlInitContainer.maxMapCount` | Value for `vm.max_map_count` to be set by the init container | `262144` |
+| Parameter                                           | Description                                                                                                 | Default                    |
+| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `memgraph.image.repository`                         | Memgraph Docker image repository                                                                            | `memgraph/memgraph`        |
+| `memgraph.image.tag`                                | Specific tag for the Memgraph Docker image. Overrides the image tag, whose default is the chart version.    | `2.22.0`                   |
+| `memgraph.image.pullPolicy`                         | Image pull policy                                                                                           | `IfNotPresent`             |
+| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE`          | Memgraph enterprise license                                                                                 | ``                         |
+| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME`           | Organization name                                                                                           | ``                         |
+| `memgraph.probes.startup.failureThreshold`          | Startup probe failure threshold                                                                             | `30`                       |
+| `memgraph.probes.startup.periodSeconds`             | Startup probe period in seconds                                                                             | `10`                       |
+| `memgraph.probes.readiness.initialDelaySeconds`     | Readiness probe initial delay in seconds                                                                    | `5`                        |
+| `memgraph.probes.readiness.periodSeconds`           | Readiness probe period in seconds                                                                           | `5`                        |
+| `memgraph.probes.liveness.initialDelaySeconds`      | Liveness probe initial delay in seconds                                                                     | `30`                       |
+| `memgraph.probes.liveness.periodSeconds`            | Liveness probe period in seconds                                                                            | `10`                       |
+| `memgraph.data.volumeClaim.storagePVC`              | Enable storage PVC                                                                                          | `true`                     |
+| `memgraph.data.volumeClaim.storagePVCSize`          | Size of the storage PVC                                                                                     | `1Gi`                      |
+| `memgraph.data.volumeClaim.logPVC`                  | Enable log PVC                                                                                              | `false`                    |
+| `memgraph.data.volumeClaim.logPVCSize`              | Size of the log PVC                                                                                         | `256Mi`                    |
+| `memgraph.coordinators.volumeClaim.storagePVC`      | Enable storage PVC for coordinators                                                                         | `true`                     |
+| `memgraph.coordinators.volumeClaim.storagePVCSize`  | Size of the storage PVC for coordinators                                                                    | `1Gi`                      |
+| `memgraph.coordinators.volumeClaim.logPVC`          | Enable log PVC for coordinators                                                                             | `false`                    |
+| `memgraph.coordinators.volumeClaim.logPVCSize`      | Size of the log PVC for coordinators                                                                        | `256Mi`                    |
+| `memgraph.externalAccess.serviceType`               | NodePort or LoadBalancer. Use LoadBalancer for cloud production deployments and NodePort for local testing  | `LoadBalancer`             |
+| `memgraph.ports.boltPort`                           | Bolt port used on coordinator and data instances.                                                           | `7687`                     |
+| `memgraph.ports.managementPort`                     | Management port used on coordinator and data instances.                                                     | `10000`                    |
+| `memgraph.ports.replicationPort`                    | Replication port used on data instances.                                                                    | `20000`                    |
+| `memgraph.ports.coordinatorPort`                    | Coordinator port used on coordinators.                                                                      | `12000`                    |
+| `memgraph.affinity.unique`                          | Schedule pods on different nodes in the cluster                                                             | `false`                    |
+| `memgraph.affinity.parity`                          | Schedule pods on the same node, with at most one coordinator and one data pod per node                      | `false`                    |
+| `memgraph.affinity.nodeSelection`                   | Schedule pods on nodes with specific labels                                                                 | `false`                    |
+| `memgraph.affinity.roleLabelKey`                    | Label key for node selection                                                                                | `role`                     |
+| `memgraph.affinity.dataNodeLabelValue`              | Label value for data nodes                                                                                  | `data-node`                |
+| `memgraph.affinity.coordinatorNodeLabelValue`       | Label value for coordinator nodes                                                                           | `coordinator-node`         |
+| `data`                                              | Configuration for data instances                                                                            | See `data` section         |
+| `coordinators`                                      | Configuration for coordinator instances                                                                     | See `coordinators` section |
+| `sysctlInitContainer.enabled`                       | Enable the init container to set sysctl parameters                                                          | `true`                     |
+| `sysctlInitContainer.maxMapCount`                   | Value for `vm.max_map_count` to be set by the init container                                                | `262144`                   |
 
 For the `data` and `coordinators` sections, each item in the list has the following parameters:
 
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `id` | ID of the instance | `0` for data, `1` for coordinators |
-| `args` | List of arguments for the instance | See `args` section |
+| Parameter | Description                        | Default                            |
+| --------- | ---------------------------------- | ---------------------------------- |
+| `id`      | ID of the instance                 | `0` for data, `1` for coordinators |
+| `args`    | List of arguments for the instance | See `args` section                 |
+
 
 The `args` section contains a list of arguments for the instance. The default values are the same for all instances:
diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 6511096..b36325b 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -9,28 +9,72 @@ spec:
   selector:
     matchLabels:
       app: memgraph-coordinator-{{ $coordinator.id }}
+      role: coordinator
   template:
     metadata:
       labels:
         app: memgraph-coordinator-{{ $coordinator.id }}
+        role: coordinator
         instance-type: coordinator
+
     spec:
-    {{ if $.Values.memgraph.affinity.enabled }}
       affinity:
+      {{- if $.Values.memgraph.affinity.nodeSelection }}
+        # Node Selection Affinity: schedule on nodes with a specific label key and value
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: {{ $.Values.memgraph.affinity.roleLabelKey }}
+                operator: In
+                values:
+                - {{ $.Values.memgraph.affinity.coordinatorNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - coordinator
+            topologyKey: "kubernetes.io/hostname"
+      {{- else if $.Values.memgraph.affinity.unique }}
+        # Unique Affinity: schedule pods on different nodes
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
-              - key: app
+              - key: role
                 operator: In
                 values:
-                - memgraph-coordinator-1
-                - memgraph-coordinator-2
-                - memgraph-coordinator-3
-                - memgraph-data-0
-                - memgraph-data-1
+                - coordinator
+                - data
             topologyKey: "kubernetes.io/hostname"
-    {{ end }}
+      {{- else if $.Values.memgraph.affinity.parity }}
+        # Parity Affinity: one coordinator and one data pod per node; coordinators are scheduled first, so pods need to come in coordinator/data pairs
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - coordinator
+            topologyKey: "kubernetes.io/hostname"
+      {{- else }}
+        # Default Affinity: prefer to avoid scheduling coordinator pods on the same node
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 50
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+              topologyKey: "kubernetes.io/hostname"
+      {{- end }}
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index e588922..e59859b 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -9,28 +9,82 @@ spec:
   selector:
     matchLabels:
       app: memgraph-data-{{ $data.id }}
+      role: data
   template:
     metadata:
       labels:
         app: memgraph-data-{{ $data.id }}
+        role: data
         instance-type: data
+
     spec:
-    {{ if $.Values.memgraph.affinity.enabled }}
       affinity:
+      {{- if $.Values.memgraph.affinity.nodeSelection }}
+        # Node Selection Affinity: schedule on nodes with a specific label key and value
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: {{ $.Values.memgraph.affinity.roleLabelKey }}
+                operator: In
+                values:
+                - {{ $.Values.memgraph.affinity.dataNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- else if $.Values.memgraph.affinity.unique }}
+        # Unique Affinity: schedule pods on nodes with no other coordinator or data pod
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
-              - key: app
+              - key: role
                 operator: In
                 values:
-                - memgraph-coordinator-1
-                - memgraph-coordinator-2
-                - memgraph-coordinator-3
-                - memgraph-data-0
-                - memgraph-data-1
+                - coordinator
+                - data
             topologyKey: "kubernetes.io/hostname"
-    {{ end }}
+      {{- else if $.Values.memgraph.affinity.parity }}
+        # Parity Affinity: one coordinator and one data pod per node; coordinators are scheduled first, so pods need to come in coordinator/data pairs
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - coordinator
+            topologyKey: "kubernetes.io/hostname"
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- else }}
+        # Default Affinity: prefer to avoid scheduling data pods on the same node
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 50
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+              topologyKey: "kubernetes.io/hostname"
+      {{- end }}
+
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 1fc3b4d..4497d43 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -32,8 +32,6 @@ memgraph:
     logPVCClassName: ""
     logPVC: true
     logPVCSize: "256Mi"
-  affinity:
-    enabled: true
   ports:
     boltPort: 7687
     managementPort: 10000
@@ -41,6 +39,17 @@ memgraph:
     coordinatorPort: 12000
   coordLoadBalancer:
     enabled: true
+  # Affinity controls the scheduling of the memgraph-high-availability pods. By default, data pods avoid being scheduled on the same node as other data pods, and coordinator pods avoid being scheduled on the same node as other coordinator pods. The deployment won't fail if there are not enough nodes.
+  affinity:
+    # The unique affinity schedules pods on different nodes in the cluster, so coordinators and data pods never share a node. If there are more pods than nodes, the deployment will fail.
+    unique: false
+    # The parity affinity lets pods share a node, with the rule that a node can host at most one coordinator/data pair. Each node can therefore run at most two pods: one coordinator and one data pod. If there are not enough nodes, the deployment will fail.
+    parity: false
+    # The nodeSelection affinity schedules pods on nodes with specific labels: coordinators go to nodes labeled with the coordinator-node value and data pods to nodes labeled with the data-node value. If there are not enough labeled nodes, the deployment will fail.
+    nodeSelection: false
+    roleLabelKey: "role"
+    dataNodeLabelValue: "data-node"
+    coordinatorNodeLabelValue: "coordinator-node"
 # If you are experiencing issues with the sysctlInitContainer, you can disable it here.
 # This is made to increase the max_map_count, necessary for high memory loads in Memgraph
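
As a usage sketch of the new options (the release name `my-ha` is a placeholder, and license/organization values are omitted here just as in the README's install examples), a single affinity mode can be enabled at install time with `--set`:

```
helm install my-ha memgraph/memgraph-high-availability --set memgraph.affinity.unique=true
```

Note that the templates evaluate `nodeSelection` first, then `unique`, then `parity`; if none of the three is `true`, the default preferred anti-affinity applies.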
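For the `nodeSelection` mode, the chart only matches labels; the nodes themselves have to be labeled beforehand. A minimal sketch, assuming hypothetical node names `node-a` and `node-b` and the default label key and values:

```
kubectl label nodes node-a role=coordinator-node
kubectl label nodes node-b role=data-node
helm install my-ha memgraph/memgraph-high-availability --set memgraph.affinity.nodeSelection=true
```

`roleLabelKey`, `dataNodeLabelValue`, and `coordinatorNodeLabelValue` can be overridden if the cluster already uses a different labeling scheme.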