diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 0604740..b9cecf4 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -19,15 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-        # Node Selection Affinity: Schedule coordinator pods on nodes with a specific labels
+        # Node Selection Affinity: Schedule pods on nodes with the configured label key and value
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
-              - key: "{{ .Values.affinity.coordinatorNodeLabel }}"
+              - key: {{ $.Values.affinity.roleLabelKey }}
                 operator: In
                 values:
-                - "true"
+                - {{ $.Values.affinity.coordinatorNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - coordinator
+            topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
         # Unique Affinity: Schedule pods on different nodes
         podAntiAffinity:
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index 6b79ea9..f4b595e 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -19,34 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-        # Node Selection Affinity: Prefer nodes with the specific dataNodeLabel and prefer spreading across nodes
+        # Node Selection Affinity: Schedule pods on nodes with the configured label key and value
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
-              - key: "{{ $.Values.affinity.dataNodeLabel }}"
+              - key: {{ $.Values.affinity.roleLabelKey }}
                 operator: In
                 values:
-                - "true"
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 100
-            preference:
+                - {{ $.Values.affinity.dataNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
              matchExpressions:
-              - key: kubernetes.io/hostname
+              - key: role
                operator: In
                values:
-                - "true"
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 100
-            podAffinityTerm:
-              labelSelector:
-                matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - data
-                topologyKey: "kubernetes.io/hostname"
+                - data
+            topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
         # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod
         podAntiAffinity:
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 6d57b2f..14b0f36 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -38,16 +38,17 @@ memgraph:
   logPVCSize: "256Mi"
 
 # Affinity controls the scheduling of the memgraph-high-availability pods, by disabling affinity, pods will be schedule on any node in the cluster, no matter if they are on the same node or not.
-# The default unique affinity, will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, this is the most common use case for high availability. If not sufficient nodes, deployment will fail.
-# The parity affinity, will enable scheduling of the pods on the same node, but with the rule that one node can host pair made of coordinator and data node. This means each node can have max two pods, one coordinator and one data node. If not sufficient nodes, deployment will fail.
-# The nodeSelection affinity, will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with label coordinator-node and data nodes will be scheduled on the nodes with label data-node. The pods will prefer to spread on multiple data and coordinator nodes.
 affinity:
   enabled: true
+  # Unique affinity schedules the pods on different nodes in the cluster, so coordinators and data nodes are never placed on the same node. This is the most common setup for high availability. If there are not enough nodes, the deployment will fail.
   unique: false
+  # Parity affinity lets pods share a node, with the rule that one node can host at most one coordinator-data pair. Each node can therefore run at most two pods: one coordinator and one data node. If there are not enough nodes, the deployment will fail.
   parity: false
+  # Node selection affinity schedules pods on nodes with specific labels: coordinators on nodes whose role label matches coordinatorNodeLabelValue, data nodes on nodes whose role label matches dataNodeLabelValue. Pods of the same role must land on different nodes.
   nodeSelection: false
-  dataNodeLabel: "data-node"
-  coordinatorNodeLabel: "coordinator-node"
+  roleLabelKey: "role"
+  dataNodeLabelValue: "data-node"
+  coordinatorNodeLabelValue: "coordinator-node"
 
 data:
 - id: "0"
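
Usage note (illustrative, not part of the patch): with the new nodeSelection scheme, nodes must carry the configured label key and role value before the pods can be scheduled. A minimal sketch using the chart's default label key and values from above; the node names and the `memgraph` Helm repo alias are assumptions:

    # Label one node per coordinator pod and per data pod (node names are placeholders).
    kubectl label nodes worker-1 role=coordinator-node
    kubectl label nodes worker-2 role=data-node

    # Install the chart with node selection affinity enabled (repo alias assumed).
    helm install memgraph-ha memgraph/memgraph-high-availability --set affinity.nodeSelection=true

Because the pod anti-affinity introduced here is required rather than preferred, each pod of a given role needs its own labeled node; labeling too few nodes leaves pods Pending.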
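If the plain `role` key already carries another meaning on the cluster's nodes, the new values can be overridden. A sketch of a custom values file; the file name, label key, and label values here are hypothetical:

    # custom-values.yaml
    affinity:
      enabled: true
      nodeSelection: true
      roleLabelKey: "memgraph.io/role"
      coordinatorNodeLabelValue: "coordinator"
      dataNodeLabelValue: "data"

Applied with `helm upgrade --install memgraph-ha memgraph/memgraph-high-availability -f custom-values.yaml`, the nodes would then need labels `memgraph.io/role=coordinator` and `memgraph.io/role=data` instead of the defaults.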