From 7dc44ffd56aff89a83b0e420ec6b3e8409f19808 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 11:19:06 +0100
Subject: [PATCH 01/10] Init node selectors.

---
 .../templates/coordinators.yaml               | 74 +++++++++++++--
 .../templates/data.yaml                       | 94 +++++++++++++++++--
 charts/memgraph-high-availability/values.yaml | 13 ++-
 3 files changed, 164 insertions(+), 17 deletions(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 74aeb00..e2dd108 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -9,27 +9,85 @@ spec:
   selector:
     matchLabels:
       app: memgraph-coordinator-{{ $coordinator.id }}
+      role: coordinator
   template:
     metadata:
      labels:
         app: memgraph-coordinator-{{ $coordinator.id }}
+        role: coordinator
     spec:
-      {{ if $.Values.memgraph.affinity.enabled }}
+      {{- if .Values.affinity.enabled }}
       affinity:
+      {{- if .Values.affinity.nodeSelection }}
+      # Node Selection Affinity: Schedule coordinator pods on nodes with a specific label
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: "{{ .Values.affinity.coordinatorNodeLabel }}"
+              operator: In
+              values:
+              - "true"
+      {{- else if .Values.affinity.unique }}
+      # Unique Affinity: Schedule pods on different nodes
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- else if .Values.affinity.parity }}
+      # Parity Affinity: One coordinator and one data node per node
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+            topologyKey: "kubernetes.io/hostname"
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
-                - key: app
+                - key: role
                   operator: In
                   values:
-                  - memgraph-coordinator-1
-                  - memgraph-coordinator-2
-                  - memgraph-coordinator-3
-                  - memgraph-data-0
-                  - memgraph-data-1
+                  - coordinator
+                  - data
            topologyKey: "kubernetes.io/hostname"
-      {{ end }}
+      {{- else if .Values.affinity.paritySoft }}
+      # Parity Soft Affinity: Prefer one coordinator and one data node per node
+      podAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 50
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- end }}
+      {{- end }}
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"

diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index 5d1758a..e179599 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -9,27 +9,105 @@ spec:
   selector:
     matchLabels:
       app: memgraph-data-{{ $data.id }}
+      role: data
   template:
     metadata:
       labels:
         app: memgraph-data-{{ $data.id }}
+        role: data
     spec:
-      {{ if $.Values.memgraph.affinity.enabled }}
+      {{- if $.Values.memgraph.affinity.enabled }}
       affinity:
+      {{- if $.Values.affinity.nodeSelection }}
+      # Node Selection Affinity: Prefer nodes with the specific dataNodeLabel and prefer spreading across nodes
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: "{{ $.Values.affinity.dataNodeLabel }}"
+              operator: In
+              values:
+              - "true"
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          preference:
+            matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+              - "true"
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- else if $.Values.affinity.unique }}
+      # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
-                - key: app
+                - key: role
                   operator: In
                   values:
-                  - memgraph-coordinator-1
-                  - memgraph-coordinator-2
-                  - memgraph-coordinator-3
-                  - memgraph-data-0
-                  - memgraph-data-1
+                  - coordinator
+                  - data
            topologyKey: "kubernetes.io/hostname"
-      {{ end }}
+      {{- else if $.Values.affinity.parity }}
+      # Parity Affinity: One coordinator and one data node per node
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- else if $.Values.affinity.paritySoft }}
+      # Parity Soft Affinity: Prefer one coordinator and one data node per node
+      podAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 50
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+                  - data
+            topologyKey: "kubernetes.io/hostname"
+      {{- end }}
+      {{- end }}
+
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"

diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index eb5dcaf..99c60af 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -36,9 +36,20 @@ memgraph:
     logPVCClassName: ""
     logPVC: true
     logPVCSize: "256Mi"
+
+  # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
+  # The default unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
+  # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
+  # The paritySoft affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. If there are not sufficient nodes, the deployment won't fail, but will try to schedule as many pods by the rule as possible.
+  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
   affinity:
     enabled: true
-
+    unique: true
+    parity: false
+    paritySoft: false
+    nodeSelection: false
+    dataNodeLabel: "data-node"
+    coordinatorNodeLabel: "coordinator-node"
 
 data:
   - id: "0"

From 6fe019f3a4d24b4132019cf10872bc44a979ec32 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 12:34:12 +0100
Subject: [PATCH 02/10] Update values.

---
 .../templates/coordinators.yaml               | 10 +++----
 .../templates/data.yaml                       |  2 +-
 charts/memgraph-high-availability/values.yaml | 26 +++++++++----------
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index e2dd108..c085115 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -16,9 +16,9 @@ spec:
         app: memgraph-coordinator-{{ $coordinator.id }}
         role: coordinator
     spec:
-      {{- if .Values.affinity.enabled }}
+      {{- if $.Values.affinity.enabled }}
       affinity:
-      {{- if .Values.affinity.nodeSelection }}
+      {{- if $.Values.affinity.nodeSelection }}
       # Node Selection Affinity: Schedule coordinator pods on nodes with a specific label
       nodeAffinity:
@@ -28,7 +28,7 @@ spec:
               operator: In
               values:
               - "true"
-      {{- else if .Values.affinity.unique }}
+      {{- else if $.Values.affinity.unique }}
       # Unique Affinity: Schedule pods on different nodes
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
@@ -40,7 +40,7 @@ spec:
                   - coordinator
                  - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if .Values.affinity.parity }}
+      {{- else if $.Values.affinity.parity }}
       # Parity Affinity: One coordinator and one data node per node
       podAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
@@ -61,7 +61,7 @@ spec:
                   - coordinator
                   - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if .Values.affinity.paritySoft }}
+      {{- else if $.Values.affinity.paritySoft }}
       # Parity Soft Affinity: Prefer one coordinator and one data node per node
       podAffinity:
         preferredDuringSchedulingIgnoredDuringExecution:
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index e179599..91d50c1 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -16,7 +16,7 @@ spec:
         app: memgraph-data-{{ $data.id }}
         role: data
     spec:
-      {{- if $.Values.memgraph.affinity.enabled }}
+      {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 99c60af..534ec95 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -37,19 +37,19 @@ memgraph:
     logPVC: true
     logPVCSize: "256Mi"
 
-  # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
-  # The default unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
-  # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
-  # The paritySoft affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. If there are not sufficient nodes, the deployment won't fail, but will try to schedule as many pods by the rule as possible.
-  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
-  affinity:
-    enabled: true
-    unique: true
-    parity: false
-    paritySoft: false
-    nodeSelection: false
-    dataNodeLabel: "data-node"
-    coordinatorNodeLabel: "coordinator-node"
+# Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
+# The default unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
+# The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
+# The paritySoft affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. If there are not sufficient nodes, the deployment won't fail, but will try to schedule as many pods by the rule as possible.
+# The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
+affinity:
+  enabled: true
+  unique: true
+  parity: false
+  paritySoft: false
+  nodeSelection: false
+  dataNodeLabel: "data-node"
+  coordinatorNodeLabel: "coordinator-node"
 
 data:
   - id: "0"

From c5e0a92c45a5266d91cc873ac2d9c024a5e1ad8c Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 14:24:50 +0100
Subject: [PATCH 03/10] Delete parity soft.
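
This drops the paritySoft branch from both templates and its flag from values.yaml, leaving unique, parity and nodeSelection as the selectable modes. As a rough sketch (the release name is illustrative), a remaining mode can be picked at install time with a single values override:

    helm install my-ha memgraph/memgraph-high-availability --set affinity.parity=true

Only one mode flag should be set to true at a time, since the templates evaluate the flags as an if/else-if chain.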
---
 .../templates/coordinators.yaml               | 46 +++++-------------
 .../templates/data.yaml                       | 47 +++++--------------
 charts/memgraph-high-availability/values.yaml |  4 +-
 3 files changed, 23 insertions(+), 74 deletions(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index c085115..0604740 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -43,14 +43,16 @@ spec:
       {{- else if $.Values.affinity.parity }}
       # Parity Affinity: One coordinator and one data node per node
       podAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - data
-            topologyKey: "kubernetes.io/hostname"
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+            topologyKey: "kubernetes.io/hostname"
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
@@ -59,35 +61,9 @@ spec:
                   operator: In
                   values:
                   - coordinator
-                  - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.paritySoft }}
-      # Parity Soft Affinity: Prefer one coordinator and one data node per node
-      podAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - data
-            topologyKey: "kubernetes.io/hostname"
-      podAntiAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 50
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - coordinator
-                  - data
-            topologyKey: "kubernetes.io/hostname"
-      {{- end }}
+      {{- end }}
       {{- end }}
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index 91d50c1..6b79ea9 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -62,14 +62,16 @@ spec:
       {{- else if $.Values.affinity.parity }}
       # Parity Affinity: One coordinator and one data node per node
       podAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - coordinator
-            topologyKey: "kubernetes.io/hostname"
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
@@ -77,37 +79,10 @@ spec:
                 - key: role
                   operator: In
                   values:
-                  - coordinator
                   - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.paritySoft }}
-      # Parity Soft Affinity: Prefer one coordinator and one data node per node
-      podAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - coordinator
-            topologyKey: "kubernetes.io/hostname"
-      podAntiAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 50
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - coordinator
-                  - data
-            topologyKey: "kubernetes.io/hostname"
       {{- end }}
-      {{- end }}
-
+      {{- end }}
+      {{- end }}
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 534ec95..6d57b2f 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -40,13 +40,11 @@ memgraph:
 # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
 # The default unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
 # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
-# The paritySoft affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. If there are not sufficient nodes, the deployment won't fail, but will try to schedule as many pods by the rule as possible.
 # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
 affinity:
   enabled: true
-  unique: true
+  unique: false
   parity: false
-  paritySoft: false
   nodeSelection: false
   dataNodeLabel: "data-node"
   coordinatorNodeLabel: "coordinator-node"

From 211cead4943a5d696ea2991d92841a4c8627f39f Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 15:04:15 +0100
Subject: [PATCH 04/10] Update.
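
nodeSelection now matches on a configurable label key with per-role values instead of the old boolean node labels. As a sketch (the node names are illustrative), nodes would be prepared like:

    kubectl label nodes worker-1 role=coordinator-node
    kubectl label nodes worker-2 role=data-node

where role, coordinator-node and data-node are the defaults of affinity.roleLabelKey, affinity.coordinatorNodeLabelValue and affinity.dataNodeLabelValue.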
---
 .../templates/coordinators.yaml               | 15 ++++++++--
 .../templates/data.yaml                       | 28 ++++++-------------
 charts/memgraph-high-availability/values.yaml | 11 ++++----
 3 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 0604740..b9cecf4 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -19,15 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-      # Node Selection Affinity: Schedule coordinator pods on nodes with a specific label
+      # Node Selection Affinity: Scheduled on nodes with a specific label key and value
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
-            - key: "{{ .Values.affinity.coordinatorNodeLabel }}"
+            - key: {{ $.Values.affinity.roleLabelKey }}
               operator: In
               values:
-              - "true"
+              - {{ $.Values.affinity.coordinatorNodeLabelValue }}
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - coordinator
+          topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
       # Unique Affinity: Schedule pods on different nodes
       podAntiAffinity:
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index 6b79ea9..f4b595e 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -19,34 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-      # Node Selection Affinity: Prefer nodes with the specific dataNodeLabel and prefer spreading across nodes
+      # Node Selection Affinity: Scheduled on nodes with a specific label key and value
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
-            - key: "{{ $.Values.affinity.dataNodeLabel }}"
+            - key: {{ $.Values.affinity.roleLabelKey }}
               operator: In
               values:
-              - "true"
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          preference:
+              - {{ $.Values.affinity.dataNodeLabelValue }}
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
             matchExpressions:
-            - key: kubernetes.io/hostname
+              - key: role
                 operator: In
                 values:
-            - "true"
-      podAntiAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - data
-            topologyKey: "kubernetes.io/hostname"
+                - data
+          topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
       # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 6d57b2f..14b0f36 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -38,16 +38,17 @@ memgraph:
   logPVCSize: "256Mi"
 
 # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
-# The default unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
-# The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
-# The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
 affinity:
   enabled: true
+  # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
   unique: false
+  # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
   parity: false
+  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
   nodeSelection: false
-  dataNodeLabel: "data-node"
-  coordinatorNodeLabel: "coordinator-node"
+  roleLabelKey: "role"
+  dataNodeLabelValue: "data-node"
+  coordinatorNodeLabelValue: "coordinator-node"
 
 data:
   - id: "0"

From b364ad86e5ccefb2e9a6303baa4009966f32dfd0 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 15:07:37 +0100
Subject: [PATCH 05/10] Update messaging.

---
 charts/memgraph-high-availability/values.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 14b0f36..ff17518 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -37,14 +37,14 @@ memgraph:
   logPVC: true
   logPVCSize: "256Mi"
 
-# Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on any node in the cluster, no matter if they are on the same node or not.
+# Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on random nodes in the cluster that the k8s scheduler chooses.
 affinity:
   enabled: true
-  # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, which is the most common use case for high availability. If there are not sufficient nodes, the deployment will fail.
+  # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node. If there are more pods than nodes, the deployment will fail.
   unique: false
   # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
   parity: false
-  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. The pods will prefer to spread across multiple data and coordinator nodes.
+  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. If there are not sufficient nodes, the deployment will fail.
   nodeSelection: false
   roleLabelKey: "role"
   dataNodeLabelValue: "data-node"
   coordinatorNodeLabelValue: "coordinator-node"

From 905ef22d62b66df0748c18865afcb602871d5e55 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Wed, 27 Nov 2024 15:13:39 +0100
Subject: [PATCH 06/10] Update config.

---
 charts/memgraph-high-availability/README.md   | 71 ++++++++++---------
 .../templates/coordinators.yaml               | 18 ++---
 2 files changed, 48 insertions(+), 41 deletions(-)

diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md
index 78e8dd7..75b5505 100644
--- a/charts/memgraph-high-availability/README.md
+++ b/charts/memgraph-high-availability/README.md
@@ -26,41 +26,48 @@ helm install memgraph/memgraph-high-availability -f values.yaml
 
 The following table lists the configurable parameters of the Memgraph chart and their default values.
 
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` |
-| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is the chart version. | `2.17.0` |
-| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` |
-| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` |
-| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` |
-| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` |
-| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` |
-| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` |
-| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` |
-| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` |
-| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` |
-| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` |
-| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` |
-| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` |
-| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` |
-| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` |
-| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` |
-| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` |
-| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to a unique node | `true` |
-| `data` | Configuration for data instances | See `data` section |
-| `coordinators` | Configuration for coordinator instances | See `coordinators` section |
+| Parameter | Description | Default |
+| -------------------------------------------------- | --------------------------------------------------------------------------------------------------- | -------------------------- |
+| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` |
+| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is the chart version. | `2.17.0` |
+| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` |
+| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` |
+| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` |
+| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` |
+| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` |
+| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` |
+| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` |
+| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` |
+| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` |
+| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` |
+| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` |
+| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` |
+| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` |
+| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` |
+| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` |
+| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` |
+| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to a unique node | `true` |
+| `memgraph.affinity.unique` | Schedule pods on different nodes in the cluster | `false` |
+| `memgraph.affinity.parity` | Schedule pods on the same node with maximum one coordinator and one data node | `false` |
+| `memgraph.affinity.nodeSelection` | Schedule pods on nodes with specific labels | `false` |
+| `memgraph.affinity.roleLabelKey` | Label key for node selection | `role` |
+| `memgraph.affinity.dataNodeLabelValue` | Label value for data nodes | `data-node` |
+| `memgraph.affinity.coordinatorNodeLabelValue` | Label value for coordinator nodes | `coordinator-node` |
+| `data` | Configuration for data instances | See `data` section |
+| `coordinators` | Configuration for coordinator instances | See `coordinators` section |
+
 
 For the `data` and `coordinators` sections, each item in the list has the following parameters:
 
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `id` | ID of the instance | `0` for data, `1` for coordinators |
-| `boltPort` | Bolt port of the instance | `7687` |
-| `managementPort` | Management port of the data instance | `10000` |
-| `replicationPort` (data only) | Replication port of the data instance | `20000` |
-| `coordinatorPort` (coordinators only) | Coordinator port of the coordinator instance | `12000` |
-| `args` | List of arguments for the instance | See `args` section |
+| Parameter | Description | Default |
+| ------------------------------------- | -------------------------------------------- | ---------------------------------- |
+| `id` | ID of the instance | `0` for data, `1` for coordinators |
+| `boltPort` | Bolt port of the instance | `7687` |
+| `managementPort` | Management port of the data instance | `10000` |
+| `replicationPort` (data only) | Replication port of the data instance | `20000` |
+| `coordinatorPort` (coordinators only) | Coordinator port of the coordinator instance | `12000` |
+| `args` | List of arguments for the instance | See `args` section |
 
 The `args` section contains a list of arguments for the instance. The default values are the same for all instances:
 
diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index b9cecf4..2222707 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -28,15 +28,15 @@ spec:
               operator: In
               values:
               - {{ $.Values.affinity.coordinatorNodeLabelValue }}
-      podAntiAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
-            matchExpressions:
-              - key: role
-                operator: In
-                values:
-                - coordinator
-          topologyKey: "kubernetes.io/hostname"
+      podAntiAffinity :
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}

From 14c94bd994f663c616d7c5ac3e2e45c9723d6baa Mon Sep 17 00:00:00 2001
From: antejavor
Date: Thu, 28 Nov 2024 10:20:19 +0100
Subject: [PATCH 07/10] Fix values.

---
 charts/memgraph-high-availability/values.yaml | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index f120227..066d563 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -38,8 +38,7 @@ memgraph:
     replicationPort: 20000
     coordinatorPort: 12000
     coordLoadBalancer:
-      enabled: true
-
+      enabled: true
 
 # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on random nodes in the cluster that the k8s scheduler chooses.
 affinity:
@@ -54,7 +53,6 @@ affinity:
   dataNodeLabelValue: "data-node"
   coordinatorNodeLabelValue: "coordinator-node"
-
 
 # If you are experiencing issues with the sysctlInitContainer, you can disable it here.
 # This is made to increase the max_map_count, necessary for high memory loads in Memgraph
 # If you are experiencing crashing pod with the: Max virtual memory areas vm.max_map_count is too low
@@ -63,7 +61,6 @@ sysctlInitContainer:
   enabled: true
   maxMapCount: 262144
-
 
 data:
   - id: "0"

From 0ca808759d9efa5fddd2b37d4dabc8d63a4cad25 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Thu, 28 Nov 2024 14:31:58 +0100
Subject: [PATCH 08/10] Fix indent.
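
The affinity block moves under the memgraph key, matching the $.Values.memgraph.affinity.* paths the templates now use. An override file therefore has to nest it accordingly; a minimal sketch (nodeSelection: true is just an illustrative override, the other keys keep their defaults):

    memgraph:
      affinity:
        enabled: true
        nodeSelection: true

Any --set flags change from affinity.* to memgraph.affinity.* as well.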
---
 .../templates/coordinators.yaml               | 12 ++++-----
 .../templates/data.yaml                       | 12 ++++-----
 charts/memgraph-high-availability/values.yaml | 25 +++++++++----------
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 4cf710f..35fb970 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -18,18 +18,18 @@ spec:
         instance-type: coordinator
     spec:
-      {{- if $.Values.affinity.enabled }}
+      {{- if $.Values.memgraph.affinity.enabled }}
       affinity:
-      {{- if $.Values.affinity.nodeSelection }}
+      {{- if $.Values.memgraph.affinity.nodeSelection }}
       # Node Selection Affinity: Scheduled on nodes with a specific label key and value
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
-            - key: {{ $.Values.affinity.roleLabelKey }}
+            - key: {{ $.Values.memgraph.affinity.roleLabelKey }}
               operator: In
               values:
-              - {{ $.Values.affinity.coordinatorNodeLabelValue }}
+              - {{ $.Values.memgraph.affinity.coordinatorNodeLabelValue }}
       podAntiAffinity :
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
@@ -39,7 +39,7 @@ spec:
                   values:
                   - coordinator
            topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.unique }}
+      {{- else if $.Values.memgraph.affinity.unique }}
       # Unique Affinity: Schedule pods on different nodes
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
@@ -51,7 +51,7 @@ spec:
                   - coordinator
                   - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.parity }}
+      {{- else if $.Values.memgraph.affinity.parity }}
       # Parity Affinity: One coordinator and one data node per node
       podAffinity:
         preferredDuringSchedulingIgnoredDuringExecution:
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index af85019..af2f10f 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -18,18 +18,18 @@ spec:
         instance-type: data
     spec:
-      {{- if $.Values.affinity.enabled }}
+      {{- if $.Values.memgraph.affinity.enabled }}
       affinity:
-      {{- if $.Values.affinity.nodeSelection }}
+      {{- if $.Values.memgraph.affinity.nodeSelection }}
       # Node Selection Affinity: Scheduled on nodes with a specific label key and value
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
-            - key: {{ $.Values.affinity.roleLabelKey }}
+            - key: {{ $.Values.memgraph.affinity.roleLabelKey }}
               operator: In
               values:
-              - {{ $.Values.affinity.dataNodeLabelValue }}
+              - {{ $.Values.memgraph.affinity.dataNodeLabelValue }}
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
         - labelSelector:
@@ -39,7 +39,7 @@ spec:
                 values:
                 - data
          topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.unique }}
+      {{- else if $.Values.memgraph.affinity.unique }}
       # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
@@ -51,7 +51,7 @@ spec:
                   - coordinator
                   - data
            topologyKey: "kubernetes.io/hostname"
-      {{- else if $.Values.affinity.parity }}
+      {{- else if $.Values.memgraph.affinity.parity }}
       # Parity Affinity: One coordinator and one data node per node
       podAffinity:
         preferredDuringSchedulingIgnoredDuringExecution:
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 066d563..22264a9 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -39,19 +39,18 @@ memgraph:
     coordinatorPort: 12000
     coordLoadBalancer:
       enabled: true
-
-# Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on random nodes in the cluster that the k8s scheduler chooses.
-affinity:
-  enabled: true
-  # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node. If there are more pods than nodes, the deployment will fail.
-  unique: false
-  # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
-  parity: false
-  # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. If there are not sufficient nodes, the deployment will fail.
-  nodeSelection: false
-  roleLabelKey: "role"
-  dataNodeLabelValue: "data-node"
-  coordinatorNodeLabelValue: "coordinator-node"
+  # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on random nodes in the cluster that the k8s scheduler chooses.
+  affinity:
+    enabled: true
+    # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node. If there are more pods than nodes, the deployment will fail.
+    unique: false
+    # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.
+    parity: false
+    # The nodeSelection affinity will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with the label coordinator-node and data nodes will be scheduled on the nodes with the label data-node. If there are not sufficient nodes, the deployment will fail.
+    nodeSelection: false
+    roleLabelKey: "role"
+    dataNodeLabelValue: "data-node"
+    coordinatorNodeLabelValue: "coordinator-node"

From ac5f51d94dca8e9c9066640f6d8b27279f9c1aab Mon Sep 17 00:00:00 2001
From: antejavor
Date: Thu, 28 Nov 2024 14:44:31 +0100
Subject: [PATCH 09/10] Fix default option.
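
When no affinity mode is selected, the templates previously rendered an affinity key with an empty body. The new else branch gives that case a real default: a preferred (weight 50) podAntiAffinity, so coordinators prefer not to share a node with other coordinators and data pods prefer not to share a node with other data pods, without ever blocking scheduling. A quick way to eyeball the resulting spread after deploying (output shape depends on the cluster):

    kubectl get pods -o wide

which lists each pod together with the node it landed on.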
---
 .../templates/coordinators.yaml               | 15 ++++++++++++++-
 .../templates/data.yaml                       | 13 +++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 35fb970..48c4fa5 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -73,7 +73,20 @@ spec:
                   values:
                   - coordinator
             topologyKey: "kubernetes.io/hostname"
-      {{- end }}
+      {{- else }}
+      # Default Affinity: Avoid scheduling on the same node
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 50
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
+      {{- end }}
       {{- end }}
       initContainers:
       - name: init
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index af2f10f..5b8509b 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -73,6 +73,19 @@ spec:
                   values:
                   - data
             topologyKey: "kubernetes.io/hostname"
+      {{- else }}
+      # Default Affinity: Avoid scheduling on the same node
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 50
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - data
+            topologyKey: "kubernetes.io/hostname"
       {{- end }}
       {{- end }}
       initContainers:

From 3b6d2a1a6ad156717ea05209635b74eec9f82a66 Mon Sep 17 00:00:00 2001
From: antejavor
Date: Fri, 29 Nov 2024 10:42:13 +0100
Subject: [PATCH 10/10] Remove affinity option.

---
 charts/memgraph-high-availability/README.md   | 93 ++++++++-----------
 .../templates/coordinators.yaml               | 15 +--
 .../templates/data.yaml                       | 23 ++---
 charts/memgraph-high-availability/values.yaml |  3 +-
 4 files changed, 52 insertions(+), 82 deletions(-)

diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md
index ee31437..b02539f 100644
--- a/charts/memgraph-high-availability/README.md
+++ b/charts/memgraph-high-availability/README.md
@@ -20,67 +20,54 @@ Or you can modify a `values.yaml` file and override the desired values:
 helm install memgraph/memgraph-high-availability -f values.yaml
 ```
 
-## Running the Memgraph HA Helm Chart locally
-
-To run Memgraph HA Helm Chart locally, affinity needs to be disabled because the cluster will be running on a single node.
-
-To disable the affinity, run the following command with the specified set of flags:
-
-```
-helm install memgraph/memgraph-high-availability --set memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=,memgraph.env.MEMGRAPH_ORGANIZATION_NAME=,memgraph.affinity.enabled=false
-```
-
-The affinity is disabled either by running the command above, or by modifying the `values.yaml` file.
-
 ## Configuration Options
 
 The following table lists the configurable parameters of the Memgraph chart and their default values.
 
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` |
-| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is the chart version. | `2.22.0` |
-| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` |
-| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` |
-| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` |
-| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` |
-| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` |
-| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` |
-| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` |
-| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` |
-| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` |
-| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` |
-| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` |
-| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` |
-| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` |
-| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` |
-| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` |
-| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` |
-| `memgraph.externalAccess.serviceType` | NodePort or LoadBalancer. Use LoadBalancer for Cloud production deployment and NodePort for local testing | `LoadBalancer` |
-| `memgraph.ports.boltPort` | Bolt port used on coordinator and data instances. | `7687` |
-| `memgraph.ports.managementPort` | Management port used on coordinator and data instances. | `10000` |
-| `memgraph.ports.replicationPort` | Replication port used on data instances. | `20000` |
-| `memgraph.ports.coordinatorPort` | Coordinator port used on coordinators. | `12000` |
-| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to a unique node | `true` |
-| `memgraph.affinity.unique` | Schedule pods on different nodes in the cluster | `false` |
-| `memgraph.affinity.parity` | Schedule pods on the same node with maximum one coordinator and one data node | `false` |
-| `memgraph.affinity.nodeSelection` | Schedule pods on nodes with specific labels | `false` |
-| `memgraph.affinity.roleLabelKey` | Label key for node selection | `role` |
-| `memgraph.affinity.dataNodeLabelValue` | Label value for data nodes | `data-node` |
-| `memgraph.affinity.coordinatorNodeLabelValue` | Label value for coordinator nodes | `coordinator-node` |
-| `data` | Configuration for data instances | See `data` section |
-| `coordinators` | Configuration for coordinator instances | See `coordinators` section |
-| `sysctlInitContainer.enabled` | Enable the init container to set sysctl parameters | `true` |
-| `sysctlInitContainer.maxMapCount` | Value for `vm.max_map_count` to be set by the init container | `262144` |
+| Parameter | Description | Default |
+| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | -------------------------- |
+| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` |
+| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is the chart version. | `2.22.0` |
+| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` |
+| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` |
+| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` |
+| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` |
+| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` |
+| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` |
+| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` |
+| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` |
+| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` |
+| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` |
+| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` |
+| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` |
+| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` |
+| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` |
+| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` |
+| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` |
+| `memgraph.externalAccess.serviceType` | NodePort or LoadBalancer. Use LoadBalancer for Cloud production deployment and NodePort for local testing | `LoadBalancer` |
+| `memgraph.ports.boltPort` | Bolt port used on coordinator and data instances. | `7687` |
+| `memgraph.ports.managementPort` | Management port used on coordinator and data instances. | `10000` |
+| `memgraph.ports.replicationPort` | Replication port used on data instances. | `20000` |
+| `memgraph.ports.coordinatorPort` | Coordinator port used on coordinators. | `12000` |
+| `memgraph.affinity.unique` | Schedule pods on different nodes in the cluster | `false` |
+| `memgraph.affinity.parity` | Schedule pods on the same node with maximum one coordinator and one data node | `false` |
+| `memgraph.affinity.nodeSelection` | Schedule pods on nodes with specific labels | `false` |
+| `memgraph.affinity.roleLabelKey` | Label key for node selection | `role` |
+| `memgraph.affinity.dataNodeLabelValue` | Label value for data nodes | `data-node` |
+| `memgraph.affinity.coordinatorNodeLabelValue` | Label value for coordinator nodes | `coordinator-node` |
+| `data` | Configuration for data instances | See `data` section |
+| `coordinators` | Configuration for coordinator instances | See `coordinators` section |
+| `sysctlInitContainer.enabled` | Enable the init container to set sysctl parameters | `true` |
+| `sysctlInitContainer.maxMapCount` | Value for `vm.max_map_count` to be set by the init container | `262144` |
 
 For the `data` and `coordinators` sections, each item in the list has the following parameters:
 
-| Parameter | Description | Default |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------|
-| `id` | ID of the instance | `0` for data, `1` for coordinators |
-| `args` | List of arguments for the instance | See `args` section |
+| Parameter | Description | Default |
+| --------- | ---------------------------------- | ---------------------------------- |
+| `id` | ID of the instance | `0` for data, `1` for coordinators |
+| `args` | List of arguments for the instance | See `args` section |
 
diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml
index 48c4fa5..b36325b 100644
--- a/charts/memgraph-high-availability/templates/coordinators.yaml
+++ b/charts/memgraph-high-availability/templates/coordinators.yaml
@@ -18,7 +18,6 @@ spec:
         instance-type: coordinator
     spec:
-      {{- if $.Values.memgraph.affinity.enabled }}
       affinity:
       {{- if $.Values.memgraph.affinity.nodeSelection }}
       # Node Selection Affinity: Scheduled on nodes with a specific label key and value
@@ -52,18 +51,7 @@ spec:
                   - data
            topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.memgraph.affinity.parity }}
-      # Parity Affinity: One coordinator and one data node per node
-      podAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - data
-            topologyKey: "kubernetes.io/hostname"
+      # Parity Affinity: One coordinator and one data node per node; coordinators schedule first and need to be in pairs
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
@@ -87,7 +75,6 @@ spec:
                   - coordinator
            topologyKey: "kubernetes.io/hostname"
       {{- end }}
-      {{- end }}
       initContainers:
       - name: init
diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml
index 5b8509b..e59859b 100644
--- a/charts/memgraph-high-availability/templates/data.yaml
+++ b/charts/memgraph-high-availability/templates/data.yaml
@@ -18,7 +18,6 @@ spec:
         instance-type: data
     spec:
-      {{- if $.Values.memgraph.affinity.enabled }}
       affinity:
       {{- if $.Values.memgraph.affinity.nodeSelection }}
       # Node Selection Affinity: Scheduled on nodes with a specific label key and value
@@ -52,16 +51,16 @@ spec:
                   - data
            topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.memgraph.affinity.parity }}
-      # Parity Affinity: One coordinator and one data node per node
+      # Parity Affinity: One coordinator and one data node per node; coordinators schedule first and need to be in pairs
       podAffinity:
-        preferredDuringSchedulingIgnoredDuringExecution:
-        - weight: 100
-          podAffinityTerm:
-            labelSelector:
-              matchExpressions:
-                - key: role
-                  operator: In
-                  values:
-                  - coordinator
-            topologyKey: "kubernetes.io/hostname"
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: role
+                  operator: In
+                  values:
+                  - coordinator
+            topologyKey: "kubernetes.io/hostname"
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
@@ -87,7 +84,7 @@ spec:
                   - data
            topologyKey: "kubernetes.io/hostname"
       {{- end }}
-      {{- end }}
+
       initContainers:
       - name: init
         image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}"
diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml
index 22264a9..4497d43 100644
--- a/charts/memgraph-high-availability/values.yaml
+++ b/charts/memgraph-high-availability/values.yaml
@@ -39,9 +39,8 @@ memgraph:
     coordinatorPort: 12000
     coordLoadBalancer:
       enabled: true
-  # Affinity controls the scheduling of the memgraph-high-availability pods. By disabling affinity, pods will be scheduled on random nodes in the cluster that the k8s scheduler chooses.
+  # Affinity controls the scheduling of the memgraph-high-availability pods. By default data pods will avoid being scheduled on the same node as other data pods, and coordinator pods will avoid being scheduled on the same node as other coordinator pods. The deployment won't fail if there are not sufficient nodes.
   affinity:
-    enabled: true
     # The unique affinity will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node. If there are more pods than nodes, the deployment will fail.
     unique: false
     # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node. This means each node can have at most two pods, one coordinator and one data node. If there are not sufficient nodes, the deployment will fail.