Commit 211cead: Update.

antejavor committed Nov 27, 2024
1 parent c5e0a92
Showing 3 changed files with 27 additions and 27 deletions.
15 changes: 12 additions & 3 deletions charts/memgraph-high-availability/templates/coordinators.yaml
@@ -19,15 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-        # Node Selection Affinity: Schedule coordinator pods on nodes with a specific labels
+        # Node Selection Affinity: Schedule pods on nodes with a specific label key and value
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
               - matchExpressions:
-                  - key: "{{ .Values.affinity.coordinatorNodeLabel }}"
+                  - key: {{ $.Values.affinity.roleLabelKey }}
                     operator: In
                     values:
-                      - "true"
+                      - {{ $.Values.affinity.coordinatorNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: role
+                    operator: In
+                    values:
+                      - coordinator
+              topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
         # Unique Affinity: Schedule pods on different nodes
         podAntiAffinity:
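For reference, with `affinity.nodeSelection` enabled and the chart's default values (`roleLabelKey: "role"`, `coordinatorNodeLabelValue: "coordinator-node"`), the new branch should render roughly the following pod-spec fragment. This is a sketch of the expected output, not captured from `helm template`:

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: role            # .Values.affinity.roleLabelKey
              operator: In
              values:
                - coordinator-node # .Values.affinity.coordinatorNodeLabelValue
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - coordinator
        topologyKey: "kubernetes.io/hostname"

The node affinity pins coordinators to nodes labeled role=coordinator-node, while the pod anti-affinity (assuming the coordinator pods themselves carry a `role: coordinator` pod label) forces each coordinator onto a different node.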
28 changes: 9 additions & 19 deletions charts/memgraph-high-availability/templates/data.yaml
@@ -19,34 +19,24 @@ spec:
       {{- if $.Values.affinity.enabled }}
       affinity:
       {{- if $.Values.affinity.nodeSelection }}
-        # Node Selection Affinity: Prefer nodes with the specific dataNodeLabel and prefer spreading across nodes
+        # Node Selection Affinity: Schedule pods on nodes with a specific label key and value
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
               - matchExpressions:
-                  - key: "{{ $.Values.affinity.dataNodeLabel }}"
+                  - key: {{ $.Values.affinity.roleLabelKey }}
                     operator: In
                     values:
-                      - "true"
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              preference:
+                      - {{ $.Values.affinity.dataNodeLabelValue }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
                 matchExpressions:
-                  - key: kubernetes.io/hostname
+                  - key: role
                     operator: In
                     values:
-                      - "true"
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              podAffinityTerm:
-                labelSelector:
-                  matchExpressions:
-                    - key: role
-                      operator: In
-                      values:
-                        - data
-                topologyKey: "kubernetes.io/hostname"
+                      - data
+              topologyKey: "kubernetes.io/hostname"
       {{- else if $.Values.affinity.unique }}
         # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod
         podAntiAffinity:
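The data-node change is stricter than before: the soft `preferredDuringSchedulingIgnoredDuringExecution` spreading is replaced by hard requirements, so scheduling fails outright if no suitably labeled node is free. Nodes therefore need the expected labels before the release is installed, e.g. `kubectl label nodes <node-name> role=data-node`. A hypothetical node manifest fragment showing the label the required nodeAffinity matches (the node name is a placeholder):

apiVersion: v1
kind: Node
metadata:
  name: worker-1      # placeholder node name
  labels:
    # key and value follow the chart defaults:
    # affinity.roleLabelKey / affinity.dataNodeLabelValue
    role: data-node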
11 changes: 6 additions & 5 deletions charts/memgraph-high-availability/values.yaml
@@ -38,16 +38,17 @@ memgraph:
   logPVCSize: "256Mi"
 
 # Affinity controls the scheduling of the memgraph-high-availability pods. With affinity disabled, pods are scheduled on any node in the cluster, regardless of whether they share a node.
-# The default unique affinity, will schedule the pods on different nodes in the cluster. This means coordinators and data nodes will not be scheduled on the same node, this is the most common use case for high availability. If not sufficient nodes, deployment will fail.
-# The parity affinity, will enable scheduling of the pods on the same node, but with the rule that one node can host pair made of coordinator and data node. This means each node can have max two pods, one coordinator and one data node. If not sufficient nodes, deployment will fail.
-# The nodeSelection affinity, will enable scheduling of the pods on the nodes with specific labels. So the coordinators will be scheduled on the nodes with label coordinator-node and data nodes will be scheduled on the nodes with label data-node. The pods will prefer to spread on multiple data and coordinator nodes.
 affinity:
   enabled: true
+  # The unique affinity schedules the pods on different nodes in the cluster, so coordinators and data nodes never share a node. This is the most common setup for high availability. If there are not enough nodes, the deployment will fail.
   unique: false
+  # The parity affinity allows pods to share a node, with the rule that a node may host one coordinator/data pair, i.e. at most two pods: one coordinator and one data node. If there are not enough nodes, the deployment will fail.
   parity: false
+  # The nodeSelection affinity schedules pods on nodes with specific labels: coordinators on nodes labeled coordinator-node, data nodes on nodes labeled data-node. Pods of the same role are also required to spread across different nodes.
   nodeSelection: false
-  dataNodeLabel: "data-node"
-  coordinatorNodeLabel: "coordinator-node"
+  roleLabelKey: "role"
+  dataNodeLabelValue: "data-node"
+  coordinatorNodeLabelValue: "coordinator-node"
 
 data:
   - id: "0"
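Deployments that previously set `dataNodeLabel` or `coordinatorNodeLabel` must switch to the new keys. A hypothetical override file using the new defaults (file and release names are placeholders):

# my-values.yaml
affinity:
  enabled: true
  nodeSelection: true                           # opt in to label-based scheduling
  roleLabelKey: "role"                          # node label key matched by nodeAffinity
  dataNodeLabelValue: "data-node"               # value expected on data nodes
  coordinatorNodeLabelValue: "coordinator-node" # value expected on coordinator nodes

Applied with something like `helm upgrade --install myrelease ./charts/memgraph-high-availability -f my-values.yaml`.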