diff --git a/charts/memgraph-high-availability/README.md b/charts/memgraph-high-availability/README.md index c10f556..abeea7f 100644 --- a/charts/memgraph-high-availability/README.md +++ b/charts/memgraph-high-availability/README.md @@ -38,43 +38,46 @@ The affinity is disabled either by running the command above, or by modifying th The following table lists the configurable parameters of the Memgraph chart and their default values. -| Parameter | Description | Default | -| -------------------------------------------------- | --------------------------------------------------------------------------------------------------- | -------------------------- | -| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` | -| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version. | `2.17.0` | -| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` | -| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` | -| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` | -| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` | -| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` | -| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` | -| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` | -| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` | -| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` | -| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` | -| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` | -| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` | -| 
`memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` | -| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` | -| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` | -| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` | -| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to unique node | `true` | -| `data` | Configuration for data instances | See `data` section | -| `coordinators` | Configuration for coordinator instances | See `coordinators` section | + +| Parameter | Description | Default | +|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| +| `memgraph.image.repository` | Memgraph Docker image repository | `memgraph/memgraph` | +| `memgraph.image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version. 
| `2.22.0` | +| `memgraph.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE` | Memgraph enterprise license | `` | +| `memgraph.env.MEMGRAPH_ORGANIZATION_NAME` | Organization name | `` | +| `memgraph.probes.startup.failureThreshold` | Startup probe failure threshold | `30` | +| `memgraph.probes.startup.periodSeconds` | Startup probe period in seconds | `10` | +| `memgraph.probes.readiness.initialDelaySeconds` | Readiness probe initial delay in seconds | `5` | +| `memgraph.probes.readiness.periodSeconds` | Readiness probe period in seconds | `5` | +| `memgraph.probes.liveness.initialDelaySeconds` | Liveness probe initial delay in seconds | `30` | +| `memgraph.probes.liveness.periodSeconds` | Liveness probe period in seconds | `10` | +| `memgraph.data.volumeClaim.storagePVC` | Enable storage PVC | `true` | +| `memgraph.data.volumeClaim.storagePVCSize` | Size of the storage PVC | `1Gi` | +| `memgraph.data.volumeClaim.logPVC` | Enable log PVC | `false` | +| `memgraph.data.volumeClaim.logPVCSize` | Size of the log PVC | `256Mi` | +| `memgraph.coordinators.volumeClaim.storagePVC` | Enable storage PVC for coordinators | `true` | +| `memgraph.coordinators.volumeClaim.storagePVCSize` | Size of the storage PVC for coordinators | `1Gi` | +| `memgraph.coordinators.volumeClaim.logPVC` | Enable log PVC for coordinators | `false` | +| `memgraph.coordinators.volumeClaim.logPVCSize` | Size of the log PVC for coordinators | `256Mi` | +| `memgraph.affinity.enabled` | Enables affinity so each instance is deployed to unique node | `true` | +| `memgraph.externalAccess.serviceType` | NodePort or LoadBalancer. Use LoadBalancer for Cloud production deployment and NodePort for local testing | `LoadBalancer` | +| `memgraph.ports.boltPort` | Bolt port used on coordinator and data instances. | `7687` | +| `memgraph.ports.managementPort` | Management port used on coordinator and data instances. 
| `10000` | +| `memgraph.ports.replicationPort` | Replication port used on data instances. | `20000` | +| `memgraph.ports.coordinatorPort` | Coordinator port used on coordinators. | `12000` | +| `data` | Configuration for data instances | See `data` section | +| `coordinators` | Configuration for coordinator instances | See `coordinators` section | | `sysctlInitContainer.enabled` | Enable the init container to set sysctl parameters | `true` | | `sysctlInitContainer.maxMapCount` | Value for `vm.max_map_count` to be set by the init container | `262144` | For the `data` and `coordinators` sections, each item in the list has the following parameters: -| Parameter | Description | Default | -| ------------------------------------- | -------------------------------------------- | ---------------------------------- | -| `id` | ID of the instance | `0` for data, `1` for coordinators | -| `boltPort` | Bolt port of the instance | `7687` | -| `managementPort` | Management port of the data instance | `10000` | -| `replicationPort` (data only) | Replication port of the data instance | `20000` | -| `coordinatorPort` (coordinators only) | Coordinator port of the coordinator instance | `12000` | -| `args` | List of arguments for the instance | See `args` section | +| Parameter | Description | Default | +|---------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------| +| `id` | ID of the instance | `0` for data, `1` for coordinators | +| `args` | List of arguments for the instance | See `args` section | + The `args` section contains a list of arguments for the instance. 
The default values are the same for all instances: diff --git a/charts/memgraph-high-availability/aws/README.md b/charts/memgraph-high-availability/aws/README.md index 2879872..1f2ff24 100644 --- a/charts/memgraph-high-availability/aws/README.md +++ b/charts/memgraph-high-availability/aws/README.md @@ -26,7 +26,7 @@ eksctl create cluster -f cluster.yaml` ``` should be sufficient. Make sure to change the path to the public SSH key if you want to have SSH access to EC2 instances. After creating the cluster, `kubectl` should pick up -the AWS context and you can verify this by running `kubectl context current-context`. My is pointing to `andi.skrgat@test-cluster-ha.eu-west-1.eksctl.io`. +the AWS context and you can verify this by running `kubectl config current-context`. Mine is pointing to `andi.skrgat@test-cluster-ha.eu-west-1.eksctl.io`. ## Add Helm Charts repository @@ -57,7 +57,8 @@ aws eks describe-nodegroup --cluster-name test-cluster-ha --nodegroup-name stand and then provide full access to it: ``` -aws iam list-attached-role-policies --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole- +aws iam attach-role-policy --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole- --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess +aws iam list-attached-role-policies --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole- ``` It is also important to create Inbound Rule in the Security Group attached to the eksctl cluster which will allow TCP traffic diff --git a/charts/memgraph-high-availability/aws/cluster.yaml b/charts/memgraph-high-availability/aws/cluster.yaml index acec423..6d98ab2 100644 --- a/charts/memgraph-high-availability/aws/cluster.yaml +++ b/charts/memgraph-high-availability/aws/cluster.yaml @@ -37,7 +37,7 @@ managedNodeGroups: instanceSelector: {} instanceType: t3.small labels: - alpha.eksctl.io/cluster-name: test-cluster-ha + alpha.eksctl.io/cluster-name: mg-ha alpha.eksctl.io/nodegroup-name: standard-workers maxSize: 5 
minSize: 5 @@ -58,7 +58,7 @@ managedNodeGroups: volumeThroughput: 125 volumeType: gp3 metadata: - name: test-cluster-ha + name: mg-ha region: eu-west-1 version: "1.30" privateCluster: diff --git a/charts/memgraph-high-availability/templates/NOTES.txt b/charts/memgraph-high-availability/templates/NOTES.txt index cf1e705..64581a7 100644 --- a/charts/memgraph-high-availability/templates/NOTES.txt +++ b/charts/memgraph-high-availability/templates/NOTES.txt @@ -8,10 +8,18 @@ The cluster setup requires the proper enterprise license to work since HA is an You can connect to Memgraph instances via Lab, mgconsole, or any other client. By default, all Memgraph instances (coordinators and data instances) listen on port 7687 for a bolt connection. Make sure your are connecting to the correct ip address and port. For details check the configuration on your cloud provider(aws, gcp, azure, etc.) -If you are connecting via mgconsole, you can use the following command: +To start, you should add coordinators and register data instances in order to completely set up the cluster. In both cases you only need to modify the 'bolt_server' part and set it to the DNS +of the node on which the instance is being started. Node ports are fixed. 
Example: -mgconsole --host --port +ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":32002", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"}; +ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":32003", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"}; +REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":32010", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"}; +REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": ":32011", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"}; -If you are connecting via Lab, specify your instance IP address and port in Memgraph Lab GUI. +If you are connecting via Lab, specify your coordinator instance IP address and port in Memgraph Lab GUI and select the Memgraph HA cluster connection type. -If you are using minikube, you can find out your instance ip using `minikube ip`. +If you are using minikube, you can find out your node ip using `minikube ip`. 
+ +ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "34.251.38.32:32003", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"}; +REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "52.50.209.155:32010", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"}; +REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "34.24.10.69:32011", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"}; diff --git a/charts/memgraph-high-availability/templates/cluster-setup.yaml b/charts/memgraph-high-availability/templates/cluster-setup.yaml deleted file mode 100644 index e533b61..0000000 --- a/charts/memgraph-high-availability/templates/cluster-setup.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: memgraph-setup -spec: - template: - spec: - containers: - - name: memgraph-setup - image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" - command: ["/bin/bash", "-c"] - args: - - | - # Install netcat - echo "Installing netcat..." - apt-get update && apt-get install -y netcat-openbsd - - # Wait until the pods are available - echo "Waiting for pods to become available for Bolt connection..." - until nc -z memgraph-coordinator-1.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-coordinator-2.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-coordinator-3.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-data-0.default.svc.cluster.local 7687; do sleep 1; done - until nc -z memgraph-data-1.default.svc.cluster.local 7687; do sleep 1; done - echo "Pods are available for Bolt connection!" - - sleep 5 - - # Run the mgconsole commands - echo "Running mgconsole commands..." 
- echo 'ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "memgraph-coordinator-2.default.svc.cluster.local:7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "memgraph-coordinator-3.default.svc.cluster.local:7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "memgraph-data-0.default.svc.cluster.local:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "memgraph-data-1.default.svc.cluster.local:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo 'SET INSTANCE instance_1 TO MAIN;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - sleep 3 - echo "SHOW INSTANCES on coord1" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-1.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES on coord2" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-2.default.svc.cluster.local --port 7687 - echo "SHOW INSTANCES on coord3" - echo 'SHOW INSTANCES;' | mgconsole --host memgraph-coordinator-3.default.svc.cluster.local --port 7687 - echo "RETURN 0 on 1st data instance" - echo 'RETURN 0;' | 
mgconsole --host memgraph-data-0.default.svc.cluster.local --port 7687 - echo "RETURN 0 on 2nd data instance" - echo 'RETURN 0;' | mgconsole --host memgraph-data-1.default.svc.cluster.local --port 7687 - securityContext: - runAsUser: 0 - - restartPolicy: Never - backoffLimit: 4 diff --git a/charts/memgraph-high-availability/templates/coordinators.yaml b/charts/memgraph-high-availability/templates/coordinators.yaml index 1b36e64..6511096 100644 --- a/charts/memgraph-high-availability/templates/coordinators.yaml +++ b/charts/memgraph-high-availability/templates/coordinators.yaml @@ -13,6 +13,7 @@ spec: metadata: labels: app: memgraph-coordinator-{{ $coordinator.id }} + instance-type: coordinator spec: {{ if $.Values.memgraph.affinity.enabled }} affinity: @@ -66,9 +67,9 @@ spec: image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} ports: - - containerPort: {{ $coordinator.boltPort }} - - containerPort: {{ $coordinator.managementPort }} - - containerPort: {{ $coordinator.coordinatorPort }} + - containerPort: {{ $.Values.memgraph.ports.boltPort }} + - containerPort: {{ $.Values.memgraph.ports.managementPort }} + - containerPort: {{ $.Values.memgraph.ports.coordinatorPort }} args: {{- range $arg := $coordinator.args }} - "{{ $arg }}" diff --git a/charts/memgraph-high-availability/templates/data.yaml b/charts/memgraph-high-availability/templates/data.yaml index f2c53a5..e588922 100644 --- a/charts/memgraph-high-availability/templates/data.yaml +++ b/charts/memgraph-high-availability/templates/data.yaml @@ -13,6 +13,7 @@ spec: metadata: labels: app: memgraph-data-{{ $data.id }} + instance-type: data spec: {{ if $.Values.memgraph.affinity.enabled }} affinity: @@ -66,9 +67,9 @@ spec: image: "{{ $.Values.memgraph.image.repository }}:{{ $.Values.memgraph.image.tag }}" imagePullPolicy: {{ $.Values.memgraph.image.pullPolicy }} ports: - - containerPort: {{ $data.boltPort }} - - containerPort: 
{{ $data.managementPort }} - - containerPort: {{ $data.replicationPort }} + - containerPort: {{ $.Values.memgraph.ports.boltPort }} + - containerPort: {{ $.Values.memgraph.ports.managementPort }} + - containerPort: {{ $.Values.memgraph.ports.replicationPort }} args: {{- range $arg := $data.args }} - "{{ $arg }}" diff --git a/charts/memgraph-high-availability/templates/load-balancer.yaml b/charts/memgraph-high-availability/templates/load-balancer.yaml new file mode 100644 index 0000000..c8c497f --- /dev/null +++ b/charts/memgraph-high-availability/templates/load-balancer.yaml @@ -0,0 +1,15 @@ +{{- if $.Values.memgraph.coordLoadBalancer.enabled}} +apiVersion: v1 +kind: Service +metadata: + name: external-access +spec: + type: LoadBalancer + selector: + instance-type: coordinator + ports: + - protocol: TCP + name: bolt + port: {{ $.Values.memgraph.ports.boltPort }} + targetPort: {{ $.Values.memgraph.ports.boltPort }} +{{- end }} diff --git a/charts/memgraph-high-availability/templates/services-coordinators.yaml b/charts/memgraph-high-availability/templates/services-coordinators.yaml index f8a4153..13ed7aa 100644 --- a/charts/memgraph-high-availability/templates/services-coordinators.yaml +++ b/charts/memgraph-high-availability/templates/services-coordinators.yaml @@ -12,16 +12,16 @@ spec: ports: - protocol: TCP name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} + port: {{ $.Values.memgraph.ports.boltPort }} + targetPort: {{ $.Values.memgraph.ports.boltPort }} - protocol: TCP name: coordinator - port: {{ .coordinatorPort }} - targetPort: {{ .coordinatorPort }} + port: {{ $.Values.memgraph.ports.coordinatorPort }} + targetPort: {{ $.Values.memgraph.ports.coordinatorPort }} - protocol: TCP name: management - port: {{ .managementPort }} - targetPort: {{ .managementPort }} + port: {{ $.Values.memgraph.ports.managementPort }} + targetPort: {{ $.Values.memgraph.ports.managementPort }} {{- end }} # Service for coordinators instances external @@ -38,6 +38,7 @@ 
spec: ports: - protocol: TCP name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} + port: {{ $.Values.memgraph.ports.boltPort }} + targetPort: {{ $.Values.memgraph.ports.boltPort }} + nodePort: {{ add 32000 .id }} {{- end }} diff --git a/charts/memgraph-high-availability/templates/services-data.yaml b/charts/memgraph-high-availability/templates/services-data.yaml index 30b5c22..e9c3569 100644 --- a/charts/memgraph-high-availability/templates/services-data.yaml +++ b/charts/memgraph-high-availability/templates/services-data.yaml @@ -12,16 +12,16 @@ spec: ports: - protocol: TCP name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} + port: {{ $.Values.memgraph.ports.boltPort }} + targetPort: {{ $.Values.memgraph.ports.boltPort }} - protocol: TCP name: management - port: {{ .managementPort }} - targetPort: {{ .managementPort }} + port: {{ $.Values.memgraph.ports.managementPort }} + targetPort: {{ $.Values.memgraph.ports.managementPort }} - protocol: TCP name: replication - port: {{ .replicationPort }} - targetPort: {{ .replicationPort }} + port: {{ $.Values.memgraph.ports.replicationPort }} + targetPort: {{ $.Values.memgraph.ports.replicationPort }} {{- end }} # Service for data instances external @@ -38,6 +38,7 @@ spec: ports: - protocol: TCP name: bolt - port: {{ .boltPort }} - targetPort: {{ .boltPort }} + port: {{ $.Values.memgraph.ports.boltPort }} + targetPort: {{ $.Values.memgraph.ports.boltPort }} + nodePort: {{ add 32010 .id }} {{- end }} diff --git a/charts/memgraph-high-availability/values.yaml b/charts/memgraph-high-availability/values.yaml index 3edaa70..ec04309 100644 --- a/charts/memgraph-high-availability/values.yaml +++ b/charts/memgraph-high-availability/values.yaml @@ -1,7 +1,3 @@ -# Default values for memgraph-high-availability. -# This is a YAML-formatted file. 
-# Declare variables to be passed into your templates.| - memgraph: image: repository: memgraph/memgraph @@ -38,6 +34,13 @@ memgraph: logPVCSize: "256Mi" affinity: enabled: true + ports: + boltPort: 7687 + managementPort: 10000 + replicationPort: 20000 + coordinatorPort: 12000 + coordLoadBalancer: + enabled: true # If you are experiencing issues with the sysctlInitContainer, you can disable it here. # This is made to increase the max_map_count, necessary for high memory loads in Memgraph @@ -49,9 +52,6 @@ sysctlInitContainer: data: - id: "0" - boltPort: 7687 - managementPort: 10000 - replicationPort: 20000 args: - "--experimental-enabled=high-availability" - "--management-port=10000" @@ -61,9 +61,6 @@ data: - "--log-file=/var/log/memgraph/memgraph.log" - id: "1" - boltPort: 7687 - managementPort: 10000 - replicationPort: 20000 args: - "--experimental-enabled=high-availability" - "--management-port=10000" @@ -74,9 +71,6 @@ data: coordinators: - id: "1" - boltPort: 7687 - managementPort: 10000 - coordinatorPort: 12000 args: - "--experimental-enabled=high-availability" - "--coordinator-id=1" @@ -90,9 +84,6 @@ coordinators: - "--nuraft-log-file=/var/log/memgraph/memgraph.log" - id: "2" - boltPort: 7687 - managementPort: 10000 - coordinatorPort: 12000 args: - "--experimental-enabled=high-availability" - "--coordinator-id=2" @@ -106,9 +97,6 @@ coordinators: - "--nuraft-log-file=/var/log/memgraph/memgraph.log" - id: "3" - boltPort: 7687 - managementPort: 10000 - coordinatorPort: 12000 args: - "--experimental-enabled=high-availability" - "--coordinator-id=3"