diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index dabfe5d..af5f8f8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -80,12 +80,6 @@ jobs: - name: Runnign integration tests run: make integration_tests - - name: Installing das-cli package - run: sudo apt -y install $(find . -name *.deb -type f | head -n 1) - - - name: Runnign integration tests - run: make integration_tests - tag: needs: - integration-tests diff --git a/CHANGELOG b/CHANGELOG index 1889b28..47bac4c 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -33,3 +33,4 @@ [#73] das-cli python-library version is raising an error [#76] Put version number in openfaas docker image name [#75] Enable the configuration of a Redis cluster instead of maintaining only a standalone instance +[#87] Configuration of a Mongodb cluster instead of maintaining only a standalone instance diff --git a/Makefile b/Makefile index f4ab8c6..81f2ea8 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,8 @@ man_pages: integration_tests: ifdef DAS_CLI_TEST_CLUSTER - @bats tests/integration/*.bats --filter-tags 'redis:cluster' + @bats tests/integration/*.bats --filter-tags 'cluster' endif - @bats tests/integration/*.bats --filter-tags '!redis:cluster' + @bats tests/integration/*.bats --filter-tags '!cluster' diff --git a/src/commands/config/config_cli.py b/src/commands/config/config_cli.py index 3e07dba..cc2e96b 100644 --- a/src/commands/config/config_cli.py +++ b/src/commands/config/config_cli.py @@ -1,4 +1,5 @@ from injector import inject +from typing import List, Dict from common import Settings, StdoutSeverity from common import ( Command, @@ -8,6 +9,7 @@ RemoteContextManager, get_server_username, get_public_ip, + get_rand_token, ) @@ -41,7 +43,7 @@ class ConfigSet(Command): redis.container_name Specifies the name of the Docker container running the Redis server. - redis.custer + redis.cluster Indicates whether a Redis cluster is being used (true/false). 
redis.nodes @@ -71,6 +73,24 @@ class ConfigSet(Command): mongodb.password The password for connecting to the MongoDB server. + mongodb.cluster + Indicates whether a MongoDB cluster is being used (true/false). + + mongodb.cluster_secret_key + This key is uploaded to all nodes within the MongoDB cluster. It is used for mutual authentication between nodes, ensuring that only authorized nodes can communicate with each other. + + mongodb.nodes + Receives a list of nodes for MongoDB configuration. For a single-node setup, there must be at least one node specified with the default context. For a cluster setup, there must be at least three nodes specified. Additionally, it is necessary to configure an SSH key and utilize this key on each node to ensure SSH connectivity between them. This is essential because Docker communicates between nodes remotely to deploy images with MongoDB. To establish SSH connectivity, generate an SSH key using `ssh-keygen` and add this key to all servers in the cluster. Ensure that port 22 is open on all servers to allow SSH connections. + + mongodb.nodes.[].context + The name of the Docker context containing connection information for the remote Docker instances of other nodes. + + mongodb.nodes.[].ip + The IP address of the node. + + mongodb.nodes.[].username + The username for connecting to the node. + loader.* These variables control the Loader settings, responsible for validating and loading meta files into the database, such as: @@ -114,11 +134,44 @@ def _set_config(self, config_dict): else: self._settings.set(key, value) - def _redis_cluster(self, redis_port) -> list: + def _build_nodes(self, is_cluster: bool, port: int) -> List[Dict]: + nodes = [] + server_user = get_server_username() + current_node = { + "context": "default", + "ip": "localhost", + "username": server_user, + } + + if is_cluster: + server_public_ip = get_public_ip() + + if server_public_ip is None: + raise Exception( + "The server's public ip could not be solved. 
Make sure it has internet access." + ) + + current_node["ip"] = server_public_ip + + nodes = self._build_cluster(server_user, port) + + nodes.insert( + 0, + current_node, + ) + + return nodes + + def _build_cluster( + self, + username: str, + port: int, + min_nodes: int = 3, + ) -> List[Dict]: total_nodes = self.prompt( - "Enter the total number of nodes for the redis cluster (>= 3)", + f"Enter the total number of nodes for the cluster (>= {min_nodes})", hide_input=False, - type=IntRange(3), + type=IntRange(min_nodes), ) servers = [] @@ -126,7 +179,7 @@ def _redis_cluster(self, redis_port) -> list: server_ip = self.prompt( f"Enter the ip address for the server-{i + 1}", hide_input=False, - type=ReachableIpAddress(redis_port), + type=ReachableIpAddress(username, port), ) server_username = self.prompt( f"Enter the server username for the server-{i + 1}", @@ -142,41 +195,22 @@ def _redis_cluster(self, redis_port) -> list: remote_context_manager = RemoteContextManager(servers) cluster_contexts = remote_context_manager.create_context() - old_servers = self._settings.get("redis.nodes", []) - remote_context_manager.set_servers(old_servers) - remote_context_manager.remove_context() - return cluster_contexts - def _redis_nodes(self, redis_cluster, redis_port) -> list: - nodes = [] - server_user = get_server_username() - redis_current_node = { - "context": "default", - "ip": "localhost", - "username": server_user, - } - - if redis_cluster: - server_public_ip = get_public_ip() - - if server_public_ip is None: - raise Exception( - "The server's public ip could not be solved. Make sure it has internet access." 
- ) - - redis_current_node["ip"] = server_public_ip + def _destroy_contexts(self, servers: List[Dict]): + remote_context_manager = RemoteContextManager(servers) + remote_context_manager.remove_context() - nodes = self._redis_cluster(redis_port) + def _redis_nodes(self, redis_cluster, redis_port) -> List[Dict]: + redis_nodes = self._build_nodes(redis_cluster, redis_port) - nodes.insert( - 0, - redis_current_node, + self._destroy_contexts( + servers=self._settings.get("redis.nodes", []), ) - return nodes + return redis_nodes - def _redis(self) -> dict: + def _redis(self) -> Dict: redis_port = self.prompt( "Enter Redis port", default=self._settings.get("redis.port", 6379), @@ -184,7 +218,7 @@ def _redis(self) -> dict: ) cluster_default_value = "yes" if self._settings.get("redis.cluster") else "no" redis_cluster = self.prompt( - "Is it a redis cluster? (yes/no) ", + "Is it a Redis cluster? (yes/no) ", hide_input=False, default=cluster_default_value, type=bool, @@ -197,6 +231,15 @@ def _redis(self) -> dict: "redis.nodes": lambda: self._redis_nodes(redis_cluster, redis_port), } + def _mongodb_nodes(self, mongodb_cluster, mongodb_port) -> List[Dict]: + mongodb_nodes = self._build_nodes(mongodb_cluster, mongodb_port) + + self._destroy_contexts( + servers=self._settings.get("mongodb.nodes", []), + ) + + return mongodb_nodes + def _mongodb(self) -> dict: mongodb_port = self.prompt( "Enter MongoDB port", @@ -212,11 +255,28 @@ def _mongodb(self) -> dict: # hide_input=True, # When hide_input is set I cannot set the answers based on a text file making impossible to test this command default=self._settings.get("mongodb.password", "admin"), ) + cluster_default_value = "yes" if self._settings.get("mongodb.cluster") else "no" + is_mongodb_cluster = self.prompt( + "Is it a MongoDB cluster? 
(yes/no) ", + hide_input=False, + default=cluster_default_value, + type=bool, + ) + cluster_secret_key = self._settings.get( + "mongodb.cluster_secret_key", + get_rand_token(num_bytes=15), + ) return { "mongodb.port": mongodb_port, "mongodb.container_name": f"das-cli-mongodb-{mongodb_port}", "mongodb.username": mongodb_username, "mongodb.password": mongodb_password, + "mongodb.cluster": is_mongodb_cluster, + "mongodb.nodes": lambda: self._mongodb_nodes( + is_mongodb_cluster, + mongodb_port, + ), + "mongodb.cluster_secret_key": cluster_secret_key, } def _loader(self) -> dict: diff --git a/src/commands/db/db_cli.py b/src/commands/db/db_cli.py index 7702c00..41eab48 100644 --- a/src/commands/db/db_cli.py +++ b/src/commands/db/db_cli.py @@ -7,6 +7,7 @@ ) from .mongodb_container_manager import MongodbContainerManager from .redis_container_manager import RedisContainerManager +from typing import Union, AnyStr class DbStop(Command): @@ -28,42 +29,42 @@ class DbStop(Command): """ @inject - def __init__(self, settings: Settings) -> None: + def __init__( + self, + settings: Settings, + redis_container_manager: RedisContainerManager, + mongodb_container_manager: MongodbContainerManager, + ) -> None: super().__init__() self._settings = settings + self._redis_container_manager = redis_container_manager + self._mongodb_container_manager = mongodb_container_manager - def _redis_node(self, redis_container_name, redis_node): - node_context = redis_node.get("context") - node_ip = redis_node.get("ip") - node_username = redis_node.get("username") - + def _redis_node(self, context, ip, username): try: - redis_container_manager = RedisContainerManager( - redis_container_name, - exec_context=node_context, - ) - - redis_container_manager.stop() + self._redis_container_manager.set_exec_context(context) + self._redis_container_manager.stop() + self._redis_container_manager.unset_exec_context() self.stdout( - f"The Redis service at {node_ip} has been stopped by the {node_username} user", + 
f"The Redis service at {ip} has been stopped by the server user {username}", severity=StdoutSeverity.SUCCESS, ) except DockerContainerNotFoundError: + container_name = self._redis_container_manager.get_container().get_name() self.stdout( - f"The Redis service named {redis_container_name} at {node_ip} is already stopped by the {node_username} user.", + f"The Redis service named {container_name} at {ip} is already stopped.", severity=StdoutSeverity.WARNING, ) def _redis(self): - self.stdout(f"Stopping redis service...") + self.stdout(f"Stopping Redis service...") - redis_container_name = self._settings.get("redis.container_name") redis_nodes = self._settings.get("redis.nodes", []) try: for redis_node in redis_nodes: - self._redis_node(redis_container_name, redis_node) + self._redis_node(**redis_node) except DockerError as e: self.stdout( f"\nError occurred while trying to stop Redis\n", @@ -71,18 +72,32 @@ def _redis(self): ) raise e - def _mongodb(self): - mongodb_container_name = self._settings.get("mongodb.container_name") - + def _mongodb_node(self, context, ip, username): try: - MongodbContainerManager(mongodb_container_name).stop() + self._mongodb_container_manager.set_exec_context(context) + self._mongodb_container_manager.stop() + self._mongodb_container_manager.unset_exec_context() - self.stdout("MongoDB service stopped", severity=StdoutSeverity.SUCCESS) + self.stdout( + f"The MongoDB service at {ip} has been stopped by the server user {username}", + severity=StdoutSeverity.SUCCESS, + ) except DockerContainerNotFoundError: + container_name = self._mongodb_container_manager.get_container().get_name() self.stdout( - f"The MongoDB service named {mongodb_container_name} is already stopped.", + f"The MongoDB service named {container_name} at {ip} is already stopped.", severity=StdoutSeverity.WARNING, ) + + def _mongodb(self): + self.stdout(f"Stopping MongoDB service...") + + mongodb_nodes = self._settings.get("mongodb.nodes", []) + + try: + for mongodb_node in 
mongodb_nodes: + self._mongodb_node(**mongodb_node) + except DockerError as e: self.stdout( f"\nError occurred while trying to stop MongoDB\n", @@ -117,96 +132,102 @@ class DbStart(Command): """ @inject - def __init__(self, settings: Settings) -> None: + def __init__( + self, + settings: Settings, + redis_container_manager: RedisContainerManager, + mongodb_container_manager: MongodbContainerManager, + ) -> None: super().__init__() self._settings = settings + self._redis_container_manager = redis_container_manager + self._mongodb_container_manager = mongodb_container_manager - def _redis_node( - self, - redis_container_name: str, - redis_port: int, - redis_node: dict, - ): + def _redis_node(self, redis_node: dict, redis_port: int) -> None: node_context = redis_node.get("context") node_ip = redis_node.get("ip") node_username = redis_node.get("username") - redis_container_manager = RedisContainerManager( - redis_container_name, - exec_context=node_context, - ) - try: - redis_container_manager.start_container(redis_port) + self._redis_container_manager.set_exec_context(node_context) + self._redis_container_manager.start_container(redis_port) + self._redis_container_manager.unset_exec_context() self.stdout( - f"Redis has started successfully on port {redis_port} at {node_ip}, operating under the user {node_username}.", + f"Redis has started successfully on port {redis_port} at {node_ip}, operating under the server user {node_username}.", severity=StdoutSeverity.SUCCESS, ) except DockerContainerDuplicateError: self.stdout( - f"Redis is already running. It is currently listening on port {redis_port} at IP address {node_ip} under the user {node_username}.", + f"Redis is already running. 
It is currently listening on port {redis_port} at {node_ip} under the server user {node_username}.", severity=StdoutSeverity.WARNING, ) - - def _redis_cluster( - self, - redis_container_name: str, - redis_nodes: dict, - redis_port: int, - ): - redis_container_manager = RedisContainerManager(redis_container_name) - - redis_container_manager.start_cluster(redis_nodes, redis_port) - - def _redis(self): - self.stdout(f"Starting Redis service...") - - redis_container_name = self._settings.get("redis.container_name") - redis_port = self._settings.get("redis.port") - redis_nodes = self._settings.get("redis.nodes", []) - redis_cluster = self._settings.get("redis.cluster") - - try: - for redis_node in redis_nodes: - self._redis_node(redis_container_name, redis_port, redis_node) - - if redis_cluster: - self._redis_cluster(redis_container_name, redis_nodes, redis_port) - except DockerError as e: - cluster_text = "cluster" if redis_cluster else "" - self.stdout( - f"\nError occurred while trying to start Redis {cluster_text} on port {redis_port}\n", + f"\nError occurred while trying to start Redis on port {redis_port} at {node_ip} under the server user {node_username}.\n", severity=StdoutSeverity.ERROR, ) - raise e - def _mongodb(self): - self.stdout(f"Starting MongoDB service...") + def _redis(self) -> None: + self.stdout(f"Starting Redis service...") - mongodb_container_name = self._settings.get("mongodb.container_name") - mongodb_port = self._settings.get("mongodb.port") - mongodb_username = self._settings.get("mongodb.username") - mongodb_password = self._settings.get("mongodb.password") + redis_port = self._settings.get("redis.port") + redis_nodes = self._settings.get("redis.nodes", []) + redis_cluster = self._settings.get("redis.cluster", False) + + for redis_node in redis_nodes: + self._redis_node(redis_node, redis_port) + + if redis_cluster: + try: + self._redis_container_manager.start_cluster(redis_nodes, redis_port) + except Exception as e: + self.stdout( + 
f"\nFailed to start the cluster. Please check the connectivity between the nodes and try again.\n", + severity=StdoutSeverity.ERROR, + ) + raise e + + def _mongodb_node( + self, + mongodb_node: dict, + mongodb_port: int, + mongodb_username: str, + mongodb_password: str, + is_cluster_enabled: bool = False, + mongodb_cluster_secret_key: Union[AnyStr, None] = None, + ) -> None: + node_context = mongodb_node.get("context") + node_ip = mongodb_node.get("ip") + node_username = mongodb_node.get("username") + + cluster_node = ( + dict( + host=node_ip, + username=node_username, + ) + if is_cluster_enabled + else None + ) try: - mongodb_container_manager = MongodbContainerManager(mongodb_container_name) - - mongodb_container_manager.start_container( + self._mongodb_container_manager.set_exec_context(node_context) + self._mongodb_container_manager.start_container( mongodb_port, mongodb_username, mongodb_password, + cluster_node, + mongodb_cluster_secret_key, ) + self._mongodb_container_manager.unset_exec_context() + self.stdout( - f"MongoDB started on port {mongodb_port}", + f"MongoDB has started successfully on port {mongodb_port} at {node_ip}, operating under the server user {node_username}.", severity=StdoutSeverity.SUCCESS, ) - except DockerContainerDuplicateError: self.stdout( - f"MongoDB is already running. It's listening on port {mongodb_port}", + f"MongoDB is already running. 
It is currently listening on port {mongodb_port} at {node_ip} under the server user {node_username}.", severity=StdoutSeverity.WARNING, ) except DockerError as e: @@ -216,6 +237,41 @@ def _mongodb(self): ) raise e + def _mongodb(self) -> None: + self.stdout(f"Starting MongoDB service...") + + mongodb_port = self._settings.get("mongodb.port") + mongodb_username = self._settings.get("mongodb.username") + mongodb_password = self._settings.get("mongodb.password") + mongodb_nodes = self._settings.get("mongodb.nodes", []) + mongodb_cluster = self._settings.get("mongodb.cluster", False) + mongodb_cluster_secret_key = self._settings.get("mongodb.cluster_secret_key") + + for mongodb_node in mongodb_nodes: + self._mongodb_node( + mongodb_node, + mongodb_port, + mongodb_username, + mongodb_password, + mongodb_cluster, + mongodb_cluster_secret_key, + ) + + if mongodb_cluster: + try: + self._mongodb_container_manager.start_cluster( + mongodb_nodes, + mongodb_port, + mongodb_username, + mongodb_password, + ) + except Exception as e: + self.stdout( + f"\nFailed to start the cluster. 
Please check the connectivity between the nodes and try again.\n", + severity=StdoutSeverity.ERROR, + ) + raise e + def run(self): self._settings.raise_on_missing_file() diff --git a/src/commands/db/db_module.py b/src/commands/db/db_module.py index cf93abc..39d2353 100644 --- a/src/commands/db/db_module.py +++ b/src/commands/db/db_module.py @@ -1,7 +1,30 @@ from common import Module -from .db_cli import DbCli +from .db_cli import DbCli, RedisContainerManager, Settings, MongodbContainerManager class DbModule(Module): _instance = DbCli - _dependecy_injection = [] + + def __init__(self) -> None: + super().__init__() + + self._settings = Settings() + + self._dependecy_injection = [ + ( + RedisContainerManager, + self._redis_container_manager_factory, + ), + ( + MongodbContainerManager, + self._mongodb_container_manager_factory, + ), + ] + + def _redis_container_manager_factory(self) -> RedisContainerManager: + container_name = self._settings.get("redis.container_name") + return RedisContainerManager(container_name) + + def _mongodb_container_manager_factory(self) -> MongodbContainerManager: + container_name = self._settings.get("mongodb.container_name") + return MongodbContainerManager(container_name) diff --git a/src/commands/db/mongodb_container_manager.py b/src/commands/db/mongodb_container_manager.py index f39cded..c59fd46 100644 --- a/src/commands/db/mongodb_container_manager.py +++ b/src/commands/db/mongodb_container_manager.py @@ -1,8 +1,14 @@ -from common import Container, ContainerManager +import json +import io +from common import Container, ContainerManager, ssh, get_rand_token from config import MONGODB_IMAGE_NAME, MONGODB_IMAGE_VERSION +from typing import List, Dict, Union, AnyStr +from common.docker.exceptions import DockerError class MongodbContainerManager(ContainerManager): + _repl_set = "rs0" + def __init__(self, mongodb_container_name) -> None: container = Container( mongodb_container_name, @@ -12,15 +18,62 @@ def __init__(self, 
mongodb_container_name) -> None: super().__init__(container) + def _upload_key_to_server(self, cluster_node, mongodb_cluster_secret_key): + keyfile_server_path = f"/tmp/{get_rand_token(num_bytes=5)}.txt" + + try: + with ssh.open(cluster_node["host"], cluster_node["username"]) as ( + ssh_conn, + sftp_conn, + ): + content_stream = io.BytesIO(mongodb_cluster_secret_key.encode("utf-8")) + remote_file = sftp_conn.open(keyfile_server_path, "w") + remote_file.write(content_stream.read()) + + ssh_conn.exec_command(f"chmod 400 {keyfile_server_path}") + ssh_conn.exec_command( + f"chown 999:999 {keyfile_server_path}" + ) # Inside the container 999 is the mongodb's uid and gid + + return keyfile_server_path + + except Exception as e: + raise RuntimeError( + f"Failed to upload key to server at {cluster_node['host']} (username: {cluster_node['username']}): {e}" + ) + + def _get_cluster_node_config(self, cluster_node, mongodb_cluster_secret_key): + if not cluster_node: + return {} + + keyfile_path = "/data/keyfile.txt" + keyfile_server_path = self._upload_key_to_server( + cluster_node, + mongodb_cluster_secret_key, + ) + + return { + "command": f"--replSet {self._repl_set} --keyFile {keyfile_path} --auth", + "volumes": { + keyfile_server_path: { + "bind": keyfile_path, + "mode": "ro", + } + }, + } + def start_container( self, port: int, username: str, password: str, + cluster_node: Union[Dict, None] = None, + mongodb_cluster_secret_key: Union[AnyStr, None] = None, ): self.raise_running_container() - container_id = self._start_container( + container = self._start_container( + **self._get_cluster_node_config(cluster_node, mongodb_cluster_secret_key), restart_policy={ "Name": "on-failure", "MaximumRetryCount": 5, @@ -32,6 +85,49 @@ def start_container( "MONGO_INITDB_ROOT_USERNAME": username, "MONGO_INITDB_ROOT_PASSWORD": password, }, + healthcheck={ + "Test": ["CMD-SHELL", "mongosh --eval 'db.adminCommand(\"ping\")'"], + }, ) - return container_id + if not 
self.wait_for_container(container): + raise DockerError("Timeout waiting for MongoDB container to start.") + + return container + + def _get_replica_set_config( + self, + mongodb_port: int, + mongodb_nodes: List[Dict], + ) -> dict: + rs_config = { + "_id": self._repl_set, + "members": [], + } + + for index, mongodb_node in enumerate(mongodb_nodes): + mongodb_node_ip = mongodb_node["ip"] + + rs_config["members"].append( + { + "_id": index, + "host": f"{mongodb_node_ip}:{mongodb_port}", + } + ) + + return rs_config + + def start_cluster( + self, + mongodb_nodes: List[Dict], + mongodb_port: int, + mongodb_username: str, + mongodb_password: str, + ): + rl_config = self._get_replica_set_config(mongodb_port, mongodb_nodes) + rl_config_json = json.dumps(rl_config) + + self.set_exec_context(mongodb_nodes[0]["context"]) + self._exec_container( + f"mongosh -u {mongodb_username} -p {mongodb_password} --eval 'rs.initiate({rl_config_json})'" + ) diff --git a/src/commands/db/redis_container_manager.py b/src/commands/db/redis_container_manager.py index 5d53f65..ecab1f6 100644 --- a/src/commands/db/redis_container_manager.py +++ b/src/commands/db/redis_container_manager.py @@ -21,6 +21,7 @@ def start_container( self, port: int, ): + self.raise_running_container() command_params = [ "redis-server", diff --git a/src/commands/metta/metta_loader_container_manager.py b/src/commands/metta/metta_loader_container_manager.py index 1342af5..155d892 100644 --- a/src/commands/metta/metta_loader_container_manager.py +++ b/src/commands/metta/metta_loader_container_manager.py @@ -65,7 +65,7 @@ def start_container( self.logs() - exit_code = self.container_status(container) + exit_code = self.get_container_exit_status(container) if exit_code != 0: raise DockerError( diff --git a/src/commands/metta/metta_syntax_container_manager.py b/src/commands/metta/metta_syntax_container_manager.py index 74fe159..95fa7d6 100644 --- a/src/commands/metta/metta_syntax_container_manager.py +++ 
b/src/commands/metta/metta_syntax_container_manager.py @@ -48,7 +48,7 @@ def start_container(self, filepath): self.logs() - exit_code = self.container_status(container) + exit_code = self.get_container_exit_status(container) if exit_code != 0: raise DockerError() diff --git a/src/common/__init__.py b/src/common/__init__.py index 498e2a7..18e7f0f 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -16,5 +16,7 @@ is_executable_bin, remove_special_characters, get_server_username, + get_rand_token, ) from .logger import logger +from . import ssh diff --git a/src/common/docker/container_manager.py b/src/common/docker/container_manager.py index 9d5d174..47a432f 100644 --- a/src/common/docker/container_manager.py +++ b/src/common/docker/container_manager.py @@ -1,5 +1,6 @@ import docker import curses +import time import docker.errors from typing import Any, Union, AnyStr from .docker_manager import DockerManager @@ -51,7 +52,14 @@ def _exec_container(self, command: str): container_name = self.get_container().get_name() container = self.get_docker_client().containers.get(container_name) - return container.exec_run(command, tty=True) + exec_result = container.exec_run(command, tty=True) + + if exec_result.exit_code != 0: + raise DockerError( + f"Command '{command}' failed with exit code {exec_result.exit_code}. 
Output: {exec_result.output}" + ) + + return exec_result except docker.errors.APIError as e: raise DockerError(e.explanation) @@ -175,10 +183,37 @@ def stop(self): except docker.errors.APIError as e: raise DockerError(e.explanation) - def container_status(self, container) -> int: + def get_container_exit_status(self, container) -> int: try: return container.wait()["StatusCode"] except docker.errors.NotFound: container = self.get_docker_client().containers.get(container) exit_code = container.attrs["State"]["ExitCode"] return exit_code + + def get_container_status(self, container) -> int: + try: + return container.attrs["State"]["ExitCode"] + except docker.errors.NotFound: + return -1 + + def is_container_running(self, container): + status_code = self.get_container_status(container) + return status_code == 0 + + def is_container_healthy(self, container): + inspect_results = self.get_docker_client().api.inspect_container(container.name) + return inspect_results["State"]["Health"]["Status"] == "healthy" + + def wait_for_container(self, container, timeout=60, interval=2): + elapsed_time = 0 + while elapsed_time < timeout: + if self.is_container_running(container) and self.is_container_healthy( + container + ): + return True + + time.sleep(interval) + elapsed_time += interval + + return False diff --git a/src/common/docker/docker_manager.py b/src/common/docker/docker_manager.py index 396b885..919cabd 100644 --- a/src/common/docker/docker_manager.py +++ b/src/common/docker/docker_manager.py @@ -11,6 +11,12 @@ def __init__( self, exec_context: Union[AnyStr, None] = None, ) -> None: + self.set_exec_context(exec_context) + + def unset_exec_context(self) -> None: + self.set_exec_context(None) + + def set_exec_context(self, exec_context: Union[AnyStr, None] = None): self._exec_context = exec_context def _get_client(self, use: Union[AnyStr, None] = None) -> docker.DockerClient: diff --git a/src/common/network.py b/src/common/network.py index 4494542..e4e5942 100644 --- 
a/src/common/network.py +++ b/src/common/network.py @@ -24,10 +24,21 @@ def get_ssh_user_and_ip(text: str) -> tuple: return None -def is_server_port_available(host, start_port: int, end_port: Union[int, None] = None): +def is_server_port_available( + username: str, + host: str, + start_port: int, + end_port: Union[int, None] = None, +): def server_up(host, port): - command = f"nc -zv {host} {port}" - result = subprocess.call(command, shell=True) + + command = f"ssh {username}@{host} \"ufw status | grep '{port}.*ALLOW'\"" + result = subprocess.call( + command, + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) return result == 0 diff --git a/src/common/prompt_types.py b/src/common/prompt_types.py index e7f6520..609b85b 100644 --- a/src/common/prompt_types.py +++ b/src/common/prompt_types.py @@ -1,6 +1,7 @@ import re from click import IntRange, ParamType, Choice, Path from common.network import is_server_port_available +from typing import Union class FunctionVersion(ParamType): @@ -15,23 +16,31 @@ def convert(self, value, param, ctx): return value def __repr__(self): - return "ReachableIpAddress(%r, %r)" % (self.port) + return "FunctionVersion()" class ReachableIpAddress(ParamType): name = "reachable ip address" - def __init__(self, port=None): + def __init__(self, username: str, port: Union[int, None] = None): self.port = port + self.username = username def convert(self, value, param, ctx): - if not is_server_port_available(host=value, start_port=22): + if not is_server_port_available( + username=self.username, + host=value, + start_port=22, + ): self.fail("%s is not reachable via SSH." % (value,), param, ctx) - if not is_server_port_available(host=value, start_port=self.port): + if not is_server_port_available( + username=self.username, + host=value, + start_port=self.port, + ): self.fail( - "It appears that the Redis port %s on %s is not open." - % (self.port, value), + "It appears that the port %s on %s is not open." 
% (self.port, value), param, ctx, ) @@ -39,4 +48,4 @@ def convert(self, value, param, ctx): return value def __repr__(self): - return "ReachableIpAddress(%r, %r)" % (self.port) + return "ReachableIpAddress(%r, %r)" % (self.port, self.username) diff --git a/src/common/ssh.py b/src/common/ssh.py new file mode 100644 index 0000000..38095c4 --- /dev/null +++ b/src/common/ssh.py @@ -0,0 +1,39 @@ +import paramiko +from typing import Union + + +class SSHConnection: + def __init__(self, ssh_host, ssh_port, ssh_username, ssh_password): + self.ssh_host = ssh_host + self.ssh_port = ssh_port + self.ssh_username = ssh_username + self.ssh_password = ssh_password + self.ssh = None + self.sftp = None + + def __enter__(self): + self.ssh = paramiko.SSHClient() + self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh.connect( + self.ssh_host, + port=self.ssh_port, + username=self.ssh_username, + password=self.ssh_password, + ) + self.sftp = self.ssh.open_sftp() + return self.ssh, self.sftp + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.sftp: + self.sftp.close() + if self.ssh: + self.ssh.close() + + +def open( + host: str, + username: str, + password: Union[str, None] = None, + port: int = 22, +): + return SSHConnection(host, port, username, password) diff --git a/src/common/utils.py b/src/common/utils.py index 30f6422..aab600c 100644 --- a/src/common/utils.py +++ b/src/common/utils.py @@ -1,6 +1,9 @@ import os import sys import getpass +import secrets +import base64 +import string def is_executable_bin(): @@ -26,3 +29,14 @@ def remove_special_characters(text): clean_text = re.sub(pattern, "", text) return clean_text.strip() + + +def get_rand_token(num_bytes: int = 756, only_alpha: bool = True) -> str: + if only_alpha: + alphabet = string.ascii_letters + string.digits + token = "".join(secrets.choice(alphabet) for _ in range(num_bytes)) + return token + + random_bytes = secrets.token_bytes(num_bytes) + + return 
base64.b64encode(random_bytes).decode("utf-8") diff --git a/src/config/config.py b/src/config/config.py index b796c13..1a68040 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -1,5 +1,6 @@ from common.utils import get_server_username + VERSION = '0.2.21' RELEASE_NOTES_URL = ( "https://raw.githubusercontent.com/singnet/das/master/docs/release-notes.md" @@ -31,8 +32,3 @@ JUPYTER_NOTEBOOK_IMAGE_NAME = "trueagi/das" JUPYTER_NOTEBOOK_IMAGE_VERSION = "latest-jupyter-notebook" - -# OTHERS - -CLI_GROUP_NAME = "das" -CLI_USER_NAME = "das" diff --git a/tests/integration/fixtures/config/mongodb_cluster.json b/tests/integration/fixtures/config/mongodb_cluster.json new file mode 100644 index 0000000..2ab9181 --- /dev/null +++ b/tests/integration/fixtures/config/mongodb_cluster.json @@ -0,0 +1,51 @@ +{ + "redis": { + "port": 6379, + "container_name": "das-cli-redis-6379", + "cluster": false, + "nodes": [ + { + "context": "default", + "ip": "localhost", + "username": "rafaellevi" + } + ] + }, + "mongodb": { + "cluster": true, + "port": 28100, + "container_name": "das-cli-mongodb-28100", + "username": "admin", + "password": "admin", + "cluster_secret_key": "eri290eiw299e0", + "nodes": [ + { + "context": "default", + "ip": "104.207.150.215", + "username": "root" + }, + { + "context": "747083fe-a5f7-4426-b116-d2d1d9794e09", + "ip": "45.63.85.181", + "username": "root" + }, + { + "context": "b9164d81-e68e-4012-84f1-b7d2cd7f216c", + "ip": "45.32.130.104", + "username": "root" + } + ] + }, + "loader": { + "container_name": "das-cli-loader" + }, + "openfaas": { + "container_name": "das-cli-openfaas-8080", + "version": "1.12.10", + "function": "query-engine" + }, + "jupyter_notebook": { + "port": 8888, + "container_name": "das-cli-jupyter-notebook-8888" + } +} diff --git a/tests/integration/fixtures/config/redis_cluster.json b/tests/integration/fixtures/config/redis_cluster.json index 1492715..5a17963 100644 --- a/tests/integration/fixtures/config/redis_cluster.json +++ 
b/tests/integration/fixtures/config/redis_cluster.json @@ -22,10 +22,19 @@ ] }, "mongodb": { + "cluster": false, "port": 27017, "container_name": "das-cli-mongodb-27017", "username": "admin", - "password": "admin" + "password": "admin", + "cluster_secret_key": "eri290eiw299e0-92", + "nodes": [ + { + "context": "default", + "ip": "localhost", + "username": "rafaellevi" + } + ] }, "loader": { "container_name": "das-cli-loader" diff --git a/tests/integration/fixtures/config/simple.json b/tests/integration/fixtures/config/simple.json index b2b4234..0fa528d 100644 --- a/tests/integration/fixtures/config/simple.json +++ b/tests/integration/fixtures/config/simple.json @@ -12,10 +12,19 @@ ] }, "mongodb": { + "cluster": false, "port": 27017, "container_name": "das-cli-mongodb-27017", "username": "admin", - "password": "admin" + "password": "admin", + "cluster_secret_key": "eri290eiw299e0-92", + "nodes": [ + { + "context": "default", + "ip": "localhost", + "username": "rafaellevi" + } + ] }, "loader": { "container_name": "das-cli-loader" diff --git a/tests/integration/test_config.bats b/tests/integration/test_config.bats index 172a46f..225389f 100644 --- a/tests/integration/test_config.bats +++ b/tests/integration/test_config.bats @@ -48,6 +48,7 @@ setup() { local mongodb_port="27017" local mongodb_username="admin" local mongodb_password="admin" + local mongodb_cluster="no" local jupyter_notebook_port="8888" run das-cli config set </dev/null run das-cli db stop - assert_output "Stopping redis service... -The Redis service at localhost has been stopped by the ${redis_node1_username} user -MongoDB service stopped" + assert_output "Stopping Redis service... +The Redis service at localhost has been stopped by the server user ${redis_node1_username} +Stopping MongoDB service... 
+The MongoDB service at localhost has been stopped by the server user ${mongodb_node1_username}" run is_service_up redis assert_failure @@ -132,13 +147,13 @@ MongoDB service stopped" @test "It should warns up when db is already stopped" { local redis_container_name="$(get_config .redis.container_name)" local mongodb_container_name="$(get_config .mongodb.container_name)" - local redis_node1_username="$(get_config ".redis.nodes[0].username")" run das-cli db stop - assert_output "Stopping redis service... -The Redis service named ${redis_container_name} at localhost is already stopped by the ${redis_node1_username} user. -The MongoDB service named ${mongodb_container_name} is already stopped." + assert_output "Stopping Redis service... +The Redis service named ${redis_container_name} at localhost is already stopped. +Stopping MongoDB service... +The MongoDB service named ${mongodb_container_name} at localhost is already stopped." run is_service_up redis assert_failure diff --git a/tests/integration/test_db_mongodb_cluster.bats b/tests/integration/test_db_mongodb_cluster.bats new file mode 100644 index 0000000..5387394 --- /dev/null +++ b/tests/integration/test_db_mongodb_cluster.bats @@ -0,0 +1,192 @@ +#!/usr/local/bin/bats + +load 'libs/bats-support/load' +load 'libs/bats-assert/load' +load 'libs/utils' +load 'libs/docker' + +setup() { + + use_config "mongodb_cluster" + + local mongodb_node2_ip="$(get_config .mongodb.nodes[1].ip)" + local mongodb_node2_username="$(get_config .mongodb.nodes[1].username)" + + local mongodb_node3_ip="$(get_config .mongodb.nodes[2].ip)" + local mongodb_node3_username="$(get_config .mongodb.nodes[2].username)" + + set_config ".mongodb.nodes[0].username" "\"$current_user\"" + set_config ".mongodb.nodes[1].context" "\"$(set_ssh_context "$mongodb_node2_username" "$mongodb_node2_ip")\"" + set_config ".mongodb.nodes[2].context" "\"$(set_ssh_context "$mongodb_node3_username" "$mongodb_node3_ip")\"" + + das-cli db stop +} + +teardown() { + 
das-cli db stop +} + +# bats test_tags=cluster +@test "Starting db with mongodb cluster" { + local mongodb_port="$(get_config ".mongodb.port")" + local mongodb_username="$(get_config ".mongodb.username")" + local mongodb_password="$(get_config ".mongodb.password")" + + local mongodb_node1_context="$(get_config ".mongodb.nodes[0].context")" + local mongodb_node1_ip="$(get_config ".mongodb.nodes[0].ip")" + local mongodb_node1_username="$(get_config ".mongodb.nodes[0].username")" + + local mongodb_node2_context="$(get_config ".mongodb.nodes[1].context")" + local mongodb_node2_ip="$(get_config ".mongodb.nodes[1].ip")" + local mongodb_node2_username="$(get_config ".mongodb.nodes[1].username")" + + local mongodb_node3_context="$(get_config ".mongodb.nodes[2].context")" + local mongodb_node3_ip="$(get_config ".mongodb.nodes[2].ip")" + local mongodb_node3_username="$(get_config ".mongodb.nodes[2].username")" + + run timeout 5m das-cli db start + + assert_success + + assert_line --partial "Starting MongoDB service..." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node1_ip}, operating under the server user ${mongodb_node1_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node2_ip}, operating under the server user ${mongodb_node2_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node3_ip}, operating under the server user ${mongodb_node3_username}." 
+ + unset_ssh_context "$mongodb_context_02" + unset_ssh_context "$mongodb_context_03" + + run exec_cmd_on_service "mongodb" "mongosh -u ${mongodb_username} -p ${mongodb_password} --eval 'rs.status().members.filter(member => member.state === 1 || member.state === 2).length' | tail -n 1" + + assert [ "$(clean_string $output)" == "3" ] + + run is_service_up mongodb + assert_success +} + +# bats test_tags=cluster +@test "Stopping db with mongodb cluster" { + local mongodb_port="$(get_config ".mongodb.port")" + + local mongodb_node1_context="$(get_config ".mongodb.nodes[0].context")" + local mongodb_node1_ip="$(get_config ".mongodb.nodes[0].ip")" + local mongodb_node1_username="$(get_config ".mongodb.nodes[0].username")" + + local mongodb_node2_context="$(get_config ".mongodb.nodes[1].context")" + local mongodb_node2_ip="$(get_config ".mongodb.nodes[1].ip")" + local mongodb_node2_username="$(get_config ".mongodb.nodes[1].username")" + + local mongodb_node3_context="$(get_config ".mongodb.nodes[2].context")" + local mongodb_node3_ip="$(get_config ".mongodb.nodes[2].ip")" + local mongodb_node3_username="$(get_config ".mongodb.nodes[2].username")" + + das-cli db start + + run timeout 5m das-cli db stop + + assert_success + + + assert_line --partial "Stopping MongoDB service..." 
+ assert_line --partial "The MongoDB service at ${mongodb_node2_ip} has been stopped by the server user ${mongodb_node2_username}" + assert_line --partial "The MongoDB service at ${mongodb_node1_ip} has been stopped by the server user ${mongodb_node1_username}" + assert_line --partial "The MongoDB service at ${mongodb_node3_ip} has been stopped by the server user ${mongodb_node3_username}" + + unset_ssh_context "$mongodb_context_02" + unset_ssh_context "$mongodb_context_03" + + run is_service_up mongodb + assert_failure + +} + +# bats test_tags=cluster +@test "Restarting db with mongodb cluster after cluster is up" { + local mongodb_port="$(get_config ".mongodb.port")" + local mongodb_username="$(get_config ".mongodb.username")" + local mongodb_password="$(get_config ".mongodb.password")" + + local mongodb_node1_context="$(get_config ".mongodb.nodes[0].context")" + local mongodb_node1_ip="$(get_config ".mongodb.nodes[0].ip")" + local mongodb_node1_username="$(get_config ".mongodb.nodes[0].username")" + + local mongodb_node2_context="$(get_config ".mongodb.nodes[1].context")" + local mongodb_node2_ip="$(get_config ".mongodb.nodes[1].ip")" + local mongodb_node2_username="$(get_config ".mongodb.nodes[1].username")" + + local mongodb_node3_context="$(get_config ".mongodb.nodes[2].context")" + local mongodb_node3_ip="$(get_config ".mongodb.nodes[2].ip")" + local mongodb_node3_username="$(get_config ".mongodb.nodes[2].username")" + + das-cli db start + + run timeout 5m das-cli db restart + + assert_success + + assert_line --partial "Stopping MongoDB service..." 
+ assert_line --partial "The MongoDB service at ${mongodb_node2_ip} has been stopped by the server user ${mongodb_node2_username}" + assert_line --partial "The MongoDB service at ${mongodb_node1_ip} has been stopped by the server user ${mongodb_node1_username}" + assert_line --partial "The MongoDB service at ${mongodb_node3_ip} has been stopped by the server user ${mongodb_node3_username}" + + assert_line --partial "Starting MongoDB service..." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node1_ip}, operating under the server user ${mongodb_node1_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node2_ip}, operating under the server user ${mongodb_node2_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node3_ip}, operating under the server user ${mongodb_node3_username}." + + unset_ssh_context "$mongodb_context_02" + unset_ssh_context "$mongodb_context_03" + + run exec_cmd_on_service "mongodb" "mongosh -u ${mongodb_username} -p ${mongodb_password} --eval 'rs.status().members.filter(member => member.state === 1 || member.state === 2).length' | tail -n 1" + + assert [ "$(clean_string $output)" == "3" ] + + run is_service_up mongodb + assert_success +} + +# bats test_tags=cluster +@test "Restarting db with mongodb cluster before cluster is up" { + local mongodb_port="$(get_config ".mongodb.port")" + local mongodb_username="$(get_config ".mongodb.username")" + local mongodb_password="$(get_config ".mongodb.password")" + local mongodb_container_name="$(get_config ".mongodb.container_name")" + local mongodb_container_name="$(get_config ".mongodb.container_name")" + + local mongodb_node1_context="$(get_config ".mongodb.nodes[0].context")" + local mongodb_node1_ip="$(get_config ".mongodb.nodes[0].ip")" + local mongodb_node1_username="$(get_config ".mongodb.nodes[0].username")" + + local 
mongodb_node2_context="$(get_config ".mongodb.nodes[1].context")" + local mongodb_node2_ip="$(get_config ".mongodb.nodes[1].ip")" + local mongodb_node2_username="$(get_config ".mongodb.nodes[1].username")" + + local mongodb_node3_context="$(get_config ".mongodb.nodes[2].context")" + local mongodb_node3_ip="$(get_config ".mongodb.nodes[2].ip")" + local mongodb_node3_username="$(get_config ".mongodb.nodes[2].username")" + + run timeout 5m das-cli db restart + + assert_success + + + assert_line --partial "Stopping MongoDB service..." + assert_line --partial "The MongoDB service named ${mongodb_container_name} at ${mongodb_node1_ip} is already stopped." + assert_line --partial "The MongoDB service named ${mongodb_container_name} at ${mongodb_node2_ip} is already stopped." + assert_line --partial "The MongoDB service named ${mongodb_container_name} at ${mongodb_node3_ip} is already stopped." + + assert_line --partial "Starting MongoDB service..." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node1_ip}, operating under the server user ${mongodb_node1_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node2_ip}, operating under the server user ${mongodb_node2_username}." + assert_line --partial "MongoDB has started successfully on port ${mongodb_port} at ${mongodb_node3_ip}, operating under the server user ${mongodb_node3_username}." 
+ + unset_ssh_context "$mongodb_context_02" + unset_ssh_context "$mongodb_context_03" + + run exec_cmd_on_service "mongodb" "mongosh -u ${mongodb_username} -p ${mongodb_password} --eval 'rs.status().members.filter(member => member.state === 1 || member.state === 2).length' | tail -n 1" + + assert [ "$(clean_string $output)" == "3" ] + + run is_service_up mongodb + assert_success +} diff --git a/tests/integration/test_db_cluster.bats b/tests/integration/test_db_redis_cluster.bats similarity index 63% rename from tests/integration/test_db_cluster.bats rename to tests/integration/test_db_redis_cluster.bats index 3398019..bfcc205 100644 --- a/tests/integration/test_db_cluster.bats +++ b/tests/integration/test_db_redis_cluster.bats @@ -26,7 +26,7 @@ teardown() { das-cli db stop } -# bats test_tags=redis:cluster +# bats test_tags=cluster @test "Starting db with redis cluster" { local mongodb_port="$(get_config ".mongodb.port")" local redis_port="$(get_config ".redis.port")" @@ -47,11 +47,10 @@ teardown() { assert_success - assert_output "Stopping redis service... -Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the user ${redis_node1_username}. -Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the user ${redis_node2_username}. -Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the user ${redis_node3_username}. -MongoDB started on port ${mongodb_port}" + assert_line --partial "Starting Redis service..." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the server user ${redis_node1_username}." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the server user ${redis_node2_username}." 
+ assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the server user ${redis_node3_username}." unset_ssh_context "$redis_context_02" unset_ssh_context "$redis_context_03" @@ -64,7 +63,7 @@ MongoDB started on port ${mongodb_port}" assert_success } -# bats test_tags=redis:cluster +# bats test_tags=cluster @test "Stopping db with redis cluster" { local redis_port="$(get_config ".redis.port")" @@ -86,11 +85,11 @@ MongoDB started on port ${mongodb_port}" assert_success - assert_output "Stopping redis service... -The Redis service at ${redis_node1_ip} has been stopped by the ${redis_node1_username} user -The Redis service at ${redis_node2_ip} has been stopped by the ${redis_node2_username} user -The Redis service at ${redis_node3_ip} has been stopped by the ${redis_node3_username} user -MongoDB service stopped" + + assert_line --partial "Stopping Redis service..." + assert_line --partial "The Redis service at ${redis_node2_ip} has been stopped by the server user ${redis_node2_username}" + assert_line --partial "The Redis service at ${redis_node1_ip} has been stopped by the server user ${redis_node1_username}" + assert_line --partial "The Redis service at ${redis_node3_ip} has been stopped by the server user ${redis_node3_username}" unset_ssh_context "$redis_context_02" unset_ssh_context "$redis_context_03" @@ -100,7 +99,7 @@ MongoDB service stopped" } -# bats test_tags=redis:cluster +# bats test_tags=cluster @test "Restarting db with redis cluster after cluster is up" { local mongodb_port="$(get_config ".mongodb.port")" local redis_port="$(get_config ".redis.port")" @@ -123,16 +122,15 @@ MongoDB service stopped" assert_success - assert_output "Stopping redis service... 
-The Redis service at ${redis_node1_ip} has been stopped by the root user -The Redis service at ${redis_node2_ip} has been stopped by the root user -The Redis service at ${redis_node3_ip} has been stopped by the root user -MongoDB service stopped -Stopping redis service... -Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the user ${redis_node1_username}. -Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the user ${redis_node2_username}. -Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the user ${redis_node3_username}. -MongoDB started on port ${mongodb_port}" + assert_line --partial "Stopping Redis service..." + assert_line --partial "The Redis service at ${redis_node2_ip} has been stopped by the server user ${redis_node2_username}" + assert_line --partial "The Redis service at ${redis_node1_ip} has been stopped by the server user ${redis_node1_username}" + assert_line --partial "The Redis service at ${redis_node3_ip} has been stopped by the server user ${redis_node3_username}" + + assert_line --partial "Starting Redis service..." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the server user ${redis_node1_username}." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the server user ${redis_node2_username}." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the server user ${redis_node3_username}." 
unset_ssh_context "$redis_context_02" unset_ssh_context "$redis_context_03" @@ -145,7 +143,7 @@ MongoDB started on port ${mongodb_port}" assert_success } -# bats test_tags=redis:cluster +# bats test_tags=cluster @test "Restarting db with redis cluster before cluster is up" { local mongodb_port="$(get_config ".mongodb.port")" local mongodb_container_name="$(get_config ".mongodb.container_name")" @@ -168,16 +166,16 @@ MongoDB started on port ${mongodb_port}" assert_success - assert_output "Stopping redis service... -The Redis service named ${redis_container_name} at ${redis_node1_ip} is already stopped by the ${redis_node1_username} user. -The Redis service named ${redis_container_name} at ${redis_node2_ip} is already stopped by the ${redis_node2_username} user. -The Redis service named ${redis_container_name} at ${redis_node3_ip} is already stopped by the ${redis_node3_username} user. -The MongoDB service named ${mongodb_container_name} is already stopped. -Stopping redis service... -Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the user ${redis_node1_username}. -Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the user ${redis_node2_username}. -Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the user ${redis_node3_username}. -MongoDB started on port ${mongodb_port}" + + assert_line --partial "Stopping Redis service..." + assert_line --partial "The Redis service named ${redis_container_name} at ${redis_node1_ip} is already stopped." + assert_line --partial "The Redis service named ${redis_container_name} at ${redis_node2_ip} is already stopped." + assert_line --partial "The Redis service named ${redis_container_name} at ${redis_node3_ip} is already stopped." + + assert_line --partial "Starting Redis service..." 
+ assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node1_ip}, operating under the server user ${redis_node1_username}." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node2_ip}, operating under the server user ${redis_node2_username}." + assert_line --partial "Redis has started successfully on port ${redis_port} at ${redis_node3_ip}, operating under the server user ${redis_node3_username}." unset_ssh_context "$redis_context_02" unset_ssh_context "$redis_context_03" diff --git a/tests/integration/test_python_library.bats b/tests/integration/test_python_library.bats index 5f9c103..af00354 100644 --- a/tests/integration/test_python_library.bats +++ b/tests/integration/test_python_library.bats @@ -91,9 +91,6 @@ One or more packages could not be updated." } @test "Update library versions to latest version" { - local current_hyperon_das_version="$(get_python_package_version hyperon-das)" - local current_hyperon_das_atomdb_version="$(get_python_package_version hyperon-das-atomdb)" - local latest_hyperon_das_version="$(get_python_package_latest_version hyperon-das)" local latest_hyperon_das_atomdb_version="$(get_python_package_latest_version hyperon-das-atomdb)" @@ -103,10 +100,10 @@ One or more packages could not be updated." Updating package hyperon-das-atomdb... All package has been successfully updated. hyperon-das - INSTALLED: $current_hyperon_das_version + INSTALLED: $latest_hyperon_das_version LATEST: $latest_hyperon_das_version hyperon-das-atomdb - INSTALLED: $current_hyperon_das_atomdb_version + INSTALLED: $latest_hyperon_das_atomdb_version LATEST: $latest_hyperon_das_atomdb_version" }