From f27733ca9e1e13c849530e215f696647d6daa403 Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Wed, 25 Sep 2024 14:31:08 +0200 Subject: [PATCH 1/8] feat: add ha_cluster_info module --- .gitlab-ci.yml | 13 + .sanity-ansible-ignore-2.13.txt | 4 + .sanity-ansible-ignore-2.14.txt | 6 + .sanity-ansible-ignore-2.15.txt | 6 + .sanity-ansible-ignore-2.16.txt | 4 + .sanity-ansible-ignore-2.17.txt | 2 + README.md | 79 ++ library/ha_cluster_info.py | 424 ++++++++++ tests/library | 1 + tests/tests_cluster_advanced_knet_full.yml | 66 +- .../tests_cluster_advanced_knet_implicit.yml | 34 + tests/tests_cluster_advanced_udp_full.yml | 48 +- tests/tests_cluster_basic.yml | 38 + tests/tests_cluster_basic_disabled.yml | 38 + tests/tests_cluster_destroy.yml | 12 + tests/unit/test_ha_cluster_info.py | 741 ++++++++++++++++++ 16 files changed, 1504 insertions(+), 12 deletions(-) create mode 100644 library/ha_cluster_info.py create mode 120000 tests/library create mode 100644 tests/unit/test_ha_cluster_info.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1f1fa0b9..6a1e036a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -167,6 +167,19 @@ unit_tests: - pcs --version - PYTHONPATH="./library:./module_utils:$PYTHONPATH" python -m unittest --verbose tests/unit/*.py +unit_tests_rhel8: + parallel: + matrix: + - BASE_IMAGE_NAME: *ALL_IMAGES + rules: + - if: $BASE_IMAGE_NAME =~ /^LsrRhel8.*/ + stage: tier0 + script: + - dnf install -y pcs + - *symlink_pcs_to_pyenv + - pcs --version + - PYTHONPATH="./library:./module_utils:$PYTHONPATH" python -m unittest --verbose tests/unit/test_ha_cluster_info.py + # tier 1 build_tier1_ci_yml: variables: diff --git a/.sanity-ansible-ignore-2.13.txt b/.sanity-ansible-ignore-2.13.txt index ebe4703e..845566e6 100644 --- a/.sanity-ansible-ignore-2.13.txt +++ b/.sanity-ansible-ignore-2.13.txt @@ -20,3 +20,7 @@ plugins/modules/pcs_qdevice_certs.py import-2.7!skip plugins/modules/pcs_qdevice_certs.py import-3.6!skip plugins/modules/pcs_qdevice_certs.py import-3.7!skip plugins/modules/pcs_qdevice_certs.py import-3.8!skip +plugins/modules/ha_cluster_info.py compile-2.7!skip +plugins/modules/ha_cluster_info.py import-2.7!skip +plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/.sanity-ansible-ignore-2.14.txt b/.sanity-ansible-ignore-2.14.txt index 33afc64e..43cb08c8 100644 --- a/.sanity-ansible-ignore-2.14.txt +++ b/.sanity-ansible-ignore-2.14.txt @@ -26,3 +26,9 @@ plugins/modules/pcs_qdevice_certs.py import-3.5!skip plugins/modules/pcs_qdevice_certs.py import-3.6!skip plugins/modules/pcs_qdevice_certs.py import-3.7!skip plugins/modules/pcs_qdevice_certs.py import-3.8!skip +plugins/modules/ha_cluster_info.py compile-2.7!skip +plugins/modules/ha_cluster_info.py compile-3.5!skip +plugins/modules/ha_cluster_info.py import-2.7!skip +plugins/modules/ha_cluster_info.py import-3.5!skip +plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/.sanity-ansible-ignore-2.15.txt b/.sanity-ansible-ignore-2.15.txt index 33afc64e..43cb08c8 100644 --- a/.sanity-ansible-ignore-2.15.txt +++ b/.sanity-ansible-ignore-2.15.txt @@ -26,3 +26,9 @@ plugins/modules/pcs_qdevice_certs.py import-3.5!skip plugins/modules/pcs_qdevice_certs.py import-3.6!skip plugins/modules/pcs_qdevice_certs.py import-3.7!skip plugins/modules/pcs_qdevice_certs.py import-3.8!skip +plugins/modules/ha_cluster_info.py compile-2.7!skip 
+plugins/modules/ha_cluster_info.py compile-3.5!skip
+plugins/modules/ha_cluster_info.py import-2.7!skip
+plugins/modules/ha_cluster_info.py import-3.5!skip
+plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
+tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
diff --git a/.sanity-ansible-ignore-2.16.txt b/.sanity-ansible-ignore-2.16.txt
index ebe4703e..845566e6 100644
--- a/.sanity-ansible-ignore-2.16.txt
+++ b/.sanity-ansible-ignore-2.16.txt
@@ -20,3 +20,7 @@ plugins/modules/pcs_qdevice_certs.py import-2.7!skip
 plugins/modules/pcs_qdevice_certs.py import-3.6!skip
 plugins/modules/pcs_qdevice_certs.py import-3.7!skip
 plugins/modules/pcs_qdevice_certs.py import-3.8!skip
+plugins/modules/ha_cluster_info.py compile-2.7!skip
+plugins/modules/ha_cluster_info.py import-2.7!skip
+plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
+tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
diff --git a/.sanity-ansible-ignore-2.17.txt b/.sanity-ansible-ignore-2.17.txt
index 4801c906..f3822b34 100644
--- a/.sanity-ansible-ignore-2.17.txt
+++ b/.sanity-ansible-ignore-2.17.txt
@@ -10,3 +10,5 @@ plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py compile-3.7!skip
 plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py compile-3.8!skip
 plugins/modules/pcs_qdevice_certs.py import-3.7!skip
 plugins/modules/pcs_qdevice_certs.py import-3.8!skip
+plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license
+tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip
diff --git a/README.md b/README.md
index 48dd5f09..2541b7e1 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,12 @@ An Ansible role for managing High Availability Clustering.
   * Pacemaker Access Control Lists (ACLs)
   * node and resource utilization
   * Pacemaker Alerts
+* The role provides the `ha_cluster_info` module, which exports the current
+  cluster configuration. The module is capable of exporting:
+  * single-link and multi-link clusters
+  * Corosync transport options, including compression and encryption
+  * Corosync totem options
+  * Corosync quorum options
 
 ## Requirements
 
@@ -1545,6 +1551,79 @@ all:
   monitoring. Defaults to empty list if not set. Always refer to the devices
   using the long, stable device name (`/dev/disk/by-id/`).
 
+## Export current cluster configuration
+
+The role provides the `ha_cluster_info` module, which exports the current
+cluster configuration in a dictionary matching the structure of this role's
+variables. If the role is then run with these variables, it recreates the same
+cluster.
+
+Note that the dictionary of variables may not be complete and manual
+modification of it is expected. Most notably, you need to set
+[`ha_cluster_hacluster_password`](#ha_cluster_hacluster_password).
+
+Note that depending on the pcs version installed on the managed nodes, certain
+variables may not be present in the export.
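+
+If you need to find out which pcs version is installed on a managed node, you
+can use a task like the following before running the export (a minimal sketch;
+it only assumes the `pcs` binary is in `PATH`, and the `__pcs_version`
+variable name is illustrative):
+
+```yaml
+- name: Check pcs version on the managed node
+  command: pcs --version
+  register: __pcs_version
+  changed_when: false
+
+- name: Show pcs version
+  debug:
+    var: __pcs_version.stdout
+```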
+
+* The following variables are present in the export:
+  * [`ha_cluster_cluster_present`](#ha_cluster_cluster_present)
+  * [`ha_cluster_start_on_boot`](#ha_cluster_start_on_boot)
+  * [`ha_cluster_cluster_name`](#ha_cluster_cluster_name)
+  * [`ha_cluster_transport`](#ha_cluster_transport)
+  * [`ha_cluster_totem`](#ha_cluster_totem)
+  * [`ha_cluster_quorum`](#ha_cluster_quorum)
+  * [`ha_cluster_node_options`](#ha_cluster_node_options) - currently only
+    `node_name`, `corosync_addresses` and `pcs_address` are present
+
+* The following variables are never present in the export (consult the role
+  documentation for the impact of these variables being missing when running
+  the role):
+  * [`ha_cluster_hacluster_password`](#ha_cluster_hacluster_password) - This is
+    a mandatory variable for the role, but it cannot be extracted from existing
+    clusters.
+  * [`ha_cluster_corosync_key_src`](#ha_cluster_corosync_key_src),
+    [`ha_cluster_pacemaker_key_src`](#ha_cluster_pacemaker_key_src) and
+    [`ha_cluster_fence_virt_key_src`](#ha_cluster_fence_virt_key_src) - These
+    are supposed to contain paths to files with the keys. Since the keys
+    themselves are not exported, these variables are not present in the export
+    either. Corosync and Pacemaker keys are supposed to be unique for each
+    cluster.
+  * [`ha_cluster_regenerate_keys`](#ha_cluster_regenerate_keys) - It is your
+    responsibility to decide whether you want to use existing keys or generate
+    new ones.
+
+To export the current cluster configuration and store it in the
+`ha_cluster_info_result` variable, write a task like this:
+
+```yaml
+- name: Get current cluster configuration
+  linux-system-roles.ha_cluster.ha_cluster_info:
+  register: ha_cluster_info_result
+```
+
+Then you may use the `ha_cluster_info_result` variable in your playbook,
+depending on your needs.
+
+If you just want to see the content of the variable, use the Ansible `debug`
+module like this:
+
+```yaml
+- name: Print ha_cluster_info_result variable
+  debug:
+    var: ha_cluster_info_result
+```
+
+Or you may want to save the configuration in YAML format to a file on your
+controller node with a task similar to this one, so that you can build a
+playbook around it:
+
+```yaml
+- name: Save current cluster configuration to a file
+  delegate_to: localhost
+  copy:
+    content: "{{
+      ha_cluster_info_result.ha_cluster | to_nice_yaml(sort_keys=false) }}"
+    dest: /path/to/file
+```
+
 ## Example Playbooks
 
 Following examples show what the structure of the role variables looks like.
diff --git a/library/ha_cluster_info.py b/library/ha_cluster_info.py
new file mode 100644
index 00000000..03dc9578
--- /dev/null
+++ b/library/ha_cluster_info.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2024 Red Hat, Inc.
+# Author: Tomas Jelinek
+# SPDX-License-Identifier: MIT
+
+# make ansible-test happy, even though the module requires Python 3
+from __future__ import absolute_import, division, print_function
+
+# make ansible-test happy, even though the module requires Python 3
+# pylint: disable=invalid-name
+__metaclass__ = type
+# pylint: enable=invalid-name
+
+DOCUMENTATION = r"""
+---
+module: ha_cluster_info
+short_description: Export HA cluster configuration
+description:
+    This module exports live cluster configuration in the form of variables
+    which recreate the same configuration when passed to the ha_cluster role.
+    Note that the set of variables may not be complete and manual modification
+    of the result is expected (at least setting ha_cluster_hacluster_password
+    is required).
+author: + - Tomas Jelinek (@tomjelinek) +requirements: + - pcs-0.10.8 or newer installed on managed nodes + - pcs-0.10.8 or newer for exporting corosync configuration + - python 3.6 or newer +""" + +EXAMPLES = r""" +- name: Get HA cluster configuration + ha_cluster_info: + register: my_ha_cluster_info +""" + +RETURN = r""" +ha_cluster: + returned: success + type: dict + description: + - Information about existing cluster on the node. If passed to + ha_cluster role, the role recreates the same cluster. Note that the + set of variables may not be complete and manual modification of the + result is expected. The variables are documented in the role. + - Note that depending on pcs version present on the managed node, + certain variables may not be exported. + - HORIZONTALLINE + - Following variables are present in the output + - ha_cluster_cluster_present + - ha_cluster_start_on_boot + - ha_cluster_cluster_name + - ha_cluster_transport + - ha_cluster_totem + - ha_cluster_quorum + - ha_cluster_node_options - currently only node_name, + corosync_addresses and pcs_address are present + - HORIZONTALLINE + - Following variables are required for running ha_cluster role but are + never present in this module output + - ha_cluster_hacluster_password + - HORIZONTALLINE + - Following variables are never present in this module output (consult + the role documentation for impact of the variables missing) + - ha_cluster_corosync_key_src + - ha_cluster_pacemaker_key_src + - ha_cluster_fence_virt_key_src + - ha_cluster_regenerate_keys + - HORIZONTALLINE +""" + +import json +import os.path +from typing import Any, Dict, List + +from ansible.module_utils.basic import AnsibleModule + +COROSYNC_CONF_PATH = "/etc/corosync/corosync.conf" +KNOWN_HOSTS_PATH = "/var/lib/pcsd/known-hosts" + + +class PcsCliError(Exception): + """ + Parent exception for errors from running pcs CLI + """ + + def __init__( + self, pcs_command: List[str], rc: int, stdout: str, stderr: str + ): + self.pcs_command = pcs_command + self.rc = rc + self.stdout = stdout + self.stderr = stderr + + @property + def kwargs(self) -> Dict[str, Any]: + """ + Arguments given to the constructor + """ + return dict( + pcs_command=self.pcs_command, + rc=self.rc, + stdout=self.stdout, + stderr=self.stderr, + ) + + +class PcsCliRunError(PcsCliError): + """ + Running pcs has failed + """ + + +class PcsCliJsonError(PcsCliError): + """ + Pcs output cannot be decoded as a JSON + """ + + def __init__( + self, + pcs_command: List[str], + rc: int, + stdout: str, + stderr: str, + json_error: str, + ): + # pylint: disable=too-many-arguments + # pylint 3.3 produces too-many-positional-arguments, but pylint 3.2 + # complies that it doesn't know such an option. So we need + # unknown-option-value to silence pylint 3.2. 
+ # pylint: disable=unknown-option-value + # pylint: disable=too-many-positional-arguments + super().__init__(pcs_command, rc, stdout, stderr) + self.json_error = json_error + + @property + def kwargs(self) -> Dict[str, Any]: + result = super().kwargs + result.update(dict(json_error=self.json_error)) + return result + + +class PcsJsonParseError(Exception): + """ + Unable to parse JSON data + """ + + def __init__(self, error: str, data: str, data_desc: str): + self.error = error + self.data = data + self.data_desc = data_desc + + @property + def kwargs(self) -> Dict[str, Any]: + """ + Arguments given to the constructor + """ + return dict(error=self.error, data=self.data, data_desc=self.data_desc) + + +class PcsJsonMissingKey(Exception): + """ + A key is not present in pcs JSON output + """ + + def __init__(self, key: str, data: Dict[str, Any], data_desc: str): + self.key = key + self.data = data + self.data_desc = data_desc + + @property + def kwargs(self) -> Dict[str, Any]: + """ + Arguments given to the constructor + """ + return dict(key=self.key, data=self.data, data_desc=self.data_desc) + + +def dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Convert a dict to a list of dicts with keys 'name' and 'value' + """ + return [dict(name=name, value=value) for name, value in input_dict.items()] + + +def is_service_enabled(module: AnsibleModule, service: str) -> bool: + """ + Check whether a specified service is enabled in the OS + + service -- name of the service to check without the ".service" suffix + """ + env = { + # make sure to get output of external processes in English and ASCII + "LC_ALL": "C", + } + # wokeignore:rule=dummy + rc, dummy_stdout, dummy_stderr = module.run_command( + ["systemctl", "is-enabled", f"{service}.service"], + check_rc=False, + environ_update=env, + ) + return rc == 0 + + +def call_pcs_cli(module: AnsibleModule, command: List[str]) -> Dict[str, Any]: + """ + Run pcs CLI with the specified command, transform resulting JSON into a dict + + command -- pcs command to run without the "pcs" prefix + """ + env = { + # make sure to get output of external processes in English and ASCII + "LC_ALL": "C", + } + full_command = ["pcs"] + command + rc, stdout, stderr = module.run_command( + full_command, + check_rc=False, + environ_update=env, + ) + if rc != 0: + raise PcsCliRunError(full_command, rc, stdout, stderr) + try: + return json.loads(stdout) + except json.JSONDecodeError as e: + raise PcsCliJsonError(full_command, rc, stdout, stderr, str(e)) from e + + +def export_start_on_boot(module: AnsibleModule) -> bool: + """ + Detect wheter a cluster is configured to start on boot + """ + return is_service_enabled(module, "corosync") or is_service_enabled( + module, "pacemaker" + ) + + +def export_corosync_conf(module: AnsibleModule) -> Dict[str, Any]: + """ + Export corosync configuration + """ + conf_dict = call_pcs_cli( + module, ["cluster", "config", "--output-format=json"] + ) + result: Dict[str, Any] = dict() + try: + result["ha_cluster_cluster_name"] = conf_dict["cluster_name"] + + transport = dict(type=conf_dict["transport"].lower()) + if conf_dict["transport_options"]: + transport["options"] = dict_to_nv_list( + conf_dict["transport_options"] + ) + if conf_dict["links_options"]: + link_list = [] + for link_dict in conf_dict["links_options"].values(): + # linknumber is an index in links_options, but it is present in + # link_dict as well + link_list.append(dict_to_nv_list(link_dict)) + transport["links"] = link_list + if 
conf_dict["compression_options"]: + transport["compression"] = dict_to_nv_list( + conf_dict["compression_options"] + ) + if conf_dict["crypto_options"]: + transport["crypto"] = dict_to_nv_list(conf_dict["crypto_options"]) + result["ha_cluster_transport"] = transport + + if conf_dict["totem_options"]: + result["ha_cluster_totem"] = dict( + options=dict_to_nv_list(conf_dict["totem_options"]) + ) + if conf_dict["quorum_options"]: + result["ha_cluster_quorum"] = dict( + options=dict_to_nv_list(conf_dict["quorum_options"]) + ) + + if conf_dict["nodes"]: + node_list = [] + for index, node_dict in enumerate(conf_dict["nodes"]): + try: + node_list.append( + dict( + node_name=node_dict["name"], + corosync_addresses=[ + addr_dict["addr"] + for addr_dict in sorted( + node_dict["addrs"], + key=lambda item: item["link"], + ) + ], + ) + ) + except KeyError as e: + raise PcsJsonMissingKey( + e.args[0], + conf_dict, + f"corosync configuration for node on index {index}", + ) from e + result["ha_cluster_node_options"] = node_list + + except KeyError as e: + raise PcsJsonMissingKey( + e.args[0], conf_dict, "corosync configuration" + ) from e + return result + + +def load_pcsd_known_hosts() -> Dict[str, str]: + """ + Load pcsd known hosts and return dict node_name: node_address + """ + result: Dict[str, str] = dict() + if not os.path.exists(KNOWN_HOSTS_PATH): + return result + try: + with open(KNOWN_HOSTS_PATH, "r", encoding="utf-8") as known_hosts_file: + known_hosts = json.load(known_hosts_file) + for host_name, host_data in known_hosts.get("known_hosts", {}).items(): + if not host_data.get("dest_list"): + continue + # currently no more than one address is supported by both the role + # and pcs + addr = host_data.get("dest_list")[0].get("addr") + port = host_data.get("dest_list")[0].get("port") + if not addr: + continue + host_addr = addr + if port: + host_addr = ( + f"[{addr}]:{port}" if ":" in addr else f"{addr}:{port}" + ) + result[host_name] = host_addr + return result + except json.JSONDecodeError as e: + # cannot show actual data as they contain sensitive information - tokens + raise PcsJsonParseError( + str(e), "not logging data", "known hosts" + ) from e + + +def merge_known_hosts( + result: Dict[str, Any], node_addr: Dict[str, str] +) -> None: + """ + Merge pcs node addresses into ha_cluster_node_options key + + result -- structure with exported corosync configuration + node_addr -- node_name: node_addr map loaded from known hosts + """ + # node_addr may contain records for nodes which are not part of the cluster + # being exported. These are ignored. We are only interested in pcs + # addresses of nodes forming the exported cluster. + if not node_addr: + return + if "ha_cluster_node_options" not in result: + return + for node_options in result["ha_cluster_node_options"]: + if node_options["node_name"] in node_addr: + node_options["pcs_address"] = node_addr[node_options["node_name"]] + + +def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]: + """ + Export existing HA cluster configuration + """ + # Until pcs is able to export the whole configuration in one go, we need to + # put it together from separate parts provided by pcs. Some parts are only + # available in recent pcs versions. Check pcs capabilities. + result: dict[str, Any] = dict() + + result["ha_cluster_start_on_boot"] = export_start_on_boot(module) + + # Corosync config is availabe via CLI since pcs-0.10.8, via API v2 since + # pcs-0.12.0 and pcs-0.11.9. 
For old pcs versions, CLI must be used, and + # there is no benefit in implementing access via API on top of that. + # No need to check pcs capabilities. If this is not supported by pcs, + # exporting anything else is pointless (and not supported by pcs anyway). + result.update(**export_corosync_conf(module)) + + # known-hosts file is available since pcs-0.10, but is not exported by pcs + # in any version. + # No need to check pcs capabilities. + merge_known_hosts(result, load_pcsd_known_hosts()) + + return result + + +def main() -> None: + """ + Top level module function + """ + module_args: Dict[str, Any] = dict() + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + module_result: Dict[str, Any] = dict() + ha_cluster_result: Dict[str, Any] = dict() + module_result["ha_cluster"] = ha_cluster_result + + try: + if os.path.exists(COROSYNC_CONF_PATH): + ha_cluster_result.update(**export_cluster_configuration(module)) + ha_cluster_result["ha_cluster_cluster_present"] = True + else: + ha_cluster_result["ha_cluster_cluster_present"] = False + module.exit_json(**module_result) + except PcsJsonMissingKey as e: + module.fail_json( + msg=f"Missing key in pcs {e.data_desc} JSON output: {e.key}", + pcs_error=e.kwargs, + ) + except PcsCliJsonError as e: + module.fail_json( + msg="Error while parsing pcs JSON output", pcs_error=e.kwargs + ) + except PcsCliError as e: + module.fail_json(msg="Error while running pcs", pcs_error=e.kwargs) + + +if __name__ == "__main__": + main() diff --git a/tests/library b/tests/library new file mode 120000 index 00000000..53bed968 --- /dev/null +++ b/tests/library @@ -0,0 +1 @@ +../library \ No newline at end of file diff --git a/tests/tests_cluster_advanced_knet_full.yml b/tests/tests_cluster_advanced_knet_full.yml index 1f766a03..70cb27ce 100644 --- a/tests/tests_cluster_advanced_knet_full.yml +++ b/tests/tests_cluster_advanced_knet_full.yml @@ -12,6 +12,11 @@ value: ipv4-6 - name: link_mode value: active + # Links variable defines two links, one with implicit linknumber and the + # other with specified linknumber. On top of that, the links are not + # listed in order by linknumbers. This is intentional. It tests that the + # role is capable of handling such links definitions, assigning correct + # linknumbers and not mixing the links. 
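+      # The export assertions added at the end of this test expect the links
+      # back in linknumber order (0, then 1).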
links: - - name: transport # yamllint disable-line rule:hyphens @@ -25,7 +30,7 @@ value: 5 compression: - name: level - value: 5 + value: "5" - name: model value: zlib crypto: @@ -36,15 +41,15 @@ ha_cluster_totem: options: - name: send_join - value: 0 + value: "0" - name: token_retransmits_before_loss_const - value: 5 + value: "5" ha_cluster_quorum: options: - name: auto_tie_breaker - value: 1 + value: "1" - name: wait_for_all - value: 1 + value: "1" tasks: - name: Run test @@ -119,6 +124,57 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml + - name: Export cluster configuration + ha_cluster_info: + register: __test_info + + - name: Check exported configuration + vars: + __test_exported_config: > + {{ + __test_info.ha_cluster | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: true + ha_cluster_transport: + type: knet + options: "{{ ha_cluster_transport.options }}" + links: + - + - name: link_priority # yamllint disable-line rule:hyphens + value: "5" + - name: linknumber + value: "0" + - + - name: link_priority # yamllint disable-line rule:hyphens + value: "10" + - name: transport + value: udp + - name: linknumber + value: "1" + compression: "{{ ha_cluster_transport.compression }}" + crypto: "{{ ha_cluster_transport.crypto }}" + ha_cluster_totem: "{{ ha_cluster_totem }}" + ha_cluster_quorum: "{{ ha_cluster_quorum }}" + ha_cluster_node_options: "it depends on test environment" + block: + - name: Print exported configuration + debug: + var: __test_exported_config + + - name: Print expected configuration + debug: + var: __test_expected_config + + - name: Compare expected and exported configuration + assert: + that: + - __test_exported_config == __test_expected_config + always: - name: Unset node addresses variable set_fact: diff --git a/tests/tests_cluster_advanced_knet_implicit.yml b/tests/tests_cluster_advanced_knet_implicit.yml index 7e93bf95..c8351dac 100644 --- a/tests/tests_cluster_advanced_knet_implicit.yml +++ b/tests/tests_cluster_advanced_knet_implicit.yml @@ -61,3 +61,37 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml + + - name: Export cluster configuration + ha_cluster_info: + register: __test_info + + - name: Check exported configuration + vars: + __test_exported_config: > + {{ + __test_info.ha_cluster | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: true + ha_cluster_transport: + type: knet + crypto: "{{ ha_cluster_transport.crypto }}" + ha_cluster_node_options: "it depends on test environment" + block: + - name: Print exported configuration + debug: + var: __test_exported_config + + - name: Print expected configuration + debug: + var: __test_expected_config + + - name: Compare expected and exported configuration + assert: + that: + - __test_exported_config == __test_expected_config diff --git a/tests/tests_cluster_advanced_udp_full.yml b/tests/tests_cluster_advanced_udp_full.yml index 2e23ef77..16aa7360 100644 --- a/tests/tests_cluster_advanced_udp_full.yml +++ b/tests/tests_cluster_advanced_udp_full.yml @@ -11,25 +11,25 @@ - name: ip_version value: ipv4-6 - name: netmtu - value: 1024 + value: "1024" links: - - name: broadcast # yamllint disable-line rule:hyphens - value: 1 
+ value: "1" - name: ttl - value: 64 + value: "64" ha_cluster_totem: options: - name: send_join - value: 0 + value: "0" - name: token_retransmits_before_loss_const - value: 5 + value: "5" ha_cluster_quorum: options: - name: auto_tie_breaker - value: 1 + value: "1" - name: wait_for_all - value: 1 + value: "1" tasks: - name: Run test @@ -85,3 +85,37 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml + + - name: Export cluster configuration + ha_cluster_info: + register: __test_info + + - name: Check exported configuration + vars: + __test_exported_config: > + {{ + __test_info.ha_cluster | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: true + ha_cluster_transport: "{{ ha_cluster_transport }}" + ha_cluster_totem: "{{ ha_cluster_totem }}" + ha_cluster_quorum: "{{ ha_cluster_quorum }}" + ha_cluster_node_options: "it depends on test environment" + block: + - name: Print exported configuration + debug: + var: __test_exported_config + + - name: Print expected configuration + debug: + var: __test_expected_config + + - name: Compare expected and exported configuration + assert: + that: + - __test_exported_config == __test_expected_config diff --git a/tests/tests_cluster_basic.yml b/tests/tests_cluster_basic.yml index 5d5dd230..88e717ae 100644 --- a/tests/tests_cluster_basic.yml +++ b/tests/tests_cluster_basic.yml @@ -111,3 +111,41 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml + + - name: Export cluster configuration + ha_cluster_info: + register: __test_info + + - name: Check exported configuration + vars: + __test_exported_config: > + {{ + __test_info.ha_cluster | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: true + ha_cluster_transport: + type: knet + crypto: + - name: cipher + value: aes256 + - name: hash + value: sha256 + ha_cluster_node_options: "it depends on test environment" + block: + - name: Print exported configuration + debug: + var: __test_exported_config + + - name: Print expected configuration + debug: + var: __test_expected_config + + - name: Compare expected and exported configuration + assert: + that: + - __test_exported_config == __test_expected_config diff --git a/tests/tests_cluster_basic_disabled.yml b/tests/tests_cluster_basic_disabled.yml index 39760cb6..fbba0c04 100644 --- a/tests/tests_cluster_basic_disabled.yml +++ b/tests/tests_cluster_basic_disabled.yml @@ -36,3 +36,41 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml + + - name: Export cluster configuration + ha_cluster_info: + register: __test_info + + - name: Check exported configuration + vars: + __test_exported_config: > + {{ + __test_info.ha_cluster | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: false + ha_cluster_transport: + type: knet + crypto: + - name: cipher + value: aes256 + - name: hash + value: sha256 + ha_cluster_node_options: "it depends on test environment" + block: + - name: Print exported configuration + debug: + var: __test_exported_config + + - name: Print expected configuration + debug: 
+ var: __test_expected_config + + - name: Compare expected and exported configuration + assert: + that: + - __test_exported_config == __test_expected_config diff --git a/tests/tests_cluster_destroy.yml b/tests/tests_cluster_destroy.yml index 36d31d16..db171363 100644 --- a/tests/tests_cluster_destroy.yml +++ b/tests/tests_cluster_destroy.yml @@ -40,3 +40,15 @@ - not stat_corosync_conf.stat.exists - not stat_cib_xml.stat.exists - not stat_fence_xvm_key.stat.exists + + - name: Export cluster configuration + ha_cluster_info: + register: test_info + + - name: Check exported configuration + assert: + that: + - test_info.ha_cluster == expected_configuration + vars: + expected_configuration: + ha_cluster_cluster_present: false diff --git a/tests/unit/test_ha_cluster_info.py b/tests/unit/test_ha_cluster_info.py new file mode 100644 index 00000000..29ea63f9 --- /dev/null +++ b/tests/unit/test_ha_cluster_info.py @@ -0,0 +1,741 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2024 Red Hat, Inc. +# Author: Tomas Jelinek +# SPDX-License-Identifier: MIT + +# pylint: disable=missing-class-docstring +# pylint: disable=missing-function-docstring + +import json +import sys +from copy import deepcopy +from importlib import import_module +from typing import Any, Dict +from unittest import TestCase, mock + +sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( + "ha_cluster_lsr" +) + +import ha_cluster_info + + +class DictToNvList(TestCase): + def test_no_item(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict()), + [], + ) + + def test_one_item(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict(one="1")), + [dict(name="one", value="1")], + ) + + def test_two_items(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict(one="1", two="2")), + [dict(name="one", value="1"), dict(name="two", value="2")], + ) + + +class IsServiceEnabled(TestCase): + def setUp(self) -> None: + self.module_mock = mock.Mock() + self.module_mock.run_command = mock.Mock() + + def test_is_enabled(self) -> None: + self.module_mock.run_command.return_value = (0, "enabled", "") + self.assertTrue( + ha_cluster_info.is_service_enabled(self.module_mock, "corosync") + ) + self.module_mock.run_command.assert_called_once_with( + ["systemctl", "is-enabled", "corosync.service"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + def test_is_disabled(self) -> None: + self.module_mock.run_command.return_value = (1, "disabled", "") + self.assertFalse( + ha_cluster_info.is_service_enabled(self.module_mock, "pacemaker") + ) + self.module_mock.run_command.assert_called_once_with( + ["systemctl", "is-enabled", "pacemaker.service"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + def test_unexpected_output(self) -> None: + self.module_mock.run_command.return_value = (4, "not-found", "") + self.assertFalse( + ha_cluster_info.is_service_enabled(self.module_mock, "pcmk") + ) + self.module_mock.run_command.assert_called_once_with( + ["systemctl", "is-enabled", "pcmk.service"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + +class CallPcsCli(TestCase): + def setUp(self) -> None: + self.module_mock = mock.Mock() + self.module_mock.run_command = mock.Mock() + + def test_success(self) -> None: + self.module_mock.run_command.return_value = ( + 0, + """{"json": "test data", "foo": "bar"}""", + "", + ) + self.assertEqual( + ha_cluster_info.call_pcs_cli( + self.module_mock, ["cluster", "config"] + ), + dict(json="test data", foo="bar"), + ) 
+ self.module_mock.run_command.assert_called_once_with( + ["pcs", "cluster", "config"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + def test_pcs_error(self) -> None: + self.module_mock.run_command.return_value = ( + 1, + "some stdout message", + "some stderr message", + ) + with self.assertRaises(ha_cluster_info.PcsCliRunError) as cm: + ha_cluster_info.call_pcs_cli( + self.module_mock, ["cluster", "config"] + ) + self.assertEqual( + cm.exception.kwargs, + dict( + pcs_command=["pcs", "cluster", "config"], + stdout="some stdout message", + stderr="some stderr message", + rc=1, + ), + ) + self.module_mock.run_command.assert_called_once_with( + ["pcs", "cluster", "config"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + def test_json_error(self) -> None: + self.module_mock.run_command.return_value = ( + 0, + "not a json", + "", + ) + with self.assertRaises(ha_cluster_info.PcsCliJsonError) as cm: + ha_cluster_info.call_pcs_cli( + self.module_mock, ["cluster", "config"] + ) + self.assertEqual( + cm.exception.kwargs, + dict( + json_error="Expecting value: line 1 column 1 (char 0)", + pcs_command=["pcs", "cluster", "config"], + stdout="not a json", + stderr="", + rc=0, + ), + ) + self.module_mock.run_command.assert_called_once_with( + ["pcs", "cluster", "config"], + check_rc=False, + environ_update={"LC_ALL": "C"}, + ) + + +class ExportStartOnBoot(TestCase): + @mock.patch("ha_cluster_info.is_service_enabled") + def test_main(self, mock_is_enabled: mock.Mock) -> None: + module = mock.Mock() + mock_is_enabled.side_effect = [False, False] + self.assertFalse(ha_cluster_info.export_start_on_boot(module)) + + mock_is_enabled.side_effect = [True, False] + self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + + mock_is_enabled.side_effect = [False, True] + self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + + mock_is_enabled.side_effect = [True, True] + self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + + +class ExportCorosyncConf(TestCase): + maxDiff = None + + def setUp(self) -> None: + self.module_mock = mock.Mock() + self.module_mock.run_command = mock.Mock() + + def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: + self.module_mock.run_command.reset_mock() + self.module_mock.run_command.return_value = (0, json.dumps(data), "") + with self.assertRaises(ha_cluster_info.PcsJsonMissingKey) as cm: + ha_cluster_info.export_corosync_conf(self.module_mock) + self.assertEqual( + cm.exception.kwargs, + dict(data=data, key=key, data_desc="corosync configuration"), + ) + + def test_missing_keys(self) -> None: + self.assert_missing_key(dict(), "cluster_name") + self.assert_missing_key(dict(cluster_name="x"), "transport") + self.assert_missing_key( + dict(cluster_name="x", transport="x"), "transport_options" + ) + self.assert_missing_key( + dict(cluster_name="x", transport="x", transport_options=dict()), + "links_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + ), + "compression_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + ), + "crypto_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + ), + "totem_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + 
transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + ), + "quorum_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + ), + "nodes", + ) + + def call_pcs(self, pcs_data: Dict[str, Any]) -> Dict[str, Any]: + self.module_mock.run_command.return_value = ( + 0, + json.dumps(pcs_data), + "", + ) + return ha_cluster_info.export_corosync_conf(self.module_mock) + + def test_minimal(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ), + ) + + def test_simple_options_mirroring(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + totem_options=dict(totem1="a", totem2="b"), + transport_options=dict(transport1="c", transport2="d"), + compression_options=dict(compression1="e", compression2="f"), + crypto_options=dict(crypto1="g", crypto2="h"), + quorum_options=dict(quorum1="i", quorum2="j"), + links_options=dict(), + nodes=[], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + options=[ + dict(name="transport1", value="c"), + dict(name="transport2", value="d"), + ], + compression=[ + dict(name="compression1", value="e"), + dict(name="compression2", value="f"), + ], + crypto=[ + dict(name="crypto1", value="g"), + dict(name="crypto2", value="h"), + ], + ), + ha_cluster_totem=dict( + options=[ + dict(name="totem1", value="a"), + dict(name="totem2", value="b"), + ], + ), + ha_cluster_quorum=dict( + options=[ + dict(name="quorum1", value="i"), + dict(name="quorum2", value="j"), + ], + ), + ), + ) + + def test_one_link(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options={"0": dict(name1="value1", name2="value2")}, + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + links=[ + [ + dict(name="name1", value="value1"), + dict(name="name2", value="value2"), + ] + ], + ), + ), + ) + + def test_more_links(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options={ + "0": dict(linknumber="0", name0="value0"), + "7": dict(linknumber="7", name7="value7"), + "3": dict(linknumber="3", name3="value3"), + }, + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + links=[ + [ + dict(name="linknumber", value="0"), + dict(name="name0", value="value0"), + ], + [ + dict(name="linknumber", value="7"), + dict(name="name7", value="value7"), + ], + [ 
+ dict(name="linknumber", value="3"), + dict(name="name3", value="value3"), + ], + ], + ), + ), + ) + + def test_nodes_one_link(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ), + ) + + def test_nodes_multiple_links(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ), + ) + + def test_nodes_no_address(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[], + ), + ], + ) + role_data = self.call_pcs(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=[]), + ], + ), + ) + + def assert_missing_key_nodes( + self, data: Dict[str, Any], key: str, index: str = "0" + ) -> None: + self.module_mock.run_command.reset_mock() + self.module_mock.run_command.return_value = (0, json.dumps(data), "") + with self.assertRaises(ha_cluster_info.PcsJsonMissingKey) as cm: + ha_cluster_info.export_corosync_conf(self.module_mock) + self.assertEqual( + cm.exception.kwargs, + dict( + data=data, + key=key, + data_desc=f"corosync configuration for node on index {index}", + ), + ) + + def test_nodes_missing_keys(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + nodes=[dict(foo="bar")], + ) + + pcs_data["nodes"] = [ + dict(), + ] + self.assert_missing_key_nodes(pcs_data, "name") + + pcs_data["nodes"] = [ + dict(name="nodename"), + ] + self.assert_missing_key_nodes(pcs_data, "addrs") + + pcs_data["nodes"] = [ + dict(name="nodename", addrs=[dict()]), + ] + self.assert_missing_key_nodes(pcs_data, "link") + + pcs_data["nodes"] = [ + dict(name="nodename", addrs=[dict(link="0")]), + ] + self.assert_missing_key_nodes(pcs_data, "addr") 
+ + +class LoadPcsdKnownHosts(TestCase): + file_path = "/var/lib/pcsd/known-hosts" + + @mock.patch("ha_cluster_info.os.path.exists") + def test_file_not_present(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = False + self.assertEqual(ha_cluster_info.load_pcsd_known_hosts(), dict()) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_info.os.path.exists") + def test_json_error(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "not a json" + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + with self.assertRaises(ha_cluster_info.PcsJsonParseError) as cm: + ha_cluster_info.load_pcsd_known_hosts() + self.assertEqual( + cm.exception.kwargs, + dict( + data="not logging data", + data_desc="known hosts", + error="Expecting value: line 1 column 1 (char 0)", + ), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_info.os.path.exists") + def test_json_empty(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "{}" + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + self.assertEqual( + ha_cluster_info.load_pcsd_known_hosts(), + dict(), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_info.os.path.exists") + def test_extract(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = json.dumps( + dict( + known_hosts=dict( + node1=dict(), + node2=dict(dest_list=[]), + node3=dict(dest_list=[dict()]), + node4=dict(dest_list=[dict(addr="node4A")]), + node5=dict(dest_list=[dict(port="10005")]), + node6=dict(dest_list=[dict(addr="node6A", port="10006")]), + node7=dict( + dest_list=[dict(addr="2001:db8::7", port="10007")] + ), + node8=dict( + dest_list=[ + dict(addr="192.0.2.8", port="10008"), + dict(addr="node8B"), + ] + ), + ) + ) + ) + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + self.assertEqual( + ha_cluster_info.load_pcsd_known_hosts(), + dict( + node4="node4A", + node6="node6A:10006", + node7="[2001:db8::7]:10007", + node8="192.0.2.8:10008", + ), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + +class MergeKnownHosts(TestCase): + maxDiff = None + + def test_no_known_hosts(self) -> None: + data = dict( + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ) + expected_result = deepcopy(data) + ha_cluster_info.merge_known_hosts(data, dict()) + self.assertEqual(data, expected_result) + + def test_no_node_options(self) -> None: + data: Dict[str, Any] = dict() + ha_cluster_info.merge_known_hosts(data, dict(node1="node1A")) + self.assertEqual(data, dict()) + + def test_merge(self) -> None: + data = dict( + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + dict(node_name="node4", pcs_address="node4addr"), + ], + other_key="is not touched", + ) + known_hosts = dict(node1="node1A", node3="node3A", node4="node4A") + ha_cluster_info.merge_known_hosts(data, known_hosts) + self.assertEqual( + data, 
+ dict( + ha_cluster_node_options=[ + dict( + node_name="node1", + corosync_addresses=["node1addr"], + pcs_address="node1A", + ), + dict( + node_name="node2", + corosync_addresses=["node2addr"], + ), + dict(node_name="node4", pcs_address="node4A"), + ], + other_key="is not touched", + ), + ) + + +class ExportClusterConfiguration(TestCase): + maxDiff = None + + @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + @mock.patch("ha_cluster_info.export_corosync_conf") + @mock.patch("ha_cluster_info.export_start_on_boot") + def test_export( + self, + mock_export_start_on_boot: mock.Mock, + mock_export_corosync_conf: mock.Mock, + mock_load_pcsd_known_hosts: mock.Mock, + ) -> None: + module_mock = mock.Mock() + mock_export_start_on_boot.return_value = True + mock_export_corosync_conf.return_value = dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ha_cluster_node_options=[ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ) + mock_load_pcsd_known_hosts.return_value = dict( + node1="node1pcs", + node2="node2pcs", + ) + self.assertEqual( + ha_cluster_info.export_cluster_configuration(module_mock), + dict( + ha_cluster_start_on_boot=True, + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ha_cluster_node_options=[ + dict( + node_name="node1", + corosync_addresses=["node1addr"], + pcs_address="node1pcs", + ), + dict( + node_name="node2", + corosync_addresses=["node2addr"], + pcs_address="node2pcs", + ), + ], + ), + ) From 5320fd621cdb64527283c3ce1efc49f8805646eb Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Wed, 2 Oct 2024 17:06:15 +0200 Subject: [PATCH 2/8] refactor: ha_cluster_info: reorganize exceptions Implementing changes proposed in code review --- library/ha_cluster_info.py | 85 +++++++++++------------------- tests/unit/test_ha_cluster_info.py | 20 +++---- 2 files changed, 41 insertions(+), 64 deletions(-) diff --git a/library/ha_cluster_info.py b/library/ha_cluster_info.py index 03dc9578..931a94da 100644 --- a/library/ha_cluster_info.py +++ b/library/ha_cluster_info.py @@ -74,7 +74,7 @@ import json import os.path -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from ansible.module_utils.basic import AnsibleModule @@ -82,9 +82,9 @@ KNOWN_HOSTS_PATH = "/var/lib/pcsd/known-hosts" -class PcsCliError(Exception): +class CliCommandError(Exception): """ - Parent exception for errors from running pcs CLI + Running pcs has failed """ def __init__( @@ -108,60 +108,37 @@ def kwargs(self) -> Dict[str, Any]: ) -class PcsCliRunError(PcsCliError): - """ - Running pcs has failed - """ - - -class PcsCliJsonError(PcsCliError): +class JsonParseError(Exception): """ - Pcs output cannot be decoded as a JSON + Unable to parse JSON data """ def __init__( self, - pcs_command: List[str], - rc: int, - stdout: str, - stderr: str, - json_error: str, + error: str, + data: str, + data_desc: str, + additional_info: Optional[str] = None, ): - # pylint: disable=too-many-arguments - # pylint 3.3 produces too-many-positional-arguments, but pylint 3.2 - # complies that it doesn't know such an option. So we need - # unknown-option-value to silence pylint 3.2. 
- # pylint: disable=unknown-option-value - # pylint: disable=too-many-positional-arguments - super().__init__(pcs_command, rc, stdout, stderr) - self.json_error = json_error - - @property - def kwargs(self) -> Dict[str, Any]: - result = super().kwargs - result.update(dict(json_error=self.json_error)) - return result - - -class PcsJsonParseError(Exception): - """ - Unable to parse JSON data - """ - - def __init__(self, error: str, data: str, data_desc: str): self.error = error self.data = data self.data_desc = data_desc + self.additional_info = additional_info @property def kwargs(self) -> Dict[str, Any]: """ Arguments given to the constructor """ - return dict(error=self.error, data=self.data, data_desc=self.data_desc) + return dict( + error=self.error, + data=self.data, + data_desc=self.data_desc, + additional_info=self.additional_info, + ) -class PcsJsonMissingKey(Exception): +class JsonMissingKey(Exception): """ A key is not present in pcs JSON output """ @@ -222,11 +199,13 @@ def call_pcs_cli(module: AnsibleModule, command: List[str]) -> Dict[str, Any]: environ_update=env, ) if rc != 0: - raise PcsCliRunError(full_command, rc, stdout, stderr) + raise CliCommandError(full_command, rc, stdout, stderr) try: return json.loads(stdout) except json.JSONDecodeError as e: - raise PcsCliJsonError(full_command, rc, stdout, stderr, str(e)) from e + raise JsonParseError( + str(e), stdout, " ".join(full_command), stderr + ) from e def export_start_on_boot(module: AnsibleModule) -> bool: @@ -295,7 +274,7 @@ def export_corosync_conf(module: AnsibleModule) -> Dict[str, Any]: ) ) except KeyError as e: - raise PcsJsonMissingKey( + raise JsonMissingKey( e.args[0], conf_dict, f"corosync configuration for node on index {index}", @@ -303,7 +282,7 @@ def export_corosync_conf(module: AnsibleModule) -> Dict[str, Any]: result["ha_cluster_node_options"] = node_list except KeyError as e: - raise PcsJsonMissingKey( + raise JsonMissingKey( e.args[0], conf_dict, "corosync configuration" ) from e return result @@ -337,9 +316,7 @@ def load_pcsd_known_hosts() -> Dict[str, str]: return result except json.JSONDecodeError as e: # cannot show actual data as they contain sensitive information - tokens - raise PcsJsonParseError( - str(e), "not logging data", "known hosts" - ) from e + raise JsonParseError(str(e), "not logging data", "known hosts") from e def merge_known_hosts( @@ -407,17 +384,17 @@ def main() -> None: else: ha_cluster_result["ha_cluster_cluster_present"] = False module.exit_json(**module_result) - except PcsJsonMissingKey as e: + except JsonMissingKey as e: module.fail_json( - msg=f"Missing key in pcs {e.data_desc} JSON output: {e.key}", - pcs_error=e.kwargs, + msg=f"Missing key {e.key} in pcs {e.data_desc} JSON output", + error_details=e.kwargs, ) - except PcsCliJsonError as e: + except JsonParseError as e: module.fail_json( - msg="Error while parsing pcs JSON output", pcs_error=e.kwargs + msg="Error while parsing pcs JSON output", error_details=e.kwargs ) - except PcsCliError as e: - module.fail_json(msg="Error while running pcs", pcs_error=e.kwargs) + except CliCommandError as e: + module.fail_json(msg="Error while running pcs", error_details=e.kwargs) if __name__ == "__main__": diff --git a/tests/unit/test_ha_cluster_info.py b/tests/unit/test_ha_cluster_info.py index 29ea63f9..be321c79 100644 --- a/tests/unit/test_ha_cluster_info.py +++ b/tests/unit/test_ha_cluster_info.py @@ -110,7 +110,7 @@ def test_pcs_error(self) -> None: "some stdout message", "some stderr message", ) - with 
self.assertRaises(ha_cluster_info.PcsCliRunError) as cm: + with self.assertRaises(ha_cluster_info.CliCommandError) as cm: ha_cluster_info.call_pcs_cli( self.module_mock, ["cluster", "config"] ) @@ -135,18 +135,17 @@ def test_json_error(self) -> None: "not a json", "", ) - with self.assertRaises(ha_cluster_info.PcsCliJsonError) as cm: + with self.assertRaises(ha_cluster_info.JsonParseError) as cm: ha_cluster_info.call_pcs_cli( self.module_mock, ["cluster", "config"] ) self.assertEqual( cm.exception.kwargs, dict( - json_error="Expecting value: line 1 column 1 (char 0)", - pcs_command=["pcs", "cluster", "config"], - stdout="not a json", - stderr="", - rc=0, + data="not a json", + data_desc="pcs cluster config", + error="Expecting value: line 1 column 1 (char 0)", + additional_info="", ), ) self.module_mock.run_command.assert_called_once_with( @@ -183,7 +182,7 @@ def setUp(self) -> None: def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: self.module_mock.run_command.reset_mock() self.module_mock.run_command.return_value = (0, json.dumps(data), "") - with self.assertRaises(ha_cluster_info.PcsJsonMissingKey) as cm: + with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: ha_cluster_info.export_corosync_conf(self.module_mock) self.assertEqual( cm.exception.kwargs, @@ -509,7 +508,7 @@ def assert_missing_key_nodes( ) -> None: self.module_mock.run_command.reset_mock() self.module_mock.run_command.return_value = (0, json.dumps(data), "") - with self.assertRaises(ha_cluster_info.PcsJsonMissingKey) as cm: + with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: ha_cluster_info.export_corosync_conf(self.module_mock) self.assertEqual( cm.exception.kwargs, @@ -570,7 +569,7 @@ def test_json_error(self, mock_exists: mock.Mock) -> None: with mock.patch( "ha_cluster_info.open", mock.mock_open(read_data=mock_data) ) as mock_open: - with self.assertRaises(ha_cluster_info.PcsJsonParseError) as cm: + with self.assertRaises(ha_cluster_info.JsonParseError) as cm: ha_cluster_info.load_pcsd_known_hosts() self.assertEqual( cm.exception.kwargs, @@ -578,6 +577,7 @@ def test_json_error(self, mock_exists: mock.Mock) -> None: data="not logging data", data_desc="known hosts", error="Expecting value: line 1 column 1 (char 0)", + additional_info=None, ), ) mock_open.assert_called_once_with( From b613dcd82069ef584f79e07ff6e3279fb11ce3c3 Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Thu, 3 Oct 2024 15:18:31 +0200 Subject: [PATCH 3/8] refactor: ha_cluster_info: split data loading and transformation Implementing changes proposed in code review --- library/ha_cluster_info.py | 282 ++++++---- tests/unit/test_ha_cluster_info.py | 837 ++++++++++++++++------------- 2 files changed, 646 insertions(+), 473 deletions(-) diff --git a/library/ha_cluster_info.py b/library/ha_cluster_info.py index 931a94da..a3aeba62 100644 --- a/library/ha_cluster_info.py +++ b/library/ha_cluster_info.py @@ -74,7 +74,7 @@ import json import os.path -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Tuple from ansible.module_utils.basic import AnsibleModule @@ -82,6 +82,17 @@ KNOWN_HOSTS_PATH = "/var/lib/pcsd/known-hosts" +CommandRunner = Callable[ + # parameters: args, environ_update + # environ_update should be a keyword argument, but they are not possible in + # Callable. 
typing.Protocol would have to be used to type that, but that is + # not available in Python 3.6 + [List[str], Optional[Dict[str, str]]], + # return value: rc, stdout, stderr + Tuple[int, str, str], +] + + class CliCommandError(Exception): """ Running pcs has failed @@ -156,14 +167,10 @@ def kwargs(self) -> Dict[str, Any]: return dict(key=self.key, data=self.data, data_desc=self.data_desc) -def dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Convert a dict to a list of dicts with keys 'name' and 'value' - """ - return [dict(name=name, value=value) for name, value in input_dict.items()] +# functions loading data from cluster -def is_service_enabled(module: AnsibleModule, service: str) -> bool: +def is_service_enabled(run_command: CommandRunner, service: str) -> bool: """ Check whether a specified service is enabled in the OS @@ -174,15 +181,24 @@ def is_service_enabled(module: AnsibleModule, service: str) -> bool: "LC_ALL": "C", } # wokeignore:rule=dummy - rc, dummy_stdout, dummy_stderr = module.run_command( - ["systemctl", "is-enabled", f"{service}.service"], - check_rc=False, - environ_update=env, + rc, dummy_stdout, dummy_stderr = run_command( + ["systemctl", "is-enabled", f"{service}.service"], env ) return rc == 0 -def call_pcs_cli(module: AnsibleModule, command: List[str]) -> Dict[str, Any]: +def load_start_on_boot(run_command: CommandRunner) -> bool: + """ + Detect wheter a cluster is configured to start on boot + """ + return is_service_enabled(run_command, "corosync") or is_service_enabled( + run_command, "pacemaker" + ) + + +def call_pcs_cli( + run_command: CommandRunner, command: List[str] +) -> Dict[str, Any]: """ Run pcs CLI with the specified command, transform resulting JSON into a dict @@ -193,11 +209,7 @@ def call_pcs_cli(module: AnsibleModule, command: List[str]) -> Dict[str, Any]: "LC_ALL": "C", } full_command = ["pcs"] + command - rc, stdout, stderr = module.run_command( - full_command, - check_rc=False, - environ_update=env, - ) + rc, stdout, stderr = run_command(full_command, env) if rc != 0: raise CliCommandError(full_command, rc, stdout, stderr) try: @@ -208,86 +220,15 @@ def call_pcs_cli(module: AnsibleModule, command: List[str]) -> Dict[str, Any]: ) from e -def export_start_on_boot(module: AnsibleModule) -> bool: +def load_corosync_conf(run_command: CommandRunner) -> Dict[str, Any]: """ - Detect wheter a cluster is configured to start on boot + Get corosync configuration from pcs """ - return is_service_enabled(module, "corosync") or is_service_enabled( - module, "pacemaker" + return call_pcs_cli( + run_command, ["cluster", "config", "--output-format=json"] ) -def export_corosync_conf(module: AnsibleModule) -> Dict[str, Any]: - """ - Export corosync configuration - """ - conf_dict = call_pcs_cli( - module, ["cluster", "config", "--output-format=json"] - ) - result: Dict[str, Any] = dict() - try: - result["ha_cluster_cluster_name"] = conf_dict["cluster_name"] - - transport = dict(type=conf_dict["transport"].lower()) - if conf_dict["transport_options"]: - transport["options"] = dict_to_nv_list( - conf_dict["transport_options"] - ) - if conf_dict["links_options"]: - link_list = [] - for link_dict in conf_dict["links_options"].values(): - # linknumber is an index in links_options, but it is present in - # link_dict as well - link_list.append(dict_to_nv_list(link_dict)) - transport["links"] = link_list - if conf_dict["compression_options"]: - transport["compression"] = dict_to_nv_list( - conf_dict["compression_options"] - ) - if 
conf_dict["crypto_options"]: - transport["crypto"] = dict_to_nv_list(conf_dict["crypto_options"]) - result["ha_cluster_transport"] = transport - - if conf_dict["totem_options"]: - result["ha_cluster_totem"] = dict( - options=dict_to_nv_list(conf_dict["totem_options"]) - ) - if conf_dict["quorum_options"]: - result["ha_cluster_quorum"] = dict( - options=dict_to_nv_list(conf_dict["quorum_options"]) - ) - - if conf_dict["nodes"]: - node_list = [] - for index, node_dict in enumerate(conf_dict["nodes"]): - try: - node_list.append( - dict( - node_name=node_dict["name"], - corosync_addresses=[ - addr_dict["addr"] - for addr_dict in sorted( - node_dict["addrs"], - key=lambda item: item["link"], - ) - ], - ) - ) - except KeyError as e: - raise JsonMissingKey( - e.args[0], - conf_dict, - f"corosync configuration for node on index {index}", - ) from e - result["ha_cluster_node_options"] = node_list - - except KeyError as e: - raise JsonMissingKey( - e.args[0], conf_dict, "corosync configuration" - ) from e - return result - - def load_pcsd_known_hosts() -> Dict[str, str]: """ Load pcsd known hosts and return dict node_name: node_address @@ -319,25 +260,121 @@ def load_pcsd_known_hosts() -> Dict[str, str]: raise JsonParseError(str(e), "not logging data", "known hosts") from e -def merge_known_hosts( - result: Dict[str, Any], node_addr: Dict[str, str] -) -> None: +# functions transforming data from pcs format to role format + + +def dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Convert a dict to a list of dicts with keys 'name' and 'value' + """ + return [dict(name=name, value=value) for name, value in input_dict.items()] + + +def export_corosync_options( + corosync_conf_dict: Dict[str, Any] +) -> Dict[str, Any]: """ - Merge pcs node addresses into ha_cluster_node_options key + Transform corosync config from pcs format to role format excluding nodes - result -- structure with exported corosync configuration - node_addr -- node_name: node_addr map loaded from known hosts + corosync_conf_dict -- corosync config structure provided by pcs """ - # node_addr may contain records for nodes which are not part of the cluster - # being exported. These are ignored. We are only interested in pcs - # addresses of nodes forming the exported cluster. 
- if not node_addr: - return - if "ha_cluster_node_options" not in result: - return - for node_options in result["ha_cluster_node_options"]: - if node_options["node_name"] in node_addr: - node_options["pcs_address"] = node_addr[node_options["node_name"]] + result: Dict[str, Any] = dict() + try: + result["ha_cluster_cluster_name"] = corosync_conf_dict["cluster_name"] + + transport = dict(type=corosync_conf_dict["transport"].lower()) + if corosync_conf_dict["transport_options"]: + transport["options"] = dict_to_nv_list( + corosync_conf_dict["transport_options"] + ) + if corosync_conf_dict["links_options"]: + link_list = [] + for link_dict in corosync_conf_dict["links_options"].values(): + # linknumber is an index in links_options, but it is present in + # link_dict as well + link_list.append(dict_to_nv_list(link_dict)) + transport["links"] = link_list + if corosync_conf_dict["compression_options"]: + transport["compression"] = dict_to_nv_list( + corosync_conf_dict["compression_options"] + ) + if corosync_conf_dict["crypto_options"]: + transport["crypto"] = dict_to_nv_list( + corosync_conf_dict["crypto_options"] + ) + result["ha_cluster_transport"] = transport + + if corosync_conf_dict["totem_options"]: + result["ha_cluster_totem"] = dict( + options=dict_to_nv_list(corosync_conf_dict["totem_options"]) + ) + + if corosync_conf_dict["quorum_options"]: + result["ha_cluster_quorum"] = dict( + options=dict_to_nv_list(corosync_conf_dict["quorum_options"]) + ) + except KeyError as e: + raise JsonMissingKey( + e.args[0], corosync_conf_dict, "corosync configuration" + ) from e + return result + + +def export_cluster_nodes( + corosync_conf_nodes: List[Dict[str, Any]], pcs_node_addr: Dict[str, str] +) -> List[Dict[str, Any]]: + """ + Transform node configuration from pcs format to role format + + corosync_conf_dict -- corosync config structure provided by pcs + pcs_node_addr -- dict holding pcs address for cluster nodes + """ + node_list: List[Dict[str, Any]] = [] + if not corosync_conf_nodes: + return node_list + for index, node_dict in enumerate(corosync_conf_nodes): + # corosync node configuration + try: + one_node = dict( + node_name=node_dict["name"], + corosync_addresses=[ + addr_dict["addr"] + for addr_dict in sorted( + node_dict["addrs"], + key=lambda item: item["link"], + ) + ], + ) + except KeyError as e: + raise JsonMissingKey( + e.args[0], + dict(nodes=corosync_conf_nodes), + f"corosync configuration for node on index {index}", + ) from e + # pcs node configuration + if one_node["node_name"] in pcs_node_addr: + one_node["pcs_address"] = pcs_node_addr[one_node["node_name"]] + # finish one node export + node_list.append(one_node) + return node_list + + +# ansible module tools and top layer functions + + +def get_cmd_runner(module: AnsibleModule) -> CommandRunner: + """ + Provide a function responsible for running external processes + """ + + def runner( + args: List[str], environ_update: Optional[Dict[str, str]] = None + ) -> Tuple[int, str, str]: + return module.run_command( + args, check_rc=False, environ_update=environ_update + ) + + return runner def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]: @@ -348,20 +385,41 @@ def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]: # put it together from separate parts provided by pcs. Some parts are only # available in recent pcs versions. Check pcs capabilities. 
result: dict[str, Any] = dict() + cmd_runner = get_cmd_runner(module) - result["ha_cluster_start_on_boot"] = export_start_on_boot(module) + result["ha_cluster_start_on_boot"] = load_start_on_boot(cmd_runner) # Corosync config is availabe via CLI since pcs-0.10.8, via API v2 since # pcs-0.12.0 and pcs-0.11.9. For old pcs versions, CLI must be used, and # there is no benefit in implementing access via API on top of that. # No need to check pcs capabilities. If this is not supported by pcs, # exporting anything else is pointless (and not supported by pcs anyway). - result.update(**export_corosync_conf(module)) - + corosync_conf_pcs = load_corosync_conf(cmd_runner) # known-hosts file is available since pcs-0.10, but is not exported by pcs # in any version. # No need to check pcs capabilities. - merge_known_hosts(result, load_pcsd_known_hosts()) + known_hosts_pcs = load_pcsd_known_hosts() + + # Convert corosync config to role format + corosync_conf_role = export_corosync_options(corosync_conf_pcs) + for key in ( + "ha_cluster_cluster_name", + "ha_cluster_transport", + "ha_cluster_totem", + "ha_cluster_quorum", + ): + if key in corosync_conf_role: + result[key] = corosync_conf_role[key] + + # Convert cluster definition to role format + try: + result["ha_cluster_node_options"] = export_cluster_nodes( + corosync_conf_pcs["nodes"], known_hosts_pcs + ) + except KeyError as e: + raise JsonMissingKey( + e.args[0], corosync_conf_pcs, "corosync configuration" + ) from e return result diff --git a/tests/unit/test_ha_cluster_info.py b/tests/unit/test_ha_cluster_info.py index be321c79..3ef13db6 100644 --- a/tests/unit/test_ha_cluster_info.py +++ b/tests/unit/test_ha_cluster_info.py @@ -10,9 +10,8 @@ import json import sys -from copy import deepcopy from importlib import import_module -from typing import Any, Dict +from typing import Any, Dict, List from unittest import TestCase, mock sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( @@ -21,99 +20,87 @@ import ha_cluster_info - -class DictToNvList(TestCase): - def test_no_item(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict()), - [], - ) - - def test_one_item(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict(one="1")), - [dict(name="one", value="1")], - ) - - def test_two_items(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict(one="1", two="2")), - [dict(name="one", value="1"), dict(name="two", value="2")], - ) +# functions loading data from cluster class IsServiceEnabled(TestCase): def setUp(self) -> None: - self.module_mock = mock.Mock() - self.module_mock.run_command = mock.Mock() + self.runner_mock = mock.Mock() def test_is_enabled(self) -> None: - self.module_mock.run_command.return_value = (0, "enabled", "") + self.runner_mock.return_value = (0, "enabled", "") self.assertTrue( - ha_cluster_info.is_service_enabled(self.module_mock, "corosync") + ha_cluster_info.is_service_enabled(self.runner_mock, "corosync") ) - self.module_mock.run_command.assert_called_once_with( + self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "corosync.service"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) def test_is_disabled(self) -> None: - self.module_mock.run_command.return_value = (1, "disabled", "") + self.runner_mock.return_value = (1, "disabled", "") self.assertFalse( - ha_cluster_info.is_service_enabled(self.module_mock, "pacemaker") + ha_cluster_info.is_service_enabled(self.runner_mock, "pacemaker") ) - 
self.module_mock.run_command.assert_called_once_with( + self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "pacemaker.service"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) def test_unexpected_output(self) -> None: - self.module_mock.run_command.return_value = (4, "not-found", "") + self.runner_mock.return_value = (4, "not-found", "") self.assertFalse( - ha_cluster_info.is_service_enabled(self.module_mock, "pcmk") + ha_cluster_info.is_service_enabled(self.runner_mock, "pcmk") ) - self.module_mock.run_command.assert_called_once_with( + self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "pcmk.service"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) -class CallPcsCli(TestCase): - def setUp(self) -> None: - self.module_mock = mock.Mock() - self.module_mock.run_command = mock.Mock() +class LoadStartOnBoot(TestCase): + @mock.patch("ha_cluster_info.is_service_enabled") + def test_main(self, mock_is_enabled: mock.Mock) -> None: + runner_mock = mock.Mock() + mock_is_enabled.side_effect = [False, False] + self.assertFalse(ha_cluster_info.load_start_on_boot(runner_mock)) + mock_is_enabled.side_effect = [True, False] + self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) + + mock_is_enabled.side_effect = [False, True] + self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) + + mock_is_enabled.side_effect = [True, True] + self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) + + +class CallPcsCli(TestCase): def test_success(self) -> None: - self.module_mock.run_command.return_value = ( + runner_mock = mock.Mock() + runner_mock.return_value = ( 0, """{"json": "test data", "foo": "bar"}""", "", ) self.assertEqual( - ha_cluster_info.call_pcs_cli( - self.module_mock, ["cluster", "config"] - ), + ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]), dict(json="test data", foo="bar"), ) - self.module_mock.run_command.assert_called_once_with( + runner_mock.assert_called_once_with( ["pcs", "cluster", "config"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) def test_pcs_error(self) -> None: - self.module_mock.run_command.return_value = ( + runner_mock = mock.Mock() + runner_mock.return_value = ( 1, "some stdout message", "some stderr message", ) with self.assertRaises(ha_cluster_info.CliCommandError) as cm: - ha_cluster_info.call_pcs_cli( - self.module_mock, ["cluster", "config"] - ) + ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]) self.assertEqual( cm.exception.kwargs, dict( @@ -123,22 +110,20 @@ def test_pcs_error(self) -> None: rc=1, ), ) - self.module_mock.run_command.assert_called_once_with( + runner_mock.assert_called_once_with( ["pcs", "cluster", "config"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) def test_json_error(self) -> None: - self.module_mock.run_command.return_value = ( + runner_mock = mock.Mock() + runner_mock.return_value = ( 0, "not a json", "", ) with self.assertRaises(ha_cluster_info.JsonParseError) as cm: - ha_cluster_info.call_pcs_cli( - self.module_mock, ["cluster", "config"] - ) + ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]) self.assertEqual( cm.exception.kwargs, dict( @@ -148,42 +133,176 @@ def test_json_error(self) -> None: additional_info="", ), ) - self.module_mock.run_command.assert_called_once_with( + runner_mock.assert_called_once_with( ["pcs", "cluster", "config"], - check_rc=False, - environ_update={"LC_ALL": "C"}, + {"LC_ALL": "C"}, ) -class 
ExportStartOnBoot(TestCase): - @mock.patch("ha_cluster_info.is_service_enabled") - def test_main(self, mock_is_enabled: mock.Mock) -> None: - module = mock.Mock() - mock_is_enabled.side_effect = [False, False] - self.assertFalse(ha_cluster_info.export_start_on_boot(module)) +class LoadCorosyncConf(TestCase): + pcs_command = ["pcs", "cluster", "config", "--output-format=json"] + env = {"LC_ALL": "C"} - mock_is_enabled.side_effect = [True, False] - self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + def test_success(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (0, """{"some": "json"}""", "") + self.assertEqual( + ha_cluster_info.load_corosync_conf(runner_mock), dict(some="json") + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) - mock_is_enabled.side_effect = [False, True] - self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + def test_pcs_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (1, "stdout message", "stderr message") + with self.assertRaises(ha_cluster_info.CliCommandError) as cm: + ha_cluster_info.load_corosync_conf(runner_mock) + self.assertEqual( + cm.exception.kwargs, + dict( + pcs_command=self.pcs_command, + stdout="stdout message", + stderr="stderr message", + rc=1, + ), + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) - mock_is_enabled.side_effect = [True, True] - self.assertTrue(ha_cluster_info.export_start_on_boot(module)) + def test_json_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (0, "not a json", "") + with self.assertRaises(ha_cluster_info.JsonParseError) as cm: + ha_cluster_info.load_corosync_conf(runner_mock) + self.assertEqual( + cm.exception.kwargs, + dict( + data="not a json", + data_desc=" ".join(self.pcs_command), + error="Expecting value: line 1 column 1 (char 0)", + additional_info="", + ), + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) + + +class LoadPcsdKnownHosts(TestCase): + file_path = "/var/lib/pcsd/known-hosts" + + @mock.patch("ha_cluster_info.os.path.exists") + def test_file_not_present(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = False + self.assertEqual(ha_cluster_info.load_pcsd_known_hosts(), dict()) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_info.os.path.exists") + def test_json_error(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "not a json" + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + with self.assertRaises(ha_cluster_info.JsonParseError) as cm: + ha_cluster_info.load_pcsd_known_hosts() + self.assertEqual( + cm.exception.kwargs, + dict( + data="not logging data", + data_desc="known hosts", + error="Expecting value: line 1 column 1 (char 0)", + additional_info=None, + ), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_info.os.path.exists") + def test_json_empty(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "{}" + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + self.assertEqual( + ha_cluster_info.load_pcsd_known_hosts(), + dict(), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + 
@mock.patch("ha_cluster_info.os.path.exists") + def test_extract(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = json.dumps( + dict( + known_hosts=dict( + node1=dict(), + node2=dict(dest_list=[]), + node3=dict(dest_list=[dict()]), + node4=dict(dest_list=[dict(addr="node4A")]), + node5=dict(dest_list=[dict(port="10005")]), + node6=dict(dest_list=[dict(addr="node6A", port="10006")]), + node7=dict( + dest_list=[dict(addr="2001:db8::7", port="10007")] + ), + node8=dict( + dest_list=[ + dict(addr="192.0.2.8", port="10008"), + dict(addr="node8B"), + ] + ), + ) + ) + ) + with mock.patch( + "ha_cluster_info.open", mock.mock_open(read_data=mock_data) + ) as mock_open: + self.assertEqual( + ha_cluster_info.load_pcsd_known_hosts(), + dict( + node4="node4A", + node6="node6A:10006", + node7="[2001:db8::7]:10007", + node8="192.0.2.8:10008", + ), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + +# functions transforming data from pcs format to role format + + +class DictToNvList(TestCase): + def test_no_item(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict()), + [], + ) + + def test_one_item(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict(one="1")), + [dict(name="one", value="1")], + ) + + def test_two_items(self) -> None: + self.assertEqual( + ha_cluster_info.dict_to_nv_list(dict(one="1", two="2")), + [dict(name="one", value="1"), dict(name="two", value="2")], + ) class ExportCorosyncConf(TestCase): maxDiff = None - def setUp(self) -> None: - self.module_mock = mock.Mock() - self.module_mock.run_command = mock.Mock() - def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: - self.module_mock.run_command.reset_mock() - self.module_mock.run_command.return_value = (0, json.dumps(data), "") with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: - ha_cluster_info.export_corosync_conf(self.module_mock) + ha_cluster_info.export_corosync_options(data) self.assertEqual( cm.exception.kwargs, dict(data=data, key=key, data_desc="corosync configuration"), @@ -241,27 +360,6 @@ def test_missing_keys(self) -> None: ), "quorum_options", ) - self.assert_missing_key( - dict( - cluster_name="x", - transport="x", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - ), - "nodes", - ) - - def call_pcs(self, pcs_data: Dict[str, Any]) -> Dict[str, Any]: - self.module_mock.run_command.return_value = ( - 0, - json.dumps(pcs_data), - "", - ) - return ha_cluster_info.export_corosync_conf(self.module_mock) def test_minimal(self) -> None: pcs_data = dict( @@ -273,9 +371,8 @@ def test_minimal(self) -> None: crypto_options=dict(), totem_options=dict(), quorum_options=dict(), - nodes=[], ) - role_data = self.call_pcs(pcs_data) + role_data = ha_cluster_info.export_corosync_options(pcs_data) self.assertEqual( role_data, dict( @@ -294,9 +391,8 @@ def test_simple_options_mirroring(self) -> None: crypto_options=dict(crypto1="g", crypto2="h"), quorum_options=dict(quorum1="i", quorum2="j"), links_options=dict(), - nodes=[], ) - role_data = self.call_pcs(pcs_data) + role_data = ha_cluster_info.export_corosync_options(pcs_data) self.assertEqual( role_data, dict( @@ -341,9 +437,8 @@ def test_one_link(self) -> None: crypto_options=dict(), totem_options=dict(), quorum_options=dict(), - nodes=[], ) - role_data = self.call_pcs(pcs_data) + 
role_data = ha_cluster_info.export_corosync_options(pcs_data) self.assertEqual( role_data, dict( @@ -374,9 +469,8 @@ def test_more_links(self) -> None: crypto_options=dict(), totem_options=dict(), quorum_options=dict(), - nodes=[], ) - role_data = self.call_pcs(pcs_data) + role_data = ha_cluster_info.export_corosync_options(pcs_data) self.assertEqual( role_data, dict( @@ -401,80 +495,172 @@ def test_more_links(self) -> None: ), ) - def test_nodes_one_link(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - nodes=[ - dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", link="0", type="IPv4")], - ), - dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], - ), - ], + +class ExportClusterNodes(TestCase): + maxDiff = None + + def assert_missing_key( + self, data: List[Dict[str, Any]], key: str, index: str = "0" + ) -> None: + with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: + ha_cluster_info.export_cluster_nodes(data, {}) + self.assertEqual( + cm.exception.kwargs, + dict( + data=dict(nodes=data), + key=key, + data_desc=f"corosync configuration for node on index {index}", + ), ) - role_data = self.call_pcs(pcs_data) + + def test_no_nodes(self) -> None: + self.assertEqual(ha_cluster_info.export_cluster_nodes([], {}), []) + + def test_corosync_nodes_missing_keys(self) -> None: + corosync_data: List[Dict[str, Any]] = [dict()] + self.assert_missing_key(corosync_data, "name") + + corosync_data = [dict(name="nodename")] + self.assert_missing_key(corosync_data, "addrs") + + corosync_data = [dict(name="nodename", addrs=[dict()])] + self.assert_missing_key(corosync_data, "link") + + corosync_data = [dict(name="nodename", addrs=[dict(link="0")])] + self.assert_missing_key(corosync_data, "addr") + + def test_corosync_nodes_one_link(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ] + role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) self.assertEqual( role_data, + [ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ) + + def test_corosync_nodes_multiple_links(self) -> None: + corosync_data = [ dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), + name="node1", + nodeid=1, + addrs=[ + dict(addr="node1addr1", link="0", type="IPv4"), + dict(addr="node1addr2", link="1", type="IPv6"), ], ), - ) - - def test_nodes_multiple_links(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - nodes=[ + dict( + name="node2", + nodeid=2, + addrs=[ + dict(addr="node2addr1", link="0", type="IPv4"), + dict(addr="node2addr2", link="1", type="IPv6"), + ], + ), + ] + role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + role_data, + [ dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", 
link="0", type="IPv4")], + node_name="node1", + corosync_addresses=["node1addr1", "node1addr2"], ), dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], + node_name="node2", + corosync_addresses=["node2addr1", "node2addr2"], ), ], ) - role_data = self.call_pcs(pcs_data) + + def test_corosync_nodes_no_address(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[], + ), + ] + role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + role_data, + [ + dict(node_name="node1", corosync_addresses=[]), + ], + ) + + def test_pcs_nodes_no_cluster_nodes(self) -> None: + corosync_data: List[Dict[str, Any]] = [] + pcs_data = dict(node1="node1A") + role_data = ha_cluster_info.export_cluster_nodes( + corosync_data, pcs_data + ) self.assertEqual( role_data, + [], + ) + + def test_pcs_nodes(self) -> None: + corosync_data = [ dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), - ], + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="FQDN")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], ), + ] + pcs_data = dict(node1="node1A", node3="node3A") + role_data = ha_cluster_info.export_cluster_nodes( + corosync_data, pcs_data + ) + self.assertEqual( + role_data, + [ + dict( + node_name="node1", + corosync_addresses=["node1addr"], + pcs_address="node1A", + ), + dict( + node_name="node2", + corosync_addresses=["node2addr"], + ), + ], ) - def test_nodes_no_address(self) -> None: - pcs_data = dict( + +# ansible module tools and top layer functions + + +class ExportClusterConfiguration(TestCase): + maxDiff = None + + @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + def test_export_minimal( + self, + mock_load_pcsd_known_hosts: mock.Mock, + ) -> None: + module_mock = mock.Mock() + module_mock.run_command = mock.Mock() + runner_mock = module_mock.run_command + + corosync_conf_data = dict( cluster_name="my-cluster", transport="KNET", transport_options=dict(), @@ -487,255 +673,184 @@ def test_nodes_no_address(self) -> None: dict( name="node1", nodeid=1, - addrs=[], + addrs=[dict(addr="node1addr", link="0", type="IPv4")], ), ], ) - role_data = self.call_pcs(pcs_data) + runner_mock.side_effect = [ + (0, "", ""), + (0, json.dumps(corosync_conf_data), ""), + ] + + mock_load_pcsd_known_hosts.return_value = dict() + self.assertEqual( - role_data, + ha_cluster_info.export_cluster_configuration(module_mock), dict( + ha_cluster_start_on_boot=True, ha_cluster_cluster_name="my-cluster", ha_cluster_transport=dict(type="knet"), ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=[]), + dict( + node_name="node1", + corosync_addresses=["node1addr"], + ), ], ), ) - def assert_missing_key_nodes( - self, data: Dict[str, Any], key: str, index: str = "0" - ) -> None: - self.module_mock.run_command.reset_mock() - self.module_mock.run_command.return_value = (0, json.dumps(data), "") - with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: - ha_cluster_info.export_corosync_conf(self.module_mock) - self.assertEqual( - cm.exception.kwargs, - dict( - data=data, - key=key, - data_desc=f"corosync configuration for node on index {index}", + common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) + expected_calls = [ + mock.call( + ["systemctl", 
"is-enabled", "corosync.service"], **common_args ), - ) + mock.call( + ["pcs", "cluster", "config", "--output-format=json"], + **common_args, + ), + ] + runner_mock.assert_has_calls(expected_calls) + self.assertEqual(runner_mock.call_count, len(expected_calls)) - def test_nodes_missing_keys(self) -> None: - pcs_data = dict( + mock_load_pcsd_known_hosts.assert_called_once_with() + + @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + def test_export( + self, + mock_load_pcsd_known_hosts: mock.Mock, + ) -> None: + module_mock = mock.Mock() + module_mock.run_command = mock.Mock() + runner_mock = module_mock.run_command + + corosync_conf_data = dict( cluster_name="my-cluster", transport="KNET", - transport_options=dict(), + transport_options=dict(transport_key="transport_val"), links_options=dict(), compression_options=dict(), crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - nodes=[dict(foo="bar")], - ) - - pcs_data["nodes"] = [ - dict(), - ] - self.assert_missing_key_nodes(pcs_data, "name") - - pcs_data["nodes"] = [ - dict(name="nodename"), - ] - self.assert_missing_key_nodes(pcs_data, "addrs") - - pcs_data["nodes"] = [ - dict(name="nodename", addrs=[dict()]), - ] - self.assert_missing_key_nodes(pcs_data, "link") - - pcs_data["nodes"] = [ - dict(name="nodename", addrs=[dict(link="0")]), - ] - self.assert_missing_key_nodes(pcs_data, "addr") - - -class LoadPcsdKnownHosts(TestCase): - file_path = "/var/lib/pcsd/known-hosts" - - @mock.patch("ha_cluster_info.os.path.exists") - def test_file_not_present(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = False - self.assertEqual(ha_cluster_info.load_pcsd_known_hosts(), dict()) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_json_error(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = "not a json" - with mock.patch( - "ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - with self.assertRaises(ha_cluster_info.JsonParseError) as cm: - ha_cluster_info.load_pcsd_known_hosts() - self.assertEqual( - cm.exception.kwargs, + totem_options=dict(totem_key="totem_val"), + quorum_options=dict(quorum_key="quorum_val"), + nodes=[ dict( - data="not logging data", - data_desc="known hosts", - error="Expecting value: line 1 column 1 (char 0)", - additional_info=None, + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], ), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_json_empty(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = "{}" - with mock.patch( - "ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - self.assertEqual( - ha_cluster_info.load_pcsd_known_hosts(), - dict(), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_extract(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = json.dumps( - dict( - known_hosts=dict( - node1=dict(), - node2=dict(dest_list=[]), - node3=dict(dest_list=[dict()]), - node4=dict(dest_list=[dict(addr="node4A")]), - node5=dict(dest_list=[dict(port="10005")]), - node6=dict(dest_list=[dict(addr="node6A", port="10006")]), - node7=dict( - 
dest_list=[dict(addr="2001:db8::7", port="10007")] - ), - node8=dict( - dest_list=[ - dict(addr="192.0.2.8", port="10008"), - dict(addr="node8B"), - ] - ), - ) - ) - ) - with mock.patch( - "ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - self.assertEqual( - ha_cluster_info.load_pcsd_known_hosts(), dict( - node4="node4A", - node6="node6A:10006", - node7="[2001:db8::7]:10007", - node8="192.0.2.8:10008", + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="IPv4")], ), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - -class MergeKnownHosts(TestCase): - maxDiff = None - - def test_no_known_hosts(self) -> None: - data = dict( - ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), ], ) - expected_result = deepcopy(data) - ha_cluster_info.merge_known_hosts(data, dict()) - self.assertEqual(data, expected_result) - - def test_no_node_options(self) -> None: - data: Dict[str, Any] = dict() - ha_cluster_info.merge_known_hosts(data, dict(node1="node1A")) - self.assertEqual(data, dict()) + runner_mock.side_effect = [ + (0, "", ""), + (0, json.dumps(corosync_conf_data), ""), + ] - def test_merge(self) -> None: - data = dict( - ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), - dict(node_name="node4", pcs_address="node4addr"), - ], - other_key="is not touched", + mock_load_pcsd_known_hosts.return_value = dict( + node1="node1pcs", + node2="node2pcs", ) - known_hosts = dict(node1="node1A", node3="node3A", node4="node4A") - ha_cluster_info.merge_known_hosts(data, known_hosts) + self.assertEqual( - data, + ha_cluster_info.export_cluster_configuration(module_mock), dict( + ha_cluster_start_on_boot=True, + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + options=[dict(name="transport_key", value="transport_val")], + ), + ha_cluster_totem=dict( + options=[dict(name="totem_key", value="totem_val")], + ), + ha_cluster_quorum=dict( + options=[dict(name="quorum_key", value="quorum_val")], + ), ha_cluster_node_options=[ dict( node_name="node1", corosync_addresses=["node1addr"], - pcs_address="node1A", + pcs_address="node1pcs", ), dict( node_name="node2", corosync_addresses=["node2addr"], + pcs_address="node2pcs", ), - dict(node_name="node4", pcs_address="node4A"), ], - other_key="is not touched", ), ) + common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) + expected_calls = [ + mock.call( + ["systemctl", "is-enabled", "corosync.service"], **common_args + ), + mock.call( + ["pcs", "cluster", "config", "--output-format=json"], + **common_args, + ), + ] + runner_mock.assert_has_calls(expected_calls) + self.assertEqual(runner_mock.call_count, len(expected_calls)) -class ExportClusterConfiguration(TestCase): - maxDiff = None + mock_load_pcsd_known_hosts.assert_called_once_with() @mock.patch("ha_cluster_info.load_pcsd_known_hosts") - @mock.patch("ha_cluster_info.export_corosync_conf") - @mock.patch("ha_cluster_info.export_start_on_boot") - def test_export( + def test_missing_corosync_nodes_key( self, - mock_export_start_on_boot: mock.Mock, - mock_export_corosync_conf: mock.Mock, mock_load_pcsd_known_hosts: mock.Mock, ) -> None: module_mock = mock.Mock() - mock_export_start_on_boot.return_value = True - mock_export_corosync_conf.return_value 
= dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ha_cluster_node_options=[ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), - ], + module_mock.run_command = mock.Mock() + runner_mock = module_mock.run_command + + corosync_conf_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), ) + runner_mock.side_effect = [ + (0, "", ""), + (0, json.dumps(corosync_conf_data), ""), + ] + mock_load_pcsd_known_hosts.return_value = dict( node1="node1pcs", node2="node2pcs", ) + + with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: + ha_cluster_info.export_cluster_configuration(module_mock) self.assertEqual( - ha_cluster_info.export_cluster_configuration(module_mock), + cm.exception.kwargs, dict( - ha_cluster_start_on_boot=True, - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ha_cluster_node_options=[ - dict( - node_name="node1", - corosync_addresses=["node1addr"], - pcs_address="node1pcs", - ), - dict( - node_name="node2", - corosync_addresses=["node2addr"], - pcs_address="node2pcs", - ), - ], + data=corosync_conf_data, + key="nodes", + data_desc="corosync configuration", ), ) + + common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) + expected_calls = [ + mock.call( + ["systemctl", "is-enabled", "corosync.service"], **common_args + ), + mock.call( + ["pcs", "cluster", "config", "--output-format=json"], + **common_args, + ), + ] + runner_mock.assert_has_calls(expected_calls) + self.assertEqual(runner_mock.call_count, len(expected_calls)) + + mock_load_pcsd_known_hosts.assert_called_once_with() From 23a0d5e8e48cedab5c1e05c18bd3a147297dae2c Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Fri, 4 Oct 2024 12:07:08 +0200 Subject: [PATCH 4/8] refactor: ha_cluster_info: split to several modules Implementing changes proposed in code review --- .sanity-ansible-ignore-2.13.txt | 4 + .sanity-ansible-ignore-2.14.txt | 8 + .sanity-ansible-ignore-2.15.txt | 8 + .sanity-ansible-ignore-2.16.txt | 4 + library/ha_cluster_info.py | 311 +-------- module_utils/ha_cluster_lsr/info/__init__.py | 0 module_utils/ha_cluster_lsr/info/exporter.py | 128 ++++ module_utils/ha_cluster_lsr/info/loader.py | 182 ++++++ tests/unit/test_ha_cluster_info.py | 636 +------------------ tests/unit/test_info_exporter.py | 384 +++++++++++ tests/unit/test_info_loader.py | 273 ++++++++ 11 files changed, 1009 insertions(+), 929 deletions(-) create mode 100644 module_utils/ha_cluster_lsr/info/__init__.py create mode 100644 module_utils/ha_cluster_lsr/info/exporter.py create mode 100644 module_utils/ha_cluster_lsr/info/loader.py create mode 100644 tests/unit/test_info_exporter.py create mode 100644 tests/unit/test_info_loader.py diff --git a/.sanity-ansible-ignore-2.13.txt b/.sanity-ansible-ignore-2.13.txt index 845566e6..850df1fc 100644 --- a/.sanity-ansible-ignore-2.13.txt +++ b/.sanity-ansible-ignore-2.13.txt @@ -23,4 +23,8 @@ plugins/modules/pcs_qdevice_certs.py import-3.8!skip plugins/modules/ha_cluster_info.py compile-2.7!skip plugins/modules/ha_cluster_info.py import-2.7!skip plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip 
+plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/.sanity-ansible-ignore-2.14.txt b/.sanity-ansible-ignore-2.14.txt index 43cb08c8..fc80944e 100644 --- a/.sanity-ansible-ignore-2.14.txt +++ b/.sanity-ansible-ignore-2.14.txt @@ -31,4 +31,12 @@ plugins/modules/ha_cluster_info.py compile-3.5!skip plugins/modules/ha_cluster_info.py import-2.7!skip plugins/modules/ha_cluster_info.py import-3.5!skip plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py compile-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-3.5!skip tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/.sanity-ansible-ignore-2.15.txt b/.sanity-ansible-ignore-2.15.txt index 43cb08c8..fc80944e 100644 --- a/.sanity-ansible-ignore-2.15.txt +++ b/.sanity-ansible-ignore-2.15.txt @@ -31,4 +31,12 @@ plugins/modules/ha_cluster_info.py compile-3.5!skip plugins/modules/ha_cluster_info.py import-2.7!skip plugins/modules/ha_cluster_info.py import-3.5!skip plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py compile-3.5!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-3.5!skip tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/.sanity-ansible-ignore-2.16.txt b/.sanity-ansible-ignore-2.16.txt index 845566e6..850df1fc 100644 --- a/.sanity-ansible-ignore-2.16.txt +++ b/.sanity-ansible-ignore-2.16.txt @@ -23,4 +23,8 @@ plugins/modules/pcs_qdevice_certs.py import-3.8!skip plugins/modules/ha_cluster_info.py compile-2.7!skip plugins/modules/ha_cluster_info.py import-2.7!skip plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +plugins/module_utils/ha_cluster_lsr/info/exporter.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/exporter.py import-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py compile-2.7!skip +plugins/module_utils/ha_cluster_lsr/info/loader.py import-2.7!skip tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip diff --git a/library/ha_cluster_info.py b/library/ha_cluster_info.py index a3aeba62..f6c3e453 100644 --- a/library/ha_cluster_info.py +++ b/library/ha_cluster_info.py @@ -11,7 +11,6 @@ # make ansible-test happy, even though the module requires Python 3 # pylint: disable=invalid-name __metaclass__ = type -# pylint: enable=invalid-name DOCUMENTATION = r""" --- @@ -72,297 +71,15 @@ - HORIZONTALLINE """ -import json -import os.path -from typing import Any, Callable, Dict, List, Optional, 
Tuple +from typing import Any, Dict, List, Optional, Tuple from ansible.module_utils.basic import AnsibleModule -COROSYNC_CONF_PATH = "/etc/corosync/corosync.conf" -KNOWN_HOSTS_PATH = "/var/lib/pcsd/known-hosts" +# pylint: disable=no-name-in-module +from ansible.module_utils.ha_cluster_lsr.info import exporter, loader -CommandRunner = Callable[ - # parameters: args, environ_update - # environ_update should be a keyword argument, but they are not possible in - # Callable. typing.Protocol would have to be used to type that, but that is - # not available in Python 3.6 - [List[str], Optional[Dict[str, str]]], - # return value: rc, stdout, stderr - Tuple[int, str, str], -] - - -class CliCommandError(Exception): - """ - Running pcs has failed - """ - - def __init__( - self, pcs_command: List[str], rc: int, stdout: str, stderr: str - ): - self.pcs_command = pcs_command - self.rc = rc - self.stdout = stdout - self.stderr = stderr - - @property - def kwargs(self) -> Dict[str, Any]: - """ - Arguments given to the constructor - """ - return dict( - pcs_command=self.pcs_command, - rc=self.rc, - stdout=self.stdout, - stderr=self.stderr, - ) - - -class JsonParseError(Exception): - """ - Unable to parse JSON data - """ - - def __init__( - self, - error: str, - data: str, - data_desc: str, - additional_info: Optional[str] = None, - ): - self.error = error - self.data = data - self.data_desc = data_desc - self.additional_info = additional_info - - @property - def kwargs(self) -> Dict[str, Any]: - """ - Arguments given to the constructor - """ - return dict( - error=self.error, - data=self.data, - data_desc=self.data_desc, - additional_info=self.additional_info, - ) - - -class JsonMissingKey(Exception): - """ - A key is not present in pcs JSON output - """ - - def __init__(self, key: str, data: Dict[str, Any], data_desc: str): - self.key = key - self.data = data - self.data_desc = data_desc - - @property - def kwargs(self) -> Dict[str, Any]: - """ - Arguments given to the constructor - """ - return dict(key=self.key, data=self.data, data_desc=self.data_desc) - - -# functions loading data from cluster - - -def is_service_enabled(run_command: CommandRunner, service: str) -> bool: - """ - Check whether a specified service is enabled in the OS - - service -- name of the service to check without the ".service" suffix - """ - env = { - # make sure to get output of external processes in English and ASCII - "LC_ALL": "C", - } - # wokeignore:rule=dummy - rc, dummy_stdout, dummy_stderr = run_command( - ["systemctl", "is-enabled", f"{service}.service"], env - ) - return rc == 0 - - -def load_start_on_boot(run_command: CommandRunner) -> bool: - """ - Detect wheter a cluster is configured to start on boot - """ - return is_service_enabled(run_command, "corosync") or is_service_enabled( - run_command, "pacemaker" - ) - - -def call_pcs_cli( - run_command: CommandRunner, command: List[str] -) -> Dict[str, Any]: - """ - Run pcs CLI with the specified command, transform resulting JSON into a dict - - command -- pcs command to run without the "pcs" prefix - """ - env = { - # make sure to get output of external processes in English and ASCII - "LC_ALL": "C", - } - full_command = ["pcs"] + command - rc, stdout, stderr = run_command(full_command, env) - if rc != 0: - raise CliCommandError(full_command, rc, stdout, stderr) - try: - return json.loads(stdout) - except json.JSONDecodeError as e: - raise JsonParseError( - str(e), stdout, " ".join(full_command), stderr - ) from e - - -def load_corosync_conf(run_command: CommandRunner) 
-> Dict[str, Any]: - """ - Get corosync configuration from pcs - """ - return call_pcs_cli( - run_command, ["cluster", "config", "--output-format=json"] - ) - - -def load_pcsd_known_hosts() -> Dict[str, str]: - """ - Load pcsd known hosts and return dict node_name: node_address - """ - result: Dict[str, str] = dict() - if not os.path.exists(KNOWN_HOSTS_PATH): - return result - try: - with open(KNOWN_HOSTS_PATH, "r", encoding="utf-8") as known_hosts_file: - known_hosts = json.load(known_hosts_file) - for host_name, host_data in known_hosts.get("known_hosts", {}).items(): - if not host_data.get("dest_list"): - continue - # currently no more than one address is supported by both the role - # and pcs - addr = host_data.get("dest_list")[0].get("addr") - port = host_data.get("dest_list")[0].get("port") - if not addr: - continue - host_addr = addr - if port: - host_addr = ( - f"[{addr}]:{port}" if ":" in addr else f"{addr}:{port}" - ) - result[host_name] = host_addr - return result - except json.JSONDecodeError as e: - # cannot show actual data as they contain sensitive information - tokens - raise JsonParseError(str(e), "not logging data", "known hosts") from e - - -# functions transforming data from pcs format to role format - - -def dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Convert a dict to a list of dicts with keys 'name' and 'value' - """ - return [dict(name=name, value=value) for name, value in input_dict.items()] - - -def export_corosync_options( - corosync_conf_dict: Dict[str, Any] -) -> Dict[str, Any]: - """ - Transform corosync config from pcs format to role format excluding nodes - - corosync_conf_dict -- corosync config structure provided by pcs - """ - result: Dict[str, Any] = dict() - try: - result["ha_cluster_cluster_name"] = corosync_conf_dict["cluster_name"] - - transport = dict(type=corosync_conf_dict["transport"].lower()) - if corosync_conf_dict["transport_options"]: - transport["options"] = dict_to_nv_list( - corosync_conf_dict["transport_options"] - ) - if corosync_conf_dict["links_options"]: - link_list = [] - for link_dict in corosync_conf_dict["links_options"].values(): - # linknumber is an index in links_options, but it is present in - # link_dict as well - link_list.append(dict_to_nv_list(link_dict)) - transport["links"] = link_list - if corosync_conf_dict["compression_options"]: - transport["compression"] = dict_to_nv_list( - corosync_conf_dict["compression_options"] - ) - if corosync_conf_dict["crypto_options"]: - transport["crypto"] = dict_to_nv_list( - corosync_conf_dict["crypto_options"] - ) - result["ha_cluster_transport"] = transport - - if corosync_conf_dict["totem_options"]: - result["ha_cluster_totem"] = dict( - options=dict_to_nv_list(corosync_conf_dict["totem_options"]) - ) - - if corosync_conf_dict["quorum_options"]: - result["ha_cluster_quorum"] = dict( - options=dict_to_nv_list(corosync_conf_dict["quorum_options"]) - ) - except KeyError as e: - raise JsonMissingKey( - e.args[0], corosync_conf_dict, "corosync configuration" - ) from e - return result - - -def export_cluster_nodes( - corosync_conf_nodes: List[Dict[str, Any]], pcs_node_addr: Dict[str, str] -) -> List[Dict[str, Any]]: - """ - Transform node configuration from pcs format to role format - - corosync_conf_dict -- corosync config structure provided by pcs - pcs_node_addr -- dict holding pcs address for cluster nodes - """ - node_list: List[Dict[str, Any]] = [] - if not corosync_conf_nodes: - return node_list - for index, node_dict in 
enumerate(corosync_conf_nodes): - # corosync node configuration - try: - one_node = dict( - node_name=node_dict["name"], - corosync_addresses=[ - addr_dict["addr"] - for addr_dict in sorted( - node_dict["addrs"], - key=lambda item: item["link"], - ) - ], - ) - except KeyError as e: - raise JsonMissingKey( - e.args[0], - dict(nodes=corosync_conf_nodes), - f"corosync configuration for node on index {index}", - ) from e - # pcs node configuration - if one_node["node_name"] in pcs_node_addr: - one_node["pcs_address"] = pcs_node_addr[one_node["node_name"]] - # finish one node export - node_list.append(one_node) - return node_list - - -# ansible module tools and top layer functions - - -def get_cmd_runner(module: AnsibleModule) -> CommandRunner: +def get_cmd_runner(module: AnsibleModule) -> loader.CommandRunner: """ Provide a function responsible for running external processes """ @@ -387,21 +104,21 @@ def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]: result: dict[str, Any] = dict() cmd_runner = get_cmd_runner(module) - result["ha_cluster_start_on_boot"] = load_start_on_boot(cmd_runner) + result["ha_cluster_start_on_boot"] = loader.get_start_on_boot(cmd_runner) # Corosync config is availabe via CLI since pcs-0.10.8, via API v2 since # pcs-0.12.0 and pcs-0.11.9. For old pcs versions, CLI must be used, and # there is no benefit in implementing access via API on top of that. # No need to check pcs capabilities. If this is not supported by pcs, # exporting anything else is pointless (and not supported by pcs anyway). - corosync_conf_pcs = load_corosync_conf(cmd_runner) + corosync_conf_pcs = loader.get_corosync_conf(cmd_runner) # known-hosts file is available since pcs-0.10, but is not exported by pcs # in any version. # No need to check pcs capabilities. 
- known_hosts_pcs = load_pcsd_known_hosts() + known_hosts_pcs = loader.get_pcsd_known_hosts() # Convert corosync config to role format - corosync_conf_role = export_corosync_options(corosync_conf_pcs) + corosync_conf_role = exporter.export_corosync_options(corosync_conf_pcs) for key in ( "ha_cluster_cluster_name", "ha_cluster_transport", @@ -413,11 +130,11 @@ def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]: # Convert cluster definition to role format try: - result["ha_cluster_node_options"] = export_cluster_nodes( + result["ha_cluster_node_options"] = exporter.export_cluster_nodes( corosync_conf_pcs["nodes"], known_hosts_pcs ) except KeyError as e: - raise JsonMissingKey( + raise exporter.JsonMissingKey( e.args[0], corosync_conf_pcs, "corosync configuration" ) from e @@ -436,22 +153,22 @@ def main() -> None: module_result["ha_cluster"] = ha_cluster_result try: - if os.path.exists(COROSYNC_CONF_PATH): + if loader.has_corosync_conf(): ha_cluster_result.update(**export_cluster_configuration(module)) ha_cluster_result["ha_cluster_cluster_present"] = True else: ha_cluster_result["ha_cluster_cluster_present"] = False module.exit_json(**module_result) - except JsonMissingKey as e: + except exporter.JsonMissingKey as e: module.fail_json( msg=f"Missing key {e.key} in pcs {e.data_desc} JSON output", error_details=e.kwargs, ) - except JsonParseError as e: + except loader.JsonParseError as e: module.fail_json( msg="Error while parsing pcs JSON output", error_details=e.kwargs ) - except CliCommandError as e: + except loader.CliCommandError as e: module.fail_json(msg="Error while running pcs", error_details=e.kwargs) diff --git a/module_utils/ha_cluster_lsr/info/__init__.py b/module_utils/ha_cluster_lsr/info/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/module_utils/ha_cluster_lsr/info/exporter.py b/module_utils/ha_cluster_lsr/info/exporter.py new file mode 100644 index 00000000..78dcfdbf --- /dev/null +++ b/module_utils/ha_cluster_lsr/info/exporter.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2024 Red Hat, Inc. 
+# Author: Tomas Jelinek
+# SPDX-License-Identifier: MIT
+
+# make ansible-test happy, even though the module requires Python 3
+from __future__ import absolute_import, division, print_function
+
+# make ansible-test happy, even though the module requires Python 3
+# pylint: disable=invalid-name
+__metaclass__ = type
+
+from typing import Any, Dict, List
+
+
+class JsonMissingKey(Exception):
+    """
+    A key is not present in pcs JSON output
+    """
+
+    def __init__(self, key: str, data: Dict[str, Any], data_desc: str):
+        self.key = key
+        self.data = data
+        self.data_desc = data_desc
+
+    @property
+    def kwargs(self) -> Dict[str, Any]:
+        """
+        Arguments given to the constructor
+        """
+        return dict(key=self.key, data=self.data, data_desc=self.data_desc)
+
+
+def _dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]:
+    """
+    Convert a dict to a list of dicts with keys 'name' and 'value'
+    """
+    return [dict(name=name, value=value) for name, value in input_dict.items()]
+
+
+def export_corosync_options(
+    corosync_conf_dict: Dict[str, Any]
+) -> Dict[str, Any]:
+    """
+    Transform corosync config from pcs format to role format excluding nodes
+
+    corosync_conf_dict -- corosync config structure provided by pcs
+    """
+    result: Dict[str, Any] = dict()
+    try:
+        result["ha_cluster_cluster_name"] = corosync_conf_dict["cluster_name"]
+
+        transport = dict(type=corosync_conf_dict["transport"].lower())
+        if corosync_conf_dict["transport_options"]:
+            transport["options"] = _dict_to_nv_list(
+                corosync_conf_dict["transport_options"]
+            )
+        if corosync_conf_dict["links_options"]:
+            link_list = []
+            for link_dict in corosync_conf_dict["links_options"].values():
+                # linknumber is an index in links_options, but it is present in
+                # link_dict as well
+                link_list.append(_dict_to_nv_list(link_dict))
+            transport["links"] = link_list
+        if corosync_conf_dict["compression_options"]:
+            transport["compression"] = _dict_to_nv_list(
+                corosync_conf_dict["compression_options"]
+            )
+        if corosync_conf_dict["crypto_options"]:
+            transport["crypto"] = _dict_to_nv_list(
+                corosync_conf_dict["crypto_options"]
+            )
+        result["ha_cluster_transport"] = transport
+
+        if corosync_conf_dict["totem_options"]:
+            result["ha_cluster_totem"] = dict(
+                options=_dict_to_nv_list(corosync_conf_dict["totem_options"])
+            )
+
+        if corosync_conf_dict["quorum_options"]:
+            result["ha_cluster_quorum"] = dict(
+                options=_dict_to_nv_list(corosync_conf_dict["quorum_options"])
+            )
+    except KeyError as e:
+        raise JsonMissingKey(
+            e.args[0], corosync_conf_dict, "corosync configuration"
+        ) from e
+    return result
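+
+
+# Illustration only, taken from the unit tests below (not executed here):
+# export_corosync_options(dict(cluster_name="my-cluster", transport="KNET",
+# transport_options=dict(), links_options=dict(), compression_options=dict(),
+# crypto_options=dict(), totem_options=dict(), quorum_options=dict()))
+# returns dict(ha_cluster_cluster_name="my-cluster",
+# ha_cluster_transport=dict(type="knet")) -- empty option dicts are omitted
+# from the role variables.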
one_node["pcs_address"] = pcs_node_addr[one_node["node_name"]] + # finish one node export + node_list.append(one_node) + return node_list diff --git a/module_utils/ha_cluster_lsr/info/loader.py b/module_utils/ha_cluster_lsr/info/loader.py new file mode 100644 index 00000000..f755826c --- /dev/null +++ b/module_utils/ha_cluster_lsr/info/loader.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2024 Red Hat, Inc. +# Author: Tomas Jelinek +# SPDX-License-Identifier: MIT + +# make ansible-test happy, even though the module requires Python 3 +from __future__ import absolute_import, division, print_function + +# make ansible-test happy, even though the module requires Python 3 +# pylint: disable=invalid-name +__metaclass__ = type + +import json +import os.path +from typing import Any, Callable, Dict, List, Optional, Tuple + +COROSYNC_CONF_PATH = "/etc/corosync/corosync.conf" +KNOWN_HOSTS_PATH = "/var/lib/pcsd/known-hosts" + +CommandRunner = Callable[ + # parameters: args, environ_update + # environ_update should be a keyword argument, but they are not possible in + # Callable. typing.Protocol would have to be used to type that, but that is + # not available in Python 3.6 + [List[str], Optional[Dict[str, str]]], + # return value: rc, stdout, stderr + Tuple[int, str, str], +] + + +class CliCommandError(Exception): + """ + Running pcs has failed + """ + + def __init__( + self, pcs_command: List[str], rc: int, stdout: str, stderr: str + ): + self.pcs_command = pcs_command + self.rc = rc + self.stdout = stdout + self.stderr = stderr + + @property + def kwargs(self) -> Dict[str, Any]: + """ + Arguments given to the constructor + """ + return dict( + pcs_command=self.pcs_command, + rc=self.rc, + stdout=self.stdout, + stderr=self.stderr, + ) + + +class JsonParseError(Exception): + """ + Unable to parse JSON data + """ + + def __init__( + self, + error: str, + data: str, + data_desc: str, + additional_info: Optional[str] = None, + ): + self.error = error + self.data = data + self.data_desc = data_desc + self.additional_info = additional_info + + @property + def kwargs(self) -> Dict[str, Any]: + """ + Arguments given to the constructor + """ + return dict( + error=self.error, + data=self.data, + data_desc=self.data_desc, + additional_info=self.additional_info, + ) + + +def _is_service_enabled(run_command: CommandRunner, service: str) -> bool: + """ + Check whether a specified service is enabled in the OS + + service -- name of the service to check without the ".service" suffix + """ + env = { + # make sure to get output of external processes in English and ASCII + "LC_ALL": "C", + } + # wokeignore:rule=dummy + rc, dummy_stdout, dummy_stderr = run_command( + ["systemctl", "is-enabled", f"{service}.service"], env + ) + return rc == 0 + + +def get_start_on_boot(run_command: CommandRunner) -> bool: + """ + Detect wheter a cluster is configured to start on boot + """ + return _is_service_enabled(run_command, "corosync") or _is_service_enabled( + run_command, "pacemaker" + ) + + +def _call_pcs_cli( + run_command: CommandRunner, command: List[str] +) -> Dict[str, Any]: + """ + Run pcs CLI with the specified command, transform resulting JSON into a dict + + command -- pcs command to run without the "pcs" prefix + """ + env = { + # make sure to get output of external processes in English and ASCII + "LC_ALL": "C", + } + full_command = ["pcs"] + command + rc, stdout, stderr = run_command(full_command, env) + if rc != 0: + raise CliCommandError(full_command, rc, stdout, stderr) + try: + return 
json.loads(stdout) + except json.JSONDecodeError as e: + raise JsonParseError( + str(e), stdout, " ".join(full_command), stderr + ) from e + + +def has_corosync_conf() -> bool: + """ + Check whether corosync.conf file is present + """ + return os.path.exists(COROSYNC_CONF_PATH) + + +def get_corosync_conf(run_command: CommandRunner) -> Dict[str, Any]: + """ + Get corosync configuration from pcs + """ + return _call_pcs_cli( + run_command, ["cluster", "config", "--output-format=json"] + ) + + +def get_pcsd_known_hosts() -> Dict[str, str]: + """ + Load pcsd known hosts and return dict node_name: node_address + """ + result: Dict[str, str] = dict() + if not os.path.exists(KNOWN_HOSTS_PATH): + return result + try: + with open(KNOWN_HOSTS_PATH, "r", encoding="utf-8") as known_hosts_file: + known_hosts = json.load(known_hosts_file) + for host_name, host_data in known_hosts.get("known_hosts", {}).items(): + if not host_data.get("dest_list"): + continue + # currently no more than one address is supported by both the role + # and pcs + addr = host_data.get("dest_list")[0].get("addr") + port = host_data.get("dest_list")[0].get("port") + if not addr: + continue + host_addr = addr + if port: + host_addr = ( + f"[{addr}]:{port}" if ":" in addr else f"{addr}:{port}" + ) + result[host_name] = host_addr + return result + except json.JSONDecodeError as e: + # cannot show actual data as they contain sensitive information - tokens + raise JsonParseError(str(e), "not logging data", "known hosts") from e diff --git a/tests/unit/test_ha_cluster_info.py b/tests/unit/test_ha_cluster_info.py index 3ef13db6..52443dc7 100644 --- a/tests/unit/test_ha_cluster_info.py +++ b/tests/unit/test_ha_cluster_info.py @@ -11,7 +11,6 @@ import json import sys from importlib import import_module -from typing import Any, Dict, List from unittest import TestCase, mock sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( @@ -20,638 +19,11 @@ import ha_cluster_info -# functions loading data from cluster - - -class IsServiceEnabled(TestCase): - def setUp(self) -> None: - self.runner_mock = mock.Mock() - - def test_is_enabled(self) -> None: - self.runner_mock.return_value = (0, "enabled", "") - self.assertTrue( - ha_cluster_info.is_service_enabled(self.runner_mock, "corosync") - ) - self.runner_mock.assert_called_once_with( - ["systemctl", "is-enabled", "corosync.service"], - {"LC_ALL": "C"}, - ) - - def test_is_disabled(self) -> None: - self.runner_mock.return_value = (1, "disabled", "") - self.assertFalse( - ha_cluster_info.is_service_enabled(self.runner_mock, "pacemaker") - ) - self.runner_mock.assert_called_once_with( - ["systemctl", "is-enabled", "pacemaker.service"], - {"LC_ALL": "C"}, - ) - - def test_unexpected_output(self) -> None: - self.runner_mock.return_value = (4, "not-found", "") - self.assertFalse( - ha_cluster_info.is_service_enabled(self.runner_mock, "pcmk") - ) - self.runner_mock.assert_called_once_with( - ["systemctl", "is-enabled", "pcmk.service"], - {"LC_ALL": "C"}, - ) - - -class LoadStartOnBoot(TestCase): - @mock.patch("ha_cluster_info.is_service_enabled") - def test_main(self, mock_is_enabled: mock.Mock) -> None: - runner_mock = mock.Mock() - mock_is_enabled.side_effect = [False, False] - self.assertFalse(ha_cluster_info.load_start_on_boot(runner_mock)) - - mock_is_enabled.side_effect = [True, False] - self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) - - mock_is_enabled.side_effect = [False, True] - self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) - - 
mock_is_enabled.side_effect = [True, True] - self.assertTrue(ha_cluster_info.load_start_on_boot(runner_mock)) - - -class CallPcsCli(TestCase): - def test_success(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = ( - 0, - """{"json": "test data", "foo": "bar"}""", - "", - ) - self.assertEqual( - ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]), - dict(json="test data", foo="bar"), - ) - runner_mock.assert_called_once_with( - ["pcs", "cluster", "config"], - {"LC_ALL": "C"}, - ) - - def test_pcs_error(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = ( - 1, - "some stdout message", - "some stderr message", - ) - with self.assertRaises(ha_cluster_info.CliCommandError) as cm: - ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]) - self.assertEqual( - cm.exception.kwargs, - dict( - pcs_command=["pcs", "cluster", "config"], - stdout="some stdout message", - stderr="some stderr message", - rc=1, - ), - ) - runner_mock.assert_called_once_with( - ["pcs", "cluster", "config"], - {"LC_ALL": "C"}, - ) - - def test_json_error(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = ( - 0, - "not a json", - "", - ) - with self.assertRaises(ha_cluster_info.JsonParseError) as cm: - ha_cluster_info.call_pcs_cli(runner_mock, ["cluster", "config"]) - self.assertEqual( - cm.exception.kwargs, - dict( - data="not a json", - data_desc="pcs cluster config", - error="Expecting value: line 1 column 1 (char 0)", - additional_info="", - ), - ) - runner_mock.assert_called_once_with( - ["pcs", "cluster", "config"], - {"LC_ALL": "C"}, - ) - - -class LoadCorosyncConf(TestCase): - pcs_command = ["pcs", "cluster", "config", "--output-format=json"] - env = {"LC_ALL": "C"} - - def test_success(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = (0, """{"some": "json"}""", "") - self.assertEqual( - ha_cluster_info.load_corosync_conf(runner_mock), dict(some="json") - ) - runner_mock.assert_called_once_with(self.pcs_command, self.env) - - def test_pcs_error(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = (1, "stdout message", "stderr message") - with self.assertRaises(ha_cluster_info.CliCommandError) as cm: - ha_cluster_info.load_corosync_conf(runner_mock) - self.assertEqual( - cm.exception.kwargs, - dict( - pcs_command=self.pcs_command, - stdout="stdout message", - stderr="stderr message", - rc=1, - ), - ) - runner_mock.assert_called_once_with(self.pcs_command, self.env) - - def test_json_error(self) -> None: - runner_mock = mock.Mock() - runner_mock.return_value = (0, "not a json", "") - with self.assertRaises(ha_cluster_info.JsonParseError) as cm: - ha_cluster_info.load_corosync_conf(runner_mock) - self.assertEqual( - cm.exception.kwargs, - dict( - data="not a json", - data_desc=" ".join(self.pcs_command), - error="Expecting value: line 1 column 1 (char 0)", - additional_info="", - ), - ) - runner_mock.assert_called_once_with(self.pcs_command, self.env) - - -class LoadPcsdKnownHosts(TestCase): - file_path = "/var/lib/pcsd/known-hosts" - - @mock.patch("ha_cluster_info.os.path.exists") - def test_file_not_present(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = False - self.assertEqual(ha_cluster_info.load_pcsd_known_hosts(), dict()) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_json_error(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = "not a json" - with mock.patch( - 
"ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - with self.assertRaises(ha_cluster_info.JsonParseError) as cm: - ha_cluster_info.load_pcsd_known_hosts() - self.assertEqual( - cm.exception.kwargs, - dict( - data="not logging data", - data_desc="known hosts", - error="Expecting value: line 1 column 1 (char 0)", - additional_info=None, - ), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_json_empty(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = "{}" - with mock.patch( - "ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - self.assertEqual( - ha_cluster_info.load_pcsd_known_hosts(), - dict(), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - @mock.patch("ha_cluster_info.os.path.exists") - def test_extract(self, mock_exists: mock.Mock) -> None: - mock_exists.return_value = True - mock_data = json.dumps( - dict( - known_hosts=dict( - node1=dict(), - node2=dict(dest_list=[]), - node3=dict(dest_list=[dict()]), - node4=dict(dest_list=[dict(addr="node4A")]), - node5=dict(dest_list=[dict(port="10005")]), - node6=dict(dest_list=[dict(addr="node6A", port="10006")]), - node7=dict( - dest_list=[dict(addr="2001:db8::7", port="10007")] - ), - node8=dict( - dest_list=[ - dict(addr="192.0.2.8", port="10008"), - dict(addr="node8B"), - ] - ), - ) - ) - ) - with mock.patch( - "ha_cluster_info.open", mock.mock_open(read_data=mock_data) - ) as mock_open: - self.assertEqual( - ha_cluster_info.load_pcsd_known_hosts(), - dict( - node4="node4A", - node6="node6A:10006", - node7="[2001:db8::7]:10007", - node8="192.0.2.8:10008", - ), - ) - mock_open.assert_called_once_with( - self.file_path, "r", encoding="utf-8" - ) - mock_exists.assert_called_once_with(self.file_path) - - -# functions transforming data from pcs format to role format - - -class DictToNvList(TestCase): - def test_no_item(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict()), - [], - ) - - def test_one_item(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict(one="1")), - [dict(name="one", value="1")], - ) - - def test_two_items(self) -> None: - self.assertEqual( - ha_cluster_info.dict_to_nv_list(dict(one="1", two="2")), - [dict(name="one", value="1"), dict(name="two", value="2")], - ) - - -class ExportCorosyncConf(TestCase): - maxDiff = None - - def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: - with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: - ha_cluster_info.export_corosync_options(data) - self.assertEqual( - cm.exception.kwargs, - dict(data=data, key=key, data_desc="corosync configuration"), - ) - - def test_missing_keys(self) -> None: - self.assert_missing_key(dict(), "cluster_name") - self.assert_missing_key(dict(cluster_name="x"), "transport") - self.assert_missing_key( - dict(cluster_name="x", transport="x"), "transport_options" - ) - self.assert_missing_key( - dict(cluster_name="x", transport="x", transport_options=dict()), - "links_options", - ) - self.assert_missing_key( - dict( - cluster_name="x", - transport="x", - transport_options=dict(), - links_options=dict(), - ), - "compression_options", - ) - self.assert_missing_key( - dict( - cluster_name="x", - transport="x", - transport_options=dict(), - links_options=dict(), - 
compression_options=dict(), - ), - "crypto_options", - ) - self.assert_missing_key( - dict( - cluster_name="x", - transport="x", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - ), - "totem_options", - ) - self.assert_missing_key( - dict( - cluster_name="x", - transport="x", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - ), - "quorum_options", - ) - - def test_minimal(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - transport_options=dict(), - links_options=dict(), - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - ) - role_data = ha_cluster_info.export_corosync_options(pcs_data) - self.assertEqual( - role_data, - dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ), - ) - - def test_simple_options_mirroring(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - totem_options=dict(totem1="a", totem2="b"), - transport_options=dict(transport1="c", transport2="d"), - compression_options=dict(compression1="e", compression2="f"), - crypto_options=dict(crypto1="g", crypto2="h"), - quorum_options=dict(quorum1="i", quorum2="j"), - links_options=dict(), - ) - role_data = ha_cluster_info.export_corosync_options(pcs_data) - self.assertEqual( - role_data, - dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - options=[ - dict(name="transport1", value="c"), - dict(name="transport2", value="d"), - ], - compression=[ - dict(name="compression1", value="e"), - dict(name="compression2", value="f"), - ], - crypto=[ - dict(name="crypto1", value="g"), - dict(name="crypto2", value="h"), - ], - ), - ha_cluster_totem=dict( - options=[ - dict(name="totem1", value="a"), - dict(name="totem2", value="b"), - ], - ), - ha_cluster_quorum=dict( - options=[ - dict(name="quorum1", value="i"), - dict(name="quorum2", value="j"), - ], - ), - ), - ) - - def test_one_link(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - transport_options=dict(), - links_options={"0": dict(name1="value1", name2="value2")}, - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - ) - role_data = ha_cluster_info.export_corosync_options(pcs_data) - self.assertEqual( - role_data, - dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - links=[ - [ - dict(name="name1", value="value1"), - dict(name="name2", value="value2"), - ] - ], - ), - ), - ) - - def test_more_links(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", - transport="KNET", - transport_options=dict(), - links_options={ - "0": dict(linknumber="0", name0="value0"), - "7": dict(linknumber="7", name7="value7"), - "3": dict(linknumber="3", name3="value3"), - }, - compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - ) - role_data = ha_cluster_info.export_corosync_options(pcs_data) - self.assertEqual( - role_data, - dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - links=[ - [ - dict(name="linknumber", value="0"), - dict(name="name0", value="value0"), - ], - [ - dict(name="linknumber", value="7"), - dict(name="name7", value="value7"), - ], - [ - dict(name="linknumber", value="3"), - dict(name="name3", value="value3"), - ], - ], - 
), - ), - ) - - -class ExportClusterNodes(TestCase): - maxDiff = None - - def assert_missing_key( - self, data: List[Dict[str, Any]], key: str, index: str = "0" - ) -> None: - with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: - ha_cluster_info.export_cluster_nodes(data, {}) - self.assertEqual( - cm.exception.kwargs, - dict( - data=dict(nodes=data), - key=key, - data_desc=f"corosync configuration for node on index {index}", - ), - ) - - def test_no_nodes(self) -> None: - self.assertEqual(ha_cluster_info.export_cluster_nodes([], {}), []) - - def test_corosync_nodes_missing_keys(self) -> None: - corosync_data: List[Dict[str, Any]] = [dict()] - self.assert_missing_key(corosync_data, "name") - - corosync_data = [dict(name="nodename")] - self.assert_missing_key(corosync_data, "addrs") - - corosync_data = [dict(name="nodename", addrs=[dict()])] - self.assert_missing_key(corosync_data, "link") - - corosync_data = [dict(name="nodename", addrs=[dict(link="0")])] - self.assert_missing_key(corosync_data, "addr") - - def test_corosync_nodes_one_link(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", link="0", type="IPv4")], - ), - dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], - ), - ] - role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) - self.assertEqual( - role_data, - [ - dict(node_name="node1", corosync_addresses=["node1addr"]), - dict(node_name="node2", corosync_addresses=["node2addr"]), - ], - ) - - def test_corosync_nodes_multiple_links(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[ - dict(addr="node1addr1", link="0", type="IPv4"), - dict(addr="node1addr2", link="1", type="IPv6"), - ], - ), - dict( - name="node2", - nodeid=2, - addrs=[ - dict(addr="node2addr1", link="0", type="IPv4"), - dict(addr="node2addr2", link="1", type="IPv6"), - ], - ), - ] - role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) - self.assertEqual( - role_data, - [ - dict( - node_name="node1", - corosync_addresses=["node1addr1", "node1addr2"], - ), - dict( - node_name="node2", - corosync_addresses=["node2addr1", "node2addr2"], - ), - ], - ) - - def test_corosync_nodes_no_address(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[], - ), - ] - role_data = ha_cluster_info.export_cluster_nodes(corosync_data, {}) - self.assertEqual( - role_data, - [ - dict(node_name="node1", corosync_addresses=[]), - ], - ) - - def test_pcs_nodes_no_cluster_nodes(self) -> None: - corosync_data: List[Dict[str, Any]] = [] - pcs_data = dict(node1="node1A") - role_data = ha_cluster_info.export_cluster_nodes( - corosync_data, pcs_data - ) - self.assertEqual( - role_data, - [], - ) - - def test_pcs_nodes(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", link="0", type="FQDN")], - ), - dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], - ), - ] - pcs_data = dict(node1="node1A", node3="node3A") - role_data = ha_cluster_info.export_cluster_nodes( - corosync_data, pcs_data - ) - self.assertEqual( - role_data, - [ - dict( - node_name="node1", - corosync_addresses=["node1addr"], - pcs_address="node1A", - ), - dict( - node_name="node2", - corosync_addresses=["node2addr"], - ), - ], - ) - - -# ansible module tools and top layer functions - class ExportClusterConfiguration(TestCase): maxDiff = None - @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + 
@mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") def test_export_minimal( self, mock_load_pcsd_known_hosts: mock.Mock, @@ -714,7 +86,7 @@ def test_export_minimal( mock_load_pcsd_known_hosts.assert_called_once_with() - @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") def test_export( self, mock_load_pcsd_known_hosts: mock.Mock, @@ -800,7 +172,7 @@ def test_export( mock_load_pcsd_known_hosts.assert_called_once_with() - @mock.patch("ha_cluster_info.load_pcsd_known_hosts") + @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") def test_missing_corosync_nodes_key( self, mock_load_pcsd_known_hosts: mock.Mock, @@ -829,7 +201,7 @@ def test_missing_corosync_nodes_key( node2="node2pcs", ) - with self.assertRaises(ha_cluster_info.JsonMissingKey) as cm: + with self.assertRaises(ha_cluster_info.exporter.JsonMissingKey) as cm: ha_cluster_info.export_cluster_configuration(module_mock) self.assertEqual( cm.exception.kwargs, diff --git a/tests/unit/test_info_exporter.py b/tests/unit/test_info_exporter.py new file mode 100644 index 00000000..4680e0dc --- /dev/null +++ b/tests/unit/test_info_exporter.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2024 Red Hat, Inc. +# Author: Tomas Jelinek +# SPDX-License-Identifier: MIT + +# pylint: disable=missing-class-docstring +# pylint: disable=missing-function-docstring + +import sys +from importlib import import_module +from typing import Any, Dict, List +from unittest import TestCase + +sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( + "ha_cluster_lsr" +) + +from ha_cluster_lsr.info import exporter + + +class DictToNvList(TestCase): + # pylint: disable=protected-access + def test_no_item(self) -> None: + self.assertEqual( + exporter._dict_to_nv_list(dict()), + [], + ) + + def test_one_item(self) -> None: + self.assertEqual( + exporter._dict_to_nv_list(dict(one="1")), + [dict(name="one", value="1")], + ) + + def test_two_items(self) -> None: + self.assertEqual( + exporter._dict_to_nv_list(dict(one="1", two="2")), + [dict(name="one", value="1"), dict(name="two", value="2")], + ) + + +class ExportCorosyncConf(TestCase): + maxDiff = None + + def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_corosync_options(data) + self.assertEqual( + cm.exception.kwargs, + dict(data=data, key=key, data_desc="corosync configuration"), + ) + + def test_missing_keys(self) -> None: + self.assert_missing_key(dict(), "cluster_name") + self.assert_missing_key(dict(cluster_name="x"), "transport") + self.assert_missing_key( + dict(cluster_name="x", transport="x"), "transport_options" + ) + self.assert_missing_key( + dict(cluster_name="x", transport="x", transport_options=dict()), + "links_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + ), + "compression_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + ), + "crypto_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + ), + "totem_options", + ) + self.assert_missing_key( + dict( + cluster_name="x", + transport="x", + transport_options=dict(), + links_options=dict(), + 
compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + ), + "quorum_options", + ) + + def test_minimal(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options=dict(), + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + ) + role_data = exporter.export_corosync_options(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict(type="knet"), + ), + ) + + def test_simple_options_mirroring(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + totem_options=dict(totem1="a", totem2="b"), + transport_options=dict(transport1="c", transport2="d"), + compression_options=dict(compression1="e", compression2="f"), + crypto_options=dict(crypto1="g", crypto2="h"), + quorum_options=dict(quorum1="i", quorum2="j"), + links_options=dict(), + ) + role_data = exporter.export_corosync_options(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + options=[ + dict(name="transport1", value="c"), + dict(name="transport2", value="d"), + ], + compression=[ + dict(name="compression1", value="e"), + dict(name="compression2", value="f"), + ], + crypto=[ + dict(name="crypto1", value="g"), + dict(name="crypto2", value="h"), + ], + ), + ha_cluster_totem=dict( + options=[ + dict(name="totem1", value="a"), + dict(name="totem2", value="b"), + ], + ), + ha_cluster_quorum=dict( + options=[ + dict(name="quorum1", value="i"), + dict(name="quorum2", value="j"), + ], + ), + ), + ) + + def test_one_link(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options={"0": dict(name1="value1", name2="value2")}, + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + ) + role_data = exporter.export_corosync_options(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + links=[ + [ + dict(name="name1", value="value1"), + dict(name="name2", value="value2"), + ] + ], + ), + ), + ) + + def test_more_links(self) -> None: + pcs_data = dict( + cluster_name="my-cluster", + transport="KNET", + transport_options=dict(), + links_options={ + "0": dict(linknumber="0", name0="value0"), + "7": dict(linknumber="7", name7="value7"), + "3": dict(linknumber="3", name3="value3"), + }, + compression_options=dict(), + crypto_options=dict(), + totem_options=dict(), + quorum_options=dict(), + ) + role_data = exporter.export_corosync_options(pcs_data) + self.assertEqual( + role_data, + dict( + ha_cluster_cluster_name="my-cluster", + ha_cluster_transport=dict( + type="knet", + links=[ + [ + dict(name="linknumber", value="0"), + dict(name="name0", value="value0"), + ], + [ + dict(name="linknumber", value="7"), + dict(name="name7", value="value7"), + ], + [ + dict(name="linknumber", value="3"), + dict(name="name3", value="value3"), + ], + ], + ), + ), + ) + + +class ExportClusterNodes(TestCase): + maxDiff = None + + def assert_missing_key( + self, data: List[Dict[str, Any]], key: str, index: str = "0" + ) -> None: + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_cluster_nodes(data, {}) + self.assertEqual( + cm.exception.kwargs, + dict( + data=dict(nodes=data), + key=key, + data_desc=f"corosync configuration for node on index 
{index}", + ), + ) + + def test_no_nodes(self) -> None: + self.assertEqual(exporter.export_cluster_nodes([], {}), []) + + def test_corosync_nodes_missing_keys(self) -> None: + corosync_data: List[Dict[str, Any]] = [dict()] + self.assert_missing_key(corosync_data, "name") + + corosync_data = [dict(name="nodename")] + self.assert_missing_key(corosync_data, "addrs") + + corosync_data = [dict(name="nodename", addrs=[dict()])] + self.assert_missing_key(corosync_data, "link") + + corosync_data = [dict(name="nodename", addrs=[dict(link="0")])] + self.assert_missing_key(corosync_data, "addr") + + def test_corosync_nodes_one_link(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ] + role_data = exporter.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + role_data, + [ + dict(node_name="node1", corosync_addresses=["node1addr"]), + dict(node_name="node2", corosync_addresses=["node2addr"]), + ], + ) + + def test_corosync_nodes_multiple_links(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[ + dict(addr="node1addr1", link="0", type="IPv4"), + dict(addr="node1addr2", link="1", type="IPv6"), + ], + ), + dict( + name="node2", + nodeid=2, + addrs=[ + dict(addr="node2addr1", link="0", type="IPv4"), + dict(addr="node2addr2", link="1", type="IPv6"), + ], + ), + ] + role_data = exporter.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + role_data, + [ + dict( + node_name="node1", + corosync_addresses=["node1addr1", "node1addr2"], + ), + dict( + node_name="node2", + corosync_addresses=["node2addr1", "node2addr2"], + ), + ], + ) + + def test_corosync_nodes_no_address(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[], + ), + ] + role_data = exporter.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + role_data, + [ + dict(node_name="node1", corosync_addresses=[]), + ], + ) + + def test_pcs_nodes_no_cluster_nodes(self) -> None: + corosync_data: List[Dict[str, Any]] = [] + pcs_data = dict(node1="node1A") + role_data = exporter.export_cluster_nodes(corosync_data, pcs_data) + self.assertEqual( + role_data, + [], + ) + + def test_pcs_nodes(self) -> None: + corosync_data = [ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="FQDN")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ] + pcs_data = dict(node1="node1A", node3="node3A") + role_data = exporter.export_cluster_nodes(corosync_data, pcs_data) + self.assertEqual( + role_data, + [ + dict( + node_name="node1", + corosync_addresses=["node1addr"], + pcs_address="node1A", + ), + dict( + node_name="node2", + corosync_addresses=["node2addr"], + ), + ], + ) diff --git a/tests/unit/test_info_loader.py b/tests/unit/test_info_loader.py new file mode 100644 index 00000000..05268dfd --- /dev/null +++ b/tests/unit/test_info_loader.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2024 Red Hat, Inc. 
+# Author: Tomas Jelinek +# SPDX-License-Identifier: MIT + +# pylint: disable=missing-class-docstring +# pylint: disable=missing-function-docstring + +import json +import sys +from importlib import import_module +from unittest import TestCase, mock + +sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( + "ha_cluster_lsr" +) + +from ha_cluster_lsr.info import loader + + +class IsServiceEnabled(TestCase): + # pylint: disable=protected-access + def setUp(self) -> None: + self.runner_mock = mock.Mock() + + def test_is_enabled(self) -> None: + self.runner_mock.return_value = (0, "enabled", "") + self.assertTrue( + loader._is_service_enabled(self.runner_mock, "corosync") + ) + self.runner_mock.assert_called_once_with( + ["systemctl", "is-enabled", "corosync.service"], + {"LC_ALL": "C"}, + ) + + def test_is_disabled(self) -> None: + self.runner_mock.return_value = (1, "disabled", "") + self.assertFalse( + loader._is_service_enabled(self.runner_mock, "pacemaker") + ) + self.runner_mock.assert_called_once_with( + ["systemctl", "is-enabled", "pacemaker.service"], + {"LC_ALL": "C"}, + ) + + def test_unexpected_output(self) -> None: + self.runner_mock.return_value = (4, "not-found", "") + self.assertFalse(loader._is_service_enabled(self.runner_mock, "pcmk")) + self.runner_mock.assert_called_once_with( + ["systemctl", "is-enabled", "pcmk.service"], + {"LC_ALL": "C"}, + ) + + +class GetStartOnBoot(TestCase): + @mock.patch("ha_cluster_lsr.info.loader._is_service_enabled") + def test_main(self, mock_is_enabled: mock.Mock) -> None: + runner_mock = mock.Mock() + mock_is_enabled.side_effect = [False, False] + self.assertFalse(loader.get_start_on_boot(runner_mock)) + + mock_is_enabled.side_effect = [True, False] + self.assertTrue(loader.get_start_on_boot(runner_mock)) + + mock_is_enabled.side_effect = [False, True] + self.assertTrue(loader.get_start_on_boot(runner_mock)) + + mock_is_enabled.side_effect = [True, True] + self.assertTrue(loader.get_start_on_boot(runner_mock)) + + +class CallPcsCli(TestCase): + # pylint: disable=protected-access + def test_success(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = ( + 0, + """{"json": "test data", "foo": "bar"}""", + "", + ) + self.assertEqual( + loader._call_pcs_cli(runner_mock, ["cluster", "config"]), + dict(json="test data", foo="bar"), + ) + runner_mock.assert_called_once_with( + ["pcs", "cluster", "config"], + {"LC_ALL": "C"}, + ) + + def test_pcs_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = ( + 1, + "some stdout message", + "some stderr message", + ) + with self.assertRaises(loader.CliCommandError) as cm: + loader._call_pcs_cli(runner_mock, ["cluster", "config"]) + self.assertEqual( + cm.exception.kwargs, + dict( + pcs_command=["pcs", "cluster", "config"], + stdout="some stdout message", + stderr="some stderr message", + rc=1, + ), + ) + runner_mock.assert_called_once_with( + ["pcs", "cluster", "config"], + {"LC_ALL": "C"}, + ) + + def test_json_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = ( + 0, + "not a json", + "", + ) + with self.assertRaises(loader.JsonParseError) as cm: + loader._call_pcs_cli(runner_mock, ["cluster", "config"]) + self.assertEqual( + cm.exception.kwargs, + dict( + data="not a json", + data_desc="pcs cluster config", + error="Expecting value: line 1 column 1 (char 0)", + additional_info="", + ), + ) + runner_mock.assert_called_once_with( + ["pcs", "cluster", "config"], + {"LC_ALL": "C"}, + ) + + +class GetCorosyncConf(TestCase): + 
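+    # Verifies loader.get_corosync_conf(); the mocked runner is expected to
+    # be invoked with exactly this pcs command and environment: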
pcs_command = ["pcs", "cluster", "config", "--output-format=json"] + env = {"LC_ALL": "C"} + + def test_success(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (0, """{"some": "json"}""", "") + self.assertEqual( + loader.get_corosync_conf(runner_mock), dict(some="json") + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) + + def test_pcs_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (1, "stdout message", "stderr message") + with self.assertRaises(loader.CliCommandError) as cm: + loader.get_corosync_conf(runner_mock) + self.assertEqual( + cm.exception.kwargs, + dict( + pcs_command=self.pcs_command, + stdout="stdout message", + stderr="stderr message", + rc=1, + ), + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) + + def test_json_error(self) -> None: + runner_mock = mock.Mock() + runner_mock.return_value = (0, "not a json", "") + with self.assertRaises(loader.JsonParseError) as cm: + loader.get_corosync_conf(runner_mock) + self.assertEqual( + cm.exception.kwargs, + dict( + data="not a json", + data_desc=" ".join(self.pcs_command), + error="Expecting value: line 1 column 1 (char 0)", + additional_info="", + ), + ) + runner_mock.assert_called_once_with(self.pcs_command, self.env) + + +class GetPcsdKnownHosts(TestCase): + file_path = "/var/lib/pcsd/known-hosts" + + @mock.patch("ha_cluster_lsr.info.loader.os.path.exists") + def test_file_not_present(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = False + self.assertEqual(loader.get_pcsd_known_hosts(), dict()) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_lsr.info.loader.os.path.exists") + def test_json_error(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "not a json" + with mock.patch( + "ha_cluster_lsr.info.loader.open", + mock.mock_open(read_data=mock_data), + ) as mock_open: + with self.assertRaises(loader.JsonParseError) as cm: + loader.get_pcsd_known_hosts() + self.assertEqual( + cm.exception.kwargs, + dict( + data="not logging data", + data_desc="known hosts", + error="Expecting value: line 1 column 1 (char 0)", + additional_info=None, + ), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_lsr.info.loader.os.path.exists") + def test_json_empty(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = "{}" + with mock.patch( + "ha_cluster_lsr.info.loader.open", + mock.mock_open(read_data=mock_data), + ) as mock_open: + self.assertEqual( + loader.get_pcsd_known_hosts(), + dict(), + ) + mock_open.assert_called_once_with( + self.file_path, "r", encoding="utf-8" + ) + mock_exists.assert_called_once_with(self.file_path) + + @mock.patch("ha_cluster_lsr.info.loader.os.path.exists") + def test_extract(self, mock_exists: mock.Mock) -> None: + mock_exists.return_value = True + mock_data = json.dumps( + dict( + known_hosts=dict( + node1=dict(), + node2=dict(dest_list=[]), + node3=dict(dest_list=[dict()]), + node4=dict(dest_list=[dict(addr="node4A")]), + node5=dict(dest_list=[dict(port="10005")]), + node6=dict(dest_list=[dict(addr="node6A", port="10006")]), + node7=dict( + dest_list=[dict(addr="2001:db8::7", port="10007")] + ), + node8=dict( + dest_list=[ + dict(addr="192.0.2.8", port="10008"), + dict(addr="node8B"), + ] + ), + ) + ) + ) + with mock.patch( + "ha_cluster_lsr.info.loader.open", + 
mock.mock_open(read_data=mock_data),
+        ) as mock_open:
+            self.assertEqual(
+                loader.get_pcsd_known_hosts(),
+                dict(
+                    node4="node4A",
+                    node6="node6A:10006",
+                    node7="[2001:db8::7]:10007",
+                    node8="192.0.2.8:10008",
+                ),
+            )
+            mock_open.assert_called_once_with(
+                self.file_path, "r", encoding="utf-8"
+            )
+        mock_exists.assert_called_once_with(self.file_path)

From 1fed0d2038b8969ca97415ee60145f2f5de3aea8 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek
Date: Mon, 7 Oct 2024 17:19:32 +0200
Subject: [PATCH 5/8] refactor: ha_cluster_info: cleanup

Implementing changes proposed in code review
---
 library/ha_cluster_info.py                   |  42 +-
 module_utils/ha_cluster_lsr/info/exporter.py | 131 ++++--
 module_utils/ha_cluster_lsr/info/loader.py   |  11 +-
 tests/unit/test_ha_cluster_info.py           |  81 ++--
 tests/unit/test_info_exporter.py             | 436 +++++++++++--------
 tests/unit/test_info_loader.py               |  26 +-
 6 files changed, 423 insertions(+), 304 deletions(-)

diff --git a/library/ha_cluster_info.py b/library/ha_cluster_info.py
index f6c3e453..653a8616 100644
--- a/library/ha_cluster_info.py
+++ b/library/ha_cluster_info.py
@@ -104,7 +104,11 @@ def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]:
     result: dict[str, Any] = dict()
     cmd_runner = get_cmd_runner(module)

-    result["ha_cluster_start_on_boot"] = loader.get_start_on_boot(cmd_runner)
+    corosync_enabled = loader.is_service_enabled(cmd_runner, "corosync")
+    pacemaker_enabled = loader.is_service_enabled(cmd_runner, "pacemaker")
+    result["ha_cluster_start_on_boot"] = exporter.export_start_on_boot(
+        corosync_enabled, pacemaker_enabled
+    )

     # Corosync config is available via CLI since pcs-0.10.8, via API v2 since
     # pcs-0.12.0 and pcs-0.11.9. For old pcs versions, CLI must be used, and
@@ -118,25 +122,23 @@ def export_cluster_configuration(module: AnsibleModule) -> Dict[str, Any]:
     known_hosts_pcs = loader.get_pcsd_known_hosts()

     # Convert corosync config to role format
-    corosync_conf_role = exporter.export_corosync_options(corosync_conf_pcs)
-    for key in (
-        "ha_cluster_cluster_name",
-        "ha_cluster_transport",
-        "ha_cluster_totem",
-        "ha_cluster_quorum",
-    ):
-        if key in corosync_conf_role:
-            result[key] = corosync_conf_role[key]
-
-    # Convert cluster definition to role format
-    try:
-        result["ha_cluster_node_options"] = exporter.export_cluster_nodes(
-            corosync_conf_pcs["nodes"], known_hosts_pcs
-        )
-    except KeyError as e:
-        raise exporter.JsonMissingKey(
-            e.args[0], corosync_conf_pcs, "corosync configuration"
-        ) from e
+    result["ha_cluster_cluster_name"] = exporter.export_corosync_cluster_name(
+        corosync_conf_pcs
+    )
+    result["ha_cluster_transport"] = exporter.export_corosync_transport(
+        corosync_conf_pcs
+    )
+    exported_totem = exporter.export_corosync_totem(corosync_conf_pcs)
+    if exported_totem:
+        result["ha_cluster_totem"] = exported_totem
+    exported_quorum = exporter.export_corosync_quorum(corosync_conf_pcs)
+    if exported_quorum:
+        result["ha_cluster_quorum"] = exported_quorum
+
+    # Convert nodes definition to role format
+    result["ha_cluster_node_options"] = exporter.export_cluster_nodes(
+        corosync_conf_pcs, known_hosts_pcs
+    )

     return result

diff --git a/module_utils/ha_cluster_lsr/info/exporter.py b/module_utils/ha_cluster_lsr/info/exporter.py
index 78dcfdbf..8e8f0f51 100644
--- a/module_utils/ha_cluster_lsr/info/exporter.py
+++ b/module_utils/ha_cluster_lsr/info/exporter.py
@@ -11,7 +11,8 @@
 # pylint: disable=invalid-name
 __metaclass__ = type

-from typing import Any, Dict, List
+from contextlib import contextmanager
+from typing import Any, Dict,
Iterator, List


 class JsonMissingKey(Exception):
@@ -39,18 +40,42 @@ def _dict_to_nv_list(input_dict: Dict[str, Any]) -> List[Dict[str, Any]]:
     return [dict(name=name, value=value) for name, value in input_dict.items()]


-def export_corosync_options(
+@contextmanager
+def _handle_missing_key(data: Dict[str, Any], data_desc: str) -> Iterator[None]:
+    try:
+        yield
+    except KeyError as e:
+        raise JsonMissingKey(e.args[0], data, data_desc) from e
+
+
+def export_start_on_boot(
+    corosync_enabled: bool, pacemaker_enabled: bool
+) -> bool:
+    """
+    Transform cluster service status to start_on_boot
+    """
+    return corosync_enabled or pacemaker_enabled
+
+
+def export_corosync_cluster_name(corosync_conf_dict: Dict[str, Any]) -> str:
+    """
+    Extract cluster name from corosync config in pcs format
+
+    corosync_conf_dict -- corosync config structure provided by pcs
+    """
+    with _handle_missing_key(corosync_conf_dict, "corosync configuration"):
+        return corosync_conf_dict["cluster_name"]
+
+
+def export_corosync_transport(
     corosync_conf_dict: Dict[str, Any]
 ) -> Dict[str, Any]:
     """
-    Transform corosync config from pcs format to role format excluding nodes
+    Export transport options in role format from corosync config in pcs format

     corosync_conf_dict -- corosync config structure provided by pcs
     """
-    result: Dict[str, Any] = dict()
-    try:
-        result["ha_cluster_cluster_name"] = corosync_conf_dict["cluster_name"]
-
+    with _handle_missing_key(corosync_conf_dict, "corosync configuration"):
         transport = dict(type=corosync_conf_dict["transport"].lower())
         if corosync_conf_dict["transport_options"]:
             transport["options"] = _dict_to_nv_list(
@@ -71,26 +96,43 @@ def export_corosync_options(
             transport["crypto"] = _dict_to_nv_list(
                 corosync_conf_dict["crypto_options"]
             )
-        result["ha_cluster_transport"] = transport
+        return transport

+
+def export_corosync_totem(corosync_conf_dict: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Export totem options in role format from corosync config in pcs format
+
+    corosync_conf_dict -- corosync config structure provided by pcs
+    """
+    with _handle_missing_key(corosync_conf_dict, "corosync configuration"):
+        result: Dict[str, Any] = dict()
         if corosync_conf_dict["totem_options"]:
-            result["ha_cluster_totem"] = dict(
-                options=_dict_to_nv_list(corosync_conf_dict["totem_options"])
+            result["options"] = _dict_to_nv_list(
+                corosync_conf_dict["totem_options"]
             )
+        return result
+
+
+def export_corosync_quorum(
+    corosync_conf_dict: Dict[str, Any]
+) -> Dict[str, Any]:
+    """
+    Export quorum options in role format from corosync config in pcs format

+    corosync_conf_dict -- corosync config structure provided by pcs
+    """
+    with _handle_missing_key(corosync_conf_dict, "corosync configuration"):
+        result: Dict[str, Any] = dict()
         if corosync_conf_dict["quorum_options"]:
-            result["ha_cluster_quorum"] = dict(
-                options=_dict_to_nv_list(corosync_conf_dict["quorum_options"])
+            result["options"] = _dict_to_nv_list(
+                corosync_conf_dict["quorum_options"]
             )
-    except KeyError as e:
-        raise JsonMissingKey(
-            e.args[0], corosync_conf_dict, "corosync configuration"
-        ) from e
-    return result
+        return result


 def export_cluster_nodes(
-    corosync_conf_nodes: List[Dict[str, Any]], pcs_node_addr: Dict[str, str]
+    corosync_conf_dict: Dict[str, Any], pcs_node_addr: Dict[str, str]
 ) -> List[Dict[str, Any]]:
     """
     Transform node configuration from pcs format to role format
@@ -98,31 +140,30 @@ def export_cluster_nodes(

     corosync_conf_dict -- corosync config structure provided by pcs
     pcs_node_addr -- dict holding pcs address
for cluster nodes
     """
-    node_list: List[Dict[str, Any]] = []
-    if not corosync_conf_nodes:
-        return node_list
-    for index, node_dict in enumerate(corosync_conf_nodes):
-        # corosync node configuration
-        try:
-            one_node = dict(
-                node_name=node_dict["name"],
-                corosync_addresses=[
-                    addr_dict["addr"]
-                    for addr_dict in sorted(
-                        node_dict["addrs"],
-                        key=lambda item: item["link"],
-                    )
-                ],
-            )
-        except KeyError as e:
-            raise JsonMissingKey(
-                e.args[0],
-                dict(nodes=corosync_conf_nodes),
+    with _handle_missing_key(corosync_conf_dict, "corosync configuration"):
+        node_list: List[Dict[str, Any]] = []
+        corosync_nodes = corosync_conf_dict["nodes"]
+        if not corosync_nodes:
+            return node_list
+        for index, node_dict in enumerate(corosync_nodes):
+            # corosync node configuration
+            with _handle_missing_key(
+                corosync_conf_dict,
                 f"corosync configuration for node on index {index}",
-            ) from e
-        # pcs node configuration
-        if one_node["node_name"] in pcs_node_addr:
-            one_node["pcs_address"] = pcs_node_addr[one_node["node_name"]]
-        # finish one node export
-        node_list.append(one_node)
-    return node_list
+            ):
+                one_node = dict(
+                    node_name=node_dict["name"],
+                    corosync_addresses=[
+                        addr_dict["addr"]
+                        for addr_dict in sorted(
+                            node_dict["addrs"],
+                            key=lambda item: item["link"],
+                        )
+                    ],
+                )
+            # pcs node configuration
+            if one_node["node_name"] in pcs_node_addr:
+                one_node["pcs_address"] = pcs_node_addr[one_node["node_name"]]
+            # finish one node export
+            node_list.append(one_node)
+        return node_list
diff --git a/module_utils/ha_cluster_lsr/info/loader.py b/module_utils/ha_cluster_lsr/info/loader.py
index f755826c..408566e2 100644
--- a/module_utils/ha_cluster_lsr/info/loader.py
+++ b/module_utils/ha_cluster_lsr/info/loader.py
@@ -85,7 +85,7 @@ def kwargs(self) -> Dict[str, Any]:
         )


-def _is_service_enabled(run_command: CommandRunner, service: str) -> bool:
+def is_service_enabled(run_command: CommandRunner, service: str) -> bool:
     """
     Check whether a specified service is enabled in the OS

@@ -102,15 +102,6 @@ def _is_service_enabled(run_command: CommandRunner, service: str) -> bool:
     return rc == 0


-def get_start_on_boot(run_command: CommandRunner) -> bool:
-    """
-    Detect whether a cluster is configured to start on boot
-    """
-    return _is_service_enabled(run_command, "corosync") or _is_service_enabled(
-        run_command, "pacemaker"
-    )
-
-
 def _call_pcs_cli(
     run_command: CommandRunner, command: List[str]
 ) -> Dict[str, Any]:
diff --git a/tests/unit/test_ha_cluster_info.py b/tests/unit/test_ha_cluster_info.py
index 52443dc7..e1fbe1e9 100644
--- a/tests/unit/test_ha_cluster_info.py
+++ b/tests/unit/test_ha_cluster_info.py
@@ -11,6 +11,7 @@
 import json
 import sys
 from importlib import import_module
+from typing import List
 from unittest import TestCase, mock

 sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module(
@@ -23,10 +24,28 @@
 class ExportClusterConfiguration(TestCase):
     maxDiff = None

-    @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts")
-    def test_export_minimal(
+    @staticmethod
+    def fixture_expected_runner_calls() -> List[mock._Call]:
+        common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"})
+        return [
+            mock.call(
+                ["systemctl", "is-enabled", "corosync.service"], **common_args
+            ),
+            mock.call(
+                ["systemctl", "is-enabled", "pacemaker.service"], **common_args
+            ),
+            mock.call(
+                ["pcs", "cluster", "config", "--output-format=json"],
+                **common_args,
+            ),
+        ]
+
+    def assert_export_minimal(
         self,
         mock_load_pcsd_known_hosts: mock.Mock,
+        corosync_enabled: bool,
+
pacemaker_enabled: bool, + cluster_start_on_boot: bool, ) -> None: module_mock = mock.Mock() module_mock.run_command = mock.Mock() @@ -50,7 +69,8 @@ def test_export_minimal( ], ) runner_mock.side_effect = [ - (0, "", ""), + (0 if corosync_enabled else 1, "", ""), + (0 if pacemaker_enabled else 1, "", ""), (0, json.dumps(corosync_conf_data), ""), ] @@ -59,7 +79,7 @@ def test_export_minimal( self.assertEqual( ha_cluster_info.export_cluster_configuration(module_mock), dict( - ha_cluster_start_on_boot=True, + ha_cluster_start_on_boot=cluster_start_on_boot, ha_cluster_cluster_name="my-cluster", ha_cluster_transport=dict(type="knet"), ha_cluster_node_options=[ @@ -71,21 +91,30 @@ def test_export_minimal( ), ) - common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) - expected_calls = [ - mock.call( - ["systemctl", "is-enabled", "corosync.service"], **common_args - ), - mock.call( - ["pcs", "cluster", "config", "--output-format=json"], - **common_args, - ), - ] + expected_calls = self.fixture_expected_runner_calls() runner_mock.assert_has_calls(expected_calls) self.assertEqual(runner_mock.call_count, len(expected_calls)) mock_load_pcsd_known_hosts.assert_called_once_with() + @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") + def test_export_minimal_enabled( + self, + mock_load_pcsd_known_hosts: mock.Mock, + ) -> None: + self.assert_export_minimal( + mock_load_pcsd_known_hosts, True, False, True + ) + + @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") + def test_export_minimal_disabled( + self, + mock_load_pcsd_known_hosts: mock.Mock, + ) -> None: + self.assert_export_minimal( + mock_load_pcsd_known_hosts, False, False, False + ) + @mock.patch("ha_cluster_info.loader.get_pcsd_known_hosts") def test_export( self, @@ -118,6 +147,7 @@ def test_export( ], ) runner_mock.side_effect = [ + (0, "", ""), (0, "", ""), (0, json.dumps(corosync_conf_data), ""), ] @@ -157,16 +187,7 @@ def test_export( ), ) - common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) - expected_calls = [ - mock.call( - ["systemctl", "is-enabled", "corosync.service"], **common_args - ), - mock.call( - ["pcs", "cluster", "config", "--output-format=json"], - **common_args, - ), - ] + expected_calls = self.fixture_expected_runner_calls() runner_mock.assert_has_calls(expected_calls) self.assertEqual(runner_mock.call_count, len(expected_calls)) @@ -192,6 +213,7 @@ def test_missing_corosync_nodes_key( quorum_options=dict(), ) runner_mock.side_effect = [ + (0, "", ""), (0, "", ""), (0, json.dumps(corosync_conf_data), ""), ] @@ -212,16 +234,7 @@ def test_missing_corosync_nodes_key( ), ) - common_args = dict(check_rc=False, environ_update={"LC_ALL": "C"}) - expected_calls = [ - mock.call( - ["systemctl", "is-enabled", "corosync.service"], **common_args - ), - mock.call( - ["pcs", "cluster", "config", "--output-format=json"], - **common_args, - ), - ] + expected_calls = self.fixture_expected_runner_calls() runner_mock.assert_has_calls(expected_calls) self.assertEqual(runner_mock.call_count, len(expected_calls)) diff --git a/tests/unit/test_info_exporter.py b/tests/unit/test_info_exporter.py index 4680e0dc..a8d5106e 100644 --- a/tests/unit/test_info_exporter.py +++ b/tests/unit/test_info_exporter.py @@ -9,7 +9,7 @@ import sys from importlib import import_module -from typing import Any, Dict, List +from typing import Any, Dict from unittest import TestCase sys.modules["ansible.module_utils.ha_cluster_lsr"] = import_module( @@ -40,167 +40,152 @@ def test_two_items(self) -> None: ) -class 
ExportCorosyncConf(TestCase): +class ExportStartOnBoot(TestCase): + def test_main(self) -> None: + self.assertFalse(exporter.export_start_on_boot(False, False)) + self.assertTrue(exporter.export_start_on_boot(False, True)) + self.assertTrue(exporter.export_start_on_boot(True, False)) + self.assertTrue(exporter.export_start_on_boot(True, True)) + + +class ExportCorosyncClusterName(TestCase): maxDiff = None - def assert_missing_key(self, data: Dict[str, Any], key: str) -> None: + def test_missing_key(self) -> None: + corosync_data: Dict[str, Any] = dict() with self.assertRaises(exporter.JsonMissingKey) as cm: - exporter.export_corosync_options(data) + exporter.export_corosync_cluster_name(corosync_data) self.assertEqual( cm.exception.kwargs, - dict(data=data, key=key, data_desc="corosync configuration"), + dict( + data=corosync_data, + key="cluster_name", + data_desc="corosync configuration", + ), ) - def test_missing_keys(self) -> None: - self.assert_missing_key(dict(), "cluster_name") - self.assert_missing_key(dict(cluster_name="x"), "transport") - self.assert_missing_key( - dict(cluster_name="x", transport="x"), "transport_options" + def test_minimal(self) -> None: + corosync_data: Dict[str, Any] = dict( + cluster_name="my-cluster", ) + role_data = exporter.export_corosync_cluster_name(corosync_data) + self.assertEqual(role_data, "my-cluster") + + +class ExportCorosyncTransport(TestCase): + maxDiff = None + + def assert_missing_key( + self, corosync_data: Dict[str, Any], key: str + ) -> None: + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_corosync_transport(corosync_data) + self.assertEqual( + cm.exception.kwargs, + dict( + data=corosync_data, key=key, data_desc="corosync configuration" + ), + ) + + def test_missing_key(self) -> None: self.assert_missing_key( - dict(cluster_name="x", transport="x", transport_options=dict()), - "links_options", + dict(), + "transport", ) self.assert_missing_key( dict( - cluster_name="x", transport="x", - transport_options=dict(), - links_options=dict(), ), - "compression_options", + "transport_options", ) self.assert_missing_key( dict( - cluster_name="x", transport="x", transport_options=dict(), - links_options=dict(), - compression_options=dict(), ), - "crypto_options", + "links_options", ) self.assert_missing_key( dict( - cluster_name="x", transport="x", transport_options=dict(), links_options=dict(), - compression_options=dict(), - crypto_options=dict(), ), - "totem_options", + "compression_options", ) self.assert_missing_key( dict( - cluster_name="x", transport="x", transport_options=dict(), links_options=dict(), compression_options=dict(), - crypto_options=dict(), - totem_options=dict(), ), - "quorum_options", + "crypto_options", ) def test_minimal(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", + corosync_data: Dict[str, Any] = dict( transport="KNET", transport_options=dict(), links_options=dict(), compression_options=dict(), crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), - ) - role_data = exporter.export_corosync_options(pcs_data) - self.assertEqual( - role_data, - dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict(type="knet"), - ), ) + role_data = exporter.export_corosync_transport(corosync_data) + self.assertEqual(role_data, dict(type="knet")) def test_simple_options_mirroring(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", + corosync_data: Dict[str, Any] = dict( transport="KNET", - totem_options=dict(totem1="a", totem2="b"), 
transport_options=dict(transport1="c", transport2="d"), compression_options=dict(compression1="e", compression2="f"), crypto_options=dict(crypto1="g", crypto2="h"), - quorum_options=dict(quorum1="i", quorum2="j"), links_options=dict(), ) - role_data = exporter.export_corosync_options(pcs_data) + role_data = exporter.export_corosync_transport(corosync_data) self.assertEqual( role_data, dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - options=[ - dict(name="transport1", value="c"), - dict(name="transport2", value="d"), - ], - compression=[ - dict(name="compression1", value="e"), - dict(name="compression2", value="f"), - ], - crypto=[ - dict(name="crypto1", value="g"), - dict(name="crypto2", value="h"), - ], - ), - ha_cluster_totem=dict( - options=[ - dict(name="totem1", value="a"), - dict(name="totem2", value="b"), - ], - ), - ha_cluster_quorum=dict( - options=[ - dict(name="quorum1", value="i"), - dict(name="quorum2", value="j"), - ], - ), + type="knet", + options=[ + dict(name="transport1", value="c"), + dict(name="transport2", value="d"), + ], + compression=[ + dict(name="compression1", value="e"), + dict(name="compression2", value="f"), + ], + crypto=[ + dict(name="crypto1", value="g"), + dict(name="crypto2", value="h"), + ], ), ) def test_one_link(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", + corosync_data: Dict[str, Any] = dict( transport="KNET", transport_options=dict(), links_options={"0": dict(name1="value1", name2="value2")}, compression_options=dict(), crypto_options=dict(), - totem_options=dict(), - quorum_options=dict(), ) - role_data = exporter.export_corosync_options(pcs_data) + role_data = exporter.export_corosync_transport(corosync_data) self.assertEqual( role_data, dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - links=[ - [ - dict(name="name1", value="value1"), - dict(name="name2", value="value2"), - ] - ], - ), + type="knet", + links=[ + [ + dict(name="name1", value="value1"), + dict(name="name2", value="value2"), + ] + ], ), ) def test_more_links(self) -> None: - pcs_data = dict( - cluster_name="my-cluster", + corosync_data: Dict[str, Any] = dict( transport="KNET", transport_options=dict(), links_options={ @@ -210,31 +195,104 @@ def test_more_links(self) -> None: }, compression_options=dict(), crypto_options=dict(), + ) + role_data = exporter.export_corosync_transport(corosync_data) + self.assertEqual( + role_data, + dict( + type="knet", + links=[ + [ + dict(name="linknumber", value="0"), + dict(name="name0", value="value0"), + ], + [ + dict(name="linknumber", value="7"), + dict(name="name7", value="value7"), + ], + [ + dict(name="linknumber", value="3"), + dict(name="name3", value="value3"), + ], + ], + ), + ) + + +class ExportCorosyncTotem(TestCase): + maxDiff = None + + def test_missing_key(self) -> None: + corosync_data: Dict[str, Any] = dict() + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_corosync_totem(corosync_data) + self.assertEqual( + cm.exception.kwargs, + dict( + data=corosync_data, + key="totem_options", + data_desc="corosync configuration", + ), + ) + + def test_minimal(self) -> None: + corosync_data: Dict[str, Any] = dict( totem_options=dict(), + ) + role_data = exporter.export_corosync_totem(corosync_data) + self.assertEqual(role_data, dict()) + + def test_simple_options_mirroring(self) -> None: + corosync_data: Dict[str, Any] = dict( + totem_options=dict(totem1="a", totem2="b"), + ) + role_data = 
exporter.export_corosync_totem(corosync_data) + self.assertEqual( + role_data, + dict( + options=[ + dict(name="totem1", value="a"), + dict(name="totem2", value="b"), + ], + ), + ) + + +class ExportCorosyncQuorum(TestCase): + maxDiff = None + + def test_missing_key(self) -> None: + corosync_data: Dict[str, Any] = dict() + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_corosync_quorum(corosync_data) + self.assertEqual( + cm.exception.kwargs, + dict( + data=corosync_data, + key="quorum_options", + data_desc="corosync configuration", + ), + ) + + def test_minimal(self) -> None: + corosync_data: Dict[str, Any] = dict( quorum_options=dict(), ) - role_data = exporter.export_corosync_options(pcs_data) + role_data = exporter.export_corosync_quorum(corosync_data) + self.assertEqual(role_data, dict()) + + def test_simple_options_mirroring(self) -> None: + corosync_data: Dict[str, Any] = dict( + quorum_options=dict(quorum1="i", quorum2="j"), + ) + role_data = exporter.export_corosync_quorum(corosync_data) self.assertEqual( role_data, dict( - ha_cluster_cluster_name="my-cluster", - ha_cluster_transport=dict( - type="knet", - links=[ - [ - dict(name="linknumber", value="0"), - dict(name="name0", value="value0"), - ], - [ - dict(name="linknumber", value="7"), - dict(name="name7", value="value7"), - ], - [ - dict(name="linknumber", value="3"), - dict(name="name3", value="value3"), - ], - ], - ), + options=[ + dict(name="quorum1", value="i"), + dict(name="quorum2", value="j"), + ], ), ) @@ -242,49 +300,77 @@ def test_more_links(self) -> None: class ExportClusterNodes(TestCase): maxDiff = None - def assert_missing_key( - self, data: List[Dict[str, Any]], key: str, index: str = "0" + def assert_missing_node_key( + self, corosync_data: Dict[str, Any], key: str, index: int = 0 ) -> None: with self.assertRaises(exporter.JsonMissingKey) as cm: - exporter.export_cluster_nodes(data, {}) + exporter.export_cluster_nodes(corosync_data, {}) self.assertEqual( cm.exception.kwargs, dict( - data=dict(nodes=data), + data=corosync_data, key=key, data_desc=f"corosync configuration for node on index {index}", ), ) + def test_missing_key(self) -> None: + corosync_data: Dict[str, Any] = dict() + with self.assertRaises(exporter.JsonMissingKey) as cm: + exporter.export_cluster_nodes(corosync_data, {}) + self.assertEqual( + cm.exception.kwargs, + dict( + data=corosync_data, + key="nodes", + data_desc="corosync configuration", + ), + ) + def test_no_nodes(self) -> None: - self.assertEqual(exporter.export_cluster_nodes([], {}), []) + self.assertEqual( + exporter.export_cluster_nodes(dict(nodes=[]), {}), + [], + ) def test_corosync_nodes_missing_keys(self) -> None: - corosync_data: List[Dict[str, Any]] = [dict()] - self.assert_missing_key(corosync_data, "name") + corosync_data: Dict[str, Any] = dict(nodes=[dict()]) + self.assert_missing_node_key(corosync_data, "name") - corosync_data = [dict(name="nodename")] - self.assert_missing_key(corosync_data, "addrs") + corosync_data = dict(nodes=[dict(name="nodename")]) + self.assert_missing_node_key(corosync_data, "addrs") - corosync_data = [dict(name="nodename", addrs=[dict()])] - self.assert_missing_key(corosync_data, "link") + corosync_data = dict(nodes=[dict(name="nodename", addrs=[dict()])]) + self.assert_missing_node_key(corosync_data, "link") - corosync_data = [dict(name="nodename", addrs=[dict(link="0")])] - self.assert_missing_key(corosync_data, "addr") + corosync_data = dict( + nodes=[dict(name="nodename", addrs=[dict(link="0")])] + ) + 
self.assert_missing_node_key(corosync_data, "addr") + + corosync_data = dict( + nodes=[ + dict(name="nodename", addrs=[dict(link="0", addr="addr1")]), + dict(name="node2"), + ] + ) + self.assert_missing_node_key(corosync_data, "addrs", 1) def test_corosync_nodes_one_link(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", link="0", type="IPv4")], - ), - dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], - ), - ] + corosync_data: Dict[str, Any] = dict( + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="IPv4")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ] + ) role_data = exporter.export_cluster_nodes(corosync_data, {}) self.assertEqual( role_data, @@ -295,24 +381,26 @@ def test_corosync_nodes_one_link(self) -> None: ) def test_corosync_nodes_multiple_links(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[ - dict(addr="node1addr1", link="0", type="IPv4"), - dict(addr="node1addr2", link="1", type="IPv6"), - ], - ), - dict( - name="node2", - nodeid=2, - addrs=[ - dict(addr="node2addr1", link="0", type="IPv4"), - dict(addr="node2addr2", link="1", type="IPv6"), - ], - ), - ] + corosync_data: Dict[str, Any] = dict( + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[ + dict(addr="node1addr1", link="0", type="IPv4"), + dict(addr="node1addr2", link="1", type="IPv6"), + ], + ), + dict( + name="node2", + nodeid=2, + addrs=[ + dict(addr="node2addr1", link="0", type="IPv4"), + dict(addr="node2addr2", link="1", type="IPv6"), + ], + ), + ] + ) role_data = exporter.export_cluster_nodes(corosync_data, {}) self.assertEqual( role_data, @@ -329,13 +417,15 @@ def test_corosync_nodes_multiple_links(self) -> None: ) def test_corosync_nodes_no_address(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[], - ), - ] + corosync_data: Dict[str, Any] = dict( + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[], + ), + ] + ) role_data = exporter.export_cluster_nodes(corosync_data, {}) self.assertEqual( role_data, @@ -345,7 +435,7 @@ def test_corosync_nodes_no_address(self) -> None: ) def test_pcs_nodes_no_cluster_nodes(self) -> None: - corosync_data: List[Dict[str, Any]] = [] + corosync_data: Dict[str, Any] = dict(nodes=[]) pcs_data = dict(node1="node1A") role_data = exporter.export_cluster_nodes(corosync_data, pcs_data) self.assertEqual( @@ -354,18 +444,20 @@ def test_pcs_nodes_no_cluster_nodes(self) -> None: ) def test_pcs_nodes(self) -> None: - corosync_data = [ - dict( - name="node1", - nodeid=1, - addrs=[dict(addr="node1addr", link="0", type="FQDN")], - ), - dict( - name="node2", - nodeid=2, - addrs=[dict(addr="node2addr", link="0", type="FQDN")], - ), - ] + corosync_data: Dict[str, Any] = dict( + nodes=[ + dict( + name="node1", + nodeid=1, + addrs=[dict(addr="node1addr", link="0", type="FQDN")], + ), + dict( + name="node2", + nodeid=2, + addrs=[dict(addr="node2addr", link="0", type="FQDN")], + ), + ] + ) pcs_data = dict(node1="node1A", node3="node3A") role_data = exporter.export_cluster_nodes(corosync_data, pcs_data) self.assertEqual( diff --git a/tests/unit/test_info_loader.py b/tests/unit/test_info_loader.py index 05268dfd..4316b17e 100644 --- a/tests/unit/test_info_loader.py +++ b/tests/unit/test_info_loader.py @@ -20,15 +20,12 @@ class IsServiceEnabled(TestCase): - # pylint: disable=protected-access def setUp(self) -> None: self.runner_mock = 
mock.Mock() def test_is_enabled(self) -> None: self.runner_mock.return_value = (0, "enabled", "") - self.assertTrue( - loader._is_service_enabled(self.runner_mock, "corosync") - ) + self.assertTrue(loader.is_service_enabled(self.runner_mock, "corosync")) self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "corosync.service"], {"LC_ALL": "C"}, @@ -37,7 +34,7 @@ def test_is_enabled(self) -> None: def test_is_disabled(self) -> None: self.runner_mock.return_value = (1, "disabled", "") self.assertFalse( - loader._is_service_enabled(self.runner_mock, "pacemaker") + loader.is_service_enabled(self.runner_mock, "pacemaker") ) self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "pacemaker.service"], @@ -46,30 +43,13 @@ def test_is_disabled(self) -> None: def test_unexpected_output(self) -> None: self.runner_mock.return_value = (4, "not-found", "") - self.assertFalse(loader._is_service_enabled(self.runner_mock, "pcmk")) + self.assertFalse(loader.is_service_enabled(self.runner_mock, "pcmk")) self.runner_mock.assert_called_once_with( ["systemctl", "is-enabled", "pcmk.service"], {"LC_ALL": "C"}, ) -class GetStartOnBoot(TestCase): - @mock.patch("ha_cluster_lsr.info.loader._is_service_enabled") - def test_main(self, mock_is_enabled: mock.Mock) -> None: - runner_mock = mock.Mock() - mock_is_enabled.side_effect = [False, False] - self.assertFalse(loader.get_start_on_boot(runner_mock)) - - mock_is_enabled.side_effect = [True, False] - self.assertTrue(loader.get_start_on_boot(runner_mock)) - - mock_is_enabled.side_effect = [False, True] - self.assertTrue(loader.get_start_on_boot(runner_mock)) - - mock_is_enabled.side_effect = [True, True] - self.assertTrue(loader.get_start_on_boot(runner_mock)) - - class CallPcsCli(TestCase): # pylint: disable=protected-access def test_success(self) -> None: From b2121ca21bb7824777ea93980aee922084a040f2 Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Mon, 18 Nov 2024 12:52:46 +0100 Subject: [PATCH 6/8] feat: expose ha_cluster_info module via role variables --- README.md | 84 ++++++++++++----- defaults/main.yml | 2 + examples/export.yml | 23 +++++ tasks/main.yml | 90 +++++++++++-------- .../check-and-prepare-role-variables.yml | 10 +++ tests/tests_cluster_advanced_knet_full.yml | 7 +- .../tests_cluster_advanced_knet_implicit.yml | 7 +- tests/tests_cluster_advanced_udp_full.yml | 7 +- tests/tests_cluster_basic.yml | 7 +- tests/tests_cluster_basic_disabled.yml | 7 +- tests/tests_cluster_destroy.yml | 7 +- tests/tests_export_doesnt_destroy_cluster.yml | 65 ++++++++++++++ tests/tests_role_mode_consistency.yml | 47 ++++++++++ 13 files changed, 277 insertions(+), 86 deletions(-) create mode 100644 examples/export.yml create mode 100644 tests/tests_export_doesnt_destroy_cluster.yml create mode 100644 tests/tests_role_mode_consistency.yml diff --git a/README.md b/README.md index 2541b7e1..39f6ca8c 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,7 @@ An Ansible role for managing High Availability Clustering. * Pacemaker Access Control Lists (ACLs) * node and resource utilization * Pacemaker Alerts -* The role provides `ha_cluster_info` module which exports current cluster - configuration. 
The module is capable of exporting: +* The role is capable of exporting existing cluster configuration: * single-link or multi-link cluster * Corosync transport options including compression and encryption * Corosync totem options @@ -65,6 +64,13 @@ ansible-galaxy collection install -r meta/collection-requirements.yml ### Defined in `defaults/main.yml` +#### `ha_cluster_get_info` + +boolean, default: `false` + +Export existing cluster configuration. See +[Variables Exported by the Role](#variables-exported-by-the-role) for details. + #### `ha_cluster_enable_repos` boolean, default: `true` @@ -125,7 +131,7 @@ boolean, default: `true` If set to `true`, HA cluster will be configured on the hosts according to other variables. If set to `false`, all HA Cluster configuration will be purged from -target hosts. +target hosts. If set to `null`, HA cluster configuration will not be changed. #### `ha_cluster_start_on_boot` @@ -1462,6 +1468,9 @@ for clusters. The items are as follows: Note that you cannot run qnetd on a cluster node as fencing would disrupt qnetd operation. +If you set `ha_cluster_qnetd: null`, then qnetd host configuration will not be +changed. + You may take a look at [an example](#configuring-a-cluster-using-a-quorum-device). @@ -1551,9 +1560,9 @@ all: monitoring. Defaults to empty list if not set. Always refer to the devices using the long, stable device name (`/dev/disk/by-id/`). -## Export current cluster configuration +## Variables Exported by the Role -The role provides `ha_cluster_info` module which exports current cluster +The role contains `ha_cluster_info` module which exports current cluster configuration in a dictionary matching the structure of this role variables. If the role is run with these variables, it recreates the same cluster. @@ -1590,25 +1599,49 @@ may not be present in the export. responsibility to decide if you want to use existing keys or generate new ones. -To export current cluster configuration and store it in -`ha_cluster_info_result` variable, write a task like this: +To export current cluster configuration and store it in `ha_cluster_facts` +variable, run the role with `ha_cluster_get_info: true`. This triggers the +export once the role finishes configuring a cluster or a qnetd host. If you +want to trigger the export without modifying existing configuration, run the +role like this: ```yaml -- name: Get current cluster configuration - linux-system-roles.ha_cluster.ha_cluster_info: - register: ha_cluster_info_result +- hosts: node1 + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + roles: + - linux-system-roles.ha_cluster ``` -Then you may use the `ha_cluster_info_result` variable in your playbook -depending on your needs. +**Note:** By default, `ha_cluster_cluster_present` is set to `true` and +`ha_cluster_qnetd.present` is set to `false`. If you do not set the variables as +show in the example above, the role will reconfigure your cluster on the +specified hosts, remove qnetd configuration from the specified hosts, and then +export configuration. + +You may use the `ha_cluster_facts` variable in your playbook depending on your +needs. 
If you just want to see the content of the variable, use the ansible debug module like this: ```yaml -- name: Print ha_cluster_info_result variable - debug: - var: ha_cluster_info_result +- hosts: node1 + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + roles: + - linux-system-roles.ha_cluster + + tasks: + - name: Print ha_cluster_info_result variable + debug: + var: ha_cluster_facts ``` Or you may want to save the configuration to a file on your controller node in @@ -1616,12 +1649,21 @@ YAML format with a task similar to this one, so that you can write a playbook around it: ```yaml -- name: Save current cluster configuration to a file - delegate_to: localhost - copy: - content: "{{ - ha_cluster_info_result.ha_cluster | to_nice_yaml(sort_keys=false) }}" - dest: /path/to/file +- hosts: node1 + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + roles: + - linux-system-roles.ha_cluster + + tasks: + - name: Save current cluster configuration to a file + delegate_to: localhost + copy: + content: "{{ ha_cluster_facts | to_nice_yaml(sort_keys=false) }}" + dest: /path/to/file ``` ## Example Playbooks diff --git a/defaults/main.yml b/defaults/main.yml index 869e8276..f26fd7a2 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -75,3 +75,5 @@ ha_cluster_qnetd: present: false regenerate_keys: false start_on_boot: true + +ha_cluster_get_info: false diff --git a/examples/export.yml b/examples/export.yml new file mode 100644 index 00000000..6699904f --- /dev/null +++ b/examples/export.yml @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: MIT +--- +- name: Example ha_cluster role invocation - export cluster configuration + hosts: node1 + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + roles: + - linux-system-roles.ha_cluster + + tasks: + - name: Print ha_cluster_info_result variable + debug: + var: ha_cluster_facts + + - name: Save current cluster configuration to a file + delegate_to: localhost + copy: + content: "{{ ha_cluster_facts | to_nice_yaml(sort_keys=false) }}" + dest: /path/to/file + mode: "0640" diff --git a/tasks/main.yml b/tasks/main.yml index a4abf903..910789bf 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -18,43 +18,47 @@ use: "{{ (__ha_cluster_is_ostree | d(false)) | ternary('ansible.posix.rhel_rpm_ostree', omit) }}" -- name: Check and prepare role variables - include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/check-and-prepare-role-variables.yml # yamllint disable-line rule:line-length - -# The user is created by installing pacemaker packages. We just need to set the -# password. -- name: Provide a password for the hacluster user +- name: Preconfigure hosts when: - - ha_cluster_hacluster_password | string | length > 0 - block: - - name: Generate a password hash - # The arg `-6` means SHA512 based algorithms. 
- command: - cmd: >- - openssl passwd - -6 - -salt {{ ansible_hostname.replace('-', 'x') | quote }} - {{ ha_cluster_hacluster_password | string | quote }} - register: __ha_cluster_openssl_call_result - changed_when: false - no_log: true - - - name: Set hacluster password - user: - name: hacluster - password: "{{ __ha_cluster_openssl_call_result.stdout }}" - -- name: Configure shell - include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/configure-shell.yml # yamllint disable-line rule:line-length - -- name: Configure firewall and selinux - when: ha_cluster_cluster_present | bool or ha_cluster_qnetd.present | d(false) + - ha_cluster_cluster_present is not none or ha_cluster_qnetd is not none block: - - name: Configure firewall - include_tasks: firewall.yml + - name: Check and prepare role variables + include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/check-and-prepare-role-variables.yml # yamllint disable-line rule:line-length - - name: Configure selinux - include_tasks: selinux.yml + # The user is created by installing pacemaker packages. We just need to set + # the password. + - name: Provide a password for the hacluster user + when: + - ha_cluster_hacluster_password | string | length > 0 + block: + - name: Generate a password hash + # The arg `-6` means SHA512 based algorithms. + command: + cmd: >- + openssl passwd + -6 + -salt {{ ansible_hostname.replace('-', 'x') | quote }} + {{ ha_cluster_hacluster_password | string | quote }} + register: __ha_cluster_openssl_call_result + changed_when: false + no_log: true + + - name: Set hacluster password + user: + name: hacluster + password: "{{ __ha_cluster_openssl_call_result.stdout }}" + + - name: Configure shell + include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/configure-shell.yml + + - name: Configure firewall and selinux + when: ha_cluster_cluster_present | bool or ha_cluster_qnetd.present | d(false) # yamllint disable-line rule:line-length + block: + - name: Configure firewall + include_tasks: firewall.yml + + - name: Configure selinux + include_tasks: selinux.yml - name: Install and configure HA cluster when: ha_cluster_cluster_present | bool @@ -112,7 +116,9 @@ run_once: true - name: Remove cluster configuration - when: not ha_cluster_cluster_present + when: + - ha_cluster_cluster_present is not none + - not ha_cluster_cluster_present block: - name: Remove cluster configuration include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/cluster-destroy-{{ __ha_cluster_pcs_provider }}.yml # yamllint disable-line rule:line-length @@ -123,4 +129,18 @@ state: absent - name: Configure qnetd + when: ha_cluster_qnetd is not none include_tasks: shell_{{ ha_cluster_pacemaker_shell }}/pcs-qnetd.yml + +- name: Export configuration + when: + - ha_cluster_get_info | bool + - ha_cluster_pacemaker_shell == "pcs" + block: + - name: Fetch configuration + ha_cluster_info: + register: __ha_cluster_info + + - name: Set configuration fact + set_fact: + ha_cluster_facts: "{{ __ha_cluster_info.ha_cluster }}" diff --git a/tasks/shell_pcs/check-and-prepare-role-variables.yml b/tasks/shell_pcs/check-and-prepare-role-variables.yml index ae7d7d69..613b9902 100644 --- a/tasks/shell_pcs/check-and-prepare-role-variables.yml +++ b/tasks/shell_pcs/check-and-prepare-role-variables.yml @@ -1,5 +1,15 @@ # SPDX-License-Identifier: MIT --- +- name: Fail if 'cluster present' and 'qnetd present' are inconsistent + fail: + msg: > + Both 'ha_cluster_cluster_present' and 'ha_cluster_qnetd' must be either + defined or set to null + when: > + (ha_cluster_cluster_present is none 
and ha_cluster_qnetd is not none) + or + (ha_cluster_cluster_present is not none and ha_cluster_qnetd is none) + - name: Discover cluster node names set_fact: __ha_cluster_node_name: "{{ ha_cluster.node_name | d(inventory_hostname) }}" diff --git a/tests/tests_cluster_advanced_knet_full.yml b/tests/tests_cluster_advanced_knet_full.yml index 70cb27ce..e5d156f2 100644 --- a/tests/tests_cluster_advanced_knet_full.yml +++ b/tests/tests_cluster_advanced_knet_full.yml @@ -4,6 +4,7 @@ hosts: all vars_files: vars/main.yml vars: + ha_cluster_get_info: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: type: knet @@ -124,15 +125,11 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml - - name: Export cluster configuration - ha_cluster_info: - register: __test_info - - name: Check exported configuration vars: __test_exported_config: > {{ - __test_info.ha_cluster | combine({ + ha_cluster_facts | combine({ 'ha_cluster_node_options': 'it depends on test environment' }) }} diff --git a/tests/tests_cluster_advanced_knet_implicit.yml b/tests/tests_cluster_advanced_knet_implicit.yml index c8351dac..391c90c3 100644 --- a/tests/tests_cluster_advanced_knet_implicit.yml +++ b/tests/tests_cluster_advanced_knet_implicit.yml @@ -4,6 +4,7 @@ hosts: all vars_files: vars/main.yml vars: + ha_cluster_get_info: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: crypto: @@ -62,15 +63,11 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml - - name: Export cluster configuration - ha_cluster_info: - register: __test_info - - name: Check exported configuration vars: __test_exported_config: > {{ - __test_info.ha_cluster | combine({ + ha_cluster_facts | combine({ 'ha_cluster_node_options': 'it depends on test environment' }) }} diff --git a/tests/tests_cluster_advanced_udp_full.yml b/tests/tests_cluster_advanced_udp_full.yml index 16aa7360..87b448e9 100644 --- a/tests/tests_cluster_advanced_udp_full.yml +++ b/tests/tests_cluster_advanced_udp_full.yml @@ -4,6 +4,7 @@ hosts: all vars_files: vars/main.yml vars: + ha_cluster_get_info: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: type: udp @@ -86,15 +87,11 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml - - name: Export cluster configuration - ha_cluster_info: - register: __test_info - - name: Check exported configuration vars: __test_exported_config: > {{ - __test_info.ha_cluster | combine({ + ha_cluster_facts | combine({ 'ha_cluster_node_options': 'it depends on test environment' }) }} diff --git a/tests/tests_cluster_basic.yml b/tests/tests_cluster_basic.yml index 88e717ae..2fb21d4e 100644 --- a/tests/tests_cluster_basic.yml +++ b/tests/tests_cluster_basic.yml @@ -5,6 +5,7 @@ vars_files: vars/main.yml vars: ha_cluster_cluster_name: test-cluster + ha_cluster_get_info: true tasks: - name: Run test @@ -112,15 +113,11 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml - - name: Export cluster configuration - ha_cluster_info: - register: __test_info - - name: Check exported configuration vars: __test_exported_config: > {{ - __test_info.ha_cluster | combine({ + ha_cluster_facts | combine({ 'ha_cluster_node_options': 'it depends on test environment' }) }} diff --git a/tests/tests_cluster_basic_disabled.yml b/tests/tests_cluster_basic_disabled.yml index fbba0c04..c04593bf 100644 --- a/tests/tests_cluster_basic_disabled.yml +++ b/tests/tests_cluster_basic_disabled.yml @@ -6,6 
+6,7 @@ vars: ha_cluster_cluster_name: test-cluster ha_cluster_start_on_boot: false + ha_cluster_get_info: true tasks: - name: Run test @@ -37,15 +38,11 @@ - name: Check firewall and selinux state include_tasks: tasks/check_firewall_selinux.yml - - name: Export cluster configuration - ha_cluster_info: - register: __test_info - - name: Check exported configuration vars: __test_exported_config: > {{ - __test_info.ha_cluster | combine({ + ha_cluster_facts | combine({ 'ha_cluster_node_options': 'it depends on test environment' }) }} diff --git a/tests/tests_cluster_destroy.yml b/tests/tests_cluster_destroy.yml index db171363..2d17aaf3 100644 --- a/tests/tests_cluster_destroy.yml +++ b/tests/tests_cluster_destroy.yml @@ -3,6 +3,7 @@ - name: Deconfigure cluster hosts: all vars: + ha_cluster_get_info: true ha_cluster_cluster_present: false tasks: @@ -41,14 +42,10 @@ - not stat_cib_xml.stat.exists - not stat_fence_xvm_key.stat.exists - - name: Export cluster configuration - ha_cluster_info: - register: test_info - - name: Check exported configuration assert: that: - - test_info.ha_cluster == expected_configuration + - ha_cluster_facts == expected_configuration vars: expected_configuration: ha_cluster_cluster_present: false diff --git a/tests/tests_export_doesnt_destroy_cluster.yml b/tests/tests_export_doesnt_destroy_cluster.yml new file mode 100644 index 00000000..7b67d056 --- /dev/null +++ b/tests/tests_export_doesnt_destroy_cluster.yml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: MIT +--- +- name: Ensure that exporting cluster configuration doesn't modify the cluster + hosts: all + gather_facts: false + tasks: + - name: Run test + tags: tests::verify + vars: + ha_cluster_cluster_name: test-cluster + __test_exported_config: > + {{ + ha_cluster_facts | combine({ + 'ha_cluster_node_options': 'it depends on test environment' + }) + }} + __test_expected_config: + ha_cluster_cluster_present: true + ha_cluster_cluster_name: test-cluster + ha_cluster_start_on_boot: true + ha_cluster_transport: + type: udp + ha_cluster_node_options: "it depends on test environment" + block: + - name: Set up test environment + include_role: + name: linux-system-roles.ha_cluster + tasks_from: test_setup.yml + + # Create a cluster with a non-default configuration (default is knet) + - name: Run the role to create a cluster + include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_transport: + type: udp + + # Export and compare cluster configuration + - name: Run the role to export the cluster config 1 + include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + - name: Compare expected and exported configuration 1 + assert: + that: + - __test_exported_config == __test_expected_config + + # Export again to check that previous export didn't change the cluster + # If it was modified, it would have transport 'knet' instead of 'udp' + - name: Run the role to export the cluster config 2 + include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_cluster_present: null + ha_cluster_qnetd: null + ha_cluster_get_info: true + + - name: Compare expected and exported configuration 2 + assert: + that: + - __test_exported_config == __test_expected_config diff --git a/tests/tests_role_mode_consistency.yml b/tests/tests_role_mode_consistency.yml new file mode 100644 index 00000000..672b1cdf --- /dev/null +++ b/tests/tests_role_mode_consistency.yml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: MIT +--- +- name: Ensure 
cluster_present and qnetd_present are both set or both null + hosts: all + gather_facts: false + tasks: + - name: Run test + tags: tests::verify + block: + - name: Set up test environment + include_role: + name: linux-system-roles.ha_cluster + tasks_from: test_setup.yml + + - name: Test cluster_present==true and qnetd_present==null + block: + - name: Run the role + include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_qnetd: null + + rescue: + - name: Check errors + assert: + that: ansible_failed_result.msg == expected_msg + vars: + expected_msg: > + Both 'ha_cluster_cluster_present' and 'ha_cluster_qnetd' + must be either defined or set to null + + - name: Test cluster_present==null and qnetd_present==true + block: + - name: Run the role + include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_cluster_present: null + + rescue: + - name: Check errors + assert: + that: ansible_failed_result.msg == expected_msg + vars: + expected_msg: > + Both 'ha_cluster_cluster_present' and 'ha_cluster_qnetd' + must be either defined or set to null From 365f54c1c763b49b5d3eb6bea683116f47651896 Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Mon, 18 Nov 2024 12:53:58 +0100 Subject: [PATCH 7/8] test: update for ansible-test-2.18 --- .sanity-ansible-ignore-2.18.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.sanity-ansible-ignore-2.18.txt b/.sanity-ansible-ignore-2.18.txt index 794e99c2..e5ac0be9 100644 --- a/.sanity-ansible-ignore-2.18.txt +++ b/.sanity-ansible-ignore-2.18.txt @@ -6,3 +6,5 @@ plugins/modules/pcs_api_v2.py import-3.8!skip plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py import-3.8!skip plugins/module_utils/ha_cluster_lsr/pcs_api_v2_utils.py compile-3.8!skip plugins/modules/pcs_qdevice_certs.py import-3.8!skip +plugins/modules/ha_cluster_info.py validate-modules:missing-gplv3-license +tests/ha_cluster/unit/test_ha_cluster_info.py shebang!skip From 4be7b3c97567d59879249c837010648541d69df7 Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Wed, 4 Dec 2024 10:03:20 +0100 Subject: [PATCH 8/8] fix: rename the variable for exporting configuration --- README.md | 18 +++++++++--------- defaults/main.yml | 2 +- examples/export.yml | 2 +- tasks/main.yml | 2 +- tests/tests_cluster_advanced_knet_full.yml | 2 +- tests/tests_cluster_advanced_knet_implicit.yml | 2 +- tests/tests_cluster_advanced_udp_full.yml | 2 +- tests/tests_cluster_basic.yml | 2 +- tests/tests_cluster_basic_disabled.yml | 2 +- tests/tests_cluster_destroy.yml | 2 +- tests/tests_export_doesnt_destroy_cluster.yml | 4 ++-- 11 files changed, 20 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 39f6ca8c..77f636bf 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ ansible-galaxy collection install -r meta/collection-requirements.yml ### Defined in `defaults/main.yml` -#### `ha_cluster_get_info` +#### `ha_cluster_export_configuration` boolean, default: `false` @@ -1600,17 +1600,17 @@ may not be present in the export. ones. To export current cluster configuration and store it in `ha_cluster_facts` -variable, run the role with `ha_cluster_get_info: true`. This triggers the -export once the role finishes configuring a cluster or a qnetd host. If you -want to trigger the export without modifying existing configuration, run the -role like this: +variable, run the role with `ha_cluster_export_configuration: true`. This +triggers the export once the role finishes configuring a cluster or a qnetd +host. 
If you want to trigger the export without modifying existing +configuration, run the role like this: ```yaml - hosts: node1 vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true roles: - linux-system-roles.ha_cluster @@ -1618,7 +1618,7 @@ role like this: **Note:** By default, `ha_cluster_cluster_present` is set to `true` and `ha_cluster_qnetd.present` is set to `false`. If you do not set the variables as -show in the example above, the role will reconfigure your cluster on the +shown in the example above, the role will reconfigure your cluster on the specified hosts, remove qnetd configuration from the specified hosts, and then export configuration. @@ -1633,7 +1633,7 @@ module like this: vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true roles: - linux-system-roles.ha_cluster @@ -1653,7 +1653,7 @@ around it: vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true roles: - linux-system-roles.ha_cluster diff --git a/defaults/main.yml b/defaults/main.yml index f26fd7a2..14d0f969 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -76,4 +76,4 @@ ha_cluster_qnetd: regenerate_keys: false start_on_boot: true -ha_cluster_get_info: false +ha_cluster_export_configuration: false diff --git a/examples/export.yml b/examples/export.yml index 6699904f..b952bb50 100644 --- a/examples/export.yml +++ b/examples/export.yml @@ -5,7 +5,7 @@ vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true roles: - linux-system-roles.ha_cluster diff --git a/tasks/main.yml b/tasks/main.yml index 910789bf..304f6f84 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -134,7 +134,7 @@ - name: Export configuration when: - - ha_cluster_get_info | bool + - ha_cluster_export_configuration | bool - ha_cluster_pacemaker_shell == "pcs" block: - name: Fetch configuration diff --git a/tests/tests_cluster_advanced_knet_full.yml b/tests/tests_cluster_advanced_knet_full.yml index e5d156f2..b98885f7 100644 --- a/tests/tests_cluster_advanced_knet_full.yml +++ b/tests/tests_cluster_advanced_knet_full.yml @@ -4,7 +4,7 @@ hosts: all vars_files: vars/main.yml vars: - ha_cluster_get_info: true + ha_cluster_export_configuration: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: type: knet diff --git a/tests/tests_cluster_advanced_knet_implicit.yml b/tests/tests_cluster_advanced_knet_implicit.yml index 391c90c3..f6f12975 100644 --- a/tests/tests_cluster_advanced_knet_implicit.yml +++ b/tests/tests_cluster_advanced_knet_implicit.yml @@ -4,7 +4,7 @@ hosts: all vars_files: vars/main.yml vars: - ha_cluster_get_info: true + ha_cluster_export_configuration: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: crypto: diff --git a/tests/tests_cluster_advanced_udp_full.yml b/tests/tests_cluster_advanced_udp_full.yml index 87b448e9..38d12f77 100644 --- a/tests/tests_cluster_advanced_udp_full.yml +++ b/tests/tests_cluster_advanced_udp_full.yml @@ -4,7 +4,7 @@ hosts: all vars_files: vars/main.yml vars: - ha_cluster_get_info: true + ha_cluster_export_configuration: true ha_cluster_cluster_name: test-cluster ha_cluster_transport: type: udp diff --git a/tests/tests_cluster_basic.yml b/tests/tests_cluster_basic.yml index 2fb21d4e..fed578df 100644 --- a/tests/tests_cluster_basic.yml +++ b/tests/tests_cluster_basic.yml @@ -5,7 +5,7 
@@ vars_files: vars/main.yml vars: ha_cluster_cluster_name: test-cluster - ha_cluster_get_info: true + ha_cluster_export_configuration: true tasks: - name: Run test diff --git a/tests/tests_cluster_basic_disabled.yml b/tests/tests_cluster_basic_disabled.yml index c04593bf..fe09108c 100644 --- a/tests/tests_cluster_basic_disabled.yml +++ b/tests/tests_cluster_basic_disabled.yml @@ -6,7 +6,7 @@ vars: ha_cluster_cluster_name: test-cluster ha_cluster_start_on_boot: false - ha_cluster_get_info: true + ha_cluster_export_configuration: true tasks: - name: Run test diff --git a/tests/tests_cluster_destroy.yml b/tests/tests_cluster_destroy.yml index 2d17aaf3..03df8d2d 100644 --- a/tests/tests_cluster_destroy.yml +++ b/tests/tests_cluster_destroy.yml @@ -3,7 +3,7 @@ - name: Deconfigure cluster hosts: all vars: - ha_cluster_get_info: true + ha_cluster_export_configuration: true ha_cluster_cluster_present: false tasks: diff --git a/tests/tests_export_doesnt_destroy_cluster.yml b/tests/tests_export_doesnt_destroy_cluster.yml index 7b67d056..eef0bd6f 100644 --- a/tests/tests_export_doesnt_destroy_cluster.yml +++ b/tests/tests_export_doesnt_destroy_cluster.yml @@ -42,7 +42,7 @@ vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true - name: Compare expected and exported configuration 1 assert: @@ -57,7 +57,7 @@ vars: ha_cluster_cluster_present: null ha_cluster_qnetd: null - ha_cluster_get_info: true + ha_cluster_export_configuration: true - name: Compare expected and exported configuration 2 assert:
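
A note on using the exported data: because the dictionary stored in
`ha_cluster_facts` matches the structure of the role variables, a
configuration saved to a file by the tasks above can be fed straight back to
the role to recreate the cluster. Below is a minimal sketch under two
assumptions: `/path/to/file` is only the placeholder path used by the save
task in the examples above, and the hacluster password has to be supplied by
hand, since the export is not expected to include credentials.

```yaml
# SPDX-License-Identifier: MIT
---
- name: Recreate a cluster from an exported configuration (sketch)
  hosts: node1
  vars_files:
    # Placeholder: the file written by the
    # "Save current cluster configuration to a file" task above
    - /path/to/file
  vars:
    # Assumption: the exported data carries no credentials,
    # so the hacluster password must be provided explicitly
    ha_cluster_hacluster_password: your-password-here
  roles:
    - linux-system-roles.ha_cluster
```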