diff --git a/ceph/rados/core_workflows.py b/ceph/rados/core_workflows.py
index fa31d14972..c2765789a6 100644
--- a/ceph/rados/core_workflows.py
+++ b/ceph/rados/core_workflows.py
@@ -1498,6 +1498,31 @@ def fetch_host_node(self, daemon_type: str, daemon_id: str = None) -> object:
         )
         return None
 
+    def fetch_host_config(self, daemon_type: str, daemon_id: str = None) -> object:
+        """
+        Fetches the config of the given daemon in a cluster
+
+        Args:
+            daemon_type (str): type of daemon; for non-OSD daemons pass the
+                full name (e.g. "mon.<host>") since the admin socket needs an ID
+            daemon_id (str, optional): name of the daemon, ID in case of OSD's
+
+        Returns:
+            dict: host config
+        """
+        # Admin-socket queries must run on the host where the daemon lives
+        if daemon_type == "osd":
+            cmd = f"cephadm shell -- ceph daemon {daemon_type}.{daemon_id} config show --format json"
+            osd_node = self.fetch_host_node(daemon_type, daemon_id)
+            # exec_command() returns an (out, err) tuple; parse stdout only
+            json_str, _ = osd_node.exec_command(cmd=cmd, sudo=True)
+            return json.loads(json_str)
+        # NOTE(review): run_ceph_command() already appends --format json and
+        # returns parsed JSON -- do not wrap its result in json.loads() again
+        cmd = f"cephadm shell -- ceph daemon {daemon_type} config show"
+        json_output = self.run_ceph_command(cmd)
+        return json_output
+
     def verify_ec_overwrites(self, **kwargs) -> bool:
         """
         Creates RBD image on overwritten EC pool & replicated metadata pool
diff --git a/tests/rados/test_bluestore_min_alloc_size.py b/tests/rados/test_bluestore_min_alloc_size.py
index 7af1d06932..ccb49f41ff 100644
--- a/tests/rados/test_bluestore_min_alloc_size.py
+++ b/tests/rados/test_bluestore_min_alloc_size.py
@@ -37,7 +37,7 @@ def run(ceph_cluster, **kw):
     build = (re.search(regex, config.get("build", config.get("rhbuild")))).groups()[0]
     if not float(build) >= 7.0:
         log.info(
-            "Test running on version less than 7.0, skipping verifying Reads Balancer functionality"
+            "Test running on version less than 7.0, skipping verifying Bluestore_min_alloc_size functionality"
         )
         return 0
 
@@ -134,6 +134,54 @@ def run(ceph_cluster, **kw):
         f"OSDs successfully deployed with the new alloc size, and verified the size on OSD: {osd_id}"
     )
 
+    # Enhancement: also verify min_alloc_size via "ceph config show osd.<id>"
+    # and via the daemon admin socket:
+    #   ceph daemon osd.<id> config show | grep min_alloc_size
+    #
+    # Enhanced test: Check ceph config show and daemon output for each OSD
+    show_config_hdd = mon_obj.show_config(
+        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_hdd"
+    )
+
+    show_config_ssd = mon_obj.show_config(
+        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_ssd"
+    )
+
+    if not show_config_hdd == show_config_ssd == custom_min_alloc_size:
+        log.error(
+            f"min_alloc_size does not match the expected custom value of {custom_min_alloc_size}. "
+            f"min_alloc_size_ssd on cluster: {show_config_ssd}, "
+            f"min_alloc_size_hdd on cluster: {show_config_hdd}"
+        )
+        raise Exception("The output from ceph show is not as expected")
+
+    log.info(
+        "Ceph config show is successfully verified for the value of min_alloc_size"
+    )
+
+    # Fetch the running config from the OSD's daemon admin socket
+    json_out = rados_obj.fetch_host_config(
+        daemon_type="osd", daemon_id=osd_id
+    )
+    daemon_alloc_size_hdd = json_out["bluestore_min_alloc_size_hdd"]
+    daemon_alloc_size_ssd = json_out["bluestore_min_alloc_size_ssd"]
+
+    if (
+        not daemon_alloc_size_hdd
+        == daemon_alloc_size_ssd
+        == custom_min_alloc_size
+    ):
+        log.error(
+            f"min_alloc_size does not match the expected custom value of {custom_min_alloc_size}. "
+            f"min_alloc_size_ssd on cluster: {daemon_alloc_size_ssd}, "
+            f"min_alloc_size_hdd on cluster: {daemon_alloc_size_hdd}"
+        )
+        raise Exception("The output from ceph daemon is not as expected")
+
+    log.info(
+        "Ceph daemon is successfully verified for the value of min_alloc_size"
+    )
+
     mon_obj.set_config(
         section="osd",
         name="bluestore_min_alloc_size_hdd",
@@ -212,6 +260,49 @@ def remove_osd_check_metadata(target_osd, alloc_size):
             f"OSD : {rm_osd} could not be redeployed with alloc size {default_min_alloc_size}"
         )
         return 1
+    # Add checks for "ceph config show" and the daemon admin socket after
+    # the OSD is redeployed with the default min_alloc_size
+    #
+
+    show_config_hdd = mon_obj.show_config(
+        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_hdd"
+    )
+
+    show_config_ssd = mon_obj.show_config(
+        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_ssd"
+    )
+
+    if not show_config_hdd == show_config_ssd == default_min_alloc_size:
+        log.error(
+            f"min_alloc_size does not match the expected default value of {default_min_alloc_size}. "
+            f"min_alloc_size_ssd on cluster: {show_config_ssd}, "
+            f"min_alloc_size_hdd on cluster: {show_config_hdd}"
+        )
+        raise Exception("Value not updated for min_alloc_size on cluster")
+
+    log.info("Successfully verified min_alloc_size via ceph config show")
+
+    # NOTE(review): should this verify the redeployed OSD (rm_osd) rather than osd_id?
+    json_out = rados_obj.fetch_host_config(
+        daemon_type="osd", daemon_id=osd_id
+    )
+    daemon_alloc_size_hdd = json_out["bluestore_min_alloc_size_hdd"]
+    daemon_alloc_size_ssd = json_out["bluestore_min_alloc_size_ssd"]
+
+    if (
+        not daemon_alloc_size_hdd
+        == daemon_alloc_size_ssd
+        == default_min_alloc_size
+    ):
+        log.error(
+            f"min_alloc_size does not match the expected default value of {default_min_alloc_size}. "
+            f"min_alloc_size_ssd on cluster: {daemon_alloc_size_ssd}, "
+            f"min_alloc_size_hdd on cluster: {daemon_alloc_size_hdd}"
+        )
+        raise Exception("Value not updated for min_alloc_size on cluster")
+
+    log.info("Successfully verified min_alloc_size via the daemon admin socket")
+
     log.info(
         f"OSD : {rm_osd} successfully redeployed with alloc size {default_min_alloc_size}"
     )