blustore Checked #4255 (Open)
wants to merge 2 commits into base: master
Changes from 1 commit
104 changes: 104 additions & 0 deletions tests/rados/test_bluestore_min_alloc_size.py
@@ -3,6 +3,7 @@

"""

import json
import re
import time

@@ -114,6 +115,7 @@ def run(ceph_cluster, **kw):
daemon_type="osd", daemon_id=osd_id
)
log.debug(f"metadata details for OSD : {osd_id} is \n{osd_meta}\n")

if (
not min_alloc_size_hdd
== min_alloc_size_ssd
@@ -133,6 +135,60 @@
log.info(
f"OSDs successfully deployed with the new alloc size, and verified the size on OSD: {osd_id}"
Contributor comment: The pre_deployment_config_changes and post_deployment_config_verification are the same. Can you create a single method to verify the pre- and post-deployment of OSDs? Please discuss this with the team.
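A minimal sketch of what such a shared helper could look like, assuming the mon_obj, rados_obj and log objects already used in this test; the helper name verify_min_alloc_size and the expected_size parameter are hypothetical:

def verify_min_alloc_size(mon_obj, rados_obj, osd_id, expected_size) -> bool:
    # values reported by `ceph config show osd.<id>`
    show_hdd = mon_obj.show_config(
        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_hdd"
    )
    show_ssd = mon_obj.show_config(
        daemon="osd", id=osd_id, param="bluestore_min_alloc_size_ssd"
    )
    # compare as stripped strings, since the CLI wrappers return text
    if not str(show_hdd).strip() == str(show_ssd).strip() == str(expected_size):
        log.error(
            f"ceph config show mismatch on osd.{osd_id}: "
            f"hdd={show_hdd}, ssd={show_ssd}, expected={expected_size}"
        )
        return False

    # values reported by the OSD admin socket (`ceph daemon osd.<id> config show`)
    osd_node = rados_obj.fetch_host_node(daemon_type="osd", daemon_id=f"osd.{osd_id}")
    out, _ = osd_node.exec_command(
        cmd=f"cephadm shell -- ceph daemon osd.{osd_id} config show --format json",
        sudo=True,
    )
    cfg = json.loads(out)
    if not (
        str(cfg["bluestore_min_alloc_size_hdd"]).strip()
        == str(cfg["bluestore_min_alloc_size_ssd"]).strip()
        == str(expected_size)
    ):
        log.error(f"ceph daemon config show mismatch on osd.{osd_id}")
        return False
    return True

The pre-deployment block could then call this helper with custom_min_alloc_size and the redeployment block later in this file with default_min_alloc_size, removing the duplicated checks.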

)
"""
enhance the tests by adding ceph config show osd.id and
ceph daemon osd.id config show | grep min_alloc_size
"""
# Enhanced test: Check ceph config show and daemon output for each OSD
show_config_hdd = mon_obj.show_config(
daemon="osd", id=osd_id, param="bluestore_min_alloc_size_hdd"
)

show_config_ssd = mon_obj.show_config(
daemon="osd", id=osd_id, param="bluestore_min_alloc_size_ssd"
)

if not show_config_hdd == show_config_ssd == custom_min_alloc_size:
log.error(
f"min_alloc_size does not match the expected custom value of {custom_min_alloc_size}. "
f"min_alloc_size_ssd on cluster: {show_config_ssd}, "
f"min_alloc_size_hdd on cluster: {show_config_hdd}"
)
raise Exception("The output from ceph config show is not as expected")

log.info(
"Ceph config show is successfully verified for the value of min_alloc_size"
)

# determine the OSD node hosting the chosen OSD
osd_node = rados_obj.fetch_host_node(
daemon_type="osd", daemon_id=f"osd.{osd_id}"
)

# fetch the OSD daemon's runtime config via the admin socket
_cmd = f"cephadm shell -- ceph daemon osd.{osd_id} config show --format json"
json_str_out, _ = osd_node.exec_command(cmd=_cmd, sudo=True)
json_out = json.loads(json_str_out)
daemon_alloc_size_hdd = json_out["bluestore_min_alloc_size_hdd"]
daemon_alloc_size_ssd = json_out["bluestore_min_alloc_size_ssd"]

if (
not daemon_alloc_size_hdd
== daemon_alloc_size_ssd
== custom_min_alloc_size
):
log.error(
f"min_alloc_size does not match the expected custom value of {custom_min_alloc_size}. "
f"min_alloc_size_ssd on cluster: {daemon_alloc_size_ssd}, "
f"min_alloc_size_hdd on cluster: {daemon_alloc_size_hdd}"
)
raise Exception("The output from ceph daemon config show is not as expected")

log.info(
"Ceph daemon is successfully verified for the value of min_alloc_size"
)

return 0

mon_obj.set_config(
section="osd",
@@ -212,6 +268,54 @@ def remove_osd_check_metadata(target_osd, alloc_size):
f"OSD : {rm_osd} could not be redeployed with alloc size {default_min_alloc_size}"
)
return 1
"""
add checks for ceph config show and ceph daemon with min_alloc_size
"""

show_config_hdd = mon_obj.show_config(
daemon="osd", id=osd_id, param="bluestore_min_alloc_size_hdd"
)

show_config_ssd = mon_obj.show_config(
daemon="osd", id=osd_id, param="bluestore_min_alloc_size_ssd"
)

if not show_config_hdd == show_config_ssd == default_min_alloc_size:
log.error(
f"min_alloc_size does not match the expected default value of {default_min_alloc_size}. "
f"min_alloc_size_ssd on cluster: {show_config_ssd}, "
f"min_alloc_size_hdd on cluster: {show_config_hdd}"
)
raise Exception("Value not updated for min_alloc_size on cluster")

log.info("Successfully modified the value of min_alloc_size")

# determine the OSD node hosting the chosen OSD
osd_node = rados_obj.fetch_host_node(
daemon_type="osd", daemon_id=f"osd.{osd_id}"
)

# fetch the OSD daemon's runtime config via the admin socket
_cmd = f"cephadm shell -- ceph daemon osd.{osd_id} config show --format json"
json_str_out, _ = osd_node.exec_command(cmd=_cmd, sudo=True)
json_out = json.loads(json_str_out)
daemon_alloc_size_hdd = json_out["bluestore_min_alloc_size_hdd"]
daemon_alloc_size_ssd = json_out["bluestore_min_alloc_size_ssd"]

if (
not daemon_alloc_size_hdd
== daemon_alloc_size_ssd
== default_min_alloc_size
):
log.error(
f"min_alloc_size does not match the expected default value of {default_min_alloc_size}. "
f"min_alloc_size_ssd on cluster: {daemon_alloc_size_ssd}, "
f"min_alloc_size_hdd on cluster: {daemon_alloc_size_hdd}"
)
raise Exception("Value not updated for min_alloc_size on cluster")

log.info("Successfully modified the value of min_alloc_size")

log.info(
f"OSD : {rm_osd} successfully redeployed with alloc size {default_min_alloc_size}"
)