From fd3096c74db57a0ea98af5907ca659ec6da8fc23 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Thu, 18 Jul 2024 11:03:02 -0700 Subject: [PATCH 01/15] Enable show ip bgp on sup and -n all for show ip bgp network (#3417) #### What I did 1. Enable "show ip bgp" on the supervisor and "-n all" for "show ip bgp network". 2. Modify README.md to make the instructions for building and installing the wheel package clearer. 3. Improve the output format of the rexec command. #### How I did it Modify the code in show/main.py to enable "show ip bgp ..." on supervisors and modify show/bgp_frr_v4.py to add support for the new features. Update README.md. Modify the rexec implementation to improve the output format. Add unit tests for the above changes. #### How to verify it Run on a SONiC chassis. --- README.md | 7 + rcli/linecard.py | 14 +- rcli/rexec.py | 12 +- rcli/rshell.py | 4 +- rcli/utils.py | 15 ++ show/bgp_frr_v4.py | 38 +++++- show/main.py | 6 +- .../bgp_network_test_vector.py | 128 +++++++++++++++++- tests/conftest.py | 7 + tests/mock_tables/chassis_state_db.json | 3 + tests/remote_cli_test.py | 8 +- tests/remote_show_test.py | 57 ++++++++ tests/show_bgp_network_test.py | 7 +- 13 files changed, 282 insertions(+), 24 deletions(-) create mode 100644 tests/remote_show_test.py diff --git a/README.md b/README.md index f63b0832a2..91146bc9d0 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ A convenient alternative is to let the SONiC build system configure a build envi ``` python3 setup.py bdist_wheel ``` +Note: By default, this command does not update the wheel package in target/. To specify the destination directory for the wheel package, use the "-d" option. #### To run unit tests @@ -73,6 +74,12 @@ python3 setup.py bdist_wheel python3 setup.py test ``` +#### To install the package on a SONiC machine +``` +sudo pip uninstall sonic-utilities +sudo pip install YOUR_WHEEL_PACKAGE +``` +Note: Don't use "--force-reinstall".
### sonic-utilities-data diff --git a/rcli/linecard.py b/rcli/linecard.py index 73c13a73ef..f893428a42 100644 --- a/rcli/linecard.py +++ b/rcli/linecard.py @@ -8,7 +8,7 @@ import termios import tty -from .utils import get_linecard_ip +from .utils import get_linecard_ip, get_linecard_hostname_from_module_name, get_linecard_module_name_from_hostname from paramiko.py3compat import u from paramiko import Channel @@ -31,7 +31,17 @@ def __init__(self, linecard_name, username, password): if not self.ip: sys.exit(1) - self.linecard_name = linecard_name + # if the user passes linecard hostname, then try to get the module name for that linecard + module_name = get_linecard_module_name_from_hostname(linecard_name) + if module_name is None: + # if the module name cannot be found from host, assume the user has passed module name + self.module_name = linecard_name + self.hostname = get_linecard_hostname_from_module_name(linecard_name) + else: + # the user has passed linecard hostname + self.hostname = linecard_name + self.module_name = module_name + self.username = username self.password = password diff --git a/rcli/rexec.py b/rcli/rexec.py index 8831d5585f..21929c8012 100644 --- a/rcli/rexec.py +++ b/rcli/rexec.py @@ -30,20 +30,22 @@ def cli(linecard_names, command, username): if list(linecard_names) == ["all"]: # Get all linecard names using autocompletion helper - linecard_names = rcli_utils.get_all_linecards(None, None, "") + module_names = sorted(rcli_utils.get_all_linecards(None, None, "")) + else: + module_names = linecard_names linecards = [] # Iterate through each linecard, check if the login was successful - for linecard_name in linecard_names: - linecard = Linecard(linecard_name, username, password) + for module_name in module_names: + linecard = Linecard(module_name, username, password) if not linecard.connection: - click.echo(f"Failed to connect to {linecard_name} with username {username}") + click.echo(f"Failed to connect to {module_name} with username {username}") sys.exit(1) linecards.append(linecard) for linecard in linecards: if linecard.connection: - click.echo(f"======== {linecard.linecard_name} output: ========") + click.echo(f"======== {linecard.module_name}|{linecard.hostname} output: ========") click.echo(linecard.execute_cmd(command)) diff --git a/rcli/rshell.py b/rcli/rshell.py index bac02d42d8..b22187a0f3 100644 --- a/rcli/rshell.py +++ b/rcli/rshell.py @@ -28,14 +28,14 @@ def cli(linecard_name, username): try: linecard = Linecard(linecard_name, username, password) if linecard.connection: - click.echo(f"Connecting to {linecard.linecard_name}") + click.echo(f"Connecting to {linecard.module_name}") # If connection was created, connection exists. # Otherwise, user will see an error message. 
linecard.start_shell() click.echo("Connection Closed") except paramiko.ssh_exception.AuthenticationException: click.echo( - f"Login failed on '{linecard.linecard_name}' with username '{linecard.username}'") + f"Login failed on '{linecard.module_name}' with username '{linecard.username}'") if __name__=="__main__": diff --git a/rcli/utils.py b/rcli/utils.py index 510e360581..e2f48788ba 100644 --- a/rcli/utils.py +++ b/rcli/utils.py @@ -43,6 +43,20 @@ def get_linecard_module_name_from_hostname(linecard_name: str): return None + +def get_linecard_hostname_from_module_name(linecard_name: str): + + chassis_state_db = connect_to_chassis_state_db() + keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*')) + for key in keys: + module_name = key.split('|')[1] + if module_name.replace('-', '').lower() == linecard_name.replace('-', '').lower(): + hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME) + return hostname + + return None + + def get_linecard_ip(linecard_name: str): """ Given a linecard name, lookup its IP address in the midplane table @@ -69,6 +83,7 @@ def get_linecard_ip(linecard_name: str): return None return module_ip + def get_module_ip_and_access_from_state_db(module_name): state_db = connect_state_db() data_dict = state_db.get_all( diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 6343e8b7b2..10e5d982cd 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,6 +1,8 @@ import click +import sys +import subprocess -from sonic_py_common import multi_asic +from sonic_py_common import multi_asic, device_info from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon @@ -17,6 +19,12 @@ @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" + if device_info.is_supervisor(): + # if the device is a chassis, the command need to be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) pass @@ -102,10 +110,16 @@ def neighbors(ipaddress, info_type, namespace): def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): - ctx = click.get_current_context() - ctx.fail('-n/--namespace option required. provide namespace from list {}'\ - .format(multi_asic.get_namespace_list())) + namespace = namespace.strip() + if multi_asic.is_multi_asic(): + if namespace == multi_asic.DEFAULT_NAMESPACE: + ctx = click.get_current_context() + ctx.fail('-n/--namespace option required. provide namespace from list {}' + .format(multi_asic.get_namespace_list())) + if namespace != "all" and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('invalid namespace {}. 
provide namespace from list {}' + .format(namespace, multi_asic.get_namespace_list())) command = 'show ip bgp' if ipaddress is not None: @@ -125,5 +139,15 @@ def network(ipaddress, info_type, namespace): if info_type is not None: command += ' {}'.format(info_type) - output = bgp_util.run_bgp_show_command(command, namespace) - click.echo(output.rstrip('\n')) + if namespace == "all": + if multi_asic.is_multi_asic(): + for ns in multi_asic.get_namespace_list(): + click.echo("\n======== namespace {} ========".format(ns)) + output = bgp_util.run_bgp_show_command(command, ns) + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, "") + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, namespace) + click.echo(output.rstrip('\n')) diff --git a/show/main.py b/show/main.py index d20073fb01..06114eb79f 100755 --- a/show/main.py +++ b/show/main.py @@ -1190,7 +1190,11 @@ def protocol(verbose): ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - +elif device_info.is_supervisor(): + from .bgp_frr_v4 import bgp + ip.add_command(bgp) + from .bgp_frr_v6 import bgp + ipv6.add_command(bgp) # # 'link-local-mode' subcommand ("show ipv6 link-local-mode") # diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index da93e8e8e8..73ece16a66 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -227,6 +227,9 @@ multi_asic_bgp_network_err = \ """Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" +multi_asic_bgp_network_asic_unknown_err = \ + """Error: invalid namespace asic_unknown. provide namespace from list ['asic0', 'asic1']""" + bgp_v4_network_asic0 = \ """ BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 @@ -276,7 +279,7 @@ *=i10.0.0.42/31 10.1.0.2 0 100 0 ? *>i 10.1.0.0 0 100 0 ? *=i10.0.0.44/31 10.1.0.2 0 100 0 ? -*>i 10.1.0.0 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? """ bgp_v4_network_ip_address_asic0 = \ @@ -311,6 +314,111 @@ Last update: Thu Apr 22 02:13:30 2021 """ +bgp_v4_network_all_asic = \ + """ +======== namespace asic0 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? + +======== namespace asic1 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+""" + bgp_v6_network_asic0 = \ """ BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 @@ -429,6 +537,9 @@ def mock_show_bgp_network_multi_asic(param): return bgp_v6_network_ip_address_asic0 elif param == 'bgp_v6_network_bestpath_asic0': return bgp_v6_network_ip_address_asic0_bestpath + elif param == "bgp_v4_network_all_asic": + # this is mocking the output of a single LC + return bgp_v4_network_asic0 else: return '' @@ -454,6 +565,11 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 1, 'rc_output': bgp_v4_network_longer_prefixes_error }, + 'bgp_v4_network_all_asic_on_single_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network + }, 'bgp_v6_network': { 'args': [], 'rc': 0, @@ -499,6 +615,16 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v4_network_bestpath_asic0 }, + 'bgp_v4_network_all_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic + }, + 'bgp_v4_network_asic_unknown': { + 'args': ['-nasic_unknown'], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_asic_unknown_err + }, 'bgp_v6_network_multi_asic': { 'args': [], 'rc': 2, diff --git a/tests/conftest.py b/tests/conftest.py index 72b28515bb..5dd31d523a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -386,6 +386,13 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1( else: return "" + def mock_multi_asic_list(): + return ["asic0", "asic1"] + + # mock multi-asic list + if request.param == "bgp_v4_network_all_asic": + multi_asic.get_namespace_list = mock_multi_asic_list + _old_run_bgp_command = bgp_util.run_bgp_command if request.param == 'ip_route_for_int_ip': bgp_util.run_bgp_command = mock_run_bgp_command_for_static diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 5178c49ca0..6af9e19da4 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -4,6 +4,9 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" } } \ No newline at end of file diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index d9fd672102..9883dfa16b 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -12,9 +12,9 @@ import socket import termios -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world -======== LINE-CARD2 output: ======== +======== LINE-CARD2|sonic-lc3 output: ======== hello world ''' REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... 
@@ -152,12 +152,12 @@ def test_rexec_all(self): @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" + LINECARD_NAME = "sonic-lc-100" result = runner.invoke( rexec.cli, [LINECARD_NAME, "-c", "show version"]) print(result.output) assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output + assert "Linecard sonic-lc-100 not found\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py new file mode 100644 index 0000000000..6acbb8185f --- /dev/null +++ b/tests/remote_show_test.py @@ -0,0 +1,57 @@ +import mock +import subprocess +from io import BytesIO +from click.testing import CliRunner + + +def mock_rexec_command(*args): + mock_stdout = BytesIO(b"""hello world""") + print(mock_stdout.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=0, stdout=mock_stdout, stderr=BytesIO()) + + +def mock_rexec_error_cmd(*args): + mock_stderr = BytesIO(b"""Error""") + print(mock_stderr.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=1, stdout=BytesIO(), stderr=mock_stderr) + + +MULTI_LC_REXEC_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +hello world +''' + +MULTI_LC_ERR_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +Error +''' + + +class TestRexecBgp(object): + @classmethod + def setup_class(cls): + pass + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + def test_show_ip_bgp_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_error_cmd + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 1 + assert MULTI_LC_ERR_OUTPUT == result.output diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index f610199538..d3f24c8571 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -57,7 +57,8 @@ def setup_class(cls): ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'), ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'), ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'), - ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')], + ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error'), + ('bgp_v4_network', 'bgp_v4_network_all_asic_on_single_asic')], indirect=['setup_single_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_single_bgp_instance): @@ -84,7 +85,9 @@ def 
setup_class(cls): ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'), ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'), ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'), - ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')], + ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0'), + ('bgp_v4_network_all_asic', 'bgp_v4_network_all_asic'), + ('bgp_v4_network', 'bgp_v4_network_asic_unknown')], indirect=['setup_multi_asic_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_multi_asic_bgp_instance): From f2b762138c3236807bf1995e2e2130f7b8e5f386 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Thu, 18 Jul 2024 15:58:23 -0700 Subject: [PATCH 02/15] [SfpUtil] sfp eeprom with option dom is not working on Xcvrs with flat memory (#3385) Signed-off-by: Mihir Patel --- sfputil/main.py | 14 +++++++++ tests/sfputil_test.py | 73 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) diff --git a/sfputil/main.py b/sfputil/main.py index 2674c51b10..309d5c98dd 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -673,6 +673,20 @@ def eeprom(port, dump_dom, namespace): output += convert_sfp_info_to_output_string(xcvr_info) if dump_dom: + try: + api = platform_chassis.get_sfp(physical_port).get_xcvr_api() + except NotImplementedError: + output += "API is currently not implemented for this platform\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + if api is None: + output += "API is none while getting DOM info!\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + else: + if api.is_flat_memory(): + output += "DOM values not supported for flat memory module\n" + continue try: xcvr_dom_info = platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status() except NotImplementedError: diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 537c329819..5854bb201b 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -20,6 +20,46 @@ ERROR_NOT_IMPLEMENTED = 5 ERROR_INVALID_PORT = 6 +FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT = { + 'type': 'QSFP28 or later', + 'type_abbrv_name': 'QSFP28', + 'manufacturer': 'Mellanox', + 'model': 'MCP1600-C003', + 'vendor_rev': 'A2', + 'serial': 'MT1636VS10561', + 'vendor_oui': '00-02-c9', + 'vendor_date': '2016-07-18', + 'connector': 'No separable connector', + 'encoding': '64B66B', + 'ext_identifier': 'Power Class 1(1.5W max)', + 'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1', + 'cable_type': 'Length Cable Assembly(m)', + 'cable_length': '3', + 'application_advertisement': 'N/A', + 'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}", + 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no',\ + 'Voltage_support': 'no', 'Temp_support': 'no'}", + 'nominal_bit_rate': '255' +} +FLAT_MEMORY_MODULE_EEPROM = """Ethernet16: SFP EEPROM detected + Application Advertisement: N/A + Connector: No separable connector + Encoding: 64B66B + Extended Identifier: Power Class 1(1.5W max) + Extended RateSelect Compliance: QSFP+ Rate Select Version 1 + Identifier: QSFP28 or later + Length Cable Assembly(m): 3 + Nominal Bit Rate(100Mbs): 255 + Specification compliance: + 10/40G Ethernet Compliance Code: 40GBASE-CR4 + Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18 + Vendor Name: Mellanox + Vendor OUI: 00-02-c9 + Vendor PN: MCP1600-C003 + Vendor Rev: A2 + Vendor SN: MT1636VS10561 +""" + class TestSfputil(object): def 
test_format_dict_value_to_string(self): sorted_key_table = [ @@ -585,6 +625,39 @@ def test_show_eeprom_RJ45(self, mock_chassis): expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" assert result.output == expected_output + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @pytest.mark.parametrize("exception, xcvr_api_none, expected_output", [ + (None, False, '''DOM values not supported for flat memory module\n\n'''), + (NotImplementedError, False, '''API is currently not implemented for this platform\n\n'''), + (None, True, '''API is none while getting DOM info!\n\n''') + ]) + @patch('sfputil.main.platform_chassis') + def test_show_eeprom_dom_conditions(self, mock_chassis, exception, xcvr_api_none, expected_output): + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_sfp.get_transceiver_info.return_value = FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT + mock_chassis.get_sfp.return_value = mock_sfp + + if exception: + mock_chassis.get_sfp().get_xcvr_api.side_effect = exception + elif xcvr_api_none: + mock_chassis.get_sfp().get_xcvr_api.return_value = None + else: + mock_api = MagicMock() + mock_chassis.get_sfp().get_xcvr_api.return_value = mock_api + + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + + if exception or xcvr_api_none: + assert result.exit_code == ERROR_NOT_IMPLEMENTED + else: + assert result.exit_code == 0 + assert result.output == FLAT_MEMORY_MODULE_EEPROM + expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=0))) def test_show_eeprom_hexdump_invalid_port(self, mock_chassis): From d1ca905e3b0733170d19abeecad1cfbfd0180698 Mon Sep 17 00:00:00 2001 From: ryanzhu706 Date: Fri, 19 Jul 2024 13:41:01 -0700 Subject: [PATCH 03/15] Update DB version to 202411 on master branch. (#3414) * Update DB version to 202411 on master branch. --- scripts/db_migrator.py | 14 +++++++++--- tests/db_migrator_test.py | 45 +++++++++++++++++++++------------------ 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index afd5e638de..9be3ce325b 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -58,7 +58,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_202405_01' + self.CURRENT_VERSION = 'version_202411_01' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -1228,10 +1228,18 @@ def version_202311_03(self): def version_202405_01(self): """ - Version 202405_01, this version should be the final version for - master branch until 202405 branch is created. + Version 202405_01. """ log.log_info('Handling version_202405_01') + self.set_version('version_202411_01') + return 'version_202411_01' + + def version_202411_01(self): + """ + Version 202411_01, this version should be the final version for + master branch until 202411 branch is created. 
+ """ + log.log_info('Handling version_202411_01') return None def get_version(self): diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e21539766a..cdf4251bd7 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -74,24 +74,27 @@ class TestVersionComparison(object): def setup_class(cls): cls.version_comp_list = [ # Old format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_1_0_2', 'result' : False }, - { 'v1' : 'version_1_0_2', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_2_0_1', 'result' : False }, - { 'v1' : 'version_2_0_1', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_1_0_2', 'result': False}, + {'v1': 'version_1_0_2', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_2_0_1', 'result': False}, + {'v1': 'version_2_0_1', 'v2': 'version_1_0_1', 'result': True}, # New format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_1_0_1', 'result': True}, # New format v.s new format - { 'v1' : 'version_202311_01', 'v2' : 'version_202311_02', 'result' : False }, - { 'v1' : 'version_202311_02', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_202305_01', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_202305_01', 'result' : True }, - { 'v1' : 'version_202311_01', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_master_01', 'v2' : 'version_master_02', 'result' : False }, - { 'v1' : 'version_master_02', 'v2' : 'version_master_01', 'result' : True }, + {'v1': 'version_202311_01', 'v2': 'version_202311_02', 'result': False}, + {'v1': 'version_202311_02', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_202305_01', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_202305_01', 'result': True}, + {'v1': 'version_202405_01', 'v2': 'version_202411_01', 'result': False}, + {'v1': 'version_202411_01', 'v2': 'version_202405_01', 'result': True}, + {'v1': 'version_202411_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_master_01', 'v2': 'version_master_02', 'result': False}, + {'v1': 'version_master_02', 'v2': 'version_master_01', 'result': True}, ] def test_version_comparison(self): @@ -383,7 +386,7 @@ def test_dns_nameserver_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'dns-nameserver-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_keys = dbmgtr.configDB.keys(dbmgtr.configDB.CONFIG_DB, 'DNS_NAMESERVER*') 
expected_keys = expected_db.cfgdb.keys(expected_db.cfgdb.CONFIG_DB, 'DNS_NAMESERVER*') @@ -895,7 +898,7 @@ def test_init(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) def test_init_no_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace=None, operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -903,7 +906,7 @@ def test_init_no_namespace(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) def test_init_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace="asic0", operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -940,7 +943,7 @@ def test_dns_nameserver_migrator_minigraph(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-minigraph-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") @@ -956,7 +959,7 @@ def test_dns_nameserver_migrator_configdb(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-configdb-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") From f19662277acba963e34cf8ca45c0fd7ab234be65 Mon Sep 17 00:00:00 2001 From: ganglv <88995770+ganglyu@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:43:24 +0800 Subject: [PATCH 04/15] fix show techsupport date issue (#3437) What I did Show techsupport is designed to collect logs and core files since given date. I find that some core files are missing when given date is relative, for example "5 minutes ago". Microsoft ADO: 28737486 How I did it Create the reference file at the start of the script, and don't update it in find_files. How to verify it Run end to end test: show_techsupport/test_auto_techsupport.py --- scripts/generate_dump | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index b163366bb0..3d0ef3430d 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1120,7 +1120,6 @@ save_file() { find_files() { trap 'handle_error $? 
$LINENO' ERR local -r directory=$1 - $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" local -r find_command="find -L $directory -type f -newer ${REFERENCE_FILE}" echo $($find_command) @@ -1914,6 +1913,8 @@ main() { ${CMD_PREFIX}renice +5 -p $$ >> /dev/null ${CMD_PREFIX}ionice -c 2 -n 5 -p $$ >> /dev/null + # Created file as a reference to compare modification time + $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" $MKDIR $V -p $TARDIR # Start with this script so its obvious what code is responsible From 772ee793d067be40eeb8779d20b645aa7f97ea30 Mon Sep 17 00:00:00 2001 From: Rida Hanif Date: Tue, 23 Jul 2024 14:42:13 -0700 Subject: [PATCH 05/15] IP Assignment Issue (#3408) #### What I did Added a check that prevents IP address assignment on a port that is a VLAN member. This PR was created in response to [Issue](https://github.com/sonic-net/sonic-buildimage/issues/19505) #### How I did it Modified config/main.py to add a check for IP assignment when the port has VLAN membership. #### How to verify it After this change, an IP address cannot be assigned to a port that is a member of a VLAN. --- config/main.py | 8 ++++++++ tests/vlan_test.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 709c96402a..46bfc332b0 100644 --- a/config/main.py +++ b/config/main.py @@ -4853,6 +4853,14 @@ def add_interface_ip(ctx, interface_name, ip_addr, gw, secondary): interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port mode + vlan_member_table = config_db.get_table('VLAN_MEMBER') + + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 2d3c1dcf1b..fc3569b87d 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1426,7 +1426,7 @@ def test_config_set_router_port_on_member_interface(self): ["Ethernet4", "10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 - assert 'Interface Ethernet4 is in trunk mode and needs to be in routed mode!' in result.output + assert 'Interface Ethernet4 is a member of vlan\nAborting!\n' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() From a81321595b1f2cf34b26255fb6953f304ba2df14 Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Wed, 24 Jul 2024 14:21:47 -0700 Subject: [PATCH 06/15] Fix multi-asic behaviour for dropstat (#3059) * Fixes dropstat multi-asic behaviour by using multi-asic helpers and ensuring that dropstat iterates through the correct namespaces when the 'show' command is run.
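For context, the following is a minimal, editor-added sketch (not part of this patch) of the iteration pattern the reworked dropstat relies on: a MultiAsic helper plus the run_on_multi_asic decorator from utilities_common.multi_asic, which connects the per-namespace databases before each call. The DropCollector class name is made up for illustration.
```
# Editor's sketch, not part of the patch: the multi-asic iteration pattern
# used by the reworked dropstat. "DropCollector" is an illustrative name.
from sonic_py_common import multi_asic
import utilities_common.multi_asic as multi_asic_util


class DropCollector:
    def __init__(self, namespace):
        # namespace=None means "all namespaces" on a multi-asic device
        self.namespaces = multi_asic.get_namespace_list(namespace)
        self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace)
        self.db = None          # connected per namespace by the decorator
        self.config_db = None   # connected per namespace by the decorator

    @multi_asic_util.run_on_multi_asic
    def show(self):
        # the body runs once per namespace; self.db already points at its databases
        print("namespace:", self.multi_asic.current_namespace or "default")


DropCollector(namespace=None).show()
```
With namespace=None the decorator walks every namespace on a multi-asic device and falls back to the default namespace on a single-asic one, which is the behavior the new tests in this patch exercise.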
Co-authored-by: rdjeric Co-authored-by: Kenneth Cheung --- scripts/dropstat | 118 ++++++++++++++------------ show/dropcounters.py | 7 +- tests/mock_tables/asic1/asic_db.json | 6 ++ tests/multi_asic_dropstat_test.py | 122 +++++++++++++++++++++++++++ tests/single_asic_dropstat_test.py | 72 ++++++++++++++++ 5 files changed, 272 insertions(+), 53 deletions(-) create mode 100644 tests/mock_tables/asic1/asic_db.json create mode 100644 tests/multi_asic_dropstat_test.py create mode 100644 tests/single_asic_dropstat_test.py diff --git a/scripts/dropstat b/scripts/dropstat index 485ac65637..219ad2b494 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,8 +11,8 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries +import click import json -import argparse import os import socket import sys @@ -20,6 +20,9 @@ import sys from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -28,9 +31,14 @@ try: test_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, test_path) - import mock_tables.dbconnector + from tests.mock_tables import dbconnector socket.gethostname = lambda: 'sonic_drops_test' os.getuid = lambda: 27 + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + else: + dbconnector.load_database_config() except KeyError: pass @@ -90,30 +98,32 @@ def get_dropstat_dir(): class DropStat(object): - def __init__(self): - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) - self.db.connect(self.db.ASIC_DB) - self.db.connect(self.db.APPL_DB) - self.db.connect(self.db.CONFIG_DB) + def __init__(self, namespace): + self.namespaces = multi_asic.get_namespace_list(namespace) + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + self.config_db = None + self.cached_namespace = None dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'port-stats') - self.switch_drop_stats_file = os.path.join(dropstat_dir + 'switch-stats') - self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats') + self.switch_drop_stats_file = os.path.join(dropstat_dir, 'switch-stats') + self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats') self.stat_lookup = {} self.reverse_stat_lookup = {} + @multi_asic_util.run_on_multi_asic def show_drop_counts(self, group, counter_type): """ Prints out the current drop counts at the port-level and switch-level. """ + if os.environ.get("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE", "0") == "1": + # Temp cache needs to be cleard to avoid interference from previous test cases + UserCache().remove() + self.show_switch_std_drop_counts(group, counter_type) self.show_port_drop_counts(group, counter_type) print('') @@ -124,16 +134,36 @@ class DropStat(object): Clears the current drop counts. 
""" - try: - json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) + counters_port_drop = {} + counters_switch_drop = {} + counters_switch_std_drop = {} + for ns in self.namespaces: + self.config_db = multi_asic.connect_config_db_for_ns(ns) + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + + counts = self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP) + if counts: + counters_port_drop.update(counts) + counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP) if counters: - json.dump(self.get_counts(counters, self.get_switch_id()), open(self.switch_drop_stats_file, 'w+')) + counts = self.get_counts(counters, self.get_switch_id()) + counters_switch_drop.update(counts) counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True) if counters: - json.dump(self.get_counts(counters, self.get_switch_id()), open(self.switch_std_drop_stats_file, 'w+')) + counts = self.get_counts(counters, self.get_switch_id()) + counters_switch_std_drop.update(counts) + + try: + if counters_port_drop: + json.dump(counters_port_drop, open(self.port_drop_stats_file, 'w+')) + + if counters_switch_drop: + json.dump(counters_switch_drop, open(self.switch_drop_stats_file, 'w+')) + + if counters_switch_std_drop: + json.dump(counters_switch_std_drop, open(self.switch_std_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) @@ -321,12 +351,13 @@ class DropStat(object): the given object type. """ + if self.cached_namespace != self.multi_asic.current_namespace: + self.stat_lookup = {} + self.cached_namespace = self.multi_asic.current_namespace + if not self.stat_lookup.get(object_stat_map, None): stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map) - if stats_map: - self.stat_lookup[object_stat_map] = stats_map - else: - self.stat_lookup[object_stat_map] = None + self.stat_lookup[object_stat_map] = stats_map if stats_map else None return self.stat_lookup[object_stat_map] @@ -457,39 +488,22 @@ class DropStat(object): else: return PORT_STATE_NA - -def main(): - parser = argparse.ArgumentParser(description='Display drop counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - dropstat -""") - - # Version - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - # Actions - parser.add_argument('-c', '--command', type=str, help='Desired action to perform') - - # Variables - parser.add_argument('-g', '--group', type=str, help='The group of the target drop counter', default=None) - parser.add_argument('-t', '--type', type=str, help='The type of the target drop counter', default=None) - - args = parser.parse_args() - - command = args.command - - group = args.group - counter_type = args.type - - dcstat = DropStat() +@click.command(help='Display drop counters') +@click.option('-c', '--command', required=True, help='Desired action to perform', + type=click.Choice(['clear', 'show'], case_sensitive=False)) +@click.option('-g', '--group', default=None, help='The group of the target drop counter') +@click.option('-t', '--type', 'counter_type', default=None, help='The type of the target drop counter') +@click.option('-n', '--namespace', help='Namespace name', default=None, + type=click.Choice(multi_asic.get_namespace_list())) +@click.version_option(version='1.0') +def main(command, group, 
counter_type, namespace): + load_db_config() + + dcstat = DropStat(namespace) if command == 'clear': dcstat.clear_drop_counts() - elif command == 'show': - dcstat.show_drop_counts(group, counter_type) else: - print("Command not recognized") + dcstat.show_drop_counts(group, counter_type) if __name__ == '__main__': diff --git a/show/dropcounters.py b/show/dropcounters.py index 30779b9364..9bb988fc5b 100644 --- a/show/dropcounters.py +++ b/show/dropcounters.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util # @@ -41,7 +42,8 @@ def capabilities(verbose): @click.option('-g', '--group', required=False) @click.option('-t', '--counter_type', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def counts(group, counter_type, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def counts(group, counter_type, verbose, namespace): """Show drop counts""" cmd = ['dropstat', '-c', 'show'] @@ -51,4 +53,7 @@ def counts(group, counter_type, verbose): if counter_type: cmd += ['-t', str(counter_type)] + if namespace: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/tests/mock_tables/asic1/asic_db.json b/tests/mock_tables/asic1/asic_db.json new file mode 100644 index 0000000000..1a769b82b5 --- /dev/null +++ b/tests/mock_tables/asic1/asic_db.json @@ -0,0 +1,6 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": { + "SAI_SWITCH_ATTR_INIT_SWITCH": "true", + "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "DE:AD:BE:EF:CA:FE" + } +} diff --git a/tests/multi_asic_dropstat_test.py b/tests/multi_asic_dropstat_test.py new file mode 100644 index 0000000000..8b9dd72826 --- /dev/null +++ b/tests/multi_asic_dropstat_test.py @@ -0,0 +1,122 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_masic_result_asic0 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 10 100 0 0 80 20 + Ethernet4 U 0 1000 0 0 800 100 +Ethernet-BP0 U 0 1000 0 0 800 100 +Ethernet-BP4 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_asic1 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 10 100 0 0 80 20 +Ethernet-BP260 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 0 0 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 +Ethernet-BP0 U 0 0 0 0 0 0 +Ethernet-BP4 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 0 0 0 0 0 0 +Ethernet-BP260 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + 
os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_dropcount_masic_asic0(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic0' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 and return_code == 0 + + def test_show_dropcount_masic_all_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 + dropstat_masic_result_asic1 + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_clear_all and return_code == 0 + + def test_show_dropcount_masic_invalid_ns(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 2 + assert "invalid choice: asic5" in result + + def test_show_dropcount_version(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '--version' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") + print("TEARDOWN") diff --git a/tests/single_asic_dropstat_test.py b/tests/single_asic_dropstat_test.py new file mode 100644 index 0000000000..c521bcfa60 --- /dev/null +++ b/tests/single_asic_dropstat_test.py @@ -0,0 +1,72 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_result = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 10 100 0 0 80 20 +Ethernet4 N/A 0 1000 0 0 800 100 +Ethernet8 N/A 100 10 0 0 10 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 1000 0 +""" + +dropstat_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 0 0 0 0 0 0 +Ethernet4 N/A 0 0 0 0 0 0 +Ethernet8 N/A 0 0 0 0 0 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- 
------------------- +sonic_drops_test 0 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + print("SETUP") + + def test_show_dropcount_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result_clear_all and return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + print("TEARDOWN") From 9b24421aacaaa496f65fbab39e918283886205e5 Mon Sep 17 00:00:00 2001 From: Anoop Kamath <115578705+AnoopKamath@users.noreply.github.com> Date: Thu, 25 Jul 2024 18:01:49 -0700 Subject: [PATCH 07/15] Add sfputil power enable/disable command (#3418) --- sfputil/main.py | 56 +++++++++++++++++++++++++++++++++++++++++++ tests/sfputil_test.py | 45 ++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/sfputil/main.py b/sfputil/main.py index 309d5c98dd..2c8f85d016 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1320,6 +1320,62 @@ def reset(port_name): i += 1 + +# 'power' subgroup +@cli.group() +def power(): + """Enable or disable power of SFP transceiver""" + pass + + +# Helper method for setting low-power mode +def set_power(port_name, enable): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("Power disable/enable is not available for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + presence = sfp.get_presence() + except NotImplementedError: + click.echo("sfp get_presence() NOT implemented!") + sys.exit(EXIT_FAIL) + + if not presence: + click.echo("{}: SFP EEPROM not detected\n".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + result = platform_chassis.get_sfp(physical_port).set_power(enable) + except (NotImplementedError, AttributeError): + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) + + if result: + click.echo("OK") + else: + click.echo("Failed") + sys.exit(EXIT_FAIL) + + +# 'disable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def disable(port_name): + """Disable power of SFP transceiver""" + set_power(port_name, False) + + +# 'enable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def enable(port_name): + """Enable power of SFP transceiver""" + set_power(port_name, True) + + def update_firmware_info_to_state_db(port_name): physical_port = logical_port_to_physical_port_index(port_name) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 5854bb201b..0e58daa18e 100644 --- a/tests/sfputil_test.py +++ 
b/tests/sfputil_test.py @@ -610,6 +610,51 @@ def test_show_lpmode(self, mock_chassis): """ assert result.output == expected_output + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=True)) + def test_power_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Power disable/enable is not available for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + def test_power(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.exit_code == 0 + + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n\n' + + mock_sfp.get_presence.return_value = True + mock_sfp.set_power = MagicMock(side_effect=NotImplementedError) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'This functionality is currently not implemented for this platform\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.set_power = MagicMock(return_value=False) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Failed\n' + + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) From 84cb00a4b2d7e8fb2bcab259367836fa11a17d0a Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Fri, 26 Jul 2024 08:51:51 -0700 Subject: [PATCH 08/15] Change the default behavior of show ip bgp network (#3447) * show/bgp_frr_v4.py: change the default behavior of "show ip bgp network" - after change, show ip bgp network will have "all" as the default value of namespace option - after change, ip-address/ip-prefix is a required argument when executing show ip bgp network on a chassis supervisor * tests/remote_show_test.py update unit tests to comply with the new behaviors * tests/show_bgp_network_test.py: update a test vector to make it comply with the new default behavior * tests/bgp_commands_input/bgp_network_test_vector.py: update a test vector to comply with the new default behavior --- show/bgp_frr_v4.py | 30 +++++++++++-------- .../bgp_network_test_vector.py | 6 ++-- tests/remote_show_test.py | 16 ++++++++++ tests/show_bgp_network_test.py | 2 +- 4 files changed, 37 insertions(+), 17 deletions(-) diff 
--git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 10e5d982cd..ddcd688581 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -20,12 +20,13 @@ def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" if device_info.is_supervisor(): - # if the device is a chassis, the command need to be executed by rexec - click.echo("Since the current device is a chassis supervisor, " + - "this command will be executed remotely on all linecards") - proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) - sys.exit(proc.returncode) - pass + subcommand = sys.argv[3] + if subcommand not in "network": + # the command will be executed directly by rexec if it is not "show ip bgp network" + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) # 'summary' subcommand ("show ip bgp summary") @@ -92,7 +93,7 @@ def neighbors(ipaddress, info_type, namespace): @bgp.command() @click.argument('ipaddress', metavar='[|]', - required=False) + required=True if device_info.is_supervisor() else False) @click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', type=click.Choice( @@ -103,19 +104,22 @@ def neighbors(ipaddress, info_type, namespace): 'namespace', type=str, show_default=True, - required=True if multi_asic.is_multi_asic is True else False, + required=False, help='Namespace name or all', - default=multi_asic.DEFAULT_NAMESPACE, + default="all", callback=multi_asic_util.multi_asic_namespace_validation_callback) def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" + if device_info.is_supervisor(): + # the command will be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) + namespace = namespace.strip() if multi_asic.is_multi_asic(): - if namespace == multi_asic.DEFAULT_NAMESPACE: - ctx = click.get_current_context() - ctx.fail('-n/--namespace option required. provide namespace from list {}' - .format(multi_asic.get_namespace_list())) if namespace != "all" and namespace not in multi_asic.get_namespace_list(): ctx = click.get_current_context() ctx.fail('invalid namespace {}. 
provide namespace from list {}' diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index 73ece16a66..f9edd66fa2 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -595,10 +595,10 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v6_network_longer_prefixes }, - 'bgp_v4_network_multi_asic': { + 'bgp_v4_network_default_multi_asic': { 'args': [], - 'rc': 2, - 'rc_err_msg': multi_asic_bgp_network_err + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic }, 'bgp_v4_network_asic0': { 'args': ['-nasic0'], diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py index 6acbb8185f..e1be3d0302 100644 --- a/tests/remote_show_test.py +++ b/tests/remote_show_test.py @@ -31,6 +31,7 @@ def setup_class(cls): pass @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) def test_show_ip_bgp_rexec(self, setup_bgp_commands): show = setup_bgp_commands runner = CliRunner() @@ -44,6 +45,7 @@ def test_show_ip_bgp_rexec(self, setup_bgp_commands): assert MULTI_LC_REXEC_OUTPUT == result.output @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): show = setup_bgp_commands runner = CliRunner() @@ -55,3 +57,17 @@ def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): subprocess.run = _old_subprocess_run assert result.exit_code == 1 assert MULTI_LC_ERR_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "network", "10.0.0.0/24"]) + def test_show_ip_bgp_network_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["network", "10.0.0.0/24"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index d3f24c8571..bfc23d8912 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -78,7 +78,7 @@ def setup_class(cls): @pytest.mark.parametrize( 'setup_multi_asic_bgp_instance, test_vector', - [('bgp_v4_network', 'bgp_v4_network_multi_asic'), + [('bgp_v4_network_all_asic', 'bgp_v4_network_default_multi_asic'), ('bgp_v6_network', 'bgp_v6_network_multi_asic'), ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'), ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'), From ff2c73f85ca24dea2634dc5ec83956f27ab9e32b Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:02:59 -0700 Subject: [PATCH 09/15] Add namespace check for multiasic (#3458) * Add namespace check for multiasic * Fix format --- generic_config_updater/generic_updater.py | 3 + .../multiasic_change_applier_test.py | 137 +++++++++++++++--- 2 files changed, 121 insertions(+), 19 deletions(-) diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index b6d65e2ce6..8ce27455bb 100644 --- a/generic_config_updater/generic_updater.py +++ 
b/generic_config_updater/generic_updater.py @@ -31,6 +31,9 @@ def extract_scope(path): scope = HOST_NAMESPACE remainder = "/" + "/".join(parts[1:]) else: + if multi_asic.is_multi_asic(): + raise GenericConfigUpdaterError(f"Multi ASIC must have namespace prefix in path: '{path}'.") + scope = "" remainder = path return scope, remainder diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index d7f734d2ec..0102cfff00 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -9,25 +9,124 @@ class TestMultiAsicChangeApplier(unittest.TestCase): - def test_extract_scope(self): + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_multiasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True test_paths_expectedresults = { - "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/sometable/data": (True, "", "/sometable/data"), - "": (False, "", ""), - "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + False, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), + "/asic77": ( + False, "", "" + ), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception: + assert(not result) + + 
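For quick reference between the two test variants, here is a minimal sketch (not part of the patch) of the behavior they pin down for the unprefixed path "/sometable/data"; it assumes the same `extract_scope` import path and mock target the tests themselves use.

```python
# Minimal sketch, illustrative only: mirrors the "/sometable/data" expectations
# encoded in the multi-asic and single-asic test vectors.
from unittest.mock import patch

from generic_config_updater.generic_updater import extract_scope

# Single ASIC: an unprefixed path is accepted with an empty scope.
with patch('sonic_py_common.multi_asic.is_multi_asic', return_value=False):
    assert extract_scope("/sometable/data") == ("", "/sometable/data")

# Multi ASIC: the same path must carry an asicN/localhost prefix, so it is rejected.
with patch('sonic_py_common.multi_asic.is_multi_asic', return_value=True):
    try:
        extract_scope("/sometable/data")
    except Exception:
        print("unprefixed path rejected on a multi-asic device")
```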
@patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_singleasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = False + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + True, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), "/asic77": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), } for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): @@ -35,8 +134,8 @@ def test_extract_scope(self): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception as e: - assert(result == False) + except Exception: + assert(not result) @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) From f50587a1ff65bb489231bfe45cec805fb32dbf00 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Fri, 2 Aug 2024 15:35:59 -0700 Subject: [PATCH 10/15] Update README.md (#3406) * Update README.md The new location of the sonic-utilities target wheel package is under bookworm instead of bullseye. Update the README to make it consistent with the current build behavior. * README.md: update build instructions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 91146bc9d0..d6f9a5e25a 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ A convenient alternative is to let the SONiC build system configure a build envi 2.
Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished ``` - make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl + make -f Makefile.work BLDENV=bookworm KEEP_SLAVE_ON=yes target/python-wheels/bookworm/sonic_utilities-1.2-py3-none-any.whl ``` 3. When the build finishes, your prompt will change to indicate you are inside the slave container. Change into the `src/sonic-utilities/` directory From 018eb737eef61fb1f1134d1c80d508a756973e05 Mon Sep 17 00:00:00 2001 From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com> Date: Mon, 5 Aug 2024 09:26:46 -0700 Subject: [PATCH 11/15] Fix to use IPv6 linklocal address as snmp agent address (#3215) What I did If link local IPv6 address is added as SNMP agent address, it will fail. This PR requires changes in snmpd.conf.j2 template here sonic-net/sonic-buildimage#18350 How I did it Append scope id to ipv6 link local IP address. How to verify it Able to configure link local ipv6 address as snmp agent address sudo config snmpagentaddress add fe80::a%eth0 --- config/main.py | 7 +++++-- tests/config_snmp_test.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 46bfc332b0..f48c446adf 100644 --- a/config/main.py +++ b/config/main.py @@ -3498,7 +3498,10 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|| - if not clicommon.is_ipaddress(agentip): + # Link local IP address should be provided along with zone id + # % for ex fe80::1%eth0 + agent_ip_addr = agentip.split('%')[0] + if not clicommon.is_ipaddress(agent_ip_addr): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] @@ -3508,7 +3511,7 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): click.echo("ManagementVRF is Enabled. 
Provide vrf.") return False found = 0 - ip = ipaddress.ip_address(agentip) + ip = ipaddress.ip_address(agent_ip_addr) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 76f5675690..25c54d36ec 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -877,6 +877,34 @@ def test_config_snmp_community_add_new_community_with_invalid_type_yang_validati assert result.exit_code != 0 assert 'SNMP community configuration failed' in result.output + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1%eth0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_linklocal(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["fe80::1%eth0"], obj=obj) + assert ('fe80::1%eth0', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "fe80::1%eth0||") == {} + + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_ipv4(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["10.1.0.32"], obj=obj) + assert ('10.1.0.32', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "10.1.0.32||") == {} + @classmethod def teardown_class(cls): print("TEARDOWN") From 557d68865cd92f0ede20c89edc636554605a4bf1 Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Mon, 5 Aug 2024 19:37:19 +0300 Subject: [PATCH 12/15] [Mellanox] Add support for Mellanox-SN4700-O32 and Mellanox-SN4700-V64 (#3450) Signed-off-by: Andriy Yurkiv --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 77b504b313..a379e7282f 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,7 +20,7 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", 
"Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, From 317e649514c9b205849ffb5ea96a6a233e38290c Mon Sep 17 00:00:00 2001 From: Vivek Date: Mon, 5 Aug 2024 09:40:18 -0700 Subject: [PATCH 13/15] Fix kexec_unload failure on secure boot enabled platforms (#3439) --- scripts/fast-reboot | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 2eeca11112..e183c34219 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -147,7 +147,7 @@ function clear_boot() # common_clear debug "${REBOOT_TYPE} failure ($?) cleanup ..." - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true teardown_control_plane_assistant @@ -519,7 +519,7 @@ function unload_kernel() { # Unload the previously loaded kernel if any loaded if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then - /sbin/kexec -u + /sbin/kexec -u -a fi } From 687bfc3fddbfc9c41946db13f7fb44b5db46701a Mon Sep 17 00:00:00 2001 From: noaOrMlnx Date: Wed, 17 Jul 2024 13:49:20 +0300 Subject: [PATCH 14/15] Add SPC5 to generic config updater file --- .../gcu_field_operation_validators.conf.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index a379e7282f..ff9bd92930 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,8 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ], + "spc5": ["ACS-SN5640"] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From 8d19c100271c041c409a9f67bf4e7193e5333db2 Mon Sep 17 00:00:00 2001 From: noaOrMlnx Date: Thu, 15 Aug 2024 15:12:15 +0300 Subject: [PATCH 15/15] fix typo of neighbors header --- utilities_common/bgp_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index cb49123c4b..f85faa8bba 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -303,7 +303,7 @@ def 
display_bgp_summary(bgp_summary, af): # "Neighbhor" is a known typo, # but fix it will impact lots of automation scripts that the community users may have developed for years # for now, let's keep it as it is. - headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", + headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] try:
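For context on this final hunk, a small sketch of how the corrected header list would render, assuming (as in other sonic-utilities show commands) that the summary table is produced with `tabulate`; the single neighbor row is invented purely for illustration.

```python
# Illustrative only: renders the corrected "Neighbor" header with tabulate.
from tabulate import tabulate

headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer",
           "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"]
# Hypothetical data for one neighbor, not taken from any real device.
rows = [["10.0.0.1", 4, 65100, 1024, 1030, 0, 0, 0, "1d02h", 6400, "ARISTA01T2"]]

print(tabulate(rows, headers=headers, tablefmt="simple"))
```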