From 6829ded4339229d66af2c9f24cb1bc4ba3d8e90f Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Wed, 5 Jun 2024 19:59:51 +0300 Subject: [PATCH 01/23] Add W-ECMP CLI (#3253) * [wcmp]: Add WCMP CLI. Signed-off-by: Nazarii Hnydyn --- config/bgp_cli.py | 192 ++++++++++++++++++ config/main.py | 5 + doc/Command-Reference.md | 40 ++++ show/bgp_cli.py | 128 ++++++++++++ show/main.py | 3 + tests/bgp_input/assert_show_output.py | 55 +++++ tests/bgp_input/mock_config/all_disabled.json | 6 + tests/bgp_input/mock_config/all_enabled.json | 6 + tests/bgp_input/mock_config/empty.json | 5 + tests/bgp_input/mock_config/tsa_enabled.json | 6 + tests/bgp_input/mock_config/wcmp_enabled.json | 6 + tests/bgp_test.py | 130 ++++++++++++ utilities_common/bgp.py | 23 +++ 13 files changed, 605 insertions(+) create mode 100644 config/bgp_cli.py create mode 100644 show/bgp_cli.py create mode 100644 tests/bgp_input/assert_show_output.py create mode 100644 tests/bgp_input/mock_config/all_disabled.json create mode 100644 tests/bgp_input/mock_config/all_enabled.json create mode 100644 tests/bgp_input/mock_config/empty.json create mode 100644 tests/bgp_input/mock_config/tsa_enabled.json create mode 100644 tests/bgp_input/mock_config/wcmp_enabled.json create mode 100644 tests/bgp_test.py create mode 100644 utilities_common/bgp.py diff --git a/config/bgp_cli.py b/config/bgp_cli.py new file mode 100644 index 00000000000..a5a565359a9 --- /dev/null +++ b/config/bgp_cli.py @@ -0,0 +1,192 @@ +import click +import utilities_common.cli as clicommon + +from sonic_py_common import logger +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + SYSLOG_IDENTIFIER, + to_str, +) + + +log = logger.Logger(SYSLOG_IDENTIFIER) +log.set_min_log_priority_info() + + +# +# BGP DB interface ---------------------------------------------------------------------------------------------------- +# + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector object. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + create_if_not_exists (bool): + In case entry does not exists already a new entry + is not created if this flag is set to False and + creates a new entry if flag is set to True. + Raises: + Exception: when cfg does not satisfy YANG schema. 
+ """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if not data: + raise click.ClickException(f"No field/values to update {key}") + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise click.ClickException(f"{key} does not exist") + + entry_changed = False + for attr, value in data.items(): + if value == cfg[table][key].get(attr): + continue + entry_changed = True + if value is None: + cfg[table][key].pop(attr, None) + else: + cfg[table][key][attr] = value + + if not entry_changed: + return + + db.set_entry(table, key, cfg[table][key]) + + +# +# BGP handlers -------------------------------------------------------------------------------------------------------- +# + + +def tsa_handler(ctx, db, state): + """ Handle config updates for Traffic-Shift-Away (TSA) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "tsa_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured TSA state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure TSA state: {}".format(str(e))) + ctx.fail(str(e)) + + +def wcmp_handler(ctx, db, state): + """ Handle config updates for Weighted-Cost Multi-Path (W-ECMP) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "wcmp_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured W-ECMP state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure W-ECMP state: {}".format(str(e))) + ctx.fail(str(e)) + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="device-global", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL(): + """ Configure BGP device global state """ + + pass + + +# +# BGP device-global tsa ----------------------------------------------------------------------------------------------- +# + + +@DEVICE_GLOBAL.group( + name="tsa", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL_TSA(): + """ Configure Traffic-Shift-Away (TSA) feature """ + + pass + + +@DEVICE_GLOBAL_TSA.command( + name="enabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_ENABLED(ctx, db): + """ Enable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "true") + + +@DEVICE_GLOBAL_TSA.command( + name="disabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_DISABLED(ctx, db): + """ Disable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "false") + + +# +# BGP device-global w-ecmp -------------------------------------------------------------------------------------------- +# + + +@DEVICE_GLOBAL.group( + name="w-ecmp", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL_WCMP(): + """ Configure Weighted-Cost Multi-Path (W-ECMP) feature """ + + pass + + +@DEVICE_GLOBAL_WCMP.command( + name="enabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_WCMP_ENABLED(ctx, db): + """ Enable Weighted-Cost Multi-Path (W-ECMP) feature """ + + wcmp_handler(ctx, db, "true") + + +@DEVICE_GLOBAL_WCMP.command( + name="disabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_WCMP_DISABLED(ctx, db): + """ Disable Weighted-Cost Multi-Path (W-ECMP) feature """ + + wcmp_handler(ctx, db, "false") diff --git a/config/main.py b/config/main.py index 
b750b49820c..1bd3bba634a 100644 --- a/config/main.py +++ b/config/main.py @@ -61,6 +61,7 @@ from . import syslog from . import switchport from . import dns +from . import bgp_cli # mock masic APIs for unit test @@ -4047,6 +4048,10 @@ def bgp(): """BGP-related configuration tasks""" pass + +# BGP module extensions +config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL) + # # 'shutdown' subgroup ('config bgp shutdown ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index dee689b9b80..757438dad07 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2630,6 +2630,26 @@ When enabled, BGP will not advertise routes which aren't yet offloaded. Disabled ``` +**show bgp device-global** + +This command displays BGP device global configuration. + +- Usage: + ```bash + show bgp device-global + ``` + +- Options: + - _-j,--json_: display in JSON format + +- Example: + ```bash + admin@sonic:~$ show bgp device-global + TSA W-ECMP + ------- ------- + enabled enabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ### BGP config commands @@ -2740,6 +2760,26 @@ Once enabled, BGP will not advertise routes which aren't yet offloaded. admin@sonic:~$ sudo config suppress-fib-pending disabled ``` +**config bgp device-global tsa/w-ecmp** + +This command is used to manage BGP device global configuration. + +Feature list: +1. TSA - Traffic-Shift-Away +2. W-ECMP - Weighted-Cost Multi-Path + +- Usage: + ```bash + config bgp device-global tsa + config bgp device-global w-ecmp + ``` + +- Examples: + ```bash + admin@sonic:~$ config bgp device-global tsa enabled + admin@sonic:~$ config bgp device-global w-ecmp enabled + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) ## Console diff --git a/show/bgp_cli.py b/show/bgp_cli.py new file mode 100644 index 00000000000..d475638092e --- /dev/null +++ b/show/bgp_cli.py @@ -0,0 +1,128 @@ +import click +import tabulate +import json +import utilities_common.cli as clicommon + +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + to_str, +) + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: formatted attribute value. 
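+        Example: entry={"tsa_enabled": "true"}, attr={"name": "tsa_enabled", "is-leaf-list": False} returns "true"; a missing attribute returns "N/A".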
+ """ + + if attr["is-leaf-list"]: + value = entry.get(attr["name"], []) + return "\n".join(value) if value else "N/A" + return entry.get(attr["name"], "N/A") + + +# +# BGP CLI ------------------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="bgp", + cls=clicommon.AliasedGroup +) +def BGP(): + """ Show BGP configuration """ + + pass + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@BGP.command( + name="device-global" +) +@click.option( + "-j", "--json", "json_format", + help="Display in JSON format", + is_flag=True, + default=False +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL(ctx, db, json_format): + """ Show BGP device global state """ + + header = [ + "TSA", + "W-ECMP", + ] + body = [] + + table = db.cfgdb.get_table(CFG_BGP_DEVICE_GLOBAL) + entry = table.get(BGP_DEVICE_GLOBAL_KEY, {}) + + if not entry: + click.echo("No configuration is present in CONFIG DB") + ctx.exit(0) + + if json_format: + json_dict = { + "tsa": to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + "w-ecmp": to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + } + click.echo(json.dumps(json_dict, indent=4)) + ctx.exit(0) + + row = [ + to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + ] + body.append(row) + + click.echo(tabulate.tabulate(body, header)) diff --git a/show/main.py b/show/main.py index cfdf30d3c61..c4d99b8eabe 100755 --- a/show/main.py +++ b/show/main.py @@ -66,6 +66,7 @@ from . import plugins from . import syslog from . import dns +from . 
import bgp_cli # Global Variables PLATFORM_JSON = 'platform.json' @@ -325,6 +326,8 @@ def cli(ctx): if is_gearbox_configured(): cli.add_command(gearbox.gearbox) +# bgp module +cli.add_command(bgp_cli.BGP) # # 'vrf' command ("show vrf") diff --git a/tests/bgp_input/assert_show_output.py b/tests/bgp_input/assert_show_output.py new file mode 100644 index 00000000000..3671c3ce5f8 --- /dev/null +++ b/tests/bgp_input/assert_show_output.py @@ -0,0 +1,55 @@ +""" +Module holding the correct values for show CLI command outputs for the bgp_test.py +""" + +show_device_global_empty = """\ +No configuration is present in CONFIG DB +""" + +show_device_global_all_disabled = """\ +TSA W-ECMP +-------- -------- +disabled disabled +""" +show_device_global_all_disabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_all_enabled = """\ +TSA W-ECMP +------- -------- +enabled enabled +""" +show_device_global_all_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "enabled" +} +""" + +show_device_global_tsa_enabled = """\ +TSA W-ECMP +------- -------- +enabled disabled +""" +show_device_global_tsa_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_wcmp_enabled = """\ +TSA W-ECMP +-------- -------- +disabled enabled +""" +show_device_global_wcmp_enabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "enabled" +} +""" diff --git a/tests/bgp_input/mock_config/all_disabled.json b/tests/bgp_input/mock_config/all_disabled.json new file mode 100644 index 00000000000..30a929c7b79 --- /dev/null +++ b/tests/bgp_input/mock_config/all_disabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/all_enabled.json b/tests/bgp_input/mock_config/all_enabled.json new file mode 100644 index 00000000000..eab39897bb6 --- /dev/null +++ b/tests/bgp_input/mock_config/all_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_input/mock_config/empty.json b/tests/bgp_input/mock_config/empty.json new file mode 100644 index 00000000000..e77dd4d79e5 --- /dev/null +++ b/tests/bgp_input/mock_config/empty.json @@ -0,0 +1,5 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "NULL": "NULL" + } +} diff --git a/tests/bgp_input/mock_config/tsa_enabled.json b/tests/bgp_input/mock_config/tsa_enabled.json new file mode 100644 index 00000000000..9c72a5f79d8 --- /dev/null +++ b/tests/bgp_input/mock_config/tsa_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/wcmp_enabled.json b/tests/bgp_input/mock_config/wcmp_enabled.json new file mode 100644 index 00000000000..fddc76b618e --- /dev/null +++ b/tests/bgp_input/mock_config/wcmp_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_test.py b/tests/bgp_test.py new file mode 100644 index 00000000000..d64d0b9eeac --- /dev/null +++ b/tests/bgp_test.py @@ -0,0 +1,130 @@ +import pytest +import os +import logging +import show.main as show +import config.main as config + +from click.testing import CliRunner +from utilities_common.db import Db +from .mock_tables import dbconnector +from .bgp_input import assert_show_output + + +test_path = os.path.dirname(os.path.abspath(__file__)) +input_path = os.path.join(test_path, "bgp_input") +mock_config_path = 
os.path.join(input_path, "mock_config") + +logger = logging.getLogger(__name__) + + +SUCCESS = 0 + + +class TestBgp: + @classmethod + def setup_class(cls): + logger.info("Setup class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + @classmethod + def teardown_class(cls): + logger.info("Teardown class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs.clear() + + # ---------- CONFIG BGP ---------- # + + @pytest.mark.parametrize( + "feature", [ + "tsa", + "w-ecmp" + ] + ) + @pytest.mark.parametrize( + "state", [ + "enabled", + "disabled" + ] + ) + def test_config_device_global(self, feature, state): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["bgp"].commands["device-global"]. + commands[feature].commands[state], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.exit_code == SUCCESS + + # ---------- SHOW BGP ---------- # + + @pytest.mark.parametrize( + "cfgdb,output", [ + pytest.param( + os.path.join(mock_config_path, "empty"), + { + "plain": assert_show_output.show_device_global_empty, + "json": assert_show_output.show_device_global_empty + }, + id="empty" + ), + pytest.param( + os.path.join(mock_config_path, "all_disabled"), + { + "plain": assert_show_output.show_device_global_all_disabled, + "json": assert_show_output.show_device_global_all_disabled_json + }, + id="all-disabled" + ), + pytest.param( + os.path.join(mock_config_path, "all_enabled"), + { + "plain": assert_show_output.show_device_global_all_enabled, + "json": assert_show_output.show_device_global_all_enabled_json + }, + id="all-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "tsa_enabled"), + { + "plain": assert_show_output.show_device_global_tsa_enabled, + "json": assert_show_output.show_device_global_tsa_enabled_json + }, + id="tsa-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "wcmp_enabled"), + { + "plain": assert_show_output.show_device_global_wcmp_enabled, + "json": assert_show_output.show_device_global_wcmp_enabled_json + }, + id="w-ecmp-enabled" + ) + ] + ) + @pytest.mark.parametrize( + "format", [ + "plain", + "json", + ] + ) + def test_show_device_global(self, cfgdb, output, format): + dbconnector.dedicated_dbs["CONFIG_DB"] = cfgdb + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["bgp"].commands["device-global"], + [] if format == "plain" else ["--json"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.output == output[format] + assert result.exit_code == SUCCESS diff --git a/utilities_common/bgp.py b/utilities_common/bgp.py new file mode 100644 index 00000000000..640be87ee00 --- /dev/null +++ b/utilities_common/bgp.py @@ -0,0 +1,23 @@ +from swsscommon.swsscommon import CFG_BGP_DEVICE_GLOBAL_TABLE_NAME as CFG_BGP_DEVICE_GLOBAL # noqa + +# +# BGP constants ------------------------------------------------------------------------------------------------------- +# + +BGP_DEVICE_GLOBAL_KEY = "STATE" + +SYSLOG_IDENTIFIER = "bgp-cli" + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def to_str(state): + """ Convert boolean to string representation """ + if state == "true": + return "enabled" + elif state == "false": + return "disabled" + return state From 1ebd09938c5c95746a957d5f40009950d63a8ce1 Mon Sep 17 00:00:00 2001 From: jfeng-arista 
<98421150+jfeng-arista@users.noreply.github.com> Date: Wed, 5 Jun 2024 11:39:54 -0700 Subject: [PATCH 02/23] Fix show fabric monitor capacity command when the feature is disabled. (#3347) --- scripts/fabricstat | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/fabricstat b/scripts/fabricstat index 520bdd17b1a..6f1893c9dbc 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -540,7 +540,10 @@ Examples: stat = FabricCapacity(namespace, table_cnt, threshold) stat.capacity_print() - click.echo("Monitored fabric capacity threshold: {}".format(threshold[0])) + print_th = "" + if threshold: + print_th = threshold[0] + click.echo("Monitored fabric capacity threshold: {}".format(print_th)) click.echo() click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right')) else: From f96a132278fdc3545c3f6a609dc5a21149c49240 Mon Sep 17 00:00:00 2001 From: Zhijian Li Date: Sun, 9 Jun 2024 10:59:13 +0800 Subject: [PATCH 03/23] [consutil] Fix consule CLI and enhance unittest (#3360) **What I did?** 1. Bugfix for console CLI (This is introduced by [consutil] replace shell=True #2725, * cannot be treated as wildcard correctly). ``` admin@sonic:~$ show line ls: cannot access '/dev/C0-*': No such file or directory ``` 2. Enhance UT to avoid regression mentioned in 1. 3. Fix incorrect statement in UT. 4. Fix critical Flake8 error. **How to verify it** 1. Verified on Nokia-7215 MC0 device. 2. Verified by UT Sign-Off By: Zhijian Li --- consutil/lib.py | 2 +- tests/console_mock/dev/ttyACM1 | 0 tests/console_mock/dev/ttyUSB0 | 0 tests/console_test.py | 19 +++++++++++-------- 4 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 tests/console_mock/dev/ttyACM1 create mode 100644 tests/console_mock/dev/ttyUSB0 diff --git a/consutil/lib.py b/consutil/lib.py index 1d7f967bd37..e597e3b6437 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -277,7 +277,7 @@ def init_device_prefix(): @staticmethod def list_console_ttys(): """Lists all console tty devices""" - cmd = ["ls", SysInfoProvider.DEVICE_PREFIX + "*"] + cmd = ["bash", "-c", "ls " + SysInfoProvider.DEVICE_PREFIX + "*"] output, _ = SysInfoProvider.run_command(cmd, abort=False) ttys = output.split('\n') ttys = list([dev for dev in ttys if re.match(SysInfoProvider.DEVICE_PREFIX + r"\d+", dev) != None]) diff --git a/tests/console_mock/dev/ttyACM1 b/tests/console_mock/dev/ttyACM1 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/console_mock/dev/ttyUSB0 b/tests/console_mock/dev/ttyUSB0 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/console_test.py b/tests/console_test.py index 528f5f4ba8a..4a52a3c52e6 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -14,10 +14,15 @@ from click.testing import CliRunner from utilities_common.db import Db -from consutil.lib import * +from consutil.lib import ConsolePortProvider, ConsolePortInfo, ConsoleSession, SysInfoProvider, DbUtils, \ + InvalidConfigurationError, LineBusyError, LineNotFoundError, ConnectionFailedError from sonic_py_common import device_info from jsonpatch import JsonPatchConflict +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +CONSOLE_MOCK_DIR = SCRIPT_DIR + "/console_mock" + + class TestConfigConsoleCommands(object): @classmethod def setup_class(cls): @@ -543,17 +548,15 @@ def test_sys_info_provider_init_device_prefix_plugin(self): with mock.patch("builtins.open", mock.mock_open(read_data="C0-")): SysInfoProvider.init_device_prefix() assert SysInfoProvider.DEVICE_PREFIX == 
"/dev/C0-" - SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("/dev/ttyUSB0\n/dev/ttyACM1", ""))) def test_sys_info_provider_list_console_ttys(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev/ttyUSB" ttys = SysInfoProvider.list_console_ttys() print(SysInfoProvider.DEVICE_PREFIX) assert len(ttys) == 1 - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("", "ls: cannot access '/dev/ttyUSB*': No such file or directory"))) def test_sys_info_provider_list_console_ttys_device_not_exists(self): + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev_not_exist/ttyUSB" ttys = SysInfoProvider.list_console_ttys() assert len(ttys) == 0 @@ -563,7 +566,7 @@ def test_sys_info_provider_list_console_ttys_device_not_exists(self): """ @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=all_active_processes_output)) def test_sys_info_provider_list_active_console_processes(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" procs = SysInfoProvider.list_active_console_processes() assert len(procs) == 1 assert "0" in procs @@ -572,7 +575,7 @@ def test_sys_info_provider_list_active_console_processes(self): active_process_output = "13751 Wed Mar 6 08:31:35 2019 /usr/bin/sudo picocom -b 9600 -f n /dev/ttyUSB1" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_output)) def test_sys_info_provider_get_active_console_process_info_exists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("13751") assert proc is not None assert proc == ("1", "13751", "Wed Mar 6 08:31:35 2019") @@ -580,7 +583,7 @@ def test_sys_info_provider_get_active_console_process_info_exists(self): active_process_empty_output = "" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_empty_output)) def test_sys_info_provider_get_active_console_process_info_nonexists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("2") assert proc is None From c2370f88d896abbe9dc7b48acf66ebd2477ecdaa Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Mon, 10 Jun 2024 12:03:17 -0700 Subject: [PATCH 04/23] [DPB]Fixing return code of breakout command on failure (#3357) --- config/main.py | 2 +- tests/config_dpb_test.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/main.py b/config/main.py index 1bd3bba634a..30b14e025e8 100644 --- a/config/main.py +++ b/config/main.py @@ -249,7 +249,7 @@ def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \ click.echo("*** Printing dependencies ***") for dep in deps: click.echo(dep) - sys.exit(0) + sys.exit(1) else: click.echo("[ERROR] Port breakout Failed!!! 
Opting Out") raise click.Abort() diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 58a24dc9585..5dcf8149110 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -396,7 +396,7 @@ def test_config_breakout_verbose(self, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output # verbose must be set while creating instance of ConfigMgmt class @@ -538,7 +538,7 @@ def config_dpb_port8_2x50G_1x100G(): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v','-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output assert 'Printing dependencies' in result.output assert 'NO-NSW-PACL-V4' in result.output From d0856afad1c6f7ead6eeca70531c988fb448c335 Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Fri, 14 Jun 2024 01:15:33 -0700 Subject: [PATCH 05/23] Add Checkpoint and Rollback for Multi ASIC. (#3299) #### What I did Add `config` `checkpoint`, `rollback`, `replace`, `list-checkpoints`, `delete-checkpoint` support of Multi ASIC #### How I did it Add namespace for each of operation to support Multi ASIC. #### How to verify it 1. Single ASIC ```admin@str2-msn2700-spy-1:~/gcu$ sudo config checkpoint 20240522-xincun Config Rollbacker: Config checkpoint starting. Config Rollbacker: Checkpoint name: 20240522-xincun. Config Rollbacker: Getting current config db. Config Rollbacker: Getting checkpoint full-path. Config Rollbacker: Ensuring checkpoint directory exist. Config Rollbacker: Saving config db content to /etc/sonic/checkpoints/20240522-xincun.cp.json. Config Rollbacker: Config checkpoint completed. Checkpoint created successfully. admin@str2-msn2700-spy-1:~/gcu$ sudo config list-checkpoints [ "20240522-xincun" ] admin@str2-msn2700-spy-1:~/gcu$ sudo config rollback 20240522-xincun Config Rollbacker: Config rollbacking starting. Config Rollbacker: Checkpoint name: 20240522-xincun. Config Rollbacker: Verifying '20240522-xincun' exists. Config Rollbacker: Loading checkpoint into memory. Config Rollbacker: Replacing config using 'Config Replacer'. Config Replacer: Config replacement starting. Config Replacer: Target config length: 71214. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: localhost: Patch application starting. Patch Applier: localhost: Patch: [] Patch Applier: localhost getting current config db. Patch Applier: localhost: simulating the target full config after applying the patch. Patch Applier: localhost: validating all JsonPatch operations are permitted on the specified fields Patch Applier: localhost: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: localhost: sorting patch updates. Patch Applier: The localhost patch was converted into 0 changes. Patch Applier: localhost: applying 0 changes in order. Patch Applier: localhost: verifying patch updates are reflected on ConfigDB. Patch Applier: localhost patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config Rollbacker: Config rollbacking completed. 
Config rolled back successfully. admin@str2-msn2700-spy-1:~/gcu$ sudo config delete-checkpoint 20240522-xincun Config Rollbacker: Deleting checkpoint starting. Config Rollbacker: Checkpoint name: 20240522-xincun. Config Rollbacker: Checking checkpoint exists. Config Rollbacker: Deleting checkpoint. Config Rollbacker: Deleting checkpoint completed. Checkpoint deleted successfully. admin@str2-msn2700-spy-1:~/gcu$ sudo config list-checkpoints [] ``` 2. Multi ASIC ``` stli@str2-7250-2-lc01:~/gcu$ sudo config checkpoint 20240522-xincun MultiASICConfigRollbacker: Config checkpoint starting. MultiASICConfigRollbacker: Checkpoint name: 20240522-xincun. MultiASICConfigRollbacker: Getting current config db. MultiASICConfigRollbacker: Getting current asic0 config db. MultiASICConfigRollbacker: Getting current asic1 config db. MultiASICConfigRollbacker: Getting checkpoint full-path. MultiASICConfigRollbacker: Ensuring checkpoint directory exist. MultiASICConfigRollbacker: Saving config db content to /etc/sonic/checkpoints/20240522-xincun.cp.json. MultiASICConfigRollbacker: Config checkpoint completed. Checkpoint created successfully. stli@str2-7250-2-lc01:~/gcu$ sudo config list-checkpoints [ "20240522-xincun" ] stli@str2-7250-2-lc01:~/gcu$ sudo config rollback 20240522-xincun MultiASICConfigRollbacker: Config rollbacking starting. MultiASICConfigRollbacker: Checkpoint name: 20240522-xincun. MultiASICConfigRollbacker: Verifying '20240522-xincun' exists. MultiASICConfigRollbacker: Loading checkpoint '20240522-xincun' into memory. MultiASICConfigRollbacker: Replacing config '20240522-xincun' using 'Config Replacer'. Config Replacer: Config replacement starting. Config Replacer: Target config length: 38147. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: localhost: Patch application starting. Patch Applier: localhost: Patch: [] Patch Applier: localhost getting current config db. Patch Applier: localhost: simulating the target full config after applying the patch. Patch Applier: localhost: validating all JsonPatch operations are permitted on the specified fields Patch Applier: localhost: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: localhost: sorting patch updates. Patch Applier: The localhost patch was converted into 0 changes. Patch Applier: localhost: applying 0 changes in order. Patch Applier: localhost: verifying patch updates are reflected on ConfigDB. Patch Applier: localhost patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config Replacer: Config replacement starting. Config Replacer: Target config length: 97546. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: asic0: Patch application starting. Patch Applier: asic0: Patch: [] Patch Applier: asic0 getting current config db. Patch Applier: asic0: simulating the target full config after applying the patch. Patch Applier: asic0: validating all JsonPatch operations are permitted on the specified fields Patch Applier: asic0: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: asic0: sorting patch updates. 
Patch Applier: The asic0 patch was converted into 0 changes. Patch Applier: asic0: applying 0 changes in order. Patch Applier: asic0: verifying patch updates are reflected on ConfigDB. Patch Applier: asic0 patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config Replacer: Config replacement starting. Config Replacer: Target config length: 97713. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: asic1: Patch application starting. Patch Applier: asic1: Patch: [] Patch Applier: asic1 getting current config db. Patch Applier: asic1: simulating the target full config after applying the patch. Patch Applier: asic1: validating all JsonPatch operations are permitted on the specified fields Patch Applier: asic1: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: asic1: sorting patch updates. Patch Applier: The asic1 patch was converted into 0 changes. Patch Applier: asic1: applying 0 changes in order. Patch Applier: asic1: verifying patch updates are reflected on ConfigDB. Patch Applier: asic1 patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. MultiASICConfigRollbacker: Config rollbacking completed. Config rolled back successfully. stli@str2-7250-2-lc01:~/gcu$ sudo config delete-checkpoint 20240522-xincun MultiASICConfigRollbacker: Deleting checkpoint starting. MultiASICConfigRollbacker: Checkpoint name: 20240522-xincun. MultiASICConfigRollbacker: Checking checkpoint: 20240522-xincun exists. MultiASICConfigRollbacker: Deleting checkpoint: 20240522-xincun. MultiASICConfigRollbacker: Deleting checkpoint: 20240522-xincun completed. Checkpoint deleted successfully. stli@str2-7250-2-lc01:~/gcu$ sudo config list-checkpoints [] stli@str2-7250-2-lc01:~/gcu$ sudo config replace 20240522-xincun.cp.json Config Replacer: Config replacement starting. Config Replacer: Target config length: 38147. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: localhost: Patch application starting. Patch Applier: localhost: Patch: [] Patch Applier: localhost getting current config db. Patch Applier: localhost: simulating the target full config after applying the patch. Patch Applier: localhost: validating all JsonPatch operations are permitted on the specified fields Patch Applier: localhost: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: localhost: sorting patch updates. Patch Applier: The localhost patch was converted into 0 changes. Patch Applier: localhost: applying 0 changes in order. Patch Applier: localhost: verifying patch updates are reflected on ConfigDB. Patch Applier: localhost patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config Replacer: Config replacement starting. Config Replacer: Target config length: 97546. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. 
Patch Applier: asic0: Patch application starting. Patch Applier: asic0: Patch: [] Patch Applier: asic0 getting current config db. Patch Applier: asic0: simulating the target full config after applying the patch. Patch Applier: asic0: validating all JsonPatch operations are permitted on the specified fields Patch Applier: asic0: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: asic0: sorting patch updates. Patch Applier: The asic0 patch was converted into 0 changes. Patch Applier: asic0: applying 0 changes in order. Patch Applier: asic0: verifying patch updates are reflected on ConfigDB. Patch Applier: asic0 patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config Replacer: Config replacement starting. Config Replacer: Target config length: 97713. Config Replacer: Getting current config db. Config Replacer: Generating patch between target config and current config db. Config Replacer: Applying patch using 'Patch Applier'. Patch Applier: asic1: Patch application starting. Patch Applier: asic1: Patch: [] Patch Applier: asic1 getting current config db. Patch Applier: asic1: simulating the target full config after applying the patch. Patch Applier: asic1: validating all JsonPatch operations are permitted on the specified fields Patch Applier: asic1: validating target config does not have empty tables, since they do not show up in ConfigDb. Patch Applier: asic1: sorting patch updates. Patch Applier: The asic1 patch was converted into 0 changes. Patch Applier: asic1: applying 0 changes in order. Patch Applier: asic1: verifying patch updates are reflected on ConfigDB. Patch Applier: asic1 patch application completed. Config Replacer: Verifying config replacement is reflected on ConfigDB. Config Replacer: Config replacement completed. Config replaced successfully. 
``` --- config/main.py | 2 +- generic_config_updater/change_applier.py | 32 +- generic_config_updater/generic_updater.py | 301 +++++++++++++----- generic_config_updater/gu_common.py | 22 +- tests/config_test.py | 135 ++++++++ .../change_applier_test.py | 6 +- .../generic_updater_test.py | 4 +- .../generic_config_updater/gu_common_test.py | 22 ++ .../multiasic_change_applier_test.py | 22 +- .../multiasic_generic_updater_test.py | 6 +- 10 files changed, 420 insertions(+), 132 deletions(-) diff --git a/config/main.py b/config/main.py index 30b14e025e8..4b0d32fc978 100644 --- a/config/main.py +++ b/config/main.py @@ -1185,7 +1185,7 @@ def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_ru scope_for_log = scope if scope else HOST_NAMESPACE try: # Call apply_patch with the ASIC-specific changes and predefined parameters - GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), + GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py index 32a356bf9ae..8d8d23f87a1 100644 --- a/generic_config_updater/change_applier.py +++ b/generic_config_updater/change_applier.py @@ -16,6 +16,7 @@ print_to_console = False + def set_verbose(verbose=False): global print_to_console, logger @@ -34,11 +35,12 @@ def log_error(m): logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console) -def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE): - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) +def get_config_db(scope=multi_asic.DEFAULT_NAMESPACE): + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=scope) config_db.connect() return config_db + def set_config(config_db, tbl, key, data): config_db.set_entry(tbl, key, data) @@ -61,11 +63,9 @@ class DryRunChangeApplier: def __init__(self, config_wrapper): self.config_wrapper = config_wrapper - def apply(self, change): self.config_wrapper.apply_change_to_config_db(change) - def remove_backend_tables_from_config(self, data): return data @@ -74,9 +74,9 @@ class ChangeApplier: updater_conf = None - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace - self.config_db = get_config_db(self.namespace) + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope + self.config_db = get_config_db(self.scope) self.backend_tables = [ "BUFFER_PG", "BUFFER_PROFILE", @@ -86,7 +86,6 @@ def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): with open(UPDATER_CONF_FILE, "r") as s: ChangeApplier.updater_conf = json.load(s) - def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys): # cmd is in the format as . 
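        # i.e. a dotted name such as "some.module.validate" (hypothetical example):
        # everything before the last '.' names the module to import, and the final
        # component names the method invoked via method_to_call below.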
# @@ -98,7 +97,6 @@ def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys): return method_to_call(old_cfg, upd_cfg, keys) - def _services_validate(self, old_cfg, upd_cfg, keys): lst_svcs = set() lst_cmds = set() @@ -124,7 +122,6 @@ def _services_validate(self, old_cfg, upd_cfg, keys): log_debug("service invoked: {}".format(cmd)) return 0 - def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): for key in set(run_tbl.keys()).union(set(upd_tbl.keys())): run_data = run_tbl.get(key, None) @@ -135,20 +132,17 @@ def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): upd_keys[tbl][key] = {} log_debug("Patch affected tbl={} key={}".format(tbl, key)) - def _report_mismatch(self, run_data, upd_data): log_error("run_data vs expected_data: {}".format( str(jsondiff.diff(run_data, upd_data))[0:40])) - def apply(self, change): run_data = self._get_running_config() upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data))) upd_keys = defaultdict(dict) for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))): - self._upd_data(tbl, run_data.get(tbl, {}), - upd_data.get(tbl, {}), upd_keys) + self._upd_data(tbl, run_data.get(tbl, {}), upd_data.get(tbl, {}), upd_keys) ret = self._services_validate(run_data, upd_data, upd_keys) if not ret: @@ -168,9 +162,9 @@ def remove_backend_tables_from_config(self, data): def _get_running_config(self): _, fname = tempfile.mkstemp(suffix="_changeApplier") - - if self.namespace: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + + if self.scope: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] else: cmd = ['sonic-cfggen', '-d', '--print-data'] @@ -181,7 +175,9 @@ def _get_running_config(self): return_code = result.returncode if return_code: os.remove(fname) - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + raise GenericConfigUpdaterError( + f"Failed to get running config for scope: {self.scope}," + + f"Return code: {return_code}, Error: {err}") run_data = {} try: diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index 374ce7670cf..b6d65e2ce65 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,24 +1,25 @@ import json import jsonpointer import os +import subprocess + from enum import Enum from .gu_common import HOST_NAMESPACE, GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ - DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging + DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ - TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter + TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier from sonic_py_common import multi_asic CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" + def extract_scope(path): if not path: raise Exception("Wrong patch with empty path.") - pointer = jsonpointer.JsonPointer(path) parts = pointer.parts - if not parts: raise GenericConfigUpdaterError("Wrong patch with empty path.") if parts[0].startswith("asic"): @@ -32,10 +33,39 @@ def extract_scope(path): else: scope = "" remainder = path - return scope, remainder +def get_cmd_output(cmd): + proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) + return proc.communicate()[0], proc.returncode + + +def get_config_json(): + scope_list = 
[multi_asic.DEFAULT_NAMESPACE] + all_running_config = {} + if multi_asic.is_multi_asic(): + scope_list.extend(multi_asic.get_namespace_list()) + for scope in scope_list: + command = ["sonic-cfggen", "-d", "--print-data"] + if scope != multi_asic.DEFAULT_NAMESPACE: + command += ["-n", scope] + + running_config_text, returncode = get_cmd_output(command) + if returncode: + raise GenericConfigUpdaterError( + f"Fetch all runningconfiguration failed as output:{running_config_text}") + running_config = json.loads(running_config_text) + + if multi_asic.is_multi_asic(): + if scope == multi_asic.DEFAULT_NAMESPACE: + scope = HOST_NAMESPACE + all_running_config[scope] = running_config + else: + all_running_config = running_config + return all_running_config + + class ConfigLock: def acquire_lock(self): # TODO: Implement ConfigLock @@ -50,22 +80,23 @@ class ConfigFormat(Enum): CONFIGDB = 1 SONICYANG = 2 + class PatchApplier: def __init__(self, patchsorter=None, changeapplier=None, config_wrapper=None, patch_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) - self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace) + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(scope=self.scope) def apply(self, patch, sort=True): - scope = self.namespace if self.namespace else HOST_NAMESPACE + scope = self.scope if self.scope else HOST_NAMESPACE self.logger.log_notice(f"{scope}: Patch application starting.") self.logger.log_notice(f"{scope}: Patch: {patch}") @@ -83,14 +114,13 @@ def apply(self, patch, sort=True): # Validate target config does not have empty tables since they do not show up in ConfigDb self.logger.log_notice(f"""{scope}: validating target config does not have empty tables, - since they do not show up in ConfigDb.""") + since they do not show up in ConfigDb.""") empty_tables = self.config_wrapper.get_empty_tables(target_config) if empty_tables: # if there are empty tables empty_tables_txt = ", ".join(empty_tables) - raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables " \ - "which is not allowed in ConfigDb. " \ - f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") - + raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables \ + which is not allowed in ConfigDb. 
\ + Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply if sort: self.logger.log_notice(f"{scope}: sorting patch updates.") @@ -115,19 +145,19 @@ def apply(self, patch, sort=True): new_config = self.config_wrapper.get_config_db_as_json() self.changeapplier.remove_backend_tables_from_config(target_config) self.changeapplier.remove_backend_tables_from_config(new_config) - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated") self.logger.log_notice(f"{scope} patch application completed.") class ConfigReplacer: - def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True) - self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) def replace(self, target_config): self.logger.log_notice("Config replacement starting.") @@ -145,7 +175,7 @@ def replace(self, target_config): self.logger.log_notice("Verifying config replacement is reflected on ConfigDB.") new_config = self.config_wrapper.get_config_db_as_json() - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"After replacing config, there is still some parts not updated") self.logger.log_notice("Config replacement completed.") @@ -156,23 +186,24 @@ def __init__(self, checkpoints_dir=CHECKPOINTS_DIR, config_replacer=None, config_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True) + self.util = Util(checkpoints_dir=checkpoints_dir) self.checkpoints_dir = checkpoints_dir - self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) def rollback(self, checkpoint_name): self.logger.log_notice("Config rollbacking starting.") self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") - if not 
self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Loading checkpoint into memory.") - target_config = self._get_checkpoint_content(checkpoint_name) + target_config = self.util.get_checkpoint_content(checkpoint_name) self.logger.log_notice(f"Replacing config using 'Config Replacer'.") self.config_replacer.replace(target_config) @@ -184,16 +215,16 @@ def checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice("Getting current config db.") - json_content = self.config_wrapper.get_config_db_as_json() + json_content = get_config_json() self.logger.log_notice("Getting checkpoint full-path.") - path = self._get_checkpoint_full_path(checkpoint_name) + path = self.util.get_checkpoint_full_path(checkpoint_name) self.logger.log_notice("Ensuring checkpoint directory exist.") - self._ensure_checkpoints_dir_exists() + self.util.ensure_checkpoints_dir_exists() self.logger.log_notice(f"Saving config db content to {path}.") - self._save_json_file(path, json_content) + self.util.save_json_file(path, json_content) self.logger.log_notice("Config checkpoint completed.") @@ -201,12 +232,12 @@ def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") - if not self._checkpoints_dir_exist(): + if not self.util.checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") return [] self.logger.log_info("Getting checkpoints in checkpoints directory.") - checkpoint_names = self._get_checkpoint_names() + checkpoint_names = self.util.get_checkpoint_names() checkpoints_len = len(checkpoint_names) self.logger.log_info(f"Found {checkpoints_len} checkpoint{'s' if checkpoints_len != 1 else ''}{':' if checkpoints_len > 0 else '.'}") @@ -222,59 +253,139 @@ def delete_checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Checking checkpoint exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Deleting checkpoint.") - self._delete_checkpoint(checkpoint_name) + self.util.delete_checkpoint(checkpoint_name) self.logger.log_notice("Deleting checkpoint completed.") - def _ensure_checkpoints_dir_exists(self): + +class MultiASICConfigReplacer(ConfigReplacer): + def __init__(self, + patch_applier=None, + config_wrapper=None, + patch_wrapper=None, + scope=multi_asic.DEFAULT_NAMESPACE): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigReplacer", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + super().__init__(patch_applier, config_wrapper, patch_wrapper, scope) + + def replace(self, target_config): + config_keys = set(target_config.keys()) + missing_scopes = set(self.scopelist) - config_keys + if missing_scopes: + raise GenericConfigUpdaterError(f"To be replace config is missing scope: {missing_scopes}") + + for scope in self.scopelist: + scope_config = target_config.pop(scope) + if scope.lower() == HOST_NAMESPACE: + scope = multi_asic.DEFAULT_NAMESPACE + ConfigReplacer(scope=scope).replace(scope_config) + + +class 
MultiASICConfigRollbacker(FileSystemConfigRollbacker): + def __init__(self, + checkpoints_dir=CHECKPOINTS_DIR, + config_replacer=None, + config_wrapper=None): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigRollbacker", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + self.checkpoints_dir = checkpoints_dir + self.util = Util(checkpoints_dir=checkpoints_dir) + super().__init__(config_wrapper=config_wrapper, config_replacer=config_replacer) + + def rollback(self, checkpoint_name): + self.logger.log_notice("Config rollbacking starting.") + self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") + self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") + + if not self.util.check_checkpoint_exists(checkpoint_name): + raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") + + self.logger.log_notice(f"Loading checkpoint '{checkpoint_name}' into memory.") + target_config = self.util.get_checkpoint_content(checkpoint_name) + self.logger.log_notice(f"Replacing config '{checkpoint_name}' using 'Config Replacer'.") + + for scope in self.scopelist: + config = target_config.pop(scope) + if scope.lower() == HOST_NAMESPACE: + scope = multi_asic.DEFAULT_NAMESPACE + ConfigReplacer(scope=scope).replace(config) + + self.logger.log_notice("Config rollbacking completed.") + + def checkpoint(self, checkpoint_name): + all_configs = get_config_json() + self.logger.log_notice("Config checkpoint starting.") + self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") + + self.logger.log_notice("Getting checkpoint full-path.") + path = self.util.get_checkpoint_full_path(checkpoint_name) + + self.logger.log_notice("Ensuring checkpoint directory exist.") + self.util.ensure_checkpoints_dir_exists() + + self.logger.log_notice(f"Saving config db content to {path}.") + self.util.save_json_file(path, all_configs) + + self.logger.log_notice("Config checkpoint completed.") + + +class Util: + def __init__(self, checkpoints_dir=CHECKPOINTS_DIR): + self.checkpoints_dir = checkpoints_dir + + def ensure_checkpoints_dir_exists(self): os.makedirs(self.checkpoints_dir, exist_ok=True) - def _save_json_file(self, path, json_content): + def save_json_file(self, path, json_content): with open(path, "w") as fh: fh.write(json.dumps(json_content)) - def _get_checkpoint_content(self, checkpoint_name): - path = self._get_checkpoint_full_path(checkpoint_name) + def get_checkpoint_content(self, checkpoint_name): + path = self.get_checkpoint_full_path(checkpoint_name) with open(path) as fh: text = fh.read() return json.loads(text) - def _get_checkpoint_full_path(self, name): + def get_checkpoint_full_path(self, name): return os.path.join(self.checkpoints_dir, f"{name}{CHECKPOINT_EXT}") - def _get_checkpoint_names(self): + def get_checkpoint_names(self): file_names = [] for file_name in os.listdir(self.checkpoints_dir): if file_name.endswith(CHECKPOINT_EXT): # Remove extension from file name. 
# Example assuming ext is '.cp.json', then 'checkpoint1.cp.json' becomes 'checkpoint1' file_names.append(file_name[:-len(CHECKPOINT_EXT)]) - return file_names - def _checkpoints_dir_exist(self): + def checkpoints_dir_exist(self): return os.path.isdir(self.checkpoints_dir) - def _check_checkpoint_exists(self, name): - path = self._get_checkpoint_full_path(name) + def check_checkpoint_exists(self, name): + path = self.get_checkpoint_full_path(name) return os.path.isfile(path) - def _delete_checkpoint(self, name): - path = self._get_checkpoint_full_path(name) + def delete_checkpoint(self, name): + path = self.get_checkpoint_full_path(name) return os.remove(path) class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, + decorated_patch_applier=None, + decorated_config_replacer=None, + decorated_config_rollbacker=None, + scope=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self, namespace=namespace) - ConfigReplacer.__init__(self, namespace=namespace) - FileSystemConfigRollbacker.__init__(self, namespace=namespace) - + PatchApplier.__init__(self, scope=scope) + ConfigReplacer.__init__(self, scope=scope) + FileSystemConfigRollbacker.__init__(self, scope=scope) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer self.decorated_config_rollbacker = decorated_config_rollbacker @@ -299,10 +410,14 @@ def delete_checkpoint(self, checkpoint_name): class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) - - self.namespace = namespace + def __init__(self, + patch_wrapper, + config_wrapper, + decorated_patch_applier=None, + decorated_config_replacer=None, + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, scope=scope) + self.scope = scope self.patch_wrapper = patch_wrapper self.config_wrapper = config_wrapper @@ -321,9 +436,12 @@ def __init__(self, decorated_config_replacer=None, decorated_config_rollbacker=None, config_lock=ConfigLock(), - namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) - + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, + decorated_patch_applier, + decorated_config_replacer, + decorated_config_rollbacker, + scope=scope) self.config_lock = config_lock def apply(self, patch, sort=True): @@ -345,20 +463,20 @@ def execute_write_action(self, action, *args): class GenericUpdateFactory: - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = 
self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) if config_format == ConfigFormat.CONFIGDB: pass @@ -366,62 +484,75 @@ def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_ patch_applier = SonicYangDecorator(decorated_patch_applier=patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, scope=self.scope) return patch_applier def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper) + else: + config_replacer = ConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper, + scope=self.scope) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, namespace=self.namespace) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, scope=self.scope) return config_replacer def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_tables=False, ignore_paths=[]): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) - - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) 
+ scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier) + config_rollbacker = MultiASICConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer) + else: + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier, + scope=self.scope) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer, + scope=self.scope) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, scope=self.scope) return config_rollbacker @@ -430,15 +561,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper(namespace=self.namespace) + return DryRunConfigWrapper(scope=self.scope) else: - return ConfigWrapper(namespace=self.namespace) + return ConfigWrapper(scope=self.scope) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier(namespace=self.namespace) + return ChangeApplier(scope=self.scope) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not ignore_non_yang_tables and not ignore_paths: @@ -457,9 +588,9 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, class GenericUpdater: - def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, generic_update_factory=None, scope=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(scope=scope) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index c15334222ad..938aa1d034a 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -18,6 +18,7 @@ GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" HOST_NAMESPACE = "localhost" + class GenericConfigUpdaterError(Exception): pass @@ -53,8 +54,8 @@ def __eq__(self, other): return False class ConfigWrapper: - def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None @@ -65,8 +66,8 @@ def get_config_db_as_json(self): return config_db_json def _get_config_db_as_text(self): - if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + if self.scope is not None and self.scope != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] else: cmd = ['sonic-cfggen', '-d', '--print-data'] @@ -74,7 +75,8 @@ def _get_config_db_as_text(self): text, err = result.communicate() return_code = 
result.returncode if return_code: # non-zero means failure - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.scope}," + f" Return code: {return_code}, Error: {err}") return text def get_sonic_yang_as_json(self): @@ -301,8 +303,8 @@ def create_sonic_yang_with_loaded_models(self): class DryRunConfigWrapper(ConfigWrapper): # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. - def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE): - super().__init__(namespace=namespace) + def __init__(self, initial_imitated_config_db=None, scope=multi_asic.DEFAULT_NAMESPACE): + super().__init__(scope=scope) self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True) self.imitated_config_db = copy.deepcopy(initial_imitated_config_db) @@ -322,9 +324,9 @@ def _init_imitated_config_db_if_none(self): class PatchWrapper: - def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.namespace) + def __init__(self, config_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.scope) self.path_addressing = PathAddressing(self.config_wrapper) def validate_config_db_patch_has_yang_models(self, patch): diff --git a/tests/config_test.py b/tests/config_test.py index f69f799561b..670932a436d 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -2900,6 +2900,7 @@ def setUp(self): self.runner = CliRunner() self.patch_file_path = 'path/to/patch.json' + self.replace_file_path = 'path/to/replace.json' self.patch_content = [ { "op": "add", @@ -3083,6 +3084,140 @@ def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subproces # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock()) + def test_replace_multiasic(self): + # Mock open to simulate file reading + mock_replace_content = copy.deepcopy(self.all_config) + with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.replace_all = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["replace"], + [self.replace_file_path], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Config replaced successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.replace_file_path, 'r') + + @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock()) + def test_replace_multiasic_missing_scope(self): + # Mock open to simulate file reading + mock_replace_content = copy.deepcopy(self.all_config) + mock_replace_content.pop("asic0") 
+        with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True):
+            print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+            # Invocation of the command with the CliRunner
+            result = self.runner.invoke(config.config.commands["replace"],
+                                        [self.replace_file_path],
+                                        catch_exceptions=True)
+
+            print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+            # Assertions and verifications
+            self.assertNotEqual(result.exit_code, 0, "Command should fail")
+            self.assertIn("Failed to replace config", result.output)
+
+    @patch('generic_config_updater.generic_updater.subprocess.Popen')
+    @patch('generic_config_updater.generic_updater.Util.ensure_checkpoints_dir_exists', mock.Mock(return_value=True))
+    @patch('generic_config_updater.generic_updater.Util.save_json_file', MagicMock())
+    def test_checkpoint_multiasic(self, mock_subprocess_popen):
+        allconfigs = copy.deepcopy(self.all_config)
+
+        # Create mock instances for each subprocess call
+        mock_instance_localhost = MagicMock()
+        mock_instance_localhost.communicate.return_value = (json.dumps(allconfigs["localhost"]), 0)
+        mock_instance_localhost.returncode = 0
+
+        mock_instance_asic0 = MagicMock()
+        mock_instance_asic0.communicate.return_value = (json.dumps(allconfigs["asic0"]), 0)
+        mock_instance_asic0.returncode = 0
+
+        mock_instance_asic1 = MagicMock()
+        mock_instance_asic1.communicate.return_value = (json.dumps(allconfigs["asic1"]), 0)
+        mock_instance_asic1.returncode = 0
+
+        # Setup side effect to return different mock instances based on input arguments
+        def side_effect(*args, **kwargs):
+            if "asic" not in args[0]:
+                return mock_instance_localhost
+            elif "asic0" in args[0]:
+                return mock_instance_asic0
+            elif "asic1" in args[0]:
+                return mock_instance_asic1
+            else:
+                return MagicMock()  # Default case
+
+        mock_subprocess_popen.side_effect = side_effect
+
+        checkpointname = "checkpointname"
+        print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+        # Invocation of the command with the CliRunner
+        result = self.runner.invoke(config.config.commands["checkpoint"],
+                                    [checkpointname],
+                                    catch_exceptions=True)
+
+        print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+        # Assertions and verifications
+        self.assertEqual(result.exit_code, 0, "Command should succeed")
+        self.assertIn("Checkpoint created successfully.", result.output)
+
+    @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True))
+    @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+    @patch('generic_config_updater.generic_updater.Util.get_checkpoint_content')
+    def test_rollback_multiasic(self, mock_get_checkpoint_content):
+        mock_get_checkpoint_content.return_value = copy.deepcopy(self.all_config)
+        checkpointname = "checkpointname"
+        print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+        # Invocation of the command with the CliRunner
+        result = self.runner.invoke(config.config.commands["rollback"],
+                                    [checkpointname],
+                                    catch_exceptions=True)
+
+        print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+        # Assertions and verifications
+        self.assertEqual(result.exit_code, 0, "Command should succeed")
+        self.assertIn("Config rolled back successfully.", result.output)
+
+    @patch('generic_config_updater.generic_updater.Util.checkpoints_dir_exist', mock.Mock(return_value=True))
+    @patch('generic_config_updater.generic_updater.Util.get_checkpoint_names',
+           mock.Mock(return_value=["checkpointname"]))
+    def 
test_list_checkpoint_multiasic(self): + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["list-checkpoints"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("checkpointname", result.output) + + @patch('generic_config_updater.generic_updater.Util.delete_checkpoint', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + def test_delete_checkpoint_multiasic(self): + checkpointname = "checkpointname" + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.delete_checkpoint = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint deleted successfully.", result.output) + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index 4c9b33c3a4d..7aad111f181 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -242,10 +242,11 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): running_config = copy.deepcopy(read_data["running_data"]) json_changes = copy.deepcopy(read_data["json_changes"]) + generic_config_updater.change_applier.ChangeApplier.updater_conf = None generic_config_updater.change_applier.UPDATER_CONF_FILE = CONF_FILE generic_config_updater.change_applier.set_verbose(True) generic_config_updater.services_validator.set_verbose(True) - + applier = generic_config_updater.change_applier.ChangeApplier() debug_print("invoked applier") @@ -254,7 +255,7 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): # Take copy for comparison start_running_config = copy.deepcopy(running_config) - + debug_print("main: json_change_index={}".format(json_change_index)) applier.apply(mock_obj()) @@ -297,4 +298,3 @@ def test_apply__calls_apply_change_to_config_db(self): # Assert applier.config_wrapper.apply_change_to_config_db.assert_has_calls([call(change)]) - diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index 96c25e3552b..8480dc23b0b 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ b/tests/generic_config_updater/generic_updater_test.py @@ -2,7 +2,7 @@ import os import shutil import unittest -from unittest.mock import MagicMock, Mock, call +from unittest.mock import MagicMock, Mock, call, patch from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.generic_updater as gu @@ -124,6 +124,8 @@ def __create_config_replacer(self, changes=None, verified_same_config=True): return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +@patch('generic_config_updater.generic_updater.get_config_json', 
MagicMock(return_value={})) class TestFileSystemConfigRollbacker(unittest.TestCase): def setUp(self): self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a2a776c0bb2..4a16a5ca4f0 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -76,6 +76,28 @@ def test_ctor__default_values_set(self): self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + @patch('generic_config_updater.gu_common.subprocess.Popen') + def test_get_config_db_as_text(self, mock_popen): + config_wrapper = gu_common.ConfigWrapper() + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + + config_wrapper = gu_common.ConfigWrapper(scope="asic0") + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): # Arrange config_wrapper = self.config_wrapper_mock diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index e8b277618f1..d7f734d2ec5 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -40,7 +40,7 @@ def test_extract_scope(self): @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db @@ -67,7 +67,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r } } - # Instantiate ChangeApplier with the default namespace + # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier() # Prepare a change object or data that applier.apply would use @@ -81,7 +81,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db @@ -108,8 +108,8 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run } } - # Instantiate ChangeApplier with the default namespace - applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + # Instantiate ChangeApplier with the default scope + applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") # Prepare a change object or 
data that applier.apply would use
         change = MagicMock()
@@ -117,7 +117,7 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run
         # Call the apply method with the change object
         applier.apply(change)

-        # Assert ConfigDBConnector called with the correct namespace
+        # Assert ConfigDBConnector called with the correct scope
         mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0")

     @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True)
@@ -129,9 +129,9 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con
         # Setup mock for json.load to return some running configuration
         mock_get_running_config.side_effect = Exception("Failed to get running config")

-        # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment
-        namespace = "asic0"
-        applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace)
+        # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment
+        scope = "asic0"
+        applier = generic_config_updater.change_applier.ChangeApplier(scope=scope)

         # Prepare a change object or data that applier.apply would use
         change = MagicMock()
@@ -159,8 +159,8 @@ def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, moc
             }
         }

-        # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment
-        applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0")
+        # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment
+        applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0")

         # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables
         change = MagicMock()
diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py
index 4a55eb98be3..5acdd391f03 100644
--- a/tests/generic_config_updater/multiasic_generic_updater_test.py
+++ b/tests/generic_config_updater/multiasic_generic_updater_test.py
@@ -19,7 +19,7 @@ class TestMultiAsicPatchApplier(unittest.TestCase):
     @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch')
    @patch('generic_config_updater.generic_updater.ChangeApplier')
     def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_patch, mock_get_config, mock_get_empty_tables):
-        namespace = "asic0"
+        scope = "asic0"
         patch_data = jsonpatch.JsonPatch([
             {
                 "op": "add",
@@ -158,10 +158,10 @@ def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_
             }
         }

-        patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace)
+        patch_applier = generic_config_updater.generic_updater.PatchApplier(scope=scope)

         # Apply the patch and verify
         patch_applier.apply(patch_data)

         # Assertions to ensure the namespace is correctly used in underlying calls
-        mock_ChangeApplier.assert_called_once_with(namespace=namespace)
+        mock_ChangeApplier.assert_called_once_with(scope=scope)

From 31f5fa8ee5b2b7cfaa500928a937e981f9eb2763 Mon Sep 17 00:00:00 2001
From: Hua Liu <58683130+liuh-80@users.noreply.github.com>
Date: Tue, 18 Jun 2024 08:50:39 +0800
Subject: [PATCH 06/23] Improve load_minigraph to wait for eth0 restart before
 exit (#3365)

* Improve load_minigraph to wait for eth0 restart before exit

---
 config/main.py       | 37 +++++++++++++++++++++++++++++++++++++
 tests/config_test.py |  9 +++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/config/main.py b/config/main.py
index 4b0d32fc978..c72fa47e5a6 100644
--- a/config/main.py
+++ b/config/main.py
@@ -898,10 +898,47 @@ def _reset_failed_services():
     for service in _get_sonic_services():
         clicommon.run_command(['systemctl', 'reset-failed', str(service)])

+
+def get_service_finish_timestamp(service):
+    out, _ = clicommon.run_command(['sudo',
+                                    'systemctl',
+                                    'show',
+                                    '--no-pager',
+                                    service,
+                                    '-p',
+                                    'ExecMainExitTimestamp',
+                                    '--value'],
+                                   return_cmd=True)
+    return out.strip(' \t\n\r')
+
+
+def wait_service_restart_finish(service, last_timestamp, timeout=30):
+    start_time = time.time()
+    elapsed_time = 0
+    while elapsed_time < timeout:
+        current_timestamp = get_service_finish_timestamp(service)
+        if current_timestamp and (current_timestamp != last_timestamp):
+            return
+
+        time.sleep(1)
+        elapsed_time = time.time() - start_time
+
+    log.log_warning("Service: {} did not restart within {} seconds, stop waiting".format(service, timeout))
+
+
 def _restart_services():
+    last_interface_config_timestamp = get_service_finish_timestamp('interfaces-config')
+    last_networking_timestamp = get_service_finish_timestamp('networking')
+
     click.echo("Restarting SONiC target ...")
     clicommon.run_command(['sudo', 'systemctl', 'restart', 'sonic.target'])

+    # These services will restart eth0 and cause the device to lose network connectivity for about 10 seconds.
+    # When TACACS is enabled, every remote user command is authorized by the TACACS service over the network,
+    # so if load_minigraph exits before eth0 has restarted, commands issued after load_minigraph may fail.
+    wait_service_restart_finish('interfaces-config', last_interface_config_timestamp)
+    wait_service_restart_finish('networking', last_networking_timestamp)
+
     try:
         subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
         click.echo("Enabling container monitoring ...")
diff --git a/tests/config_test.py b/tests/config_test.py
index 670932a436d..fc70861c24f 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -1,4 +1,5 @@
 import copy
+import datetime
 import pytest
 import filecmp
 import importlib
@@ -244,6 +245,10 @@ def mock_run_command_side_effect(*args, **kwargs):
             return 'enabled', 0
         elif command == 'cat /var/run/dhclient.eth0.pid':
             return '101', 0
+        elif command == 'sudo systemctl show --no-pager interfaces-config -p ExecMainExitTimestamp --value':
+            return f'{datetime.datetime.now()}', 0
+        elif command == 'sudo systemctl show --no-pager networking -p ExecMainExitTimestamp --value':
+            return f'{datetime.datetime.now()}', 0
         else:
             return '', 0
@@ -656,7 +661,7 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic):
             assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output
             # Verify "systemctl reset-failed" is called for services under sonic.target
             mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss'])
-            assert mock_run_command.call_count == 8
+            assert mock_run_command.call_count == 12

     @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None)))
     def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic):
@@ -671,7 +676,7 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad
             assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output
             # Verify "systemctl reset-failed" is called for services under sonic.target
             mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss'])
-            assert mock_run_command.call_count == 8
+            assert mock_run_command.call_count == 12

     @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, None)))
     def test_load_minigraph_platform_plugin_fail(self, get_cmd_module, setup_single_broadcom_asic):

From 8f715acf5d770344f936d73d1a5bce5793250265 Mon Sep 17 00:00:00 2001
From: jingwenxie
Date: Tue, 18 Jun 2024 12:19:20 +0800
Subject: [PATCH 07/23] [config]Support single file reload for multiasic
 (#3349)

ADO: 27595279

What I did
Extend config reload to support single file reloading for multi-asic

How I did it
Add the single file reload support for multi-asic

How to verify it
Unit test and manual test on multi-asic DUT

---
 config/main.py       | 295 +++++++++++++++++++++++++++----------------
 tests/config_test.py | 220 ++++++++++++++++++++++++++++++++
 2 files changed, 406 insertions(+), 109 deletions(-)

diff --git a/config/main.py b/config/main.py
index c72fa47e5a6..52816bdd414 100644
--- a/config/main.py
+++ b/config/main.py
@@ -31,7 +31,7 @@
 from sonic_yang_cfg_generator import SonicYangCfgDbGenerator
 from utilities_common import util_base
 from swsscommon import swsscommon
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector
 from utilities_common.db import Db
 from utilities_common.intf_filter import parse_interface_in_filter
 from utilities_common import bgp_util
@@ -1197,7 +1197,7 @@ def validate_gre_type(ctx, _, value):
         raise click.UsageError("{} is not a valid GRE type".format(value))


-def multi_asic_save_config(db, filename):
+def multiasic_save_to_singlefile(db, filename):
     """A function to save all asic's config to single file
     """
     all_current_config = {}
@@ -1264,6 +1264,96 @@ def validate_patch(patch):
     except Exception as e:
         raise GenericConfigUpdaterError(f"Validate json patch: {patch} failed due to:{e}")

+
+def multiasic_validate_single_file(filename):
+    ns_list = [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]
+    file_input = read_json_file(filename)
+    file_ns_list = [DEFAULT_NAMESPACE if key == HOST_NAMESPACE else key for key in file_input]
+    if set(ns_list) != set(file_ns_list):
+        click.echo(
+            "Input file {} must contain all asics config. 
ns_list: {} file ns_list: {}".format( + filename, ns_list, file_ns_list) + ) + raise click.Abort() + + +def load_sysinfo_if_missing(asic_config): + device_metadata = asic_config.get('DEVICE_METADATA', {}) + platform = device_metadata.get("localhost", {}).get("platform") + mac = device_metadata.get("localhost", {}).get("mac") + if not platform: + log.log_warning("platform is missing from Input file") + return True + elif not mac: + log.log_warning("mac is missing from Input file") + return True + else: + return False + + +def flush_configdb(namespace=DEFAULT_NAMESPACE): + if namespace is DEFAULT_NAMESPACE: + config_db = ConfigDBConnector() + else: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + + config_db.connect() + client = config_db.get_redis_client(config_db.CONFIG_DB) + client.flushdb() + return client, config_db + + +def migrate_db_to_lastest(namespace=DEFAULT_NAMESPACE): + # Migrate DB contents to latest version + db_migrator = '/usr/local/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + if namespace is DEFAULT_NAMESPACE: + command = [db_migrator, '-o', 'migrate'] + else: + command = [db_migrator, '-o', 'migrate', '-n', namespace] + clicommon.run_command(command, display_cmd=True) + + +def multiasic_write_to_db(filename, load_sysinfo): + file_input = read_json_file(filename) + for ns in [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]: + asic_name = HOST_NAMESPACE if ns == DEFAULT_NAMESPACE else ns + asic_config = file_input[asic_name] + + asic_load_sysinfo = True if load_sysinfo else False + if not asic_load_sysinfo: + asic_load_sysinfo = load_sysinfo_if_missing(asic_config) + + if asic_load_sysinfo: + cfg_hwsku = asic_config.get("DEVICE_METADATA", {}).\ + get("localhost", {}).get("hwsku") + if not cfg_hwsku: + click.secho("Could not get the HWSKU from config file, Exiting!", fg='magenta') + sys.exit(1) + + client, _ = flush_configdb(ns) + + if asic_load_sysinfo: + if ns is DEFAULT_NAMESPACE: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(ns), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) + + if ns is DEFAULT_NAMESPACE: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True) + else: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True, namespace=ns) + + config_db.connect(False) + sonic_cfggen.FormatConverter.to_deserialized(asic_config) + data = sonic_cfggen.FormatConverter.output_to_db(asic_config) + config_db.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) + client.set(config_db.INIT_INDICATOR, 1) + + migrate_db_to_lastest(ns) + + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -1351,7 +1441,7 @@ def save(db, filename): # save all ASIC configurations to that single file. 
if len(cfg_files) == 1 and multi_asic.is_multi_asic(): filename = cfg_files[0] - multi_asic_save_config(db, filename) + multiasic_save_to_singlefile(db, filename) return elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) @@ -1669,11 +1759,15 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if multi_asic.is_multi_asic() and file_format == 'config_db': num_cfg_file += num_asic + multiasic_single_file_mode = False # If the user give the filename[s], extract the file names. if filename is not None: cfg_files = filename.split(',') - if len(cfg_files) != num_cfg_file: + if len(cfg_files) == 1 and multi_asic.is_multi_asic(): + multiasic_validate_single_file(cfg_files[0]) + multiasic_single_file_mode = True + elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return @@ -1682,127 +1776,109 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form log.log_notice("'reload' stopping services...") _stop_services() - # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB - # service running in the host + DB services running in each ASIC namespace created per ASIC. - # In the below logic, we get all namespaces in this platform and add an empty namespace '' - # denoting the current namespace which we are in ( the linux host ) - for inst in range(-1, num_cfg_file-1): - # Get the namespace name, for linux host it is None - if inst == -1: - namespace = None - else: - namespace = "{}{}".format(NAMESPACE_PREFIX, inst) - - # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json - if cfg_files: - file = cfg_files[inst+1] - # Save to tmpfile in case of stdin input which can only be read once - if file == "/dev/stdin": - file_input = read_json_file(file) - (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") - write_json_file(file_input, tmpfname) - file = tmpfname - else: - if file_format == 'config_db': - if namespace is None: - file = DEFAULT_CONFIG_DB_FILE - else: - file = "/etc/sonic/config_db{}.json".format(inst) + if multiasic_single_file_mode: + multiasic_write_to_db(cfg_files[0], load_sysinfo) + else: + # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB + # service running in the host + DB services running in each ASIC namespace created per ASIC. + # In the below logic, we get all namespaces in this platform and add an empty namespace '' + # denoting the current namespace which we are in ( the linux host ) + for inst in range(-1, num_cfg_file-1): + # Get the namespace name, for linux host it is DEFAULT_NAMESPACE + if inst == -1: + namespace = DEFAULT_NAMESPACE else: - file = DEFAULT_CONFIG_YANG_FILE - - - # Check the file exists before proceeding. 
- if not os.path.exists(file): - click.echo("The config file {} doesn't exist".format(file)) - continue - - if file_format == 'config_db': - file_input = read_json_file(file) + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) + + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + # Save to tmpfile in case of stdin input which can only be read once + if file == "/dev/stdin": + file_input = read_json_file(file) + (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") + write_json_file(file_input, tmpfname) + file = tmpfname + else: + if file_format == 'config_db': + if namespace is DEFAULT_NAMESPACE: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) + else: + file = DEFAULT_CONFIG_YANG_FILE - platform = file_input.get("DEVICE_METADATA", {}).\ - get(HOST_NAMESPACE, {}).get("platform") - mac = file_input.get("DEVICE_METADATA", {}).\ - get(HOST_NAMESPACE, {}).get("mac") + # Check the file exists before proceeding. + if not os.path.exists(file): + click.echo("The config file {} doesn't exist".format(file)) + continue - if not platform or not mac: - log.log_warning("Input file does't have platform or mac. platform: {}, mac: {}" - .format(None if platform is None else platform, None if mac is None else mac)) - load_sysinfo = True + if file_format == 'config_db': + file_input = read_json_file(file) + if not load_sysinfo: + load_sysinfo = load_sysinfo_if_missing(file_input) + + if load_sysinfo: + try: + command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + + except FileNotFoundError as e: + click.echo("{}".format(str(e)), err=True) + raise click.Abort() + except Exception as e: + click.echo("{}\n{}".format(type(e), str(e)), err=True) + raise click.Abort() + + if not output: + click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') + sys.exit(1) - if load_sysinfo: - try: - command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] - proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) - output, err = proc.communicate() + cfg_hwsku = output.strip() - except FileNotFoundError as e: - click.echo("{}".format(str(e)), err=True) - raise click.Abort() - except Exception as e: - click.echo("{}\n{}".format(type(e), str(e)), err=True) - raise click.Abort() + client, config_db = flush_configdb(namespace) - if not output: - click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') - sys.exit(1) + if load_sysinfo: + if namespace is DEFAULT_NAMESPACE: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) - cfg_hwsku = output.strip() + # For the database service running in linux host we use the file user gives as input + # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, + # the default config_db.json format is used. 
- if namespace is None: - config_db = ConfigDBConnector() - else: - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_gen_opts = [] - config_db.connect() - client = config_db.get_redis_client(config_db.CONFIG_DB) - client.flushdb() + if os.path.isfile(INIT_CFG_FILE): + config_gen_opts += ['-j', str(INIT_CFG_FILE)] - if load_sysinfo: - if namespace is None: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + if file_format == 'config_db': + config_gen_opts += ['-j', str(file)] else: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] - clicommon.run_command(command, display_cmd=True) - - # For the database service running in linux host we use the file user gives as input - # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, - # the default config_db.json format is used. - + config_gen_opts += ['-Y', str(file)] - config_gen_opts = [] + if namespace is not DEFAULT_NAMESPACE: + config_gen_opts += ['-n', str(namespace)] - if os.path.isfile(INIT_CFG_FILE): - config_gen_opts += ['-j', str(INIT_CFG_FILE)] + command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - if file_format == 'config_db': - config_gen_opts += ['-j', str(file)] - else: - config_gen_opts += ['-Y', str(file)] - - if namespace is not None: - config_gen_opts += ['-n', str(namespace)] - - command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - - clicommon.run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, 1) + clicommon.run_command(command, display_cmd=True) + client.set(config_db.INIT_INDICATOR, 1) - if os.path.exists(file) and file.endswith("_configReloadStdin"): - # Remove tmpfile - try: - os.remove(file) - except OSError as e: - click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) + if os.path.exists(file) and file.endswith("_configReloadStdin"): + # Remove tmpfile + try: + os.remove(file) + except OSError as e: + click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) - # Migrate DB contents to latest version - db_migrator='/usr/local/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - if namespace is None: - command = [db_migrator, '-o', 'migrate'] - else: - command = [db_migrator, '-o', 'migrate', '-n', str(namespace)] - clicommon.run_command(command, display_cmd=True) + # Migrate DB contents to latest version + migrate_db_to_lastest(namespace) # Re-generate the environment variable in case config_db.json was edited update_sonic_environment() @@ -4086,6 +4162,7 @@ def bgp(): pass + # BGP module extensions config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL) diff --git a/tests/config_test.py b/tests/config_test.py index fc70861c24f..00c9847a381 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -169,6 +169,21 @@ Reloading Monit configuration ... """ +reload_config_masic_onefile_output = """\ +Stopping SONiC target ... +Restarting SONiC target ... +Reloading Monit configuration ... +""" + +reload_config_masic_onefile_gen_sysinfo_output = """\ +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db +Restarting SONiC target ... 
+Reloading Monit configuration ... +""" + save_config_output = """\ Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json """ @@ -640,6 +655,211 @@ def teardown_class(cls): dbconnector.load_namespace_config() +class TestConfigReloadMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_reload_onefile_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "mac": "1d:34:db:16:a6:00", + "platform": "x86_64-mlnx_msn3800-r0", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:05", + "platform": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:06", + "platform": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == reload_config_masic_onefile_output + + def test_config_reload_onefile_gen_sysinfo_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": 
"multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join( + [li.rstrip() for li in result.output.split('\n')] + ) == reload_config_masic_onefile_gen_sysinfo_output + + def test_config_reload_onefile_bad_format_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": {}, + "asic0": {} + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code != 0 + assert "Input file all_config_db.json must contain all asics config" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestLoadMinigraph(object): @classmethod def setup_class(cls): From 3df762f15c6984f9d67f050e9a10ecbdeb0647e0 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Tue, 18 Jun 2024 12:37:49 +0800 Subject: [PATCH 08/23] [config] no op if Golden Config is invalid (#3367) ADO: 27941719 What I did Improve Golden Config workflow and make sure no op if invalid config detected How I did it Add table dependency check right after Golden Config path is enabled How to verify it Unit test --- config/main.py | 8 +++++++ tests/config_test.py | 50 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/config/main.py b/config/main.py index 52816bdd414..89572bd7881 100644 --- a/config/main.py +++ b/config/main.py @@ -1954,6 +1954,14 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, fg='magenta') raise click.Abort() + # Dependency check golden config json + config_to_check = read_json_file(golden_config_path) + if multi_asic.is_multi_asic(): + host_config = config_to_check.get('localhost', {}) + else: + host_config = config_to_check + table_hard_dependency_check(host_config) + #Stop services before config push if not no_service_restart: log.log_notice("'load_minigraph' stopping services...") diff --git a/tests/config_test.py b/tests/config_test.py index 00c9847a381..db62bf3249e 100644 --- 
a/tests/config_test.py +++ b/tests/config_test.py @@ -987,8 +987,13 @@ def is_file_side_effect(filename): def test_load_minigraph_with_specified_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "--golden_config_path", "golden_config.json", "-y"]) @@ -999,14 +1004,48 @@ def is_file_side_effect(filename): def test_load_minigraph_with_default_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code == 0 assert "config override-config-table /etc/sonic/golden_config_db.json" in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_hard_dependency_check(self, get_cmd_module): + def is_file_side_effect(filename): + return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + } + + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)), \ + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + (config, _) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exits." 
in result.output
+
     @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None)))
     def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module):
         with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command:
@@ -1024,7 +1063,12 @@ def test_load_minigraph_with_traffic_shift_away_with_golden_config(self, get_cmd
         with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command:
             def is_file_side_effect(filename):
                 return True if 'golden_config' in filename else False
-            with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)):
+
+            def read_json_file_side_effect(filename):
+                return {}
+
+            with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \
+                 mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)):
                 (config, show) = get_cmd_module
                 db = Db()
                 golden_config = {}

From 515265a5e801cd8db242171a21f22fae4906b12b Mon Sep 17 00:00:00 2001
From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com>
Date: Tue, 18 Jun 2024 10:32:03 -0700
Subject: [PATCH 09/23] Update TRANSCEIVER_FIRMWARE_INFO table for all targets
 in sfputil (#3370)

Signed-off-by: Mihir Patel

---
 sfputil/main.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/sfputil/main.py b/sfputil/main.py
index ad0b1b3775e..4bb9058d79b 100644
--- a/sfputil/main.py
+++ b/sfputil/main.py
@@ -1316,10 +1316,8 @@ def update_firmware_info_to_state_db(port_name):
         state_db.connect(state_db.STATE_DB)
         transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions()
         if transceiver_firmware_info_dict is not None:
-            active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A')
-            inactive_firmware = transceiver_firmware_info_dict.get('inactive_firmware', 'N/A')
-            state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware)
-            state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware)
+            for key, value in transceiver_firmware_info_dict.items():
+                state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), key, value)

 # 'firmware' subgroup
 @cli.group()

From 9d206af70cb2c44c3d395a63170442a3828721c2 Mon Sep 17 00:00:00 2001
From: "Marty Y. Lok" <76118573+mlok-nokia@users.noreply.github.com>
Date: Wed, 19 Jun 2024 13:42:15 -0400
Subject: [PATCH 10/23] [chassis][multi-asic][lldp] "show lldp table/neighbors"
 displays error message in output on multi-asic linecard. (#3358)

Modify the lldpshow script to fix the output of the "show lldp table" and "show lldp neighbors" on the multi-asic Linecard.
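In sketch form, the constructor change gates the host-namespace lldp instance on the FEATURE table. This condenses the `scripts/lldpshow` hunk in the full diff below; `get_feature_state_data` is the helper this patch adds to `utilities_common/general.py`, and the surrounding constructor state (`self.lldp_instance`, `self.lldp_interface`) is assumed from context:

```python
# Inside Lldpshow.__init__, after the per-ASIC instances are collected:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=constants.DEFAULT_NAMESPACE)
config_db.connect()
global_scope, asic_scope = get_feature_state_data(config_db, "lldp")
# Only query lldpctl in the host namespace when FEATURE|lldp has
# has_global_scope == "True"; on multi-asic linecards lldp runs per-ASIC only.
if global_scope == "True":
    self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE)
    self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE)
```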
Fixes sonic-net/sonic-buildimage#19209

* [multi-asic][lldp] "show lldp table" displays error message in output on multi-asic linecard
* Added code coverage UT

Signed-off-by: mlok

---
 scripts/lldpshow                 | 13 +++++++++----
 tests/lldp_test.py               | 17 +++++++++++++++++
 tests/mock_tables/config_db.json |  2 ++
 utilities_common/general.py      | 31 ++++++++++++++++++++++++++++---
 4 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/scripts/lldpshow b/scripts/lldpshow
index e09176cf3cd..fe40296f910 100755
--- a/scripts/lldpshow
+++ b/scripts/lldpshow
@@ -26,8 +26,9 @@ import sys

 from lxml import etree as ET
 from sonic_py_common import device_info
+from utilities_common import constants
 from swsscommon.swsscommon import ConfigDBConnector
-from utilities_common.general import load_db_config
+from utilities_common.general import load_db_config, get_feature_state_data
 from tabulate import tabulate

 BACKEND_ASIC_INTERFACE_NAME_PREFIX = 'Ethernet-BP'
@@ -69,8 +70,12 @@ class Lldpshow(object):
                         self.lldp_interface[instance_num] += key + SPACE_TOKEN

         # LLDP running in host namespace
-        self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE)
-        self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE)
+        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=constants.DEFAULT_NAMESPACE)
+        config_db.connect()
+        global_scope, asic_scope = get_feature_state_data(config_db, "lldp")
+        if global_scope == "True":
+            self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE)
+            self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE)

     def get_info(self, lldp_detail_info, lldp_port):
         """
@@ -85,7 +90,7 @@ class Lldpshow(object):
             elif lldp_interface_list == '':
                 lldp_args = []
             else:
-                lldp_args = [lldp_interface_list]
+                lldp_args = lldp_interface_list.split(' ')
             lldp_cmd = ['sudo', 'docker', 'exec', '-i', 'lldp{}'.format(self.lldp_instance[lldp_instace_num]), 'lldpctl'] + lldp_args
             p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, text=True)
             (output, err) = p.communicate()
diff --git a/tests/lldp_test.py b/tests/lldp_test.py
index 89177338e0c..1d6e55152c3 100644
--- a/tests/lldp_test.py
+++ b/tests/lldp_test.py
@@ -2,6 +2,7 @@
 from click.testing import CliRunner

 from utilities_common.general import load_module_from_source
+from importlib import reload

 test_path = os.path.dirname(os.path.abspath(__file__))
 modules_path = os.path.dirname(test_path)
@@ -83,6 +84,22 @@ def test_get_info(self):
         output = lldp.get_summary_output(lldp_detail_info=True)
         assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n')

+    def test_get_info_multi_asic(self):
+        from .mock_tables import mock_multi_asic
+        from .mock_tables import dbconnector
+        reload(mock_multi_asic)
+        dbconnector.load_namespace_config()
+        lldp = lldpshow.Lldpshow()
+        from .mock_tables import mock_single_asic
+        reload(mock_single_asic)
+        dbconnector.load_namespace_config()
+        lldp.lldp_instance = ['']
+        lldp.lldpraw = expected_lldpctl_xml_output
+        lldp.get_info(lldp_detail_info=True, lldp_port='Ethernet0')
+        lldp.parse_info(lldp_detail_info=True)
+        output = lldp.get_summary_output(lldp_detail_info=True)
+        assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n')
+
     @classmethod
     def teardown_class(cls):
         print("TEARDOWN")
diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json
index af37538447d..108fa7593df 100644
--- a/tests/mock_tables/config_db.json
+++ b/tests/mock_tables/config_db.json
@@ -848,6 +848,8 @@
     "FEATURE|lldp": {
         "state": "enabled",
         "auto_restart": "enabled",
+        "has_global_scope": "False",
"False", + "has_per_asic_scope": "True", "high_mem_alert": "disabled", "set_owner": "kube" }, diff --git a/utilities_common/general.py b/utilities_common/general.py index 6ed70a46a11..97155532caf 100644 --- a/utilities_common/general.py +++ b/utilities_common/general.py @@ -2,8 +2,11 @@ import importlib.util import sys -from sonic_py_common.multi_asic import is_multi_asic +from sonic_py_common import multi_asic from swsscommon import swsscommon +FEATURE_TABLE = "FEATURE" +FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope' +FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope' def load_module_from_source(module_name, file_path): """ @@ -25,7 +28,7 @@ def load_db_config(): - database_global.json for multi asic - database_config.json for single asic ''' - if is_multi_asic(): + if multi_asic.is_multi_asic(): if not swsscommon.SonicDBConfig.isGlobalInit(): swsscommon.SonicDBConfig.load_sonic_global_db_config() else: @@ -39,6 +42,28 @@ def get_optional_value_for_key_in_config_tbl(config_db, port, key, table): return None value = info_dict.get(key, None) - return value + +def get_feature_state_data(config_db, feature): + ''' + Get feature state from FEATURE table from CONFIG_DB. + return global_scope, per_asic_scope + - if feature state is disabled, return "False" for both global_scope and per_asic_scope + - if is not a multi-asic, return feature state for global_scope ("True/False") and + "False" for asic_scope + ''' + global_scope = "False" + asic_scope = "False" + info_dict = {} + info_dict = config_db.get_entry(FEATURE_TABLE, feature) + if info_dict is None: + return global_scope, asic_scope + if multi_asic.is_multi_asic(): + if info_dict['state'].lower() == "enabled": + global_scope = info_dict[FEATURE_HAS_GLOBAL_SCOPE] + asic_scope = info_dict[FEATURE_HAS_PER_ASIC_SCOPE] + else: + if info_dict['state'].lower() == "enabled": + global_scope = "True" + return global_scope, asic_scope From cf7bfa29bef5e84850afb798d14aadd7ade4570f Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 21 Jun 2024 15:52:23 +0800 Subject: [PATCH 11/23] Add the definition of `log` in `script decode-syseeprom` (#3383) #### What I did If there is something wrong getting eeprom while exectuing script `decode-syseeprom`, it will raise an exception and log the error. There was no definition of `log` in script `decode-syseeprom`, which will raise such error ``` Traceback (most recent call last): File "/usr/local/bin/decode-syseeprom", line 264, in sys.exit(main()) ^^^^^^ File "/usr/local/bin/decode-syseeprom", line 246, in main print_serial(use_db) File "/usr/local/bin/decode-syseeprom", line 171, in print_serial eeprom = instantiate_eeprom_object() ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/bin/decode-syseeprom", line 36, in instantiate_eeprom_object log.log_error('Failed to obtain EEPROM object due to {}'.format(repr(e))) ^^^ NameError: name 'log' is not defined ``` In this PR, I add the definition of log to avoid such error. #### How I did it Add the definition of log. 
#### How to verify it ``` admin@vlab-01:~$ sudo decode-syseeprom -s Failed to read system EEPROM info ``` --- scripts/decode-syseeprom | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 3d0b8d1db91..5812f38190b 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -17,13 +17,15 @@ import sys import sonic_platform from sonic_platform_base.sonic_eeprom.eeprom_tlvinfo import TlvInfoDecoder -from sonic_py_common import device_info +from sonic_py_common import device_info, logger from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate EEPROM_INFO_TABLE = 'EEPROM_INFO' +SYSLOG_IDENTIFIER = 'decode-syseeprom' +log = logger.Logger(SYSLOG_IDENTIFIER) def instantiate_eeprom_object(): eeprom = None From c51758df2ae7a51d3ebd65169f7c0282cbbdf2b4 Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Mon, 24 Jun 2024 12:03:15 +1000 Subject: [PATCH 12/23] fix: fix show bgp summary output typo (#3375) * fix: fix show bgp summary output typo * fix: remove extra dash * fix: remove extra space --- tests/bgp_commands_test.py | 284 +++++++++++++++++------------------ utilities_common/bgp_util.py | 2 +- 2 files changed, 143 insertions(+), 143 deletions(-) diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index a60ba8c81f5..2a2179815f1 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -25,32 +25,32 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 -10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 -10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 -10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 -10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 -10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 -10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 -10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 -10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 -10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 -10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 -10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 -10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 -10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 -10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 -10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 -10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 -10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 -10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 -10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 -10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 -10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 -10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 -10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 +10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 +10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 +10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 +10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 +10.0.0.21 4 
65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 +10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 +10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 +10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 +10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 +10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 +10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 +10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 +10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 +10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 +10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 +10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 +10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 +10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 +10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 +10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 +10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 +10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 +10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 Total number of neighbors 24 """ @@ -65,32 +65,32 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 -fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 -fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 -fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 -fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 -fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 -fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 -fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 -fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 -fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 -fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 -fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 -fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 -fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 -fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 -fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 -fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 -fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 -fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 -fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 -fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 -fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 -fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 -fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 +fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 +fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 +fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 +fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 +fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 +fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 +fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 +fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 +fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 +fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 +fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 +fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 +fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 
+fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 +fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 +fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 +fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 +fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 +fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 +fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 +fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 +fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 +fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 Total number of neighbors 24 """ @@ -112,8 +112,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -128,8 +128,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -146,8 +146,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -164,8 +164,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -180,28 +180,28 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 
8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 Total number of neighbors 20 """ @@ -216,28 +216,28 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 -fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 -fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 -fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 -fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 -fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 -fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 -fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 -fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 -fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 -fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 -fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 -fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 -fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 -fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 -fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 -fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 -fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 -fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 -fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 +fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 +fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 +fc00::3a 
4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 +fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 +fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 +fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 +fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 +fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 +fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 +fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 +fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 +fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 +fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 +fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 +fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 +fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 +fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 +fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 +fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 Total number of neighbors 20 """ @@ -252,31 +252,31 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ -3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 -3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 -10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ +3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 +3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 
+10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 Total number of neighbors 23 """ @@ -291,8 +291,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -308,9 +308,9 @@ Peer groups 3, using 3 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 Total number of neighbors 1 """ @@ -326,14 +326,14 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- -3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 -3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 -3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 -3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 -3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 -3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- +3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 +3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 +3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 +3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 +3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 +3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 """ diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 668ef344d56..df2e4963b6f 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -299,7 +299,7 @@ def display_bgp_summary(bgp_summary, af): af: IPV4 or IPV6 ''' - headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", + headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] try: From 
0e6a55ef5eac306ef61d6f0241625a6baee42ab8 Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Mon, 24 Jun 2024 09:48:14 +0300
Subject: [PATCH 13/23] [fast-reboot] Backup database after syncd/swss stopped
 (#3342)

- What I did
Backup DB after syncd and swss are stopped.

I observed an issue with fast-reboot where, in rare circumstances, a queued FDB event might be written to ASIC_DB by a thread inside syncd after the call to FLUSHDB ASIC_DB was made. That left ASIC_DB with only one record, for that FDB entry, and caused syncd to crash at start:

Mar 15 13:28:42.765108 sonic NOTICE syncd#SAI: :- Syncd: syncd started
Mar 15 13:28:42.765268 sonic NOTICE syncd#SAI: :- onSyncdStart: performing hard reinit since COLD start was performed
Mar 15 13:28:42.765451 sonic NOTICE syncd#SAI: :- readAsicState: loaded 1 switches
Mar 15 13:28:42.765465 sonic NOTICE syncd#SAI: :- readAsicState: switch VID: oid:0x21000000000000
Mar 15 13:28:42.765465 sonic NOTICE syncd#SAI: :- readAsicState: read asic state took 0.000205 sec
Mar 15 13:28:42.766364 sonic NOTICE syncd#SAI: :- onSyncdStart: on syncd start took 0.001097 sec
Mar 15 13:28:42.766376 sonic ERR syncd#SAI: :- run: Runtime error during syncd init: map::at
Mar 15 13:28:42.766376 sonic NOTICE syncd#SAI: :- sendShutdownRequest: sending switch_shutdown_request notification to OA for switch: oid:0x0
Mar 15 13:28:42.766518 sonic NOTICE syncd#SAI: :- sendShutdownRequestAfterException: notification send successfully

- How I did it
Backup DB after syncd/swss have stopped.

- How to verify it
Run fast-reboot.

Signed-off-by: Stepan Blyschak
---
 scripts/fast-reboot | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 53dcffd7d2f..2eeca111129 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -244,6 +244,19 @@ function wait_for_pre_shutdown_complete_or_fail()
 function backup_database()
 {
     debug "Backing up database ..."
+
+    if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then
+        # Advanced reboot: dump state to host disk
+        sonic-db-cli ASIC_DB FLUSHDB > /dev/null
+        sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null
+        sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null
+    fi
+
+    if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
+        # Flush RESTAPI_DB in fast-reboot to avoid stale status
+        sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null
+    fi
+
     # Dump redis content to a file 'dump.rdb' in warmboot directory
     mkdir -p $WARM_DIR
     # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|*, FG_ROUTE_TABLE|*
@@ -806,23 +819,11 @@ for service in ${SERVICES_TO_STOP}; do
             wait_for_pre_shutdown_complete_or_fail
         fi
 
-        if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then
-            # Advanced reboot: dump state to host disk
-            sonic-db-cli ASIC_DB FLUSHDB > /dev/null
-            sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null
-            sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null
-        fi
-
-        if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
-            # Flush RESTAP_DB in fast-reboot to avoid stale status
-            sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null
-        fi
-
-        backup_database
-    fi
 done
 
+backup_database
+
 # Stop the docker container engine. Otherwise we will have a broken docker storage
 systemctl stop docker.service || debug "Ignore stopping docker service error $?"
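In effect, the flush and backup now run exactly once, after every service (including swss and syncd) has stopped, instead of inside the per-service stop loop. A simplified sketch of the resulting control flow (not the literal script):
```
# Simplified control flow of scripts/fast-reboot after this change
for service in ${SERVICES_TO_STOP}; do
    systemctl stop "$service"   # swss and syncd included
    # ... wait for pre-shutdown to complete where applicable ...
done

# Runs once, after syncd can no longer enqueue writes to ASIC_DB:
backup_database   # flushes ASIC_DB/COUNTERS_DB/FLEX_COUNTER_DB (and RESTAPI_DB
                  # for fast-reboot), then dumps redis content to $WARM_DIR

# Stop the docker container engine
systemctl stop docker.service
```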
From 667a1509c21aa42c268ecd6bff3cdb9b8d7b66c8 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Thu, 27 Jun 2024 19:37:59 +0300 Subject: [PATCH 14/23] [pbh]: Fix show PBH counters when cache is partial (#3356) * [pbh]: Fix show PBH counters when cache is partial. Signed-off-by: Nazarii Hnydyn --- show/plugins/pbh.py | 2 +- tests/pbh_input/assert_show_output.py | 8 +++++++ tests/pbh_input/counters_db_partial.json | 11 ++++++++++ tests/pbh_test.py | 28 ++++++++++++++++++++++++ 4 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 tests/pbh_input/counters_db_partial.json diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index 407c5961630..f47b43fbdc1 100644 --- a/show/plugins/pbh.py +++ b/show/plugins/pbh.py @@ -395,7 +395,7 @@ def get_counter_value(pbh_counters, saved_pbh_counters, key, type): if not pbh_counters[key]: return '0' - if key in saved_pbh_counters: + if key in saved_pbh_counters and saved_pbh_counters[key]: new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type]) if new_value >= 0: return str(new_value) diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 7a701ba4bc8..5538f3aadad 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -78,6 +78,14 @@ """ +show_pbh_statistics_partial = """\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- diff --git a/tests/pbh_input/counters_db_partial.json b/tests/pbh_input/counters_db_partial.json new file mode 100644 index 00000000000..aa140188c8f --- /dev/null +++ b/tests/pbh_input/counters_db_partial.json @@ -0,0 +1,11 @@ +{ + "COUNTERS:oid:0x9000000000000": { }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index 7dddfea9ca9..0d68f458ee2 100644 --- a/tests/pbh_test.py +++ b/tests/pbh_test.py @@ -946,6 +946,34 @@ def test_show_pbh_statistics_after_clear(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_zero + def test_show_pbh_statistics_after_clear_and_counters_partial(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_partial') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_partial def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') From 3a8f0be0b0e5c25e843510cd1f53a91475dbebd2 Mon Sep 17 00:00:00 2001 From: Vivek Date: Thu, 27 Jun 2024 23:17:26 -0700 Subject: [PATCH 15/23] [Mellanox] Add support for ACS-4280 (#3368) - What I did Add support for ACS-4280 SKU in GCU - How I did it - How to verify it Verified GCU tests in sonic-mgmt --- .../gcu_field_operation_validators.conf.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index c49fe08f37b..77b504b3131 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,8 +20,8 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", - "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", + "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, "broadcom_asics": { From 06965df2c431ec63e0706499f90ea4bf0a5a1b4a Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 1 Jul 2024 09:50:03 +0300 Subject: [PATCH 16/23] Remove suppress-fib-pending CLI and make route_check.py check suppress-fib in BGP configuration (#3331) What I did Revert suppress FIB pending feature Why I did it Some unresolved FRR issues in current version How I verified it Build and run [route_check] check if suppress fib is enabled in bgp Signed-off-by: Stepan Blyschak --- config/main.py | 12 ---------- doc/Command-Reference.md | 38 ------------------------------ scripts/route_check.py | 32 +++++++++++++++++-------- show/main.py | 19 ++++----------- tests/route_check_test.py | 7 ++++-- tests/suppress_pending_fib_test.py | 34 -------------------------- 6 files changed, 31 insertions(+), 111 deletions(-) delete mode 100644 tests/suppress_pending_fib_test.py diff --git a/config/main.py b/config/main.py index 89572bd7881..167c9c45bd2 100644 --- 
a/config/main.py +++ b/config/main.py @@ -2336,18 +2336,6 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) -# -# 'suppress-fib-pending' command ('config suppress-fib-pending ...') -# -@config.command('suppress-fib-pending') -@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) -@clicommon.pass_db -def suppress_pending_fib(db, state): - ''' Enable or disable pending FIB suppression. Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' - - config_db = db.cfgdb - config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) - # # 'yang_config_validation' command ('config yang_config_validation ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 757438dad07..78474d59483 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2610,26 +2610,6 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` -**show suppress-fib-pending** - -This command is used to show the status of suppress pending FIB feature. -When enabled, BGP will not advertise routes which aren't yet offloaded. - -- Usage: - ``` - show suppress-fib-pending - ``` - -- Examples: - ``` - admin@sonic:~$ show suppress-fib-pending - Enabled - ``` - ``` - admin@sonic:~$ show suppress-fib-pending - Disabled - ``` - **show bgp device-global** This command displays BGP device global configuration. @@ -2742,24 +2722,6 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` -**config suppress-fib-pending** - -This command is used to enable or disable announcements of routes not yet installed in the HW. -Once enabled, BGP will not advertise routes which aren't yet offloaded. - -- Usage: - ``` - config suppress-fib-pending - ``` - -- Examples: - ``` - admin@sonic:~$ sudo config suppress-fib-pending enabled - ``` - ``` - admin@sonic:~$ sudo config suppress-fib-pending disabled - ``` - **config bgp device-global tsa/w-ecmp** This command is used to manage BGP device global configuration. 
diff --git a/scripts/route_check.py b/scripts/route_check.py
index ee417dc49cc..2fbe0415471 100755
--- a/scripts/route_check.py
+++ b/scripts/route_check.py
@@ -328,6 +328,16 @@ def get_asicdb_routes(namespace):
 
     return (selector, subs, sorted(rt))
 
+def is_bgp_suppress_fib_pending_enabled(namespace):
+    """
+    Returns True if FIB suppression is enabled in BGP config, False otherwise
+    """
+    show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace]
+
+    output = subprocess.check_output(show_run_cmd, text=True)
+    return 'bgp suppress-fib-pending' in output
+
+
 def is_suppress_fib_pending_enabled(namespace):
     """
     Returns True if FIB suppression is enabled, False otherwise
@@ -781,18 +791,20 @@ def check_routes(namespace):
                 results[namespace] = {}
             results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss
 
-    rt_frr_miss = check_frr_pending_routes(namespace)
+    if is_bgp_suppress_fib_pending_enabled(namespace):
+        rt_frr_miss = check_frr_pending_routes(namespace)
 
-    if rt_frr_miss:
-        if namespace not in results:
-            results[namespace] = {}
-        results[namespace]["missed_FRR_routes"] = rt_frr_miss
+        if rt_frr_miss:
+            if namespace not in results:
+                results[namespace] = {}
+            results[namespace]["missed_FRR_routes"] = rt_frr_miss
 
-    if results:
-        if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
-            print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace))
-            if is_suppress_fib_pending_enabled(namespace):
-                mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl)
+        if results:
+            if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
+                print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all "
+                              "routes in APPL_DB and ASIC_DB are in sync".format(namespace))
+                if is_suppress_fib_pending_enabled(namespace):
+                    mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl)
 
     if results:
         print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}")
diff --git a/show/main.py b/show/main.py
index c4d99b8eabe..a3a72c70e78 100755
--- a/show/main.py
+++ b/show/main.py
@@ -165,7 +165,7 @@ def get_config_json_by_namespace(namespace):
 iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter())
 
 #
-# Display all storm-control data 
+# Display all storm-control data
 #
 def display_storm_all():
     """ Show storm-control """
@@ -465,7 +465,7 @@ def is_mgmt_vrf_enabled(ctx):
         return False
 
 #
-# 'storm-control' group 
+# 'storm-control' group
 # "show storm-control [interface <interface_name>]"
 #
 @cli.group('storm-control', invoke_without_command=True)
@@ -2111,7 +2111,7 @@ def summary(db):
             key_values = key.split('|')
             values = db.db.get_all(db.db.STATE_DB, key)
             if "local_discriminator" not in values.keys():
-                values["local_discriminator"] = "NA" 
+                values["local_discriminator"] = "NA"
             bfd_body.append([key_values[3], key_values[2], key_values[1], values["state"], values["type"], values["local_addr"], values["tx_interval"], values["rx_interval"], values["multiplier"], values["multihop"], values["local_discriminator"]])
 
@@ -2142,24 +2142,13 @@ def peer(db, peer_ip):
         key_values = key.split(delimiter)
         values = db.db.get_all(db.db.STATE_DB, key)
         if "local_discriminator" not in values.keys():
-            values["local_discriminator"] = "NA" 
+            values["local_discriminator"] = "NA"
         bfd_body.append([key_values[3], key_values[2], key_values[1], values.get("state"), values.get("type"), values.get("local_addr"), values.get("tx_interval"),
values.get("rx_interval"), values.get("multiplier"), values.get("multihop"), values.get("local_discriminator")]) click.echo(tabulate(bfd_body, bfd_headers)) -# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") -@cli.command('suppress-fib-pending') -@clicommon.pass_db -def suppress_pending_fib(db): - """ Show the status of suppress pending FIB feature """ - - field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') - state = field_values.get('suppress-fib-pending', 'disabled').title() - click.echo(state) - - # asic-sdk-health-event subcommand ("show asic-sdk-health-event") @cli.group(cls=clicommon.AliasedGroup) def asic_sdk_health_event(): diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 1f92b3d19ae..26c632d7427 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,8 +252,11 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + if 'show runningconfiguration bgp' in ' '.join(args[0]): + return 'bgp suppress-fib-pending' + else: + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = ct_data.get(RET, 0) diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py deleted file mode 100644 index 04064d306ed..00000000000 --- a/tests/suppress_pending_fib_test.py +++ /dev/null @@ -1,34 +0,0 @@ -from click.testing import CliRunner - -import config.main as config -import show.main as show -from utilities_common.db import Db - - -class TestSuppressFibPending: - def test_synchronous_mode(self): - runner = CliRunner() - - db = Db() - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled' - - result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Enabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled' - - result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Disabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) - print(result.output) - assert result.exit_code != 0 From 414cf3bbce13d45c59bde3a74dd341a37b4e0d99 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Tue, 2 Jul 2024 02:14:10 +0530 Subject: [PATCH 17/23] [DPB]Fix return code in case of failure (#3389) * [DPB]Fix return code in case of failure * Updating UT --- config/main.py | 2 +- tests/config_dpb_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 167c9c45bd2..83637c1421e 100644 --- a/config/main.py +++ b/config/main.py @@ -4697,7 +4697,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load except Exception as e: click.secho("Failed to break out Port. 
Error: {}".format(str(e)), fg='magenta') - sys.exit(0) + sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 5dcf8149110..0a3d99cbcd7 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -350,7 +350,7 @@ def test_config_breakout_extra_table_warning(self, breakout_cfg_file, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Below Config can not be verified' in result.output assert 'UNKNOWN_TABLE' in result.output assert 'Do you wish to Continue?' in result.output From fb2e5cda90ced88249e06b18d8c5717a89ff62b9 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 4 Jul 2024 09:36:55 +0800 Subject: [PATCH 18/23] Remove secret from golden_config_db.json and old_config files (#3390) --- scripts/generate_dump | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 06d163a45e3..b163366bb0b 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -2155,7 +2155,7 @@ finalize() { ############################################################################### -# Remove secret from pipeline inout and output result to pipeline. +# Remove secret from pipeline input and output result to pipeline. # Globals: # None # Arguments: @@ -2168,6 +2168,18 @@ remove_secret_from_config_db_dump() { sed -E 's/\"passkey\"\s*:\s*\"([^\"]*)\"/\"passkey\":\"****\"/g; /SNMP_COMMUNITY/,/\s{2,4}\},/d' } + +############################################################################### +# Remove secret from file. +############################################################################### +remove_secret_from_config_db_dump_file() { + local dumpfile=$1 + if [ -e ${dumpfile} ]; then + cat $dumpfile | remove_secret_from_config_db_dump > $dumpfile.temp + mv $dumpfile.temp $dumpfile + fi +} + ############################################################################### # Remove secret from dump files. 
# Globals:
@@ -2201,8 +2213,24 @@ remove_secret_from_etc_files() {
     sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml
 
     # Remove secret from /etc/sonic/config_db.json
-    cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp
-    mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json
+    remove_secret_from_config_db_dump_file $dumppath/etc/sonic/config_db.json
+
+    # Remove secret from /etc/sonic/golden_config_db.json
+    remove_secret_from_config_db_dump_file $dumppath/etc/sonic/golden_config_db.json
+
+    # Remove secret from /etc/sonic/old_config/
+
+    # Remove snmp community string from old_config/snmp.yml
+    local oldsnmp=${dumppath}/etc/sonic/old_config/snmp.yml
+    if [ -e ${oldsnmp} ]; then
+        sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $oldsnmp
+    fi
+
+    # Remove secret from /etc/sonic/old_config/config_db.json
+    remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/config_db.json
+
+    # Remove secret from /etc/sonic/old_config/golden_config_db.json
+    remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/golden_config_db.json
 }
 
 ###############################################################################

From 789ef634022f39ee2126fcf541eaf99edfd806b7 Mon Sep 17 00:00:00 2001
From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com>
Date: Tue, 9 Jul 2024 13:03:04 -0700
Subject: [PATCH 19/23] Add Parallel option for apply-patch (#3373)

* Add Parallel option for apply-patch

* fix format

* fix format

* Add UT to check if parallel option ran as expected.

* fix format.

* Remove Dry Run.

* add parallel run checker

* Modify to number of asics

* Modify UT.
---
 config/main.py       |  47 +++++++--
 tests/config_test.py | 244 +++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 276 insertions(+), 15 deletions(-)

diff --git a/config/main.py b/config/main.py
index 83637c1421e..709c96402a6 100644
--- a/config/main.py
+++ b/config/main.py
@@ -1,6 +1,8 @@
 #!/usr/sbin/env python
 
+import threading
 import click
+import concurrent.futures
 import datetime
 import ipaddress
 import json
@@ -1212,6 +1214,11 @@ def multiasic_save_to_singlefile(db, filename):
     with open(filename, 'w') as file:
         json.dump(all_current_config, file, indent=4)
 
+
+def apply_patch_wrapper(args):
+    return apply_patch_for_scope(*args)
+
+
 # Function to apply patch for a single ASIC.
def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path):
     scope, changes = scope_changes
@@ -1220,16 +1227,19 @@ def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_ru
         scope = multi_asic.DEFAULT_NAMESPACE
 
     scope_for_log = scope if scope else HOST_NAMESPACE
+    thread_id = threading.get_ident()
+    log.log_notice(f"apply_patch_for_scope started for {scope_for_log} by {changes} in thread:{thread_id}")
+
     try:
         # Call apply_patch with the ASIC-specific changes and predefined parameters
         GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes),
-                                                        config_format,
-                                                        verbose,
-                                                        dry_run,
-                                                        ignore_non_yang_tables,
-                                                        ignore_path)
+                                                config_format,
+                                                verbose,
+                                                dry_run,
+                                                ignore_non_yang_tables,
+                                                ignore_path)
         results[scope_for_log] = {"success": True, "message": "Success"}
-        log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}")
+        log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes} in thread:{thread_id}")
     except Exception as e:
         results[scope_for_log] = {"success": False, "message": str(e)}
         log.log_error(f"'apply-patch' executed failed for {scope_for_log} by {changes} due to {str(e)}")
@@ -1549,11 +1559,12 @@ def print_dry_run_message(dry_run):
               help='format of config of the patch is either ConfigDb(ABNF) or SonicYang', show_default=True)
 @click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
+@click.option('-p', '--parallel', is_flag=True, default=False, help='apply the change to all ASICs in parallel')
 @click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True)
 @click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True)
 @click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
 @click.pass_context
-def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose):
+def apply_patch(ctx, patch_file_path, format, dry_run, parallel, ignore_non_yang_tables, ignore_path, verbose):
     """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902.
        This command can be used to do partial updates to the config with minimum disruption to running processes.
        It allows addition as well as deletion of configs.
The patch file represents a diff of ConfigDb(ABNF) @@ -1599,8 +1610,26 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i changes_by_scope[asic] = [] # Apply changes for each scope - for scope_changes in changes_by_scope.items(): - apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + if parallel: + with concurrent.futures.ThreadPoolExecutor() as executor: + # Prepare the argument tuples + arguments = [(scope_changes, results, config_format, + verbose, dry_run, ignore_non_yang_tables, ignore_path) + for scope_changes in changes_by_scope.items()] + + # Submit all tasks and wait for them to complete + futures = [executor.submit(apply_patch_wrapper, args) for args in arguments] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + else: + for scope_changes in changes_by_scope.items(): + apply_patch_for_scope(scope_changes, + results, + config_format, + verbose, dry_run, + ignore_non_yang_tables, + ignore_path) # Check if any updates failed failures = [scope for scope, result in results.items() if not result['success']] diff --git a/tests/config_test.py b/tests/config_test.py index db62bf3249e..748d434fc2a 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -3229,6 +3229,199 @@ def test_apply_patch_multiasic(self): @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_dryrun_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + 
print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.apply_patch_wrapper') + def test_apply_patch_check_apply_call_parallel_multiasic(self, mock_apply_patch): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = 
self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + self.assertEqual(mock_apply_patch.call_count, + multi_asic.get_num_asics() + 1, + "apply_patch_wrapper function should be called number of ASICs plus host times") + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_not_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_not_called() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + def test_apply_patch_parallel_with_error_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: # Mock GenericUpdater to avoid actual patch application @@ -3243,12 +3436,13 @@ def test_apply_patch_dryrun_multiasic(self): result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path, "--format", ConfigFormat.SONICYANG.name, - "--dry-run", - "--ignore-non-yang-tables", - "--ignore-path", "/ANY_TABLE", - "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", - "--ignore-path", "", - "--verbose"], + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", 
+ "--ignore-path", "", + "--verbose"], catch_exceptions=False) print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) @@ -3326,6 +3520,44 @@ def test_apply_patch_validate_patch_with_badpath_multiasic(self, mock_subprocess # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_parallel_badpath_multiasic(self, mock_subprocess_popen): + mock_instance = MagicMock() + mock_instance.communicate.return_value = (json.dumps(self.all_config), 0) + mock_subprocess_popen.return_value = mock_instance + + bad_patch = copy.deepcopy(self.patch_content) + bad_patch.append({ + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet3", "Ethernet4"], + "stage": "ingress", + "type": "L3" + } + }) + + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--parallel"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertNotEqual(result.exit_code, 0, "Command should failed.") + self.assertIn("Failed to apply patch", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.subprocess.Popen') @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subprocess_popen): From 1f944447434033bd4262d1f961b7ec745e7d1f69 Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Wed, 10 Jul 2024 16:22:43 -0700 Subject: [PATCH 20/23] Fix multi-asic behaviour for pg-drop (#3058) * Fixes multi-asic behaviour for pg-drop script. show priority-group drop is not behaving correctly on multi-asic devices, as the namespace option '-n' is not available and correct namespaces were not traversed to retrieve drop counters. This change fixes the multi-asic behaviour of this command. * add additional test and simplify branching Co-authored-by: Kenneth Cheung --- scripts/pg-drop | 87 +++++++++++++------ show/main.py | 5 +- tests/mock_tables/asic1/counters_db.json | 102 +++++++++++++++++++++++ tests/multi_asic_pgdropstat_test.py | 95 +++++++++++++++++++++ 4 files changed, 261 insertions(+), 28 deletions(-) create mode 100644 tests/multi_asic_pgdropstat_test.py diff --git a/scripts/pg-drop b/scripts/pg-drop index 77415930811..9078d28ad69 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,6 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. 
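The scripts/pg-drop hunks below replace the single host-DB connector with namespace-aware lookups: a point query returns the value from the first namespace that has the key, while a bulk query merges the table across all namespaces. A self-contained sketch of that traversal, with an in-memory dict standing in for SonicV2Connector (the data and names are illustrative):

```
# In-memory stand-in for per-namespace COUNTERS_DB content.
FAKE_COUNTERS_DB = {
    "asic0": {"COUNTERS_PG_PORT_MAP": {"oid:pg0": "oid:port0"}},
    "asic1": {"COUNTERS_PG_PORT_MAP": {"oid:pg1": "oid:port1"}},
}

def get_counters_mapdata(ns_list, tablemap, index):
    # Point lookup: the first namespace holding the key wins.
    for ns in ns_list:
        data = FAKE_COUNTERS_DB.get(ns, {}).get(tablemap, {}).get(index)
        if data:
            return data
    return None

def get_counters_mapall(ns_list, tablemap):
    # Bulk lookup: union of the table across every namespace.
    mapdata = {}
    for ns in ns_list:
        mapdata.update(FAKE_COUNTERS_DB.get(ns, {}).get(tablemap, {}))
    return mapdata

ns_list = ["asic0", "asic1"]
print(get_counters_mapdata(ns_list, "COUNTERS_PG_PORT_MAP", "oid:pg1"))
print(sorted(get_counters_mapall(ns_list, "COUNTERS_PG_PORT_MAP")))
```

Passing a single namespace as `ns_list` restricts the lookup to one ASIC, which is how the `-n/--namespace` option narrows the output while the default traverses everything.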
# ##################################################################### +from importlib import reload import json import argparse import os @@ -13,6 +14,8 @@ from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic # mock the redis for unit test purposes # try: @@ -22,7 +25,9 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass @@ -43,13 +48,11 @@ def get_dropstat_dir(): class PgDropStat(object): - def __init__(self): - self.counters_db = SonicV2Connector(host='127.0.0.1') - self.counters_db.connect(self.counters_db.COUNTERS_DB) - - self.configdb = ConfigDBConnector() + def __init__(self, namespace): + self.namespace = namespace + self.ns_list = multi_asic.get_namespace_list(namespace) + self.configdb = ConfigDBConnector(namespace=namespace) self.configdb.connect() - dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'pg_drop_stats') @@ -57,14 +60,14 @@ class PgDropStat(object): """ Get port ID using object ID """ - port_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, oid) + port_id = self.get_counters_mapdata(COUNTERS_PG_PORT_MAP, oid) if not port_id: print("Port is not available for oid '{}'".format(oid)) sys.exit(1) return port_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.get_counters_mapall(COUNTERS_PORT_NAME_MAP) if not self.counter_port_name_map: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -77,7 +80,7 @@ class PgDropStat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.get_counters_mapall(COUNTERS_PG_NAME_MAP) if not counter_pg_name_map: print("COUNTERS_PG_NAME_MAP is empty!") sys.exit(1) @@ -94,13 +97,32 @@ class PgDropStat(object): "header_prefix": "PG"}, } + def get_counters_mapdata(self, tablemap, index): + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + data = counters_db.get(counters_db.COUNTERS_DB, tablemap, index) + if data: + return data + return None + + def get_counters_mapall(self, tablemap): + mapdata = {} + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + map_result = counters_db.get_all(counters_db.COUNTERS_DB, tablemap) + if map_result: + mapdata.update(map_result) + return mapdata + def get_pg_index(self, oid): """ return PG index (0-7) oid - object ID for entry in redis """ - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, oid) + pg_index = self.get_counters_mapdata(COUNTERS_PG_INDEX_MAP, oid) if not pg_index: print("Priority group index is not available for oid '{}'".format(oid)) sys.exit(1) @@ -154,7 +176,7 @@ class PgDropStat(object): old_collected_data = port_drop_ckpt.get(name,{})[full_table_id] if len(port_drop_ckpt) > 0 else 0 idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, 
full_table_id, counter_name) + counter_data = self.get_counters_mapdata(full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -180,18 +202,18 @@ class PgDropStat(object): print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def get_counts(self, counters, oid): - """ - Get the PG drop counts for an individual counter. - """ - counts = {} - table_id = COUNTER_TABLE_PREFIX + oid - for counter in counters: - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, table_id, counter) - if counter_data is None: - counts[table_id] = 0 - else: - counts[table_id] = int(counter_data) - return counts + """ + Get the PG drop counts for an individual counter. + """ + counts = {} + table_id = COUNTER_TABLE_PREFIX + oid + for counter in counters: + counter_data = self.get_counters_mapdata(table_id, counter) + if counter_data is None: + counts[table_id] = 0 + else: + counts[table_id] = int(counter_data) + return counts def get_counts_table(self, counters, object_table): """ @@ -199,10 +221,10 @@ class PgDropStat(object): to its PG drop counts. Counts are contained in a dictionary that maps counter oid to its counts. """ - counter_object_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, object_table) + counter_object_name_map = self.get_counters_mapall(object_table) current_stat_dict = OrderedDict() - if counter_object_name_map is None: + if not counter_object_name_map: return current_stat_dict for obj in natsorted(counter_object_name_map): @@ -239,10 +261,12 @@ def main(): epilog=""" Examples: pg-drop -c show +pg-drop -c show --namespace asic0 pg-drop -c clear """) parser.add_argument('-c', '--command', type=str, help='Desired action to perform') + parser.add_argument('-n', '--namespace', type=str, help='Namespace name or skip for all', default=None) args = parser.parse_args() command = args.command @@ -256,7 +280,16 @@ pg-drop -c clear print(e) sys.exit(e.errno) - pgdropstat = PgDropStat() + # Load database config files + load_db_config() + namespaces = multi_asic.get_namespace_list() + if args.namespace and args.namespace not in namespaces: + namespacelist = ', '.join(namespaces) + print(f"Input value for '--namespace' / '-n'. 
Choose from one of ({namespacelist})") + sys.exit(1) + + # For 'clear' command force applying to all namespaces + pgdropstat = PgDropStat(args.namespace if command != 'clear' else None) if command == 'clear': pgdropstat.clear_drop_counts() diff --git a/show/main.py b/show/main.py index a3a72c70e78..d20073fb01d 100755 --- a/show/main.py +++ b/show/main.py @@ -857,9 +857,12 @@ def drop(): pass @drop.command('counters') -def pg_drop_counters(): +@multi_asic_util.multi_asic_click_option_namespace +def pg_drop_counters(namespace): """Show dropped packets for priority-group""" command = ['pg-drop', '-c', 'show'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group(name='persistent-watermark') diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index c364d8599ef..f9197421571 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -207,6 +207,108 @@ "Ethernet-BP256": "oid:0x1000000000b06", "Ethernet-BP260": "oid:0x1000000000b08" }, + "COUNTERS_PG_NAME_MAP": { + "Ethernet-BP256:0": "oid:100000000b0f0", + "Ethernet-BP256:1": "oid:100000000b0f1", + "Ethernet-BP256:2": "oid:100000000b0f2", + "Ethernet-BP256:3": "oid:100000000b0f3", + "Ethernet-BP256:4": "oid:100000000b0f4", + "Ethernet-BP256:5": "oid:100000000b0f5", + "Ethernet-BP256:6": "oid:100000000b0f6", + "Ethernet-BP256:7": "oid:100000000b0f7", + "Ethernet-BP256:8": "oid:100000000b0f8", + "Ethernet-BP256:9": "oid:100000000b0f9", + "Ethernet-BP256:10": "oid:100000000b0fa", + "Ethernet-BP256:11": "oid:100000000b0fb", + "Ethernet-BP256:12": "oid:100000000b0fc", + "Ethernet-BP256:13": "oid:100000000b0fd", + "Ethernet-BP256:14": "oid:100000000b0fe", + "Ethernet-BP256:15": "oid:100000000b0ff", + "Ethernet-BP260:0": "oid:0x100000000b1f0", + "Ethernet-BP260:1": "oid:0x100000000b1f1", + "Ethernet-BP260:2": "oid:0x100000000b1f2", + "Ethernet-BP260:3": "oid:0x100000000b1f3", + "Ethernet-BP260:4": "oid:0x100000000b1f4", + "Ethernet-BP260:5": "oid:0x100000000b1f5", + "Ethernet-BP260:6": "oid:0x100000000b1f6", + "Ethernet-BP260:7": "oid:0x100000000b1f7", + "Ethernet-BP260:8": "oid:0x100000000b1f8", + "Ethernet-BP260:9": "oid:0x100000000b1f9", + "Ethernet-BP260:10": "oid:0x100000000b1fa", + "Ethernet-BP260:11": "oid:0x100000000b1fb", + "Ethernet-BP260:12": "oid:0x100000000b1fc", + "Ethernet-BP260:13": "oid:0x100000000b1fd", + "Ethernet-BP260:14": "oid:0x100000000b1fe", + "Ethernet-BP260:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000b06", + "oid:100000000b0f1": "oid:0x1000000000b06", + "oid:100000000b0f2": "oid:0x1000000000b06", + "oid:100000000b0f3": "oid:0x1000000000b06", + "oid:100000000b0f4": "oid:0x1000000000b06", + "oid:100000000b0f5": "oid:0x1000000000b06", + "oid:100000000b0f6": "oid:0x1000000000b06", + "oid:100000000b0f7": "oid:0x1000000000b06", + "oid:100000000b0f8": "oid:0x1000000000b06", + "oid:100000000b0f9": "oid:0x1000000000b06", + "oid:100000000b0fa": "oid:0x1000000000b06", + "oid:100000000b0fb": "oid:0x1000000000b06", + "oid:100000000b0fc": "oid:0x1000000000b06", + "oid:100000000b0fd": "oid:0x1000000000b06", + "oid:100000000b0fe": "oid:0x1000000000b06", + "oid:100000000b0ff": "oid:0x1000000000b06", + "oid:0x100000000b1f0": "oid:0x1000000000b08", + "oid:0x100000000b1f1": "oid:0x1000000000b08", + "oid:0x100000000b1f2": "oid:0x1000000000b08", + "oid:0x100000000b1f3": "oid:0x1000000000b08", + "oid:0x100000000b1f4": "oid:0x1000000000b08", + 
"oid:0x100000000b1f5": "oid:0x1000000000b08", + "oid:0x100000000b1f6": "oid:0x1000000000b08", + "oid:0x100000000b1f7": "oid:0x1000000000b08", + "oid:0x100000000b1f8": "oid:0x1000000000b08", + "oid:0x100000000b1f9": "oid:0x1000000000b08", + "oid:0x100000000b1fa": "oid:0x1000000000b08", + "oid:0x100000000b1fb": "oid:0x1000000000b08", + "oid:0x100000000b1fc": "oid:0x1000000000b08", + "oid:0x100000000b1fd": "oid:0x1000000000b08", + "oid:0x100000000b1fe": "oid:0x1000000000b08", + "oid:0x100000000b1ff" : "oid:0x1000000000b08" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py new file mode 100644 index 00000000000..94bb13011b4 --- /dev/null +++ b/tests/multi_asic_pgdropstat_test.py @@ -0,0 +1,95 @@ +import os +import sys +from utilities_common.cli import UserCache +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +pg_drop_masic_one_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + +pg_drop_masic_all_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ + Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + + +class TestMultiAsicPgDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_pg_drop_masic_all(self): + return_code, result = 
get_result_and_return_code([ + 'pg-drop', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_all_result + + def test_show_pg_drop_masic(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic1' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_one_result + + def test_show_pg_drop_masic_not_exist(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 1 + assert result == "Input value for '--namespace' / '-n'. Choose from one of (asic0, asic1)" + + def test_clear_pg_drop(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == "Cleared PG drop counter\n" + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + UserCache('pg-drop').remove_all() + print("TEARDOWN") From b6f7c2b7d138299475d994d251721455deab668f Mon Sep 17 00:00:00 2001 From: Xinyu Lin Date: Fri, 12 Jul 2024 08:01:37 +0800 Subject: [PATCH 21/23] =?UTF-8?q?[sfputil]=20Add=20loopback=20sub-command?= =?UTF-8?q?=20for=20debugging=20and=20module=20diagnosti=E2=80=A6=20(#3369?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [sfputil] Add loopback sub-command for debugging and module diagnostic control Signed-off-by: xinyu * [sfputil] Correct and update the reference of sfputil debug loopback command Signed-off-by: xinyu --------- Signed-off-by: xinyu --- doc/Command-Reference.md | 27 ++++++++++++++++++++++++ sfputil/main.py | 45 ++++++++++++++++++++++++++++++++++++++++ tests/sfputil_test.py | 43 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 78474d59483..689ca23b73b 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -47,6 +47,8 @@ * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) * [CMIS firmware target mode commands](#cmis-firmware-target-mode-commands) +* [CMIS debug](#cmis-debug) +* [CMIS debug loopback](#cmis-debug-loopback) * [DHCP Relay](#dhcp-relay) * [DHCP Relay show commands](#dhcp-relay-show-commands) * [DHCP Relay clear commands](#dhcp-relay-clear-commands) @@ -3094,6 +3096,31 @@ Example of the module supporting target mode Target Mode set to 1 ``` +## CMIS debug + +### CMIS debug loopback + +This command is the standard CMIS diagnostic control used for troubleshooting link and performance issues between the host switch and transceiver module. 
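Under the hood, the command distinguishes three failure modes, handled in sequence by the sfputil/main.py hunk later in this patch: no transceiver API at all, an API that lacks loopback support, and a module that rejects the requested mode. A condensed sketch of that ladder (the fake classes and exit-code values are illustrative, not sfputil's own):

```
import sys

EXIT_FAIL = 1                 # illustrative values; sfputil defines its own
ERROR_NOT_IMPLEMENTED = 5

def set_module_loopback(sfp, port_name, loopback_mode):
    # NotImplementedError: the platform exposes no transceiver API at all.
    try:
        api = sfp.get_xcvr_api()
    except NotImplementedError:
        print("{}: This functionality is not implemented".format(port_name))
        sys.exit(ERROR_NOT_IMPLEMENTED)
    # AttributeError: an API exists but has no set_loopback_mode().
    try:
        status = api.set_loopback_mode(loopback_mode)
    except AttributeError:
        print("{}: Set loopback mode is not applicable for this module".format(port_name))
        sys.exit(ERROR_NOT_IMPLEMENTED)
    # A falsy status: the module itself rejected the requested mode.
    if not status:
        print("{}: Set {} loopback failed".format(port_name, loopback_mode))
        sys.exit(EXIT_FAIL)
    print("{}: Set {} loopback".format(port_name, loopback_mode))

class _FakeApi:
    def set_loopback_mode(self, mode):
        return mode != "none"   # pretend the module rejects "none"

class _FakeSfp:
    def get_xcvr_api(self):
        return _FakeApi()

set_module_loopback(_FakeSfp(), "Ethernet0", "host-side-input")
```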
+ +**sfputil debug loopback** + +- Usage: + ``` + sfputil debug loopback PORT_NAME LOOPBACK_MODE + + Set the loopback mode + host-side-input: host side input loopback mode + host-side-output: host side output loopback mode + media-side-input: media side input loopback mode + media-side-output: media side output loopback mode + none: disable loopback mode + ``` + +- Example: + ``` + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + ``` + ## DHCP Relay ### DHCP Relay show commands diff --git a/sfputil/main.py b/sfputil/main.py index 4bb9058d79b..2674c51b10f 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1888,5 +1888,50 @@ def get_overall_offset_sff8472(api, page, offset, size, wire_addr): return page * PAGE_SIZE + offset + PAGE_SIZE_FOR_A0H +# 'debug' subgroup +@cli.group() +def debug(): + """Module debug and diagnostic control""" + pass + + +# 'loopback' subcommand +@debug.command() +@click.argument('port_name', required=True, default=None) +@click.argument('loopback_mode', required=True, default="none", + type=click.Choice(["none", "host-side-input", "host-side-output", + "media-side-input", "media-side-output"])) +def loopback(port_name, loopback_mode): + """Set module diagnostic loopback mode + """ + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("{}: This functionality is not applicable for RJ45 port".format(port_name)) + sys.exit(EXIT_FAIL) + + if not is_sfp_present(port_name): + click.echo("{}: SFP EEPROM not detected".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + api = sfp.get_xcvr_api() + except NotImplementedError: + click.echo("{}: This functionality is not implemented".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + try: + status = api.set_loopback_mode(loopback_mode) + except AttributeError: + click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + if status: + click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + else: + click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + sys.exit(EXIT_FAIL) + if __name__ == '__main__': cli() diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 523848ec453..537c329819b 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1510,3 +1510,46 @@ def test_load_port_config(self, mock_is_multi_asic): mock_is_multi_asic.return_value = False assert sfputil.load_port_config() == True + + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + def test_debug_loopback(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + + runner = CliRunner() + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n' + mock_sfp.get_presence.return_value = True + + mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + 
["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: This functionality is not implemented\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: Set host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set none loopback failed\n' + assert result.exit_code == EXIT_FAIL + + mock_api.set_loopback_mode.return_value = True + mock_api.set_loopback_mode.side_effect = AttributeError + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED From c03c9c84ae3a015f857bf80806e7ba576b39d4ed Mon Sep 17 00:00:00 2001 From: Jianquan Ye Date: Tue, 16 Jul 2024 11:53:20 +1000 Subject: [PATCH 22/23] Revert "fix: fix show bgp summary output typo" (#3423) Reverts #3375 It will impact many automation scripts that the community users may have developped for years... So if we change it now, all those scripts will be impacted... not something we want to do just to correct a miss spelled word at this stage... Revert this change What I did Reverts fix: fix show bgp summary output typo #3375 Add comments in case someone else fix the typo without notification. co-authorized by: jianquanye@microsoft.com --- tests/bgp_commands_test.py | 286 +++++++++++++++++------------------ utilities_common/bgp_util.py | 6 +- 2 files changed, 148 insertions(+), 144 deletions(-) diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index 2a2179815f1..11415e8727e 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -25,32 +25,32 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 -10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 -10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 -10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 -10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 -10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 -10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 -10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 -10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 -10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 -10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 -10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 -10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 -10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 -10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 -10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 -10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 -10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 -10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 -10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 -10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 -10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 -10.0.0.61 4 64015 0 0 0 0 0 
never Active INT_NEIGH0 -10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 +10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 +10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 +10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 +10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 +10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 +10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 +10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 +10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 +10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 +10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 +10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 +10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 +10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 +10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 +10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 +10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 +10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 +10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 +10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 +10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 +10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 +10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 +10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 Total number of neighbors 24 """ @@ -65,32 +65,32 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 -fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 -fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 -fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 -fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 -fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 -fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 -fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 -fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 -fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 -fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 -fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 -fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 -fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 -fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 -fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 -fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 -fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 -fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 -fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 -fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 -fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 -fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 -fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 +fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 +fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 +fc00::3a 4 65200 
6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 +fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 +fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 +fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 +fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 +fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 +fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 +fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 +fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 +fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 +fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 +fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 +fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 +fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 +fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 +fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 +fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 +fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 +fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 +fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 +fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 Total number of neighbors 24 """ @@ -112,8 +112,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -128,8 +128,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -146,8 +146,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -164,8 +164,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -180,28 +180,28 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 
65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 Total number of neighbors 20 """ @@ -216,28 +216,28 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 -fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 -fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 -fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 -fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 -fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 -fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 -fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 -fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 -fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 -fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 -fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 -fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 -fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 -fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 -fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 -fc00::56 4 65213 4439 6579 0 
0 0 00:08:59 8514 ARISTA06T0 -fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 -fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 -fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 +fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 +fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 +fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 +fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 +fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 +fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 +fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 +fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 +fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 +fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 +fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 +fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 +fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 +fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 +fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 +fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 +fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 +fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 +fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 Total number of neighbors 20 """ @@ -252,31 +252,31 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ -3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 -3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 -10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ +3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 +3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 
1458 str2-chassis-lc7-1 +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 Total number of neighbors 23 """ @@ -291,8 +291,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -308,9 +308,9 @@ Peer groups 3, using 3 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 Total number of neighbors 1 """ @@ -326,17 +326,17 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- -3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 -3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 -3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 -3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 -3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 -3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- +3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 +3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 +3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 +3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 +3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 
str2-sonic-lc3-1-ASIC1 +3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 -""" +""" # noqa: E501 class TestBgpCommandsSingleAsic(object): diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index df2e4963b6f..cb49123c4bf 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -299,7 +299,11 @@ def display_bgp_summary(bgp_summary, af): af: IPV4 or IPV6 ''' - headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", + + # "Neighbhor" is a known typo, + # but fix it will impact lots of automation scripts that the community users may have developed for years + # for now, let's keep it as it is. + headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] try: From b9a6049a954f6053b49de198bbacf550d5728de7 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Tue, 16 Jul 2024 15:53:16 -0700 Subject: [PATCH 23/23] [Bug Fix] Fix disk check test and drops group test (#3424) * tests/disk_check_test.py: remove temp files during teardown - modify teardown_class() to remove /tmp/tmp* * tests/drops_group_test.py: add code to remove temporary files when setting up test class - add a remove_tmp_dropstat_file() function as a helper to clean the cache - add an invocation of remove_tmp_dropstat_file() in setup_class() of TestDropCounters class * tests/disk_check_test.py: fix the subprocess command in the teardown_class() function * tests/disk_check_test.py: fix formatting for pre-commit check * tests/drops_group_test.py: fix formatting for pre-commit check --- tests/disk_check_test.py | 5 ++++- tests/drops_group_test.py | 10 +++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index 82b8b16ff65..ac541b05b95 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -1,7 +1,6 @@ import sys import syslog from unittest.mock import patch -import pytest import subprocess sys.path.append("scripts") @@ -178,3 +177,7 @@ def test_readonly(self, mock_proc, mock_log): assert max_log_lvl == syslog.LOG_ERR + @classmethod + def teardown_class(cls): + subprocess.run("rm -rf /tmp/tmp*", shell=True) # cleanup the temporary dirs + print("TEARDOWN") diff --git a/tests/drops_group_test.py b/tests/drops_group_test.py index ad8c8a42039..93f99e3f1b6 100644 --- a/tests/drops_group_test.py +++ b/tests/drops_group_test.py @@ -3,6 +3,7 @@ import shutil from click.testing import CliRunner +from utilities_common.cli import UserCache test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -97,14 +98,17 @@ sonic_drops_test 0 0 """ -dropstat_path = "/tmp/dropstat-27" + +def remove_tmp_dropstat_file(): + # remove the tmp portstat + cache = UserCache("dropstat") + cache.remove_all() class TestDropCounters(object): @classmethod def setup_class(cls): print("SETUP") - if os.path.exists(dropstat_path): - shutil.rmtree(dropstat_path) + remove_tmp_dropstat_file() os.environ["PATH"] += os.pathsep + scripts_path os.environ["UTILITIES_UNIT_TESTING"] = "1"
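
The drops_group change above replaces the hard-coded /tmp/dropstat-27 path with the UserCache helper, so the test no longer needs to know where dropstat keeps its checkpoints. The pattern, sketched with a stand-in cache class (the real one lives in utilities_common.cli):

```
import os
import shutil
import tempfile

class FakeUserCache:
    """Stand-in for utilities_common.cli.UserCache, for illustration only."""
    def __init__(self, app_name):
        self.cache_dir = os.path.join(tempfile.gettempdir(), "cache", app_name)
        os.makedirs(self.cache_dir, exist_ok=True)

    def remove_all(self):
        # Drop every checkpoint the tool ever wrote, whatever its file name.
        shutil.rmtree(self.cache_dir, ignore_errors=True)

cache = FakeUserCache("dropstat")
open(os.path.join(cache.cache_dir, "checkpoint"), "w").close()
cache.remove_all()
assert not os.path.exists(cache.cache_dir)
print("dropstat cache cleared")
```

Routing cleanup through the cache object, as the test now does via remove_all(), keeps the on-disk layout a private detail of the tool rather than something each test reimplements.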