Merge branch 'master' into sensors
bmridul authored May 15, 2024
2 parents 648a501 + f652948 commit 82eefa7
Showing 34 changed files with 890 additions and 30 deletions.
2 changes: 1 addition & 1 deletion azure-pipelines.yml
@@ -71,7 +71,7 @@ stages:
curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
sudo apt-add-repository https://packages.microsoft.com/debian/10/prod
sudo apt-get update
sudo apt-get install -y dotnet-sdk-5.0
sudo apt-get install -y dotnet-sdk-8.0
displayName: "Install .NET CORE"
- task: PublishTestResults@2
8 changes: 4 additions & 4 deletions src/ax_interface/agent.py
@@ -15,9 +15,9 @@ def __init__(self, mib_cls, update_frequency, loop):
self.loop = loop

# synchronization events
self.run_enabled = asyncio.Event(loop=loop)
self.oid_updaters_enabled = asyncio.Event(loop=loop)
self.stopped = asyncio.Event(loop=loop)
self.run_enabled = asyncio.Event()
self.oid_updaters_enabled = asyncio.Event()
self.stopped = asyncio.Event()

# Initialize our MIB
self.mib_table = MIBTable(mib_cls, update_frequency)
@@ -46,7 +46,7 @@ async def run_in_event_loop(self):
# signal background tasks to halt
self.oid_updaters_enabled.clear()
# wait for handlers to come back
await asyncio.wait_for(background_task, BACKGROUND_WAIT_TIMEOUT, loop=self.loop)
await asyncio.wait_for(background_task, BACKGROUND_WAIT_TIMEOUT)

# signal that we're done!
self.stopped.set()
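Background on the asyncio change in this file: the explicit loop= argument was deprecated in Python 3.8 and removed in Python 3.10, so asyncio.Event() and asyncio.wait_for() now bind to the running loop implicitly. A minimal sketch of the updated pattern, illustrative only and not code from this repository (the BACKGROUND_WAIT_TIMEOUT value is assumed):

import asyncio

BACKGROUND_WAIT_TIMEOUT = 60  # seconds, assumed for illustration

class MiniAgent:
    def __init__(self):
        # Events no longer take loop=; they attach to the running loop when first awaited.
        self.oid_updaters_enabled = asyncio.Event()
        self.stopped = asyncio.Event()

    async def shutdown(self, background_task):
        # signal background tasks to halt, then wait for them without passing loop=
        self.oid_updaters_enabled.clear()
        await asyncio.wait_for(background_task, BACKGROUND_WAIT_TIMEOUT)
        self.stopped.set()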
20 changes: 19 additions & 1 deletion src/ax_interface/mib.py
@@ -30,17 +30,29 @@ def __init__(self):

async def start(self):
# Run the update while we are allowed
redis_exception_happen = False
while self.run_event.is_set():
try:
# reinit internal structures
if self.update_counter > self.reinit_rate:
# reconnect when redis exception happen
if redis_exception_happen:
self.reinit_connection()

self.reinit_data()
self.update_counter = 0
else:
self.update_counter += 1

# run the background update task
self.update_data()
redis_exception_happen = False
except RuntimeError:
# Any unexpected exception or error, log it and keep running
logger.exception("MIBUpdater.start() caught an unexpected exception during update_data()")
# When the redis server restarts, swsscommon throws swsscommon.RedisError; the redis connection must be re-initialized in reinit_data()
# TODO: change to swsscommon.RedisError
redis_exception_happen = True
except Exception:
# Any unexpected exception or error, log it and keep running
logger.exception("MIBUpdater.start() caught an unexpected exception during update_data()")
@@ -55,6 +67,12 @@ def reinit_data(self):
"""
return

def reinit_connection(self):
"""
Reinit redis connection task. Children may override this method.
"""
return

def update_data(self):
"""
Background task. Children must override this method.
@@ -282,7 +300,7 @@ def start_background_tasks(self, event):
fut = asyncio.ensure_future(updater.start())
fut.add_done_callback(MIBTable._done_background_task_callback)
tasks.append(fut)
return asyncio.gather(*tasks, loop=event._loop)
return asyncio.gather(*tasks)

def _find_parent_prefix(self, item):
oids = sorted(self.prefixes)
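The loop above treats a RuntimeError from update_data() as a possible Redis restart: it sets a flag and, on the next reinit cycle, calls the new reinit_connection() hook before reinit_data(). A hedged sketch of a subclass wiring up that hook (illustrative names; real overrides appear in the rfc* files below, and the import paths are assumptions):

from ax_interface.mib import MIBUpdater      # path assumed
from sonic_ax_impl import mibs               # path assumed
from sonic_ax_impl.mibs import Namespace     # path assumed

class ExampleUpdater(MIBUpdater):
    def __init__(self):
        super().__init__()
        self.db_conn = Namespace.init_namespace_dbs()

    def reinit_connection(self):
        # invoked by start() after a Redis error, before reinit_data()
        Namespace.connect_all_dbs(self.db_conn, mibs.APPL_DB)

    def reinit_data(self):
        pass  # rebuild cached state from the (re-)connected database

    def update_data(self):
        pass  # periodic refresh; may raise when the Redis server restarts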
2 changes: 1 addition & 1 deletion src/ax_interface/protocol.py
@@ -17,7 +17,7 @@ def __init__(self, mib_table, loop):
self.loop = loop
self.session_id = -1
self.mib_table = mib_table
self.closed = asyncio.Event(loop=loop)
self.closed = asyncio.Event()
self.counter = 0

def send_pdu(self, pdu):
6 changes: 5 additions & 1 deletion src/sonic_ax_impl/__init__.py
@@ -1,9 +1,13 @@
import json
import logging.handlers

import faulthandler

# configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.NullHandler())

# enable faulthandler to provide more information for debugging
# after faulthandler is enabled, the 'stderr' file is remembered by faulthandler:
# https://docs.python.org/dev/library/faulthandler.html#issue-with-file-descriptors
faulthandler.enable()
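For reference, faulthandler.enable() makes CPython dump the Python tracebacks of all threads to stderr on fatal signals (SIGSEGV, SIGFPE, SIGABRT, SIGBUS, SIGILL), which is the extra debug information the comment refers to. A small standalone illustration, not part of this commit, of how it can also be triggered on demand (Unix only):

import faulthandler
import signal
import sys

faulthandler.enable()  # dump tracebacks of all threads on fatal signals
# Optional: dump tracebacks without crashing by sending SIGUSR1 to the process.
faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True)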
Empty file modified: src/sonic_ax_impl/bin/sysDescr_pass.py (file mode 100644 → 100755)
13 changes: 12 additions & 1 deletion src/sonic_ax_impl/mibs/__init__.py
@@ -13,6 +13,8 @@

COUNTERS_PORT_NAME_MAP = 'COUNTERS_PORT_NAME_MAP'
COUNTERS_QUEUE_NAME_MAP = 'COUNTERS_QUEUE_NAME_MAP'
BUFFER_MAX_PARAM_TABLE = 'BUFFER_MAX_PARAM_TABLE'

LAG_TABLE = 'LAG_TABLE'
LAG_MEMBER_TABLE = 'LAG_MEMBER_TABLE'
LOC_CHASSIS_TABLE = 'LLDP_LOC_CHASSIS'
@@ -60,6 +62,16 @@ def chassis_info_table(chassis_name):

return "CHASSIS_INFO" + TABLE_NAME_SEPARATOR_VBAR + chassis_name


def buffer_max_parm_table(port_name):
"""
:param: port_name: port name
:return: max buffer parameters info for this port

return "BUFFER_MAX_PARAM_TABLE" + TABLE_NAME_SEPARATOR_VBAR + port_name


def fan_info_table(fan_name):
"""
:param: fan_name: fan name
@@ -453,7 +465,6 @@ def init_sync_d_queue_tables(db_conn):
port_index = get_index_from_str(port_name)
key = queue_key(port_index, queue_index)
port_queues_map[key] = sai_id

queue_stat_name = queue_table(sai_id)
queue_stat = db_conn.get_all(COUNTERS_DB, queue_stat_name, blocking=False)
if queue_stat is not None:
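The new buffer_max_parm_table() helper only builds a STATE_DB key; a hedged usage sketch follows (the dbs_get_all call mirrors how the helper is consumed in ciscoSwitchQosMIB.py further down; db_conn stands in for an initialized namespace connector):

key = mibs.buffer_max_parm_table("Ethernet0")
# -> "BUFFER_MAX_PARAM_TABLE|Ethernet0"
entry = Namespace.dbs_get_all(db_conn, mibs.STATE_DB, key)
max_queues = int(entry["max_queues"])  # "16" in the mock tables below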
6 changes: 6 additions & 0 deletions src/sonic_ax_impl/mibs/ietf/rfc1213.py
@@ -134,6 +134,9 @@ def __init__(self):
self.nexthop_map = {}
self.route_list = []

def reinit_connection(self):
Namespace.connect_all_dbs(self.db_conn, mibs.APPL_DB)

def update_data(self):
"""
Update redis (caches config)
@@ -216,6 +219,9 @@ def __init__(self):

self.namespace_db_map = Namespace.get_namespace_db_map(self.db_conn)

def reinit_connection(self):
Namespace.connect_namespace_dbs(self.db_conn)

def reinit_data(self):
"""
Subclass update interface information
3 changes: 3 additions & 0 deletions src/sonic_ax_impl/mibs/ietf/rfc2737.py
@@ -287,6 +287,9 @@ def create_physical_entity_updaters(self):
"""
return [creator(self) for creator in PhysicalTableMIBUpdater.physical_entity_updater_types]

def reinit_connection(self):
Namespace.connect_all_dbs(self.statedb, mibs.STATE_DB)

def reinit_data(self):
"""
Re-initialize all data.
3 changes: 3 additions & 0 deletions src/sonic_ax_impl/mibs/ietf/rfc2863.py
@@ -103,6 +103,9 @@ def __init__(self):

self.namespace_db_map = Namespace.get_namespace_db_map(self.db_conn)

def reinit_connection(self):
Namespace.connect_namespace_dbs(self.db_conn)

def reinit_data(self):
"""
Subclass update interface information
4 changes: 3 additions & 1 deletion src/sonic_ax_impl/mibs/ietf/rfc3433.py
@@ -408,6 +408,9 @@ def __init__(self):
self.voltage_sensor = []
self.current_sensor = []

def reinit_connection(self):
Namespace.connect_all_dbs(self.statedb, mibs.STATE_DB)

def reinit_data(self):
"""
Reinit data, clear cache
@@ -419,7 +422,6 @@ def reinit_data(self):
self.ent_phy_sensor_precision_map = {}
self.ent_phy_sensor_value_map = {}
self.ent_phy_sensor_oper_state_map = {}

transceiver_dom_encoded = Namespace.dbs_keys(self.statedb, mibs.STATE_DB, self.TRANSCEIVER_DOM_KEY_PATTERN)
if transceiver_dom_encoded:
self.transceiver_dom = [entry for entry in transceiver_dom_encoded]
3 changes: 3 additions & 0 deletions src/sonic_ax_impl/mibs/ietf/rfc4292.py
@@ -17,6 +17,9 @@ def __init__(self):
## loopback ip string -> ip address object
self.loips = {}

def reinit_connection(self):
Namespace.connect_all_dbs(self.db_conn, mibs.APPL_DB)

def reinit_data(self):
"""
Subclass update loopback information
3 changes: 3 additions & 0 deletions src/sonic_ax_impl/mibs/ietf/rfc4363.py
@@ -41,6 +41,9 @@ def fdb_vlanmac(self, fdb):
return None
return (int(vlan_id),) + mac_decimals(fdb["mac"])

def reinit_connection(self):
Namespace.connect_namespace_dbs(self.db_conn)

def reinit_data(self):
"""
Subclass update interface information
3 changes: 3 additions & 0 deletions src/sonic_ax_impl/mibs/vendor/cisco/ciscoPfcExtMIB.py
@@ -28,6 +28,9 @@ def __init__(self):
self.if_range = []
self.namespace_db_map = Namespace.get_namespace_db_map(self.db_conn)

def reinit_connection(self):
Namespace.connect_namespace_dbs(self.db_conn)

def reinit_data(self):
"""
Subclass update interface information
12 changes: 10 additions & 2 deletions src/sonic_ax_impl/mibs/vendor/cisco/ciscoSwitchQosMIB.py
@@ -46,6 +46,9 @@ def __init__(self):
"""
super().__init__()
self.db_conn = Namespace.init_namespace_dbs()
# establish connection to state database.
Namespace.connect_all_dbs(self.db_conn, mibs.STATE_DB)

self.lag_name_if_name_map = {}
self.if_name_lag_name_map = {}
self.oid_lag_name_map = {}
@@ -67,6 +70,9 @@ def __init__(self):
self.port_index_namespace = {}
self.namespace_db_map = Namespace.get_namespace_db_map(self.db_conn)

def reinit_connection(self):
Namespace.connect_namespace_dbs(self.db_conn)

def reinit_data(self):
"""
Subclass update interface information
@@ -129,8 +135,10 @@ def update_stats(self):
namespace = self.port_index_namespace[if_index]

# The first half of queue id is for ucast, and second half is for mcast
# To simulate vendor OID, we wrap queues by half distance
pq_count = math.ceil((max(if_queues) + 1) / 2)
# To simulate vendor OID, we wrap queues by max priority groups
port_max_queues = Namespace.dbs_get_all(self.db_conn, mibs.STATE_DB,
mibs.buffer_max_parm_table(self.oid_name_map[if_index]))['max_queues']
pq_count = math.ceil(int(port_max_queues) / 2)

for queue in if_queues:
# Get queue type and statistics
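In the hunk above, pq_count is now derived from the port's max_queues entry in STATE_DB rather than from the queues that happen to have counter OIDs. A worked example with max_queues = "16", the value used in the mock STATE_DB below (the fold of the multicast half onto the unicast indexes is shown only to illustrate the arithmetic; the exact use of pq_count lies outside the shown hunk):

import math

port_max_queues = "16"  # from BUFFER_MAX_PARAM_TABLE|<port> in STATE_DB
pq_count = math.ceil(int(port_max_queues) / 2)  # -> 8
# unicast queue 3 and its multicast counterpart, queue 11, wrap to the same index:
assert 3 % pq_count == 3 and 11 % pq_count == 3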
4 changes: 4 additions & 0 deletions tests/mock_tables/asic0/appl_db.json
@@ -46,6 +46,10 @@
"alias": "etp2",
"speed": 100000
},
"PORT_TABLE:Ethernet24": {
"alias": "etp17",
"speed": 100000
},
"PORT_TABLE:Ethernet-BP0": {
"description": "snowflake",
"alias": "etp3",
70 changes: 66 additions & 4 deletions tests/mock_tables/asic0/counters_db.json
@@ -311,7 +311,8 @@
"Ethernet-BP0": "oid:0x1000000000005",
"Ethernet-BP4": "oid:0x1000000000006",
"Ethernet-IB0": "oid:0x1000000000080",
"Ethernet-Rec0": "oid:0x1000000000081"
"Ethernet-Rec0": "oid:0x1000000000081",
"Ethernet24": "oid:0x1000000000014"
},
"COUNTERS_LAG_NAME_MAP": {
"PortChannel01": "oid:0x1000000000007"
@@ -592,7 +593,13 @@
"Ethernet4:12": "oid:0x15000000010244",
"Ethernet4:13": "oid:0x15000000010245",
"Ethernet4:14": "oid:0x15000000010246",
"Ethernet4:15": "oid:0x15000000010247"
"Ethernet4:15": "oid:0x15000000010247",
"Ethernet24:0": "oid:0x15000000000260",
"Ethernet24:1": "oid:0x15000000000261",
"Ethernet24:2": "oid:0x15000000000262",
"Ethernet24:3": "oid:0x15000000000263",
"Ethernet24:4": "oid:0x15000000000264",
"Ethernet24:6": "oid:0x15000000000266"
},
"COUNTERS_QUEUE_TYPE_MAP": {
"oid:0x15000000000230": "SAI_QUEUE_TYPE_UNICAST",
@@ -626,7 +633,13 @@
"oid:0x15000000010244": "SAI_QUEUE_TYPE_MULTICAST",
"oid:0x15000000010245": "SAI_QUEUE_TYPE_MULTICAST",
"oid:0x15000000010246": "SAI_QUEUE_TYPE_MULTICAST",
"oid:0x15000000010247": "SAI_QUEUE_TYPE_MULTICAST"
"oid:0x15000000010247": "SAI_QUEUE_TYPE_MULTICAST",
"oid:0x15000000000260": "SAI_QUEUE_TYPE_UNICAST",
"oid:0x15000000000261": "SAI_QUEUE_TYPE_UNICAST",
"oid:0x15000000000262": "SAI_QUEUE_TYPE_UNICAST",
"oid:0x15000000000263": "SAI_QUEUE_TYPE_UNICAST",
"oid:0x15000000000264": "SAI_QUEUE_TYPE_UNICAST",
"oid:0x15000000000266": "SAI_QUEUE_TYPE_UNICAST"
},
"COUNTERS:oid:0x15000000000230": {
"SAI_QUEUE_STAT_PACKETS": "1",
@@ -883,4 +896,53 @@
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
}}
},
"COUNTERS:oid:0x15000000000260": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "23492723984237432",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
},
"COUNTERS:oid:0x15000000000261": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "2",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
},
"COUNTERS:oid:0x15000000000262": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "2",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
},
"COUNTERS:oid:0x15000000000263": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "2",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
},
"COUNTERS:oid:0x15000000000264": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "2",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
},
"COUNTERS:oid:0x15000000000266": {
"SAI_QUEUE_STAT_PACKETS": "1",
"SAI_QUEUE_STAT_BYTES": "2",
"SAI_QUEUE_STAT_DROPPED_PACKETS": "3",
"SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS": "4",
"SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES": "5",
"SAI_QUEUE_STAT_WATERMARK_BYTES": "6"
}
}
9 changes: 9 additions & 0 deletions tests/mock_tables/asic0/state_db.json
@@ -33,5 +33,14 @@
},
"NEIGH_STATE_TABLE|fec0::ffff:afa:07": {
"state": "Active"
},
"BUFFER_MAX_PARAM_TABLE|Ethernet0": {
"max_queues": "16"
},
"BUFFER_MAX_PARAM_TABLE|Ethernet4": {
"max_queues": "16"
},
"BUFFER_MAX_PARAM_TABLE|Ethernet24": {
"max_queues": "16"
}
}
4 changes: 4 additions & 0 deletions tests/mock_tables/asic1/appl_db.json
@@ -49,6 +49,10 @@
"speed": 1000,
"alias": "etp16"
},
"PORT_TABLE:Ethernet32": {
"speed": 1000,
"alias": "etp18"
},
"PORT_TABLE:Ethernet-BP8": {
"alias": "etp7"
},