From a76f9290aa9f86b9184706db2e8e610ad8c79bc1 Mon Sep 17 00:00:00 2001
From: Teingi
Date: Wed, 8 May 2024 10:34:16 +0800
Subject: [PATCH 1/4] (#issue 158)

---
 handler/gather/gather_plan_monitor.py    |  2 +-
 handler/rca/rca_handler.py               |  7 +--
 handler/rca/scene/lock_conflict_scene.py | 62 ++++++--------------
 handler/rca/scene/major_hold_scene.py    | 75 ++++++++++--------------
 4 files changed, 49 insertions(+), 97 deletions(-)

diff --git a/handler/gather/gather_plan_monitor.py b/handler/gather/gather_plan_monitor.py
index 88d29e44..ac7b71c2 100644
--- a/handler/gather/gather_plan_monitor.py
+++ b/handler/gather/gather_plan_monitor.py
@@ -277,7 +277,7 @@ def report_schema(self, sql):
         for t in valid_words:
             try:
                 data = self.db_connector.execute_sql("show create table %s" % t)
-                schemas = schemas + "%s" % (data[1])
+                schemas = schemas + "{0}".format(data[0][1])
                 self.stdio.verbose("table schema: {0}".format(schemas))
             except Exception as e:
                 pass
diff --git a/handler/rca/rca_handler.py b/handler/rca/rca_handler.py
index 6574c6b0..94d4d017 100644
--- a/handler/rca/rca_handler.py
+++ b/handler/rca/rca_handler.py
@@ -293,20 +293,15 @@ def export(self):


 class RCA_ResultRecord:
-    def __init__(self,stdio=None):
+    def __init__(self):
         self.records = []
         self.suggest = "The suggest: "
-        self.stdio = stdio

     def add_record(self, record):
         self.records.append(record)
-        if self.stdio is not None:
-            self.stdio.verbose("add record: {0}".format(record))

     def add_suggest(self, suggest):
         self.suggest += suggest
-        if self.stdio is not None:
-            self.stdio.verbose("add suggest: {0}".format(suggest))

     def suggest_is_empty(self):
         return self.suggest == "The suggest: "
diff --git a/handler/rca/scene/lock_conflict_scene.py b/handler/rca/scene/lock_conflict_scene.py
index 97024ad4..6e0ab71a 100644
--- a/handler/rca/scene/lock_conflict_scene.py
+++ b/handler/rca/scene/lock_conflict_scene.py
@@ -15,22 +15,18 @@
 @file: lock_conflict_scene.py
 @desc:
 """
-import json
-
 from handler.rca.rca_exception import RCAInitException, RCANotNeedExecuteException
 from handler.rca.rca_handler import RcaScene, RCA_ResultRecord
-from common.tool import StringUtils, DateTimeEncoder
+from common.tool import StringUtils


 class LockConflictScene(RcaScene):
     def __init__(self):
         super().__init__()
-        self.local_path = ""

     def init(self, context):
         try:
             super().init(context)
-            self.local_path = context.get_variable('store_dir')
             if self.observer_version is None or len(self.observer_version.strip()) == 0 or self.observer_version == "":
                 raise Exception("observer version is None. Please check the NODES conf.")
         except Exception as e:
@@ -69,51 +65,27 @@ def __execute_4_2(self):
                     continue
                 else:
                     trans_id = OB_LOCKS_data['ID1']
-                    trans_record.add_record("get holding_lock trans_id:{0}".format(trans_id))
-                    holding_lock_session_id=trans_id
-                    self.stdio.verbose("get holding lock SESSION_ID by trans_id:{0}".format(trans_id))
-                    cursor_by_trans_id = self.ob_connector.execute_sql_return_cursor_dictionary(
-                        'select * from oceanbase.V$OB_TRANSACTION_PARTICIPANTS where TX_ID="{0}";'.format(holding_lock_session_id))
-                    holding_lock_session_id_datas = cursor_by_trans_id.fetchall()
-                    holding_lock_session_id = "not get"
-                    if len(holding_lock_session_id_datas) == 1:
-                        holding_lock_session_id=holding_lock_session_id_datas[0].get("SESSION_ID")
-                    trans_record.add_record("get holding_lock_session_id:{0}".format(holding_lock_session_id))
-
+                    trans_record.add_record("holding lock trans_id is {0}".format(trans_id))
                     wait_lock_trans_id=OB_LOCKS_data['TRANS_ID']
                     cursor_by_trans_id = self.ob_connector.execute_sql_return_cursor_dictionary(
                         'select * from oceanbase.V$OB_TRANSACTION_PARTICIPANTS where TX_ID="{0}";'.format(wait_lock_trans_id))
+                    self.stdio.verbose("get SESSION_ID by trans_id:{0}".format(trans_id))
                     trans_record.add_record("wait_lock_trans_id is {0}".format(wait_lock_trans_id))
-                    wait_lock_session_datas = cursor_by_trans_id.fetchall()
-                    wait_lock_session_id="not get"
-                    if len(wait_lock_session_datas) == 1:
-                        wait_lock_session_id=wait_lock_session_datas[0].get("SESSION_ID")
-                        trans_record.add_record("get wait_lock_session_id:{0}".format(wait_lock_session_datas[0].get("SESSION_ID")))
-                    self.stdio.verbose("get sql_info by holding_lock_session_id:{0}".format(holding_lock_session_id))
-                    # check SQL_AUDIT switch
-                    sql_info="not find"
-
-                    cursor_check_switch = self.ob_connector.execute_sql_return_cursor_dictionary("SHOW PARAMETERS LIKE '%enable_sql_audit%';")
-                    audit_switch_value = cursor_check_switch.fetchone().get('value')
-                    if audit_switch_value.strip().upper() == "TRUE":
-                        holding_lock_sql_info_cursor=self.ob_connector.execute_sql_return_cursor_dictionary(
-                            'SELECT * FROM oceanbase.v$OB_SQL_AUDIT where SID="{0}";'.format(holding_lock_session_id))
-                        holding_lock_sql_info= holding_lock_sql_info_cursor.fetchall()
-                        if len(holding_lock_sql_info)==0:
-                            trans_record.add_record("holding_lock_session_id: {0}; not find sql_info on v$OB_SQL_AUDIT".format(holding_lock_session_id))
-                        else:
-                            holding_lock_sql_info_json_data = json.dumps(holding_lock_sql_info, cls=DateTimeEncoder)
-                            file_name = "{0}/rca_holding_lock_sql_info_{1}.json".format( self.local_path, holding_lock_session_id)
-                            with open(file_name, 'w+') as f:
-                                f.write(str(holding_lock_sql_info_json_data))
-                            trans_record.add_record(
-                                "holding_lock_session_id: {0}. holding_lock_sql_info save on {1}".format(holding_lock_session_id,
-                                                                                                         file_name))
-                            sql_info="save on {0}".format(file_name)
+                    session_datas = cursor_by_trans_id.fetchall()
+                    trans_record.add_record(
+                        "get SESSION_ID by wait_lock_trans_id:{0}. get data:{0}".format(trans_id, session_datas))
+                    if len(session_datas) != 1:
+                        trans_record.add_suggest("wait_lock_session_id is not get. The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id))
+                        continue
+                    if session_datas[0].get("SESSION_ID") is not None:
+                        trans_record.add_record("get SESSION_ID:{0}".format(session_datas[0].get("SESSION_ID")))
+                        trans_record.add_suggest("Sessions corresponding to lock transactions. The ID is {0}, "
+                                                 "which may be a lock conflict issue.You can be accessed through kill "
+                                                 "session to rollback the corresponding transaction with ID. Please "
+                                                 "note that this will result in corresponding transaction regression! "
+                                                 "".format(session_datas[0].get("SESSION_ID")))
                     else:
-                        self.stdio.verbose("SQL_AUDIT switch is False")
-                        trans_record.add_record("SQL_AUDIT switch is False. can't get sql_info")
-                    trans_record.add_suggest("holding_lock_session_id: {0}; wait_lock_session_id : {1}, sql_info: {2}. Lock conflicts can be ended by killing holding_lock_session_id or wait_lock_session_id".format(holding_lock_session_id,wait_lock_session_id,sql_info))
+                        trans_record.add_record("wait_lock_session_id is not get. The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id))

             except Exception as e:
                 trans_record.add_record("get SESSION_ID panic. OB_LOCKS_data:{0} error: {1}".format(OB_LOCKS_data, e))
diff --git a/handler/rca/scene/major_hold_scene.py b/handler/rca/scene/major_hold_scene.py
index e72ecb14..7ebfb93d 100644
--- a/handler/rca/scene/major_hold_scene.py
+++ b/handler/rca/scene/major_hold_scene.py
@@ -31,7 +31,7 @@ def __init__(self):
     def init(self, context):
         try:
             super().init(context)
-            self.local_path = context.get_variable('store_dir')
+            self.local_path = context.get_variable('result_path')
             if self.observer_version is None:
                raise Exception("obproxy version is None. Please check the NODES conf.")
@@ -52,7 +52,7 @@ def execute(self):
             COMPACTING_data = self.ob_connector.execute_sql(
                 'select * from oceanbase.CDB_OB_MAJOR_COMPACTION where IS_ERROR="YES";')
             if len(COMPACTING_data) == 0:
-                first_record.add_record("ON CDB_OB_MAJOR_COMPACTION WHERE IS_ERROR='YES', data is not exist")
+                first_record.add_record("CDB_OB_MAJOR_COMPACTION is not exist IS_ERROR='YES'")
             else:
                 need_tag = True
                 CDB_OB_MAJOR_COMPACTION_err_tenant_ids = []
@@ -177,26 +177,22 @@ def execute(self):
                 sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format(
                     err_tenant_id, global_broadcast_scn)
-                cursor = self.ob_connector.execute_sql_return_cursor_dictionary(sql)
-                OB_COMPACTION_PROGRESS_data_global_broadcast_scn_data = cursor.fetchall()
-                OB_COMPACTION_PROGRESS_data_global_broadcast_scn_json_data = json.dumps(OB_COMPACTION_PROGRESS_data_global_broadcast_scn_data, cls=DateTimeEncoder)
-                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_global_broadcast_scn.json".format(
+                OB_COMPACTION_PROGRESS_data_global_broadcast_scn = self.ob_connector.execute_sql(sql)
+                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_global_broadcast_scn".format(
                     self.local_path, err_tenant_id)
-                with open(file_name, 'w+') as f:
-                    f.write(str(OB_COMPACTION_PROGRESS_data_global_broadcast_scn_json_data))
+                with open(file_name, 'w') as f:
+                    f.write(str(OB_COMPACTION_PROGRESS_data_global_broadcast_scn))
                 tenant_record.add_record(
                     "tenant_id:{0} OB_COMPACTION_PROGRESS_data_global_broadcast_scn save on {1}".format(err_tenant_id,
                                                                                                         file_name))
                 sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format(
                     err_tenant_id, last_scn)
-                cursor = self.ob_connector.execute_sql_return_cursor_dictionary(sql)
-                OB_COMPACTION_PROGRESS_data_last_scn_data = cursor.fetchall()
-                OB_COMPACTION_PROGRESS_data_last_scn_json_data = json.dumps(OB_COMPACTION_PROGRESS_data_last_scn_data, cls=DateTimeEncoder)
-                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_last_scn.json".format(
+                OB_COMPACTION_PROGRESS_data_last_scn = self.ob_connector.execute_sql(sql)
+                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_last_scn".format(
                     self.local_path, err_tenant_id)
-                with open(file_name, 'w+') as f:
-                    f.write(str(OB_COMPACTION_PROGRESS_data_last_scn_json_data))
+                with open(file_name, 'w') as f:
+                    f.write(str(OB_COMPACTION_PROGRESS_data_last_scn))
                 tenant_record.add_record(
                     "tenant_id:{0} OB_COMPACTION_PROGRESS_data_last_scn save on {1}".format(err_tenant_id,
                                                                                             file_name))
@@ -237,31 +233,19 @@ def execute(self):
             try:
                 cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
                     'select * from oceanbase.GV$OB_COMPACTION_SUGGESTIONS where tenant_id="{0}";'.format(err_tenant_id))
+                columns = [column[0] for column in cursor.description]
                 OB_COMPACTION_SUGGESTIONS_data = cursor.fetchall()
                 OB_COMPACTION_SUGGESTIONS_info = json.dumps(OB_COMPACTION_SUGGESTIONS_data, cls=DateTimeEncoder)
-                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_SUGGESTIONS_info.json".format(
+                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_SUGGESTIONS_info".format(
                     self.local_path, err_tenant_id)
-                with open(file_name, 'w+') as f:
+                with open(file_name, 'w') as f:
                     f.write(str(OB_COMPACTION_SUGGESTIONS_info))
                 tenant_record.add_record(
                     "tenant_id:{0} OB_COMPACTION_PROGRESS_data_last_scn save on {1}".format(err_tenant_id,
                                                                                             file_name))
             except Exception as e:
-                self.stdio.error("MajorHoldScene execute 5 exception: {0}".format(e))
-            #6
-            try:
-                # get oceanbase.__all_virtual_dag_warning_history status="RETRYED" type like "%MERGE%"
-                cursor=self.ob_connector.execute_sql_return_cursor_dictionary('SELECT * FROM oceanbase.__all_virtual_dag_warning_history WHERE tenant_id="{0}" AND status="RETRYED" AND type like "%MERGE%";'.format(err_tenant_id))
-                __all_virtual_dag_warning_history_data = cursor.fetchall()
-                file_name = "{0}/rca_major_hold_{0}_all_virtual_dag_warning_history.json".format(
-                    self.local_path, err_tenant_id)
-                __all_virtual_dag_warning_history_json_data = json.dumps(__all_virtual_dag_warning_history_data, cls=DateTimeEncoder)
-                with open(file_name, 'w+') as f:
-                    f.write(str(__all_virtual_dag_warning_history_json_data))
-                tenant_record.add_record("tenant_id:{0} all_virtual_dag_warning_history save on {1}".format(err_tenant_id,file_name))
-            except Exception as e:
-                self.stdio.error("MajorHoldScene execute 6 exception: {0}".format(e))
+                self.stdio.warn("MajorHoldScene execute 5 exception: {0}".format(e))
             tenant_record.add_suggest("send the {0} to the oceanbase community".format(self.local_path))
             self.Result.records.append(tenant_record)
@@ -271,14 +255,14 @@ def get_info__all_virtual_compaction_diagnose_info(self, tenant_record):
                 "SELECT * FROM oceanbase.__all_virtual_compaction_diagnose_info WHERE IS_ERROR = 'NO' OR IS_SUSPENDED = 'NO';")
             if len(COMPACTING_datas) == 0:
                 tenant_record.add_record(
-                    "ON oceanbase.__all_virtual_compaction_diagnose_info. No data WHERE IS_ERROR = 'NO' OR IS_SUSPENDED = 'NO';")
+                    "sql:select * from oceanbase.__all_virtual_compaction_diagnose_info; no data")
                 return
             else:
                 tenant_record.add_record(
                     "sql:select * from oceanbase.CDB_OB_MAJOR_COMPACTION where status=COMPACTING; "
                     "result:{0}".format(str(COMPACTING_datas)))
             for index, COMPACTING_data in COMPACTING_datas:
-                self.diagnose_info_switch(COMPACTING_data,tenant_record)
+                self.diagnose_info_switch(COMPACTING_data)
         except Exception as e:
             raise RCAExecuteException(
                 "MajorHoldScene execute get_info__all_virtual_compaction_diagnose_info exception: {0}".format(e))
@@ -304,7 +288,7 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                 log_name = "/tmp/rca_major_hold_schedule_medium_failed_{1}_{2}_{0}.txt".format(tenant_id, svr_ip,
                                                                                                svr_port)
                 tenant_record.add_record(
-                    "diagnose_info type: 'schedule medium failed'. time is {0},observer is {1}:{2},the log is {3}".format(
+                    "diagnose_info type is 'schedule medium failed'. time is {0},observer is {1}:{2},the log is {3}".format(
                         create_time, svr_ip, svr_port, log_name))
                 ssh_helper.ssh_exec_cmd(
                     'grep "schedule_medium_failed" {1}/log/observer.log* |grep -P "\[\d+\]" -m 1 -o >{0}'.format(log_name,
@@ -325,12 +309,12 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                     table_id, tenant_id))[0][7]
                 if compaction_scn > global_broadcast_scn:
                     tenant_record.add_record(
-                        "diagnose_info type: error_no. error_no: {0}, err_trace: {1} , table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn>global_broadcast_scn".format(
+                        "diagnose_info type is error_no. error_no: {0}, err_trace: {1} , table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn>global_broadcast_scn".format(
                             err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn))
                     return
                 else:
                     tenant_record.add_record(
-                        "diagnose_info type: error_no. error_no: {0}, err_trace:{1}, table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn<global_broadcast_scn".format(
+                        "diagnose_info type is error_no. error_no: {0}, err_trace:{1}, table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn<global_broadcast_scn".format(
                             err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn))
                 ssh_helper.ssh_exec_cmd('grep "{1}" ... > /tmp/{0}'.format(log_name, err_trace))
                 ssh_helper.download(log_name, local_path=self.local_path)
@@ -370,7 +354,7 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                 cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
                     "select * from oceanbase.__all_virtual_ls_info where tenant_id='{0}' and ls_id='{1}';".format(tenant_id,
                                                                                                                   ls_id))
-
+                columns = [column[0] for column in cursor.description]
                 all_virtual_ls_info_data = cursor.fetchall()
                 self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
                 tenant_record.add_record(
@@ -379,7 +363,7 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                     "result:{0}".format(str(self.all_virtual_ls_info)))
                 return
             elif "memtable can not create dag successfully" in diagnose_info:
-                tenant_record.add_record("diagnose_info type: memtable can not create dag successfully.")
+                tenant_record.add_record("diagnose_info type is memtable can not create dag successfully.")

                 global_broadcast_scn = self.ob_connector.execute_sql(
                     "select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(tenant_id))[0][3]
@@ -388,14 +372,14 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                     table_id, tenant_id))[0][7]
                 if compaction_scn > global_broadcast_scn:
                     tenant_record.add_record(
-                        "diagnose_info type: memtable can not create dag successfully. table_id:{0}, tenant_id:{1}, compaction_scn: {2}, global_broadcast_scn: {3}. compaction_scn>global_broadcast_scn".format(
+                        "diagnose_info type is memtable can not create dag successfully. table_id:{0}, tenant_id:{1}, compaction_scn: {2}, global_broadcast_scn: {3}. compaction_scn>global_broadcast_scn".format(
                             table_id, tenant_id, compaction_scn, global_broadcast_scn))
                     return
                 else:
                     cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
                         "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
                             svr_ip, svr_port, tenant_id))
-
+                    columns = [column[0] for column in cursor.description]
                     all_virtual_ls_info_data = cursor.fetchall()
                     self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
                     tenant_record.add_record(
@@ -406,11 +390,11 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                 return
             elif "medium wait for freeze" in diagnose_info or "major wait for freeze" in diagnose_info:
-                tenant_record.add_record("diagnose_info type: medium wait for freeze or major wait for freeze.")
+                tenant_record.add_record("diagnose_info type is medium wait for freeze or major wait for freeze.")
                 cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
                     "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
                         svr_ip, svr_port, tenant_id))
-
+                columns = [column[0] for column in cursor.description]
                 all_virtual_ls_info_data = cursor.fetchall()
                 self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
                 tenant_record.add_record(
@@ -420,10 +404,11 @@ def diagnose_info_switch(self, sql_data, tenant_record):
                     "result:{0}".format(str(self.all_virtual_ls_info)))
                 return
             elif "major not schedule for long time" in diagnose_info:
-                tenant_record.add_record("diagnose_info type: ‘major not schedule for long time’")
+                tenant_record.add_record("diagnose_info type is major not schedule for long time")
                 cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
                     "select * from oceanbase.__all_virtual_tablet_compaction_info where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}' and ls_id='{3}'
and tablet_id='{4}';".format( svr_ip, svr_port, tenant_id, ls_id, table_id)) + columns = [column[0] for column in cursor.description] all_virtual_ls_info_data = cursor.fetchall() all_virtual_tablet_compaction_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder) tenant_record.add_record( @@ -444,7 +429,7 @@ def diagnose_info_switch(self, sql_data, tenant_record): svr_ip, svr_port) tenant_record.add_record( - "diagnose_info type: 'major not schedule for long time'. time is {0},observer is {1}:{2},the log is {3}".format( + "diagnose_info type is 'major not schedule for long time'. time is {0},observer is {1}:{2},the log is {3}".format( create_time, svr_ip, svr_port, log_name)) thread_id = ssh_helper.ssh_exec_cmd( 'cat {0}/log/observer.log* |grep "MediumLoo" -m 1 |grep -P "\[\d+\]" -m 1 -o | grep -oP "\d+"'.format( @@ -456,7 +441,7 @@ def diagnose_info_switch(self, sql_data, tenant_record): ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name)) else: - tenant_record.add_record("diagnose_info type: Unknown.") + tenant_record.add_record("diagnose_info type is Unknown.") def export_result(self): return self.Result.export() From a14d126b82346d86eb208fd8bc17a7c9c6021b84 Mon Sep 17 00:00:00 2001 From: Teingi Date: Mon, 27 May 2024 14:11:10 +0800 Subject: [PATCH 2/4] format python code --- cmd.py | 118 +- common/__init__.py | 2 +- common/command.py | 63 +- common/config_helper.py | 59 +- common/constant.py | 38 +- common/log.py | 2 +- common/ob_connector.py | 16 +- common/ob_log_level.py | 2 +- common/ocp/__init__.py | 2 +- common/ocp/ocp_api.py | 1 - common/ocp/ocp_task.py | 10 +- common/scene.py | 11 +- common/ssh.py | 86 +- common/tool.py | 104 +- common/types.py | 29 +- common/version.py | 6 +- config.py | 71 +- context.py | 3 +- core.py | 74 +- dependencies/check_dependencies.py | 1 + err.py | 16 +- handler/__init__.py | 2 +- handler/analyzer/__init__.py | 2 +- handler/analyzer/analyze_flt_trace.py | 35 +- handler/analyzer/analyze_log.py | 142 +- handler/analyzer/log_parser/__init__.py | 2 +- handler/analyzer/log_parser/log_entry.py | 33 +- handler/analyzer/log_parser/tree.py | 24 +- handler/checker/__init__.py | 1 - handler/checker/check_exception.py | 10 +- handler/checker/check_handler.py | 82 +- handler/checker/check_list.py | 19 +- handler/checker/check_report.py | 14 +- handler/checker/check_task.py | 14 +- handler/checker/result/__init__.py | 1 - handler/checker/result/result.py | 13 +- handler/checker/result/verify.py | 36 +- handler/checker/step/__init__.py | 1 - handler/checker/step/data_size.py | 14 +- handler/checker/step/get_system_parameter.py | 13 +- handler/checker/step/sql.py | 10 +- handler/checker/step/ssh.py | 13 +- handler/checker/step/stepbase.py | 31 +- handler/gather/__init__.py | 2 +- handler/gather/gather_ash_report.py | 46 +- handler/gather/gather_awr.py | 50 +- handler/gather/gather_log.py | 113 +- handler/gather/gather_obadmin.py | 69 +- handler/gather/gather_obproxy_log.py | 100 +- handler/gather/gather_obstack2.py | 75 +- handler/gather/gather_perf.py | 48 +- handler/gather/gather_plan_monitor.py | 397 ++-- handler/gather/gather_scenes.py | 6 +- handler/gather/gather_sysstat.py | 57 +- handler/gather/scenes/__init__.py | 2 +- handler/gather/scenes/base.py | 9 +- handler/gather/scenes/cpu_high.py | 1 + handler/gather/scenes/list.py | 14 +- handler/gather/scenes/px_collect_log.py | 29 +- handler/gather/scenes/register.py | 21 +- handler/gather/scenes/sql_problem.py | 9 +- handler/gather/step/__init__.py | 2 +- handler/gather/step/base.py | 10 +- 
handler/gather/step/sql.py | 13 +- handler/gather/step/ssh.py | 4 +- handler/meta/__init__.py | 2 +- handler/meta/check_meta.py | 2 +- handler/meta/html_meta.py | 6 +- handler/meta/ob_error.py | 1958 +++++++++--------- handler/meta/sql_meta.py | 84 +- handler/rca/__init__.py | 1 - handler/rca/plugins/__init__.py | 1 - handler/rca/plugins/gather.py | 24 +- handler/rca/rca_exception.py | 2 - handler/rca/rca_handler.py | 54 +- handler/rca/rca_list.py | 13 +- handler/rca/scene/ddl_disk_full_scene.py | 75 +- handler/rca/scene/disconnection_scene.py | 95 +- handler/rca/scene/lock_conflict_scene.py | 64 +- handler/rca/scene/log_error_scene.py | 132 +- handler/rca/scene/major_hold_scene.py | 225 +- main.py | 2 +- stdio.py | 52 +- telemetry/__init__.py | 2 +- telemetry/telemetry.py | 47 +- test/analyzer/test_tree.py | 61 +- update/__init__.py | 2 +- update/update.py | 35 +- 88 files changed, 2325 insertions(+), 2917 deletions(-) diff --git a/cmd.py b/cmd.py index 2a225da8..a0ba7469 100644 --- a/cmd.py +++ b/cmd.py @@ -52,8 +52,7 @@ def format_option(self, option): if len(help_lines) == 1: help_lines = textwrap.wrap(help_text, self.help_width) result.append("%*s%s\n" % (indent_first, "", help_lines[0])) - result.extend(["%*s%s\n" % (self.help_position, "", line) - for line in help_lines[1:]]) + result.extend(["%*s%s\n" % (self.help_position, "", line) for line in help_lines[1:]]) elif opts[-1] != "\n": result.append("\n") return "".join(result) @@ -62,24 +61,8 @@ def format_option(self, option): class AllowUndefinedOptionParser(OptionParser): IS_TTY = sys.stdin.isatty() - def __init__(self, - usage=None, - option_list=None, - option_class=Option, - version=None, - conflict_handler="resolve", - description=None, - formatter=None, - add_help_option=True, - prog=None, - epilog=None, - allow_undefine=True, - undefine_warn=True - ): - OptionParser.__init__( - self, usage, option_list, option_class, version, conflict_handler, - description, formatter, add_help_option, prog, epilog - ) + def __init__(self, usage=None, option_list=None, option_class=Option, version=None, conflict_handler="resolve", description=None, formatter=None, add_help_option=True, prog=None, epilog=None, allow_undefine=True, undefine_warn=True): + OptionParser.__init__(self, usage, option_list, option_class, version, conflict_handler, description, formatter, add_help_option, prog, epilog) self.allow_undefine = allow_undefine self.undefine_warn = undefine_warn @@ -96,7 +79,7 @@ def _process_long_opt(self, rargs, values): except BadOptionError as e: if self.allow_undefine: key = e.opt_str - value = value[len(key) + 1:] + value = value[len(key) + 1 :] setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True) self.undefine_warn and self.warn(e) else: @@ -109,7 +92,7 @@ def _process_short_opts(self, rargs, values): except BadOptionError as e: if self.allow_undefine: key = e.opt_str - value = value[len(key) + 1:] + value = value[len(key) + 1 :] setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True) self.undefine_warn and self.warn(e) else: @@ -133,7 +116,7 @@ def __init__(self, name, summary): self.parser.add_option('-v', '--verbose', action='callback', callback=self._set_verbose, help='Activate verbose output.') def _set_verbose(self, *args, **kwargs): - ROOT_IO.set_verbose_level(0xfffffff) + ROOT_IO.set_verbose_level(0xFFFFFFF) def init(self, cmd, args): if self.is_init is False: @@ -198,7 +181,7 @@ def preprocess_argv(self, argv): elif to_index is not None and i == to_index: next_arg = 
argv[i + 1] if i + 1 < len(argv) else None if next_arg and self.is_valid_time_format(next_arg): - processed_argv.append(argv[i] + ' ' + next_arg) + processed_argv.append(argv[i] + ' ' + next_arg) to_index = None i += 1 else: @@ -273,6 +256,7 @@ def enable_log(self): def _do_command(self, obdiag): from common.ssh import LocalClient + if not self.cmds: return self._show_help() log_dir = os.path.expanduser('~/.obdiag/log') @@ -290,6 +274,7 @@ def _do_command(self, obdiag): ROOT_IO.print(data.stdout) return True + class MajorCommand(BaseCommand): def __init__(self, name, summary): @@ -323,8 +308,8 @@ def do_command(self): cmd = '%s %s' % (self.prev_cmd, base) ROOT_IO.track_limit += 1 if "main.py" in cmd: - telemetry.work_tag=False - telemetry.push_cmd_info("cmd: {0}. args:{1}".format(cmd,args)) + telemetry.work_tag = False + telemetry.push_cmd_info("cmd: {0}. args:{1}".format(cmd, args)) return self.commands[base].init(cmd, args).do_command() def register_command(self, command): @@ -347,7 +332,7 @@ def __init__(self): self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false") self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) - + def init(self, cmd, args): super(ObdiagGatherAllCommand, self).init(cmd, args) self.parser.set_usage('%s [options]' % self.prev_cmd) @@ -360,7 +345,7 @@ def _do_command(self, obdiag): class ObdiagGatherLogCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagGatherLogCommand, self).__init__('log', 'Gather oceanbase logs from oceanbase machines') + super(ObdiagGatherLogCommand, self).__init__('log', 'Gather oceanbase logs from oceanbase machines') self.parser.add_option('--from', type='string', help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--to', type='string', help="specify the end of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . 
example: 1h.", default='30m') @@ -378,10 +363,11 @@ def init(self, cmd, args): def _do_command(self, obdiag): return obdiag.gather_function('gather_log', self.opts) + class ObdiagGatherSysStatCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagGatherSysStatCommand, self).__init__('sysstat', 'Gather Host information') + super(ObdiagGatherSysStatCommand, self).__init__('sysstat', 'Gather Host information') self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) @@ -398,7 +384,7 @@ class ObdiagGatherStackCommand(ObdiagOriginCommand): def __init__(self): super(ObdiagGatherStackCommand, self).__init__('stack', 'Gather stack') - + self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) @@ -415,7 +401,7 @@ class ObdiagGatherPerfCommand(ObdiagOriginCommand): def __init__(self): super(ObdiagGatherPerfCommand, self).__init__('perf', 'Gather perf') - + self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('--scope', type='string', help="perf type constrains, choices=[sample, flame, pstack, all]", default='all') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) @@ -468,10 +454,11 @@ def init(self, cmd, args): def _do_command(self, obdiag): return obdiag.gather_function('gather_clog', self.opts) + class ObdiagGatherAwrCommand(ObdiagOriginCommand): - + def __init__(self): - super(ObdiagGatherAwrCommand, self).__init__('awr', 'Gather ParalleSQL information') + super(ObdiagGatherAwrCommand, self).__init__('awr', 'Gather ParalleSQL information') self.parser.add_option('--cluster_name', type='string', help='cluster_name from ocp') self.parser.add_option('--cluster_id', type='string', help='cluster_id from ocp') self.parser.add_option('--from', type='string', help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") @@ -492,7 +479,7 @@ def _do_command(self, obdiag): class ObdiagGatherPlanMonitorCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagGatherPlanMonitorCommand, self).__init__('plan_monitor', 'Gather ParalleSQL information') + super(ObdiagGatherPlanMonitorCommand, self).__init__('plan_monitor', 'Gather ParalleSQL information') self.parser.add_option('--trace_id', type='string', help='sql trace id') self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('--env', type='string', help='''env, eg: "{db_connect='-h127.0.0.1 -P2881 -utest@test -p****** -Dtest'}"''') @@ -510,7 +497,7 @@ def _do_command(self, obdiag): class ObdiagGatherObproxyLogCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagGatherObproxyLogCommand, self).__init__('obproxy_log', 'Gather obproxy log from obproxy machines') + super(ObdiagGatherObproxyLogCommand, self).__init__('obproxy_log', 'Gather obproxy log from obproxy machines') self.parser.add_option('--from', type='string', help="specify the start of the time range. 
format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--to', type='string', help="specify the end of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . example: 1h.", default='30m') @@ -530,10 +517,10 @@ def _do_command(self, obdiag): class ObdiagGatherSceneListCommand(ObdiagOriginCommand): - + def __init__(self): super(ObdiagGatherSceneListCommand, self).__init__('list', 'gather scene list') - + def init(self, cmd, args): super(ObdiagGatherSceneListCommand, self).init(cmd, args) return self @@ -543,13 +530,13 @@ def _do_command(self, obdiag): class ObdiagGatherSceneRunCommand(ObdiagOriginCommand): - + def __init__(self): super(ObdiagGatherSceneRunCommand, self).__init__('run', 'gather scene run') self.parser.add_option('--scene', type='string', help="Specify the scene to be gather") self.parser.add_option('--from', type='string', help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--to', type='string', help="specify the end of the time range. format: 'yyyy-mm-dd hh:mm:ss'") - self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . example: 1h.",default='30m') + self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . example: 1h.", default='30m') self.parser.add_option('--env', type='string', help='env, eg: "{env1=xxx, env2=xxx}"') self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) @@ -566,24 +553,16 @@ class ObdiagGatherAshReportCommand(ObdiagOriginCommand): def __init__(self): super(ObdiagGatherAshReportCommand, self).__init__('ash', 'Gather ash report') - self.parser.add_option('--trace_id', type='string', - help="The TRACE.ID of the SQL to be sampled, if left blank or filled with NULL, indicates that TRACE.ID is not restricted.") - self.parser.add_option('--sql_id', type='string', - help="The SQL.ID, if left blank or filled with NULL, indicates that SQL.ID is not restricted.") + self.parser.add_option('--trace_id', type='string', help="The TRACE.ID of the SQL to be sampled, if left blank or filled with NULL, indicates that TRACE.ID is not restricted.") + self.parser.add_option('--sql_id', type='string', help="The SQL.ID, if left blank or filled with NULL, indicates that SQL.ID is not restricted.") # WAIT_CLASS - self.parser.add_option('--wait_class', type='string', - help='Event types to be sampled.') - self.parser.add_option('--report_type', type='string', - help='Report type, currently only supports text type.', default='TEXT') - self.parser.add_option('--from', type='string', - help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") - self.parser.add_option('--to', type='string', - help="specify the end of the time range. 
format: 'yyyy-mm-dd hh:mm:ss'") - self.parser.add_option('--store_dir', type='string', - help='the dir to store gather result, current dir by default.', default='./') - - self.parser.add_option('-c', type='string', help='obdiag custom config', - default=os.path.expanduser('~/.obdiag/config.yml')) + self.parser.add_option('--wait_class', type='string', help='Event types to be sampled.') + self.parser.add_option('--report_type', type='string', help='Report type, currently only supports text type.', default='TEXT') + self.parser.add_option('--from', type='string', help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") + self.parser.add_option('--to', type='string', help="specify the end of the time range. format: 'yyyy-mm-dd hh:mm:ss'") + self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') + + self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) def init(self, cmd, args): super(ObdiagGatherAshReportCommand, self).init(cmd, args) @@ -596,7 +575,7 @@ def _do_command(self, obdiag): class ObdiagAnalyzeLogCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagAnalyzeLogCommand, self).__init__('log', 'Analyze oceanbase log from online observer machines or offline oceanbase log files') + super(ObdiagAnalyzeLogCommand, self).__init__('log', 'Analyze oceanbase log from online observer machines or offline oceanbase log files') self.parser.add_option('--from', type='string', help="specify the start of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--to', type='string', help="specify the end of the time range. format: 'yyyy-mm-dd hh:mm:ss'") self.parser.add_option('--scope', type='string', help="log type constrains, choices=[observer, election, rootservice, all]", default='all') @@ -605,7 +584,7 @@ def __init__(self): self.parser.add_option('--files', action="append", type='string', help="specify files") self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) - self.parser.add_option('--since', type='string',help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . example: 1h.",default='30m') + self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: . example: 1h.", default='30m') def init(self, cmd, args): super(ObdiagAnalyzeLogCommand, self).init(cmd, args) @@ -623,7 +602,7 @@ def _do_command(self, obdiag): class ObdiagAnalyzeFltTraceCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagAnalyzeFltTraceCommand, self).__init__('flt_trace', 'Analyze oceanbase trace.log from online observer machines or offline oceanbase trace.log files') + super(ObdiagAnalyzeFltTraceCommand, self).__init__('flt_trace', 'Analyze oceanbase trace.log from online observer machines or offline oceanbase trace.log files') self.parser.add_option('--flt_trace_id', type='string', help="flt trace id, . 
format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx") self.parser.add_option('--files', action="append", help="specify files") self.parser.add_option('--top', type='string', help="top leaf span", default=5) @@ -644,13 +623,12 @@ def _do_command(self, obdiag): class ObdiagCheckCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagCheckCommand, self).__init__('check', 'check oceanbase cluster') + super(ObdiagCheckCommand, self).__init__('check', 'check oceanbase cluster') self.parser.add_option('--cases', type='string', help="check observer's cases on package_file") self.parser.add_option('--obproxy_cases', type='string', help="check obproxy's cases on package_file") self.parser.add_option('--store_dir', type='string', help='the dir to store check result, current dir by default.', default='./check_report/') self.parser.add_option('-c', type='string', help='obdiag custom config', default=os.path.expanduser('~/.obdiag/config.yml')) - def init(self, cmd, args): super(ObdiagCheckCommand, self).init(cmd, args) self.parser.set_usage('%s [options]' % self.prev_cmd) @@ -666,7 +644,7 @@ def _do_command(self, obdiag): class ObdiagRCARunCommand(ObdiagOriginCommand): def __init__(self): - super(ObdiagRCARunCommand, self).__init__('run', 'root cause analysis') + super(ObdiagRCARunCommand, self).__init__('run', 'root cause analysis') self.parser.add_option('--scene', type='string', help="rca scene name. The argument is required.") self.parser.add_option('--store_dir', type='string', help='the dir to store rca result, current dir by default.', default='./rca/') self.parser.add_option('--input_parameters', type='string', help='input parameters of scene') @@ -696,7 +674,7 @@ def _do_command(self, obdiag): class ObdiagConfigCommand(ObdiagOriginCommand): - + def __init__(self): super(ObdiagConfigCommand, self).__init__('config', 'Quick build config') self.parser.add_option('-h', type='string', help="database host") @@ -712,13 +690,17 @@ def init(self, cmd, args): def _do_command(self, obdiag): return obdiag.config(self.opts) + class ObdiagUpdateCommand(ObdiagOriginCommand): - + def __init__(self): super(ObdiagUpdateCommand, self).__init__('update', 'Update cheat files') self.parser.add_option('--file', type='string', help="obdiag update cheat file path. 
Please note that you need to ensure the reliability of the files on your own.") - self.parser.add_option('--force', action='store_true', help='You can force online upgrades by adding --force in the command',) - + self.parser.add_option( + '--force', + action='store_true', + help='You can force online upgrades by adding --force in the command', + ) def init(self, cmd, args): super(ObdiagUpdateCommand, self).init(cmd, args) @@ -748,7 +730,7 @@ def __init__(self): class ObdiagGatherSceneCommand(MajorCommand): - + def __init__(self): super(ObdiagGatherSceneCommand, self).__init__('scene', 'Gather scene diagnostic info') self.register_command(ObdiagGatherSceneListCommand()) @@ -772,7 +754,7 @@ def __init__(self): class MainCommand(MajorCommand): - + def __init__(self): super(MainCommand, self).__init__('obdiag', '') self.register_command(DisplayTraceCommand()) diff --git a/common/__init__.py b/common/__init__.py index 2b33595e..4f2405d9 100644 --- a/common/__init__.py +++ b/common/__init__.py @@ -14,4 +14,4 @@ @time: 2022/6/20 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/common/command.py b/common/command.py index ae1f8c46..d418b19f 100644 --- a/common/command.py +++ b/common/command.py @@ -86,7 +86,7 @@ def download_file(is_ssh, ssh_helper, remote_path, local_path, stdio=None): """ try: if is_ssh: - stdio.verbose("Please wait a moment, download file [{0}] from server {1} to [{2}]".format(remote_path, ssh_helper.get_name(),local_path)) + stdio.verbose("Please wait a moment, download file [{0}] from server {1} to [{2}]".format(remote_path, ssh_helper.get_name(), local_path)) ssh_helper.download(remote_path, local_path) else: cmd = "cp -r {0} {1}".format(remote_path, local_path) @@ -102,8 +102,7 @@ def upload_file(is_ssh, ssh_helper, local_path, remote_path, stdio=None): :param args: is_ssh, ssh helper, local file path, remote file path :return: local path """ - stdio.verbose("Please wait a moment, upload file to server {0}, local file path {1}, remote file path {2}".format( - ssh_helper.get_name(), local_path, remote_path)) + stdio.verbose("Please wait a moment, upload file to server {0}, local file path {1}, remote file path {2}".format(ssh_helper.get_name(), local_path, remote_path)) try: if is_ssh: ssh_helper.upload(local_path, remote_path) @@ -126,6 +125,7 @@ def rm_rf_file(is_ssh, ssh_helper, dir, stdio=None): else: LocalClient(stdio).run(cmd) + def delete_file_in_folder(is_ssh, ssh_helper, file_path, stdio): """ delete file @@ -160,7 +160,7 @@ def is_empty_dir(is_ssh, ssh_helper, dir, stdio=None): def get_file_start_time(is_ssh, ssh_helper, file_name, dir, stdio=None): """ - get log file start time + get log file start time :param args: is_ssh, ssh helper, gather log full path :return: true or false """ @@ -174,7 +174,7 @@ def get_file_start_time(is_ssh, ssh_helper, file_name, dir, stdio=None): def get_logfile_name_list(is_ssh, ssh_helper, from_time_str, to_time_str, log_dir, log_files, stdio=None): """ - get log name list + get log name list :param args: is_ssh, ssh helper, from time, to time, log dir, log file list :return: true or false """ @@ -197,11 +197,9 @@ def get_logfile_name_list(is_ssh, ssh_helper, from_time_str, to_time_str, log_di elif last_file_dict["prefix_file_name"] == "": file_start_time_str = get_file_start_time(is_ssh, ssh_helper, file_name, log_dir, stdio) # When two time intervals overlap, need to add the file - if (file_end_time_str != "") and (file_start_time_str != "") and (file_start_time_str <= to_time_str) and ( - 
file_end_time_str >= from_time_str): + if (file_end_time_str != "") and (file_start_time_str != "") and (file_start_time_str <= to_time_str) and (file_end_time_str >= from_time_str): log_name_list.append(file_name) - last_file_dict = {"prefix_file_name": prefix_name, "file_name": file_name, - "file_end_time": file_end_time_str} + last_file_dict = {"prefix_file_name": prefix_name, "file_name": file_name, "file_end_time": file_end_time_str} elif file_name.endswith("log") or file_name.endswith("wf"): # Get the first and last lines of text of the file. Here, use a command get_first_line_cmd = "head -n 1 {0}/{1} && tail -n 1 {0}/{1}".format(log_dir, file_name) @@ -219,16 +217,14 @@ def get_logfile_name_list(is_ssh, ssh_helper, from_time_str, to_time_str, log_di # Time to parse the first and last lines of text file_start_time_str = TimeUtils.extract_time_from_log_file_text(first_line_text, stdio) file_end_time = TimeUtils.extract_time_from_log_file_text(last_line_text, stdio) - stdio.verbose("The log file {0} starts at {1} ends at {2}".format(file_name, file_start_time_str,file_end_time)) + stdio.verbose("The log file {0} starts at {1} ends at {2}".format(file_name, file_start_time_str, file_end_time)) stdio.verbose("to_time_str {0} from_time_str {1}".format(to_time_str, from_time_str)) if (file_start_time_str <= to_time_str) and (file_end_time >= from_time_str): log_name_list.append(file_name) if len(log_name_list) > 0: - stdio.verbose("Find the qualified log file {0} on Server [{1}], " - "wait for the next step".format(log_name_list, "localhost" if not is_ssh else ssh_helper.get_name())) + stdio.verbose("Find the qualified log file {0} on Server [{1}], " "wait for the next step".format(log_name_list, "localhost" if not is_ssh else ssh_helper.get_name())) else: - stdio.warn("No found the qualified log file on Server [{0}]".format( - "localhost" if not is_ssh else ssh_helper.get_name())) + stdio.warn("No found the qualified log file on Server [{0}]".format("localhost" if not is_ssh else ssh_helper.get_name())) return log_name_list @@ -264,9 +260,7 @@ def zip_dir(is_ssh, ssh_helper, father_dir, zip_dir, stdio=None): :param args: is_ssh, ssh helper, father dir, zip dir :return: """ - cmd = "cd {father_dir} && zip {zip_dir}.zip -rm {zip_dir}".format( - father_dir=father_dir, - zip_dir=zip_dir) + cmd = "cd {father_dir} && zip {zip_dir}.zip -rm {zip_dir}".format(father_dir=father_dir, zip_dir=zip_dir) stdio.verbose("Please wait a moment ...") if is_ssh: SshClient(stdio).run(ssh_helper, cmd) @@ -280,16 +274,14 @@ def zip_encrypt_dir(is_ssh, ssh_helper, zip_password, father_dir, zip_dir, stdio :param args: is_ssh, ssh helper, password, raw_log_dir, gather dir name :return: """ - cmd = "cd {father_dir} && zip --password {zip_password} {zip_dir}.zip -rm {zip_dir}".format( - zip_password=zip_password, - father_dir=father_dir, - zip_dir=zip_dir) + cmd = "cd {father_dir} && zip --password {zip_password} {zip_dir}.zip -rm {zip_dir}".format(zip_password=zip_password, father_dir=father_dir, zip_dir=zip_dir) stdio.verbose("Please wait a moment ...") if is_ssh: SshClient(stdio).run(ssh_helper, cmd) else: LocalClient(stdio).run(cmd) + def is_support_arch(is_ssh, ssh_helper, stdio=None): """ Determine if it is a supported operating system @@ -331,8 +323,7 @@ def get_observer_version(is_ssh, ssh_helper, ob_install_dir, stdio): result = re.sub(r'[a-zA-Z]', '', ob_version[0]) return result.strip() else: - cmd = "export LD_LIBRARY_PATH={ob_install_dir}/lib && {ob_install_dir}/bin/observer --version".format( - 
ob_install_dir=ob_install_dir) + cmd = "export LD_LIBRARY_PATH={ob_install_dir}/lib && {ob_install_dir}/bin/observer --version".format(ob_install_dir=ob_install_dir) if is_ssh: ob_version_info = SshClient(stdio).run_get_stderr(ssh_helper, cmd) else: @@ -363,13 +354,12 @@ def get_obproxy_version(is_ssh, ssh_helper, obproxy_install_dir, stdio): if len(ob_version) > 0: return ob_version[0] else: - cmd = "export LD_LIBRARY_PATH={obproxy_install_dir}/lib && {obproxy_install_dir}/bin/obproxy --version".format( - obproxy_install_dir=obproxy_install_dir) + cmd = "export LD_LIBRARY_PATH={obproxy_install_dir}/lib && {obproxy_install_dir}/bin/obproxy --version".format(obproxy_install_dir=obproxy_install_dir) if is_ssh: obproxy_version_info = SshClient(stdio).run_get_stderr(ssh_helper, cmd) else: obproxy_version_info = LocalClient(stdio).run_get_stderr(cmd) - stdio.verbose("get obproxy version with LD_LIBRARY_PATH,cmd:{0}, result:{1}".format(cmd,obproxy_version_info)) + stdio.verbose("get obproxy version with LD_LIBRARY_PATH,cmd:{0}, result:{1}".format(cmd, obproxy_version_info)) if "REVISION" not in obproxy_version_info: raise Exception("Please check conf about proxy,{0}".format(obproxy_version_info)) pattern = r"(\d+\.\d+\.\d+\.\d+)" @@ -383,17 +373,15 @@ def get_obproxy_version(is_ssh, ssh_helper, obproxy_install_dir, stdio): obproxy_version_info = match.group(1) obproxy_version_info = obproxy_version_info.split()[0] return obproxy_version_info + + # Only applicable to the community version + def get_observer_version_by_sql(ob_cluster, stdio=None): stdio.verbose("start get_observer_version_by_sql . input: {0}".format(ob_cluster)) try: - ob_connector = OBConnector(ip=ob_cluster.get("db_host"), - port=ob_cluster.get("db_port"), - username=ob_cluster.get("tenant_sys").get("user"), - password=ob_cluster.get("tenant_sys").get("password"), - stdio=stdio, - timeout=100) + ob_connector = OBConnector(ip=ob_cluster.get("db_host"), port=ob_cluster.get("db_port"), username=ob_cluster.get("tenant_sys").get("user"), password=ob_cluster.get("tenant_sys").get("password"), stdio=stdio, timeout=100) ob_version_info = ob_connector.execute_sql("select version();") except Exception as e: raise Exception("get_observer_version_by_sql Exception. 
Maybe cluster'info is error: " + e.__str__()) @@ -528,15 +516,14 @@ def uzip_dir_local(uzip_dir, stdio=None): cmd = f"cd {uzip_dir} && unzip *.zip && rm -rf *.zip" LocalClient(stdio).run(cmd) + def analyze_log_get_sqc_addr(uzip_dir, stdio): """ analyze files :param args: father dir, uzip dir :return: ip_port """ - cmd = "cd {uzip_dir} && cd ob_log* && grep {key_words} * | grep -oP '{key_words}=\"\\K[^\"]+' | sort | uniq".format( - uzip_dir=uzip_dir, - key_words = "px_obdiag_sqc_addr") + cmd = "cd {uzip_dir} && cd ob_log* && grep {key_words} * | grep -oP '{key_words}=\"\\K[^\"]+' | sort | uniq".format(uzip_dir=uzip_dir, key_words="px_obdiag_sqc_addr") stdout = LocalClient(stdio).run(cmd) sqc_addrs = stdout.decode().strip().split('\n') if len(sqc_addrs) > 0: @@ -547,14 +534,14 @@ def analyze_log_get_sqc_addr(uzip_dir, stdio): else: return None + def find_home_path_by_port(is_ssh, ssh_helper, internal_port_str, stdio): - cmd = "ps aux | grep observer | grep 'P {internal_port_str}' | grep -oP '/[^\s]*/bin/observer' ".format( - internal_port_str = internal_port_str) + cmd = "ps aux | grep observer | grep 'P {internal_port_str}' | grep -oP '/[^\s]*/bin/observer' ".format(internal_port_str=internal_port_str) if is_ssh: stdout = SshClient(stdio).run(ssh_helper, cmd) else: stdout = LocalClient(stdio).run(cmd) - + str_list = stdout.strip().split('\n') home_path = "" for original_str in str_list: diff --git a/common/config_helper.py b/common/config_helper.py index dacce797..979d59b9 100644 --- a/common/config_helper.py +++ b/common/config_helper.py @@ -41,17 +41,18 @@ def __init__(self, context): self.db_port = Util.get_option(options, 'P') self.config_path = os.path.expanduser('~/.obdiag/config.yml') self.inner_config = self.context.inner_config - self.ob_cluster = {"db_host": self.db_host, "db_port": self.db_port, "tenant_sys": {"password": self.sys_tenant_password, "user": self.sys_tenant_user, }} + self.ob_cluster = { + "db_host": self.db_host, + "db_port": self.db_port, + "tenant_sys": { + "password": self.sys_tenant_password, + "user": self.sys_tenant_user, + }, + } def get_cluster_name(self): ob_version = get_observer_version_by_sql(self.ob_cluster, self.stdio) - obConnetcor = OBConnector( - ip=self.db_host, - port=self.db_port, - username=self.sys_tenant_user, - password=self.sys_tenant_password, - stdio=self.stdio, - timeout=100) + obConnetcor = OBConnector(ip=self.db_host, port=self.db_port, username=self.sys_tenant_user, password=self.sys_tenant_password, stdio=self.stdio, timeout=100) if ob_version.startswith("3") or ob_version.startswith("2"): sql = "select cluster_name from oceanbase.v$ob_cluster" res = obConnetcor.execute_sql(sql) @@ -64,19 +65,13 @@ def get_cluster_name(self): def get_host_info_list_by_cluster(self): ob_version = get_observer_version_by_sql(self.ob_cluster, self.stdio) - obConnetcor = OBConnector(ip=self.db_host, - port=self.db_port, - username=self.sys_tenant_user, - password=self.sys_tenant_password, - stdio=self.stdio, - timeout=100) + obConnetcor = OBConnector(ip=self.db_host, port=self.db_port, username=self.sys_tenant_user, password=self.sys_tenant_password, stdio=self.stdio, timeout=100) sql = "select SVR_IP, SVR_PORT, ZONE, BUILD_VERSION from oceanbase.DBA_OB_SERVERS" if ob_version.startswith("3") or ob_version.startswith("2") or ob_version.startswith("1"): sql = "select SVR_IP, SVR_PORT, ZONE, BUILD_VERSION from oceanbase.__all_server" res = obConnetcor.execute_sql(sql) if len(res) == 0: - raise Exception("Failed to get the node from cluster config, " - 
"please check whether the cluster config correct!!!") + raise Exception("Failed to get the node from cluster config, " "please check whether the cluster config correct!!!") host_info_list = [] for row in res: host_info = OrderedDict() @@ -111,25 +106,8 @@ def build_configuration(self): global_data_dir = self.input_with_default("oceanbase data_dir", default_data_dir) global_redo_dir = self.input_with_default("oceanbase redo_dir", default_data_dir) tenant_sys_config = {"user": self.sys_tenant_user, "password": self.sys_tenant_password} - global_config = { - "ssh_username": global_ssh_username, - "ssh_password": global_ssh_password, - "ssh_port": global_ssh_port, - "ssh_key_file": "", - "home_path": global_home_path, - "data_dir": global_data_dir, - "redo_dir": global_redo_dir - } - new_config = { - "obcluster": { - "ob_cluster_name": ob_cluster_name, - "db_host": self.db_host, - "db_port": self.db_port, - "tenant_sys": tenant_sys_config, - "servers": { - "nodes": nodes_config, - "global": global_config - }}} + global_config = {"ssh_username": global_ssh_username, "ssh_password": global_ssh_password, "ssh_port": global_ssh_port, "ssh_key_file": "", "home_path": global_home_path, "data_dir": global_data_dir, "redo_dir": global_redo_dir} + new_config = {"obcluster": {"ob_cluster_name": ob_cluster_name, "db_host": self.db_host, "db_port": self.db_port, "tenant_sys": tenant_sys_config, "servers": {"nodes": nodes_config, "global": global_config}}} YamlUtils.write_yaml_data(new_config, self.config_path) need_config_obproxy = self.input_choice_default("need config obproxy [y/N]", "N") if need_config_obproxy: @@ -154,13 +132,7 @@ def build_obproxy_configuration(self, path): "ssh_key_file": "", "home_path": global_home_path, } - new_config = { - "obproxy": { - "obproxy_cluster_name": "obproxy", - "servers": { - "nodes": nodes_config, - "global": global_config - }}} + new_config = {"obproxy": {"obproxy_cluster_name": "obproxy", "servers": {"nodes": nodes_config, "global": global_config}}} YamlUtils.write_yaml_data_append(new_config, path) def get_old_configuration(self, path): @@ -185,8 +157,7 @@ def input_with_default(self, prompt, default): return value def input_password_with_default(self, prompt, default): - value = pwinput.pwinput(prompt="\033[32mEnter your {0} (default:'{1}'): \033[0m".format(prompt, default), - mask='*') + value = pwinput.pwinput(prompt="\033[32mEnter your {0} (default:'{1}'): \033[0m".format(prompt, default), mask='*') if value == '' or value.lower() == "y" or value.lower() == "yes": return default else: diff --git a/common/constant.py b/common/constant.py index 4f7b9755..c47c75f4 100644 --- a/common/constant.py +++ b/common/constant.py @@ -62,41 +62,15 @@ def __setattr__(self, name, value): const.FLT_TRACE_OUTPUT = 50 const.OBDIAG_BASE_DEFAULT_CONFIG = { - "obdiag": { - "basic": { - "config_path": "~/.obdiag/config.yml", - "config_backup_dir": "~/.obdiag/backup_conf", - "file_number_limit": 20, - "file_size_limit": "2G" - }, - "logger": { - "file_handler_log_level": "DEBUG", - "log_dir": "~/.obdiag/log", - "log_filename": "obdiag.log", - "log_level": "INFO", - "mode": "obdiag", - "stdout_handler_log_level": "INFO" + "obdiag": { + "basic": {"config_path": "~/.obdiag/config.yml", "config_backup_dir": "~/.obdiag/backup_conf", "file_number_limit": 20, "file_size_limit": "2G"}, + "logger": {"file_handler_log_level": "DEBUG", "log_dir": "~/.obdiag/log", "log_filename": "obdiag.log", "log_level": "INFO", "mode": "obdiag", "stdout_handler_log_level": "INFO"}, } - } } 
-const.OBDIAG_CHECK_DEFAULT_CONFIG = { - "check": { - "ignore_version": "false", - "report": { - "report_path": "./check_report/", - "export_type": "table" - }, - "package_file": "~/.obdiag/check/check_package.yaml", - "tasks_base_path": "~/.obdiag/check/tasks/" - } -} +const.OBDIAG_CHECK_DEFAULT_CONFIG = {"check": {"ignore_version": "false", "report": {"report_path": "./check_report/", "export_type": "table"}, "package_file": "~/.obdiag/check/check_package.yaml", "tasks_base_path": "~/.obdiag/check/tasks/"}} -const.OBDIAG_GATHER_DEFAULT_CONFIG = { - "gather": { - "cases_base_path": "~/.obdiag/gather/tasks" - } -} +const.OBDIAG_GATHER_DEFAULT_CONFIG = {"gather": {"cases_base_path": "~/.obdiag/gather/tasks"}} const.OBDIAG_RCA_DEFAULT_CONFIG = { "rca": { @@ -110,4 +84,4 @@ def __setattr__(self, name, value): const.UPDATE_REMOTE_SERVER = 'https://obbusiness-private.oss-cn-shanghai.aliyuncs.com' const.UPDATE_REMOTE_VERSION_FILE_NAME = 'https://obbusiness-private.oss-cn-shanghai.aliyuncs.com/download-center/opensource/obdiag/version.yaml' const.UPDATE_REMOTE_UPDATE_FILE_NAME = 'https://obbusiness-private.oss-cn-shanghai.aliyuncs.com/download-center/opensource/obdiag/data.tar' -const.RCA_WORK_PATH= '~/.obdiag/rca' \ No newline at end of file +const.RCA_WORK_PATH = '~/.obdiag/rca' diff --git a/common/log.py b/common/log.py index 4a98b80e..a1c584a5 100644 --- a/common/log.py +++ b/common/log.py @@ -27,4 +27,4 @@ def __init__(self, name, level=logging.DEBUG): self.buffer_size = 0 def _log(self, level, msg, args, end='\n', **kwargs): - return super(Logger, self)._log(level, msg, args, **kwargs) \ No newline at end of file + return super(Logger, self)._log(level, msg, args, **kwargs) diff --git a/common/ob_connector.py b/common/ob_connector.py index 948644a5..1ba0259c 100644 --- a/common/ob_connector.py +++ b/common/ob_connector.py @@ -20,7 +20,16 @@ class OBConnector(object): - def __init__(self, ip, port, username, password=None, database=None, stdio=None, timeout=30,): + def __init__( + self, + ip, + port, + username, + password=None, + database=None, + stdio=None, + timeout=30, + ): self.ip = str(ip) self.port = int(port) self.username = str(username) @@ -52,17 +61,16 @@ def _connect_db(self): self.stdio.error("connect OB: {0}:{1} with user {2} failed, error:{3}".format(self.ip, self.port, self.username, e)) return try: - ob_trx_timeout=self.timeout*1000000 + ob_trx_timeout = self.timeout * 1000000 self.execute_sql("SET SESSION ob_trx_timeout={0};".format(ob_trx_timeout)) except Exception as e: self.stdio.warn("set ob_trx_timeout failed, error:{0}".format(e)) try: - ob_query_timeout=self.timeout*1000000 + ob_query_timeout = self.timeout * 1000000 self.execute_sql("SET SESSION ob_query_timeout={0};".format(ob_query_timeout)) except Exception as e: self.stdio.warn("set ob_query_timeout failed, error:{0}".format(e)) - def execute_sql(self, sql): if self.conn is None: self._connect_db() diff --git a/common/ob_log_level.py b/common/ob_log_level.py index f5c07c6a..844f91ae 100644 --- a/common/ob_log_level.py +++ b/common/ob_log_level.py @@ -16,6 +16,7 @@ @desc: """ + class OBLogLevel(object): CRITICAL = 50 FATAL = 50 @@ -49,4 +50,3 @@ def get_log_level(self, level_str): return self.DEBUG else: return self.NOTSET - \ No newline at end of file diff --git a/common/ocp/__init__.py b/common/ocp/__init__.py index 2b33595e..4f2405d9 100644 --- a/common/ocp/__init__.py +++ b/common/ocp/__init__.py @@ -14,4 +14,4 @@ @time: 2022/6/20 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git 
a/common/ocp/ocp_api.py b/common/ocp/ocp_api.py index 6c63c8f2..e6a5fc16 100644 --- a/common/ocp/ocp_api.py +++ b/common/ocp/ocp_api.py @@ -35,4 +35,3 @@ # task task = "/api/v2/tasks/instances" - diff --git a/common/ocp/ocp_task.py b/common/ocp/ocp_task.py index 96c33ef6..878f5407 100644 --- a/common/ocp/ocp_task.py +++ b/common/ocp/ocp_task.py @@ -32,9 +32,7 @@ class Task: STATUS_TASK_PENDING = "PENDING" STATUS_TASK_SUCCESS = "SUCCESSFUL" STATUS_TASK_FAILED = "FAILED" - TASK_RETRY_PERMIT = { - "Task retry": 3 - } + TASK_RETRY_PERMIT = {"Task retry": 3} def __init__(self, url, auth, task_id): self.url = url @@ -88,12 +86,10 @@ def wait_done(self, interval=20): else: self.retry() self._retry_times += 1 - logger.warning('task %s failed,but allowed retry %s times, now retry %s time' % ( - self.name, self._retry_permit_time, self._retry_times)) + logger.warning('task %s failed,but allowed retry %s times, now retry %s time' % (self.name, self._retry_permit_time, self._retry_times)) time.sleep(interval) continue else: - logger.info( - "task(%s:%s) not finished yet, now status %s, waiting" % (self.id, self.name, self.status)) + logger.info("task(%s:%s) not finished yet, now status %s, waiting" % (self.id, self.name, self.status)) time.sleep(interval) return self.status diff --git a/common/scene.py b/common/scene.py index c9c4016c..0acb1f75 100644 --- a/common/scene.py +++ b/common/scene.py @@ -60,7 +60,8 @@ def filter_by_version(scene, cluster, stdio=None): stdio.exception("filter_by_version Exception : {0}".format(e)) raise Exception("filter_by_version Exception : {0}".format(e)) -def get_version(nodes, type,cluster, stdio=None): + +def get_version(nodes, type, cluster, stdio=None): try: if len(nodes) < 1: raise Exception("input nodes is empty, please check your config") @@ -69,7 +70,7 @@ def get_version(nodes, type,cluster, stdio=None): version = "" if type == "observer": try: - version = get_observer_version_by_sql(cluster,stdio) + version = get_observer_version_by_sql(cluster, stdio) except Exception as e: stdio.warn("get observer version by sql fail, use node ssher to get. Exception:{0}".format(e)) version = get_observer_version(True, ssh, nodes[0]["home_path"], stdio) @@ -80,6 +81,7 @@ def get_version(nodes, type,cluster, stdio=None): stdio.exception("can't get version, Exception: {0}".format(e)) raise Exception("can't get version, Exception: {0}".format(e)) + def get_obproxy_and_ob_version(obproxy_nodes, nodes, type, stdio=None): try: if type == "observer" or type == "other": @@ -95,9 +97,8 @@ def get_obproxy_and_ob_version(obproxy_nodes, nodes, type, stdio=None): ssh = SshHelper(True, node.get("ip"), node.get("ssh_username"), node.get("ssh_password"), node.get("ssh_port"), node.get("ssh_key_file"), node) version = get_obproxy_version(True, ssh, nodes[0]["home_path"], stdio) else: - raise Exception( - "type is {0} . No func to get the version".format(type)) + raise Exception("type is {0} . 
No func to get the version".format(type)) return version except Exception as e: stdio.exception("can't get version, Exception: {0}".format(e)) - raise Exception("can't get version, Exception: {0}".format(e)) \ No newline at end of file + raise Exception("can't get version, Exception: {0}".format(e)) diff --git a/common/ssh.py b/common/ssh.py index f58b8994..c9f468b7 100644 --- a/common/ssh.py +++ b/common/ssh.py @@ -42,6 +42,7 @@ from stdio import SafeStdio from err import EC_SSH_CONNECT from subprocess32 import Popen, PIPE + warnings.filterwarnings("ignore") @@ -59,7 +60,7 @@ def __init__(self, host, username='root', password=None, key_filename=None, port self.timeout = int(timeout) def __str__(self): - return '%s@%s' % (self.username ,self.host) + return '%s@%s' % (self.username, self.host) class SshReturn(object): @@ -71,10 +72,10 @@ def __init__(self, code, stdout, stderr): def __bool__(self): return self.code == 0 - + def __nonzero__(self): return self.__bool__() - + class FeatureSshReturn(SshReturn, SafeStdio): @@ -105,7 +106,7 @@ def _get_return(self): verbose_msg = 'exited code 255, error output:\n%s' % self._stderr self.stdio.verbose(verbose_msg) self.stdio.exception('') - + @property def code(self): self._get_return() @@ -175,7 +176,7 @@ def submit(self): class LocalClient(SafeStdio): - + @staticmethod def init_env(env=None): if env is None: @@ -199,7 +200,6 @@ def execute_command_background(command, env=None, timeout=None, stdio=None): stdio.exception('') return SshReturn(code, output, error) - @staticmethod def execute_command(command, env=None, timeout=None, stdio=None): stdio.verbose('local execute: %s ' % command, end='') @@ -298,6 +298,7 @@ def run_command(command, env=None, timeout=None, print_stderr=True, elimit=0, ol process.terminate() return SshReturn(code, stdout, stderr) + class RemoteTransporter(enum.Enum): CLIENT = 0 RSYNC = 1 @@ -372,7 +373,7 @@ def _add_env_for_local(self, key, value, rewrite=False): def get_env(self, key, stdio=None): return self.env[key] if key in self.env else None - def del_env(self, key, stdio=None): + def del_env(self, key, stdio=None): if key in self.env: stdio.verbose('%s@%s delete env %s' % (self.config.username, self.config.host, key)) del self.env[key] @@ -392,13 +393,7 @@ def _login(self, stdio=None, exit=True): self.ssh_client.set_missing_host_key_policy(AutoAddPolicy()) stdio.verbose('host: %s, port: %s, user: %s, password: %s' % (self.config.host, self.config.port, self.config.username, self.config.password)) self.ssh_client.connect( - self.config.host, - port=self.config.port, - username=self.config.username, - password=self.config.password, - key_filename=self.config.key_filename, - timeout=self.config.timeout, - disabled_algorithms=self._disabled_rsa_algorithms + self.config.host, port=self.config.port, username=self.config.username, password=self.config.password, key_filename=self.config.key_filename, timeout=self.config.timeout, disabled_algorithms=self._disabled_rsa_algorithms ) self.is_connected = True except AuthenticationException: @@ -470,7 +465,7 @@ def _execute_command(self, command, timeout=None, retry=3, stdio=None): except SSHException as e: if retry: self.close() - return self._execute_command(command, retry-1, stdio) + return self._execute_command(command, retry - 1, stdio) else: stdio.exception('') stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e)) @@ -556,11 +551,7 @@ def _rsync(self, source, target, stdio=None): identity_option += '-i {key_filename} 
'.format(key_filename=self.config.key_filename) if self.config.port: identity_option += '-p {}'.format(self.config.port) - cmd = 'yes | rsync -a -W -e "ssh {identity_option}" {source} {target}'.format( - identity_option=identity_option, - source=source, - target=target - ) + cmd = 'yes | rsync -a -W -e "ssh {identity_option}" {source} {target}'.format(identity_option=identity_option, source=source, target=target) ret = LocalClient.execute_command(cmd, stdio=stdio) return bool(ret) @@ -811,19 +802,24 @@ def file_uploader(self, local_dir, remote_dir, stdio=None): except: stdio.exception("") stdio.verbose('Failed to get %s' % remote_dir) + + # TODO ENV_DISABLE_RSA_ALGORITHMS need get by context.inner_context -ENV_DISABLE_RSA_ALGORITHMS=0 +ENV_DISABLE_RSA_ALGORITHMS = 0 + + def dis_rsa_algorithms(state=0): """ Disable RSA algorithms in OpenSSH server. """ global ENV_DISABLE_RSA_ALGORITHMS - ENV_DISABLE_RSA_ALGORITHMS=state + ENV_DISABLE_RSA_ALGORITHMS = state + + class SshHelper(object): - def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_port=None, key_file=None, - node=None, stdio=None): + def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_port=None, key_file=None, node=None, stdio=None): if node is None: - node={} + node = {} self.is_ssh = is_ssh self.stdio = stdio self.host_ip = host_ip @@ -832,7 +828,7 @@ def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_ self.need_password = True self.password = node.get("ssh_password") or password self.key_file = node.get("ssh_key_file") or key_file - self.key_file=os.path.expanduser(self.key_file) + self.key_file = os.path.expanduser(self.key_file) self.ssh_type = node.get("ssh_type") or "remote" self._ssh_fd = None self._sftp_client = None @@ -858,7 +854,7 @@ def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_ return if self.is_ssh: - self._disabled_rsa_algorithms=None + self._disabled_rsa_algorithms = None DISABLED_ALGORITHMS = dict(pubkeys=["rsa-sha2-512", "rsa-sha2-256"]) if ENV_DISABLE_RSA_ALGORITHMS == 1: self._disabled_rsa_algorithms = DISABLED_ALGORITHMS @@ -868,11 +864,11 @@ def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_ self._ssh_fd = paramiko.SSHClient() self._ssh_fd.set_missing_host_key_policy(paramiko.client.AutoAddPolicy()) self._ssh_fd.load_system_host_keys() - self._ssh_fd.connect(hostname=host_ip, username=username, key_filename=self.key_file, port=ssh_port,disabled_algorithms=self._disabled_rsa_algorithms) + self._ssh_fd.connect(hostname=host_ip, username=username, key_filename=self.key_file, port=ssh_port, disabled_algorithms=self._disabled_rsa_algorithms) except AuthenticationException: self.password = input("Authentication failed, Input {0}@{1} password:\n".format(username, host_ip)) self.need_password = True - self._ssh_fd.connect(hostname=host_ip, username=username, password=password, port=ssh_port,disabled_algorithms=self._disabled_rsa_algorithms) + self._ssh_fd.connect(hostname=host_ip, username=username, password=password, port=ssh_port, disabled_algorithms=self._disabled_rsa_algorithms) except Exception as e: raise OBDIAGSSHConnException("ssh {0}@{1}: failed, exception:{2}".format(username, host_ip, e)) else: @@ -880,7 +876,7 @@ def __init__(self, is_ssh=None, host_ip=None, username=None, password=None, ssh_ self._ssh_fd.set_missing_host_key_policy(paramiko.client.AutoAddPolicy()) self._ssh_fd.load_system_host_keys() self.need_password = True - 
self._ssh_fd.connect(hostname=host_ip, username=username, password=password, port=ssh_port,disabled_algorithms=self._disabled_rsa_algorithms) + self._ssh_fd.connect(hostname=host_ip, username=username, password=password, port=ssh_port, disabled_algorithms=self._disabled_rsa_algorithms) def ssh_exec_cmd(self, cmd): if self.ssh_type == "docker": @@ -894,9 +890,7 @@ def ssh_exec_cmd(self, cmd): stderr=True, ) if result.exit_code != 0: - raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " - "command=[{1}], exception:{2}".format(self.node["container_name"], cmd, - result.output.decode('utf-8'))) + raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " "command=[{1}], exception:{2}".format(self.node["container_name"], cmd, result.output.decode('utf-8'))) except Exception as e: self.stdio.error("sshHelper ssh_exec_cmd docker Exception: {0}".format(e)) @@ -907,11 +901,9 @@ def ssh_exec_cmd(self, cmd): stdin, stdout, stderr = self._ssh_fd.exec_command(cmd) err_text = stderr.read() if len(err_text): - raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " - "command=[{1}], exception:{2}".format(self.host_ip, cmd, err_text)) + raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " "command=[{1}], exception:{2}".format(self.host_ip, cmd, err_text)) except SSHException as e: - raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " - "command=[{1}], exception:{2}".format(self.host_ip, cmd, e)) + raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " "command=[{1}], exception:{2}".format(self.host_ip, cmd, e)) return stdout.read().decode('utf-8') def ssh_exec_cmd_ignore_err(self, cmd): @@ -988,10 +980,9 @@ def progress_bar(self, transferred, to_be_transferred, suffix=''): bar = '\033[32;1m%s\033[0m' % '=' * filled_len + '-' * (bar_len - filled_len) print_percents = round((percents * 5), 1) sys.stdout.flush() - sys.stdout.write('Downloading [%s] %s%s%s %s %s\r' % (bar, '\033[32;1m%s\033[0m' % print_percents, '% [', self.translate_byte(transferred), ']', suffix)) + sys.stdout.write('Downloading [%s] %s%s%s %s %s\r' % (bar, '\033[32;1m%s\033[0m' % print_percents, '% [', self.translate_byte(transferred), ']', suffix)) if transferred == to_be_transferred: - sys.stdout.write('Downloading [%s] %s%s%s %s %s\r' % ( - bar, '\033[32;1m%s\033[0m' % print_percents, '% [', self.translate_byte(transferred), ']', suffix)) + sys.stdout.write('Downloading [%s] %s%s%s %s %s\r' % (bar, '\033[32;1m%s\033[0m' % print_percents, '% [', self.translate_byte(transferred), ']', suffix)) print() def download(self, remote_path, local_path): @@ -1011,16 +1002,16 @@ def download(self, remote_path, local_path): transport = self._ssh_fd.get_transport() self._sftp_client = paramiko.SFTPClient.from_transport(transport) - print('Download {0}:{1}'.format(self.host_ip,remote_path)) + print('Download {0}:{1}'.format(self.host_ip, remote_path)) self._sftp_client.get(remote_path, local_path, callback=self.progress_bar) self._sftp_client.close() def translate_byte(self, B): B = float(B) KB = float(1024) - MB = float(KB ** 2) - GB = float(MB ** 2) - TB = float(GB ** 2) + MB = float(KB**2) + GB = float(MB**2) + TB = float(GB**2) if B < KB: return '{} {}'.format(B, 'bytes' if B > 1 else "byte") elif KB < B < MB: @@ -1081,11 +1072,10 @@ def ssh_invoke_shell_switch_user(self, new_user, cmd, time_out): self._ssh_fd.close() result = ssh.recv(65535) except SSHException as e: - raise OBDIAGShellCmdException("Execute Shell 
command on server {0} failed, " - "command=[{1}], exception:{2}".format(self.host_ip, cmd, e)) + raise OBDIAGShellCmdException("Execute Shell command on server {0} failed, " "command=[{1}], exception:{2}".format(self.host_ip, cmd, e)) return result def get_name(self): if self.ssh_type == "docker": - return "(docker)"+self.node.get("container_name") - return self.host_ip \ No newline at end of file + return "(docker)" + self.node.get("container_name") + return self.host_ip diff --git a/common/tool.py b/common/tool.py index bc8d7e6d..8bc35010 100644 --- a/common/tool.py +++ b/common/tool.py @@ -54,25 +54,12 @@ from ruamel.yaml import YAML from err import EC_SQL_EXECUTE_FAILED from stdio import SafeStdio + _open = open encoding_open = open -__all__ = ( -"Timeout", -"DynamicLoading", -"ConfigUtil", -"DirectoryUtil", -"FileUtil", -"YamlLoader", -"OrderedDict", -"COMMAND_ENV", -"TimeUtils", -"NetUtils", -"StringUtils", -"YamlUtils", -"Util" -) +__all__ = ("Timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict", "COMMAND_ENV", "TimeUtils", "NetUtils", "StringUtils", "YamlUtils", "Util") _WINDOWS = os.name == 'nt' @@ -318,6 +305,7 @@ class FileUtil(object): @staticmethod def checksum(target_path, stdio=None): from common.ssh import LocalClient + if not os.path.isfile(target_path): info = 'No such file: ' + target_path if stdio: @@ -374,12 +362,13 @@ def copy(src, dst, stdio=None): FileUtil.symlink(os.readlink(src), dst) return True with FileUtil.open(src, 'rb') as fsrc, FileUtil.open(dst, 'wb') as fdst: - FileUtil.copy_fileobj(fsrc, fdst) - os.chmod(dst, os.stat(src).st_mode) - return True + FileUtil.copy_fileobj(fsrc, fdst) + os.chmod(dst, os.stat(src).st_mode) + return True except Exception as e: if int(getattr(e, 'errno', -1)) == 26: from common.ssh import LocalClient + if LocalClient.execute_command('/usr/bin/cp -f %s %s' % (src, dst), stdio=stdio): return True elif stdio: @@ -503,7 +492,7 @@ def size_format(num, unit="B", output_str=False, stdio=None): unit_idx = units.index(unit) except KeyError: raise ValueError("unit {0} is illegal!".format(unit)) - new_num = float(num) * (1024 ** unit_idx) + new_num = float(num) * (1024**unit_idx) unit_idx = 0 while new_num > 1024: new_num = float(new_num) / 1024 @@ -513,23 +502,21 @@ def size_format(num, unit="B", output_str=False, stdio=None): if output_str: return "".join(["%.3f" % new_num, units[unit_idx]]) return new_num, units[unit_idx] - + @staticmethod def show_file_size_tabulate(ip, file_size, stdio=None): format_file_size = FileUtil.size_format(int(file_size), output_str=True, stdio=stdio) summary_tab = [] field_names = ["Node", "LogSize"] summary_tab.append((ip, format_file_size)) - return "\nZipFileInfo:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + return "\nZipFileInfo:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) @staticmethod def show_file_list_tabulate(ip, file_list, stdio=None): summary_tab = [] field_names = ["Node", "LogList"] summary_tab.append((ip, file_list)) - return "\nFileListInfo:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + return "\nFileListInfo:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) @staticmethod def find_all_file(base, stdio=None): @@ -632,6 +619,7 @@ def dumps(self, data, transform=None): self.stdio.exception('dumps error:\n%s' % e) raise e + class YamlUtils(object): @staticmethod 
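One hunk above is worth calling out: in FileUtil.copy, the copy_fileobj/chmod/return statements are re-indented into the with block, so the copy appears to run while fsrc and fdst are still open handles rather than after the block has released them. A minimal standalone sketch of the resulting pattern, under the assumption that this re-indent is the intended behavior; the helper name is hypothetical and stdlib open/shutil stand in for obdiag's FileUtil wrappers:

    import os
    import shutil

    def copy_with_mode(src, dst):
        # Copy the file contents while both handles are open,
        # mirroring the patched FileUtil.copy control flow.
        with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
            shutil.copyfileobj(fsrc, fdst)
        # Carry the source's permission bits over to the destination
        # after the data has been written and the handles closed.
        os.chmod(dst, os.stat(src).st_mode)
        return True

The patched method keeps the chmod and return inside the with block as well; that is equivalent here, since the handles are only needed for the copy itself.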
@@ -760,7 +748,7 @@ def get_inner_ip(stdio=None): return localhost_ip except Exception as e: return localhost_ip - + @staticmethod def network_connectivity(url="", stdio=None): try: @@ -772,7 +760,7 @@ def network_connectivity(url="", stdio=None): return False except Exception as e: return False - + @staticmethod def download_file(url, local_filename, stdio=None): with requests.get(url, stream=True) as r: @@ -782,7 +770,8 @@ def download_file(url, local_filename, stdio=None): f.write(chunk) return local_filename -COMMAND_ENV=CommandEnv() + +COMMAND_ENV = CommandEnv() class DateTimeEncoder(json.JSONEncoder): @@ -793,6 +782,7 @@ def default(self, obj): # 其他类型按默认处理 return super().default(obj) + class TimeUtils(object): @staticmethod @@ -818,7 +808,6 @@ def get_format_time(time_str, stdio=None): except Exception as e: stdio.exception('%s parse time fialed, error:\n%s, time format need to be %s' % (time_str, e, '%Y-%m-%d %H:%M:%S')) - @staticmethod def sub_minutes(t, delta, stdio=None): try: @@ -826,7 +815,6 @@ def sub_minutes(t, delta, stdio=None): except Exception as e: stdio.exception('%s get time fialed, error:\n%s' % (t, e)) - @staticmethod def add_minutes(t, delta, stdio=None): try: @@ -869,7 +857,7 @@ def get_current_us_timestamp(stdio=None): @staticmethod def parse_time_length_to_sec(time_length_str, stdio=None): unit = time_length_str[-1] - if unit != "m" and unit != "h" and unit != "d": + if unit != "m" and unit != "h" and unit != "d": raise Exception("time length must be format 'n'") value = int(time_length_str[:-1]) if unit == "m": @@ -920,7 +908,7 @@ def parse_time_str(arg_time, stdio=None): @staticmethod def filename_time_to_datetime(filename_time, stdio=None): - """ transform yyyymmddhhmmss to yyyy-mm-dd hh:mm:ss""" + """transform yyyymmddhhmmss to yyyy-mm-dd hh:mm:ss""" if filename_time != "": return "{0}-{1}-{2} {3}:{4}:{5}".format(filename_time[0:4], filename_time[4:6], filename_time[6:8], filename_time[8:10], filename_time[10:12], filename_time[12:14]) else: @@ -928,7 +916,7 @@ def filename_time_to_datetime(filename_time, stdio=None): @staticmethod def extract_filename_time_from_log_name(log_name, stdio=None): - """ eg: xxx.20221226231617 """ + """eg: xxx.20221226231617""" log_name_fields = log_name.split(".") if bytes.isdigit(log_name_fields[-1].encode("utf-8")) and len(log_name_fields[-1]) >= 14: return log_name_fields[-1] @@ -939,10 +927,10 @@ def extract_time_from_log_file_text(log_text, stdio=None): # 因为 yyyy-mm-dd hh:mm:ss.000000 的格式已经占了27个字符,所以如果传进来的字符串包含时间信息,那长度一定大于27 if len(log_text) > 27: if log_text.startswith("["): - time_str = log_text[1: log_text.find(']')] + time_str = log_text[1 : log_text.find(']')] else: - time_str = log_text[0: log_text.find(',')] - time_without_us = time_str[0: time_str.find('.')] + time_str = log_text[0 : log_text.find(',')] + time_without_us = time_str[0 : time_str.find('.')] try: format_time = datetime.datetime.strptime(time_without_us, "%Y-%m-%d %H:%M:%S") format_time_str = time.strftime("%Y-%m-%d %H:%M:%S", format_time.timetuple()) @@ -1011,7 +999,8 @@ def str_2_timestamp(t, stdio=None): if isinstance(t, int): return t temp = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S.%f') - return int(datetime.datetime.timestamp(temp) * 10 ** 6) + return int(datetime.datetime.timestamp(temp) * 10**6) + class StringUtils(object): @@ -1025,7 +1014,7 @@ def parse_mysql_conn(cli_conn_str, stdio=None): password = password_match.group(2) db_info['password'] = password # 去除密码部分,避免后续解析出错 - cli_conn_str = cli_conn_str[:password_match.start()] + 
cli_conn_str[password_match.end():] + cli_conn_str = cli_conn_str[: password_match.start()] + cli_conn_str[password_match.end() :] # 模式匹配短选项 short_opt_pattern = re.compile(r'-(\w)\s*(\S*)') @@ -1096,9 +1085,9 @@ def get_observer_ip_from_trace_id(content, stdio=None): if content[0] == 'Y' and len(content) >= 12: sep = content.find('-') uval = int(content[1:sep], 16) - ip = uval & 0xffffffff - port = (uval >> 32) & 0xffff - return "%d.%d.%d.%d:%d" % ((ip >> 24 & 0xff), (ip >> 16 & 0xff), (ip >> 8 & 0xff), (ip >> 0 & 0xff), port) + ip = uval & 0xFFFFFFFF + port = (uval >> 32) & 0xFFFF + return "%d.%d.%d.%d:%d" % ((ip >> 24 & 0xFF), (ip >> 16 & 0xFF), (ip >> 8 & 0xFF), (ip >> 0 & 0xFF), port) else: return "" @@ -1143,29 +1132,35 @@ def parse_range_string(range_str, nu, stdio=None): def build_str_on_expr_by_dict(expr, variable_dict, stdio=None): s = expr d = variable_dict + def replacer(match): key = match.group(1) return str(d.get(key, match.group(0))) + return re.sub(r'#\{(\w+)\}', replacer, s) @staticmethod def build_str_on_expr_by_dict_2(expr, variable_dict, stdio=None): s = expr d = variable_dict + def replacer(match): key = match.group(1) value = str(d.get(key, match.group(0))) return f"{value}" + return re.sub(r'\$\{(\w+)\}', replacer, s) @staticmethod def build_sql_on_expr_by_dict(expr, variable_dict, stdio=None): s = expr d = variable_dict + def replacer(match): key = match.group(1) value = str(d.get(key, match.group(0))) return f'"{value}"' + return re.sub(r'\$\{(\w+)\}', replacer, s) @staticmethod @@ -1214,7 +1209,6 @@ def compare_versions_lower(v1, v2, stdio=None): return len(v1.split(".")) < len(v2.split(".")) - class Cursor(SafeStdio): def __init__(self, ip, port, user='root', tenant='sys', password='', stdio=None): @@ -1249,15 +1243,17 @@ def raise_cursor(self): return raise_cursor if sys.version_info.major == 2: + def _connect(self): self.stdio.verbose('connect %s -P%s -u%s -p%s' % (self.ip, self.port, self.user, self.password)) self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), passwd=str(self.password)) self.cursor = self.db.cursor(cursorclass=mysql.cursors.DictCursor) + else: + def _connect(self): self.stdio.verbose('connect %s -P%s -u%s -p%s' % (self.ip, self.port, self.user, self.password)) - self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), - cursorclass=mysql.cursors.DictCursor) + self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), cursorclass=mysql.cursors.DictCursor) self.cursor = self.db.cursor() def new_cursor(self, tenant='sys', user='root', password='', ip='', port='', print_exception=True): @@ -1305,22 +1301,22 @@ class Util(object): @staticmethod def get_option(options, key, default=None): - if not hasattr(options, key) : + if not hasattr(options, key): return default value = getattr(options, key) if value is None: value = default return value - + @staticmethod def set_option(options, key, value): setattr(options, key, value) - + @staticmethod def convert_to_number(s, stdio=None): if isinstance(s, (int, float)): return s - if isinstance(s,decimal.Decimal): + if isinstance(s, decimal.Decimal): try: return float(s) except: @@ -1350,12 +1346,12 @@ def print_scene(scene_dict, stdio=None): table_data = [[value[key] for key in keys] for value in scene_dict.values()] column_widths = [max(len(str(item)) * (StringUtils.is_chinese(item) or 1) for item in column) for column in zip(*table_data)] table_data.insert(0, keys) - Util.print_line(length= 
sum(column_widths) + 5) + Util.print_line(length=sum(column_widths) + 5) for i in range(len(table_data)): print(Fore.GREEN + " ".join(f"{item:<{width}}" for item, width in zip(table_data[i], column_widths)) + Style.RESET_ALL) if i == 0: - Util.print_line(length= sum(column_widths) + 5) - Util.print_line(length= sum(column_widths) + 5) + Util.print_line(length=sum(column_widths) + 5) + Util.print_line(length=sum(column_widths) + 5) @staticmethod def print_line(char='-', length=50, stdio=None): @@ -1377,18 +1373,20 @@ def wrapper(*args, **kwargs): return_values = decor_method(*args, **kwargs) return return_values except Exception as e: - if getattr(stdio, "Function execution %s retry: %s " %(decor_method.__name__, count + 1), False): + if getattr(stdio, "Function execution %s retry: %s " % (decor_method.__name__, count + 1), False): stdio.exception('dumps error:\n%s' % e) time.sleep(retry_interval) if count == retry_count - 1: raise e + return wrapper + return real_decorator @staticmethod def get_nodes_list(context, nodes, stdio=None): - ctx_nodes = context.get_variable("filter_nodes_list",None) - if ctx_nodes is not None and len(ctx_nodes)>0: + ctx_nodes = context.get_variable("filter_nodes_list", None) + if ctx_nodes is not None and len(ctx_nodes) > 0: new_nodes = [] for node in nodes: if node in ctx_nodes: diff --git a/common/types.py b/common/types.py index 2baef5f0..67576319 100644 --- a/common/types.py +++ b/common/types.py @@ -23,9 +23,7 @@ import uuid import traceback -__all__ = ( -"Moment", "Time", "Capacity", "CapacityWithB", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", -"Boolean", "Integer", "String", "Path", "SafeString", "PathList", "SafeStringList", "DBUrl", "WebUrl", "OBUser") +__all__ = ("Moment", "Time", "Capacity", "CapacityWithB", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String", "Path", "SafeString", "PathList", "SafeStringList", "DBUrl", "WebUrl", "OBUser") class Null(object): @@ -113,15 +111,7 @@ def _format(self): class Time(ConfigItemType): - UNITS = { - 'ns': 0.000000001, - 'us': 0.000001, - 'ms': 0.001, - 's': 1, - 'm': 60, - 'h': 3600, - 'd': 86400 - } + UNITS = {'ns': 0.000000001, 'us': 0.000001, 'ms': 0.001, 's': 1, 'm': 60, 'h': 3600, 'd': 86400} def _format(self): if self._origin: @@ -157,26 +147,22 @@ def __repr__(self): def __add__(self, other): if isinstance(other, DecimalValue): - return DecimalValue(self.value + other.value, - self.precision if self.precision is not None else other.precision) + return DecimalValue(self.value + other.value, self.precision if self.precision is not None else other.precision) return DecimalValue(self.value + other, self.precision) def __sub__(self, other): if isinstance(other, DecimalValue): - return DecimalValue(self.value - other.value, - self.precision if self.precision is not None else other.precision) + return DecimalValue(self.value - other.value, self.precision if self.precision is not None else other.precision) return DecimalValue(self.value - other, self.precision) def __mul__(self, other): if isinstance(other, DecimalValue): - return DecimalValue(self.value * other.value, - self.precision if self.precision is not None else other.precision) + return DecimalValue(self.value * other.value, self.precision if self.precision is not None else other.precision) return DecimalValue(self.value * other, self.precision) def __truediv__(self, other): if isinstance(other, DecimalValue): - return DecimalValue(self.value / other.value, - self.precision 
if self.precision is not None else other.precision) + return DecimalValue(self.value / other.value, self.precision if self.precision is not None else other.precision) return DecimalValue(self.value / other, self.precision) @@ -408,8 +394,7 @@ def _format(self): # this type is used to ensure the parameter is a valid database connection url class DBUrl(ConfigItemType): - DBURL_PATTERN = re.compile( - "^jdbc:(mysql|oceanbase):(\/\/)([a-zA-Z0-9_.-]+)(:[0-9]{1,5})?\/([a-zA-Z0-9_\-]+)(\?[a-zA-Z0-9_&;=.-]*)?$") + DBURL_PATTERN = re.compile("^jdbc:(mysql|oceanbase):(\/\/)([a-zA-Z0-9_.-]+)(:[0-9]{1,5})?\/([a-zA-Z0-9_\-]+)(\?[a-zA-Z0-9_&;=.-]*)?$") def _format(self): if not self.DBURL_PATTERN.match(str(self._origin)): diff --git a/common/version.py b/common/version.py index 74af20df..0c446b66 100644 --- a/common/version.py +++ b/common/version.py @@ -28,6 +28,8 @@ def get_obdiag_version(): Copyright (C) 2022 OceanBase License Mulan PSL v2: http://license.coscl.org.cn/MulanPSL2. You can use this software according to the terms and conditions of the Mulan PSL v2. -There is NO WARRANTY, to the extent permitted by law.''' % (OBDIAG_VERSION, OBDIAG_BUILD_TIME) +There is NO WARRANTY, to the extent permitted by law.''' % ( + OBDIAG_VERSION, + OBDIAG_BUILD_TIME, + ) return version - diff --git a/config.py b/config.py index d79fcb9c..3b9811e5 100644 --- a/config.py +++ b/config.py @@ -58,44 +58,44 @@ ''' DEFAULT_INNER_CONFIG = { - 'obdiag': { - 'basic': { - 'config_path': '~/.obdiag/config.yml', - 'config_backup_dir': '~/.obdiag/backup_conf', - 'file_number_limit': 20, - 'file_size_limit': '2G', - 'dis_rsa_algorithms':0, - }, - 'logger': { - 'log_dir': '~/.obdiag/log', - 'log_filename': 'obdiag.log', - 'file_handler_log_level': 'DEBUG', - 'log_level': 'INFO', - 'mode': 'obdiag', - 'stdout_handler_log_level': 'INFO', - }, - }, - 'check': { - 'ignore_version': False, - 'work_path': '~/.obdiag/check', - 'report': { - 'report_path': './check_report/', - 'export_type': 'table', - }, - 'package_file': '~/.obdiag/check/check_package.yaml', - 'tasks_base_path': '~/.obdiag/check/tasks/', + 'obdiag': { + 'basic': { + 'config_path': '~/.obdiag/config.yml', + 'config_backup_dir': '~/.obdiag/backup_conf', + 'file_number_limit': 20, + 'file_size_limit': '2G', + 'dis_rsa_algorithms': 0, }, - 'gather': { - 'scenes_base_path': '~/.obdiag/gather/tasks', + 'logger': { + 'log_dir': '~/.obdiag/log', + 'log_filename': 'obdiag.log', + 'file_handler_log_level': 'DEBUG', + 'log_level': 'INFO', + 'mode': 'obdiag', + 'stdout_handler_log_level': 'INFO', }, - 'rca': { - 'result_path': './rca/', + }, + 'check': { + 'ignore_version': False, + 'work_path': '~/.obdiag/check', + 'report': { + 'report_path': './check_report/', + 'export_type': 'table', }, - } + 'package_file': '~/.obdiag/check/check_package.yaml', + 'tasks_base_path': '~/.obdiag/check/tasks/', + }, + 'gather': { + 'scenes_base_path': '~/.obdiag/gather/tasks', + }, + 'rca': { + 'result_path': './rca/', + }, +} class Manager(SafeStdio): - + RELATIVE_PATH = '' def __init__(self, home_path, stdio=None): @@ -142,7 +142,7 @@ def load_config_with_defaults(self, defaults_dict): class ConfigManager(Manager): - + def __init__(self, config_file=None, stdio=None): default_config_path = os.path.join(os.path.expanduser("~"), ".obdiag", "config.yml") @@ -248,9 +248,10 @@ def get_node_config(self, type, node_ip, config_item): return node.get(config_item) return None + class InnerConfigManager(Manager): - + def __init__(self, stdio=None): inner_config_abs_path = 
os.path.abspath(INNER_CONFIG_FILE) super().__init__(inner_config_abs_path, stdio=stdio) - self.config = self.load_config_with_defaults(DEFAULT_INNER_CONFIG) \ No newline at end of file + self.config = self.load_config_with_defaults(DEFAULT_INNER_CONFIG) diff --git a/context.py b/context.py index 2e4e7b59..7d9d5126 100644 --- a/context.py +++ b/context.py @@ -17,6 +17,7 @@ from __future__ import absolute_import, division, print_function from optparse import Values + class HandlerContextNamespace: def __init__(self, spacename): @@ -149,4 +150,4 @@ def get_option(self, name, spacename=None, default=None): return namespace.get_option(name, default) if namespace else None def set_option(self, name, value): - self.namespace.set_option(name, value) \ No newline at end of file + self.namespace.set_option(name, value) diff --git a/core.py b/core.py index af760090..ab8c7f98 100644 --- a/core.py +++ b/core.py @@ -64,13 +64,15 @@ def __init__(self, stdio=None, config_path=os.path.expanduser('~/.obdiag/config. self.context = None self.inner_config_manager = InnerConfigManager(stdio) self.config_manager = ConfigManager(config_path, stdio) - if self.inner_config_manager.config.get("obdiag") is not None and self.inner_config_manager.config.get("obdiag").get( - "basic") is not None and self.inner_config_manager.config.get("obdiag").get("basic").get( - "telemetry") is not None and self.inner_config_manager.config.get("obdiag").get("basic").get("telemetry") is False: + if ( + self.inner_config_manager.config.get("obdiag") is not None + and self.inner_config_manager.config.get("obdiag").get("basic") is not None + and self.inner_config_manager.config.get("obdiag").get("basic").get("telemetry") is not None + and self.inner_config_manager.config.get("obdiag").get("basic").get("telemetry") is False + ): telemetry.work_tag = False - if self.inner_config_manager.config.get("obdiag") is not None and self.inner_config_manager.config.get("obdiag").get( - "basic") is not None and self.inner_config_manager.config.get("obdiag").get("basic").get("dis_rsa_algorithms") is not None : - disable_rsa_algorithms=self.inner_config_manager.config.get("obdiag").get("basic").get("dis_rsa_algorithms") + if self.inner_config_manager.config.get("obdiag") is not None and self.inner_config_manager.config.get("obdiag").get("basic") is not None and self.inner_config_manager.config.get("obdiag").get("basic").get("dis_rsa_algorithms") is not None: + disable_rsa_algorithms = self.inner_config_manager.config.get("obdiag").get("basic").get("dis_rsa_algorithms") dis_rsa_algorithms(disable_rsa_algorithms) def fork(self, cmds=None, options=None, stdio=None): @@ -99,8 +101,7 @@ def _print(msg, *arg, **kwarg): self._stdio_func = {} if not self.stdio: return - for func in ['start_loading', 'stop_loading', 'print', 'confirm', 'verbose', 'warn', 'exception', 'error', - 'critical', 'print_list', 'read']: + for func in ['start_loading', 'stop_loading', 'print', 'confirm', 'verbose', 'warn', 'exception', 'error', 'critical', 'print_list', 'read']: self._stdio_func[func] = getattr(self.stdio, func, _print) def set_context(self, handler_name, namespace, config): @@ -113,7 +114,7 @@ def set_context(self, handler_name, namespace, config): cmd=self.cmds, options=self.options, stdio=self.stdio, - inner_config=self.inner_config_manager.config + inner_config=self.inner_config_manager.config, ) telemetry.set_cluster_conn(config.get_ob_cluster_config) @@ -127,18 +128,11 @@ def set_context_skip_cluster_conn(self, handler_name, namespace, config): cmd=self.cmds, 
options=self.options, stdio=self.stdio, - inner_config=self.inner_config_manager.config + inner_config=self.inner_config_manager.config, ) def set_offline_context(self, handler_name, namespace): - self.context = HandlerContext( - handler_name=handler_name, - namespace=namespace, - cmd=self.cmds, - options=self.options, - stdio=self.stdio, - inner_config=self.inner_config_manager.config - ) + self.context = HandlerContext(handler_name=handler_name, namespace=namespace, cmd=self.cmds, options=self.options, stdio=self.stdio, inner_config=self.inner_config_manager.config) def get_namespace(self, spacename): if spacename in self.namespaces: @@ -149,17 +143,7 @@ def get_namespace(self, spacename): return namespace def call_plugin(self, plugin, spacename=None, target_servers=None, **kwargs): - args = { - 'namespace': spacename, - 'namespaces': self.namespaces, - 'cluster_config': None, - 'obproxy_config': None, - 'ocp_config': None, - 'cmd': self.cmds, - 'options': self.options, - 'stdio': self.stdio, - 'target_servers': target_servers - } + args = {'namespace': spacename, 'namespaces': self.namespaces, 'cluster_config': None, 'obproxy_config': None, 'ocp_config': None, 'cmd': self.cmds, 'options': self.options, 'stdio': self.stdio, 'target_servers': target_servers} args.update(kwargs) self._call_stdio('verbose', 'Call %s ' % (plugin)) return plugin(**args) @@ -176,17 +160,7 @@ def ssh_clients_connect(self, servers, ssh_clients, user_config, fail_exit=False success = True for server in servers: if server not in ssh_clients: - client = SshClient( - SshConfig( - server.ip, - user_config.username, - user_config.password, - user_config.key_file, - user_config.port, - user_config.timeout - ), - self.stdio - ) + client = SshClient(SshConfig(server.ip, user_config.username, user_config.password, user_config.key_file, user_config.port, user_config.timeout), self.stdio) error = client.connect(stdio=connect_io) connect_status[server] = status = CheckStatus() if error is not True: @@ -252,7 +226,7 @@ def gather_function(self, function_type, opt): handler = GatherSceneHandler(self.context) return handler.handle() elif function_type == 'gather_ash_report': - handler =GatherAshReportHandler(self.context) + handler = GatherAshReportHandler(self.context) return handler.handle() else: self._call_stdio('error', 'Not support gather function: {0}'.format(function_type)) @@ -295,7 +269,7 @@ def analyze_fuction(self, function_type, opt): else: self._call_stdio('error', 'Not support analyze function: {0}'.format(function_type)) return False - + def check(self, opts): config = self.config_manager if not config: @@ -306,26 +280,22 @@ def check(self, opts): self.set_context('check', 'check', config) obproxy_check_handler = None observer_check_handler = None - if self.context.obproxy_config.get("servers") is not None and len(self.context.obproxy_config.get("servers"))>0: - obproxy_check_handler = CheckHandler(self.context,check_target_type="obproxy") + if self.context.obproxy_config.get("servers") is not None and len(self.context.obproxy_config.get("servers")) > 0: + obproxy_check_handler = CheckHandler(self.context, check_target_type="obproxy") obproxy_check_handler.handle() obproxy_check_handler.execute() - if self.context.cluster_config.get("servers") is not None and len(self.context.cluster_config.get("servers"))>0: - observer_check_handler = CheckHandler(self.context,check_target_type="observer") + if self.context.cluster_config.get("servers") is not None and len(self.context.cluster_config.get("servers")) > 0: + 
observer_check_handler = CheckHandler(self.context, check_target_type="observer") observer_check_handler.handle() observer_check_handler.execute() if obproxy_check_handler is not None: obproxy_report_path = os.path.expanduser(obproxy_check_handler.report.get_report_path()) if os.path.exists(obproxy_report_path): - self.stdio.print( - "Check obproxy finished. For more details, please run cmd '" + Fore.YELLOW + " cat {0} ".format( - obproxy_check_handler.report.get_report_path()) + Style.RESET_ALL + "'") + self.stdio.print("Check obproxy finished. For more details, please run cmd '" + Fore.YELLOW + " cat {0} ".format(obproxy_check_handler.report.get_report_path()) + Style.RESET_ALL + "'") if observer_check_handler is not None: observer_report_path = os.path.expanduser(observer_check_handler.report.get_report_path()) if os.path.exists(observer_report_path): - self.stdio.print( - "Check observer finished. For more details, please run cmd'" + Fore.YELLOW + " cat {0} ".format( - observer_check_handler.report.get_report_path()) + Style.RESET_ALL + "'") + self.stdio.print("Check observer finished. For more details, please run cmd'" + Fore.YELLOW + " cat {0} ".format(observer_check_handler.report.get_report_path()) + Style.RESET_ALL + "'") def check_list(self, opts): config = self.config_manager diff --git a/dependencies/check_dependencies.py b/dependencies/check_dependencies.py index 07e59e25..bbfc804e 100644 --- a/dependencies/check_dependencies.py +++ b/dependencies/check_dependencies.py @@ -34,6 +34,7 @@ def check_client_dependencies(): import argparse import paramiko import traceback + install_flag = True except Exception as err: print("import error!!!,cause:[{0}]".format(err)) diff --git a/err.py b/err.py index 0fc92745..9ca45125 100644 --- a/err.py +++ b/err.py @@ -17,6 +17,7 @@ from __future__ import absolute_import, division, print_function + class OBDIAGErrorCode(object): def __init__(self, code, msg): @@ -24,7 +25,7 @@ def __init__(self, code, msg): self.msg = msg def __str__(self): - return self.msg + return self.msg class OBDIAGErrorCodeTemplate(object): @@ -55,6 +56,7 @@ def __init__(self, operation, key, value=None, is_global=False): self.value = value self.is_global = is_global + class OBDIAGErrorSuggestion(object): def __init__(self, msg, auto_fix=False, fix_eval=[]): @@ -71,14 +73,11 @@ def __init__(self, msg, auto_fix=False, fix_eval=[]): self.fix_eval = fix_eval if isinstance(fix_eval, list) else [fix_eval] def format(self, *args, **kwargs): - return OBDIAGErrorSuggestion( - self._msg.format(*args, **kwargs), - auto_fix=kwargs.get('auto_fix', self.auto_fix), - fix_eval=kwargs.get('fix_eval', self.fix_eval) - ) + return OBDIAGErrorSuggestion(self._msg.format(*args, **kwargs), auto_fix=kwargs.get('auto_fix', self.auto_fix), fix_eval=kwargs.get('fix_eval', self.fix_eval)) + class CheckStatus(object): - + FAIL = "FAIL" PASS = "PASS" WAIT = "WAIT" @@ -88,6 +87,7 @@ def __init__(self, status=WAIT, error=None, suggests=[]): self.error = error self.suggests = suggests + SUG_SSH_FAILED = OBDIAGErrorSuggestionTemplate('Please check user config and network') EC_SSH_CONNECT = OBDIAGErrorCodeTemplate(1013, '{user}@{ip} connect failed: {message}') -EC_SQL_EXECUTE_FAILED = OBDIAGErrorCodeTemplate(5000, "{sql} execute failed") \ No newline at end of file +EC_SQL_EXECUTE_FAILED = OBDIAGErrorCodeTemplate(5000, "{sql} execute failed") diff --git a/handler/__init__.py b/handler/__init__.py index 2b33595e..4f2405d9 100644 --- a/handler/__init__.py +++ b/handler/__init__.py @@ -14,4 +14,4 @@ @time: 
2022/6/20 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/handler/analyzer/__init__.py b/handler/analyzer/__init__.py index 29f4a072..d3a64b03 100644 --- a/handler/analyzer/__init__.py +++ b/handler/analyzer/__init__.py @@ -14,4 +14,4 @@ @time: 2023/9/20 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/handler/analyzer/analyze_flt_trace.py b/handler/analyzer/analyze_flt_trace.py index 3faf5b4f..267f5222 100644 --- a/handler/analyzer/analyze_flt_trace.py +++ b/handler/analyzer/analyze_flt_trace.py @@ -84,7 +84,6 @@ def init_option(self): self.output = int(output_option) return True - def handle(self): if not self.init_option(): self.stdio.error('init option failed') @@ -125,10 +124,7 @@ def handle_from_node(node): return analyze_tuples def __handle_from_node(self, node, old_files, local_store_parent_dir): - resp = { - "skip": False, - "error": "" - } + resp = {"skip": False, "error": ""} remote_ip = node.get("ip") if self.is_ssh else '127.0.0.1' remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") @@ -147,10 +143,7 @@ def __handle_from_node(self, node, old_files, local_store_parent_dir): ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node) except Exception as e: ssh = None - self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -177,6 +170,7 @@ def __get_online_log_file(self, ssh_helper, node, gather_path, local_store_dir): home_path = node.get("home_path") log_path = os.path.join(home_path, "log") local_store_path = "{0}/{1}".format(local_store_dir, str(node.get("host_type")) + '-' + str(self.flt_trace_id)) + def check_filename(filename): if os.path.exists(filename): # 文件已存在,尝试添加后缀 @@ -192,17 +186,10 @@ def check_filename(filename): return filename local_store_path = check_filename(local_store_path) - grep_cmd = "grep '{grep_args}' {log_dir}/*trace.log* > {gather_path}/{log_name} ".format( - grep_args=self.flt_trace_id, - gather_path=gather_path, - log_name=self.flt_trace_id, - log_dir=log_path) + grep_cmd = "grep '{grep_args}' {log_dir}/*trace.log* > {gather_path}/{log_name} ".format(grep_args=self.flt_trace_id, gather_path=gather_path, log_name=self.flt_trace_id, log_dir=log_path) self.stdio.verbose("grep files, run cmd = [{0}]".format(grep_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) - log_full_path = "{gather_path}/{log_name}".format( - log_name=self.flt_trace_id, - gather_path=gather_path - ) + log_full_path = "{gather_path}/{log_name}".format(log_name=self.flt_trace_id, gather_path=gather_path) download_file(True, ssh_helper, log_full_path, local_store_path, self.stdio) def __get_offline_log_file(self, ssh_helper, log_full_path, local_store_dir): @@ -213,10 +200,7 @@ def __get_offline_log_file(self, ssh_helper, log_full_path, local_store_dir): local_store_path = os.path.join(local_store_dir, self.flt_trace_id) log_name_list = self.__get_log_name_list_offline() if self.flt_trace_id is not None and (len(log_name_list) > 0): - grep_cmd = "grep -e '{grep_args}' {log_file} > {local_store_path} ".format( - grep_args=self.flt_trace_id, - log_file=' '.join(log_name_list), - local_store_path=local_store_path) + grep_cmd = "grep -e '{grep_args}' {log_file} > 
{local_store_path} ".format(grep_args=self.flt_trace_id, log_file=' '.join(log_name_list), local_store_path=local_store_path) LocalClient(self.stdio).run(grep_cmd) download_file(False, ssh_helper, log_full_path, local_store_path, self.stdio) @@ -269,6 +253,7 @@ def remap_key(di): di[new_key] = temp di.pop(key) return di + li = [] with open(file, 'r', encoding='utf-8') as f: content = f.read() @@ -300,9 +285,9 @@ def parse_line(self, node, line, trace): if countStr == 1: return json.loads(data_start + line[idx:-1] + data_end) else: - line_last = line[idx + 1:len(line) + 1] + line_last = line[idx + 1 : len(line) + 1] idx_last = line_last.find(traced_prefix) - new_line = line[idx:(idx + idx_last + 1)] + new_line = line[idx : (idx + idx_last + 1)] if len(new_line) > 10: return json.loads(data_start + new_line + data_end) else: @@ -368,4 +353,4 @@ def parse_file(self, file): if file[1].endswith('.json'): return self.__parse_json_file(file[0], file[1], self.flt_trace_id) else: - return self.__parse_log_file(file[0], file[1], self.flt_trace_id) \ No newline at end of file + return self.__parse_log_file(file[0], file[1], self.flt_trace_id) diff --git a/handler/analyzer/analyze_log.py b/handler/analyzer/analyze_log.py index b658d51d..3a46e134 100644 --- a/handler/analyzer/analyze_log.py +++ b/handler/analyzer/analyze_log.py @@ -127,9 +127,7 @@ def handle(self): if not self.init_config(): self.stdio.error('init config failed') return False - local_store_parent_dir = os.path.join(self.gather_pack_dir, - "analyze_pack_{0}".format(TimeUtils.timestamp_to_filename_time( - TimeUtils.get_current_us_timestamp()))) + local_store_parent_dir = os.path.join(self.gather_pack_dir, "analyze_pack_{0}".format(TimeUtils.timestamp_to_filename_time(TimeUtils.get_current_us_timestamp()))) self.stdio.verbose("Use {0} as pack dir.".format(local_store_parent_dir)) analyze_tuples = [] @@ -153,20 +151,17 @@ def handle_from_node(node): self.stdio.print(title) self.stdio.print(table) FileUtil.write_append(os.path.join(local_store_parent_dir, "result_details.txt"), title + str(table) + "\n\nDetails:\n\n") - + for m in range(len(summary_details_list)): for n in range(len(field_names)): - extend = "\n\n" if n == len(field_names) -1 else "\n" + extend = "\n\n" if n == len(field_names) - 1 else "\n" FileUtil.write_append(os.path.join(local_store_parent_dir, "result_details.txt"), field_names[n] + ": " + str(summary_details_list[m][n]) + extend) last_info = "For more details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(local_store_parent_dir, "result_details.txt")) self.stdio.print(last_info) return analyze_tuples def __handle_from_node(self, node, local_store_parent_dir): - resp = { - "skip": False, - "error": "" - } + resp = {"skip": False, "error": ""} node_results = [] remote_ip = node.get("ip") if self.is_ssh else '127.0.0.1' remote_user = node.get("ssh_username") @@ -176,20 +171,17 @@ def __handle_from_node(self, node, local_store_parent_dir): remote_home_path = node.get("home_path") self.stdio.verbose("Sending Collect Shell Command to node {0} ...".format(remote_ip)) DirectoryUtil.mkdir(path=local_store_parent_dir, stdio=self.stdio) - if "ssh_type" in node and node["ssh_type"]=="docker": - local_store_dir= "{0}/docker_{1}".format(local_store_parent_dir, node["container_name"]) + if "ssh_type" in node and node["ssh_type"] == "docker": + local_store_dir = "{0}/docker_{1}".format(local_store_parent_dir, node["container_name"]) else: local_store_dir = "{0}/{1}".format(local_store_parent_dir, 
remote_ip.replace(".", "_")) - DirectoryUtil.mkdir(path=local_store_dir,stdio=self.stdio) + DirectoryUtil.mkdir(path=local_store_dir, stdio=self.stdio) ssh_failed = False ssh = None try: - ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key,node, self.stdio) + ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.error("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.error("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -209,9 +201,7 @@ def __handle_from_node(self, node, local_store_parent_dir): self.__pharse_offline_log_file(ssh_helper=ssh, log_name=log_name, local_store_dir=local_store_dir) analyze_log_full_path = "{0}/{1}".format(local_store_dir, str(log_name).strip(".").replace("/", "_")) else: - self.__pharse_log_file(ssh_helper=ssh, node=node, log_name=log_name, - gather_path=gather_dir_full_path, - local_store_dir=local_store_dir) + self.__pharse_log_file(ssh_helper=ssh, node=node, log_name=log_name, gather_path=gather_dir_full_path, local_store_dir=local_store_dir) analyze_log_full_path = "{0}/{1}".format(local_store_dir, log_name) self.stdio.start_loading('analyze log start') file_result = self.__parse_log_lines(analyze_log_full_path) @@ -227,21 +217,15 @@ def __handle_log_list(self, ssh, node, resp): else: log_list = self.__get_log_name_list(ssh, node) if len(log_list) > self.file_number_limit: - self.stdio.warn("{0} The number of log files is {1}, out of range (0,{2}]".format(node.get("ip"), len(log_list), - self.file_number_limit)) - resp["skip"] = True, - resp["error"] = "Too many files {0} > {1}, Please adjust the analyze time range".format(len(log_list), - self.file_number_limit) + self.stdio.warn("{0} The number of log files is {1}, out of range (0,{2}]".format(node.get("ip"), len(log_list), self.file_number_limit)) + resp["skip"] = (True,) + resp["error"] = "Too many files {0} > {1}, Please adjust the analyze time range".format(len(log_list), self.file_number_limit) if self.directly_analyze_files: - resp["error"] = "Too many files {0} > {1}, " \ - "Please adjust the number of incoming files".format(len(log_list), - self.file_number_limit) + resp["error"] = "Too many files {0} > {1}, " "Please adjust the number of incoming files".format(len(log_list), self.file_number_limit) return log_list, resp elif len(log_list) == 0: - self.stdio.warn( - "{0} The number of log files is {1}, No files found, " - "Please adjust the query limit".format(node.get("ip"), len(log_list))) - resp["skip"] = True, + self.stdio.warn("{0} The number of log files is {1}, No files found, " "Please adjust the query limit".format(node.get("ip"), len(log_list))) + resp["skip"] = (True,) resp["error"] = "No files found" return log_list, resp return log_list, resp @@ -256,13 +240,11 @@ def __get_log_name_list(self, ssh_helper, node): if self.scope == "observer" or self.scope == "rootservice" or self.scope == "election": get_oblog = "ls -1 -F %s/*%s.log* | awk -F '/' '{print $NF}'" % (log_path, self.scope) else: - get_oblog = "ls -1 -F %s/observer.log* %s/rootservice.log* %s/election.log* | awk -F '/' '{print $NF}'" % \ - (log_path, log_path, log_path) + get_oblog = "ls -1 -F %s/observer.log* %s/rootservice.log* %s/election.log* | awk -F '/' 
'{print $NF}'" % (log_path, log_path, log_path) log_name_list = [] log_files = SshClient(self.stdio).run(ssh_helper, get_oblog) if self.is_ssh else LocalClient(self.stdio).run(get_oblog) if log_files: - log_name_list = get_logfile_name_list(self.is_ssh, ssh_helper, self.from_time_str, self.to_time_str, - log_path, log_files, self.stdio) + log_name_list = get_logfile_name_list(self.is_ssh, ssh_helper, self.from_time_str, self.to_time_str, log_path, log_files, self.stdio) else: self.stdio.error("Unable to find the log file. Please provide the correct --ob_install_dir, the default is [/home/admin/oceanbase]") return log_name_list @@ -294,25 +276,15 @@ def __pharse_log_file(self, ssh_helper, node, log_name, gather_path, local_store log_path = os.path.join(home_path, "log") local_store_path = "{0}/{1}".format(local_store_dir, log_name) if self.grep_args is not None: - grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format( - grep_args=self.grep_args, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format(grep_args=self.grep_args, gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose("grep files, run cmd = [{0}]".format(grep_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) if self.is_ssh else LocalClient(self.stdio).run(grep_cmd) - log_full_path = "{gather_path}/{log_name}".format( - log_name=log_name, - gather_path=gather_path - ) + log_full_path = "{gather_path}/{log_name}".format(log_name=log_name, gather_path=gather_path) download_file(self.is_ssh, ssh_helper, log_full_path, local_store_path, self.stdio) else: real_time_logs = ["observer.log", "rootservice.log", "election.log", "trace.log", "observer.log.wf", "rootservice.log.wf", "election.log.wf", "trace.log.wf"] if log_name in real_time_logs: - cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format( - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format(gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose("copy files, run cmd = [{0}]".format(cp_cmd)) SshClient(self.stdio).run(ssh_helper, cp_cmd) if self.is_ssh else LocalClient(self.stdio).run(cp_cmd) log_full_path = "{gather_path}/{log_name}".format(log_name=log_name, gather_path=gather_path) @@ -328,10 +300,7 @@ def __pharse_offline_log_file(self, ssh_helper, log_name, local_store_dir): """ local_store_path = "{0}/{1}".format(local_store_dir, str(log_name).strip(".").replace("/", "_")) if self.grep_args is not None: - grep_cmd = "grep -e '{grep_args}' {log_name} >> {local_store_path} ".format( - grep_args=self.grep_args, - log_name=log_name, - local_store_path=local_store_path) + grep_cmd = "grep -e '{grep_args}' {log_name} >> {local_store_path} ".format(grep_args=self.grep_args, log_name=log_name, local_store_path=local_store_path) self.stdio.verbose("grep files, run cmd = [{0}]".format(grep_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) if self.is_ssh else LocalClient(self.stdio).run(grep_cmd) else: @@ -382,29 +351,15 @@ def __parse_log_lines(self, file_full_path): if len(ret_code) > 1: trace_id = self.__get_trace_id(line) if error_dict.get(ret_code) is None: - error_dict[ret_code] = { - "file_name": file_full_path, - "count": 1, - "first_found_time": line_time, - "last_found_time": line_time, - "trace_id_list": {trace_id} if len(trace_id) > 0 else {} - } + error_dict[ret_code] = 
{"file_name": file_full_path, "count": 1, "first_found_time": line_time, "last_found_time": line_time, "trace_id_list": {trace_id} if len(trace_id) > 0 else {}} else: count = error_dict[ret_code]["count"] + 1 - first_found_time = error_dict[ret_code]["first_found_time"] if error_dict[ret_code][ - "first_found_time"] < line_time else line_time - last_found_time = error_dict[ret_code]["last_found_time"] if error_dict[ret_code][ - "last_found_time"] > line_time else line_time + first_found_time = error_dict[ret_code]["first_found_time"] if error_dict[ret_code]["first_found_time"] < line_time else line_time + last_found_time = error_dict[ret_code]["last_found_time"] if error_dict[ret_code]["last_found_time"] > line_time else line_time trace_id_list = list(error_dict[ret_code]["trace_id_list"]) if not (trace_id in trace_id_list): trace_id_list.append(trace_id) - error_dict[ret_code] = { - "file_name": file_full_path, - "count": count, - "first_found_time": first_found_time, - "last_found_time": last_found_time, - "trace_id_list": trace_id_list - } + error_dict[ret_code] = {"file_name": file_full_path, "count": count, "first_found_time": first_found_time, "last_found_time": last_found_time, "trace_id_list": trace_id_list} self.stdio.verbose("complete parse log {0}".format(file_full_path)) return error_dict @@ -416,7 +371,7 @@ def __get_time_from_ob_log_line(self, log_line): """ time_str = "" if len(log_line) >= 28: - time_str = log_line[1: log_line.find(']')] + time_str = log_line[1 : log_line.find(']')] return time_str def __get_trace_id(self, log_line): @@ -430,7 +385,6 @@ def __get_trace_id(self, log_line): if find and find.group(1): return find.group(1).strip('[').strip(']') - def __get_log_level(self, log_line): """ Get the log level from the observer's log line @@ -454,10 +408,7 @@ def __get_overall_summary(node_summary_tuples, is_files=False): :param node_summary_tuple :return: a string indicating the overall summary """ - field_names = [ - "Node", "Status", "FileName", "ErrorCode", - "Message", "Count" - ] + field_names = ["Node", "Status", "FileName", "ErrorCode", "Message", "Count"] t = [] t_details = [] field_names_details = field_names @@ -477,29 +428,26 @@ def __get_overall_summary(node_summary_tuples, is_files=False): error_code_info = OB_RET_DICT.get(ret_key, "") if len(error_code_info) > 3: is_empty = False - t.append([node, - "Error:" + tup[2] if is_err else "Completed", - ret_value["file_name"], - ret_key, - error_code_info[1], - ret_value["count"] - ]) - t_details.append([node, - "Error:" + tup[2] if is_err else "Completed", - ret_value["file_name"], - ret_key, - error_code_info[1], - ret_value["count"], - error_code_info[2], - error_code_info[3], - ret_value["first_found_time"], - ret_value["last_found_time"], - str(ret_value["trace_id_list"]) - ]) + t.append([node, "Error:" + tup[2] if is_err else "Completed", ret_value["file_name"], ret_key, error_code_info[1], ret_value["count"]]) + t_details.append( + [ + node, + "Error:" + tup[2] if is_err else "Completed", + ret_value["file_name"], + ret_key, + error_code_info[1], + ret_value["count"], + error_code_info[2], + error_code_info[3], + ret_value["first_found_time"], + ret_value["last_found_time"], + str(ret_value["trace_id_list"]), + ] + ) if is_empty: t.append([node, "\033[32mPASS\033[0m", None, None, None, None]) t_details.append([node, "\033[32mPASS\033[0m", None, None, None, None, None, None, None, None, None]) title = "\nAnalyze OceanBase Offline Log Summary:\n" if is_files else "\nAnalyze OceanBase Online Log 
Summary:\n"
     t.sort(key=lambda x: (x[0], x[1], x[2], x[3]), reverse=False)
     t_details.sort(key=lambda x: (x[0], x[1], x[2], x[3]), reverse=False)
-    return title, field_names, t, t_details
\ No newline at end of file
+    return title, field_names, t, t_details
diff --git a/handler/analyzer/log_parser/__init__.py b/handler/analyzer/log_parser/__init__.py
index f5e16f89..5a69f20f 100644
--- a/handler/analyzer/log_parser/__init__.py
+++ b/handler/analyzer/log_parser/__init__.py
@@ -14,4 +14,4 @@
 @time: 2023/11/07
 @file: __init__.py
 @desc:
-"""
\ No newline at end of file
+"""
diff --git a/handler/analyzer/log_parser/log_entry.py b/handler/analyzer/log_parser/log_entry.py
index 405192e0..c56c5b25 100644
--- a/handler/analyzer/log_parser/log_entry.py
+++ b/handler/analyzer/log_parser/log_entry.py
@@ -34,14 +34,13 @@ def find_field_end(data, end_chs=",)}({|][", start=0, end=-1):
 
 class LogEntry(object):
     WF_LEVELS = {"ERROR", "WARN", "FATAL"}
-    __slots__ = ("id", "log_type", "log_file_offset", "log_text_length", "timestamp_us", "parse_succ", "is_trace",
-                 "log_level", "component", "source_func", "source_file", "source_line", "th_id", "co_id", "trace_id",
-                 "lt", "dc", "log_text", "content")
+    __slots__ = ("id", "log_type", "log_file_offset", "log_text_length", "timestamp_us", "parse_succ", "is_trace", "log_level", "component", "source_func", "source_file", "source_line", "th_id", "co_id", "trace_id", "lt", "dc", "log_text", "content")
     """
     Log entry parsed from the log text.
     This entry should be complete, meaning that we can resume the original log from this entry.
    At this stage we store the raw text in the field "log_text".
     """
+
     def __init__(self, log_id, log_type, file_offset, file_length):
         # key_info: only these vars dumped to files
         self.id = log_id
@@ -83,7 +82,7 @@ def parse_from_data(self, data, time_slice):
         while idx != -1:
             next_idx = data.find(next_brac, idx)
             if next_idx == -1:
-                bare_elems.append(data[idx:len(data)].strip())
+                bare_elems.append(data[idx : len(data)].strip())
                 break
             if cur_brac == "[":
                 in_brac_elems.append(data[idx:next_idx].strip())
@@ -97,7 +96,7 @@ def parse_from_data(self, data, time_slice):
                     bare_elems.append(bare_elem)
             idx = next_idx + 1
             cur_brac, next_brac = next_brac, cur_brac
-        time_slice[0] += (time.time()-st)
+        time_slice[0] += time.time() - st
         st = time.time()
         if len(in_brac_elems) < 3 and "(tid:" in data:
             return
@@ -121,12 +120,12 @@ def parse_from_data(self, data, time_slice):
                 in_brac_elems = [in_brac_elems[0], ""] + in_brac_elems[1:]
             # has func and component, level is WARN ERROR FATAL
             elif len(real_bare_content) == 4:
-                bare_elems = [real_bare_content[0], real_bare_content[2]+" "+real_bare_content[3]] + bare_elems[1:]
+                bare_elems = [real_bare_content[0], real_bare_content[2] + " " + real_bare_content[3]] + bare_elems[1:]
                 in_brac_elems = [in_brac_elems[0], real_bare_content[1]] + in_brac_elems[1:]
             else:
-                time_slice[1] += (time.time() - st)
+                time_slice[1] += time.time() - st
                 raise Exception()
-        time_slice[1] += (time.time()-st)
+        time_slice[1] += time.time() - st
         st = time.time()
         self.timestamp_us = TimeUtils.datetime_to_timestamp(in_brac_elems[0])
         self.log_level = bare_elems[0]
@@ -141,7 +140,7 @@ def parse_from_data(self, data, time_slice):
             fl_tup = file_line_str.split(':')
             self.source_file = fl_tup[0]
             self.source_line = int(fl_tup[1])
-        time_slice[2] += (time.time()-st)
+        time_slice[2] += time.time() - st
         st = time.time()
         th_idx = 2
         # locate the positions of lt and dc
@@ -181,10 +180,10 @@ def parse_from_data(self, data, time_slice):
             else:
                 # no dc, lt present
                 self.lt = in_brac_elems[lt_idx]
-                self.trace_id = in_brac_elems[lt_idx-1]
+                self.trace_id = in_brac_elems[lt_idx - 1]
                 self.th_id = int(in_brac_elems[th_idx])
                 if lt_idx - 2 != th_idx:
-                    self.co_id = int(in_brac_elems[lt_idx-2])
+                    self.co_id = int(in_brac_elems[lt_idx - 2])
 
         else:
             # dc present
@@ -199,7 +198,7 @@ def parse_from_data(self, data, time_slice):
                 self.co_id = in_brac_elems[dc_idx - 3]
             self.th_id = in_brac_elems[th_idx]
 
-        time_slice[3] += (time.time()-st)
+        time_slice[3] += time.time() - st
         st = time.time()
         # parse the log content
         # extract the non-header part as content
@@ -215,13 +214,12 @@ def parse_from_data(self, data, time_slice):
         self.co_id = int(self.co_id) if self.co_id is not None else None
         # self.lt = int(self.lt.split("=")[1]) if self.lt is not None else None
         # self.dc = int(self.dc.split("=")[1]) if self.dc is not None else None
-        time_slice[4] += (time.time()-st)
+        time_slice[4] += time.time() - st
         self.parse_succ = True
         return
 
     @staticmethod
-    def generate_log_entries_from_string_reader(
-            reader_io, parse_time_slice, max_read_n=-1, start_offset=0, start_log_id=0):
+    def generate_log_entries_from_string_reader(reader_io, parse_time_slice, max_read_n=-1, start_offset=0, start_log_id=0):
         """
         parse log entries from start point that user specified.
         A posix read-like stream reader interface.
@@ -267,15 +265,14 @@ def generate_log_entries_from_string_reader(
                     log_entry_text = "\n".join(log_entry_texts)
                     st = time.time()
                     try:
-                        log_entry = LogEntry(log_id, None, log_entry_begin_offset,
-                                             line_offset - log_entry_begin_offset)
+                        log_entry = LogEntry(log_id, None, log_entry_begin_offset, line_offset - log_entry_begin_offset)
                         log_entry.parse_from_data(log_entry_text, parse_time_slice)
                         success_log_entries.append(log_entry)
                         log_id += 1
                         n_read += 1
                     except Exception as e:
                         irregular_logs.append(log_entry_text)
-                    parse_time_slice[5] += (time.time() - st)
+                    parse_time_slice[5] += time.time() - st
                     log_entry_begin_offset = line_offset
                     log_entry_texts = [line]
                 else:
diff --git a/handler/analyzer/log_parser/tree.py b/handler/analyzer/log_parser/tree.py
index 89692745..7587b801 100644
--- a/handler/analyzer/log_parser/tree.py
+++ b/handler/analyzer/log_parser/tree.py
@@ -162,6 +162,7 @@ def format_len(elem):
             keyword = elem
             ex = 0
         return '{: <' + str(keyword + ex) + 's}'
+
     return '| %s|' % '| '.join(format_len(elem) for elem in self.fmt_elements).format(*args)
 
 @property
@@ -206,11 +207,17 @@ def detail(self, index, node: Node):
             time_str = 'Elapsed: {}'.format(TimeUtils.trans_time(et - st))
         else:
             time_str = ''
-        return '{} - {} {} {}' \
-               '{}{}{}{}{}'.format(index, node.value['trace_data']['name'] if node.value else '', time_str,
-                                   ('\n' + ' ' * (3 + len(str(index)))) if hosts else '', hosts,
-                                   ('\n' + ' ' * (3 + len(str(index)))) if tags else '', tags,
-                                   ('\n' + ' ' * (3 + len(str(index)))) if logs else '', logs)
+        return '{} - {} {} {}' '{}{}{}{}{}'.format(
+            index,
+            node.value['trace_data']['name'] if node.value else '',
+            time_str,
+            ('\n' + ' ' * (3 + len(str(index)))) if hosts else '',
+            hosts,
+            ('\n' + ' ' * (3 + len(str(index)))) if tags else '',
+            tags,
+            ('\n' + ' ' * (3 + len(str(index)))) if logs else '',
+            logs,
+        )
 
     def record_node_info(self, node: Node):
         self.counter += 1
@@ -268,8 +275,7 @@ def recurse(node: Node, parent_info):
             if max_recursion != -1 and len(parent_info) / 2 > max_recursion:
                 return
             if parent_info:
-                node.tree_info = parent_info[:-1].replace(node_chars[0:2], child_chars[0:2]).
\ - replace(node_chars[2:4], child_chars[2:4]) + parent_info[-1] + node.tree_info = parent_info[:-1].replace(node_chars[0:2], child_chars[0:2]).replace(node_chars[2:4], child_chars[2:4]) + parent_info[-1] else: node.tree_info = '' meta_data.record_node_info(node) @@ -301,7 +307,7 @@ def _traverse(self, root_node: Node, max_recursion=3, top_n=5): topN = heapq.nlargest(top_n, leaf_nodes.items(), lambda x: x[1].elapsed_time_us) topN_meta = TreeMeta() topN_counter = 0 - table = PrettyTable(['ID','Leaf Span Name','Elapsed Time','HOSTS']) + table = PrettyTable(['ID', 'Leaf Span Name', 'Elapsed Time', 'HOSTS']) table.align = 'l' while topN: topN_counter += 1 @@ -316,7 +322,7 @@ def _traverse(self, root_node: Node, max_recursion=3, top_n=5): yield str(line) yield topN_meta.detail_header for node in topN_li: - yield topN_meta.detail(node.index,node) + yield topN_meta.detail(node.index, node) meta = self.meta[root_node] yield meta.details_data diff --git a/handler/checker/__init__.py b/handler/checker/__init__.py index 681bdff5..70691bef 100644 --- a/handler/checker/__init__.py +++ b/handler/checker/__init__.py @@ -15,4 +15,3 @@ @file: __init__.py @desc: """ - diff --git a/handler/checker/check_exception.py b/handler/checker/check_exception.py index b1019974..8e1b4736 100644 --- a/handler/checker/check_exception.py +++ b/handler/checker/check_exception.py @@ -52,24 +52,24 @@ def __init__(self, msg=None, obj=None): class ResultFalseException(CheckException): def __init__(self, msg=None, obj=None): - super(ResultFalseException,self).__init__(msg, obj) + super(ResultFalseException, self).__init__(msg, obj) class ResultFailException(CheckException): def __init__(self, msg=None, obj=None): - super(ResultFailException,self).__init__(msg, obj) + super(ResultFailException, self).__init__(msg, obj) class VerifyFalseException(CheckException): def __init__(self, msg=None, obj=None): - super(VerifyFalseException,self).__init__(msg, obj) + super(VerifyFalseException, self).__init__(msg, obj) class VerifyFailException(CheckException): def __init__(self, msg=None, obj=None): - super(VerifyFailException,self).__init__(msg, obj) + super(VerifyFailException, self).__init__(msg, obj) class TaskException(CheckException): def __init__(self, msg=None, obj=None): - super(TaskException,self).__init__(msg, obj) + super(TaskException, self).__init__(msg, obj) diff --git a/handler/checker/check_handler.py b/handler/checker/check_handler.py index f225832c..128cce72 100644 --- a/handler/checker/check_handler.py +++ b/handler/checker/check_handler.py @@ -33,37 +33,40 @@ from common.tool import YamlUtils from common.tool import StringUtils + class CheckHandler: - def __init__(self, context, check_target_type="observer"): + def __init__(self, context, check_target_type="observer"): self.context = context self.stdio = context.stdio # init input parameters self.report = None self.tasks = None self.work_path = os.path.expanduser(self.context.inner_config["check"]["work_path"] or "~/.obdiag/check") - self.export_report_path=os.path.expanduser(self.context.inner_config["check"]["report"]["report_path"] or "./check_report/") + self.export_report_path = os.path.expanduser(self.context.inner_config["check"]["report"]["report_path"] or "./check_report/") self.export_report_type = self.context.inner_config["check"]["report"]["export_type"] or "table" self.ignore_version = self.context.inner_config["check"]["ignore_version"] or False self.cluster = self.context.cluster_config - if check_target_type=="observer": - self.nodes 
=self.context.cluster_config.get("servers")
+        if check_target_type == "observer":
+            self.nodes = self.context.cluster_config.get("servers")
         if check_target_type == "obproxy":
             self.nodes = self.context.obproxy_config.get("servers")
         self.tasks_base_path = os.path.expanduser(self.work_path + "/tasks/")
         self.check_target_type = check_target_type
-        self.stdio.verbose("CheckHandler input. ignore_version is {0} , cluster is {1} , nodes is {2}, "
-                           "export_report_path is {3}, export_report_type is {4} , check_target_type is {5}, "
-                           " tasks_base_path is {6}.".format(self.ignore_version,
-                                                             self.cluster.get(
-                                                                 "ob_cluster_name") or self.cluster.get(
-                                                                 "obproxy_cluster_name"),
-                                                             StringUtils.node_cut_passwd_for_log(self.nodes),
-                                                             self.export_report_path,
-                                                             self.export_report_type,
-                                                             self.check_target_type,
-                                                             self.tasks_base_path))
+        self.stdio.verbose(
+            "CheckHandler input. ignore_version is {0} , cluster is {1} , nodes is {2}, "
+            "export_report_path is {3}, export_report_type is {4} , check_target_type is {5}, "
+            " tasks_base_path is {6}.".format(
+                self.ignore_version,
+                self.cluster.get("ob_cluster_name") or self.cluster.get("obproxy_cluster_name"),
+                StringUtils.node_cut_passwd_for_log(self.nodes),
+                self.export_report_path,
+                self.export_report_type,
+                self.check_target_type,
+                self.tasks_base_path,
+            )
+        )
         # case_package_file
 
         # build case_package_file
@@ -90,37 +93,31 @@ def __init__(self, context, check_target_type="observer"):
             raise CheckException("tasks_base_path {0} is not exist".format(tasks_base_path))
         self.stdio.verbose("tasks_base_path is " + self.tasks_base_path)
         # input_param
-        self.options=self.context.options
+        self.options = self.context.options
 
         # add ssher
-        new_node=[]
+        new_node = []
         for node in self.nodes:
             # add ssher
             ssher = None
             try:
-                ssher = SshHelper(True, node.get("ip"),
-                                  node.get("ssh_username"),
-                                  node.get("ssh_password"),
-                                  node.get("ssh_port"),
-                                  node.get("ssh_key_file"),
-                                  node)
+                ssher = SshHelper(True, node.get("ip"), node.get("ssh_username"), node.get("ssh_password"), node.get("ssh_port"), node.get("ssh_key_file"), node)
             except Exception as e:
-                self.stdio.warn("StepBase get SshHelper fail on{0} ,Exception: {1}".format(node.get("ip"), e))
+                self.stdio.warn("CheckHandler get SshHelper fail on {0}, Exception: {1}".format(node.get("ip"), e))
             node["ssher"] = ssher
             new_node.append(node)
-        self.nodes=new_node
-        self.version=get_version(self.nodes, self.check_target_type,self.cluster, self.stdio)
+        self.nodes = new_node
+        self.version = get_version(self.nodes, self.check_target_type, self.cluster, self.stdio)
 
         # add OBConnectorPool
+        obConnectorPool = None  # keep the name bound for the finally block below even if pool construction fails
         try:
-            obConnectorPool=checkOBConnectorPool(context,3,self.cluster)
+            obConnectorPool = checkOBConnectorPool(context, 3, self.cluster)
 
         except Exception as e:
             self.stdio.warn("obConnector init error. 
Error info is {0}".format(e)) finally: self.context.set_variable('check_obConnector_pool', obConnectorPool) - def handle(self): try: package_name = None @@ -157,8 +154,7 @@ def handle(self): self.get_all_tasks() filter_tasks = self.get_package_tasks("filter") if len(filter_tasks) > 0: - self.tasks = {key: value for key, value in self.tasks.items() if key not in - filter_tasks} + self.tasks = {key: value for key, value in self.tasks.items() if key not in filter_tasks} new_tasks = {} for filter_task in filter_tasks: for task_name, task_value in self.tasks.items(): @@ -205,7 +201,7 @@ def execute_one(self, task_name): try: self.stdio.verbose("execute tasks is {0}".format(task_name)) # Verify if the version is within a reasonable range - report = TaskReport(self.context,task_name) + report = TaskReport(self.context, task_name) if not self.ignore_version: version = self.version if version: @@ -226,12 +222,8 @@ def execute_one(self, task_name): def execute(self): try: - self.stdio.verbose( - "execute_all_tasks. the number of tasks is {0} ,tasks is {1}".format(len(self.tasks.keys()), - self.tasks.keys())) - self.report = CheckReport(self.context, export_report_path=self.export_report_path, - export_report_type=self.export_report_type, - report_target=self.check_target_type) + self.stdio.verbose("execute_all_tasks. the number of tasks is {0} ,tasks is {1}".format(len(self.tasks.keys()), self.tasks.keys())) + self.report = CheckReport(self.context, export_report_path=self.export_report_path, export_report_type=self.export_report_type, report_target=self.check_target_type) # one of tasks to execute for task in self.tasks: t_report = self.execute_one(task) @@ -242,29 +234,22 @@ def execute(self): except Exception as e: self.stdio.error("Internal error :{0}".format(e)) + class checkOBConnectorPool: - def __init__(self,context, max_size, cluster): + def __init__(self, context, max_size, cluster): self.max_size = max_size - self.cluster=cluster + self.cluster = cluster self.connections = queue.Queue(maxsize=max_size) - self.stdio=context.stdio + self.stdio = context.stdio self.stdio.verbose("obConnectorPool init success!") try: for i in range(max_size): - conn = OBConnector( - ip=self.cluster.get("db_host"), - port=self.cluster.get("db_port"), - username=self.cluster.get("tenant_sys").get("user"), - password=self.cluster.get("tenant_sys").get("password"), - stdio=self.stdio, - timeout=10000 - ) + conn = OBConnector(ip=self.cluster.get("db_host"), port=self.cluster.get("db_port"), username=self.cluster.get("tenant_sys").get("user"), password=self.cluster.get("tenant_sys").get("password"), stdio=self.stdio, timeout=10000) self.connections.put(conn) self.stdio.verbose("obConnectorPool init success!") except Exception as e: self.stdio.error("obConnectorPool init fail! 
err: {0}".format(e))
 
-
     def get_connection(self):
         try:
             return self.connections.get()
@@ -277,4 +262,3 @@ def release_connection(self, conn):
         if conn is not None:
             self.connections.put(conn)
         return
-
diff --git a/handler/checker/check_list.py b/handler/checker/check_list.py
index f885fe74..53ab952c 100644
--- a/handler/checker/check_list.py
+++ b/handler/checker/check_list.py
@@ -34,16 +34,12 @@ def handle(self):
         files = [f for f in entries if os.path.isfile(os.path.join(self.work_path, f))]
         for file in files:
             if "check_package" in file:
-                cases_map = {"all": {"name": "all", "command": "obdiag check",
-                                     "info_en": "default check all task without filter",
-                                     "info_cn": "默认执行除filter组里的所有巡检项"}}
+                cases_map = {"all": {"name": "all", "command": "obdiag check", "info_en": "default check all task without filter", "info_cn": "默认执行除filter组里的所有巡检项"}}
                 # Obtain which files match and corresponding header files
                 # Using string segmentation methods
                 parts = file.split('_')
                 if len(parts) < 1:
-                    self.stdio.warn(
-                        "invalid check package name :{0} , Please don't add file, which 'check_package' in the name".format(
-                            file))
+                    self.stdio.warn("invalid check package name: {0}. Please don't add files with 'check_package' in the name".format(file))
                     continue
                 target = parts[0]
                 file = "{0}/{1}".format(self.work_path, file)
@@ -63,10 +59,11 @@ def handle(self):
                     else:
                         package_target = "{0}_cases".format(target)
 
-                    cases_map[package_data] = {"name": package_data,
-                                               "command": "obdiag check --{0}={1}".format(package_target,
-                                                                                          package_data),
-                                               "info_en": package_file_data[package_data].get("info_en") or "",
-                                               "info_cn": package_file_data[package_data].get("info_cn") or ""}
+                    cases_map[package_data] = {
+                        "name": package_data,
+                        "command": "obdiag check --{0}={1}".format(package_target, package_data),
+                        "info_en": package_file_data[package_data].get("info_en") or "",
+                        "info_cn": package_file_data[package_data].get("info_cn") or "",
+                    }
                 Util.print_title("check cases about {0}".format(target))
                 Util.print_scene(cases_map)
diff --git a/handler/checker/check_report.py b/handler/checker/check_report.py
index c0790061..4f2e95b0 100644
--- a/handler/checker/check_report.py
+++ b/handler/checker/check_report.py
@@ -34,8 +34,7 @@
 
 class CheckReport:
-    def __init__(self, context, report_target="observer", export_report_path="./check_report/",
-                 export_report_type="table"):
+    def __init__(self, context, report_target="observer", export_report_path="./check_report/", export_report_type="table"):
         self.context = context
         self.stdio = context.stdio
         self.tasks = []
@@ -62,8 +61,7 @@ def add_task_report(self, task_report):
         self.tasks.append(task_report)
 
     def export_report(self):
-        self.stdio.verbose(
-            "export report to {0}.{1}, export type is {1}".format(self.report_path, self.export_report_type))
+        self.stdio.verbose("export report to {0}.{1}, export type is {1}".format(self.report_path, self.export_report_type))
         try:
             if self.export_report_type == "table":
                 self.export_report_table()
@@ -128,9 +126,7 @@ def report_tobeMap(self):
         allMap["critical"] = criticalMap
         allMap["warning"] = warningMap
         allMap["all"] = allInfoMap
-        telemetry.push_check_info(self.report_target,
-                                  {"fail_cases": list(failMap), "critical_cases": list(criticalMap),
-                                   "warning_cases": list(warningMap)})
+        telemetry.push_check_info(self.report_target, {"fail_cases": list(failMap), "critical_cases": list(criticalMap), "warning_cases": list(warningMap)})
         return allMap
 
     def export_report_table(self):
@@ -169,9 +165,7 @@ def export_report_table(self):
                     report_all_tb.add_row([task.name, '\n'.join(task.all())])
                 if len(task.all_fail()) == 0 and len(task.all_critical()) == 0 and len(task.all_warning()) == 0:
                     report_all_tb.add_row([task.name, "all pass"])
-            telemetry.push_check_info(self.report_target,
-                                      {"fail_cases": list(set(failMap)), "critical_cases": list(set(criticalMap)),
-                                       "warning_cases": list(set(warningMap))})
+            telemetry.push_check_info(self.report_target, {"fail_cases": list(set(failMap)), "critical_cases": list(set(criticalMap)), "warning_cases": list(set(warningMap))})
 
             fp = open(self.report_path + ".table", 'a+', encoding="utf8")
diff --git a/handler/checker/check_task.py b/handler/checker/check_task.py
index 7e634e0a..0b4798f9 100644
--- a/handler/checker/check_task.py
+++ b/handler/checker/check_task.py
@@ -18,8 +18,7 @@
 import threading
 
 from common.ob_connector import OBConnector
-from handler.checker.check_exception import StepResultFailException, \
-    StepExecuteFailException, StepResultFalseException, TaskException
+from handler.checker.check_exception import StepResultFailException, StepExecuteFailException, StepResultFalseException, TaskException
 from handler.checker.step.stepbase import StepBase
 from common.tool import StringUtils
 from common.scene import filter_by_version
@@ -51,19 +50,20 @@ def execute(self):
         # TODO: the logic here needs optimizing: if one node fails to execute, the subsequent steps will not be executed.
         work_threads = []
         for node in self.nodes:
-            t = threading.Thread(target=self.execute_one_node, args=(steps_nu,node))
+            t = threading.Thread(target=self.execute_one_node, args=(steps_nu, node))
             work_threads.append(t)
             t.start()
         for t in work_threads:
             t.join()
 
         self.stdio.verbose("task execute end")
-    def execute_one_node(self,steps_nu,node):
+
+    def execute_one_node(self, steps_nu, node):
         try:
             self.stdio.verbose("run task in node: {0}".format(StringUtils.node_cut_passwd_for_log(node)))
             steps = self.task[steps_nu]
             nu = 1
-            task_variable_dict={}
+            task_variable_dict = {}
             for step in steps["steps"]:
                 try:
                     self.stdio.verbose("step nu: {0}".format(nu))
@@ -72,7 +72,7 @@ def execute_one_node(self, steps_nu, node):
                     step_run = StepBase(self.context, step, node, self.cluster, task_variable_dict)
                     self.stdio.verbose("step nu: {0} initted, to execute".format(nu))
                     step_run.execute(self.report)
-                    task_variable_dict=step_run.update_task_variable_dict()
+                    task_variable_dict = step_run.update_task_variable_dict()
                     if "report_type" in step["result"] and step["result"]["report_type"] == "execution":
                         self.stdio.verbose("report_type stop this step")
                         return
@@ -94,5 +94,3 @@ def execute_one_node(self, steps_nu, node):
         except Exception as e:
             self.stdio.error("TaskBase execute Exception: {0}".format(e))
             raise e
-
-
diff --git a/handler/checker/result/__init__.py b/handler/checker/result/__init__.py
index 681bdff5..70691bef 100644
--- a/handler/checker/result/__init__.py
+++ b/handler/checker/result/__init__.py
@@ -15,4 +15,3 @@
 @file: __init__.py
 @desc:
 """
-
diff --git a/handler/checker/result/result.py b/handler/checker/result/result.py
index 9e505699..b112720e 100644
--- a/handler/checker/result/result.py
+++ b/handler/checker/result/result.py
@@ -24,8 +24,9 @@
 # validation process, handle it as fail); VerifyException (verification failed, report needs to be combined with
 # report_type)
 
+
 class CheckResult:
-    def __init__(self,context, step_result_info, variable_dict):
+    def __init__(self, context, step_result_info, variable_dict):
         self.context = context
         self.stdio = context.stdio
         self.step_result_info = step_result_info
@@ -42,8 +43,7 @@ def execute(self):
         # if verify in step.result[]
         if "verify" in self.step_result_info:
             try:
-
verify = VerifyResult(self.context,self.step_result_info["verify"], - self.variable_dict, self.step_result_info["set_value"], verify_type) + verify = VerifyResult(self.context, self.step_result_info["verify"], self.variable_dict, self.step_result_info["set_value"], verify_type) result = verify.execute() self.stdio.verbose("verify.execute end. and result is {0}".format(result)) @@ -52,12 +52,9 @@ def execute(self): raise ResultFailException(e) if not result: err_msg = self.build_msg() - self.stdio.verbose( - "verify.execute end. and result is false return ResultFalseException err_msg:{0}".format(err_msg)) + self.stdio.verbose("verify.execute end. and result is false return ResultFalseException err_msg:{0}".format(err_msg)) raise ResultFalseException(err_msg) - - def build_msg(self): s = "the step is not pass" if 'err_msg' in self.step_result_info: @@ -69,5 +66,3 @@ def replacer(match): return str(d.get(key, match.group(0))) return re.sub(r'#\{(\w+)\}', replacer, s) - - diff --git a/handler/checker/result/verify.py b/handler/checker/result/verify.py index 19f470d4..e59d7318 100644 --- a/handler/checker/result/verify.py +++ b/handler/checker/result/verify.py @@ -68,8 +68,7 @@ def _verify_base(self): self.stdio.verbose("the result verify is {0}".format(self.expr)) real_shell = re.sub(r'\$\{([^}]+)\}', self.expr, check_verify_shell) for env in self.env_dict: - self.stdio.verbose("add env: {0} ,the value:{1} , the type:{2}".format(env, self.env_dict[env], - type(self.env_dict[env]))) + self.stdio.verbose("add env: {0} ,the value:{1} , the type:{2}".format(env, self.env_dict[env], type(self.env_dict[env]))) if isinstance(self.env_dict[env], int): real_shell = env + '=' + str(self.env_dict[env]) + '\n' + real_shell else: @@ -89,15 +88,10 @@ def _verify_max(self): try: the_num = self.env_dict[self.now_step_set_value_name] if isinstance(the_num, decimal.Decimal): - the_num= int(self.env_dict[self.now_step_set_value_name]) + the_num = int(self.env_dict[self.now_step_set_value_name]) if not isinstance(the_num, (int, float, decimal.Decimal)): - self.stdio.warn( - "{0} is {1} and the type is {2}, not int or float or decimal ! set it to 0.".format(self.now_step_set_value_name, - self.env_dict[ - self.now_step_set_value_name], - type(self.env_dict[ - self.now_step_set_value_name]))) - the_num=0 + self.stdio.warn("{0} is {1} and the type is {2}, not int or float or decimal ! set it to 0.".format(self.now_step_set_value_name, self.env_dict[self.now_step_set_value_name], type(self.env_dict[self.now_step_set_value_name]))) + the_num = 0 range_str = self.expr return int(the_num) < int(range_str) except Exception as e: @@ -108,15 +102,10 @@ def _verify_min(self): try: the_num = self.env_dict[self.now_step_set_value_name] if isinstance(the_num, decimal.Decimal): - the_num= int(self.env_dict[self.now_step_set_value_name]) + the_num = int(self.env_dict[self.now_step_set_value_name]) if not isinstance(the_num, (int, float, decimal.Decimal)): - self.stdio.warn( - "{0} is {1} and the type is {2}, not int or float or decimal ! set it to 0.".format(self.now_step_set_value_name, - self.env_dict[ - self.now_step_set_value_name], - type(self.env_dict[ - self.now_step_set_value_name]))) - the_num=0 + self.stdio.warn("{0} is {1} and the type is {2}, not int or float or decimal ! 
set it to 0.".format(self.now_step_set_value_name, self.env_dict[self.now_step_set_value_name], type(self.env_dict[self.now_step_set_value_name])))
+                the_num = 0
             range_str = self.expr
             return int(the_num) > int(range_str)
         except Exception as e:
@@ -127,15 +116,10 @@ def _verify_equal(self):
         try:
             the_num = self.env_dict[self.now_step_set_value_name]
             if isinstance(the_num, decimal.Decimal):
-                the_num= int(self.env_dict[self.now_step_set_value_name])
+                the_num = int(self.env_dict[self.now_step_set_value_name])
             if not isinstance(the_num, (int, float, decimal.Decimal)):
-                self.stdio.warn(
-                    "{0} is {1} and the type is {2}, not int or float or decimal ! set it to 0.".format(self.now_step_set_value_name,
-                                                                                                        self.env_dict[
-                                                                                                            self.now_step_set_value_name],
-                                                                                                        type(self.env_dict[
-                                                                                                            self.now_step_set_value_name])))
-                the_num=0
+                self.stdio.warn("{0} is {1} and the type is {2}, not int or float or decimal ! set it to 0.".format(self.now_step_set_value_name, self.env_dict[self.now_step_set_value_name], type(self.env_dict[self.now_step_set_value_name])))
+                the_num = 0
             range_str = self.expr
-            return int(self.env_dict[self.now_step_set_value_name]) == int(range_str)
+            return int(the_num) == int(range_str)
         except Exception as e:
diff --git a/handler/checker/step/__init__.py b/handler/checker/step/__init__.py
index 681bdff5..70691bef 100644
--- a/handler/checker/step/__init__.py
+++ b/handler/checker/step/__init__.py
@@ -15,4 +15,3 @@
 @file: __init__.py
 @desc:
 """
-
diff --git a/handler/checker/step/data_size.py b/handler/checker/step/data_size.py
index b8d0ff0c..dfb06d51 100644
--- a/handler/checker/step/data_size.py
+++ b/handler/checker/step/data_size.py
@@ -23,7 +23,7 @@
 
 class DataSizeHandler:
-    def __init__(self,context, step, node, task_variable_dict):
+    def __init__(self, context, step, node, task_variable_dict):
         self.context = context
         self.stdio = context.stdio
         self.stdio.verbose("init DataSizeHandler")
@@ -34,14 +34,12 @@ def __init__(self, context, step, node, task_variable_dict):
         self.task_variable_dict = task_variable_dict
 
         try:
-            self.ssh_helper=self.node["ssher"]
+            self.ssh_helper = self.node["ssher"]
             if self.ssh_helper is None:
                 raise Exception("self.ssh_helper is None.")
         except Exception as e:
-            self.stdio.error(
-                "DataSizeHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e))
-            raise Exception(
-                "DataSizeHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e))
+            self.stdio.error("DataSizeHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e))
+            raise Exception("DataSizeHandler ssh init fail . 
Please check the NODES conf Exception : {0} .".format(e)) # step report self.parameter = [] @@ -55,8 +53,8 @@ def execute(self): self.stdio.verbose("DataSizeHandler execute: {0}".format(self.step["key"])) s = self.step["key"] value = self.task_variable_dict[s] - self.task_variable_dict[s]=Capacity(value).btyes() - self.stdio.verbose("DataSizeHandler set {0} = {1}".format(s,self.task_variable_dict[s])) + self.task_variable_dict[s] = Capacity(value).btyes() + self.stdio.verbose("DataSizeHandler set {0} = {1}".format(s, self.task_variable_dict[s])) except Exception as e: self.stdio.error("DataSizeHandler execute Exception: {0}".format(e).strip()) raise StepExecuteFailException("DataSizeHandler execute Exception: {0}".format(e).strip()) diff --git a/handler/checker/step/get_system_parameter.py b/handler/checker/step/get_system_parameter.py index af3341c1..040ee492 100644 --- a/handler/checker/step/get_system_parameter.py +++ b/handler/checker/step/get_system_parameter.py @@ -23,7 +23,7 @@ class GetSystemParameterHandler: - def __init__(self,context, step, node, task_variable_dict): + def __init__(self, context, step, node, task_variable_dict): self.context = context self.stdio = context.stdio self.stdio.verbose("init GetSystemParameterHandler") @@ -34,14 +34,12 @@ def __init__(self,context, step, node, task_variable_dict): self.task_variable_dict = task_variable_dict try: - self.ssh_helper=self.node["ssher"] + self.ssh_helper = self.node["ssher"] if self.ssh_helper is None: raise Exception("self.ssh_helper is None.") except Exception as e: - self.stdio.error( - "GetSystemParameterHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e)) - raise Exception( - "GetSystemParameterHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e)) + self.stdio.error("GetSystemParameterHandler ssh init fail . Please check the NODES conf Exception : {0} .".format(e)) + raise Exception("GetSystemParameterHandler ssh init fail . 
Please check the NODES conf Exception : {0} .".format(e)) # step report self.parameter = [] @@ -53,8 +51,7 @@ def get_parameter(self, parameter_name): parameter_value = self.ssh_helper.ssh_exec_cmd("cat /proc/sys/" + parameter_name).strip() self.ssh_helper.ssh_close() except Exception as e: - self.stdio.warn( - "get {0} fail:{1} .please check, the parameter_value will be set -1".format(parameter_name, e)) + self.stdio.warn("get {0} fail:{1} .please check, the parameter_value will be set -1".format(parameter_name, e)) parameter_value = str("-1") return parameter_value diff --git a/handler/checker/step/sql.py b/handler/checker/step/sql.py index b572c201..3514859d 100644 --- a/handler/checker/step/sql.py +++ b/handler/checker/step/sql.py @@ -23,7 +23,7 @@ class StepSQLHandler: - def __init__(self,context, step, task_variable_dict): + def __init__(self, context, step, task_variable_dict): try: self.context = context self.stdio = context.stdio @@ -32,9 +32,9 @@ def __init__(self,context, step, task_variable_dict): self.tenant_mode = None self.sys_database = None self.database = None - self.ob_connector_pool=self.context.get_variable('check_obConnector_pool',None) + self.ob_connector_pool = self.context.get_variable('check_obConnector_pool', None) if self.ob_connector_pool is not None: - self.ob_connector=self.ob_connector_pool.get_connection() + self.ob_connector = self.ob_connector_pool.get_connection() if self.ob_connector is None: raise Exception("self.ob_connector is None.") except Exception as e: @@ -59,8 +59,8 @@ def execute(self): self.stdio.verbose("StepSQLHandler execute: {0}".format(sql)) data = self.ob_connector.execute_sql(sql) self.stdio.verbose("execute_sql result:{0}".format(data)) - if data is None or len(data) == 0: - data="" + if data is None or len(data) == 0: + data = "" else: data = data[0][0] if data is None: diff --git a/handler/checker/step/ssh.py b/handler/checker/step/ssh.py index 282477e2..3f7c6247 100644 --- a/handler/checker/step/ssh.py +++ b/handler/checker/step/ssh.py @@ -23,7 +23,7 @@ class SshHandler: - def __init__(self,context, step, node, task_variable_dict): + def __init__(self, context, step, node, task_variable_dict): self.context = context self.stdio = context.stdio self.ssh_report_value = None @@ -31,14 +31,12 @@ def __init__(self,context, step, node, task_variable_dict): self.step = step self.node = node try: - self.ssh_helper=self.node["ssher"] + self.ssh_helper = self.node["ssher"] if self.ssh_helper is None: raise Exception("self.ssh_helper is None.") except Exception as e: - self.stdio.error( - "SshHandler init fail. Please check the NODES conf. node: {0}. Exception : {1} .".format(node, e)) - raise Exception( - "SshHandler init fail. Please check the NODES conf node: {0} Exception : {1} .".format(node, e)) + self.stdio.error("SshHandler init fail. Please check the NODES conf. node: {0}. Exception : {1} .".format(node, e)) + raise Exception("SshHandler init fail. 
Please check the NODES conf node: {0} Exception : {1} .".format(node, e))
         self.task_variable_dict = task_variable_dict
         self.parameter = []
         self.report = TaskReport
@@ -56,8 +54,7 @@ def execute(self):
                 ssh_report_value = ssh_report_value.strip()
             self.stdio.verbose("ssh result:{0}".format(Util.convert_to_number(ssh_report_value)))
             if "result" in self.step and "set_value" in self.step["result"]:
-                self.stdio.verbose("ssh result set {0}".format(self.step["result"]["set_value"],
-                                                               Util.convert_to_number(ssh_report_value)))
+                self.stdio.verbose("ssh result set {0} = {1}".format(self.step["result"]["set_value"], Util.convert_to_number(ssh_report_value)))
                 self.task_variable_dict[self.step["result"]["set_value"]] = Util.convert_to_number(ssh_report_value)
         except Exception as e:
             self.stdio.error("ssh execute Exception:{0}".format(e).strip())
diff --git a/handler/checker/step/stepbase.py b/handler/checker/step/stepbase.py
index c3afe63c..897566f5 100644
--- a/handler/checker/step/stepbase.py
+++ b/handler/checker/step/stepbase.py
@@ -16,8 +16,7 @@
 @desc:
 """
 
-from handler.checker.check_exception import StepResultFailException, StepExecuteFailException, \
-    ResultFalseException, ResultFailException, StepResultFalseException
+from handler.checker.check_exception import StepResultFailException, StepExecuteFailException, ResultFalseException, ResultFailException, StepResultFalseException
 from handler.checker.step.data_size import DataSizeHandler
 from handler.checker.step.get_system_parameter import GetSystemParameterHandler
 from handler.checker.result.result import CheckResult
@@ -45,9 +44,7 @@ def execute(self, report):
                 self.task_variable_dict["remote_ip"] = self.node["ip"]
             elif "ssh_type" in self.node and self.node["ssh_type"] == "docker":
                 self.stdio.verbose("execute ssh_type is docker")
-                self.task_variable_dict["remote_ip"] = \
-                    docker.from_env().containers.get(self.node["container_name"]).attrs['NetworkSettings']['Networks'][
-                        'bridge']["IPAddress"]
+                self.task_variable_dict["remote_ip"] = docker.from_env().containers.get(self.node["container_name"]).attrs['NetworkSettings']['Networks']['bridge']["IPAddress"]
             for node in self.node:
                 self.task_variable_dict["remote_{0}".format(node)] = self.node[node]
             if "type" not in self.step:
@@ -67,19 +64,15 @@ def execute(self, report):
             except Exception as e:
                 self.stdio.error("StepBase handler.execute fail {0}".format(e))
                 if self.step["type"] == "sql":
-                    report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get(
-                        "obproxy_cluster_name") or no_cluster_name_msg, e), "fail")
+                    report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get("obproxy_cluster_name") or no_cluster_name_msg, e), "fail")
                 else:
-                    report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "",
-                                                      self.node.get("container_name") or self.task_variable_dict.get(
-                                                          "remote_ip") or "", e), "fail")
+                    report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "", self.node.get("container_name") or self.task_variable_dict.get("remote_ip") or "", e), "fail")
                 raise StepExecuteFailException("StepBase handler.execute fail {0}".format(e))
 
             try:
                 self.task_variable_dict = handler.update_step_variable_dict()
                 self.stdio.verbose("self.task_variable_dict: {0}".format(self.task_variable_dict))
-                if self.step["type"] == "get_system_parameter" and "result" in self.step and "set_value" in self.step[
-                    "result"] and self.task_variable_dict[self.step["result"]["set_value"]] == "":
+                if self.step["type"] == "get_system_parameter" and "result" in self.step and "set_value" in self.step["result"] and self.task_variable_dict[self.step["result"]["set_value"]] == "":
                     return
 
                 if "result" in self.step:
@@ -107,12 +100,9 @@ def execute(self, report):
                     if level == "execution":
                         level = "warning"
                     if self.step["type"] == "sql":
-                        report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get(
-                            "obproxy_cluster_name") or no_cluster_name_msg, resultException), level)
+                        report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get("obproxy_cluster_name") or no_cluster_name_msg, resultException), level)
                     else:
-                        report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "",
-                                                          self.node.get("container_name") or self.task_variable_dict.get(
-                                                              "remote_ip") or "", resultException), level)
+                        report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "", self.node.get("container_name") or self.task_variable_dict.get("remote_ip") or "", resultException), level)
                     if level == "critical":
                         raise StepResultFailException(resultException)
                     raise StepResultFalseException(resultException)
@@ -121,12 +111,9 @@ def execute(self, report):
                 # verification failed: this is a fail-type error, usually an exception raised in the verify stage, and it needs to be fixed immediately
                 self.stdio.error("step_base ResultFailException:{0}".format(resultFailException))
                 if self.step["type"] == "sql":
-                    report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get(
-                        "obproxy_cluster_name") or no_cluster_name_msg, resultFailException), "fail")
+                    report.add("[cluster:{0}] {1}".format(self.cluster.get("ob_cluster_name") or self.cluster.get("obproxy_cluster_name") or no_cluster_name_msg, resultFailException), "fail")
                 else:
-                    report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "",
-                                                      self.node.get("container_name") or self.task_variable_dict.get(
-                                                          "remote_ip") or "", resultFailException), "fail")
+                    report.add("[{0}:{1}] {2}".format(self.node.get("ssh_type") or "", self.node.get("container_name") or self.task_variable_dict.get("remote_ip") or "", resultFailException), "fail")
                 raise StepResultFailException(resultFailException)
 
         except Exception as e:
diff --git a/handler/gather/__init__.py b/handler/gather/__init__.py
index 29f4a072..d3a64b03 100644
--- a/handler/gather/__init__.py
+++ b/handler/gather/__init__.py
@@ -14,4 +14,4 @@
 @time: 2023/9/20
 @file: __init__.py
 @desc:
-"""
\ No newline at end of file
+"""
diff --git a/handler/gather/gather_ash_report.py b/handler/gather/gather_ash_report.py
index c445f263..2c84a5df 100644
--- a/handler/gather/gather_ash_report.py
+++ b/handler/gather/gather_ash_report.py
@@ -52,13 +52,7 @@ def __init__(self, context, gather_pack_dir='./'):
         self.observer_nodes = self.context.cluster_config.get("servers")
         try:
             self.obconn = OBConnector(
-                ip=self.cluster.get("db_host"),
-                port=self.cluster.get("db_port"),
-                username=self.cluster.get("tenant_sys").get("user"),
-                password=self.cluster.get("tenant_sys").get("password"),
-                stdio=self.stdio,
-                timeout=10000,
-                database="oceanbase"
+                ip=self.cluster.get("db_host"), port=self.cluster.get("db_port"), username=self.cluster.get("tenant_sys").get("user"), password=self.cluster.get("tenant_sys").get("password"), stdio=self.stdio, timeout=10000, database="oceanbase"
             )
         except Exception as e:
             self.stdio.error("Failed to connect to database: {0}".format(e))
@@ -74,15 +68,15 @@ def handle(self):
         self.__init_report_path()
         self.execute()
         self.__print_result()
+
     def version_check(self):
         observer_version = ""
         try:
             observer_version = get_observer_version_by_sql(self.ob_cluster, self.stdio)
         except Exception as e:
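             # the SQL-based version lookup failed; fall back to reading the observer version over SSH from the first configured node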
if len(self.observer_nodes) > 0: - ssher=SshHelper(self.observer_nodes[0]["ip"], self.observer_nodes[0]["ssh_port"], self.observer_nodes[0]["ssh_username"], self.observer_nodes[0]["ssh_password"]) - observer_version = get_observer_version(True, ssher, - self.observer_nodes[0]["home_path"],self.stdio) + ssher = SshHelper(self.observer_nodes[0]["ip"], self.observer_nodes[0]["ssh_port"], self.observer_nodes[0]["ssh_username"], self.observer_nodes[0]["ssh_password"]) + observer_version = get_observer_version(True, ssher, self.observer_nodes[0]["home_path"], self.stdio) else: self.stdio.warn("RCAHandler Failed to get observer version:{0}".format(e)) self.stdio.verbose("RCAHandler.init get observer version: {0}".format(observer_version)) @@ -107,13 +101,12 @@ def execute(self): raise OBDIAGException("ash report data is empty") # save ash_report_data - self.ash_report_file_name = "ash_report_{0}.txt".format( - TimeUtils.timestamp_to_filename_time(self.gather_timestamp)) - self.ash_report_file_name=os.path.join(self.report_path, self.ash_report_file_name) + self.ash_report_file_name = "ash_report_{0}.txt".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp)) + self.ash_report_file_name = os.path.join(self.report_path, self.ash_report_file_name) with open(self.ash_report_file_name, 'w+') as f: f.write(ash_report) - self.stdio.print("save ash report file name: "+ Fore.YELLOW +"{0}".format(self.ash_report_file_name)+Style.RESET_ALL) + self.stdio.print("save ash report file name: " + Fore.YELLOW + "{0}".format(self.ash_report_file_name) + Style.RESET_ALL) self.result_summary_file_name = os.path.join(self.report_path, "result_summary.txt") with open(self.result_summary_file_name, 'w+') as f: f.write(self.ash_report_file_name) @@ -129,8 +122,6 @@ def __init_report_path(self): except Exception as e: self.stdio.error("init_report_path failed, error:{0}".format(e)) - - def init_option(self): options = self.context.options from_option = Util.get_option(options, 'from') @@ -139,7 +130,7 @@ def init_option(self): sql_id_option = Util.get_option(options, 'sql_id') report_type_option = Util.get_option(options, 'report_type') wait_class_option = Util.get_option(options, 'wait_class') - store_dir_option = Util.get_option(options, 'store_dir' ) + store_dir_option = Util.get_option(options, 'store_dir') since_option = "30m" if from_option is not None and to_option is not None: @@ -149,18 +140,15 @@ def init_option(self): self.from_time_str = from_option self.to_time_str = to_option except OBDIAGFormatException: - self.stdio.exception( - 'Error: Datetime is invalid. Must be in format yyyy-mm-dd hh:mm:ss. from_datetime={0}, to_datetime={1}'.format( - from_option, to_option)) + self.stdio.exception('Error: Datetime is invalid. Must be in format yyyy-mm-dd hh:mm:ss. 
from_datetime={0}, to_datetime={1}'.format(from_option, to_option))
             return False
         if to_timestamp <= from_timestamp:
             self.stdio.exception('Error: from datetime is larger than to datetime, please check.')
             return False
-        elif (from_option is None or to_option is None):
+        elif from_option is None or to_option is None:
             now_time = datetime.datetime.now()
             self.to_time_str = (now_time + datetime.timedelta(minutes=0)).strftime('%Y-%m-%d %H:%M:%S')
-            self.from_time_str = (now_time - datetime.timedelta(
-                seconds=TimeUtils.parse_time_length_to_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
+            self.from_time_str = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_length_to_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
             self.stdio.print('gather from_time: {0}, to_time: {1}'.format(self.from_time_str, self.to_time_str))
         else:
             self.stdio.warn('No time option provided, default processing is based on the last 30 minutes')
             self.stdio.print('gather from_time: {0}, to_time: {1}'.format(self.from_time_str, self.to_time_str))
         if store_dir_option:
             if not os.path.exists(os.path.abspath(store_dir_option)):
-                self.stdio.warn('warn: args --store_dir [{0}] incorrect: No such directory, Now create it'.format(
-                    os.path.abspath(store_dir_option)))
+                self.stdio.warn('warn: args --store_dir [{0}] incorrect: No such directory, Now create it'.format(os.path.abspath(store_dir_option)))
                 os.makedirs(os.path.abspath(store_dir_option))
             self.gather_pack_dir = os.path.abspath(store_dir_option)
         if sql_id_option:
@@ -198,11 +185,12 @@ def init_option(self):
             self.gather_pack_dir = store_dir_option
         else:
             self.gather_pack_dir = "./"
-        self.stdio.print("from_time: {0}, to_time: {1}, sql_id: {2}, trace_id: {3}, report_type: {4}, wait_class: {5}, store_dir: {6}".format(self.from_time_str, self.to_time_str, self.sql_id, self.trace_id, self.report_type, self.wait_class,self.gather_pack_dir))
+        self.stdio.print(
+            "from_time: {0}, to_time: {1}, sql_id: {2}, trace_id: {3}, report_type: {4}, wait_class: {5}, store_dir: {6}".format(self.from_time_str, self.to_time_str, self.sql_id, self.trace_id, self.report_type, self.wait_class, self.gather_pack_dir)
+        )
         return True
 
     def __print_result(self):
-        self.stdio.print(Fore.YELLOW + "\nGather ash_report results stored in this directory: {0}".format(
-            self.report_path) + Style.RESET_ALL)
-        self.stdio.print("")
\ No newline at end of file
+        self.stdio.print(Fore.YELLOW + "\nGather ash_report results stored in this directory: {0}".format(self.report_path) + Style.RESET_ALL)
+        self.stdio.print("")
diff --git a/handler/gather/gather_awr.py b/handler/gather/gather_awr.py
index 1e64111a..c7eebbe1 100644
--- a/handler/gather/gather_awr.py
+++ b/handler/gather/gather_awr.py
@@ -36,8 +36,8 @@ def __init__(self, context, gather_pack_dir='./'):
         self.gather_pack_dir = gather_pack_dir
         self.cluster_name = None
         self.cluster_id = None
-        if self.context.get_variable("gather_timestamp", None) :
-            self.gather_timestamp=self.context.get_variable("gather_timestamp")
+        if self.context.get_variable("gather_timestamp", None):
+            self.gather_timestamp = self.context.get_variable("gather_timestamp")
         else:
             self.gather_timestamp = TimeUtils.get_current_us_timestamp()
 
@@ -57,9 +57,7 @@ def handle(self):
             self.stdio.error('init config failed')
             return False
         # example of the format of pack dir for this command: (gather_pack_dir)/gather_pack_20190610123344
-        pack_dir_this_command = os.path.join(self.gather_pack_dir,
-                                             "gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(
-                                                 self.gather_timestamp)))
+        pack_dir_this_command = os.path.join(self.gather_pack_dir, "gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp)))
         self.stdio.verbose("Use {0} as pack dir.".format(pack_dir_this_command))
         DirectoryUtil.mkdir(path=pack_dir_this_command, stdio=self.stdio)
         gather_tuples = []
@@ -89,14 +87,10 @@ def handle_awr_from_ocp(ocp_url, cluster_name):
             if resp["skip"]:
                 return
             if resp["error"]:
-                gather_tuples.append((ocp_url, True,
-                                      resp["error_msg"], 0, int(time.time() - st),
-                                      "Error:{0}".format(resp["error_msg"]), ""))
+                gather_tuples.append((ocp_url, True, resp["error_msg"], 0, int(time.time() - st), "Error:{0}".format(resp["error_msg"]), ""))
                 return
             gather_pack_path_dict[(cluster_name, ocp_url)] = resp["gather_pack_path"]
-            gather_tuples.append((cluster_name, False, "",
-                                  os.path.getsize(resp["gather_pack_path"]),
-                                  int(time.time() - st), resp["gather_pack_path"]))
+            gather_tuples.append((cluster_name, False, "", os.path.getsize(resp["gather_pack_path"]), int(time.time() - st), resp["gather_pack_path"]))
 
-        ocp_threads = [threading.Thread(None, handle_awr_from_ocp(self.ocp_url, self.cluster_name), args=())]
+        ocp_threads = [threading.Thread(target=handle_awr_from_ocp, args=(self.ocp_url, self.cluster_name))]
         list(map(lambda x: x.start(), ocp_threads))
@@ -120,16 +114,14 @@ def __download_report(self, store_path, name, report_id):
             "error": False,
         }
 
-        self.stdio.verbose(
-            "Sending Status Request to cluster {0} ...".format(self.cluster_name))
+        self.stdio.verbose("Sending Status Request to cluster {0} ...".format(self.cluster_name))
 
         path = ocp_api.cluster + "/%s/performance/workload/reports/%s" % (self.cluster_id, report_id)
         save_path = os.path.join(store_path, name + ".html")
         self.stdio.start_loading('download AWR report')
         pack_path = self.download(self.ocp_url + path, save_path, self.auth)
         self.stdio.stop_loading('download AWR report')
-        self.stdio.verbose(
-            "cluster {0} response. analysing...".format(self.cluster_name))
+        self.stdio.verbose("cluster {0} response. analysing...".format(self.cluster_name))
 
         resp["gather_pack_path"] = pack_path
         if resp["error"]:
@@ -157,16 +149,9 @@ def __generate_awr_report(self):
 
         path = ocp_api.cluster + "/%s/performance/workload/reports" % self.cluster_id
 
-        start_time = datetime.datetime.strptime(TimeUtils.trans_datetime_utc_to_local(start_time.split(".")[0]),
-                                                "%Y-%m-%d %H:%M:%S")
-        end_time = datetime.datetime.strptime(TimeUtils.trans_datetime_utc_to_local(end_time.split(".")[0]),
-                                              "%Y-%m-%d %H:%M:%S")
-        params = {
-            "name": "OBAWR_obcluster_%s_%s_%s" % (
-                self.cluster_name, start_time.strftime("%Y%m%d%H%M%S"), end_time.strftime("%Y%m%d%H%M%S")),
-            "startSnapshotId": start_sid,
-            "endSnapshotId": end_sid
-        }
+        start_time = datetime.datetime.strptime(TimeUtils.trans_datetime_utc_to_local(start_time.split(".")[0]), "%Y-%m-%d %H:%M:%S")
+        end_time = datetime.datetime.strptime(TimeUtils.trans_datetime_utc_to_local(end_time.split(".")[0]), "%Y-%m-%d %H:%M:%S")
+        params = {"name": "OBAWR_obcluster_%s_%s_%s" % (self.cluster_name, start_time.strftime("%Y%m%d%H%M%S"), end_time.strftime("%Y%m%d%H%M%S")), "startSnapshotId": start_sid, "endSnapshotId": end_sid}
 
         response = requests.post(self.ocp_url + path, auth=self.auth, data=params)
 
@@ -194,8 +179,7 @@ def __get_snapshot_list(self):
         # adjust the times to ensure snapshots can be retrieved.
if from_datetime_timestamp + 3 * 3600000000 >= to_datetime_timestamp: # Round the start time to the nearest hour - from_datetime_timestamp = TimeUtils.datetime_to_timestamp( - TimeUtils.get_time_rounding(dt=TimeUtils.parse_time_str(self.from_time_str), step=0, rounding_level="hour")) + from_datetime_timestamp = TimeUtils.datetime_to_timestamp(TimeUtils.get_time_rounding(dt=TimeUtils.parse_time_str(self.from_time_str), step=0, rounding_level="hour")) # Set the end time to one hour and three minutes after the rounded start time # (the three-minute offset ensures snapshots can be obtained) @@ -206,8 +190,7 @@ def __get_snapshot_list(self): for info in response.json()["data"]["contents"]: try: - snapshot_time = TimeUtils.datetime_to_timestamp( - TimeUtils.trans_datetime_utc_to_local(str(info["snapshotTime"]).split(".")[0])) + snapshot_time = TimeUtils.datetime_to_timestamp(TimeUtils.trans_datetime_utc_to_local(str(info["snapshotTime"]).split(".")[0])) if from_datetime_timestamp <= snapshot_time <= to_datetime_timestamp: snapshot_id_list.append((info["snapshotId"], info["snapshotTime"])) except KeyError: @@ -259,8 +242,7 @@ def init_option(self): from_timestamp = TimeUtils.datetime_to_timestamp(from_option) to_timestamp = TimeUtils.datetime_to_timestamp(to_option) except OBDIAGFormatException: - self.stdio.error("Error: Datetime is invalid. Must be in format yyyy-mm-dd hh:mm:ss. " \ - "from_datetime={0}, to_datetime={1}".format(from_option, to_option)) + self.stdio.error("Error: Datetime is invalid. Must be in format yyyy-mm-dd hh:mm:ss. " "from_datetime={0}, to_datetime={1}".format(from_option, to_option)) return False if to_timestamp <= from_timestamp: self.stdio.error("Error: from datetime is larger than to datetime, please check.") @@ -309,7 +291,5 @@ def __get_overall_summary(node_summary_tuple): consume_time = tup[4] pack_path = tup[5] format_file_size = FileUtil.size_format(num=file_size, output_str=True) - summary_tab.append((cluster, "Error" if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather AWR Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((cluster, "Error" if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather AWR Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_log.py b/handler/gather/gather_log.py index 6003fc74..972cebe9 100644 --- a/handler/gather/gather_log.py +++ b/handler/gather/gather_log.py @@ -49,8 +49,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.zip_encrypt = False self.is_scene = is_scene self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -79,18 +79,18 @@ def init_option(self): grep_option = Util.get_option(options, 'grep') scope_option = Util.get_option(options, 'scope') encrypt_option = Util.get_option(options, 'encrypt') - if self.context.get_variable("gather_from",None) : - from_option=self.context.get_variable("gather_from") - if self.context.get_variable("gather_to",None) : - to_option=self.context.get_variable("gather_to") - if 
self.context.get_variable("gather_since",None) : - since_option=self.context.get_variable("gather_since") - if self.context.get_variable("store_dir",None) : - store_dir_option=self.context.get_variable("store_dir") - if self.context.get_variable("gather_scope",None) : - scope_option=self.context.get_variable("gather_scope") - if self.context.get_variable("gather_grep",None) : - grep_option=self.context.get_variable("gather_grep") + if self.context.get_variable("gather_from", None): + from_option = self.context.get_variable("gather_from") + if self.context.get_variable("gather_to", None): + to_option = self.context.get_variable("gather_to") + if self.context.get_variable("gather_since", None): + since_option = self.context.get_variable("gather_since") + if self.context.get_variable("store_dir", None): + store_dir_option = self.context.get_variable("store_dir") + if self.context.get_variable("gather_scope", None): + scope_option = self.context.get_variable("gather_scope") + if self.context.get_variable("gather_grep", None): + grep_option = self.context.get_variable("gather_grep") if from_option is not None and to_option is not None: try: from_timestamp = TimeUtils.parse_time_str(from_option) @@ -151,11 +151,7 @@ def handle_from_node(node): file_size = "" if len(resp["error"]) == 0: file_size = os.path.getsize(resp["gather_pack_path"]) - gather_tuples.append((node.get("ip"), False, resp["error"], - file_size, - resp["zip_password"], - int(time.time() - st), - resp["gather_pack_path"])) + gather_tuples.append((node.get("ip"), False, resp["error"], file_size, resp["zip_password"], int(time.time() - st), resp["gather_pack_path"])) if self.is_ssh: for node in self.nodes: @@ -169,19 +165,14 @@ def handle_from_node(node): summary_tuples = self.__get_overall_summary(gather_tuples, self.zip_encrypt) self.stdio.print(summary_tuples) - self.pack_dir_this_command=pack_dir_this_command + self.pack_dir_this_command = pack_dir_this_command # Persist the summary results to a file FileUtil.write_append(os.path.join(pack_dir_this_command, "result_summary.txt"), summary_tuples) last_info = "For result details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(pack_dir_this_command, "result_summary.txt")) return True def __handle_from_node(self, pack_dir_this_command, node): - resp = { - "skip": False, - "error": "", - "zip_password": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "zip_password": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip() remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") @@ -197,15 +188,12 @@ def __handle_from_node(self, pack_dir_this_command, node): try: ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.exception('ssh {0}@{1}: failed, Please check the {2}'.format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception('ssh {0}@{1}: failed, Please check the {2}'.format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) - if not ssh_failed: + if not ssh_failed: # transform timestamp(in us) to yyyymmddhhmmss (filename_time style) from_datetime_timestamp = TimeUtils.timestamp_to_filename_time(TimeUtils.datetime_to_timestamp(self.from_time_str)) to_datetime_timestamp = TimeUtils.timestamp_to_filename_time(TimeUtils.datetime_to_timestamp(self.to_time_str)) @@ 
-232,7 +220,7 @@ def __handle_from_node(self, pack_dir_this_command, node): self.__handle_zip_file(node.get("ip"), ssh, resp, gather_dir_name, pack_dir_this_command) ssh.ssh_close() return resp - + def __grep_log_until_empty(self, ssh_helper, home_path, log_list, gather_path): """ 按时间顺序排序日志,从最新的时间(或者从设置的时间)开始往前找日志,直到grep的结果不为空,再直到grep的结果为空,则停止 @@ -244,7 +232,7 @@ def __grep_log_until_empty(self, ssh_helper, home_path, log_list, gather_path): # 理论上只有上述三种日志,other_log_list应该为空 other_log_list = [log_name for log_name in log_list if not any(log_name.startswith(prefix) for prefix in log_type_list)] for log_name in other_log_list: - self.__pharse_log(ssh_helper=ssh_helper, log_name=log_name, home_path=home_path, gather_path=gather_path) + self.__pharse_log(ssh_helper=ssh_helper, log_name=log_name, home_path=home_path, gather_path=gather_path) # wf结尾的日志非全量日志,不排查 # 形如observer.log等日志不方便排序,暂时删除,在后续重新加上 @@ -273,14 +261,8 @@ def __grep_log(self, ssh_helper, home_path, log_name, gather_path): """ log_path = os.path.join(home_path, "log") if self.grep_options is not None: - grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} > {gather_path}/{log_name} ".format( - grep_args=self.grep_options, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) - find_file_cmd = "find {gather_path} -type f -name {log_name} ! -empty".format( - gather_path=gather_path, - log_name=log_name) + grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} > {gather_path}/{log_name} ".format(grep_args=self.grep_options, gather_path=gather_path, log_name=log_name, log_dir=log_path) + find_file_cmd = "find {gather_path} -type f -name {log_name} ! -empty".format(gather_path=gather_path, log_name=log_name) self.stdio.verbose("grep files, run cmd = [{0}]".format(grep_cmd)) self.stdio.verbose("grep files, run cmd = [{0}]".format(find_file_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) if self.is_ssh else LocalClient(self.stdio).run(grep_cmd) @@ -293,15 +275,13 @@ def __handle_log_list(self, ssh, node, resp): log_list = self.__get_log_name(ssh, node) ip = node.get("ip") if len(log_list) > self.file_number_limit: - self.stdio.warn('{0} The number of log files is {1}, out of range (0,{2}], ' - "Please adjust the query limit".format(ip, len(log_list), self.file_number_limit)) - resp["skip"] = True, + self.stdio.warn('{0} The number of log files is {1}, out of range (0,{2}], ' "Please adjust the query limit".format(ip, len(log_list), self.file_number_limit)) + resp["skip"] = (True,) resp["error"] = "Too many files {0} > {1}".format(len(log_list), self.file_number_limit) return log_list, resp elif len(log_list) <= 0: - self.stdio.warn('{0} The number of log files is {1}, The time range for file gather from {2} to {3}, and no eligible files were found.' - " Please adjust the query time limit.".format(ip, len(log_list), self.from_time_str, self.to_time_str)) - resp["skip"] = True, + self.stdio.warn('{0} The number of log files is {1}, The time range for file gather from {2} to {3}, and no eligible files were found.' 
" Please adjust the query time limit.".format(ip, len(log_list), self.from_time_str, self.to_time_str)) + resp["skip"] = (True,) resp["error"] = "No files found" return log_list, resp return log_list, resp @@ -317,8 +297,7 @@ def __get_log_name(self, ssh_helper, node): if self.scope == "observer" or self.scope == "rootservice" or self.scope == "election": get_oblog = "ls -1 -F %s/*%s.log* | awk -F '/' '{print $NF}'" % (log_path, self.scope) else: - get_oblog = "ls -1 -F %s/observer.log* %s/rootservice.log* %s/election.log* | awk -F '/' '{print $NF}'" % \ - (log_path, log_path, log_path) + get_oblog = "ls -1 -F %s/observer.log* %s/rootservice.log* %s/election.log* | awk -F '/' '{print $NF}'" % (log_path, log_path, log_path) log_name_list = [] log_files = SshClient(self.stdio).run(ssh_helper, get_oblog) if self.is_ssh else LocalClient(self.stdio).run(get_oblog) if log_files: @@ -335,35 +314,24 @@ def __pharse_log(self, ssh_helper, home_path, log_name, gather_path): """ log_path = os.path.join(home_path, "log") if self.grep_options is not None: - grep_cmd="" + grep_cmd = "" if type(self.grep_options) == str: - grep_cmd = "grep -e '{grep_options}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format( - grep_options=self.grep_options, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) - elif type(self.grep_options) == list and len(self.grep_options)>0: - grep_litter_cmd="" + grep_cmd = "grep -e '{grep_options}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format(grep_options=self.grep_options, gather_path=gather_path, log_name=log_name, log_dir=log_path) + elif type(self.grep_options) == list and len(self.grep_options) > 0: + grep_litter_cmd = "" for grep_option in self.grep_options: - if type(grep_option)!=str: + if type(grep_option) != str: self.stdio.error('The grep args must be string or list of strings, but got {0}'.format(type(grep_option))) raise Exception('The grep args must be string or list of strings, but got {0}'.format(type(grep_option))) - elif grep_option == "": + elif grep_option == "": self.stdio.warn('The grep args must be string or list of strings, but got ""') continue grep_litter_cmd += "| grep -e '{0}'".format(grep_option) - grep_cmd = "cat {log_dir}/{log_name} {grep_options} >> {gather_path}/{log_name} ".format( - grep_options=grep_litter_cmd, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + grep_cmd = "cat {log_dir}/{log_name} {grep_options} >> {gather_path}/{log_name} ".format(grep_options=grep_litter_cmd, gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose('grep files, run cmd = [{0}]'.format(grep_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) if self.is_ssh else LocalClient(self.stdio).run(grep_cmd) else: - cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format( - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format(gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose('copy files, run cmd = [{0}]'.format(cp_cmd)) SshClient(self.stdio).run(ssh_helper, cp_cmd) if self.is_ssh else LocalClient(self.stdio).run(cp_cmd) @@ -419,10 +387,7 @@ def __get_overall_summary(node_summary_tuple, is_zip_encrypt): except: format_file_size = FileUtil.size_format(num=0, output_str=True) if is_zip_encrypt: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) + summary_tab.append((node, 
"Error:" + tup[2] if is_err else "Completed", format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) else: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather Ob Log Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather Ob Log Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_obadmin.py b/handler/gather/gather_obadmin.py index 41e50d0a..542f2192 100644 --- a/handler/gather/gather_obadmin.py +++ b/handler/gather/gather_obadmin.py @@ -48,8 +48,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.grep_args = None self.zip_encrypt = False self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -60,7 +60,7 @@ def init_config(self): self.nodes = new_nodes self.inner_config = self.context.inner_config self.ob_admin_mode = 'clog' - if self.context.get_variable("gather_obadmin_mode", None) : + if self.context.get_variable("gather_obadmin_mode", None): self.ob_admin_mode = self.context.get_variable("gather_obadmin_mode") if self.inner_config is None: self.file_number_limit = 20 @@ -131,11 +131,7 @@ def handle_from_node(node): file_size = "" if len(resp["error"]) == 0: file_size = os.path.getsize(resp["gather_pack_path"]) - gather_tuples.append((node.get("ip"), False, resp["error"], - file_size, - resp["zip_password"], - int(time.time() - st), - resp["gather_pack_path"])) + gather_tuples.append((node.get("ip"), False, resp["error"], file_size, resp["zip_password"], int(time.time() - st), resp["gather_pack_path"])) if self.is_ssh: for node in self.nodes: @@ -159,18 +155,13 @@ def handle_from_node(node): last_info = "For result details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(pack_dir_this_command, "result_summary.txt")) def __handle_from_node(self, local_stored_path, node): - resp = { - "skip": False, - "error": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip() remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") remote_port = node.get("ssh_port") remote_private_key = node.get("ssh_key_file") - self.stdio.verbose( - "Sending Collect Shell Command to node {0} ...".format(remote_ip)) + self.stdio.verbose("Sending Collect Shell Command to node {0} ...".format(remote_ip)) DirectoryUtil.mkdir(path=local_stored_path, stdio=self.stdio) now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') if self.ob_admin_mode == "slog": @@ -182,10 +173,7 @@ def __handle_from_node(self, local_stored_path, node): try: ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.error("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.error("ssh 
{0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -194,8 +182,7 @@ def __handle_from_node(self, local_stored_path, node): SshClient(self.stdio).run(ssh_helper, mkdir_cmd) if self.is_ssh else LocalClient(self.stdio).run(mkdir_cmd) ob_version = get_observer_version(self.is_ssh, ssh_helper, node.get("home_path"), self.stdio) if (ob_version != "" and not StringUtils.compare_versions_lower(ob_version, const.MAX_OB_VERSION_SUPPORT_GATHER_OBADMIN, self.stdio)) or ob_version == "": - self.stdio.verbose("This version {0} does not support gather clog/slog . The max supported version less than {1}". - format(ob_version, const.MAX_OB_VERSION_SUPPORT_GATHER_OBADMIN)) + self.stdio.verbose("This version {0} does not support gather clog/slog . The max supported version less than {1}".format(ob_version, const.MAX_OB_VERSION_SUPPORT_GATHER_OBADMIN)) resp["error"] = "{0} not support gather clog/slog".format(ob_version) resp["gather_pack_path"] = "{0}".format(local_stored_path) resp["zip_password"] = "" @@ -216,17 +203,13 @@ def __handle_from_node(self, local_stored_path, node): def __handle_log_list(self, ssh, node, resp): log_list = self.__get_log_name(ssh, node) if len(log_list) > 20: - self.stdio.warn( - "{0} The number of log files is {1}, out of range (0,20], " - "Please adjust the query limit".format(node.get("ip"), len(log_list))) - resp["skip"] = True, + self.stdio.warn("{0} The number of log files is {1}, out of range (0,20], " "Please adjust the query limit".format(node.get("ip"), len(log_list))) + resp["skip"] = (True,) resp["error"] = "Too many files {0} > 20".format(len(log_list)) return log_list, resp elif len(log_list) <= 0: - self.stdio.warn( - "{0} The number of log files is {1}, out of range (0,20], " - "Please adjust the query limit".format(node.get("ip"), len(log_list))) - resp["skip"] = True, + self.stdio.warn("{0} The number of log files is {1}, out of range (0,20], " "Please adjust the query limit".format(node.get("ip"), len(log_list))) + resp["skip"] = (True,) resp["error"] = "No files found" return log_list, resp return log_list, resp @@ -253,8 +236,7 @@ def __handle_zip_file(self, ip, ssh, resp, gather_dir_name, pack_dir_this_comman rm_rf_file(self.is_ssh, ssh, gather_package_dir, self.stdio) resp["gather_pack_path"] = local_path - self.stdio.verbose( - "Collect pack gathered from node {0}: stored in {1}".format(ip, gather_package_dir)) + self.stdio.verbose("Collect pack gathered from node {0}: stored in {1}".format(ip, gather_package_dir)) return resp def __get_log_name(self, ssh_helper, node): @@ -269,7 +251,7 @@ def __get_log_name(self, ssh_helper, node): get_log = "ls -l SLOG_DIR --time-style '+.%Y%m%d%H%M%S' | awk '{print $7,$6}'".replace("SLOG_DIR", slog_dir) else: get_log = "ls -l CLOG_DIR --time-style '+.%Y%m%d%H%M%S' | awk '{print $7,$6}'".replace("CLOG_DIR", clog_dir) - log_files = SshClient(self.stdio).run(ssh_helper, get_log) if self.is_ssh else LocalClient(self.stdio).run(get_log) + log_files = SshClient(self.stdio).run(ssh_helper, get_log) if self.is_ssh else LocalClient(self.stdio).run(get_log) log_name_list = [] for file_name in log_files.split('\n'): if file_name == "": @@ -283,8 +265,7 @@ def __get_log_name(self, ssh_helper, node): if (log_time > from_time) and (log_time < to_time): log_name_list.append(str(log_name_fields[0]).rstrip()) if len(log_name_list): - self.stdio.verbose("Find the qualified log file {0} on Server 
[{1}], " - "wait for the next step".format(log_name_list, ssh_helper.get_name())) + self.stdio.verbose("Find the qualified log file {0} on Server [{1}], " "wait for the next step".format(log_name_list, ssh_helper.get_name())) else: self.stdio.warn("No found the qualified log file on Server [{0}]".format(ssh_helper.get_name())) return log_name_list @@ -293,18 +274,14 @@ def __gather_log_info(self, ssh_helper, node, log_name, remote_dir): home_path = node.get("home_path") obadmin_install_dir = os.path.join(home_path, "/bin") if self.ob_admin_mode == "slog": - cmd = "export LD_LIBRARY_PATH={ob_install_dir}/lib && cd {store_dir} && {obadmin_dir}/ob_admin slog_tool -f {slog_name}".format( - ob_install_dir=home_path, - store_dir=remote_dir, - obadmin_dir=obadmin_install_dir, - slog_name=log_name) + cmd = "export LD_LIBRARY_PATH={ob_install_dir}/lib && cd {store_dir} && {obadmin_dir}/ob_admin slog_tool -f {slog_name}".format(ob_install_dir=home_path, store_dir=remote_dir, obadmin_dir=obadmin_install_dir, slog_name=log_name) else: cmd = "export LD_LIBRARY_PATH={ob_install_dir}/lib && cd {store_dir} && {obadmin_dir}/ob_admin clog_tool dump_all {clog_name}".format( ob_install_dir=home_path, store_dir=remote_dir, obadmin_dir=obadmin_install_dir, clog_name=log_name, - ) + ) self.stdio.verbose("gather obadmin info, run cmd = [{0}]".format(cmd)) SshClient(self.stdio).run(ssh_helper, cmd) if self.is_ssh else LocalClient(self.stdio).run(cmd) @@ -335,14 +312,10 @@ def __get_overall_summary(node_summary_tuple, mode, is_zip_encrypt): except: format_file_size = FileUtil.size_format(num=0, output_str=True) if is_zip_encrypt: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) else: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) if mode == "slog": - return "\nGather slog Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", - showindex=False) + return "\nGather slog Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) else: - return "\nGather clog Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", - showindex=False) + return "\nGather clog Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_obproxy_log.py b/handler/gather/gather_obproxy_log.py index ccb2e3e7..46459388 100644 --- a/handler/gather/gather_obproxy_log.py +++ b/handler/gather/gather_obproxy_log.py @@ -25,8 +25,7 @@ from common.obdiag_exception import OBDIAGFormatException from common.command import LocalClient, SshClient from common.constant import const -from common.command import get_file_size, download_file, is_empty_dir, get_logfile_name_list, mkdir, delete_empty_file, \ - rm_rf_file, zip_encrypt_dir, zip_dir +from common.command import get_file_size, download_file, is_empty_dir, get_logfile_name_list, mkdir, delete_empty_file, rm_rf_file, zip_encrypt_dir, zip_dir from common.ssh import SshHelper from common.tool import Util from common.tool import DirectoryUtil @@ -52,8 +51,8 @@ def 
__init__(self, context, gather_pack_dir='./', is_scene=False): self.zip_encrypt = False self.is_scene = is_scene self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -82,15 +81,15 @@ def init_option(self): grep_option = Util.get_option(options, 'grep') encrypt_option = Util.get_option(options, 'encrypt') scope_option = Util.get_option(options, 'scope') - if self.context.get_variable("gather_from",None) : - from_option=self.context.get_variable("gather_from") - if self.context.get_variable("gather_to",None) : - to_option=self.context.get_variable("gather_to") - if self.context.get_variable("gather_since",None) : - since_option=self.context.get_variable("gather_since") - if self.context.get_variable("store_dir",None) : - store_dir_option=self.context.get_variable("store_dir") - if self.context.get_variable("gather_scope",None) : + if self.context.get_variable("gather_from", None): + from_option = self.context.get_variable("gather_from") + if self.context.get_variable("gather_to", None): + to_option = self.context.get_variable("gather_to") + if self.context.get_variable("gather_since", None): + since_option = self.context.get_variable("gather_since") + if self.context.get_variable("store_dir", None): + store_dir_option = self.context.get_variable("store_dir") + if self.context.get_variable("gather_scope", None): scope_option = self.context.get_variable("gather_scope") if self.context.get_variable("gather_grep", None): grep_option = self.context.get_variable("gather_grep") @@ -170,18 +169,13 @@ def handle_from_node(node): summary_tuples = self.__get_overall_summary(gather_tuples, self.zip_encrypt) self.stdio.print(summary_tuples) - self.pack_dir_this_command=pack_dir_this_command + self.pack_dir_this_command = pack_dir_this_command FileUtil.write_append(os.path.join(pack_dir_this_command, "result_summary.txt"), summary_tuples) last_info = "For result details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(pack_dir_this_command, "result_summary.txt")) return True def __handle_from_node(self, node, pack_dir_this_command): - resp = { - "skip": False, - "error": "", - "zip_password": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "zip_password": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip() remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") @@ -192,12 +186,9 @@ def __handle_from_node(self, node, pack_dir_this_command): self.stdio.verbose("Sending Collect Shell Command to node {0} ...".format(remote_ip)) DirectoryUtil.mkdir(path=pack_dir_this_command, stdio=self.stdio) try: - ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key,node, self.stdio) + ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the 
{0}".format(self.config_path) @@ -229,15 +220,13 @@ def __handle_log_list(self, ssh, node, resp): log_list = self.__get_log_name(ssh, node) ip = node.get("ip") if len(log_list) > self.file_number_limit: - self.stdio.warn("{0} The number of log files is {1}, out of range (0,{2}], " - "Please adjust the query limit".format(ip, len(log_list), self.file_number_limit)) - resp["skip"] = True, + self.stdio.warn("{0} The number of log files is {1}, out of range (0,{2}], " "Please adjust the query limit".format(ip, len(log_list), self.file_number_limit)) + resp["skip"] = (True,) resp["error"] = "Too many files {0} > {1}".format(len(log_list), self.file_number_limit) return log_list, resp elif len(log_list) <= 0: - self.stdio.warn("{0} The number of log files is {1}, The time range for file gather from {2} to {3}, and no eligible files were found." - " Please adjust the query time limit.".format(ip, len(log_list), self.from_time_str, self.to_time_str)) - resp["skip"] = True, + self.stdio.warn("{0} The number of log files is {1}, The time range for file gather from {2} to {3}, and no eligible files were found." " Please adjust the query time limit.".format(ip, len(log_list), self.from_time_str, self.to_time_str)) + resp["skip"] = (True,) resp["error"] = "No files found" return log_list, resp return log_list, resp @@ -245,11 +234,18 @@ def __handle_log_list(self, ssh, node, resp): def __get_log_name(self, ssh_helper, node): home_path = node.get("home_path") log_path = os.path.join(home_path, "log") - if self.scope == "obproxy" or self.scope == "obproxy_stat" or self.scope == "obproxy_digest" or \ - self.scope == "obproxy_limit" or self.scope == "obproxy_slow" or self.scope == "obproxy_diagnosis" or self.scope == "obproxy_error": + if self.scope == "obproxy" or self.scope == "obproxy_stat" or self.scope == "obproxy_digest" or self.scope == "obproxy_limit" or self.scope == "obproxy_slow" or self.scope == "obproxy_diagnosis" or self.scope == "obproxy_error": get_obproxy_log = "ls -1 -F %s/*%s.*log* | awk -F '/' '{print $NF}'" % (log_path, self.scope) else: - get_obproxy_log = "ls -1 -F %s/obproxy.*log* %s/obproxy_error.*log* %s/obproxy_stat.*log* %s/obproxy_digest.*log* %s/obproxy_limit.*log* %s/obproxy_slow.*log* %s/obproxy_diagnosis.*log*| awk -F '/' '{print $NF}'" % (log_path, log_path, log_path, log_path, log_path, log_path,log_path) + get_obproxy_log = "ls -1 -F %s/obproxy.*log* %s/obproxy_error.*log* %s/obproxy_stat.*log* %s/obproxy_digest.*log* %s/obproxy_limit.*log* %s/obproxy_slow.*log* %s/obproxy_diagnosis.*log*| awk -F '/' '{print $NF}'" % ( + log_path, + log_path, + log_path, + log_path, + log_path, + log_path, + log_path, + ) if self.is_ssh: log_files = SshClient(self.stdio).run(ssh_helper, get_obproxy_log).strip() else: @@ -269,36 +265,25 @@ def __pharse_log(self, ssh_helper, home_path, log_name, gather_path): """ log_path = os.path.join(home_path, "log") if self.grep_args is not None: - grep_cmd="" + grep_cmd = "" if type(self.grep_args) == str: - grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format( - grep_args=self.grep_args, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) - elif type(self.grep_args) == list and len(self.grep_args)>0: - grep_litter_cmd="" + grep_cmd = "grep -e '{grep_args}' {log_dir}/{log_name} >> {gather_path}/{log_name} ".format(grep_args=self.grep_args, gather_path=gather_path, log_name=log_name, log_dir=log_path) + elif type(self.grep_args) == list and len(self.grep_args) > 0: + grep_litter_cmd = "" for 
grep_arg in self.grep_args: - if type(grep_arg)!=str: + if type(grep_arg) != str: self.stdio.error('The grep args must be string or list of strings, but got {0}'.format(type(grep_arg))) raise Exception('The grep args must be string or list of strings, but got {0}'.format(type(grep_arg))) - elif grep_arg == "": + elif grep_arg == "": self.stdio.warn('The grep args must be string or list of strings, but got ""') continue grep_litter_cmd += "| grep -e '{0}'".format(grep_arg) - grep_cmd = "cat {log_dir}/{log_name} {grep_args} >> {gather_path}/{log_name} ".format( - grep_args=grep_litter_cmd, - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + grep_cmd = "cat {log_dir}/{log_name} {grep_args} >> {gather_path}/{log_name} ".format(grep_args=grep_litter_cmd, gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose("grep files, run cmd = [{0}]".format(grep_cmd)) SshClient(self.stdio).run(ssh_helper, grep_cmd) if self.is_ssh else LocalClient(self.stdio).run(grep_cmd) else: - cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format( - gather_path=gather_path, - log_name=log_name, - log_dir=log_path) + cp_cmd = "cp {log_dir}/{log_name} {gather_path}/{log_name} ".format(gather_path=gather_path, log_name=log_name, log_dir=log_path) self.stdio.verbose("copy files, run cmd = [{0}]".format(cp_cmd)) SshClient(self.stdio).run(ssh_helper, cp_cmd) if self.is_ssh else LocalClient(self.stdio).run(cp_cmd) @@ -352,10 +337,7 @@ def __get_overall_summary(node_summary_tuple, is_zip_encrypt): except: format_file_size = FileUtil.size_format(num=0, output_str=True) if is_zip_encrypt: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, tup[4], "{0} s".format(int(consume_time)), pack_path)) else: - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather ObProxy Log Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather ObProxy Log Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_obstack2.py b/handler/gather/gather_obstack2.py index a8804ad7..7ba6d2b0 100644 --- a/handler/gather/gather_obstack2.py +++ b/handler/gather/gather_obstack2.py @@ -46,8 +46,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.remote_stored_path = None self.is_scene = is_scene self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -90,16 +90,14 @@ def handle(self): pack_dir_this_command = os.path.join(self.local_stored_path, "gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) self.stdio.verbose("Use {0} as pack dir.".format(pack_dir_this_command)) gather_tuples = [] + def handle_from_node(node): st = time.time() resp = 
self.__handle_from_node(pack_dir_this_command, node) file_size = "" if len(resp["error"]) == 0: file_size = os.path.getsize(resp["gather_pack_path"]) - gather_tuples.append((node.get("ip"), False, resp["error"], - file_size, - int(time.time() - st), - resp["gather_pack_path"])) + gather_tuples.append((node.get("ip"), False, resp["error"], file_size, int(time.time() - st), resp["gather_pack_path"])) if self.is_ssh: for node in self.nodes: @@ -118,30 +116,22 @@ def handle_from_node(node): last_info = "For result details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(pack_dir_this_command, "result_summary.txt")) def __handle_from_node(self, local_stored_path, node): - resp = { - "skip": False, - "error": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip() remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") remote_port = node.get("ssh_port") remote_private_key = node.get("ssh_key_file") - self.stdio.verbose( - "Sending Collect Shell Command to node {0} ...".format(remote_ip)) + self.stdio.verbose("Sending Collect Shell Command to node {0} ...".format(remote_ip)) DirectoryUtil.mkdir(path=local_stored_path, stdio=self.stdio) now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S') remote_dir_name = "obstack2_{0}_{1}".format(remote_ip, now_time) remote_dir_full_path = "/tmp/{0}".format(remote_dir_name) ssh_failed = False try: - ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key,node, self.stdio) + ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -155,15 +145,13 @@ def __handle_from_node(self, local_stored_path, node): # install and chmod obstack2 ob_version = get_observer_version(self.is_ssh, ssh_helper, node.get("home_path"), self.stdio) if not StringUtils.compare_versions_greater(ob_version, const.MIN_OB_VERSION_SUPPORT_GATHER_OBSTACK): - self.stdio.verbose("This version {0} does not support gather obstack . The minimum supported version is {1}". - format(ob_version, const.MIN_OB_VERSION_SUPPORT_GATHER_OBSTACK)) + self.stdio.verbose("This version {0} does not support gather obstack . The minimum supported version is {1}".format(ob_version, const.MIN_OB_VERSION_SUPPORT_GATHER_OBSTACK)) resp["error"] = "{0} not support gather obstack".format(ob_version) resp["gather_pack_path"] = "{0}".format(local_stored_path) return resp is_need_install_obstack = self.__is_obstack_exists(self.is_ssh, ssh_helper) if is_need_install_obstack: - self.stdio.verbose("There is no obstack2 on the host {0}. It needs to be installed. " - "Please wait a moment ...".format(remote_ip)) + self.stdio.verbose("There is no obstack2 on the host {0}. It needs to be installed. 
" "Please wait a moment ...".format(remote_ip)) if getattr(sys, 'frozen', False): absPath = os.path.dirname(sys.executable) else: @@ -171,14 +159,14 @@ def __handle_from_node(self, local_stored_path, node): obstack2_local_stored_full_path = os.path.join(absPath, const.OBSTACK2_LOCAL_STORED_PATH) upload_file(self.is_ssh, ssh_helper, obstack2_local_stored_full_path, const.OBSTACK2_DEFAULT_INSTALL_PATH, self.stdio) self.stdio.verbose("Installation of obstack2 is completed and gather begins ...") - + self.__chmod_obstack2(self.is_ssh, ssh_helper) # get observer_pid observer_pid_list = get_observer_pid(self.is_ssh, ssh_helper, node.get("home_path"), self.stdio) # gather obstack2 info for observer_pid in observer_pid_list: user = self.__get_observer_execute_user(ssh_helper, observer_pid) - self.__gather_obstack2_info(self.is_ssh, ssh_helper, user, observer_pid, remote_dir_name,node) + self.__gather_obstack2_info(self.is_ssh, ssh_helper, user, observer_pid, remote_dir_name, node) try: self.stdio.start_loading('gather obstack info') self.is_ready(ssh_helper, observer_pid, remote_dir_name) @@ -186,13 +174,12 @@ def __handle_from_node(self, local_stored_path, node): except: self.stdio.stop_loading('gather info failed') self.stdio.error("Gather obstack info on the host {0} observer pid {1}".format(remote_ip, observer_pid)) - delete_file_force(self.is_ssh, ssh_helper, "/tmp/{dir_name}/observer_{pid}_obstack.txt" - .format(dir_name=remote_dir_name, pid=observer_pid), self.stdio) + delete_file_force(self.is_ssh, ssh_helper, "/tmp/{dir_name}/observer_{pid}_obstack.txt".format(dir_name=remote_dir_name, pid=observer_pid), self.stdio) pass if is_empty_dir(self.is_ssh, ssh_helper, "/tmp/{0}".format(remote_dir_name), self.stdio): resp["error"] = "gather failed, folder is empty" return resp - + zip_dir(self.is_ssh, ssh_helper, "/tmp", remote_dir_name, self.stdio) remote_zip_file_path = "{0}.zip".format(remote_dir_full_path) @@ -204,7 +191,7 @@ def __handle_from_node(self, local_stored_path, node): resp["error"] = "" else: resp["error"] = "File too large" - delete_file_force(self.is_ssh, ssh_helper, remote_file_full_path, self.stdio) + delete_file_force(self.is_ssh, ssh_helper, remote_file_full_path, self.stdio) ssh_helper.ssh_close() resp["gather_pack_path"] = "{0}/{1}.zip".format(local_stored_path, remote_dir_name) return resp @@ -212,17 +199,14 @@ def __handle_from_node(self, local_stored_path, node): @Util.retry(5, 2) def is_ready(self, ssh_helper, pid, remote_dir_name): try: - self.stdio.verbose("Check whether the directory /tmp/{dir_name} or " - "file /tmp/{dir_name}/observer_{pid}_obstack.txt is empty" - .format(dir_name=remote_dir_name, pid=pid)) + self.stdio.verbose("Check whether the directory /tmp/{dir_name} or " "file /tmp/{dir_name}/observer_{pid}_obstack.txt is empty".format(dir_name=remote_dir_name, pid=pid)) is_empty_dir_res = is_empty_dir(self.is_ssh, ssh_helper, "/tmp/{0}".format(remote_dir_name), self.stdio) - is_empty_file_res = is_empty_file(self.is_ssh, ssh_helper, "/tmp/{dir_name}/observer_{pid}_obstack.txt" - .format(dir_name=remote_dir_name, pid=pid), self.stdio) + is_empty_file_res = is_empty_file(self.is_ssh, ssh_helper, "/tmp/{dir_name}/observer_{pid}_obstack.txt".format(dir_name=remote_dir_name, pid=pid), self.stdio) if is_empty_dir_res or is_empty_file_res: self.stdio.verbose( "The server {host_ip} directory /tmp/{dir_name} or file /tmp/{dir_name}/observer_{pid}_obstack.txt" - " is empty, waiting for the collection to complete" - .format(host_ip=ssh_helper.get_name() if self.is_ssh 
else NetUtils.get_inner_ip(self.stdio), dir_name=remote_dir_name, pid=pid)) + " is empty, waiting for the collection to complete".format(host_ip=ssh_helper.get_name() if self.is_ssh else NetUtils.get_inner_ip(self.stdio), dir_name=remote_dir_name, pid=pid) + ) raise except Exception as e: raise e @@ -233,7 +217,7 @@ def __chmod_obstack2(self, is_ssh, ssh_helper): def __is_obstack_exists(self, is_ssh, ssh_helper): cmd = "test -e {file} && echo exists".format(file=const.OBSTACK2_DEFAULT_INSTALL_PATH) - stdout = SshClient(self.stdio).run(ssh_helper, cmd) if is_ssh else LocalClient(self.stdio).run(cmd) + stdout = SshClient(self.stdio).run(ssh_helper, cmd) if is_ssh else LocalClient(self.stdio).run(cmd) if stdout == 'exists': return False else: @@ -241,23 +225,20 @@ def __is_obstack_exists(self, is_ssh, ssh_helper): def __get_observer_execute_user(self, ssh_helper, pid): cmd = "ps -o ruser=userForLongName -e -o pid,ppid,c,stime,tty,time,cmd | grep observer | grep {0} | awk {1}".format(pid, "'{print $1}'") - stdout = SshClient(self.stdio).run(ssh_helper, cmd) if self.is_ssh else LocalClient(self.stdio).run(cmd) + stdout = SshClient(self.stdio).run(ssh_helper, cmd) if self.is_ssh else LocalClient(self.stdio).run(cmd) user = stdout.splitlines()[0] self.stdio.verbose("get observer execute user, run cmd = [{0}], result:{1} ".format(cmd, user)) return user - def __gather_obstack2_info(self, is_ssh, ssh_helper, user, observer_pid, remote_gather_dir,node): - cmd = "{obstack} {pid} > /tmp/{gather_dir}/observer_{pid}_obstack.txt".format( - obstack=const.OBSTACK2_DEFAULT_INSTALL_PATH, - pid=observer_pid, - gather_dir=remote_gather_dir) + def __gather_obstack2_info(self, is_ssh, ssh_helper, user, observer_pid, remote_gather_dir, node): + cmd = "{obstack} {pid} > /tmp/{gather_dir}/observer_{pid}_obstack.txt".format(obstack=const.OBSTACK2_DEFAULT_INSTALL_PATH, pid=observer_pid, gather_dir=remote_gather_dir) if is_ssh: if user == ssh_helper.username: self.stdio.verbose("gather obstack info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), cmd)) SshClient(self.stdio).run_ignore_err(ssh_helper, cmd) else: - ssh_helper_new = SshHelper(ssh_helper.host_ip, ssh_helper.username, ssh_helper.password, ssh_helper.ssh_port, ssh_helper.key_file,node) - chown_cmd = "chown {user} /tmp/{gather_dir}/".format(user=user,gather_dir=remote_gather_dir) + ssh_helper_new = SshHelper(ssh_helper.host_ip, ssh_helper.username, ssh_helper.password, ssh_helper.ssh_port, ssh_helper.key_file, node) + chown_cmd = "chown {user} /tmp/{gather_dir}/".format(user=user, gather_dir=remote_gather_dir) SshClient(self.stdio).run(ssh_helper_new, chown_cmd) self.stdio.verbose("gather obstack info on server {0}, run cmd = [su {1}, {2}]".format(ssh_helper.get_name(), user, cmd)) ssh_helper_new.ssh_invoke_shell_switch_user(user, cmd, 10) @@ -278,7 +259,5 @@ def __get_overall_summary(node_summary_tuple): format_file_size = FileUtil.size_format(num=file_size, output_str=True) except: format_file_size = FileUtil.size_format(num=0, output_str=True) - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather Ob stack Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather Ob stack Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, 
tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_perf.py b/handler/gather/gather_perf.py index 79e7a996..1acb5057 100644 --- a/handler/gather/gather_perf.py +++ b/handler/gather/gather_perf.py @@ -45,8 +45,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.is_scene = is_scene self.scope = "all" self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -87,7 +87,7 @@ def handle(self): if self.is_scene: pack_dir_this_command = self.local_stored_path else: - pack_dir_this_command = os.path.join(self.local_stored_path,"gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) + pack_dir_this_command = os.path.join(self.local_stored_path, "gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) self.stdio.verbose("Use {0} as pack dir.".format(pack_dir_this_command)) gather_tuples = [] @@ -97,10 +97,7 @@ def handle_from_node(node): file_size = "" if len(resp["error"]) == 0: file_size = os.path.getsize(resp["gather_pack_path"]) - gather_tuples.append((node.get("ip"), False, resp["error"], - file_size, - int(time.time() - st), - resp["gather_pack_path"])) + gather_tuples.append((node.get("ip"), False, resp["error"], file_size, int(time.time() - st), resp["gather_pack_path"])) if self.is_ssh: for node in self.nodes: @@ -119,11 +116,7 @@ def handle_from_node(node): last_info = "For result details, please run cmd \033[32m' cat {0} '\033[0m\n".format(os.path.join(pack_dir_this_command, "result_summary.txt")) def __handle_from_node(self, node, local_stored_path): - resp = { - "skip": False, - "error": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip(self.stdio) remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") @@ -136,13 +129,9 @@ def __handle_from_node(self, node, local_stored_path): remote_dir_full_path = "/tmp/{0}".format(remote_dir_name) ssh_failed = False try: - ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, - remote_private_key, node, self.stdio) + ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -168,7 +157,7 @@ def __handle_from_node(self, node, local_stored_path): file_size = get_file_size(self.is_ssh, ssh_helper, remote_file_full_path, self.stdio) if int(file_size) < self.file_size_limit: local_file_path = "{0}/{1}.zip".format(local_stored_path, remote_dir_name) - download_file(self.is_ssh,ssh_helper, remote_file_full_path, local_file_path, self.stdio) + download_file(self.is_ssh, ssh_helper, remote_file_full_path, local_file_path, self.stdio) resp["error"] = "" else: resp["error"] = "File too large" @@ -179,13 +168,11 @@ def __handle_from_node(self, 
node, local_stored_path): def __gather_perf_sample(self, ssh_helper, gather_path, pid_observer): try: - cmd = "cd {gather_path} && perf record -o sample.data -e cycles -c 100000000 -p {pid} -g -- sleep 20".format( - gather_path=gather_path, pid=pid_observer) + cmd = "cd {gather_path} && perf record -o sample.data -e cycles -c 100000000 -p {pid} -g -- sleep 20".format(gather_path=gather_path, pid=pid_observer) self.stdio.verbose("gather perf sample, run cmd = [{0}]".format(cmd)) SshClient(self.stdio).run_ignore_err(ssh_helper, cmd) if self.is_ssh else LocalClient(self.stdio).run(cmd) - generate_data = "cd {gather_path} && perf script -i sample.data -F ip,sym -f > sample.viz".format( - gather_path=gather_path) + generate_data = "cd {gather_path} && perf script -i sample.data -F ip,sym -f > sample.viz".format(gather_path=gather_path) self.stdio.verbose("generate perf sample data, run cmd = [{0}]".format(generate_data)) SshClient(self.stdio).run_ignore_err(ssh_helper, generate_data) if self.is_ssh else LocalClient(self.stdio).run(generate_data) except: @@ -193,13 +180,11 @@ def __gather_perf_sample(self, ssh_helper, gather_path, pid_observer): def __gather_perf_flame(self, ssh_helper, gather_path, pid_observer): try: - perf_cmd = "cd {gather_path} && perf record -o flame.data -F 99 -p {pid} -g -- sleep 20".format( - gather_path=gather_path, pid=pid_observer) + perf_cmd = "cd {gather_path} && perf record -o flame.data -F 99 -p {pid} -g -- sleep 20".format(gather_path=gather_path, pid=pid_observer) self.stdio.verbose("gather perf, run cmd = [{0}]".format(perf_cmd)) SshClient(self.stdio).run_ignore_err(ssh_helper, perf_cmd) if self.is_ssh else LocalClient(self.stdio).run(perf_cmd) - generate_data = "cd {gather_path} && perf script -i flame.data > flame.viz".format( - gather_path=gather_path) + generate_data = "cd {gather_path} && perf script -i flame.data > flame.viz".format(gather_path=gather_path) self.stdio.verbose("generate perf data, run cmd = [{0}]".format(generate_data)) SshClient(self.stdio).run_ignore_err(ssh_helper, generate_data) if self.is_ssh else LocalClient(self.stdio).run(generate_data) except: @@ -207,8 +192,7 @@ def __gather_perf_flame(self, ssh_helper, gather_path, pid_observer): def __gather_top(self, ssh_helper, gather_path, pid_observer): try: - cmd = "cd {gather_path} && top -Hp {pid} -b -n 1 > top.txt".format( - gather_path=gather_path, pid=pid_observer) + cmd = "cd {gather_path} && top -Hp {pid} -b -n 1 > top.txt".format(gather_path=gather_path, pid=pid_observer) self.stdio.verbose("gather top, run cmd = [{0}]".format(cmd)) SshClient(self.stdio).run(ssh_helper, cmd) if self.is_ssh else LocalClient(self.stdio).run(cmd) except: @@ -228,7 +212,5 @@ def __get_overall_summary(node_summary_tuple): format_file_size = FileUtil.size_format(num=file_size, output_str=True) except: format_file_size = FileUtil.size_format(num=0, output_str=True) - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather Perf Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather Perf Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/gather_plan_monitor.py b/handler/gather/gather_plan_monitor.py index ac7b71c2..710ebaa1 
100644 --- a/handler/gather/gather_plan_monitor.py +++ b/handler/gather/gather_plan_monitor.py @@ -53,8 +53,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.sql_audit_name = "gv$sql_audit" self.plan_explain_name = "gv$plan_cache_plan_explain" self.is_scene = is_scene - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -70,8 +70,8 @@ def init_option(self): trace_id_option = Util.get_option(options, 'trace_id') store_dir_option = Util.get_option(options, 'store_dir') env_option = Util.get_option(options, 'env') - if self.context.get_variable("gather_plan_monitor_trace_id", None) : - trace_id_option=self.context.get_variable("gather_plan_monitor_trace_id") + if self.context.get_variable("gather_plan_monitor_trace_id", None): + trace_id_option = self.context.get_variable("gather_plan_monitor_trace_id") if trace_id_option is not None: self.trace_id = trace_id_option else: @@ -101,8 +101,7 @@ def handle(self): if self.is_scene: pack_dir_this_command = self.local_stored_path else: - pack_dir_this_command = os.path.join(self.local_stored_path, "gather_pack_{0}".format( - TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) + pack_dir_this_command = os.path.join(self.local_stored_path, "gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) self.report_file_path = os.path.join(pack_dir_this_command, "sql_plan_monitor_report.html") self.stdio.verbose("Use {0} as pack dir.".format(pack_dir_this_command)) DirectoryUtil.mkdir(path=pack_dir_this_command, stdio=self.stdio) @@ -136,20 +135,12 @@ def handle_plan_monitor_from_ob(cluster_name): self.stdio.verbose("TENANT_ID: %s " % tenant_id) sql_plan_monitor_svr_agg_template = self.sql_plan_monitor_svr_agg_template_sql() - sql_plan_monitor_svr_agg_v1 = str(sql_plan_monitor_svr_agg_template) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_ORDER_BY##", "PLAN_LINE_ID ASC, MAX_CHANGE_TIME ASC, SVR_IP, SVR_PORT") - sql_plan_monitor_svr_agg_v2 = str(sql_plan_monitor_svr_agg_template) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_ORDER_BY##", "SVR_IP, SVR_PORT, PLAN_LINE_ID") + sql_plan_monitor_svr_agg_v1 = str(sql_plan_monitor_svr_agg_template).replace("##REPLACE_TRACE_ID##", trace_id).replace("##REPLACE_ORDER_BY##", "PLAN_LINE_ID ASC, MAX_CHANGE_TIME ASC, SVR_IP, SVR_PORT") + sql_plan_monitor_svr_agg_v2 = str(sql_plan_monitor_svr_agg_template).replace("##REPLACE_TRACE_ID##", trace_id).replace("##REPLACE_ORDER_BY##", "SVR_IP, SVR_PORT, PLAN_LINE_ID") sql_plan_monitor_detail_template = self.sql_plan_monitor_detail_template_sql() - sql_plan_monitor_detail_v1 = str(sql_plan_monitor_detail_template) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_ORDER_BY##", "PLAN_LINE_ID ASC, SVR_IP, SVR_PORT, CHANGE_TS, PROCESS_NAME ASC") - sql_plan_monitor_detail_v2 = str(sql_plan_monitor_detail_template) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_ORDER_BY##", "PROCESS_NAME ASC, PLAN_LINE_ID ASC, FIRST_REFRESH_TIME ASC") + sql_plan_monitor_detail_v1 = str(sql_plan_monitor_detail_template).replace("##REPLACE_TRACE_ID##", trace_id).replace("##REPLACE_ORDER_BY##", "PLAN_LINE_ID ASC, SVR_IP, SVR_PORT, CHANGE_TS, PROCESS_NAME ASC") + 
sql_plan_monitor_detail_v2 = str(sql_plan_monitor_detail_template).replace("##REPLACE_TRACE_ID##", trace_id).replace("##REPLACE_ORDER_BY##", "PROCESS_NAME ASC, PLAN_LINE_ID ASC, FIRST_REFRESH_TIME ASC") sql_plan_monitor_dfo_op = self.sql_plan_monitor_dfo_op_sql(tenant_id, plan_id, trace_id) full_audit_sql_by_trace_id_sql = self.full_audit_sql_by_trace_id_sql(trace_id) @@ -190,12 +181,9 @@ def handle_plan_monitor_from_ob(cluster_name): # 输出本报告在租户下使用的 SQL self.__report("
...本报告在租户下使用的 SQL...")
-            self.__report("...DFO 级...%s..." % (
-                sql_plan_monitor_dfo_op))
-            self.__report("...机器级...%s..." % (
-                sql_plan_monitor_svr_agg_v1))
-            self.__report("...线程级...%s..." % (
-                sql_plan_monitor_detail_v1))
+            self.__report("...DFO 级...%s..." % (sql_plan_monitor_dfo_op))
+            self.__report("...机器级...%s..." % (sql_plan_monitor_svr_agg_v1))
+            self.__report("...线程级...%s...
" % (sql_plan_monitor_detail_v1)) t = time.localtime(time.time()) self.__report("报告生成时间: %s" % (time.strftime("%Y-%m-%d %H:%M:%S", t))) @@ -205,14 +193,13 @@ def handle_plan_monitor_from_ob(cluster_name): if resp["skip"]: return if resp["error"]: - gather_tuples.append((cluster_name, True, resp["error_msg"], 0, int(time.time() - st), - "Error:{0}".format(resp["error_msg"]), "")) + gather_tuples.append((cluster_name, True, resp["error_msg"], 0, int(time.time() - st), "Error:{0}".format(resp["error_msg"]), "")) return gather_pack_path_dict[cluster_name] = resp["gather_pack_path"] gather_tuples.append((cluster_name, False, "", int(time.time() - st), pack_dir_this_command)) if getattr(sys, 'frozen', False): - absPath = os.path.dirname(sys.executable) + absPath = os.path.dirname(sys.executable) else: absPath = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) cs_resources_path = os.path.join(absPath, "resources") @@ -228,7 +215,6 @@ def handle_plan_monitor_from_ob(cluster_name): FileUtil.write_append(os.path.join(pack_dir_this_command, "result_summary.txt"), summary_tuples) return gather_tuples, gather_pack_path_dict - def __init_db_conn(self, env): try: env_dict = StringUtils.parse_env(env) @@ -258,18 +244,15 @@ def __get_overall_summary(node_summary_tuple): is_err = tup[2] consume_time = tup[3] pack_path = tup[4] - summary_tab.append( - (cluster, "Error" if is_err else "Completed", "{0} s".format(int(consume_time)), pack_path)) - return "\nGather Sql Plan Monitor Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((cluster, "Error" if is_err else "Completed", "{0} s".format(int(consume_time)), pack_path)) + return "\nGather Sql Plan Monitor Summary:\n" + tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) def report_schema(self, sql): try: schemas = "" valid_words = [] if self.enable_dump_db: - words = [w.strip(',') for w in ("%s" % sql).split() if not ( - "[" in w or "=" in w or "|" in w or "(" in w or "--" in w or "]" in w or ")" in w or "*" in w or "/" in w or "%" in w or "'" in w or "-" in w or w.isdigit())] + words = [w.strip(',') for w in ("%s" % sql).split() if not ("[" in w or "=" in w or "|" in w or "(" in w or "--" in w or "]" in w or ")" in w or "*" in w or "/" in w or "%" in w or "'" in w or "-" in w or w.isdigit())] for t in words: if t in valid_words: continue @@ -295,8 +278,7 @@ def report_schema(self, sql): s = from_db_cursor(cursor) s.align = 'l' schemas = schemas + "
%s" % s
-            self.__report(
-                "...SCHEMA 信息...")
+            self.__report("...SCHEMA 信息...
") cursor.close() except Exception as e: self.stdio.exception("report table schema failed %s" % sql) @@ -314,8 +296,7 @@ def report_header(self): self.stdio.verbose("report header complete") def init_monitor_stat(self): - sql = "select ID,NAME,TYPE from " + ( - "SYS." if self.tenant_mode == "oracle" else "oceanbase.") + "v$sql_monitor_statname order by ID" + sql = "select ID,NAME,TYPE from " + ("SYS." if self.tenant_mode == "oracle" else "oceanbase.") + "v$sql_monitor_statname order by ID" data = self.sys_connector.execute_sql(sql) for item in data: self.STAT_NAME[item[0]] = {"type": item[2], "name": item[1]} @@ -330,8 +311,7 @@ def otherstat_detail_explain_item(self, item, n, v): elif self.STAT_NAME[item[n]]["type"] == 2: val = "%0.3fMB" % (item[n + 1] / 1024.0 / 1024) elif self.STAT_NAME[item[n]]["type"] == 3: - val = "%s.%06d" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(item[v] / 1000000)), - item[v] - (item[v] / 1000000) * 1000000) + val = "%s.%06d" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(item[v] / 1000000)), item[v] - (item[v] / 1000000) * 1000000) else: val = str(item[v]) except Exception as e: @@ -375,13 +355,23 @@ def dfo_otherstat_explain(self, item): def report_detail_graph_data(self, ident, cursor, title=''): data = "" data = data + "
...%s...
" % (title, ident) self.__report(data) @@ -389,14 +379,25 @@ def report_detail_graph_data(self, ident, cursor, title=''): def report_detail_graph_data_obversion4(self, ident, cursor, title=''): data = "" data = data + "
...%s...
" % (title, ident) self.__report(data) @@ -416,26 +417,46 @@ def report_dfo_agg_db_time_graph_data_obversion4(self, cursor, title=''): threads = item['THREAD_NUM'] my_cpu_time = item['MY_CPU_TIME'] my_io_time = item['MY_IO_TIME'] - otherstat = "my_db_time:%f, my_cpu_time:%f, my_io_time:%f" % ( - item['MY_DB_TIME'], item['MY_CPU_TIME'], item['MY_IO_TIME']) + otherstat = "my_db_time:%f, my_cpu_time:%f, my_io_time:%f" % (item['MY_DB_TIME'], item['MY_CPU_TIME'], item['MY_IO_TIME']) data = data + "{cpu:%f,io:%f,start:%f, end:%f, diff:%f, my_io_time:%f, my_cpu_time:%f, opid:%s, op:'%s', est_rows:0, rows:%d, tag:'db_time', tid: %d, depth:%d, otherstat:'%s'}," % ( - item['MY_CPU_TIME'], item['MY_IO_TIME'], start, end, diff, my_io_time, my_cpu_time, op_id, op, rows, - threads, depth, otherstat) + item['MY_CPU_TIME'], + item['MY_IO_TIME'], + start, + end, + diff, + my_io_time, + my_cpu_time, + op_id, + op, + rows, + threads, + depth, + otherstat, + ) data = data + "{start:0}];" - data = data + "
...%s...
" % (title); + data = data + "
...%s...
" % (title) self.__report(data) def report_dfo_agg_graph_data(self, cursor, title=''): data = "
...%s...
" % (title) self.__report(data) @@ -443,15 +464,27 @@ def report_dfo_agg_graph_data(self, cursor, title=''): def report_dfo_agg_graph_data_obversion4(self, cursor, title=''): data = "
...%s...
" % (title) self.__report(data) @@ -459,15 +492,23 @@ def report_dfo_agg_graph_data_obversion4(self, cursor, title=''): def report_dfo_sched_agg_graph_data(self, cursor, title=''): data = "
...%s...
" % (title) self.__report(data) @@ -475,20 +516,27 @@ def report_dfo_sched_agg_graph_data(self, cursor, title=''): def report_dfo_sched_agg_graph_data_obversion4(self, cursor, title=''): data = "
...%s...
" % (title) self.__report(data) @@ -497,14 +545,20 @@ def report_dfo_sched_agg_graph_data_obversion4(self, cursor, title=''): def report_svr_agg_graph_data(self, ident, cursor, title=''): data = "" data = data + "
...%s...
" % (title, ident) self.stdio.verbose("report SQL_PLAN_MONITOR SQC operator priority start, DATA: %s", data) @@ -513,16 +567,24 @@ def report_svr_agg_graph_data(self, ident, cursor, title=''): def report_svr_agg_graph_data_obversion4(self, ident, cursor, title=''): data = "" data = data + "
...%s...
" % (title, ident) self.stdio.verbose("report SQL_PLAN_MONITOR SQC operator priority start, DATA: %s", data) @@ -563,9 +625,7 @@ def tenant_mode_detected(self): major_version = int(version.split('.')[0]) self.sql_audit_name = "gv$ob_sql_audit" if major_version >= 4 else "gv$sql_audit" - self.plan_explain_name = ( - "gv$ob_plan_cache_plan_explain" if major_version >= 4 else "gv$plan_cache_plan_explain" - ) + self.plan_explain_name = "gv$ob_plan_cache_plan_explain" if major_version >= 4 else "gv$plan_cache_plan_explain" self.ob_major_version = major_version self.tenant_mode = "mysql" self.sys_database = "oceanbase" @@ -615,13 +675,9 @@ def copy_cs_resource(self, source_path, target_path): def sql_audit_by_trace_id_limit1_sql(self): if self.tenant_mode == 'mysql': - sql = str(GlobalSqlMeta().get_value(key="sql_audit_by_trace_id_limit1_mysql")) \ - .replace("##REPLACE_TRACE_ID##", self.trace_id).replace("##REPLACE_SQL_AUDIT_TABLE_NAME##", - self.sql_audit_name) + sql = str(GlobalSqlMeta().get_value(key="sql_audit_by_trace_id_limit1_mysql")).replace("##REPLACE_TRACE_ID##", self.trace_id).replace("##REPLACE_SQL_AUDIT_TABLE_NAME##", self.sql_audit_name) else: - sql = str(GlobalSqlMeta().get_value(key="sql_audit_by_trace_id_limit1_oracle")).replace( - "##REPLACE_TRACE_ID##", self.trace_id).replace("##REPLACE_SQL_AUDIT_TABLE_NAME##", - self.sql_audit_name) + sql = str(GlobalSqlMeta().get_value(key="sql_audit_by_trace_id_limit1_oracle")).replace("##REPLACE_TRACE_ID##", self.trace_id).replace("##REPLACE_SQL_AUDIT_TABLE_NAME##", self.sql_audit_name) return sql def select_sql_audit_by_trace_id_limit1(self): @@ -632,68 +688,66 @@ def select_sql_audit_by_trace_id_limit1(self): def plan_explain_sql(self, tenant_id, plan_id, svr_ip, svr_port): if self.tenant_mode == 'mysql': if self.ob_major_version >= 4: - sql = "select * from oceanbase.gv$ob_plan_cache_plan_explain where tenant_id = %s and " \ - "plan_id = %s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) + sql = "select * from oceanbase.gv$ob_plan_cache_plan_explain where tenant_id = %s and " "plan_id = %s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) else: - sql = "select * from oceanbase.gv$plan_cache_plan_explain where tenant_id = %s and " \ - "plan_id = %s and ip = '%s' and port = %s" % (tenant_id, plan_id, svr_ip, svr_port) + sql = "select * from oceanbase.gv$plan_cache_plan_explain where tenant_id = %s and " "plan_id = %s and ip = '%s' and port = %s" % (tenant_id, plan_id, svr_ip, svr_port) else: if self.ob_major_version >= 4: - sql = "select * from sys.gv$ob_plan_cache_plan_explain where tenant_id = %s and plan_id = " \ - "%s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) + sql = "select * from sys.gv$ob_plan_cache_plan_explain where tenant_id = %s and plan_id = " "%s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) else: - sql = "select * from sys.gv$plan_cache_plan_explain where tenant_id = %s and plan_id = " \ - "%s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) + sql = "select * from sys.gv$plan_cache_plan_explain where tenant_id = %s and plan_id = " "%s and svr_ip = '%s' and svr_port = %s" % (tenant_id, plan_id, svr_ip, svr_port) return sql def full_audit_sql_by_trace_id_sql(self, trace_id): if self.tenant_mode == 'mysql': - if self.ob_major_version >=4: - sql = "select /*+ sql_audit */ %s from oceanbase.%s where trace_id = '%s' " \ - "AND client_ip IS NOT NULL ORDER BY QUERY_SQL ASC, 
REQUEST_ID" % ( - GlobalSqlMeta().get_value(key="sql_audit_item_mysql_obversion4"), self.sql_audit_name, trace_id) + if self.ob_major_version >= 4: + sql = "select /*+ sql_audit */ %s from oceanbase.%s where trace_id = '%s' " "AND client_ip IS NOT NULL ORDER BY QUERY_SQL ASC, REQUEST_ID" % (GlobalSqlMeta().get_value(key="sql_audit_item_mysql_obversion4"), self.sql_audit_name, trace_id) else: - sql = "select /*+ sql_audit */ %s from oceanbase.%s where trace_id = '%s' " \ - "AND client_ip IS NOT NULL ORDER BY QUERY_SQL ASC, REQUEST_ID" % ( - GlobalSqlMeta().get_value(key="sql_audit_item_mysql"), self.sql_audit_name, trace_id) + sql = "select /*+ sql_audit */ %s from oceanbase.%s where trace_id = '%s' " "AND client_ip IS NOT NULL ORDER BY QUERY_SQL ASC, REQUEST_ID" % (GlobalSqlMeta().get_value(key="sql_audit_item_mysql"), self.sql_audit_name, trace_id) else: - if self.ob_major_version >=4: - sql = "select /*+ sql_audit */ %s from sys.%s where trace_id = '%s' AND " \ - "length(client_ip) > 4 ORDER BY REQUEST_ID" % ( - GlobalSqlMeta().get_value(key="sql_audit_item_oracle_obversion4"), self.sql_audit_name, trace_id) + if self.ob_major_version >= 4: + sql = "select /*+ sql_audit */ %s from sys.%s where trace_id = '%s' AND " "length(client_ip) > 4 ORDER BY REQUEST_ID" % (GlobalSqlMeta().get_value(key="sql_audit_item_oracle_obversion4"), self.sql_audit_name, trace_id) else: - sql = "select /*+ sql_audit */ %s from sys.%s where trace_id = '%s' AND " \ - "length(client_ip) > 4 ORDER BY REQUEST_ID" % ( - GlobalSqlMeta().get_value(key="sql_audit_item_oracle"), self.sql_audit_name, trace_id) + sql = "select /*+ sql_audit */ %s from sys.%s where trace_id = '%s' AND " "length(client_ip) > 4 ORDER BY REQUEST_ID" % (GlobalSqlMeta().get_value(key="sql_audit_item_oracle"), self.sql_audit_name, trace_id) return sql def sql_plan_monitor_dfo_op_sql(self, tenant_id, plan_id, trace_id): if self.tenant_mode == 'mysql': if self.ob_major_version >= 4: - sql = str(GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_mysql_obversion4")) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_PLAN_ID##", str(plan_id)) \ - .replace("##REPLACE_TENANT_ID##", str(tenant_id)) \ + sql = ( + str(GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_mysql_obversion4")) + .replace("##REPLACE_TRACE_ID##", trace_id) + .replace("##REPLACE_PLAN_ID##", str(plan_id)) + .replace("##REPLACE_TENANT_ID##", str(tenant_id)) .replace("##REPLACE_PLAN_EXPLAIN_TABLE_NAME##", self.plan_explain_name) + ) else: - sql = str(GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_mysql")) \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_PLAN_ID##", str(plan_id)) \ - .replace("##REPLACE_TENANT_ID##", str(tenant_id)) \ + sql = ( + str(GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_mysql")) + .replace("##REPLACE_TRACE_ID##", trace_id) + .replace("##REPLACE_PLAN_ID##", str(plan_id)) + .replace("##REPLACE_TENANT_ID##", str(tenant_id)) .replace("##REPLACE_PLAN_EXPLAIN_TABLE_NAME##", self.plan_explain_name) + ) else: if self.ob_major_version >= 4: - sql = GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_oracle_obversion4") \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_PLAN_ID##", str(plan_id)) \ - .replace("##REPLACE_TENANT_ID##", str(tenant_id)) \ + sql = ( + GlobalSqlMeta() + .get_value(key="sql_plan_monitor_dfo_op_oracle_obversion4") + .replace("##REPLACE_TRACE_ID##", trace_id) + .replace("##REPLACE_PLAN_ID##", str(plan_id)) + .replace("##REPLACE_TENANT_ID##", str(tenant_id)) 
.replace("##REPLACE_PLAN_EXPLAIN_TABLE_NAME##", self.plan_explain_name) + ) else: - sql = GlobalSqlMeta().get_value(key="sql_plan_monitor_dfo_op_oracle") \ - .replace("##REPLACE_TRACE_ID##", trace_id) \ - .replace("##REPLACE_PLAN_ID##", str(plan_id)) \ - .replace("##REPLACE_TENANT_ID##", str(tenant_id)) \ + sql = ( + GlobalSqlMeta() + .get_value(key="sql_plan_monitor_dfo_op_oracle") + .replace("##REPLACE_TRACE_ID##", trace_id) + .replace("##REPLACE_PLAN_ID##", str(plan_id)) + .replace("##REPLACE_TENANT_ID##", str(tenant_id)) .replace("##REPLACE_PLAN_EXPLAIN_TABLE_NAME##", self.plan_explain_name) + ) return sql @@ -727,8 +781,7 @@ def sql_plan_monitor_detail_template_sql(self): def report_sql_audit_details(self, sql): if self.enable_dump_db: full_audit_sql_result = self.sys_connector.execute_sql_pretty(sql) - self.__report( - "
… SQL_AUDIT 信息 …") + self.__report("… SQL_AUDIT 信息 …
") self.stdio.verbose("report full sql audit complete") # plan cache @@ -784,8 +837,7 @@ def report_plan_explain(self, db_name, raw_sql): def report_sql_plan_monitor_dfo_op(self, sql): data_sql_plan_monitor_dfo_op = self.sys_connector.execute_sql_pretty(sql) - self.__report( - "
… SQL_PLAN_MONITOR DFO 级调度时序汇总 …") + self.__report("… SQL_PLAN_MONITOR DFO 级调度时序汇总 …
") self.stdio.verbose("report SQL_PLAN_MONITOR DFO complete") cursor_sql_plan_monitor_dfo_op = self.sys_connector.execute_sql_return_cursor_dictionary(sql) if self.ob_major_version >= 4: @@ -803,18 +855,18 @@ def report_sql_plan_monitor_dfo_op(self, sql): def report_sql_plan_monitor_svr_agg(self, sql_plan_monitor_svr_agg_v1, sql_plan_monitor_svr_agg_v2): cursor_sql_plan_monitor_svr_agg = self.sys_connector.execute_sql_return_cursor(sql_plan_monitor_svr_agg_v1) self.__report( - "
… SQL_PLAN_MONITOR SQC 级汇总 …") + "… SQL_PLAN_MONITOR SQC 级汇总 …
" + ) self.stdio.verbose("report SQL_PLAN_MONITOR SQC complete") - cursor_sql_plan_monitor_svr_agg_v1 = self.sys_connector.execute_sql_return_cursor_dictionary( - sql_plan_monitor_svr_agg_v2) + cursor_sql_plan_monitor_svr_agg_v1 = self.sys_connector.execute_sql_return_cursor_dictionary(sql_plan_monitor_svr_agg_v2) if self.ob_major_version >= 4: self.report_svr_agg_graph_data_obversion4('svr_agg_serial_v1', cursor_sql_plan_monitor_svr_agg_v1, '算子优先视图') else: self.report_svr_agg_graph_data('svr_agg_serial_v1', cursor_sql_plan_monitor_svr_agg_v1, '算子优先视图') self.stdio.verbose("report SQL_PLAN_MONITOR SQC operator priority complete") - cursor_data_sql_plan_monitor_svr_agg_v2 = self.sys_connector.execute_sql_return_cursor_dictionary( - sql_plan_monitor_svr_agg_v2) + cursor_data_sql_plan_monitor_svr_agg_v2 = self.sys_connector.execute_sql_return_cursor_dictionary(sql_plan_monitor_svr_agg_v2) if self.ob_major_version >= 4: self.report_svr_agg_graph_data('svr_agg_serial_v2', cursor_data_sql_plan_monitor_svr_agg_v2, '机器优先视图') else: @@ -824,15 +876,14 @@ def report_sql_plan_monitor_svr_agg(self, sql_plan_monitor_svr_agg_v1, sql_plan_ def report_sql_plan_monitor_detail_operator_priority(self, sql): cursor_sql_plan_monitor_detail = self.sys_connector.execute_sql_return_cursor(sql) self.__report( - "
… SQL_PLAN_MONITOR 详情 …") + "… SQL_PLAN_MONITOR 详情 …
" + ) self.stdio.verbose("report SQL_PLAN_MONITOR details complete") cursor_sql_plan_monitor_detail_v1 = self.sys_connector.execute_sql_return_cursor_dictionary(sql) if self.ob_major_version >= 4: - self.report_detail_graph_data_obversion4("detail_serial_v1", - cursor_sql_plan_monitor_detail_v1, - '算子优先视图') + self.report_detail_graph_data_obversion4("detail_serial_v1", cursor_sql_plan_monitor_detail_v1, '算子优先视图') else: self.report_detail_graph_data("detail_serial_v1", cursor_sql_plan_monitor_detail_v1, '算子优先视图') self.stdio.verbose("report SQL_PLAN_MONITOR details operator priority complete") @@ -840,9 +891,7 @@ def report_sql_plan_monitor_detail_operator_priority(self, sql): def reportsql_plan_monitor_detail_svr_priority(self, sql): cursor_sql_plan_monitor_detail_v2 = self.sys_connector.execute_sql_return_cursor_dictionary(sql) if self.ob_major_version >= 4: - self.report_detail_graph_data_obversion4("detail_serial_v2", - cursor_sql_plan_monitor_detail_v2, - '线程优先视图') + self.report_detail_graph_data_obversion4("detail_serial_v2", cursor_sql_plan_monitor_detail_v2, '线程优先视图') else: self.report_detail_graph_data("detail_serial_v2", cursor_sql_plan_monitor_detail_v2, '线程优先视图') self.stdio.verbose("report SQL_PLAN_MONITOR details server priority complete") diff --git a/handler/gather/gather_scenes.py b/handler/gather/gather_scenes.py index 6fdc9f01..1717516f 100644 --- a/handler/gather/gather_scenes.py +++ b/handler/gather/gather_scenes.py @@ -47,8 +47,8 @@ def __init__(self, context, gather_pack_dir='./', tasks_base_path="~/.obdiag/gat self.tasks_base_path = tasks_base_path self.task_type = task_type self.variables = {} - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -154,7 +154,7 @@ def __init_variables(self): "observer_data_dir": self.ob_nodes[0].get("home_path") if self.ob_nodes and self.ob_nodes[0].get("home_path") else "", "obproxy_data_dir": self.obproxy_nodes[0].get("home_path") if self.obproxy_nodes and self.obproxy_nodes[0].get("home_path") else "", "from_time": self.from_time_str, - "to_time": self.to_time_str + "to_time": self.to_time_str, } self.stdio.verbose("gather scene variables: {0}".format(self.variables)) except Exception as e: diff --git a/handler/gather/gather_sysstat.py b/handler/gather/gather_sysstat.py index 99b86feb..2b9075aa 100644 --- a/handler/gather/gather_sysstat.py +++ b/handler/gather/gather_sysstat.py @@ -42,8 +42,8 @@ def __init__(self, context, gather_pack_dir='./', is_scene=False): self.remote_stored_path = None self.is_scene = is_scene self.config_path = const.DEFAULT_CONFIG_PATH - if self.context.get_variable("gather_timestamp", None) : - self.gather_timestamp=self.context.get_variable("gather_timestamp") + if self.context.get_variable("gather_timestamp", None): + self.gather_timestamp = self.context.get_variable("gather_timestamp") else: self.gather_timestamp = TimeUtils.get_current_us_timestamp() @@ -89,7 +89,7 @@ def handle(self): if self.is_scene: pack_dir_this_command = self.local_stored_path else: - pack_dir_this_command = os.path.join(self.local_stored_path,"gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) + pack_dir_this_command = os.path.join(self.local_stored_path, 
"gather_pack_{0}".format(TimeUtils.timestamp_to_filename_time(self.gather_timestamp))) self.stdio.verbose("Use {0} as pack dir.".format(pack_dir_this_command)) gather_tuples = [] @@ -99,11 +99,7 @@ def handle_from_node(node): file_size = "" if len(resp["error"]) == 0: file_size = os.path.getsize(resp["gather_pack_path"]) - gather_tuples.append((node.get("ip"), False, resp["error"], - file_size, - int(time.time() - st), - resp["gather_pack_path"])) - + gather_tuples.append((node.get("ip"), False, resp["error"], file_size, int(time.time() - st), resp["gather_pack_path"])) if self.is_ssh: for node in self.nodes: @@ -121,11 +117,7 @@ def handle_from_node(node): FileUtil.write_append(os.path.join(pack_dir_this_command, "result_summary.txt"), summary_tuples) def __handle_from_node(self, node, local_stored_path): - resp = { - "skip": False, - "error": "", - "gather_pack_path": "" - } + resp = {"skip": False, "error": "", "gather_pack_path": ""} remote_ip = node.get("ip") if self.is_ssh else NetUtils.get_inner_ip() remote_user = node.get("ssh_username") remote_password = node.get("ssh_password") @@ -138,12 +130,9 @@ def __handle_from_node(self, node, local_stored_path): remote_dir_full_path = "/tmp/{0}".format(remote_dir_name) ssh_failed = False try: - ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key,node, self.stdio) + ssh_helper = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format( - remote_user, - remote_ip, - self.config_path)) + self.stdio.exception("ssh {0}@{1}: failed, Please check the {2}".format(remote_user, remote_ip, self.config_path)) ssh_failed = True resp["skip"] = True resp["error"] = "Please check the {0}".format(self.config_path) @@ -164,8 +153,7 @@ def __handle_from_node(self, node, local_stored_path): file_size = get_file_size(self.is_ssh, ssh_helper, remote_file_full_path, self.stdio) if int(file_size) < self.file_size_limit: local_file_path = "{0}/{1}.zip".format(local_stored_path, remote_dir_name) - self.stdio.verbose( - "local file path {0}...".format(local_file_path)) + self.stdio.verbose("local file path {0}...".format(local_file_path)) download_file(self.is_ssh, ssh_helper, remote_file_full_path, local_file_path, self.stdio) resp["error"] = "" else: @@ -177,8 +165,7 @@ def __handle_from_node(self, node, local_stored_path): def __gather_dmesg_current_info(self, ssh_helper, gather_path): try: - dmesg_cmd = "dmesg --ctime > {gather_path}/dmesg.human.current".format( - gather_path=gather_path) + dmesg_cmd = "dmesg --ctime > {gather_path}/dmesg.human.current".format(gather_path=gather_path) self.stdio.verbose("gather dmesg current info, run cmd = [{0}]".format(dmesg_cmd)) SshClient(self.stdio).run(ssh_helper, dmesg_cmd) if self.is_ssh else LocalClient(self.stdio).run(dmesg_cmd) except: @@ -205,11 +192,10 @@ def __tsar_exit(self, ssh_helper): return True except: self.stdio.warn("tsar not found") - + def __gather_cpu_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --cpu -i 1 > {gather_path}/one_day_cpu_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --cpu -i 1 > {gather_path}/one_day_cpu_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather cpu info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else 
LocalClient(self.stdio).run(tsar_cmd) except: @@ -217,8 +203,7 @@ def __gather_cpu_info(self, ssh_helper, gather_path): def __gather_mem_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --mem -i 1 > {gather_path}/one_day_mem_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --mem -i 1 > {gather_path}/one_day_mem_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather memory info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else LocalClient(self.stdio).run(tsar_cmd) except: @@ -226,8 +211,7 @@ def __gather_mem_info(self, ssh_helper, gather_path): def __gather_swap_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --swap --load > {gather_path}/tsar_swap_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --swap --load > {gather_path}/tsar_swap_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather swap info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else LocalClient(self.stdio).run(tsar_cmd) except: @@ -235,8 +219,7 @@ def __gather_swap_info(self, ssh_helper, gather_path): def __gather_io_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --io > {gather_path}/tsar_io_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --io > {gather_path}/tsar_io_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather io info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else LocalClient(self.stdio).run(tsar_cmd) except: @@ -244,8 +227,7 @@ def __gather_io_info(self, ssh_helper, gather_path): def __gather_traffic_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --traffic > {gather_path}/tsar_traffic_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --traffic > {gather_path}/tsar_traffic_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather traffic info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else LocalClient(self.stdio).run(tsar_cmd) except: @@ -253,8 +235,7 @@ def __gather_traffic_info(self, ssh_helper, gather_path): def __gather_tcp_udp_info(self, ssh_helper, gather_path): try: - tsar_cmd = "tsar --tcp --udp -d 1 > {gather_path}/tsar_tcp_udp_data.txt".format( - gather_path=gather_path) + tsar_cmd = "tsar --tcp --udp -d 1 > {gather_path}/tsar_tcp_udp_data.txt".format(gather_path=gather_path) self.stdio.verbose("gather tcp and udp info on server {0}, run cmd = [{1}]".format(ssh_helper.get_name(), tsar_cmd)) SshClient(self.stdio).run(ssh_helper, tsar_cmd) if self.is_ssh else LocalClient(self.stdio).run(tsar_cmd) except: @@ -274,7 +255,5 @@ def __get_overall_summary(node_summary_tuple): format_file_size = FileUtil.size_format(num=file_size, output_str=True) except: format_file_size = FileUtil.size_format(num=0, output_str=True) - summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", - format_file_size, "{0} s".format(int(consume_time)), pack_path)) - return "\nGather Sysstat Summary:\n" + \ - tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) + summary_tab.append((node, "Error:" + tup[2] if is_err else "Completed", format_file_size, "{0} s".format(int(consume_time)), pack_path)) + return "\nGather Sysstat Summary:\n" + 
tabulate.tabulate(summary_tab, headers=field_names, tablefmt="grid", showindex=False) diff --git a/handler/gather/scenes/__init__.py b/handler/gather/scenes/__init__.py index 40982804..94eb8dd1 100644 --- a/handler/gather/scenes/__init__.py +++ b/handler/gather/scenes/__init__.py @@ -14,4 +14,4 @@ @time: 2023/12/26 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/handler/gather/scenes/base.py b/handler/gather/scenes/base.py index 11bc4de5..abc4daaf 100644 --- a/handler/gather/scenes/base.py +++ b/handler/gather/scenes/base.py @@ -25,7 +25,7 @@ class SceneBase(SafeStdio): - def __init__(self,context, scene, report_dir=None, scene_variable_dict={}, env={}, mode="yaml", task_type="observer"): + def __init__(self, context, scene, report_dir=None, scene_variable_dict={}, env={}, mode="yaml", task_type="observer"): self.context = context self.stdio = context.stdio self.scene_variable_dict = scene_variable_dict @@ -50,7 +50,7 @@ def execute(self): self.__execute_yaml_mode(self.obproxy_nodes) elif self.mode == "code": self.__execute_code_mode() - else: + else: self.stdio.error("Unsupported mode. SKIP") raise Exception("Unsupported mode. SKIP") except Exception as e: @@ -62,7 +62,7 @@ def __execute_yaml_mode(self, nodes): self.stdio.verbose("Unadapted by version. SKIP") return "Unadapted by version.SKIP" self.stdio.verbose("filter_by_version is return {0}".format(steps_nu)) - if len(nodes)==0: + if len(nodes) == 0: self.stdio.warn("node is not exist") return node_number = 0 @@ -74,7 +74,7 @@ def __execute_yaml_mode(self, nodes): for step in steps["steps"]: try: self.stdio.verbose("step nu: {0}".format(nu)) - if len(self.cluster)==0: + if len(self.cluster) == 0: self.stdio.error("cluster is not exist") return step_run = Base(self.context, step, node, self.cluster, self.report_dir, self.scene_variable_dict, self.env, node_number) @@ -105,4 +105,3 @@ def __execute_code_mode(self): self.stdio.verbose("hard code scene {0} execute end".format(self.scene["name"])) except Exception as e: self.stdio.error("hard code scene execute failed, error :{0}".format(e)) - diff --git a/handler/gather/scenes/cpu_high.py b/handler/gather/scenes/cpu_high.py index 87923419..4c6624d1 100644 --- a/handler/gather/scenes/cpu_high.py +++ b/handler/gather/scenes/cpu_high.py @@ -22,6 +22,7 @@ from handler.gather.gather_perf import GatherPerfHandler from handler.gather.gather_log import GatherLogHandler + class CPUHighScene(SafeStdio): def __init__(self, context, report_path, task_variable_dict=None, env={}): self.context = context diff --git a/handler/gather/scenes/list.py b/handler/gather/scenes/list.py index a83bde28..d4a970cf 100644 --- a/handler/gather/scenes/list.py +++ b/handler/gather/scenes/list.py @@ -22,6 +22,7 @@ from handler.gather.scenes.register import hardcode_scene_list from common.tool import Util + class GatherScenesListHandler(SafeStdio): def __init__(self, context, yaml_tasks_base_path="~/.obdiag/gather/tasks/"): self.context = context @@ -64,7 +65,7 @@ def get_all_yaml_tasks(self): self.other_tasks[task_name] = task_data except Exception as e: self.stdio.error("get all yaml task failed, error: ", e) - + def get_all_code_tasks(self): try: for scene in hardcode_scene_list: @@ -78,7 +79,12 @@ def get_all_code_tasks(self): self.stdio.error("get all hard code task failed, error: ", e) def __get_hardcode_task(self, scene): - return {"name": scene.name, "command": scene.command, "info_en": scene.info_en, "info_cn": scene.info_cn,} + return { + "name": scene.name, + "command": 
scene.command, + "info_en": scene.info_en, + "info_cn": scene.info_cn, + } def get_one_yaml_task(self, name): try: @@ -92,7 +98,7 @@ def get_one_yaml_task(self, name): if name == task_name: task_data = YamlUtils.read_yaml_data(os.path.join(root, file)) task_data["name"] = task_name - return task_data + return task_data except Exception as e: self.stdio.error("get one yaml task failed, error: ", e) @@ -125,5 +131,3 @@ def print_scene_data(self): sorted_observer_tasks_dict = {k: v for k, v in sorted_observer_tasks} Util.print_title("Observer Problem Gather Scenes") Util.print_scene(sorted_observer_tasks_dict) - - \ No newline at end of file diff --git a/handler/gather/scenes/px_collect_log.py b/handler/gather/scenes/px_collect_log.py index fb6673fb..1fbc1bec 100644 --- a/handler/gather/scenes/px_collect_log.py +++ b/handler/gather/scenes/px_collect_log.py @@ -20,6 +20,7 @@ from common.ssh import SshHelper import datetime + class SQLPXCollectLogScene(object): def __init__(self, context, scene_name, report_path, task_variable_dict=None, env={}): self.context = context @@ -48,11 +49,11 @@ def execute(self): self.__gather_log() # 3. 分析日志,提取SQC地址 self.__analyze_log() - # 解压日志到一个新的目录 - # 分析日志,提取关键字地址 + # 解压日志到一个新的目录 + # 分析日志,提取关键字地址 # 4. 收集SQC机器的日志 - # 如果存在有效地址,则删除本地被解压的日志和压缩包,重新收集并存储于当前地址 - # 否则不存在,则删除被解压的目录 + # 如果存在有效地址,则删除本地被解压的日志和压缩包,重新收集并存储于当前地址 + # 否则不存在,则删除被解压的目录 if len(self.sql_task_node) != 0: self.stdio.verbose("delete file start") delete_file_in_folder(False, None, self.report_path, self.stdio) @@ -83,7 +84,7 @@ def __analyze_log(self): uzip_dir = self.report_path uzip_dir_local(uzip_dir, self.stdio) ip_port_str = analyze_log_get_sqc_addr(uzip_dir, self.stdio) - if ip_port_str is None or len(ip_port_str) == 0: + if ip_port_str is None or len(ip_port_str) == 0: self.stdio.warn("No logs were found indicating that the SQC interrupted the QC; the error occurred locally in the QC.") self.sql_task_node = [] return @@ -105,7 +106,7 @@ def __analyze_log(self): except Exception as e: self.stdio.exception("analyze observer log failed, error: {0}".format(e)) raise Exception("analyze observer log failed, error: {0}".format(e)) - + def __find_home_path_by_port(self, ip_str, internal_port_str): for node in self.ob_nodes: if node["ip"] == ip_str: @@ -117,17 +118,15 @@ def __find_home_path_by_port(self, ip_str, internal_port_str): try: ssh = SshHelper(self.is_ssh, remote_ip, remote_user, remote_password, remote_port, remote_private_key, node, self.stdio) except Exception as e: - self.stdio.error("ssh {0}@{1}: failed, Please check the config".format( - remote_user, - remote_ip)) + self.stdio.error("ssh {0}@{1}: failed, Please check the config".format(remote_user, remote_ip)) return find_home_path_by_port(True, ssh, internal_port_str, self.stdio) def parse_trace_id(self, trace_id): id_ = trace_id.split('-')[0].split('Y')[1] uval = int(id_, 16) - ip = uval & 0xffffffff - port = (uval >> 32) & 0xffff - ip_str = "{}.{}.{}.{}".format((ip >> 24) & 0xff, (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff) + ip = uval & 0xFFFFFFFF + port = (uval >> 32) & 0xFFFF + ip_str = "{}.{}.{}.{}".format((ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF) origin_ip_port = "{}:{}".format(ip_str, port) return origin_ip_port @@ -145,8 +144,8 @@ def parse_trace_id2(self, trace_id): return origin_ip_port2 def analyze_traceid(self, trace_id): - if (len(trace_id) < 50): - if (trace_id[0] == 'Y'): + if len(trace_id) < 50: + if trace_id[0] == 'Y': return self.parse_trace_id(trace_id) else: return 
self.parse_trace_id2(trace_id) @@ -183,4 +182,4 @@ def __parse_env(self): self.stdio.verbose("QC addr analyze end {0}".format(self.sql_task_node)) return True except Exception as e: - self.stdio.error("Parse env fail. Exception : {0} .".format(e)) \ No newline at end of file + self.stdio.error("Parse env fail. Exception : {0} .".format(e)) diff --git a/handler/gather/scenes/register.py b/handler/gather/scenes/register.py index a65b4580..d5657c62 100644 --- a/handler/gather/scenes/register.py +++ b/handler/gather/scenes/register.py @@ -19,6 +19,7 @@ from dataclasses import dataclass import datetime + @dataclass class RegisteredHardCodeScene: name: str @@ -26,29 +27,17 @@ class RegisteredHardCodeScene: info_en: str info_cn: str + # 对于不适合通过yaml编排的复杂场景可以用这个类注册,注册后通过代码实现采集逻辑 db_connect = '-h127.0.0.1 -P2881 -utest@test -p****** -Dtest' trace_id = 'Yxx' estimated_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') hardcode_scene_list = [ - RegisteredHardCodeScene( - 'observer.perf_sql', - f'''obdiag gather scene run --scene=observer.perf_sql --env "{{db_connect='{db_connect}', trace_id='{trace_id}'}}"''', - '[SQL performance problem]', - '[SQL性能问题]' - ), - RegisteredHardCodeScene( - 'observer.sql_err', - f'''obdiag gather scene run --scene=observer.sql_err --env "{{db_connect='{db_connect}', trace_id='{trace_id}'}}"''', - '[SQL execution error]', - '[SQL 执行出错]' - ), + RegisteredHardCodeScene('observer.perf_sql', f'''obdiag gather scene run --scene=observer.perf_sql --env "{{db_connect='{db_connect}', trace_id='{trace_id}'}}"''', '[SQL performance problem]', '[SQL性能问题]'), + RegisteredHardCodeScene('observer.sql_err', f'''obdiag gather scene run --scene=observer.sql_err --env "{{db_connect='{db_connect}', trace_id='{trace_id}'}}"''', '[SQL execution error]', '[SQL 执行出错]'), RegisteredHardCodeScene('observer.cpu_high', 'obdiag gather scene run --scene=observer.cpu_high', '[High CPU]', '[CPU高]'), RegisteredHardCodeScene( - 'observer.px_collect_log', - f'''obdiag gather scene run --scene=observer.px_collect_log --env "{{trace_id='{trace_id}', estimated_time='{estimated_time}'}}"''', - '[Collect error source node logs for SQL PX]', - '[SQL PX 收集报错源节点日志]' + 'observer.px_collect_log', f'''obdiag gather scene run --scene=observer.px_collect_log --env "{{trace_id='{trace_id}', estimated_time='{estimated_time}'}}"''', '[Collect error source node logs for SQL PX]', '[SQL PX 收集报错源节点日志]' ), ] diff --git a/handler/gather/scenes/sql_problem.py b/handler/gather/scenes/sql_problem.py index 2c210dd8..0862c6d1 100644 --- a/handler/gather/scenes/sql_problem.py +++ b/handler/gather/scenes/sql_problem.py @@ -26,7 +26,7 @@ class SQLProblemScene(SafeStdio): def __init__(self, context, scene_name, report_path, task_variable_dict=None, env={}): self.context = context - self.stdio=context.stdio + self.stdio = context.stdio if task_variable_dict is None: self.task_variable_dict = {} else: @@ -57,15 +57,14 @@ def __gather_log(self): self.stdio.error("gather observer log failed, error: {0}".format(e)) raise Exception("gather observer log failed, error: {0}".format(e)) - def __gather_obproxy_log(self): try: self.stdio.verbose("gather obproxy log start") handler = GatherObProxyLogHandler(self.context, gather_pack_dir=self.report_path, is_scene=True) if self.scene_name: - if self.scene_name == "observer.sql_err": + if self.scene_name == "observer.sql_err": pass - elif self.scene_name == "observer.perf_sql": + elif self.scene_name == "observer.perf_sql": self.context.set_variable('gather_scope', self.trace_id) else: 
self.stdio.warn("unsupported scene {0}".format(self.scene_name)) @@ -104,7 +103,7 @@ def __parse_env(self): return True else: self.stdio.error("option env [--trace_id] not found, please run 'obdiag gather scene list' to check usage") - return False + return False else: self.stdio.error("option env not found, please run 'obdiag gather scene list' to check usage") return False diff --git a/handler/gather/step/__init__.py b/handler/gather/step/__init__.py index 4dea303d..d58f83d8 100644 --- a/handler/gather/step/__init__.py +++ b/handler/gather/step/__init__.py @@ -14,4 +14,4 @@ @time: 2024/01/05 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/handler/gather/step/base.py b/handler/gather/step/base.py index f46c6244..064014de 100644 --- a/handler/gather/step/base.py +++ b/handler/gather/step/base.py @@ -25,9 +25,9 @@ class Base(SafeStdio): - def __init__(self, context, step, node, cluster, report_path, task_variable_dict=None, env={}, node_number = 1): + def __init__(self, context, step, node, cluster, report_path, task_variable_dict=None, env={}, node_number=1): self.context = context - self.stdio=context.stdio + self.stdio = context.stdio if task_variable_dict is None: self.task_variable_dict = {} else: @@ -41,11 +41,11 @@ def __init__(self, context, step, node, cluster, report_path, task_variable_dict def execute(self): self.stdio.verbose("step: {0}".format(self.step)) - no_cluster_name_msg="(Please set ob_cluster_name or obproxy_cluster_name)" + no_cluster_name_msg = "(Please set ob_cluster_name or obproxy_cluster_name)" try: if "ip" in self.node: self.task_variable_dict["remote_ip"] = self.node["ip"] - elif "ssh_type" in self.node and self.node["ssh_type"]=="docker": + elif "ssh_type" in self.node and self.node["ssh_type"] == "docker": self.stdio.verbose("execute ssh_type is docker") self.task_variable_dict["remote_ip"] = docker.from_env().containers.get(self.node["container_name"]).attrs['NetworkSettings']['Networks']['bridge']["IPAddress"] self.task_variable_dict["remote_home_path"] = self.node["home_path"] @@ -82,7 +82,7 @@ def execute(self): self.context.set_variable('filter_nodes_list', [self.node]) handler.handle() else: - self.stdio.error("the type not support: {0}" .format(self.step["type"])) + self.stdio.error("the type not support: {0}".format(self.step["type"])) except Exception as e: self.stdio.error("StepBase handler.execute fail, error: {0}".format(e)) if self.step["type"] == "sql": diff --git a/handler/gather/step/sql.py b/handler/gather/step/sql.py index 9105590e..2691daf9 100644 --- a/handler/gather/step/sql.py +++ b/handler/gather/step/sql.py @@ -25,21 +25,16 @@ class StepSQLHandler(SafeStdio): def __init__(self, context, step, ob_cluster, report_path, task_variable_dict): self.context = context - self.stdio=context.stdio + self.stdio = context.stdio try: self.ob_cluster = ob_cluster self.ob_cluster_name = ob_cluster.get("cluster_name") self.tenant_mode = None self.sys_database = None self.database = None - self.ob_connector = OBConnector(ip=ob_cluster.get("db_host"), - port=ob_cluster.get("db_port"), - username=ob_cluster.get("tenant_sys").get("user"), - password=ob_cluster.get("tenant_sys").get("password"), - stdio=self.stdio, - timeout=10000) + self.ob_connector = OBConnector(ip=ob_cluster.get("db_host"), port=ob_cluster.get("db_port"), username=ob_cluster.get("tenant_sys").get("user"), password=ob_cluster.get("tenant_sys").get("password"), stdio=self.stdio, timeout=10000) except Exception as e: - self.stdio.error("StepSQLHandler init 
fail. Please check the OBCLUSTER conf. OBCLUSTER: {0} Exception : {1} .".format(ob_cluster,e)) + self.stdio.error("StepSQLHandler init fail. Please check the OBCLUSTER conf. OBCLUSTER: {0} Exception : {1} .".format(ob_cluster, e)) self.task_variable_dict = task_variable_dict self.enable_dump_db = False self.enable_fast_dump = False @@ -93,4 +88,4 @@ def report(self, sql, column_names, data): f.write('\n\n' + 'obclient > ' + sql + '\n') f.write(formatted_table) except Exception as e: - self.stdio.error("report sql result to file: {0} failed, error: {1}".format(self.report_file_path, str(e))) \ No newline at end of file + self.stdio.error("report sql result to file: {0} failed, error: {1}".format(self.report_file_path, str(e))) diff --git a/handler/gather/step/ssh.py b/handler/gather/step/ssh.py index 89f2a879..e2ea4c4e 100644 --- a/handler/gather/step/ssh.py +++ b/handler/gather/step/ssh.py @@ -24,7 +24,7 @@ class SshHandler(SafeStdio): def __init__(self, context, step, node, report_path, task_variable_dict): self.context = context - self.stdio=context.stdio + self.stdio = context.stdio self.ssh_report_value = None self.parameters = None self.step = step @@ -64,7 +64,7 @@ def update_step_variable_dict(self): def report(self, command, data): try: with open(self.report_file_path, 'a', encoding='utf-8') as f: - f.write('\n\n' + '['+ self.node.get("ip") + '] shell > ' + command + '\n') + f.write('\n\n' + '[' + self.node.get("ip") + '] shell > ' + command + '\n') f.write(data + '\n') except Exception as e: self.stdio.error("report sql result to file: {0} failed, error: ".format(self.report_file_path)) diff --git a/handler/meta/__init__.py b/handler/meta/__init__.py index 29f4a072..d3a64b03 100644 --- a/handler/meta/__init__.py +++ b/handler/meta/__init__.py @@ -14,4 +14,4 @@ @time: 2023/9/20 @file: __init__.py @desc: -""" \ No newline at end of file +""" diff --git a/handler/meta/check_meta.py b/handler/meta/check_meta.py index 84edf924..804bcd03 100644 --- a/handler/meta/check_meta.py +++ b/handler/meta/check_meta.py @@ -49,5 +49,5 @@ def rm_value(self, key): else echo "false" fi - ''' + ''', ) diff --git a/handler/meta/html_meta.py b/handler/meta/html_meta.py index e8388a59..302de58c 100644 --- a/handler/meta/html_meta.py +++ b/handler/meta/html_meta.py @@ -200,7 +200,7 @@ def rm_value(self, key): topnode.get(0).innerHTML = table; } - ''' + ''', ) html_dict.set_value( @@ -235,7 +235,7 @@ def rm_value(self, key): - ''' + ''', ) html_dict.set_value( @@ -285,5 +285,5 @@ def rm_value(self, key): - ''' + ''', ) diff --git a/handler/meta/ob_error.py b/handler/meta/ob_error.py index 45bb0704..a9c074fa 100644 --- a/handler/meta/ob_error.py +++ b/handler/meta/ob_error.py @@ -10,981 +10,983 @@ # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. 
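The hunk below reflows OB_RET_DICT, the table obdiag uses to translate OceanBase return codes. Each key is an error code as a negative-number string, and each value is a four-element list: [error name, message, category, suggested action]. A minimal sketch of how such a table can be consumed, assuming a hypothetical helper name explain_ob_error and reusing two sample entries copied from the table itself (illustrative only, not part of this patch):

# Sketch only -- not part of this patch. SAMPLE_OB_RET_DICT holds two entries
# copied verbatim from OB_RET_DICT below; explain_ob_error is a hypothetical helper.
SAMPLE_OB_RET_DICT = {
    "-4012": ["OB_TIMEOUT", "Timeout", "Internal Error", "Contact OceanBase Support"],
    "-4013": ["OB_ALLOCATE_MEMORY_FAILED", "No memory or reach tenant memory limit", "Internal Error", "Contact OceanBase Support"],
}

def explain_ob_error(code):
    # The table keys are strings such as "-4012", so normalize an int code first.
    entry = SAMPLE_OB_RET_DICT.get(str(code))
    if entry is None:
        return "unknown OceanBase error code: {0}".format(code)
    name, message, category, advice = entry
    return "{0} ({1}): {2} [{3}] -> {4}".format(code, name, message, category, advice)

print(explain_ob_error(-4012))
# -4012 (OB_TIMEOUT): Timeout [Internal Error] -> Contact OceanBase Support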
-OB_RET_DICT = {"-4000": ["OB_ERROR", "Common error", "Internal Error", "Contact OceanBase Support"], - "-4001": ["OB_OBJ_TYPE_ERROR", "Object type error", "Internal Error", "Contact OceanBase Support"], - "-4002": ["OB_INVALID_ARGUMENT", "Invalid argument", "Internal Error", "Contact OceanBase Support"], - "-4003": ["OB_ARRAY_OUT_OF_RANGE", "Array index out of range", "Internal Error", "Contact OceanBase Support"], - "-4004": ["OB_SERVER_LISTEN_ERROR", "Failed to listen to the port", "Internal Error", "Contact OceanBase Support"], - "-4005": ["OB_INIT_TWICE", "The object is initialized twice", "Internal Error", "Contact OceanBase Support"], - "-4006": ["OB_NOT_INIT", "The object is not initialized", "Internal Error", "Contact OceanBase Support"], - "-4007": ["OB_NOT_SUPPORTED", "Not supported feature or function. Compatible Error Code: MySQL: 1235(0A000)", "Internal Error", "Contact OceanBase Support"], - "-4008": ["OB_ITER_END", "End of iteration", "Internal Error", "Contact OceanBase Support"], - "-4009": ["OB_IO_ERROR", "IO error", "Internal Error", "Contact OceanBase Support"], - "-4010": ["OB_ERROR_FUNC_VERSION", "Wrong RPC command version", "Internal Error", "Contact OceanBase Support"], - "-4011": ["OB_PACKET_NOT_SENT", "Can not send packet", "Internal Error", "Contact OceanBase Support"], - "-4012": ["OB_TIMEOUT", "Timeout", "Internal Error", "Contact OceanBase Support"], - "-4013": ["OB_ALLOCATE_MEMORY_FAILED", "No memory or reach tenant memory limit", "Internal Error", "Contact OceanBase Support"], - "-4014": ["OB_INNER_STAT_ERROR", "Inner state error", "Internal Error", "Contact OceanBase Support"], - "-4015": ["OB_ERR_SYS", "System error", "Internal Error", "Contact OceanBase Support"], - "-4016": ["OB_ERR_UNEXPECTED", "Internal error", "Internal Error", "Contact OceanBase Support"], - "-4017": ["OB_ENTRY_EXIST", "Entry already exist", "Internal Error", "Contact OceanBase Support"], - "-4018": ["OB_ENTRY_NOT_EXIST", "Entry not exist", "Internal Error", "Contact OceanBase Support"], - "-4019": ["OB_SIZE_OVERFLOW", "Size overflow", "Internal Error", "Contact OceanBase Support"], - "-4020": ["OB_REF_NUM_NOT_ZERO", "Reference count is not zero", "Internal Error", "Contact OceanBase Support"], - "-4021": ["OB_CONFLICT_VALUE", "Conflict value", "Internal Error", "Contact OceanBase Support"], - "-4022": ["OB_ITEM_NOT_SETTED", "Item not set", "Internal Error", "Contact OceanBase Support"], - "-4023": ["OB_EAGAIN", "Try again", "Internal Error", "Contact OceanBase Support"], - "-4024": ["OB_BUF_NOT_ENOUGH", "Buffer not enough", "Internal Error", "Contact OceanBase Support"], - "-4025": ["OB_PARTIAL_FAILED", "Partial failed", "Internal Error", "Contact OceanBase Support"], - "-4026": ["OB_READ_NOTHING", "Nothing to read", "Internal Error", "Contact OceanBase Support"], - "-4027": ["OB_FILE_NOT_EXIST", "File not exist. 
Compatible Error Code:MySQL: 1017(HY000)", "Internal Error", "Contact OceanBase Support"], - "-4028": ["OB_DISCONTINUOUS_LOG", "Log entry not continuous", "Internal Error", "Contact OceanBase Support"], - "-4029": ["OB_SCHEMA_ERROR", "Schema error", "Internal Error", "Contact OceanBase Support"], - "-4030": ["OB_TENANT_OUT_OF_MEM", "Over tenant memory limits", "Internal Error", "Contact OceanBase Support"], - "-4031": ["OB_UNKNOWN_OBJ", "Unknown object", "Internal Error", "Contact OceanBase Support"], - "-4032": ["OB_NO_MONITOR_DATA", "No monitor data", "Internal Error", "Contact OceanBase Support"], - "-4033": ["OB_SERIALIZE_ERROR", "Serialize error", "Internal Error", "Contact OceanBase Support"], - "-4034": ["OB_DESERIALIZE_ERROR", "Deserialize error", "Internal Error", "Contact OceanBase Support"], - "-4035": ["OB_AIO_TIMEOUT", "Asynchronous IO error", "Internal Error", "Contact OceanBase Support"], - "-4036": ["OB_NEED_RETRY", "Need retry", "Internal Error", "Contact OceanBase Support"], - "-4037": ["OB_TOO_MANY_SSTABLE", "Too many sstable", "Internal Error", "Contact OceanBase Support"], - "-4038": ["OB_NOT_MASTER", "The observer or zone is not the master", "Internal Error", "Contact OceanBase Support"], - "-4039": ["OB_KILLED_BY_THROTTLING", "Request has killed by sql throttle", "Internal Error", "Contact OceanBase Support"], - "-4041": ["OB_DECRYPT_FAILED", "Decrypt error", "Internal Error", "Contact OceanBase Support"], - "-4042": ["OB_USER_NOT_EXIST", "Can not find any matching row in the user table", "Internal Error", "Contact OceanBase Support"], - "-4043": ["OB_PASSWORD_WRONG", "Access denied for user", "Internal Error", "Contact OceanBase Support"], - "-4044": ["OB_SKEY_VERSION_WRONG", "Wrong skey version", "Internal Error", "Contact OceanBase Support"], - "-4048": ["OB_NOT_REGISTERED", "Not registered", "Internal Error", "Contact OceanBase Support"], - "-4049": ["OB_WAITQUEUE_TIMEOUT", "Task timeout and not executed", "Internal Error", "Contact OceanBase Support"], - "-4050": ["OB_NOT_THE_OBJECT", "Not the object", "Internal Error", "Contact OceanBase Support"], - "-4051": ["OB_ALREADY_REGISTERED", "Already registered", "Internal Error", "Contact OceanBase Support"], - "-4052": ["OB_LAST_LOG_RUINNED", "Corrupted log entry", "Internal Error", "Contact OceanBase Support"], - "-4053": ["OB_NO_CS_SELECTED", "No ChunkServer selected", "Internal Error", "Contact OceanBase Support"], - "-4054": ["OB_NO_TABLETS_CREATED", "No tablets created", "Internal Error", "Contact OceanBase Support"], - "-4055": ["OB_INVALID_ERROR", "Invalid entry", "Internal Error", "Contact OceanBase Support"], - "-4057": ["OB_DECIMAL_OVERFLOW_WARN", "Decimal overflow warning", "Internal Error", "Contact OceanBase Support"], - "-4058": ["OB_DECIMAL_UNLEGAL_ERROR", "Decimal overflow error", "Internal Error", "Contact OceanBase Support"], - "-4060": ["OB_OBJ_DIVIDE_ERROR", "Divide error", "Internal Error", "Contact OceanBase Support"], - "-4061": ["OB_NOT_A_DECIMAL", "Not a decimal", "Internal Error", "Contact OceanBase Support"], - "-4062": ["OB_DECIMAL_PRECISION_NOT_EQUAL", "Decimal precision error", "Internal Error", "Contact OceanBase Support"], - "-4063": ["OB_EMPTY_RANGE", "Empty range", "Internal Error", "Contact OceanBase Support"], - "-4064": ["OB_SESSION_KILLED", "Session killed", "Internal Error", "Contact OceanBase Support"], - "-4065": ["OB_LOG_NOT_SYNC", "Log not sync", "Internal Error", "Contact OceanBase Support"], - "-4066": ["OB_DIR_NOT_EXIST", "Directory not exist", "Internal Error", "Contact 
OceanBase Support"], - "-4067": ["OB_SESSION_NOT_FOUND", "RPC session not found", "Internal Error", "Contact OceanBase Support"], - "-4068": ["OB_INVALID_LOG", "Invalid log", "Internal Error", "Contact OceanBase Support"], - "-4070": ["OB_INVALID_DATA", "Invalid data", "Internal Error", "Contact OceanBase Support"], - "-4071": ["OB_ALREADY_DONE", "Already done", "Internal Error", "Contact OceanBase Support"], - "-4072": ["OB_CANCELED", "Operation canceled", "Internal Error", "Contact OceanBase Support"], - "-4073": ["OB_LOG_SRC_CHANGED", "Log source changed", "Internal Error", "Contact OceanBase Support"], - "-4074": ["OB_LOG_NOT_ALIGN", "Log not aligned", "Internal Error", "Contact OceanBase Support"], - "-4075": ["OB_LOG_MISSING", "Log entry missed", "Internal Error", "Contact OceanBase Support"], - "-4076": ["OB_NEED_WAIT", "Need wait", "Internal Error", "Contact OceanBase Support"], - "-4077": ["OB_NOT_IMPLEMENT", "Not implemented feature", "Internal Error", "Contact OceanBase Support"], - "-4078": ["OB_DIVISION_BY_ZERO", "Divided by zero", "Internal Error", "Contact OceanBase Support"], - "-4080": ["OB_EXCEED_MEM_LIMIT", "exceed memory limit", "Internal Error", "Contact OceanBase Support"], - "-4081": ["OB_RESULT_UNKNOWN", "Unknown result", "Internal Error", "Contact OceanBase Support"], - "-4084": ["OB_NO_RESULT", "No result", "Internal Error", "Contact OceanBase Support"], - "-4085": ["OB_QUEUE_OVERFLOW", "Queue overflow", "Internal Error", "Contact OceanBase Support"], - "-4097": ["OB_TERM_LAGGED", "Term lagged", "Internal Error", "Contact OceanBase Support"], - "-4098": ["OB_TERM_NOT_MATCH", "Term not match", "Internal Error", "Contact OceanBase Support"], - "-4099": ["OB_START_LOG_CURSOR_INVALID", "Invalid log cursor", "Internal Error", "Contact OceanBase Support"], - "-4100": ["OB_LOCK_NOT_MATCH", "Lock not match", "Internal Error", "Contact OceanBase Support"], - "-4101": ["OB_DEAD_LOCK", "deadlock detected while waiting for resource", "Internal Error", "Contact OceanBase Support"], - "-4102": ["OB_PARTIAL_LOG", "Incomplete log entry", "Internal Error", "Contact OceanBase Support"], - "-4103": ["OB_CHECKSUM_ERROR", "Data checksum error", "Internal Error", "Contact OceanBase Support"], - "-4104": ["OB_INIT_FAIL", "Initialize error", "Internal Error", "Contact OceanBase Support"], - "-4106": ["OB_NOT_ENOUGH_STORE", "not enough commitlog store", "Internal Error", "Contact OceanBase Support"], - "-4107": ["OB_BLOCK_SWITCHED", "block switched when fill commitlog", "Internal Error", "Contact OceanBase Support"], - "-4109": ["OB_STATE_NOT_MATCH", "Server state or role not the same as expected", "Internal Error", "Contact OceanBase Support"], - "-4110": ["OB_READ_ZERO_LOG", "Read zero log", "Internal Error", "Contact OceanBase Support"], - "-4111": ["OB_BLOCK_NEED_FREEZE", "block need freeze", "Internal Error", "Contact OceanBase Support"], - "-4112": ["OB_BLOCK_FROZEN", "block frozen", "Internal Error", "Contact OceanBase Support"], - "-4113": ["OB_IN_FATAL_STATE", "In FATAL state", "Internal Error", "Contact OceanBase Support"], - "-4114": ["OB_IN_STOP_STATE", "In STOP state", "Internal Error", "Contact OceanBase Support"], - "-4115": ["OB_UPS_MASTER_EXISTS", "Master UpdateServer already exists", "Internal Error", "Contact OceanBase Support"], - "-4116": ["OB_LOG_NOT_CLEAR", "Log not clear", "Internal Error", "Contact OceanBase Support"], - "-4117": ["OB_FILE_ALREADY_EXIST", "File already exist", "Internal Error", "Contact OceanBase Support"], - "-4118": ["OB_UNKNOWN_PACKET", 
"Unknown packet", "Internal Error", "Contact OceanBase Support"], - "-4119": ["OB_RPC_PACKET_TOO_LONG", "RPC packet to send too long", "Internal Error", "Contact OceanBase Support"], - "-4120": ["OB_LOG_TOO_LARGE", "Log too large", "Internal Error", "Contact OceanBase Support"], - "-4121": ["OB_RPC_SEND_ERROR", "RPC send error", "Internal Error", "Contact OceanBase Support"], - "-4122": ["OB_RPC_POST_ERROR", "RPC post error", "Internal Error", "Contact OceanBase Support"], - "-4123": ["OB_LIBEASY_ERROR", "Libeasy error", "Internal Error", "Contact OceanBase Support"], - "-4124": ["OB_CONNECT_ERROR", "Connect error", "Internal Error", "Contact OceanBase Support"], - "-4125": ["OB_NOT_FREE", "Not free", "Internal Error", "Contact OceanBase Support"], - "-4126": ["OB_INIT_SQL_CONTEXT_ERROR", "Init SQL context error", "Internal Error", "Contact OceanBase Support"], - "-4127": ["OB_SKIP_INVALID_ROW", "Skip invalid row", "Internal Error", "Contact OceanBase Support"], - "-4128": ["OB_RPC_PACKET_INVALID", "RPC packet is invalid", "Internal Error", "Contact OceanBase Support"], - "-4133": ["OB_NO_TABLET", "No tablets", "Internal Error", "Contact OceanBase Support"], - "-4138": ["OB_SNAPSHOT_DISCARDED", "Request to read too old versioned data", "Internal Error", "Contact OceanBase Support"], - "-4139": ["OB_DATA_NOT_UPTODATE", "State is stale", "Internal Error", "Contact OceanBase Support"], - "-4142": ["OB_ROW_MODIFIED", "Row modified", "Internal Error", "Contact OceanBase Support"], - "-4143": ["OB_VERSION_NOT_MATCH", "Version not match", "Internal Error", "Contact OceanBase Support"], - "-4144": ["OB_BAD_ADDRESS", "Bad address", "Internal Error", "Contact OceanBase Support"], - "-4146": ["OB_ENQUEUE_FAILED", "Enqueue error", "Internal Error", "Contact OceanBase Support"], - "-4147": ["OB_INVALID_CONFIG", "Invalid config", "Internal Error", "Contact OceanBase Support"], - "-4149": ["OB_STMT_EXPIRED", "Expired statement", "Internal Error", "Contact OceanBase Support"], - "-4150": ["OB_ERR_MIN_VALUE", "Min value", "Internal Error", "Contact OceanBase Support"], - "-4151": ["OB_ERR_MAX_VALUE", "Max value", "Internal Error", "Contact OceanBase Support"], - "-4152": ["OB_ERR_NULL_VALUE", "Null value", "Internal Error", "Contact OceanBase Support"], - "-4153": ["OB_RESOURCE_OUT", "Out of resource", "Internal Error", "Contact OceanBase Support"], - "-4154": ["OB_ERR_SQL_CLIENT", "Internal SQL client error", "Internal Error", "Contact OceanBase Support"], - "-4155": ["OB_META_TABLE_WITHOUT_USE_TABLE", "Meta table without use table", "Internal Error", "Contact OceanBase Support"], - "-4156": ["OB_DISCARD_PACKET", "Discard packet", "Internal Error", "Contact OceanBase Support"], - "-4157": ["OB_OPERATE_OVERFLOW", "value is out of range", "Internal Error", "Contact OceanBase Support"], - "-4158": ["OB_INVALID_DATE_FORMAT", "date format not recognized", "Internal Error", "Contact OceanBase Support"], - "-4159": ["OB_POOL_REGISTERED_FAILED", "register pool failed", "Internal Error", "Contact OceanBase Support"], - "-4160": ["OB_POOL_UNREGISTERED_FAILED", "unregister pool failed", "Internal Error", "Contact OceanBase Support"], - "-4161": ["OB_INVALID_ARGUMENT_NUM", "Invalid argument num", "Internal Error", "Contact OceanBase Support"], - "-4162": ["OB_LEASE_NOT_ENOUGH", "reserved lease not enough", "Internal Error", "Contact OceanBase Support"], - "-4163": ["OB_LEASE_NOT_MATCH", "ups lease not match with rs", "Internal Error", "Contact OceanBase Support"], - "-4164": ["OB_UPS_SWITCH_NOT_HAPPEN", "ups switch 
not happen", "Internal Error", "Contact OceanBase Support"], - "-4165": ["OB_EMPTY_RESULT", "Empty result", "Internal Error", "Contact OceanBase Support"], - "-4166": ["OB_CACHE_NOT_HIT", "Cache not hit", "Internal Error", "Contact OceanBase Support"], - "-4167": ["OB_NESTED_LOOP_NOT_SUPPORT", "Nested loop not support", "Internal Error", "Contact OceanBase Support"], - "-4168": ["OB_LOG_INVALID_MOD_ID", "Invalid log module id", "Internal Error", "Contact OceanBase Support"], - "-4169": ["OB_LOG_MODULE_UNKNOWN", "Unknown module name", "Internal Error", "Contact OceanBase Support"], - "-4170": ["OB_LOG_LEVEL_INVALID", "Invalid level", "Internal Error", "Contact OceanBase Support"], - "-4171": ["OB_LOG_PARSER_SYNTAX_ERR", "Syntax to set log_level error", "Internal Error", "Contact OceanBase Support"], - "-4172": ["OB_INDEX_OUT_OF_RANGE", "Index out of range", "Internal Error", "Contact OceanBase Support"], - "-4173": ["OB_INT_UNDERFLOW", "Int underflow", "Internal Error", "Contact OceanBase Support"], - "-4174": ["OB_UNKNOWN_CONNECTION", "Unknown thread id", "Internal Error", "Contact OceanBase Support"], - "-4175": ["OB_ERROR_OUT_OF_RANGE", "Out of range", "Internal Error", "Contact OceanBase Support"], - "-4176": ["OB_CACHE_SHRINK_FAILED", "shrink cache failed, no available cache", "Internal Error"], - "-4177": ["OB_OLD_SCHEMA_VERSION", "Schema version too old", "Internal Error", "Contact OceanBase Support"], - "-4178": ["OB_RELEASE_SCHEMA_ERROR", "Release schema error", "Internal Error", "Contact OceanBase Support"], - "-4179": ["OB_OP_NOT_ALLOW", "Operation not allowed now", "Internal Error", "Contact OceanBase Support"], - "-4180": ["OB_NO_EMPTY_ENTRY", "No empty entry", "Internal Error", "Contact OceanBase Support"], - "-4181": ["OB_ERR_ALREADY_EXISTS", "Already exist", "Internal Error", "Contact OceanBase Support"], - "-4182": ["OB_SEARCH_NOT_FOUND", "Value not found", "Internal Error", "Contact OceanBase Support"], - "-4183": ["OB_BEYOND_THE_RANGE", "Key out of range", "Internal Error", "Contact OceanBase Support"], - "-4184": ["OB_CS_OUTOF_DISK_SPACE", "ChunkServer out of disk space", "Internal Error", "Contact OceanBase Support"], - "-4185": ["OB_COLUMN_GROUP_NOT_FOUND", "Column group not found", "Internal Error", "Contact OceanBase Support"], - "-4186": ["OB_CS_COMPRESS_LIB_ERROR", "ChunkServer failed to get compress library", "Internal Error", "Contact OceanBase Support"], - "-4187": ["OB_ITEM_NOT_MATCH", "Item not match", "Internal Error", "Contact OceanBase Support"], - "-4188": ["OB_SCHEDULER_TASK_CNT_MISMATCH", "Running task cnt and unfinished task cnt not consistent", "Internal Error", "Contact OceanBase Support"], - "-4189": ["OB_INVALID_MACRO_BLOCK_TYPE", "the macro block type does not exist", "Internal Error", "Contact OceanBase Support"], - "-4190": ["OB_INVALID_DATE_FORMAT_END", "Incorrect value", "Internal Error", "Contact OceanBase Support"], - "-4200": ["OB_HASH_EXIST", "hash map/set entry exist", "Internal Error", "Contact OceanBase Support"], - "-4201": ["OB_HASH_NOT_EXIST", "hash map/set entry not exist", "Internal Error", "Contact OceanBase Support"], - "-4204": ["OB_HASH_GET_TIMEOUT", "hash map/set get timeout", "Internal Error", "Contact OceanBase Support"], - "-4205": ["OB_HASH_PLACEMENT_RETRY", "hash map/set retry", "Internal Error", "Contact OceanBase Support"], - "-4206": ["OB_HASH_FULL", "hash map/set full", "Internal Error", "Contact OceanBase Support"], - "-4207": ["OB_PACKET_PROCESSED", "packet processed", "Internal Error", "Contact OceanBase Support"], 
- "-4208": ["OB_WAIT_NEXT_TIMEOUT", "wait next packet timeout", "Internal Error", "Contact OceanBase Support"], - "-4209": ["OB_LEADER_NOT_EXIST", "partition has not leader", "Internal Error", "Contact OceanBase Support"], - "-4210": ["OB_PREPARE_MAJOR_FREEZE_FAILED", "prepare major freeze failed", "Internal Error", "Contact OceanBase Support"], - "-4211": ["OB_COMMIT_MAJOR_FREEZE_FAILED", "commit major freeze failed", "Internal Error", "Contact OceanBase Support"], - "-4212": ["OB_ABORT_MAJOR_FREEZE_FAILED", "abort major freeze failed", "Internal Error", "Contact OceanBase Support"], - "-4213": ["OB_MAJOR_FREEZE_NOT_FINISHED", "last major freeze not finish", "Internal Error", "Contact OceanBase Support"], - "-4214": ["OB_PARTITION_NOT_LEADER", "partition is not leader partition", "Internal Error", "Contact OceanBase Support"], - "-4215": ["OB_WAIT_MAJOR_FREEZE_RESPONSE_TIMEOUT", "wait major freeze response timeout", "Internal Error", "Contact OceanBase Support"], - "-4216": ["OB_CURL_ERROR", "curl error", "Internal Error", "Contact OceanBase Support"], - "-4217": ["OB_MAJOR_FREEZE_NOT_ALLOW", "Major freeze not allowed now", "Internal Error", "Contact OceanBase Support"], - "-4218": ["OB_PREPARE_FREEZE_FAILED", "prepare freeze failed", "Internal Error", "Contact OceanBase Support"], - "-4219": ["OB_INVALID_DATE_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], - "-4220": ["OB_INACTIVE_SQL_CLIENT", "Inactive sql client, only read allowed", "Internal Error"], - "-4221": ["OB_INACTIVE_RPC_PROXY", "Inactive rpc proxy, can not send RPC request", "Internal Error"], - "-4222": ["OB_INTERVAL_WITH_MONTH", "Interval with year or month can not be converted to microseconds", "Internal Error", "Contact OceanBase Support"], - "-4223": ["OB_TOO_MANY_DATETIME_PARTS", "Interval has too many datetime parts", "Internal Error", "Contact OceanBase Support"], - "-4224": ["OB_DATA_OUT_OF_RANGE", "Out of range value for column", "Internal Error", "Contact OceanBase Support"], - "-4225": ["OB_PARTITION_NOT_EXIST", "Partition entry not exists", "Internal Error", "Contact OceanBase Support"], - "-4226": ["OB_ERR_TRUNCATED_WRONG_VALUE_FOR_FIELD", "Incorrect integer value", "Internal Error", "Contact OceanBase Support"], - "-4227": ["OB_ERR_NO_DEFAULT_FOR_FIELD", "Field doesn't have a default value", "Internal Error", "Contact OceanBase Support"], - "-4228": ["OB_ERR_FIELD_SPECIFIED_TWICE", "Column specified twice", "Internal Error", "Contact OceanBase Support"], - "-4232": ["OB_NOT_FOLLOWER", "The observer or zone is not a follower", "Internal Error", "Contact OceanBase Support"], - "-4233": ["OB_ERR_OUT_OF_LOWER_BOUND", "smaller than container lower bound", "Internal Error", "Contact OceanBase Support"], - "-4234": ["OB_ERR_OUT_OF_UPPER_BOUND", "bigger than container upper bound", "Internal Error", "Contact OceanBase Support"], - "-4236": ["OB_OBCONFIG_RETURN_ERROR", "ObConfig return error code", "Internal Error", "Contact OceanBase Support"], - "-4237": ["OB_OBCONFIG_APPNAME_MISMATCH", "Appname mismatch with obconfig result", "Internal Error", "Contact OceanBase Support"], - "-4238": ["OB_ERR_VIEW_SELECT_DERIVED", "View's SELECT contains a subquery in the FROM clause", "Internal Error", "Contact OceanBase Support"], - "-4239": ["OB_CANT_MJ_PATH", "Can not use merge-join to join the tables without join conditions", "Internal Error", "Contact OceanBase Support"], - "-4240": ["OB_ERR_NO_JOIN_ORDER_GENERATED", "No join order generated", "Internal Error", "Contact OceanBase Support"], - "-4241": 
["OB_ERR_NO_PATH_GENERATED", "No join path generated", "Internal Error", "Contact OceanBase Support"], - "-4242": ["OB_ERR_WAIT_REMOTE_SCHEMA_REFRESH", "Schema error", "Internal Error", "Contact OceanBase Support"], - "-4243": ["OB_FILE_NOT_OPENED", "file not opened", "Internal Error", "Contact OceanBase Support"], - "-4244": ["OB_TIMER_TASK_HAS_SCHEDULED", "Timer task has been scheduled", "Internal Error", "Contact OceanBase Support"], - "-4245": ["OB_TIMER_TASK_HAS_NOT_SCHEDULED", "Timer task has not been scheduled", "Internal Error", "Contact OceanBase Support"], - "-4246": ["OB_PARSE_DEBUG_SYNC_ERROR", "parse debug sync string error", "Internal Error", "Contact OceanBase Support"], - "-4247": ["OB_UNKNOWN_DEBUG_SYNC_POINT", "unknown debug sync point", "Internal Error", "Contact OceanBase Support"], - "-4248": ["OB_ERR_INTERRUPTED", "task is interrupted while running", "Internal Error", "Contact OceanBase Support"], - "-4249": ["OB_ERR_DATA_TRUNCATED", "Data truncated for argument", "Internal Error", "Contact OceanBase Support"], - "-4250": ["OB_NOT_RUNNING", "module is not running", "Internal Error", "Contact OceanBase Support"], - "-4251": ["OB_INVALID_PARTITION", "partition not valid", "Internal Error", "Contact OceanBase Support"], - "-4252": ["OB_ERR_TIMEOUT_TRUNCATED", "Timeout value truncated to 102 years", "Internal Error", "Contact OceanBase Support"], - "-4254": ["OB_ERR_NET_PACKET_TOO_LARGE", "Got a packet bigger than 'max_allowed_packet' bytes", "Internal Error", "Contact OceanBase Support"], - "-4255": ["OB_TRACE_DESC_NOT_EXIST", "trace log title or key not exist describle", "Internal Error", "Contact OceanBase Support"], - "-4256": ["OB_ERR_NO_DEFAULT", "Variable doesn't have a default value", "Internal Error","Contact OceanBase Support"], - "-4257": ["OB_ERR_COMPRESS_DECOMPRESS_DATA", "compress data or decompress data failed", "Internal Error", "Contact OceanBase Support"], - "-4258": ["OB_ERR_INCORRECT_STRING_VALUE", "Incorrect string value", "Incorrect string value", "Internal Error"], - "-4259": ["OB_ERR_DISTRIBUTED_NOT_SUPPORTED", "Not supported feature or function", "Internal Error", "Contact OceanBase Support"], - "-4260": ["OB_IS_CHANGING_LEADER", "the partition is changing leader", "Internal Error", "Contact OceanBase Support"], - "-4261": ["OB_DATETIME_FUNCTION_OVERFLOW", "Datetime overflow", "Internal Error", "Contact OceanBase Support"], - "-4262": ["OB_ERR_DOUBLE_TRUNCATED", "Truncated incorrect DOUBLE value", "Internal Error", "Contact OceanBase Support"], - "-4263": ["OB_MINOR_FREEZE_NOT_ALLOW", "Minor freeze not allowed now", "Internal Error", "Contact OceanBase Support"], - "-4264": ["OB_LOG_OUTOF_DISK_SPACE", "Log out of disk space", "Internal Error", "Contact OceanBase Support"], - "-4265": ["OB_RPC_CONNECT_ERROR", "Rpc connect error", "Internal Error", "Contact OceanBase Support"], - "-4266": ["OB_MINOR_MERGE_NOT_ALLOW", "minor merge not allow", "Internal Error", "Contact OceanBase Support"], - "-4267": ["OB_CACHE_INVALID", "Cache invalid", "Internal Error", "Contact OceanBase Support"], - "-4268": ["OB_REACH_SERVER_DATA_COPY_IN_CONCURRENCY_LIMIT", "reach server data copy in concurrency", "Internal Error", "Contact OceanBase Support"], - "-4269": ["OB_WORKING_PARTITION_EXIST", "Working partition entry already exists", "Internal Error", "Contact OceanBase Support"], - "-4270": ["OB_WORKING_PARTITION_NOT_EXIST", "Working partition entry does not exists", "Internal Error", "Contact OceanBase Support"], - "-4271": ["OB_LIBEASY_REACH_MEM_LIMIT", "LIBEASY 
reach memory limit", "Internal Error", "Contact OceanBase Support"], - "-4272": ["OB_MISS_ARGUMENT", "Miss argument", "Miss argument", "Internal Error"], - "-4273": ["OB_CACHE_FREE_BLOCK_NOT_ENOUGH", "free memblock in cache is not enough", "Internal Error", "Contact OceanBase Support"], - "-4274": ["OB_SYNC_WASH_MB_TIMEOUT", "sync wash memblock timeout", "Internal Error", "Contact OceanBase Support"], - "-4275": ["OB_NOT_ALLOW_MIGRATE_IN", "not allow migrate in", "Internal Error", "Contact OceanBase Support"], - "-4276": ["OB_SCHEDULER_TASK_CNT_MISTACH", "Scheduler task cnt does not match", "Internal Error", "Contact OceanBase Support"], - "-4277": ["OB_MISS_ARGUMENT", "Miss argument", "Internal Error", "Contact OceanBase Support"], - "-4278": ["OB_LAST_LOG_NOT_COMPLETE", "last log is not complete", "Internal Error", "Contact OceanBase Support"], - "-4279": ["OB_TABLE_IS_DELETED", "table is deleted", "Internal Error", "Contact OceanBase Support"], - "-4280": ["OB_VERSION_RANGE_NOT_CONTINUES", "version range not continues", "Internal Error", "Contact OceanBase Support"], - "-4281": ["OB_INVALID_IO_BUFFER", "io buffer is invalid", "Internal Error", "Contact OceanBase Support"], - "-4282": ["OB_PARTITION_IS_REMOVED", "partition is removed", "Internal Error", "Contact OceanBase Support"], - "-4283": ["OB_GTS_NOT_READY", "gts is not ready", "Internal Error", "Contact OceanBase Support"], - "-4284": ["OB_MAJOR_SSTABLE_NOT_EXIST", "major sstable not exist", "Internal Error", "Contact OceanBase Support"], - "-4285": ["OB_VERSION_RANGE_DISCARDED", "Request to read too old version range data", "Internal Error", "Contact OceanBase Support"], - "-4286": ["OB_MAJOR_SSTABLE_HAS_MERGED", "major sstable may has been merged", "Internal Error", "Contact OceanBase Support"], - "-4287": ["OB_MINOR_SSTABLE_RANGE_CROSS", "minor sstable version range cross", "Internal Error", "Contact OceanBase Support"], - "-4288": ["OB_MEMTABLE_CANNOT_MINOR_MERGE", "memtable cannot minor merge", "Internal Error", "Contact OceanBase Support"], - "-4289": ["OB_TASK_EXIST", "task exist", "Internal Error", "Contact OceanBase Support"], - "-4290": ["OB_ALLOCATE_DISK_SPACE_FAILED", "cannot allocate disk space", "Internal Error", "Contact OceanBase Support"], - "-4291": ["OB_CANT_FIND_UDF", "Can't load function", "Internal Error", "Contact OceanBase Support"], - "-4292": ["OB_CANT_INITIALIZE_UDF", "Can't initialize function", "Can not initialize function ", "Internal Error"], - "-4293": ["OB_UDF_NO_PATHS", "No paths allowed for shared library", "Internal Error", "Contact OceanBase Support"], - "-4294": ["OB_UDF_EXISTS", "Function already exists", "Function already exists", "Internal Error"], - "-4295": ["OB_CANT_OPEN_LIBRARY", "Can't open shared library", "Can not open shared library", "Internal Error"], - "-4296": ["OB_CANT_FIND_DL_ENTRY", "Can't find symbol in library", "Internal Error", "Contact OceanBase Support"], - "-4297": ["OB_OBJECT_NAME_EXIST", "name is already used by an existing object", "Internal Error", "Contact OceanBase Support"], - "-4298": ["OB_OBJECT_NAME_NOT_EXIST", "object does not exist", "Internal Error", "Contact OceanBase Support"], - "-4300": ["OB_ERR_INVALID_SEQUENCE_NAME", "invalid sequence name", "Internal Error", "Contact OceanBase Support"], - "-4301": ["OB_ERR_DUP_MAXVALUE_SPEC", "duplicate MAXVALUE/NOMAXVALUE specifications", "Internal Error", "Contact OceanBase Support"], - "-4302": ["OB_ERR_DUP_MINVALUE_SPEC", "duplicate MINVALUE/NOMINVALUE specifications", "Internal Error", "Contact OceanBase 
Support"], - "-4303": ["OB_ERR_DUP_CYCLE_SPEC", "duplicate CYCLE/NOCYCLE specifications", "Internal Error", "Contact OceanBase Support"], - "-4304": ["OB_ERR_DUP_CACHE_SPEC", "duplicate CACHE/NOCACHE specifications", "Internal Error", "Contact OceanBase Support"], - "-4305": ["OB_ERR_DUP_ORDER_SPEC", "duplicate ORDER/NOORDER specifications", "Internal Error", "Contact OceanBase Support"], - "-4306": ["OB_ERR_CONFL_MAXVALUE_SPEC", "conflicting MAXVALUE/NOMAXVALUE specifications", "Internal Error", "Contact OceanBase Support"], - "-4307": ["OB_ERR_CONFL_MINVALUE_SPEC", "conflicting MINVALUE/NOMINVALUE specifications", "Internal Error", "Contact OceanBase Support"], - "-4308": ["OB_ERR_CONFL_CYCLE_SPEC", "conflicting CYCLE/NOCYCLE specifications", "Internal Error", "Contact OceanBase Support"], - "-4309": ["OB_ERR_CONFL_CACHE_SPEC", "conflicting CACHE/NOCACHE specifications", "Internal Error", "Contact OceanBase Support"], - "-4310": ["OB_ERR_CONFL_ORDER_SPEC", "conflicting ORDER/NOORDER specifications", "Internal Error", "Contact OceanBase Support"], - "-4311": ["OB_ERR_ALTER_START_SEQ_NUMBER_NOT_ALLOWED", "cannot alter starting sequence number", "Internal Error", "Contact OceanBase Support"], - "-4312": ["OB_ERR_DUP_INCREMENT_BY_SPEC", "duplicate INCREMENT BY specifications", "Internal Error", "Contact OceanBase Support"], - "-4313": ["OB_ERR_DUP_START_WITH_SPEC", "duplicate START WITH specifications", "Internal Error", "Contact OceanBase Support"], - "-4314": ["OB_ERR_REQUIRE_ALTER_SEQ_OPTION", "no options specified for ALTER SEQUENCE", "Internal Error", "Contact OceanBase Support"], - "-4315": ["OB_ERR_SEQ_NOT_ALLOWED_HERE", "sequence number not allowed here", "Internal Error", "Contact OceanBase Support"], - "-4316": ["OB_ERR_SEQ_NOT_EXIST", "sequence does not exist", "Internal Error", "Contact OceanBase Support"], - "-4317": ["OB_ERR_SEQ_OPTION_MUST_BE_INTEGER", "sequence parameter must be an integer", "Internal Error", "Contact OceanBase Support"], - "-4318": ["OB_ERR_SEQ_INCREMENT_CAN_NOT_BE_ZERO", "INCREMENT must be a nonzero integer", "Internal Error", "Contact OceanBase Support"], - "-4319": ["OB_ERR_SEQ_OPTION_EXCEED_RANGE", "sequence parameter exceeds maximum size allowed", "Internal Error", "Contact OceanBase Support"], - "-4320": ["OB_ERR_MINVALUE_LARGER_THAN_MAXVALUE", "MINVALUE must be less than MAXVALUE", "Internal Error", "Contact OceanBase Support"], - "-4321": ["OB_ERR_SEQ_INCREMENT_TOO_LARGE", "INCREMENT must be less than MAXVALUE minus MINVALUE", "Internal Error", "Contact OceanBase Support"], - "-4322": ["OB_ERR_START_WITH_LESS_THAN_MINVALUE", "START WITH cannot be less than MINVALUE", "Internal Error", "Contact OceanBase Support"], - "-4323": ["OB_ERR_MINVALUE_EXCEED_CURRVAL", "MINVALUE cannot be made to exceed the current value", "Internal Error", "Contact OceanBase Support"], - "-4324": ["OB_ERR_START_WITH_EXCEED_MAXVALUE", "START WITH cannot be more than MAXVALUE", "Internal Error","Contact OceanBase Support"], - "-4325": ["OB_ERR_MAXVALUE_EXCEED_CURRVAL", "MAXVALUE cannot be made to be less than the current value", "Internal Error", "Contact OceanBase Support"], - "-4326": ["OB_ERR_SEQ_CACHE_TOO_SMALL", "the number of values to CACHE must be greater than 1", "Internal Error", "Contact OceanBase Support"], - "-4327": ["OB_ERR_SEQ_OPTION_OUT_OF_RANGE", "sequence option value out of range", "Internal Error", "Contact OceanBase Support"], - "-4328": ["OB_ERR_SEQ_CACHE_TOO_LARGE", "number to CACHE must be less than one cycle", "Internal Error", "Contact OceanBase 
Support"], - "-4329": ["OB_ERR_SEQ_REQUIRE_MINVALUE", "descending sequences that CYCLE must specify MINVALUE", "Internal Error", "Contact OceanBase Support"], - "-4330": ["OB_ERR_SEQ_REQUIRE_MAXVALUE", "ascending sequences that CYCLE must specify MAXVALUE", "Internal Error", "Contact OceanBase Support"], - "-4331": ["OB_ERR_SEQ_NO_LONGER_EXIST", "sequence no longer exists", "Internal Error", "Contact OceanBase Support"], - "-4332": ["OB_ERR_SEQ_VALUE_EXCEED_LIMIT", "sequence exceeds limit and cannot be instantiated", "Internal Error", "Contact OceanBase Support"], - "-4333": ["OB_ERR_DIVISOR_IS_ZERO", "divisor is equal to zero", "Internal Error", "Contact OceanBase Support"], - "-4334": ["OB_ERR_AES_DECRYPT", "fail to decrypt data", "Internal Error", "Contact OceanBase Support"], - "-4335": ["OB_ERR_AES_ENCRYPT", "fail to encrypt data", "Internal Error", "Contact OceanBase Support"], - "-4336": ["OB_ERR_AES_IV_LENGTH", "The initialization vector supplied to aes_encrypt is too short. Must be at least 16 bytes long", "Internal Error", "Contact OceanBase Support"], - "-4505": ["OB_IMPORT_NOT_IN_SERVER", "Import not in service", "Internal Error", "Contact OceanBase Support"], - "-4507": ["OB_CONVERT_ERROR", "Convert error", "Internal Error", "Contact OceanBase Support"], - "-4510": ["OB_BYPASS_TIMEOUT", "Bypass timeout", "Internal Error", "Contact OceanBase Support"], - "-4512": ["OB_RS_STATE_NOT_ALLOW", "RootServer state error", "Internal Error", "Contact OceanBase Support"], - "-4515": ["OB_NO_REPLICA_VALID", "No replica is valid", "Internal Error", "Contact OceanBase Support"], - "-4517": ["OB_NO_NEED_UPDATE", "No need to update", "Internal Error", "Contact OceanBase Support"], - "-4518": ["OB_CACHE_TIMEOUT", "Cache timeout", "Internal Error", "Contact OceanBase Support"], - "-4519": ["OB_ITER_STOP", "Iteration was stopped", "Internal Error", "Contact OceanBase Support"], - "-4523": ["OB_ZONE_ALREADY_MASTER", "The zone is the master already", "Internal Error", "Contact OceanBase Support"], - "-4524": ["OB_IP_PORT_IS_NOT_SLAVE_ZONE", "Not slave zone", "Internal Error", "Contact OceanBase Support"], - "-4525": ["OB_ZONE_IS_NOT_SLAVE", "Not slave zone", "Internal Error", "Contact OceanBase Support"], - "-4526": ["OB_ZONE_IS_NOT_MASTER", "Not master zone", "Internal Error", "Contact OceanBase Support"], - "-4527": ["OB_CONFIG_NOT_SYNC", "Configuration not sync", "Internal Error", "Contact OceanBase Support"], - "-4528": ["OB_IP_PORT_IS_NOT_ZONE", "Not a zone address", "Internal Error", "Contact OceanBase Support"], - "-4529": ["OB_MASTER_ZONE_NOT_EXIST", "Master zone not exist", "Internal Error", "Contact OceanBase Support"], - "-4530": ["OB_ZONE_INFO_NOT_EXIST", "Zone info not exist", "Internal Error", "Contact OceanBase Support"], - "-4531": ["OB_GET_ZONE_MASTER_UPS_FAILED", "Failed to get master UpdateServer", "Internal Error", "Contact OceanBase Support"], - "-4532": ["OB_MULTIPLE_MASTER_ZONES_EXIST", "Multiple master zones", "Internal Error", "Contact OceanBase Support"], - "-4533": ["OB_INDEXING_ZONE_INVALID", "indexing zone is not exist anymore or not active", "Internal Error", "Contact OceanBase Support"], - "-4537": ["OB_ROOT_TABLE_RANGE_NOT_EXIST", "Tablet range not exist", "Internal Error", "Contact OceanBase Support"], - "-4538": ["OB_ROOT_MIGRATE_CONCURRENCY_FULL", "Migrate concurrency full", "Internal Error", "Contact OceanBase Support"], - "-4539": ["OB_ROOT_MIGRATE_INFO_NOT_FOUND", "Migrate info not found", "Internal Error", "Contact OceanBase Support"], - "-4540": 
["OB_NOT_DATA_LOAD_TABLE", "No data to load", "Internal Error", "Contact OceanBase Support"], - "-4541": ["OB_DATA_LOAD_TABLE_DUPLICATED", "Duplicated table data to load", "Internal Error", "Contact OceanBase Support"], - "-4542": ["OB_ROOT_TABLE_ID_EXIST", "Table ID exist", "Internal Error", "Contact OceanBase Support"], - "-4543": ["OB_INDEX_TIMEOUT", "Building index timeout", "Internal Error", "Contact OceanBase Support"], - "-4544": ["OB_ROOT_NOT_INTEGRATED", "Root not integrated", "Internal Error", "Contact OceanBase Support"], - "-4545": ["OB_INDEX_INELIGIBLE", "index data not unique", "Internal Error", "Contact OceanBase Support"], - "-4546": ["OB_REBALANCE_EXEC_TIMEOUT", "execute replication or migration task timeout", "Internal Error", "Contact OceanBase Support"], - "-4547": ["OB_MERGE_NOT_STARTED", "global merge not started", "Internal Error", "Contact OceanBase Support"], - "-4548": ["OB_MERGE_ALREADY_STARTED", "merge already started", "Internal Error", "Contact OceanBase Support"], - "-4549": ["OB_ROOTSERVICE_EXIST", "rootservice already exist", "Internal Error", "Contact OceanBase Support"], - "-4550": ["OB_RS_SHUTDOWN", "rootservice is shutdown", "Internal Error", "Contact OceanBase Support"], - "-4551": ["OB_SERVER_MIGRATE_IN_DENIED", "server migrate in denied", "Internal Error", "Contact OceanBase Support"], - "-4552": ["OB_REBALANCE_TASK_CANT_EXEC", "rebalance task can not executing now", "Internal Error", "Contact OceanBase Support"], - "-4553": ["OB_PARTITION_CNT_REACH_ROOTSERVER_LIMIT", "rootserver can not hold more partition", "Internal Error", "Contact OceanBase Support"], - "-4554": ["OB_REBALANCE_TASK_NOT_IN_PROGRESS", "rebalance task not in progress on observer", "Internal Error", "Contact OceanBase Support"], - "-4600": ["OB_DATA_SOURCE_NOT_EXIST", "Data source not exist", "Internal Error", "Contact OceanBase Support"], - "-4601": ["OB_DATA_SOURCE_TABLE_NOT_EXIST", "Data source table not exist", "Internal Error", "Contact OceanBase Support"], - "-4602": ["OB_DATA_SOURCE_RANGE_NOT_EXIST", "Data source range not exist", "Internal Error", "Contact OceanBase Support"], - "-4603": ["OB_DATA_SOURCE_DATA_NOT_EXIST", "Data source data not exist", "Internal Error", "Contact OceanBase Support"], - "-4604": ["OB_DATA_SOURCE_SYS_ERROR", "Data source sys error", "Internal Error", "Contact OceanBase Support"], - "-4605": ["OB_DATA_SOURCE_TIMEOUT", "Data source timeout", "Internal Error", "Contact OceanBase Support"], - "-4606": ["OB_DATA_SOURCE_CONCURRENCY_FULL", "Data source concurrency full", "Internal Error", "Contact OceanBase Support"], - "-4607": ["OB_DATA_SOURCE_WRONG_URI_FORMAT", "Data source wrong URI format", "Internal Error", "Contact OceanBase Support"], - "-4608": ["OB_SSTABLE_VERSION_UNEQUAL", "SSTable version not equal", "Internal Error", "Contact OceanBase Support"], - "-4609": ["OB_UPS_RENEW_LEASE_NOT_ALLOWED", "ups should not renew its lease", "Internal Error", "Contact OceanBase Support"], - "-4610": ["OB_UPS_COUNT_OVER_LIMIT", "ups count over limit", "Internal Error", "Contact OceanBase Support"], - "-4611": ["OB_NO_UPS_MAJORITY", "ups not form a majority", "Internal Error", "Contact OceanBase Support"], - "-4613": ["OB_INDEX_COUNT_REACH_THE_LIMIT", "created index tables count has reach the limit:128", "Internal Error", "Contact OceanBase Support"], - "-4614": ["OB_TASK_EXPIRED", "task expired", "Internal Error", "Contact OceanBase Support"], - "-4615": ["OB_TABLEGROUP_NOT_EMPTY", "tablegroup is not empty", "Internal Error", "Contact OceanBase Support"], - 
"-4620": ["OB_INVALID_SERVER_STATUS", "server status is not valid", "Internal Error", "Contact OceanBase Support"], - "-4621": ["OB_WAIT_ELEC_LEADER_TIMEOUT", "wait elect partition leader timeout", "Internal Error", "Contact OceanBase Support"], - "-4622": ["OB_WAIT_ALL_RS_ONLINE_TIMEOUT", "wait all rs online timeout", "Internal Error", "Contact OceanBase Support"], - "-4623": ["OB_ALL_REPLICAS_ON_MERGE_ZONE", "all replicas of partition group are on zones to merge", "Internal Error", "Contact OceanBase Support"], - "-4624": ["OB_MACHINE_RESOURCE_NOT_ENOUGH", "machine resource is not enough to hold a new unit", "Internal Error", "Contact OceanBase Support"], - "-4625": ["OB_NOT_SERVER_CAN_HOLD_SOFTLY", "not server can hole the unit and not over soft limit", "Internal Error", "Contact OceanBase Support"], - "-4626": ["OB_RESOURCE_POOL_ALREADY_GRANTED", "resource pool has already been granted to a tenant", "Internal Error", "Contact OceanBase Support"], - "-4628": ["OB_SERVER_ALREADY_DELETED", "server has already been deleted", "Internal Error", "Contact OceanBase Support"], - "-4629": ["OB_SERVER_NOT_DELETING", "server is not in deleting status", "Internal Error", "Contact OceanBase Support"], - "-4630": ["OB_SERVER_NOT_IN_WHITE_LIST", "server not in server white list", "Internal Error", "Contact OceanBase Support"], - "-4631": ["OB_SERVER_ZONE_NOT_MATCH", "server zone not match", "Internal Error", "Contact OceanBase Support"], - "-4632": ["OB_OVER_ZONE_NUM_LIMIT", "zone num has reach max zone num", "Internal Error", "Contact OceanBase Support"], - "-4633": ["OB_ZONE_STATUS_NOT_MATCH", "zone status not match", "Internal Error", "Contact OceanBase Support"], - "-4634": ["OB_RESOURCE_UNIT_IS_REFERENCED", "resource unit is referenced by resource pool", "Internal Error", "Contact OceanBase Support"], - "-4636": ["OB_DIFFERENT_PRIMARY_ZONE", "table schema primary zone different with other table in sampe tablegroup", "Internal Error", "Contact OceanBase Support"], - "-4637": ["OB_SERVER_NOT_ACTIVE", "server is not active", "Internal Error", "Contact OceanBase Support"], - "-4638": ["OB_RS_NOT_MASTER", "The RootServer is not the master", "Internal Error", "Contact OceanBase Support"], - "-4639": ["OB_CANDIDATE_LIST_ERROR", "The candidate list is invalid", "Internal Error", "Contact OceanBase Support"], - "-4640": ["OB_PARTITION_ZONE_DUPLICATED", "The chosen partition servers belong to same zone.", "Internal Error", "Contact OceanBase Support"], - "-4641": ["OB_ZONE_DUPLICATED", "Duplicated zone in zone list", "Internal Error", "Contact OceanBase Support"], - "-4642": ["OB_NOT_ALL_ZONE_ACTIVE", "Not all zone in zone list are active", "Internal Error", "Contact OceanBase Support"], - "-4643": ["OB_PRIMARY_ZONE_NOT_IN_ZONE_LIST", "primary zone not in zone list", "Internal Error", "Contact OceanBase Support"], - "-4644": ["OB_REPLICA_NUM_NOT_MATCH", "replica num not same with zone count", "Internal Error", "Contact OceanBase Support"], - "-4645": ["OB_ZONE_LIST_POOL_LIST_NOT_MATCH", "zone list not a subset of resource pool list", "Internal Error", "Contact OceanBase Support"], - "-4646": ["OB_INVALID_TENANT_NAME", "tenant name is too long", "Internal Error", "Contact OceanBase Support"], - "-4647": ["OB_EMPTY_RESOURCE_POOL_LIST", "resource pool list is empty", "Internal Error", "Contact OceanBase Support"], - "-4648": ["OB_RESOURCE_UNIT_NOT_EXIST", "resource unit not exist", "Internal Error", "Contact OceanBase Support"], - "-4649": ["OB_RESOURCE_UNIT_EXIST", "resource unit already exist", "Internal 
Error", "Contact OceanBase Support"], - "-4650": ["OB_RESOURCE_POOL_NOT_EXIST", "resource pool not exist", "Internal Error", "Contact OceanBase Support"], - "-4651": ["OB_RESOURCE_POOL_EXIST", "resource pool already exist", "Internal Error", "Contact OceanBase Support"], - "-4652": ["OB_WAIT_LEADER_SWITCH_TIMEOUT", "wait leader switch timeout", "Internal Error", "Contact OceanBase Support"], - "-4653": ["OB_LOCATION_NOT_EXIST", "location not exist", "Internal Error", "Contact OceanBase Support"], - "-4654": ["OB_LOCATION_LEADER_NOT_EXIST", "location leader not exist", "Internal Error", "Contact OceanBase Support"], - "-4655": ["OB_ZONE_NOT_ACTIVE", "zone not active", "Internal Error", "Contact OceanBase Support"], - "-4656": ["OB_UNIT_NUM_OVER_SERVER_COUNT", "resource pool unit num is bigger than zone server count", "Internal Error", "Contact OceanBase Support"], - "-4657": ["OB_POOL_SERVER_INTERSECT", "resource pool list unit server intersect", "Internal Error", "Contact OceanBase Support"], - "-4658": ["OB_NOT_SINGLE_RESOURCE_POOL", "create tenant only support single resource pool now","Internal Error", "Contact OceanBase Support"], - "-4659": ["OB_INVALID_RESOURCE_UNIT", "invalid resource unit", "Internal Error", "Contact OceanBase Support"], - "-4660": ["OB_STOP_SERVER_IN_MULTIPLE_ZONES", "Can not stop server in multiple zones", "Internal Error", "Contact OceanBase Support"], - "-4661": ["OB_SESSION_ENTRY_EXIST", "Session already exist", "Internal Error", "Contact OceanBase Support"], - "-4662": ["OB_GOT_SIGNAL_ABORTING", "Got signal. Aborting!", "Internal Error", "Contact OceanBase Support"], - "-4663": ["OB_SERVER_NOT_ALIVE", "server is not alive", "Internal Error", "Contact OceanBase Support"], - "-4664": ["OB_GET_LOCATION_TIME_OUT", "Timeout", "Internal Error", "Contact OceanBase Support"], - "-4665": ["OB_UNIT_IS_MIGRATING", "Unit is migrating can not migrate again", "Internal Error", "Contact OceanBase Support"], - "-4666": ["OB_CLUSTER_NO_MATCH", "cluster name is not match", "Internal Error", "Contact OceanBase Support"], - "-4667": ["OB_CHECK_ZONE_MERGE_ORDER","Please check new zone in zone_merge_order. 
You can show parameters like 'zone_merge_order'", "Internal Error", "Contact OceanBase Support"], - "-4668": ["OB_ERR_ZONE_NOT_EMPTY", "zone not empty", "Internal Error", "Contact OceanBase Support"], - "-4669": ["OB_DIFFERENT_LOCALITY", "locality not match check it", "Internal Error", "Contact OceanBase Support"], - "-4670": ["OB_EMPTY_LOCALITY", "locality is empty", "Internal Error", "Contact OceanBase Support"], - "-4671": ["OB_FULL_REPLICA_NUM_NOT_ENOUGH", "full replica num not enough", "Internal Error", "Contact OceanBase Support"], - "-4672": ["OB_REPLICA_NUM_NOT_ENOUGH", "replica num not enough", "Internal Error", "Contact OceanBase Support"], - "-4673": ["OB_DATA_SOURCE_NOT_VALID", "Data source not valid", "Internal Error", "Contact OceanBase Support"], - "-4674": ["OB_RUN_JOB_NOT_SUCCESS", "run job not success yet", "Internal Error", "Contact OceanBase Support"], - "-4675": ["OB_NO_NEED_REBUILD", "no need to rebuild", "Internal Error", "Contact OceanBase Support"], - "-4676": ["OB_NEED_REMOVE_UNNEED_TABLE", "need remove unneed table", "Internal Error", "Contact OceanBase Support"], - "-4677": ["OB_NO_NEED_MERGE", "no need to merge", "Internal Error", "Contact OceanBase Support"], - "-4678": ["OB_CONFLICT_OPTION", "conflicting specifications", "Internal Error", "Contact OceanBase Support"], - "-4679": ["OB_DUPLICATE_OPTION", "duplicate specifications", "Internal Error", "Contact OceanBase Support"], - "-4680": ["OB_INVALID_OPTION", "invalid specifications", "Internal Error", "Contact OceanBase Support"], - "-4681": ["OB_RPC_NEED_RECONNECT", "rpc need reconnect", "Internal Error", "Contact OceanBase Support"], - "-4682": ["OB_CANNOT_COPY_MAJOR_SSTABLE", "cannot copy major sstable now", "Internal Error", "Contact OceanBase Support"], - "-4683": ["OB_SRC_DO_NOT_ALLOWED_MIGRATE", "src do not allowed migrate", "Internal Error", "Contact OceanBase Support"], - "-4684": ["OB_TOO_MANY_TENANT_PARTITIONS_ERROR", "Too many partitions were defined for this tenant", "Internal Error", "Contact OceanBase Support"], - "-4685": ["OB_ACTIVE_MEMTBALE_NOT_EXSIT", "active memtable not exist", "Internal Error", "Contact OceanBase Support"], - "-5000": ["OB_ERR_PARSER_INIT", "Failed to init SQL parser", "Internal Error", "Contact OceanBase Support"], - "-5001": ["OB_ERR_PARSE_SQL", "Parse error", "Internal Error", "Contact OceanBase Support"], - "-5002": ["OB_ERR_RESOLVE_SQL", "Resolve error", "Internal Error", "Contact OceanBase Support"], - "-5003": ["OB_ERR_GEN_PLAN", "Generate plan error", "Internal Error", "Contact OceanBase Support"], - "-5006": ["OB_ERR_PARSER_SYNTAX", "You have an error in your SQL syntax; check the manual that corresponds to your OceanBase version for the right syntax to use", "Internal Error", "Please check your SQL"], - "-5007": ["OB_ERR_COLUMN_SIZE", "The used SELECT statements have a different number of columns", "Internal Error", "Contact OceanBase Support"], - "-5008": ["OB_ERR_COLUMN_DUPLICATE", "Duplicate column name", "Internal Error", "Contact OceanBase Support"], - "-5010": ["OB_ERR_OPERATOR_UNKNOWN", "Unknown operator", "Internal Error", "Contact OceanBase Support"], - "-5011": ["OB_ERR_STAR_DUPLICATE", "Duplicated star", "Internal Error", "Contact OceanBase Support"], - "-5012": ["OB_ERR_ILLEGAL_ID", "Illegal ID", "Internal Error", "Contact OceanBase Support"], - "-5014": ["OB_ERR_ILLEGAL_VALUE", "Illegal value", "Internal Error", "Contact OceanBase Support"], - "-5015": ["OB_ERR_COLUMN_AMBIGUOUS", "Ambiguous column", "Internal Error", "Contact OceanBase Support"], 
- "-5016": ["OB_ERR_LOGICAL_PLAN_FAILD", "Generate logical plan error", "Internal Error", "Contact OceanBase Support"], - "-5017": ["OB_ERR_SCHEMA_UNSET", "Schema not set", "Internal Error", "Contact OceanBase Support"], - "-5018": ["OB_ERR_ILLEGAL_NAME", "Illegal name", "Internal Error", "Contact OceanBase Support"], - "-5020": ["OB_ERR_TABLE_EXIST", "Table already exists", "Internal Error", "Contact OceanBase Support"], - "-5019": ["OB_TABLE_NOT_EXIST", "Table doesn't exist", "Internal Error", "Contact OceanBase Support"], - "-5022": ["OB_ERR_EXPR_UNKNOWN", "Unknown expression", "Internal Error", "Contact OceanBase Support"], - "-5023": ["OB_ERR_ILLEGAL_TYPE", "Illegal type", "Internal Error", "Maybe you should use java.sql.Timestamp instead of java.util.Date."], - "-5024": ["OB_ERR_PRIMARY_KEY_DUPLICATE", "Duplicated primary key", "Internal Error", "Contact OceanBase Support"], - "-5025": ["OB_ERR_KEY_NAME_DUPLICATE", "Duplicated key name", "Internal Error", "Contact OceanBase Support"], - "-5026": ["OB_ERR_CREATETIME_DUPLICATE", "Duplicated createtime", "Internal Error", "Contact OceanBase Support"], - "-5027": ["OB_ERR_MODIFYTIME_DUPLICATE", "Duplicated modifytime", "Internal Error", "Contact OceanBase Support"], - "-5028": ["OB_ERR_ILLEGAL_INDEX", "Illegal index", "Internal Error", "Contact OceanBase Support"], - "-5029": ["OB_ERR_INVALID_SCHEMA", "Invalid schema", "Internal Error", "Contact OceanBase Support"], - "-5030": ["OB_ERR_INSERT_NULL_ROWKEY", "Insert null rowkey", "Internal Error", "Contact OceanBase Support"], - "-5031": ["OB_ERR_COLUMN_NOT_FOUND", "Column not found", "Internal Error", "Contact OceanBase Support"], - "-5032": ["OB_ERR_DELETE_NULL_ROWKEY", "Delete null rowkey", "Internal Error", "Contact OceanBase Support"], - "-5034": ["OB_ERR_USER_EMPTY", "No user", "Internal Error", "Contact OceanBase Support"], - "-5035": ["OB_ERR_USER_NOT_EXIST", "User not exist", "Internal Error", "Contact OceanBase Support"], - "-5038": ["OB_ERR_WRONG_PASSWORD", "Incorrect password", "Internal Error", "Contact OceanBase Support"], - "-5039": ["OB_ERR_USER_IS_LOCKED", "User locked", "Internal Error", "Contact OceanBase Support"], - "-5040": ["OB_ERR_UPDATE_ROWKEY_COLUMN", "Can not update rowkey column", "Internal Error", "Contact OceanBase Support"], - "-5041": ["OB_ERR_UPDATE_JOIN_COLUMN", "Can not update join column", "Internal Error", "Contact OceanBase Support"], - "-5043": ["OB_ERR_PREPARE_STMT_NOT_FOUND", "Unknown prepared statement", "Internal Error", "Contact OceanBase Support"], - "-5044": ["OB_ERR_SYS_VARIABLE_UNKNOWN", "Unknown system variable", "Internal Error", "Contact OceanBase Support"], - "-5046": ["OB_ERR_OLDER_PRIVILEGE_VERSION", "Older privilege version", "Internal Error", "Contact OceanBase Support"], - "-5050": ["OB_ERR_USER_EXIST", "User exists", "Internal Error", "Contact OceanBase Support"], - "-5051": ["OB_ERR_PASSWORD_EMPTY", "Empty password", "Internal Error", "Contact OceanBase Support"], - "-5052": ["OB_ERR_GRANT_PRIVILEGES_TO_CREATE_TABLE", "Failed to grant privelege", "Internal Error", "Contact OceanBase Support"], - "-5053": ["OB_ERR_WRONG_DYNAMIC_PARAM", "Wrong dynamic parameters", "Internal Error", "Contact OceanBase Support"], - "-5054": ["OB_ERR_PARAM_SIZE", "Incorrect parameter count", "Internal Error", "Contact OceanBase Support"], - "-5055": ["OB_ERR_FUNCTION_UNKNOWN", "FUNCTION does not exist", "Internal Error", "Contact OceanBase Support"], - "-5056": ["OB_ERR_CREAT_MODIFY_TIME_COLUMN", "CreateTime or ModifyTime column cannot be modified", 
"Internal Error", "Contact OceanBase Support"], - "-5057": ["OB_ERR_MODIFY_PRIMARY_KEY", "Primary key cannot be modified", "Internal Error", "Contact OceanBase Support"], - "-5058": ["OB_ERR_PARAM_DUPLICATE", "Duplicated parameters", "Internal Error", "Contact OceanBase Support"], - "-5059": ["OB_ERR_TOO_MANY_SESSIONS", "Too many sessions", "Internal Error", "Contact OceanBase Support"], - "-5061": ["OB_ERR_TOO_MANY_PS", "Too many prepared statements", "Internal Error", "Contact OceanBase Support"], - "-5063": ["OB_ERR_HINT_UNKNOWN", "Unknown hint", "Internal Error", "Contact OceanBase Support"], - "-5064": ["OB_ERR_WHEN_UNSATISFIED", "When condition not satisfied", "Internal Error", "Contact OceanBase Support"], - "-5065": ["OB_ERR_QUERY_INTERRUPTED", "Query execution was interrupted", "Internal Error", "Contact OceanBase Support"], - "-5066": ["OB_ERR_SESSION_INTERRUPTED", "OceanBase instance terminated. Disconnection forced", "Internal Error", "Contact OceanBase Support"], - "-5067": ["OB_ERR_UNKNOWN_SESSION_ID", "Unknown session ID", "Internal Error", "Contact OceanBase Support"], - "-5068": ["OB_ERR_PROTOCOL_NOT_RECOGNIZE", "Incorrect protocol", "Internal Error", "Contact OceanBase Support"], - "-5069": ["OB_ERR_WRITE_AUTH_ERROR", "Write auth packet error", "Internal Error", "Contact OceanBase Support"], - "-5070": ["OB_ERR_PARSE_JOIN_INFO", "Wrong join info", "Internal Error", "Contact OceanBase Support"], - "-5071": ["OB_ERR_ALTER_INDEX_COLUMN", "Cannot alter index column", "Internal Error", "Contact OceanBase Support"], - "-5072": ["OB_ERR_MODIFY_INDEX_TABLE", "Cannot modify index table", "Internal Error", "Contact OceanBase Support"], - "-5073": ["OB_ERR_INDEX_UNAVAILABLE", "Index unavailable", "Internal Error", "Contact OceanBase Support"], - "-5074": ["OB_ERR_NOP_VALUE", "NOP cannot be used here", "Internal Error", "Contact OceanBase Support"], - "-5080": ["OB_ERR_PS_TOO_MANY_PARAM", "Prepared statement contains too many placeholders", "Internal Error", "Contact OceanBase Support"], - "-5081": ["OB_ERR_READ_ONLY", "The server is read only now", "Internal Error", "Contact OceanBase Support"], - "-5083": ["OB_ERR_INVALID_TYPE_FOR_OP", "Invalid data type for the operation", "Internal Error", "Contact OceanBase Support"], - "-5084": ["OB_ERR_CAST_VARCHAR_TO_BOOL", "Can not cast varchar value to bool type", "Internal Error", "Contact OceanBase Support"], - "-5085": ["OB_ERR_CAST_VARCHAR_TO_NUMBER", "Not a number Can not cast varchar value to number type", "Internal Error", "Contact OceanBase Support"], - "-5086": ["OB_ERR_CAST_VARCHAR_TO_TIME", "Not timestamp Can not cast varchar value to timestamp type", "Internal Error", "Contact OceanBase Support"], - "-5087": ["OB_ERR_CAST_NUMBER_OVERFLOW", "Result value was out of range when cast to number", "Internal Error", "Contact OceanBase Support"], - "-5090": ["OB_SCHEMA_NUMBER_PRECISION_OVERFLOW", "Precision was out of range", "Internal Error", "Contact OceanBase Support"], - "-5091": ["OB_SCHEMA_NUMBER_SCALE_OVERFLOW", "Scale value was out of range", "Internal Error", "Contact OceanBase Support"], - "-5092": ["OB_ERR_INDEX_UNKNOWN", "Unknown index", "Internal Error", "Contact OceanBase Support"], - "-5093": ["OB_NUMERIC_OVERFLOW", "numeric overflow", "Internal Error", "Contact OceanBase Support"], - "-5094": ["OB_ERR_TOO_MANY_JOIN_TABLES", "too many joined tables", "Internal Error", "Contact OceanBase Support"], - "-5099": ["OB_ERR_SYS_CONFIG_UNKNOWN", "System config unknown", "Internal Error", "Contact OceanBase Support"], - "-5100": 
["OB_ERR_LOCAL_VARIABLE", "Local variable", "Internal Error", "Contact OceanBase Support"], - "-5101": ["OB_ERR_GLOBAL_VARIABLE", "Global variable", "Internal Error", "Contact OceanBase Support"], - "-5102": ["OB_ERR_VARIABLE_IS_READONLY", "variable is read only", "Internal Error", "Contact OceanBase Support"], - "-5103": ["OB_ERR_INCORRECT_GLOBAL_LOCAL_VAR", "incorrect global or local variable", "Internal Error", "Contact OceanBase Support"], - "-5107": ["OB_INVALID_ARGUMENT_FOR_IS", "Invalid argument for IS operator", "Internal Error", "Contact OceanBase Support"], - "-5112": ["OB_ERR_USER_VARIABLE_UNKNOWN", "Unknown user variable", "Internal Error", "Contact OceanBase Support"], - "-5114": ["OB_INVALID_NUMERIC", "Invalid numeric", "Internal Error", "Contact OceanBase Support"], - "-5116": ["OB_SQL_LOG_OP_SETCHILD_OVERFLOW", "Logical operator child index overflow", "Internal Error", "Contact OceanBase Support"], - "-5117": ["OB_SQL_EXPLAIN_FAILED", "fail to explain plan", "Internal Error", "Contact OceanBase Support"], - "-5118": ["OB_SQL_OPT_COPY_OP_FAILED", "fail to copy logical operator", "Internal Error", "Contact OceanBase Support"], - "-5119": ["OB_SQL_OPT_GEN_PLAN_FALIED", "fail to generate plan", "Internal Error", "Contact OceanBase Support"], - "-5120": ["OB_SQL_OPT_CREATE_RAWEXPR_FAILED", "fail to create raw expr", "Internal Error", "Contact OceanBase Support"], - "-5121": ["OB_SQL_OPT_JOIN_ORDER_FAILED", "fail to generate join order", "Internal Error", "Contact OceanBase Support"], - "-5122": ["OB_SQL_OPT_ERROR", "optimizer general error", "Internal Error", "Contact OceanBase Support"], - "-5130": ["OB_SQL_RESOLVER_NO_MEMORY", "sql resolver no memory", "Internal Error", "Contact OceanBase Support"], - "-5131": ["OB_SQL_DML_ONLY", "plan cache support dml only", "Internal Error", "Contact OceanBase Support"], - "-5133": ["OB_ERR_NO_GRANT", "No such grant defined", "Internal Error", "Contact OceanBase Support"], - "-5134": ["OB_ERR_NO_DB_SELECTED", "No database selected", "Internal Error", "Contact OceanBase Support"], - "-5135": ["OB_SQL_PC_OVERFLOW", "plan cache is overflow", "Internal Error", "Contact OceanBase Support"], - "-5136": ["OB_SQL_PC_PLAN_DUPLICATE", "plan exists in plan cache already", "Internal Error", "Contact OceanBase Support"], - "-5137": ["OB_SQL_PC_PLAN_EXPIRE", "plan is expired", "Internal Error", "Contact OceanBase Support"], - "-5138": ["OB_SQL_PC_NOT_EXIST", "no plan exist", "Internal Error", "Contact OceanBase Support"], - "-5139": ["OB_SQL_PARAMS_LIMIT", "too many params plan cache not support", "Internal Error", "Contact OceanBase Support"], - "-5140": ["OB_SQL_PC_PLAN_SIZE_LIMIT", "plan is too big to add to plan cache", "Internal Error", "Contact OceanBase Support"], - "-5142": ["OB_ERR_UNKNOWN_CHARSET", "Unknown character set", "Internal Error", "Contact OceanBase Support"], - "-5143": ["OB_ERR_UNKNOWN_COLLATION", "Unknown collation", "Internal Error", "Contact OceanBase Support"], - "-5144": ["OB_ERR_COLLATION_MISMATCH", "The collation is not valid for the character set", "Internal Error", "Contact OceanBase Support"], - "-5145": ["OB_ERR_WRONG_VALUE_FOR_VAR", "Variable can't be set to the value", "Internal Error", "Contact OceanBase Support"], - "-5146": ["OB_UNKNOWN_PARTITION", "Unknown partition", "Internal Error", "Contact OceanBase Support"], - "-5147": ["OB_PARTITION_NOT_MATCH", "Found a row not matching the given partition set", "Internal Error", "Contact OceanBase Support"], - "-5148": ["OB_ER_PASSWD_LENGTH", " Password hash should be a 
40-digit hexadecimal number", "Internal Error", "Contact OceanBase Support"], - "-5149": ["OB_ERR_INSERT_INNER_JOIN_COLUMN", "Insert inner join column error", "Internal Error", "Contact OceanBase Support"], - "-5150": ["OB_TENANT_NOT_IN_SERVER", "Tenant not in this server", "Internal Error", "Contact OceanBase Support"], - "-5151": ["OB_TABLEGROUP_NOT_EXIST", "tablegroup not exist", "Internal Error", "Contact OceanBase Support"], - "-5153": ["OB_SUBQUERY_TOO_MANY_ROW", "Subquery returns more than 1 row", "Internal Error", "Contact OceanBase Support"], - "-5154": ["OB_ERR_BAD_DATABASE", "Unknown database", "Internal Error", "Contact OceanBase Support"], - "-5155": ["OB_CANNOT_USER", "User operation failed", "Internal Error", "Contact OceanBase Support"], - "-5156": ["OB_TENANT_EXIST", "tenant already exist", "Internal Error", "Contact OceanBase Support"], - "-5157": ["OB_TENANT_NOT_EXIST", "Unknown tenant", "Internal Error", "Contact OceanBase Support"], - "-5158": ["OB_DATABASE_EXIST", "Can't create database;database exists", "Internal Error", "Contact OceanBase Support"], - "-5159": ["OB_TABLEGROUP_EXIST", "tablegroup already exist", "Internal Error", "Contact OceanBase Support"], - "-5160": ["OB_ERR_INVALID_TENANT_NAME", "invalid tenant name specified in connection string", "Internal Error", "Contact OceanBase Support"], - "-5161": ["OB_EMPTY_TENANT", "tenant is empty", "Internal Error", "Contact OceanBase Support"], - "-5162": ["OB_WRONG_DB_NAME", "Incorrect database name", "Internal Error", "Contact OceanBase Support"], - "-5163": ["OB_WRONG_TABLE_NAME", "Incorrect table name", "Internal Error", "Contact OceanBase Support"], - "-5164": ["OB_WRONG_COLUMN_NAME", "Incorrect column name", "Internal Error", "Contact OceanBase Support"], - "-5165": ["OB_ERR_COLUMN_SPEC", "Incorrect column specifier", "Internal Error", "Contact OceanBase Support"], - "-5166": ["OB_ERR_DB_DROP_EXISTS", "Can't drop database;database doesn't exist", "Internal Error", "Contact OceanBase Support"], - "-5167": ["OB_ERR_DATA_TOO_LONG", "Data too long for column", "Internal Error", "Contact OceanBase Support"], - "-5168": ["OB_ERR_WRONG_VALUE_COUNT_ON_ROW", "column count does not match value count", "Internal Error", "Contact OceanBase Support"], - "-5169": ["OB_ERR_CREATE_USER_WITH_GRANT", "You are not allowed to create a user with GRANT", "Internal Error", "Contact OceanBase Support"], - "-5170": ["OB_ERR_NO_DB_PRIVILEGE", "Access denied for user to database", "Internal Error", "Contact OceanBase Support"], - "-5171": ["OB_ERR_NO_TABLE_PRIVILEGE", "Command denied to user for table", "Internal Error", "Contact OceanBase Support"], - "-5172": ["OB_INVALID_ON_UPDATE", "Invalid ON UPDATE clause", "Internal Error", "Contact OceanBase Support"], - "-5173": ["OB_INVALID_DEFAULT", "Invalid default value", "Internal Error", "Contact OceanBase Support"], - "-5174": ["OB_ERR_UPDATE_TABLE_USED", "Update table used", "Internal Error", "Contact OceanBase Support"], - "-5175": ["OB_ERR_COULUMN_VALUE_NOT_MATCH", "Column count doesn't match value count", "Internal Error", "Contact OceanBase Support"], - "-5176": ["OB_ERR_INVALID_GROUP_FUNC_USE", "Invalid use of group function", "Internal Error", "Contact OceanBase Support"], - "-5177": ["OB_CANT_AGGREGATE_2COLLATIONS", "Illegal mix of collations", "Internal Error", "Contact OceanBase Support"], - "-5178": ["OB_ERR_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD", "Field is of a not allowed type for this type of partitioning", "Internal Error", "Contact OceanBase Support"], - "-5179": 
["OB_ERR_TOO_LONG_IDENT", "Identifier name is too long", "Internal Error", "Contact OceanBase Support"], - "-5180": ["OB_ERR_WRONG_TYPE_FOR_VAR", "Incorrect argument type to variable", "Internal Error", "Contact OceanBase Support"], - "-5182": ["OB_ERR_PRIV_USAGE", "Incorrect usage of DB GRANT and GLOBAL PRIVILEGES", "Internal Error", "Contact OceanBase Support"], - "-5183": ["OB_ILLEGAL_GRANT_FOR_TABLE", "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", "Internal Error", "Contact OceanBase Support"], - "-5184": ["OB_ERR_REACH_AUTOINC_MAX", "Failed to read auto-increment value from storage engine", "Internal Error", "Contact OceanBase Support"], - "-5185": ["OB_ERR_NO_TABLES_USED", "No tables used", "Internal Error", "Contact OceanBase Support"], - "-5187": ["OB_CANT_REMOVE_ALL_FIELDS", "You can't delete all columns with ALTER TABLE; use DROP TABLE instead", "Internal Error", "Contact OceanBase Support"], - "-5189": ["OB_NO_PARTS_ERROR", "Number of partitions = 0 is not an allowed value", "Internal Error", "Contact OceanBase Support"], - "-5190": ["OB_WRONG_SUB_KEY", "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys"], - "-5191": ["OB_KEY_PART_0", "Key part length cannot be 0", "Internal Error", "Contact OceanBase Support"], - "-5192": ["OB_ERR_UNKNOWN_TIME_ZONE", "Unknown or incorrect time zone", "Internal Error", "Contact OceanBase Support"], - "-5193": ["OB_ERR_WRONG_AUTO_KEY", "Incorrect table definition; there can be only one auto column", "Internal Error", "Contact OceanBase Support"], - "-5194": ["OB_ERR_TOO_MANY_KEYS", "Too many keys specified", "Internal Error", "Contact OceanBase Support"], - "-5195": ["OB_ERR_TOO_MANY_ROWKEY_COLUMNS", "Too many key parts specified", "Internal Error", "Contact OceanBase Support"], - "-5196": ["OB_ERR_TOO_LONG_KEY_LENGTH", "Specified key was too long", "Internal Error", "Contact OceanBase Support"], - "-5197": ["OB_ERR_TOO_MANY_COLUMNS", "Too many columns", "Internal Error", "Contact OceanBase Support"], - "-5199": ["OB_ERR_TOO_BIG_ROWSIZE", "Row size too large", "Internal Error", "Contact OceanBase Support"], - "-5200": ["OB_ERR_UNKNOWN_TABLE", "Unknown table", "Internal Error", "Contact OceanBase Support"], - "-5201": ["OB_ERR_BAD_TABLE", "Unknown table", "Internal Error", "Contact OceanBase Support"], - "-5202": ["OB_ERR_TOO_BIG_SCALE", "Too big scale specified for column", "Internal Error", "Contact OceanBase Support"], - "-5203": ["OB_ERR_TOO_BIG_PRECISION", "Too big precision specified for column", "Internal Error", "Contact OceanBase Support"], - "-5206": ["OB_WRONG_GROUP_FIELD", "Can't group on column", "Internal Error", "Contact OceanBase Support"], - "-5207": ["OB_NON_UNIQ_ERROR", "Column is ambiguous", "Internal Error", "Contact OceanBase Support"], - "-5208": ["OB_ERR_NONUNIQ_TABLE", "Not unique table/alias", "Internal Error", "Contact OceanBase Support"], - "-5209": ["OB_ERR_CANT_DROP_FIELD_OR_KEY", "Can't DROP Column; check that column/key exists", "Internal Error", "Contact OceanBase Support"], - "-5210": ["OB_ERR_MULTIPLE_PRI_KEY", "Multiple primary key defined", "Internal Error", "Contact OceanBase Support"], - "-5211": ["OB_ERR_KEY_COLUMN_DOES_NOT_EXITS", "Key column doesn't exist in table", "Internal Error", "Contact OceanBase Support"], - "-5212": ["OB_ERR_AUTO_PARTITION_KEY", "auto-increment column should not be part of partition key", "Internal Error", "Contact OceanBase 
Support"], - "-5213": ["OB_ERR_CANT_USE_OPTION_HERE", "Incorrect usage/placement", "Internal Error", "Contact OceanBase Support"], - "-5214": ["OB_ERR_WRONG_OBJECT", "Wrong object", "Internal Error", "Contact OceanBase Support"], - "-5215": ["OB_ERR_ON_RENAME", "Error on rename table", "Internal Error", "Contact OceanBase Support"], - "-5216": ["OB_ERR_WRONG_KEY_COLUMN", "The used storage engine can't index column", "Internal Error", "Contact OceanBase Support"], - "-5217": ["OB_ERR_BAD_FIELD_ERROR", "Unknown column", "Internal Error", "Contact OceanBase Support"], - "-5218": ["OB_ERR_WRONG_FIELD_WITH_GROUP", "column is not in GROUP BY", "Internal Error", "Contact OceanBase Support"], - "-5219": ["OB_ERR_CANT_CHANGE_TX_CHARACTERISTICS", "Transaction characteristics can't be changed while a transaction is in progress", "Internal Error", "Contact OceanBase Support"], - "-5220": ["OB_ERR_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION", "Cannot execute statement in a READ ONLY transaction.", "Internal Error", "Contact OceanBase Support"], - "-5222": ["OB_ERR_TRUNCATED_WRONG_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], - "-5223": ["OB_ERR_WRONG_IDENT_NAME", "wrong ident name", "Internal Error", "Contact OceanBase Support"], - "-5224": ["OB_WRONG_NAME_FOR_INDEX", "Incorrect index name", "Internal Error", "Contact OceanBase Support"], - "-5226": ["OB_REACH_MEMORY_LIMIT", "plan cache memory used reach the high water mark.", "Internal Error", "Contact OceanBase Support"], - "-5228": ["OB_ERR_NON_UPDATABLE_TABLE", "The target table is not updatable", "Internal Error", "Contact OceanBase Support"], - "-5229": ["OB_ERR_WARN_DATA_OUT_OF_RANGE", "Out of range value for column", "Internal Error", "Contact OceanBase Support"], - "-5233": ["OB_ERR_OPTION_PREVENTS_STATEMENT", "The MySQL server is running with the --read-only option so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], - "-5234": ["OB_ERR_DB_READ_ONLY", "The database is read only so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], - "-5235": ["OB_ERR_TABLE_READ_ONLY", "The table is read only so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], - "-5236": ["OB_ERR_LOCK_OR_ACTIVE_TRANSACTION", "Can't execute the given command because you have active locked tables or an active transaction", "Internal Error", "Contact OceanBase Support"], - "-5237": ["OB_ERR_SAME_NAME_PARTITION_FIELD", "Duplicate partition field name", "Internal Error", "Contact OceanBase Support"], - "-5238": ["OB_ERR_TABLENAME_NOT_ALLOWED_HERE", "Table from one of the SELECTs cannot be used in global ORDER clause", "Internal Error", "Contact OceanBase Support"], - "-5239": ["OB_ERR_VIEW_RECURSIVE", "view contains recursion", "Internal Error", "Contact OceanBase Support"], - "-5240": ["OB_ERR_QUALIFIER", "Column part of USING clause cannot have qualifier", "Internal Error", "Contact OceanBase Support"], - "-5241": ["OB_ERR_WRONG_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], - "-5242": ["OB_ERR_VIEW_WRONG_LIST", "View's SELECT and view's field list have different column counts", "Internal Error", "Contact OceanBase Support"], - "-5243": ["OB_SYS_VARS_MAYBE_DIFF_VERSION", "system variables' version maybe different", "Internal Error", "Contact OceanBase Support"], - "-5244": ["OB_ERR_AUTO_INCREMENT_CONFLICT", "Auto-increment value in UPDATE conflicts with internally generated values", "Internal Error", "Contact OceanBase Support"], - 
"-5245": ["OB_ERR_TASK_SKIPPED", "some tasks are skipped", "Internal Error", "Contact OceanBase Support"], - "-5246": ["OB_ERR_NAME_BECOMES_EMPTY", "Name has become ''", "Internal Error", "Contact OceanBase Support"], - "-5247": ["OB_ERR_REMOVED_SPACES", "Leading spaces are removed from name ", "Internal Error", "Contact OceanBase Support"], - "-5248": ["OB_WARN_ADD_AUTOINCREMENT_COLUMN", "Alter table add auto_increment column is dangerous", "Internal Error", "Contact OceanBase Support"], - "-5249": ["OB_WARN_CHAMGE_NULL_ATTRIBUTE", "Alter table change nullable column to not nullable is dangerous", "Internal Error", "Contact OceanBase Support"], - "-5250": ["OB_ERR_INVALID_CHARACTER_STRING", "Invalid character string", "Internal Error", "Contact OceanBase Support"], - "-5251": ["OB_ERR_KILL_DENIED", "You are not owner of thread", "Internal Error", "Contact OceanBase Support"], - "-5252": ["OB_ERR_COLUMN_DEFINITION_AMBIGUOUS", "Column definition is ambiguous. Column has both NULL and NOT NULL attributes", "Internal Error", "Contact OceanBase Support"], - "-5253": ["OB_ERR_EMPTY_QUERY", "Query was empty", "Internal Error", "Contact OceanBase Support"], - "-5255": ["OB_ERR_FIELD_NOT_FOUND_PART", "Field in list of fields for partition function not found in table", "Internal Error", "Contact OceanBase Support"], - "-5256": ["OB_ERR_PRIMARY_CANT_HAVE_NULL", "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Internal Error", "Contact OceanBase Support"], - "-5257": ["OB_ERR_PARTITION_FUNC_NOT_ALLOWED_ERROR", "The PARTITION function returns the wrong type", "Internal Error", "Contact OceanBase Support"], - "-5258": ["OB_ERR_INVALID_BLOCK_SIZE", "Invalid block size, block size should between 16384 and 1048576", "Internal Error", "Contact OceanBase Support"], - "-5259": ["OB_ERR_UNKNOWN_STORAGE_ENGINE", "Unknown storage engine", "Internal Error", "Contact OceanBase Support"], - "-5260": ["OB_ERR_TENANT_IS_LOCKED", "Tenant is locked", "Internal Error", "Contact OceanBase Support"], - "-5261": ["OB_EER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF", "A UNIQUE INDEX/PRIMARY KEY must include all columns in the table's partitioning function", "Internal Error", "Contact OceanBase Support"], - "-5262": ["OB_ERR_PARTITION_FUNCTION_IS_NOT_ALLOWED", "This partition function is not allowed", "Internal Error", "Contact OceanBase Support"], - "-5263": ["OB_ERR_AGGREGATE_ORDER_FOR_UNION", "aggregate order for union", "Internal Error", "Contact OceanBase Support"], - "-5264": ["OB_ERR_OUTLINE_EXIST", "Outline exists", "Internal Error", "Contact OceanBase Support"], - "-5265": ["OB_OUTLINE_NOT_EXIST", "Outline not exists", "Internal Error", "Contact OceanBase Support"], - "-5266": ["OB_WARN_OPTION_BELOW_LIMIT", "The value should be no less than the limit", "Internal Error", "Contact OceanBase Support"], - "-5267": ["OB_INVALID_OUTLINE", "invalid outline", "Internal Error", "Contact OceanBase Support"], - "-5268": ["OB_REACH_MAX_CONCURRENT_NUM", "SQL reach max concurrent num", "Internal Error", "Contact OceanBase Support"], - "-5269": ["OB_ERR_OPERATION_ON_RECYCLE_OBJECT", "can not perform DDL/DML over objects in Recycle Bin", "Internal Error", "Contact OceanBase Support"], - "-5270": ["OB_ERR_OBJECT_NOT_IN_RECYCLEBIN", "object not in RECYCLE BIN", "Internal Error", "Contact OceanBase Support"], - "-5271": ["OB_ERR_CON_COUNT_ERROR", "Too many connections", "Internal Error", "Contact OceanBase Support"], - "-5272": ["OB_ERR_OUTLINE_CONTENT_EXIST", "Outline content already exists when 
added", "Internal Error", "Contact OceanBase Support"], - "-5273": ["OB_ERR_OUTLINE_MAX_CONCURRENT_EXIST", "Max concurrent already exists when added", "Internal Error", "Contact OceanBase Support"], - "-5274": ["OB_ERR_VALUES_IS_NOT_INT_TYPE_ERROR", "VALUES value for partition must have type INT", "Internal Error", "Contact OceanBase Support"], - "-5275": ["OB_ERR_WRONG_TYPE_COLUMN_VALUE_ERROR", "Partition column values of incorrect type", "Internal Error", "Contact OceanBase Support"], - "-5276": ["OB_ERR_PARTITION_COLUMN_LIST_ERROR", "Inconsistency in usage of column lists for partitioning", "Internal Error", "Contact OceanBase Support"], - "-5277": ["OB_ERR_TOO_MANY_VALUES_ERROR", "Cannot have more than one value for this type of RANGE partitioning", "Internal Error", "Contact OceanBase Support"], - "-5278": ["OB_ERR_PARTITION_VALUE_ERROR", "This partition value with incorrect charset type", "Internal Error", "Contact OceanBase Support"], - "-5279": ["OB_ERR_PARTITION_INTERVAL_ERROR", "Partition interval must have type INT", "Internal Error", "Contact OceanBase Support"], - "-5280": ["OB_ERR_SAME_NAME_PARTITION", "Duplicate partition name", "Internal Error", "Contact OceanBase Support"], - "-5281": ["OB_ERR_RANGE_NOT_INCREASING_ERROR", "VALUES LESS THAN value must be strictly increasing for each partition", "Internal Error", "Contact OceanBase Support"], - "-5282": ["OB_ERR_PARSE_PARTITION_RANGE", "Wrong number of partitions defined, mismatch with previous setting", "Internal Error", "Contact OceanBase Support"], - "-5283": ["OB_ERR_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF", "A PRIMARY KEY must include all columns in the table's partitioning function", "Internal Error", "Contact OceanBase Support"], - "-5284": ["OB_NO_PARTITION_FOR_GIVEN_VALUE", "Table has no partition for value", "Internal Error", "Contact OceanBase Support"], - "-5285": ["OB_EER_NULL_IN_VALUES_LESS_THAN", "Not allowed to use NULL value in VALUES LESS THAN", "Internal Error", "Contact OceanBase Support"], - "-5286": ["OB_ERR_PARTITION_CONST_DOMAIN_ERROR", "Partition constant is out of partition function domain", "Internal Error", "Contact OceanBase Support"], - "-5287": ["OB_ERR_TOO_MANY_PARTITION_FUNC_FIELDS", "Too many fields in 'list of partition fields'", "Internal Error", "Contact OceanBase Support"], - "-5288": ["OB_ERR_BAD_FT_COLUMN", "Column cannot be part of FULLTEXT index", "Internal Error", "Contact OceanBase Support"], - "-5289": ["OB_ERR_KEY_DOES_NOT_EXISTS", "key does not exist in table", "Internal Error", "Contact OceanBase Support"], - "-5290": ["OB_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN", "non-default value for generated column is not allowed", "Internal Error", "Contact OceanBase Support"], - "-5291": ["OB_ERR_BAD_CTXCAT_COLUMN", "The CTXCAT column must be contiguous in the index column list", "Internal Error", "Contact OceanBase Support"], - "-5292": ["OB_ERR_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN", "not supported for generated columns", "Internal Error", "Contact OceanBase Support"], - "-5293": ["OB_ERR_DEPENDENT_BY_GENERATED_COLUMN", "Column has a generated column dependency", "Internal Error", "Contact OceanBase Support"], - "-5294": ["OB_ERR_TOO_MANY_ROWS", "Result consisted of more than one row", "Internal Error", "Contact OceanBase Support"], - "-5295": ["OB_WRONG_FIELD_TERMINATORS", "Field separator argument is not what is expected; check the manual", "Internal Error", "Contact OceanBase Support"], - "-5296": ["OB_NO_READABLE_REPLICA", "there has no readable replica", "Internal Error", "Contact 
OceanBase Support"], - "-5297": ["OB_ERR_UNEXPECTED_TZ_TRANSITION", "unexpected time zone info transition", "Internal Error", "Contact OceanBase Support"], - "-5298": ["OB_ERR_SYNONYM_EXIST", "synonym exists", "Internal Error", "Contact OceanBase Support"], - "-5299": ["OB_SYNONYM_NOT_EXIST", "synonym not exists", "Internal Error", "Contact OceanBase Support"], - "-5300": ["OB_ERR_MISS_ORDER_BY_EXPR", "missing ORDER BY expression in the window specification", "Internal Error", "Contact OceanBase Support"], - "-5301": ["OB_ERR_NOT_CONST_EXPR", "The argument of the window function should be a constant for a partition", "Internal Error", "Contact OceanBase Support"], - "-5302": ["OB_ERR_PARTITION_MGMT_ON_NONPARTITIONED", "Partition management on a not partitioned table is not possible", "Internal Error", "Contact OceanBase Support"], - "-5303": ["OB_ERR_DROP_PARTITION_NON_EXISTENT", "Error in list of partitions", "Internal Error", "Contact OceanBase Support"], - "-5304": ["OB_ERR_PARTITION_MGMT_ON_TWOPART_TABLE", "Partition management on a two-part table is not possible", "Internal Error", "Contact OceanBase Support"], - "-5305": ["OB_ERR_ONLY_ON_RANGE_LIST_PARTITION", "can only be used on RANGE/LIST partitions", "Internal Error", "Contact OceanBase Support"], - "-5306": ["OB_ERR_DROP_LAST_PARTITION", "Cannot remove all partitions, use DROP TABLE instead", "Internal Error"], - "-5307": ["OB_ERR_SCHEDULER_THREAD_NOT_ENOUGH", "Scheduler thread number is not enough", "Internal Error", "Contact OceanBase Support"], - "-5308": ["OB_ERR_IGNORE_USER_HOST_NAME", "Ignore the host name", "Internal Error", "Contact OceanBase Support"], - "-5309": ["OB_IGNORE_SQL_IN_RESTORE", "Ignore sql in restore process", "Internal Error", "Contact OceanBase Support"], - "-5310": ["OB_ERR_TEMPORARY_TABLE_WITH_PARTITION", "Cannot create temporary table with partitions", "Internal Error", "Contact OceanBase Support"], - "-5311": ["OB_ERR_INVALID_COLUMN_ID", "Invalid column id", "Internal Error", "Contact OceanBase Support"], - "-5312": ["OB_SYNC_DDL_DUPLICATE", "Duplicated ddl id", "Internal Error", "Contact OceanBase Support"], - "-5313": ["OB_SYNC_DDL_ERROR", "Failed to sync ddl", "Internal Error", "Contact OceanBase Support"], - "-5314": ["OB_ERR_ROW_IS_REFERENCED", "Cannot delete or update a parent row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"], - "-5315": ["OB_ERR_NO_REFERENCED_ROW", "Cannot add or update a child row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"], - "-5317": ["OB_ERR_CANNOT_ADD_FOREIGN", "Cannot add foreign key constraint", "Internal Error", "Contact OceanBase Support"], - "-5318": ["OB_ERR_WRONG_FK_DEF", "Incorrect foreign key definition: Key reference and table reference don't match", "Internal Error", "Contact OceanBase Support"], - "-5319": ["OB_ERR_INVALID_CHILD_COLUMN_LENGTH_FK", "Invalid child column length", "Internal Error", "Contact OceanBase Support"], - "-5320": ["OB_ERR_ALTER_COLUMN_FK", "Cannot alter foreign key column", "Internal Error", "Contact OceanBase Support"], - "-5321": ["OB_ERR_CONNECT_BY_REQUIRED", "CONNECT BY clause required in this query block", "Internal Error", "Contact OceanBase Support"], - "-5322": ["OB_ERR_INVALID_PSEUDO_COLUMN_PLACE", "Specified pseudocolumn, operator or function not allowed here", "Internal Error", "Contact OceanBase Support"], - "-5323": ["OB_ERR_NOCYCLE_REQUIRED", "NOCYCLE keyword is required with CONNECT_BY_ISCYCLE pseudocolumn", "Internal Error", "Contact OceanBase 
Support"], - "-5324": ["OB_ERR_CONNECT_BY_LOOP", "CONNECT BY loop in user data", "Internal Error", "Contact OceanBase Support"], - "-5325": ["OB_ERR_INVALID_SIBLINGS", "ORDER SIBLINGS BY clause not allowed here", "Internal Error", "Contact OceanBase Support"], - "-5326": ["OB_ERR_INVALID_SEPARATOR", "when using SYS_CONNECT_BY_PATH function, cannot have separator as part of column value", "Internal Error", "Contact OceanBase Support"], - "-5327": ["OB_ERR_INVALID_SYNONYM_NAME", "Database can not be specified in public synonym", "Internal Error", "Contact OceanBase Support"], - "-5328": ["OB_ERR_LOOP_OF_SYNONYM", "Looping chain of synonyms", "Internal Error", "Contact OceanBase Support"], - "-5329": ["OB_ERR_SYNONYM_SAME_AS_OBJECT", "Cannot create a synonym with same name as object", "Internal Error", "Contact OceanBase Support"], - "-5330": ["OB_ERR_SYNONYM_TRANSLATION_INVALID", "Synonym translation is no longer valid", "Internal Error", "Contact OceanBase Support"], - "-5331": ["OB_ERR_EXIST_OBJECT", "Name is already used by an existing object", "Internal Error", "Contact OceanBase Support"], - "-5332": ["OB_ERR_ILLEGAL_VALUE_FOR_TYPE", "Illegal value found during parsing", "Internal Error", "Contact OceanBase Support"], - "-5333": ["OB_ER_TOO_LONG_SET_ENUM_VALUE", "Too long enumeration/set value for column.", "Internal Error", "Contact OceanBase Support"], - "-5334": ["OB_ER_DUPLICATED_VALUE_IN_TYPE", "Column has duplicated value", "Internal Error", "Contact OceanBase Support"], - "-5335": ["OB_ER_TOO_BIG_ENUM", "Too many enumeration values for column", "Internal Error", "Contact OceanBase Support"], - "-5336": ["OB_ERR_TOO_BIG_SET", "Too many strings for column", "Internal Error", "Contact OceanBase Support"], - "-5337": ["OB_ERR_WRONG_ROWID", "rowid is wrong", "Internal Error", "Contact OceanBase Support"], - "-5338": ["OB_ERR_INVALID_WINDOW_FUNCTION_PLACE", "Window Function not allowed here", "Internal Error", "Contact OceanBase Support"], - "-5339": ["OB_ERR_PARSE_PARTITION_LIST", "Fail to parse list partition", "Internal Error", "Contact OceanBase Support"], - "-5340": ["OB_ERR_MULTIPLE_DEF_CONST_IN_LIST_PART", "Multiple definition of same constant in list partitioning", "Internal Error", "Contact OceanBase Support"], - "-5341": ["OB_ERR_INVALID_TIMEZONE_REGION_ID", "timezone region ID is invalid", "Internal Error", "Contact OceanBase Support"], - "-5342": ["OB_ERR_INVALID_HEX_NUMBER", "invalid hex number", "Internal Error", "Contact OceanBase Support"], - "-5343": ["OB_ERR_WRONG_FUNC_ARGUMENTS_TYPE", "wrong number or types of arguments in function", "Internal Error", "Contact OceanBase Support"], - "-5344": ["OB_ERR_MULTI_UPDATE_KEY_CONFLICT", "Primary key/partition key update is not allowed", "Internal Error", "Contact OceanBase Support"], - "-5345": ["OB_ERR_INSUFFICIENT_PX_WORKER", "insufficient parallel query worker available", "Internal Error", "Contact OceanBase Support"], - "-5346": ["OB_ERR_FOR_UPDATE_EXPR_NOT_ALLOWED", "FOR UPDATE of this query expression is not allowed", "Internal Error", "Contact OceanBase Support"], - "-5347": ["OB_ERR_WIN_FUNC_ARG_NOT_IN_PARTITION_BY", "argument should be a function of expressions in PARTITION BY", "Internal Error", "Contact OceanBase Support"], - "-5348": ["OB_ERR_TOO_LONG_STRING_IN_CONCAT", "result of string concatenation is too long", "Internal Error", "Contact OceanBase Support"], - "-5350": ["OB_ERR_UPD_CAUSE_PART_CHANGE", "updating partition key column would cause a partition change", "Internal Error", "Contact OceanBase 
Support"], - "-5541": ["OB_ERR_SP_ALREADY_EXISTS", "procedure/function already exists", "Internal Error", "Contact OceanBase Support"], - "-5542": ["OB_ERR_SP_DOES_NOT_EXIST", "procedure/function does not exist", "Internal Error", "Contact OceanBase Support"], - "-5543": ["OB_ERR_SP_UNDECLARED_VAR", "Undeclared variable", "Internal Error", "Contact OceanBase Support"], - "-5544": ["OB_ERR_SP_UNDECLARED_TYPE", "Undeclared type", "Internal Error", "Contact OceanBase Support"], - "-5545": ["OB_ERR_SP_COND_MISMATCH", "Undefined CONDITION", "Internal Error", "Contact OceanBase Support"], - "-5546": ["OB_ERR_SP_LILABEL_MISMATCH", "no matching label", "Internal Error", "Contact OceanBase Support"], - "-5547": ["OB_ERR_SP_CURSOR_MISMATCH", "Undefined CURSOR", "Internal Error", "Contact OceanBase Support"], - "-5548": ["OB_ERR_SP_DUP_PARAM", "Duplicate parameter", "Internal Error", "Contact OceanBase Support"], - "-5549": ["OB_ERR_SP_DUP_VAR", "Duplicate variable", "Internal Error", "Contact OceanBase Support"], - "-5550": ["OB_ERR_SP_DUP_TYPE", "Duplicate type", "Internal Error", "Contact OceanBase Support"], - "-5551": ["OB_ERR_SP_DUP_CONDITION", "Duplicate condition", "Internal Error", "Contact OceanBase Support"], - "-5552": ["OB_ERR_SP_DUP_LABEL", "Duplicate label", "Internal Error", "Contact OceanBase Support"], - "-5553": ["OB_ERR_SP_DUP_CURSOR", "Duplicate cursor", "Internal Error", "Contact OceanBase Support"], - "-5554": ["OB_ERR_SP_INVALID_FETCH_ARG", "Incorrect number of FETCH variables", "Internal Error", "Contact OceanBase Support"], - "-5555": ["OB_ERR_SP_WRONG_ARG_NUM", "Incorrect number of arguments", "Internal Error", "Contact OceanBase Support"], - "-5556": ["OB_ERR_SP_UNHANDLED_EXCEPTION", "Unhandled exception has occurred in PL", "Internal Error", "Contact OceanBase Support"], - "-5557": ["OB_ERR_SP_BAD_CONDITION_TYPE", "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE", "Internal Error", "Contact OceanBase Support"], - "-5558": ["OB_ERR_PACKAGE_ALREADY_EXISTS", "package already exists", "Internal Error", "Contact OceanBase Support"], - "-5559": ["OB_ERR_PACKAGE_DOSE_NOT_EXIST", "package does not exist", "Internal Error", "Contact OceanBase Support"], - "-5560": ["OB_EER_UNKNOWN_STMT_HANDLER", "Unknown prepared statement handle", "Internal Error", "Contact OceanBase Support"], - "-5561": ["OB_ERR_INVALID_WINDOW_FUNC_USE", "Invalid use of window function", "Internal Error", "Contact OceanBase Support"], - "-5563": ["OB_ERR_CONTRAINT_NOT_FOUND", "Constraint not found", "Internal Error", "Contact OceanBase Support"], - "-5564": ["OB_ERR_ALTER_TABLE_ALTER_DUPLICATED_INDEX", "Duplicate alter index operations", "Internal Error", "Contact OceanBase Support"], - "-5565": ["OB_EER_INVALID_ARGUMENT_FOR_LOGARITHM", "Invalid argument for logarithm", "Internal Error", "Contact OceanBase Support"], - "-5566": ["OB_ERR_REORGANIZE_OUTSIDE_RANGE", "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range", "Internal Error", "Contact OceanBase Support"], - "-5568": ["OB_ER_UNSUPPORTED_PS", "This command is not supported in the prepared statement protocol yet", "Internal Error", "Contact OceanBase Support"], - "-5569": ["OB_ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG", "stmt is not allowed in stored function", "Internal Error", "Contact OceanBase Support"], - "-5570": ["OB_ER_SP_NO_RECURSION", "Recursive stored functions are not allowed.", "Internal Error", "Contact OceanBase Support"], - "-5571": ["OB_ER_SP_CASE_NOT_FOUND", "Case not 
found for CASE statement", "Internal Error", "Contact OceanBase Support"], - "-5572": ["OB_ERR_INVALID_SPLIT_COUNT", "a partition may be split into exactly two new partitions", "Internal Error", "Contact OceanBase Support"], - "-5573": ["OB_ERR_INVALID_SPLIT_GRAMMAR", "this physical attribute may not be specified for a table partition", "Internal Error", "Contact OceanBase Support"], - "-5574": ["OB_ERR_MISS_VALUES", "missing VALUES keyword", "Internal Error", "Contact OceanBase Support"], - "-5575": ["OB_ERR_MISS_AT_VALUES", "missing AT or VALUES keyword", "Internal Error", "Contact OceanBase Support"], - "-5576": ["OB_ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG","Explicit or implicit commit is not allowed in stored function.", "Internal Error", "Contact OceanBase Support"], - "-5577": ["OB_PC_GET_LOCATION_ERROR", "Plan cache get location failed", "Internal Error", "Contact OceanBase Support"], - "-5578": ["OB_PC_LOCK_CONFLICT", "Plan cache lock conflict", "Internal Error", "Contact OceanBase Support"], - "-5579": ["OB_ER_SP_NO_RETSET", "Not allowed to return a result set in pl function", "Internal Error", "Contact OceanBase Support"], - "-5580": ["OB_ER_SP_NORETURNEND", "FUNCTION ended without RETURN", "Internal Error", "Contact OceanBase Support"], - "-5581": ["OB_ERR_SP_DUP_HANDLER", "Duplicate handler declared in the same block", "Internal Error", "Contact OceanBase Support"], - "-5582": ["OB_ER_SP_NO_RECURSIVE_CREATE", "Can't create a routine from within another routine", "Internal Error", "Contact OceanBase Support"], - "-5583": ["OB_ER_SP_BADRETURN", "RETURN is only allowed in a FUNCTION", "Internal Error", "Contact OceanBase Support"], - "-5584": ["OB_ER_SP_BAD_CURSOR_SELECT", "Cursor SELECT must not have INTO", "Internal Error", "Contact OceanBase Support"], - "-5585": ["OB_ER_SP_BAD_SQLSTATE", "Bad SQLSTATE", "Internal Error", "Contact OceanBase Support"], - "-5586": ["OB_ER_SP_VARCOND_AFTER_CURSHNDLR", "Variable or condition declaration after cursor or handler declaration", "Internal Error", "Contact OceanBase Support"], - "-5587": ["OB_ER_SP_CURSOR_AFTER_HANDLER", "Cursor declaration after handler declaration", "Internal Error", "Contact OceanBase Support"], - "-5588": ["OB_ER_SP_WRONG_NAME", "Incorrect routine name", "Internal Error", "Contact OceanBase Support"], - "-5589": ["OB_ER_SP_CURSOR_ALREADY_OPEN", "Cursor is already open", "Internal Error", "Contact OceanBase Support"], - "-5590": ["OB_ER_SP_CURSOR_NOT_OPEN", "Cursor is not open", "Internal Error", "Contact OceanBase Support"], - "-5591": ["OB_ER_SP_CANT_SET_AUTOCOMMIT", "Not allowed to set autocommit from a stored function", "Internal Error", "Contact OceanBase Support"], - "-5592": ["OB_ER_SP_NOT_VAR_ARG", "OUT or INOUT argument for routine is not a variable", "Internal Error", "Contact OceanBase Support"], - "-5593": ["OB_ER_SP_LILABEL_MISMATCH", "with no matching label", "Internal Error", "Contact OceanBase Support"], - "-5594": ["OB_ERR_TRUNCATE_ILLEGAL_FK", "Cannot truncate a table referenced in a foreign key constraint", "Internal Error", "Contact OceanBase Support"], - "-5596": ["OB_ER_INVALID_USE_OF_NULL", "Invalid use of NULL value", "Internal Error", "Contact OceanBase Support"], - "-5597": ["OB_ERR_SPLIT_LIST_LESS_VALUE", "last resulting partition cannot contain bounds", "Internal Error", "Contact OceanBase Support"], - "-5598": ["OB_ERR_ADD_PARTITION_TO_DEFAULT_LIST", "cannot add partition when DEFAULT partition exists", "Internal Error", "Contact OceanBase Support"], - "-5599": 
["OB_ERR_SPLIT_INTO_ONE_PARTITION", "cannot split partition into one partition, use rename instead", "Internal Error"], - "-5600": ["OB_ERR_NO_TENANT_PRIVILEGE", "can not create user in sys tenant", "Internal Error", "Contact OceanBase Support"], - "-5601": ["OB_ERR_INVALID_PERCENTAGE", "Percentage should between 1 and 99", "Internal Error", "Contact OceanBase Support"], - "-5602": ["OB_ERR_COLLECT_HISTOGRAM", "Should collect histogram after major freeze", "Internal Error", "Contact OceanBase Support"], - "-5603": ["OB_ER_TEMP_TABLE_IN_USE", "Attempt to create, alter or drop an index on temporary table already in use", "Internal Error"], - "-5604": ["OB_ERR_INVALID_NLS_PARAMETER_STRING", "invalid NLS parameter string used in SQL function", "Internal Error", "Contact OceanBase Support"], - "-5605": ["OB_ERR_DATETIME_INTERVAL_PRECISION_OUT_OF_RANGE", "datetime/interval precision is out of range", "Internal Error", "Contact OceanBase Support"], - "-5606": ["OB_ERR_INVALID_NUMBER_FORMAT_MODEL", "Invalid number format model", "Internal Error", "Contact OceanBase Support"], - "-5607": ["OB_ERR_CMD_NOT_PROPERLY_ENDED", "SQL command not properly ended", "Internal Error", "Contact OceanBase Support"], - "-5608": ["OB_ERR_INVALID_NUMBER_FORMAT_MODEL", "invalid number format model", "Internal Error", "Contact OceanBase Support"], - "-5609": ["OB_WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED", "Non-ASCII separator arguments are not fully supported", "Internal Error", "Contact OceanBase Support"], - "-5610": ["OB_WARN_AMBIGUOUS_FIELD_TERM", "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY", "Internal Error", "Contact OceanBase Support"], - "-5611": ["OB_WARN_TOO_FEW_RECORDS", "Row doesn't contain data for all columns", "Internal Error", "Contact OceanBase Support"], - "-5612": ["OB_WARN_TOO_MANY_RECORDS", "Row was truncated; it contained more data than there were input columns", "Internal Error", "Contact OceanBase Support"], - "-5613": ["OB_ERR_TOO_MANY_VALUES", "too many values", "Internal Error", "Contact OceanBase Support"], - "-5614": ["OB_ERR_NOT_ENOUGH_VALUES", "not enough values", "Internal Error", "Contact OceanBase Support"], - "-5615": ["OB_ERR_MORE_THAN_ONE_ROW", "single-row subquery returns more than one row", "Internal Error", "Contact OceanBase Support"], - "-5616": ["OB_ERR_NOT_SUBQUERY", "UPDATE ... 
-    "-5617": ["OB_INAPPROPRIATE_INTO", "inappropriate INTO", "Internal Error", "Contact OceanBase Support"],
-    "-5618": ["OB_ERR_TABLE_IS_REFERENCED", "Cannot delete or update a parent row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"],
-    "-5619": ["OB_ERR_QUALIFIER_EXISTS_FOR_USING_COLUMN", "Column part of using clause can not have qualifier", "Internal Error", "Contact OceanBase Support"],
-    "-5620": ["OB_ERR_OUTER_JOIN_NESTED", "two tables cannot be outer-joined to each other", "Internal Error", "Contact OceanBase Support"],
-    "-5621": ["OB_ERR_MULTI_OUTER_JOIN_TABLE", "a predicate may reference only one outer-joined table", "Internal Error", "Contact OceanBase Support"],
-    "-5622": ["OB_ERR_OUTER_JOIN_ON_CORRELATION_COLUMN", "an outer join cannot be specified on a correlation column", "Internal Error", "Contact OceanBase Support"],
-    "-5624": ["OB_ERR_OUTER_JOIN_WITH_SUBQUERY", "a column may not be outer-joined to a subquery", "Internal Error", "Contact OceanBase Support"],
-    "-5627": ["OB_SCHEMA_EAGAIN", "Schema try again", "Internal Error", "Contact OceanBase Support"],
-    "-5628": ["OB_ERR_ZERO_LEN_COL", "zero-length columns are not allowed", "Internal Error", "Contact OceanBase Support"],
-    "-5629": ["OB_ERR_YEAR_CONFLICTS_WITH_JULIAN_DATE", "year conflicts with Julian date", "Internal Error", "Contact OceanBase Support"],
-    "-5630": ["OB_ERR_DAY_OF_YEAR_CONFLICTS_WITH_JULIAN_DATE", "day of year conflicts with Julian date", "Internal Error", "Contact OceanBase Support"],
-    "-5631": ["OB_ERR_MONTH_CONFLICTS_WITH_JULIAN_DATE", "month conflicts with Julian date", "Internal Error", "Contact OceanBase Support"],
-    "-5632": ["OB_ERR_DAY_OF_MONTH_CONFLICTS_WITH_JULIAN_DATE", "day of month conflicts with Julian date", "Internal Error", "Contact OceanBase Support"],
-    "-5633": ["OB_ERR_DAY_OF_WEEK_CONFLICTS_WITH_JULIAN_DATE", "day of week conflicts with Julian date", "Internal Error", "Contact OceanBase Support"],
-    "-5634": ["OB_ERR_HOUR_CONFLICTS_WITH_SECONDS_IN_DAY", "hour conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"],
-    "-5635": ["OB_ERR_MINUTES_OF_HOUR_CONFLICTS_WITH_SECONDS_IN_DAY", "minutes of hour conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"],
-    "-5636": ["OB_ERR_SECONDS_OF_MINUTE_CONFLICTS_WITH_SECONDS_IN_DAY", "seconds of minute conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"],
-    "-5637": ["OB_ERR_DATE_NOT_VALID_FOR_MONTH_SPECIFIED", "date not valid for month specified", "Internal Error", "Contact OceanBase Support"],
-    "-5638": ["OB_ERR_INPUT_VALUE_NOT_LONG_ENOUGH", "input value not long enough for date format", "Internal Error", "Contact OceanBase Support"],
-    "-5640": ["OB_ERR_INVALID_QUARTER_VALUE", "quarter must be between 1 and 4", "Internal Error", "Contact OceanBase Support"],
-    "-5641": ["OB_ERR_INVALID_MONTH", "not a valid month", "Internal Error", "Contact OceanBase Support"],
-    "-5642": ["OB_ERR_INVALID_DAY_OF_THE_WEEK", "not a valid day of the week", "Internal Error", "Contact OceanBase Support"],
-    "-5644": ["OB_ERR_INVALID_HOUR12_VALUE", "hour must be between 1 and 12", "Internal Error", "Contact OceanBase Support"],
-    "-5645": ["OB_ERR_INVALID_HOUR24_VALUE", "hour must be between 0 and 23", "Internal Error", "Contact OceanBase Support"],
-    "-5646": ["OB_ERR_INVALID_MINUTES_VALUE", "minutes must be between 0 and 59", "Internal Error", "Contact OceanBase Support"],
-    "-5647": ["OB_ERR_INVALID_SECONDS_VALUE", "seconds must be between 0 and 59", "Internal Error", "Contact OceanBase Support"],
-    "-5648": ["OB_ERR_INVALID_SECONDS_IN_DAY_VALUE", "seconds in day must be between 0 and 86399", "Internal Error", "Contact OceanBase Support"],
-    "-5649": ["OB_ERR_INVALID_JULIAN_DATE_VALUE", "julian date must be between 1 and 5373484", "Internal Error", "Contact OceanBase Support"],
-    "-5650": ["OB_ERR_AM_OR_PM_REQUIRED", "AM/A.M. or PM/P.M. required", "Internal Error", "Contact OceanBase Support"],
-    "-5651": ["OB_ERR_BC_OR_AD_REQUIRED", "BC/B.C. or AD/A.D. required", "Internal Error", "Contact OceanBase Support"],
-    "-5652": ["OB_ERR_FORMAT_CODE_APPEARS_TWICE", "format code appears twice", "Internal Error", "Contact OceanBase Support"],
-    "-5653": ["OB_ERR_DAY_OF_WEEK_SPECIFIED_MORE_THAN_ONCE", "day of week may only be specified once", "Internal Error", "Contact OceanBase Support"],
-    "-5654": ["OB_ERR_SIGNED_YEAR_PRECLUDES_USE_OF_BC_AD", "signed year precludes use of BC/AD", "Internal Error", "Contact OceanBase Support"],
-    "-5655": ["OB_ERR_JULIAN_DATE_PRECLUDES_USE_OF_DAY_OF_YEAR", "Julian date precludes use of day of year", "Internal Error", "Contact OceanBase Support"],
-    "-5656": ["OB_ERR_YEAR_MAY_ONLY_BE_SPECIFIED_ONCE", "year may only be specified once", "Internal Error", "Contact OceanBase Support"],
-    "-5657": ["OB_ERR_HOUR_MAY_ONLY_BE_SPECIFIED_ONCE", "hour may only be specified once", "Internal Error", "Contact OceanBase Support"],
-    "-5658": ["OB_ERR_AM_PM_CONFLICTS_WITH_USE_OF_AM_DOT_PM_DOT", "AM/PM conflicts with use of A.M./P.M.", "Internal Error", "Contact OceanBase Support"],
-    "-5659": ["OB_ERR_BC_AD_CONFLICT_WITH_USE_OF_BC_DOT_AD_DOT", "BC/AD conflicts with use of B.C./A.D.", "Internal Error", "Contact OceanBase Support"],
-    "-5660": ["OB_ERR_MONTH_MAY_ONLY_BE_SPECIFIED_ONCE", "month may only be specified once", "Internal Error", "Contact OceanBase Support"],
-    "-5661": ["OB_ERR_DAY_OF_WEEK_MAY_ONLY_BE_SPECIFIED_ONCE", "day of week may only be specified once", "Internal Error", "Contact OceanBase Support"],
-    "-5662": ["OB_ERR_FORMAT_CODE_CANNOT_APPEAR", "format code cannot appear in date input format", "Internal Error", "Contact OceanBase Support"],
-    "-5663": ["OB_ERR_NON_NUMERIC_CHARACTER_VALUE", "a non-numeric character was found where a numeric was expected", "Internal Error", "Contact OceanBase Support"],
-    "-5664": ["OB_INVALID_MERIDIAN_INDICATOR_USE", "'HH24' precludes use of meridian indicator", "Internal Error", "Contact OceanBase Support"],
-    "-5665": ["OB_ERR_INVALID_CHAR_FOLLOWING_ESCAPE_CHAR", "missing or illegal character following the escape character", "Internal Error", "Contact OceanBase Support"],
-    "-5666": ["OB_ERR_INVALID_ESCAPE_CHAR_LENGTH", "escape character must be character string of length 1", "Internal Error", "Contact OceanBase Support"],
-    "-5667": ["OB_ERR_DAY_OF_MONTH_RANGE", "day of month must be between 1 and last day of month", "Internal Error", "Contact OceanBase Support"],
-    "-5668": ["OB_ERR_NOT_SELECTED_EXPR", "not a SELECTed expression", "Internal Error", "Contact OceanBase Support"],
-    "-5671": ["OB_ERR_UK_PK_DUPLICATE", "such unique or primary key already exists in the table", "Internal Error", "Contact OceanBase Support"],
-    "-5672": ["OB_ERR_COLUMN_LIST_ALREADY_INDEXED", "such column list already indexed", "Internal Error", "Contact OceanBase Support"],
-    "-5673": ["OB_ERR_BUSHY_TREE_NOT_SUPPORTED", "PX does not support processing a bushy tree", "Internal Error", "Contact OceanBase Support"],
-    "-5674": ["OB_ERR_ARGUMENT_OUT_OF_RANGE", "argument is out of range", "Internal Error", "Contact OceanBase Support"],
-    "-5675": ["OB_ERR_ORDER_BY_ITEM_NOT_IN_SELECT_LIST", "ORDER BY item must be the number of a SELECT-list expression", "Internal Error", "Contact OceanBase Support"],
-    "-5676": ["OB_ERR_INTERVAL_INVALID", "the interval is invalid", "Internal Error", "Contact OceanBase Support"],
-    "-5677": ["OB_ERR_NUMERIC_OR_VALUE_ERROR", "PL/SQL: numeric or value error", "Internal Error", "Contact OceanBase Support"],
-    "-5678": ["OB_ERR_CONSTRAINT_NAME_DUPLICATE", "name already used by an existing constraint", "Internal Error", "Contact OceanBase Support"],
-    "-5679": ["OB_ERR_ONLY_HAVE_INVISIBLE_COL_IN_TABLE", "table must have at least one column that is not invisible", "Internal Error", "Contact OceanBase Support"],
-    "-5680": ["OB_ERR_INVISIBLE_COL_ON_UNSUPPORTED_TABLE_TYPE", "Invisible column is not supported on this type of table.", "Internal Error", "Contact OceanBase Support"],
-    "-5681": ["OB_ERR_MODIFY_COL_VISIBILITY_COMBINED_WITH_OTHER_OPTION", "Column visibility modifications cannot be combined with any other modified column DDL option.", "Internal Error", "Contact OceanBase Support"],
-    "-5682": ["OB_ERR_MODIFY_COL_VISIBILITY_BY_SYS_USER", "The visibility of a column from a table owned by a SYS user cannot be changed.", "Internal Error", "Contact OceanBase Support"],
-    "-5683": ["OB_ERR_TOO_MANY_ARGS_FOR_FUN", "too many arguments for function", "too many arguments for function", "Internal Error"],
-    "-6001": ["OB_TRANSACTION_SET_VIOLATION", "Transaction set changed during the execution", "Internal Error", "Contact OceanBase Support"],
-    "-6002": ["OB_TRANS_ROLLBACKED", "Transaction rollbacked", "Internal Error", "Contact OceanBase Support"],
-    "-6003": ["OB_ERR_EXCLUSIVE_LOCK_CONFLICT", "Lock wait timeout exceeded; try restarting transaction", "Internal Error", "Contact OceanBase Support"],
-    "-6004": ["OB_ERR_SHARED_LOCK_CONFLICT", "Shared lock conflict", "Internal Error", "Contact OceanBase Support"],
-    "-6005": ["OB_TRY_LOCK_ROW_CONFLICT", "Try lock row conflict", "Internal Error", "Contact OceanBase Support"],
-    "-6006": ["OB_ERR_EXCLUSIVE_LOCK_CONFLICT_NOWAIT", "Lock wait timeout exceeded; try restarting transaction", "Internal Error", "Contact OceanBase Support"],
-    "-6201": ["OB_CLOCK_OUT_OF_ORDER", "Clock out of order", "Internal Error", "Contact OceanBase Support"],
-    "-6203": ["OB_MASK_SET_NO_NODE", "Mask set has no node", "Internal Error", "Contact OceanBase Support"],
-    "-6204": ["OB_TRANS_HAS_DECIDED", "Transaction has been decided", "Internal Error", "Contact OceanBase Support"],
-    "-6205": ["OB_TRANS_INVALID_STATE", "Transaction state invalid", "Internal Error", "Contact OceanBase Support"],
-    "-6206": ["OB_TRANS_STATE_NOT_CHANGE", "Transaction state not changed", "Internal Error", "Contact OceanBase Support"],
-    "-6207": ["OB_TRANS_PROTOCOL_ERROR", "Transaction protocol error", "Internal Error", "Contact OceanBase Support"],
-    "-6208": ["OB_TRANS_INVALID_MESSAGE", "Transaction message invalid", "Internal Error", "Contact OceanBase Support"],
-    "-6209": ["OB_TRANS_INVALID_MESSAGE_TYPE", "Transaction message type invalid", "Internal Error", "Contact OceanBase Support"],
-    "-6210": ["OB_TRANS_TIMEOUT", "Transaction is timeout", "Internal Error", "Contact OceanBase Support"],
-    "-6211": ["OB_TRANS_KILLED", "Transaction is killed", "Transaction is killed", "Internal Error", "Contact OceanBase Support"],
-    "-6212": ["OB_TRANS_STMT_TIMEOUT", "Statement is timeout", "Internal Error", "Contact OceanBase Support"],
is timeout", "Internal Error", "Contact OceanBase Support"], - "-6213": ["OB_TRANS_CTX_NOT_EXIST", "Transaction context does not exist", "Internal Error", "Contact OceanBase Support"], - "-6214": ["OB_PARTITION_IS_FROZEN", "Partition is frozen", "Internal Error", "Contact OceanBase Support"], - "-6215": ["OB_PARTITION_IS_NOT_FROZEN", "Partition is not frozen", "Internal Error", "Contact OceanBase Support"], - "-6219": ["OB_TRANS_INVALID_LOG_TYPE", "Transaction invalid log type", "Internal Error", "Contact OceanBase Support"], - "-6220": ["OB_TRANS_SQL_SEQUENCE_ILLEGAL", "SQL sequence illegal", "Internal Error", "Contact OceanBase Support"], - "-6221": ["OB_TRANS_CANNOT_BE_KILLED", "Transaction context cannot be killed", "Internal Error", "Contact OceanBase Support"], - "-6222": ["OB_TRANS_STATE_UNKNOWN", "Transaction state unknown", "Internal Error", "Contact OceanBase Support"], - "-6223": ["OB_TRANS_IS_EXITING", "Transaction exiting", "Internal Error", "Contact OceanBase Support"], - "-6224": ["OB_TRANS_NEED_ROLLBACK", "transaction need rollback", "Internal Error", "Contact OceanBase Support"], - "-6225": ["OB_TRANS_UNKNOWN", "Transaction result is unknown", "Internal Error", "Contact OceanBase Support"], - "-6226": ["OB_ERR_READ_ONLY_TRANSACTION", "Cannot execute statement in a READ ONLY transaction", "Internal Error", "Contact OceanBase Support"], - "-6227": ["OB_PARTITION_IS_NOT_STOPPED", "Partition is not stopped", "Internal Error", "Contact OceanBase Support"], - "-6228": ["OB_PARTITION_IS_STOPPED", "Partition has been stopped", "Internal Error", "Contact OceanBase Support"], - "-6229": ["OB_PARTITION_IS_BLOCKED", "Partition has been blocked", "Internal Error", "Contact OceanBase Support"], - "-6230": ["OB_TRANS_RPC_TIMEOUT", "transaction rpc timeout", "Internal Error", "Contact OceanBase Support"], - "-6231": ["OB_REPLICA_NOT_READABLE", "replica is not readable", "Internal Error", "Contact OceanBase Support"], - "-6232": ["OB_PARTITION_IS_SPLITTING", "Partition is splitting", "Internal Error", "Contact OceanBase Support"], - "-6233": ["OB_TRANS_COMMITED", "Transaction has been commited", "Internal Error", "Contact OceanBase Support"], - "-6234": ["OB_TRANS_CTX_COUNT_REACH_LIMIT", "transaction context count reach limit", "Internal Error", "Contact OceanBase Support"], - "-6301": ["OB_LOG_ID_NOT_FOUND", "log id not found", "Internal Error", "Contact OceanBase Support"], - "-6302": ["OB_LSR_THREAD_STOPPED", "log scan runnable thread stop", "Internal Error", "Contact OceanBase Support"], - "-6303": ["OB_NO_LOG", "no log ever scanned", "Internal Error", "Contact OceanBase Support"], - "-6304": ["OB_LOG_ID_RANGE_ERROR", "log id range error", "Internal Error", "Contact OceanBase Support"], - "-6305": ["OB_LOG_ITER_ENOUGH", "iter scans enough files", "Internal Error", "Contact OceanBase Support"], - "-6306": ["OB_CLOG_INVALID_ACK", "invalid ack msg", "Internal Error", "Contact OceanBase Support"], - "-6307": ["OB_CLOG_CACHE_INVALID", "clog cache invalid", "Internal Error", "Contact OceanBase Support"], - "-6308": ["OB_EXT_HANDLE_UNFINISH", "external executor handle do not finish", "Internal Error", "Contact OceanBase Support"], - "-6309": ["OB_CURSOR_NOT_EXIST", "cursor not exist", "Internal Error", "Contact OceanBase Support"], - "-6310": ["OB_STREAM_NOT_EXIST", "stream not exist", "Internal Error", "Contact OceanBase Support"], - "-6311": ["OB_STREAM_BUSY", "stream busy", "Internal Error", "Contact OceanBase Support"], - "-6312": ["OB_FILE_RECYCLED", "file recycled", "Internal Error", 
"Contact OceanBase Support"], - "-6313": ["OB_REPLAY_EAGAIN_TOO_MUCH_TIME", "replay eagain cost too much time", "Internal Error", "Contact OceanBase Support"], - "-6314": ["OB_MEMBER_CHANGE_FAILED", "member change log sync failed", "Internal Error", "Contact OceanBase Support"], - "-6315": ["OB_NO_NEED_BATCH_CTX", "no need batch ctx", "Internal Error", "Contact OceanBase Support"], - "-6316": ["OB_TOO_LARGE_LOG_ID", "too large log id", "Internal Error", "Contact OceanBase Support"], - "-6317": ["OB_ALLOC_LOG_ID_NEED_RETRY", "alloc log id need retry", "Internal Error", "Contact OceanBase Support"], - "-6318": ["OB_TRANS_ONE_PC_NOT_ALLOWED", "transaction one pc not allowed", "Internal Error", "Contact OceanBase Support"], - "-6319": ["OB_LOG_NEED_REBUILD", "need rebuild", "Internal Error", "Contact OceanBase Support"], - "-6320": ["OB_TOO_MANY_LOG_TASK", "too many log tasks", "Internal Error", "Contact OceanBase Support"], - "-6321": ["OB_INVALID_BATCH_SIZE", "ob invalid batch size", "Internal Error", "Contact OceanBase Support"], - "-6322": ["OB_CLOG_SLIDE_TIMEOUT", "ob clog slide timeout", "Internal Error", "Contact OceanBase Support"], - "-7000": ["OB_ELECTION_WARN_LOGBUF_FULL", "The log buffer is full", "Internal Error", "Contact OceanBase Support"], - "-7001": ["OB_ELECTION_WARN_LOGBUF_EMPTY", "The log buffer is empty", "Internal Error", "Contact OceanBase Support"], - "-7002": ["OB_ELECTION_WARN_NOT_RUNNING", "The object is not running", "Internal Error", "Contact OceanBase Support"], - "-7003": ["OB_ELECTION_WARN_IS_RUNNING", "The object is running", "Internal Error", "Contact OceanBase Support"], - "-7004": ["OB_ELECTION_WARN_NOT_REACH_MAJORITY", "Election does not reach majority", "Internal Error", "Contact OceanBase Support"], - "-7005": ["OB_ELECTION_WARN_INVALID_SERVER", "The server is not valid", "Internal Error", "Contact OceanBase Support"], - "-7006": ["OB_ELECTION_WARN_INVALID_LEADER", "The leader is not valid", "Internal Error", "Contact OceanBase Support"], - "-7007": ["OB_ELECTION_WARN_LEADER_LEASE_EXPIRED", "The leader lease is expired", "Internal Error", "Contact OceanBase Support"], - "-7010": ["OB_ELECTION_WARN_INVALID_MESSAGE", "The message is not valid", "Internal Error", "Contact OceanBase Support"], - "-7011": ["OB_ELECTION_WARN_MESSAGE_NOT_INTIME", "The message is not intime", "Internal Error", "Contact OceanBase Support"], - "-7012": ["OB_ELECTION_WARN_NOT_CANDIDATE", "The server is not candidate", "Internal Error", "Contact OceanBase Support"], - "-7013": ["OB_ELECTION_WARN_NOT_CANDIDATE_OR_VOTER", "The server is not candidate or voter", "Internal Error", "Contact OceanBase Support"], - "-7014": ["OB_ELECTION_WARN_PROTOCOL_ERROR", "Election protocol error", "Internal Error", "Contact OceanBase Support"], - "-7015": ["OB_ELECTION_WARN_RUNTIME_OUT_OF_RANGE", "The task run time out of range", "Internal Error", "Contact OceanBase Support"], - "-7021": ["OB_ELECTION_WARN_LAST_OPERATION_NOT_DONE", "Last operation has not done", "Internal Error", "Contact OceanBase Support"], - "-7022": ["OB_ELECTION_WARN_CURRENT_SERVER_NOT_LEADER", "Current server is not leader", "Internal Error", "Contact OceanBase Support"], - "-7024": ["OB_ELECTION_WARN_NO_PREPARE_MESSAGE", "There is not prepare message", "Internal Error", "Contact OceanBase Support"], - "-7025": ["OB_ELECTION_ERROR_MULTI_PREPARE_MESSAGE", "There is more than one prepare message", "Internal Error", "Contact OceanBase Support"], - "-7026": ["OB_ELECTION_NOT_EXIST", "Election does not exist", "Internal Error", 
"Contact OceanBase Support"], - "-7027": ["OB_ELECTION_MGR_IS_RUNNING", "Election manager is running", "Internal Error", "Contact OceanBase Support"], - "-7029": ["OB_ELECTION_WARN_NO_MAJORITY_PREPARE_MESSAGE", "Election msg pool not have majority prepare message", "Internal Error", "Contact OceanBase Support"], - "-7030": ["OB_ELECTION_ASYNC_LOG_WARN_INIT", "Election async log init error", "Internal Error", "Contact OceanBase Support"], - "-7031": ["OB_ELECTION_WAIT_LEADER_MESSAGE", "Election waiting leader message", "Internal Error", "Contact OceanBase Support"], - "-7032": ["OB_ELECTION_GROUP_NOT_EXIST", "Election group not exist", "Internal Error", "Contact OceanBase Support"], - "-7033": ["OB_UNEXPECT_EG_VERSION", "unexpected eg_version", "Internal Error", "Contact OceanBase Support"], - "-7034": ["OB_ELECTION_GROUP_MGR_IS_RUNNING", "election_group_mgr is running", "Internal Error", "Contact OceanBase Support"], - "-7035": ["OB_ELECTION_MGR_NOT_RUNNING", "Election manager is not running", "Internal Error", "Contact OceanBase Support"], - "-7100": ["OB_TRANSFER_TASK_COMPLETED", "transfer task completed", "Internal Error", "Contact OceanBase Support"], - "-7101": ["OB_TOO_MANY_TRANSFER_TASK", "too many transfer tasks", "Internal Error", "Contact OceanBase Support"], - "-7102": ["OB_TRANSFER_TASK_EXIST", "transfer task exist", "Internal Error", "Contact OceanBase Support"], - "-7103": ["OB_TRANSFER_TASK_NOT_EXIST", "transfer task not exist", "Internal Error", "Contact OceanBase Support"], - "-7104": ["OB_NOT_ALLOW_TO_REMOVE", "not allow to remove", "Internal Error", "Contact OceanBase Support"], - "-7105": ["OB_RG_NOT_MATCH", "replication group not match", "Internal Error", "Contact OceanBase Support"], - "-7106": ["OB_TRANSFER_TASK_ABORTED", "transfer task aborted", "Internal Error", "Contact OceanBase Support"], - "-7107": ["OB_TRANSFER_INVALID_MESSAGE", "transfer invalid message", "Internal Error", "Contact OceanBase Support"], - "-7108": ["OB_TRANSFER_CTX_TS_NOT_MATCH", "transfer ctx_ts not match", "Internal Error", "Contact OceanBase Support"], - "-8001": ["OB_SERVER_IS_INIT", "Server is initializing", "Internal Error", "Contact OceanBase Support"], - "-8002": ["OB_SERVER_IS_STOPPING", "Server is stopping", "Internal Error", "Contact OceanBase Support"], - "-8003": ["OB_PACKET_CHECKSUM_ERROR", "Packet checksum error", "Internal Error", "Contact OceanBase Support"], - "-8004": ["OB_PACKET_CLUSTER_ID_NOT_MATCH", "Packet cluster_id not match", "Internal Error", "Contact OceanBase Support"], - "-9001": ["OB_URI_ERROR", "URI error", "Internal Error", "Contact OceanBase Support"], - "-9002": ["OB_FINAL_MD5_ERROR", "OSS file MD5 error", "Internal Error", "Contact OceanBase Support"], - "-9003": ["OB_OSS_ERROR", "OSS error", "Internal Error", "Contact OceanBase Support"], - "-9004": ["OB_INIT_MD5_ERROR", "Init MD5 fail", "Internal Error", "Contact OceanBase Support"], - "-9005": ["OB_OUT_OF_ELEMENT", "Out of element", "Internal Error", "Contact OceanBase Support"], - "-9006": ["OB_UPDATE_MD5_ERROR", "Update MD5 fail", "Internal Error", "Contact OceanBase Support"], - "-9007": ["OB_FILE_LENGTH_INVALID", "Invalid OSS file length", "Internal Error", "Contact OceanBase Support"], - "-9008": ["OB_NOT_READ_ALL_DATA", "Read all data fail", "Internal Error", "Contact OceanBase Support"], - "-9009": ["OB_BUILD_MD5_ERROR", "Build MD5 fail", "Internal Error", "Contact OceanBase Support"], - "-9010": ["OB_MD5_NOT_MATCH", "OSS file MD5 not match", "Internal Error", "Contact OceanBase Support"], - 
"-9011": ["OB_OSS_FILE_NOT_EXIST", "Can not find oss file", "Internal Error", "Contact OceanBase Support"], - "-9012": ["OB_OSS_DATA_VERSION_NOT_MATCHED", "Can not get data version from timestamp", "Internal Error", "Contact OceanBase Support"], - "-9013": ["OB_OSS_WRITE_ERROR", "Write OSS file error", "Internal Error", "Contact OceanBase Support"], - "-9014": ["OB_RESTORE_IN_PROGRESS", "Another restore is in progress", "Internal Error", "Contact OceanBase Support"], - "-9015": ["OB_AGENT_INITING_BACKUP_COUNT_ERROR", "agent initing backup count error", "Internal Error", "Contact OceanBase Support"], - "-9016": ["OB_CLUSTER_NAME_NOT_EQUAL", "ob cluster name not equal", "Internal Error", "Contact OceanBase Support"], - "-9017": ["OB_RS_LIST_INVAILD", "rs list invalid", "Internal Error", "Contact OceanBase Support"], - "-9018": ["OB_AGENT_HAS_FAILED_TASK", "agent has failed task", "Internal Error", "Contact OceanBase Support"], - "-9019": ["OB_RESTORE_PARTITION_IS_COMPELETE", "restore partition is compelete", "Internal Error", "Contact OceanBase Support"], - "-9020": ["OB_RESTORE_PARTITION_TWICE", "restore partition twice", "Internal Error", "Contact OceanBase Support"], - "-32031": ["OB_ERR_CTE_ILLEGAL_QUERY_NAME", "illegal reference of a query name in WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32033": ["OB_ERR_CTE_UNSUPPORTED_COLUMN_ALIASING", "unsupported column aliasing", "Internal Error", "Contact OceanBase Support"], - "-32034": ["OB_ERR_UNSUPPORTED_USE_OF_CTE", "unsupported use of WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32038": ["OB_ERR_CTE_COLUMN_NUMBER_NOT_MATCH", "number of WITH clause column names does not match number of elements in select list", "Internal Error", "Contact OceanBase Support"], - "-32039": ["OB_ERR_NEED_COLUMN_ALIAS_LIST_IN_RECURSIVE_CTE", "recursive WITH clause must have column alias list", "Internal Error", "Contact OceanBase Support"], - "-32040": ["OB_ERR_NEED_UNION_ALL_IN_RECURSIVE_CTE", "recursive WITH clause must use a UNION ALL operation", "Internal Error", "Contact OceanBase Support"], - "-32041": ["OB_ERR_NEED_ONLY_TWO_BRANCH_IN_RECURSIVE_CTE", "UNION ALL operation in recursive WITH clause must have only two branches", "Internal Error", "Contact OceanBase Support"], - "-32042": ["OB_ERR_NEED_REFERENCE_ITSELF_DIRECTLY_IN_RECURSIVE_CTE", "recursive WITH clause must reference itself directly in one of the UNION ALL branches", "Internal Error", "Contact OceanBase Support"], - "-32043": ["OB_ERR_NEED_INIT_BRANCH_IN_RECURSIVE_CTE", "recursive WITH clause needs an initialization branch", "Internal Error", "Contact OceanBase Support"], - "-32044": ["OB_ERR_CYCLE_FOUND_IN_RECURSIVE_CTE", "cycle detected while executing recursive WITH query", "Internal Error", "Contact OceanBase Support"], - "-32045": ["OB_ERR_CTE_REACH_MAX_LEVEL_RECURSION", "maximum level of recursion reached while executing recursive WITH query", "Internal Error", "Contact OceanBase Support"], - "-32046": ["OB_ERR_CTE_ILLEGAL_SEARCH_PSEUDO_NAME", "sequence column name for SEARCH clause must not be part of the column alias list", "Internal Error", "Contact OceanBase Support"], - "-32047": ["OB_ERR_CTE_ILLEGAL_CYCLE_NON_CYCLE_VALUE", "cycle mark value and non-cycle mark value must be one byte character string values", "Internal Error", "Contact OceanBase Support"], - "-32048": ["OB_ERR_CTE_ILLEGAL_CYCLE_PSEUDO_NAME", "cycle mark column name for CYCLE clause must not be part of the column alias list", "Internal Error", "Contact OceanBase Support"], - 
"-32049": ["OB_ERR_CTE_COLUMN_ALIAS_DUPLICATE", "duplicate name found in column alias list for WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32480": ["OB_ERR_CTE_ILLEGAL_SEARCH_CYCLE_CLAUSE", "SEARCH and CYCLE clauses can only be specified for recursive WITH clause elements", "Internal Error", "Contact OceanBase Support"], - "-32481": ["OB_ERR_CTE_DUPLICATE_CYCLE_NON_CYCLE_VALUE", "cycle value for CYCLE clause must be different from the non-cycle value", "Internal Error", "Contact OceanBase Support"], - "-32482": ["OB_ERR_CTE_DUPLICATE_SEQ_NAME_CYCLE_COLUMN", "sequence column for SEARCH clause must be different from the cycle mark column for CYCLE clause", "Internal Error", "Contact OceanBase Support"], - "-32483": ["OB_ERR_CTE_DUPLICATE_NAME_IN_SEARCH_CLAUSE", "duplicate name found in sort specification list for SEARCH clause of WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32484": ["OB_ERR_CTE_DUPLICATE_NAME_IN_CYCLE_CLAUSE", "duplicate name found in cycle column list for CYCLE clause of WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32485": ["OB_ERR_CTE_ILLEGAL_COLUMN_IN_CYCLE_CLAUSE", "element in cycle column list of CYCLE clause must appear in the column alias list of the WITH clause element", "Internal Error", "Contact OceanBase Support"], - "-32486": ["OB_ERR_CTE_ILLEGAL_RECURSIVE_BRANCH", "unsupported operation in recursive branch of recursive WITH clause", "Internal Error", "Contact OceanBase Support"], - "-32487": ["OB_ERR_ILLEGAL_JOIN_IN_RECURSIVE_CTE", "unsupported join in recursive WITH query", "Internal Error", "Contact OceanBase Support"], - "-32488": ["OB_ERR_CTE_NEED_COLUMN_ALIAS_LIST", "WITH clause element did not have a column alias list", "Internal Error", "Contact OceanBase Support"], - "-32489": ["OB_ERR_CTE_ILLEGAL_COLUMN_IN_SERACH_CALUSE", "element in sort specification list of SEARCH clause did not appear in the column alias list of the WITH clause element", "Internal Error", "Contact OceanBase Support"], - "-32490": ["OB_ERR_CTE_RECURSIVE_QUERY_NAME_REFERENCED_MORE_THAN_ONCE", "recursive query name referenced more than once in recursive branch of recursive WITH clause element", "Internal Error", "Contact OceanBase Support"], - "-976": ["OB_ERR_CBY_PSEUDO_COLUMN_NOT_ALLOWED", "Specified pseudo column or operator not allowed here", "Internal Error", "Contact OceanBase Support"], - "-1436": ["OB_ERR_CBY_LOOP", "CONNECT BY loop in user data", "Internal Error", "Contact OceanBase Support"], - "-1437": ["OB_ERR_CBY_JOIN_NOT_ALLOWED", "cannot have join with CONNECT BY", "Internal Error", "Contact OceanBase Support"], - "-1788": ["OB_ERR_CBY_CONNECT_BY_REQUIRED", "CONNECT BY clause required in this query block", "Internal Error", "Contact OceanBase Support"], - "-30002": ["OB_ERR_CBY_CONNECT_BY_PATH_NOT_ALLOWED", "SYS_CONNECT_BY_PATH function is not allowed here", "Internal Error", "Contact OceanBase Support"], - "-30003": ["OB_ERR_CBY_CONNECT_BY_PATH_ILLEGAL_PARAM", "illegal parameter in SYS_CONNECT_BY_PATH function", "Internal Error", "Contact OceanBase Support"], - "-30004": ["OB_ERR_CBY_CONNECT_BY_PATH_INVALID_SEPARATOR", "A column value contained the string that the SYS_CONNECT_BY_PATH function was to use to separate column values", "Internal Error", "Contact OceanBase Support"], - "-30007": ["OB_ERR_CBY_CONNECT_BY_ROOT_ILLEGAL_USED", "CONNECT BY ROOT operator is not supported in the START WITH or in the CONNECT BY condition", "Internal Error", "Contact OceanBase Support"], - "-30929": 
["OB_ERR_CBY_OREDER_SIBLINGS_BY_NOT_ALLOWED", "ORDER SIBLINGS BY clause not allowed here", "Internal Error", "Contact OceanBase Support"], - "-30930": ["OB_ERR_CBY_NOCYCLE_REQUIRED", "NOCYCLE keyword is required with CONNECT_BY_ISCYCLE pseudo column", "Internal Error", "Contact OceanBase Support"]} +OB_RET_DICT = { + "-4000": ["OB_ERROR", "Common error", "Internal Error", "Contact OceanBase Support"], + "-4001": ["OB_OBJ_TYPE_ERROR", "Object type error", "Internal Error", "Contact OceanBase Support"], + "-4002": ["OB_INVALID_ARGUMENT", "Invalid argument", "Internal Error", "Contact OceanBase Support"], + "-4003": ["OB_ARRAY_OUT_OF_RANGE", "Array index out of range", "Internal Error", "Contact OceanBase Support"], + "-4004": ["OB_SERVER_LISTEN_ERROR", "Failed to listen to the port", "Internal Error", "Contact OceanBase Support"], + "-4005": ["OB_INIT_TWICE", "The object is initialized twice", "Internal Error", "Contact OceanBase Support"], + "-4006": ["OB_NOT_INIT", "The object is not initialized", "Internal Error", "Contact OceanBase Support"], + "-4007": ["OB_NOT_SUPPORTED", "Not supported feature or function. Compatible Error Code: MySQL: 1235(0A000)", "Internal Error", "Contact OceanBase Support"], + "-4008": ["OB_ITER_END", "End of iteration", "Internal Error", "Contact OceanBase Support"], + "-4009": ["OB_IO_ERROR", "IO error", "Internal Error", "Contact OceanBase Support"], + "-4010": ["OB_ERROR_FUNC_VERSION", "Wrong RPC command version", "Internal Error", "Contact OceanBase Support"], + "-4011": ["OB_PACKET_NOT_SENT", "Can not send packet", "Internal Error", "Contact OceanBase Support"], + "-4012": ["OB_TIMEOUT", "Timeout", "Internal Error", "Contact OceanBase Support"], + "-4013": ["OB_ALLOCATE_MEMORY_FAILED", "No memory or reach tenant memory limit", "Internal Error", "Contact OceanBase Support"], + "-4014": ["OB_INNER_STAT_ERROR", "Inner state error", "Internal Error", "Contact OceanBase Support"], + "-4015": ["OB_ERR_SYS", "System error", "Internal Error", "Contact OceanBase Support"], + "-4016": ["OB_ERR_UNEXPECTED", "Internal error", "Internal Error", "Contact OceanBase Support"], + "-4017": ["OB_ENTRY_EXIST", "Entry already exist", "Internal Error", "Contact OceanBase Support"], + "-4018": ["OB_ENTRY_NOT_EXIST", "Entry not exist", "Internal Error", "Contact OceanBase Support"], + "-4019": ["OB_SIZE_OVERFLOW", "Size overflow", "Internal Error", "Contact OceanBase Support"], + "-4020": ["OB_REF_NUM_NOT_ZERO", "Reference count is not zero", "Internal Error", "Contact OceanBase Support"], + "-4021": ["OB_CONFLICT_VALUE", "Conflict value", "Internal Error", "Contact OceanBase Support"], + "-4022": ["OB_ITEM_NOT_SETTED", "Item not set", "Internal Error", "Contact OceanBase Support"], + "-4023": ["OB_EAGAIN", "Try again", "Internal Error", "Contact OceanBase Support"], + "-4024": ["OB_BUF_NOT_ENOUGH", "Buffer not enough", "Internal Error", "Contact OceanBase Support"], + "-4025": ["OB_PARTIAL_FAILED", "Partial failed", "Internal Error", "Contact OceanBase Support"], + "-4026": ["OB_READ_NOTHING", "Nothing to read", "Internal Error", "Contact OceanBase Support"], + "-4027": ["OB_FILE_NOT_EXIST", "File not exist. 
+    "-4028": ["OB_DISCONTINUOUS_LOG", "Log entry not continuous", "Internal Error", "Contact OceanBase Support"],
+    "-4029": ["OB_SCHEMA_ERROR", "Schema error", "Internal Error", "Contact OceanBase Support"],
+    "-4030": ["OB_TENANT_OUT_OF_MEM", "Over tenant memory limits", "Internal Error", "Contact OceanBase Support"],
+    "-4031": ["OB_UNKNOWN_OBJ", "Unknown object", "Internal Error", "Contact OceanBase Support"],
+    "-4032": ["OB_NO_MONITOR_DATA", "No monitor data", "Internal Error", "Contact OceanBase Support"],
+    "-4033": ["OB_SERIALIZE_ERROR", "Serialize error", "Internal Error", "Contact OceanBase Support"],
+    "-4034": ["OB_DESERIALIZE_ERROR", "Deserialize error", "Internal Error", "Contact OceanBase Support"],
+    "-4035": ["OB_AIO_TIMEOUT", "Asynchronous IO error", "Internal Error", "Contact OceanBase Support"],
+    "-4036": ["OB_NEED_RETRY", "Need retry", "Internal Error", "Contact OceanBase Support"],
+    "-4037": ["OB_TOO_MANY_SSTABLE", "Too many sstable", "Internal Error", "Contact OceanBase Support"],
+    "-4038": ["OB_NOT_MASTER", "The observer or zone is not the master", "Internal Error", "Contact OceanBase Support"],
+    "-4039": ["OB_KILLED_BY_THROTTLING", "Request has killed by sql throttle", "Internal Error", "Contact OceanBase Support"],
+    "-4041": ["OB_DECRYPT_FAILED", "Decrypt error", "Internal Error", "Contact OceanBase Support"],
+    "-4042": ["OB_USER_NOT_EXIST", "Can not find any matching row in the user table", "Internal Error", "Contact OceanBase Support"],
+    "-4043": ["OB_PASSWORD_WRONG", "Access denied for user", "Internal Error", "Contact OceanBase Support"],
+    "-4044": ["OB_SKEY_VERSION_WRONG", "Wrong skey version", "Internal Error", "Contact OceanBase Support"],
+    "-4048": ["OB_NOT_REGISTERED", "Not registered", "Internal Error", "Contact OceanBase Support"],
+    "-4049": ["OB_WAITQUEUE_TIMEOUT", "Task timeout and not executed", "Internal Error", "Contact OceanBase Support"],
+    "-4050": ["OB_NOT_THE_OBJECT", "Not the object", "Internal Error", "Contact OceanBase Support"],
+    "-4051": ["OB_ALREADY_REGISTERED", "Already registered", "Internal Error", "Contact OceanBase Support"],
+    "-4052": ["OB_LAST_LOG_RUINNED", "Corrupted log entry", "Internal Error", "Contact OceanBase Support"],
+    "-4053": ["OB_NO_CS_SELECTED", "No ChunkServer selected", "Internal Error", "Contact OceanBase Support"],
+    "-4054": ["OB_NO_TABLETS_CREATED", "No tablets created", "Internal Error", "Contact OceanBase Support"],
+    "-4055": ["OB_INVALID_ERROR", "Invalid entry", "Internal Error", "Contact OceanBase Support"],
+    "-4057": ["OB_DECIMAL_OVERFLOW_WARN", "Decimal overflow warning", "Internal Error", "Contact OceanBase Support"],
+    "-4058": ["OB_DECIMAL_UNLEGAL_ERROR", "Decimal overflow error", "Internal Error", "Contact OceanBase Support"],
+    "-4060": ["OB_OBJ_DIVIDE_ERROR", "Divide error", "Internal Error", "Contact OceanBase Support"],
+    "-4061": ["OB_NOT_A_DECIMAL", "Not a decimal", "Internal Error", "Contact OceanBase Support"],
+    "-4062": ["OB_DECIMAL_PRECISION_NOT_EQUAL", "Decimal precision error", "Internal Error", "Contact OceanBase Support"],
+    "-4063": ["OB_EMPTY_RANGE", "Empty range", "Internal Error", "Contact OceanBase Support"],
+    "-4064": ["OB_SESSION_KILLED", "Session killed", "Internal Error", "Contact OceanBase Support"],
+    "-4065": ["OB_LOG_NOT_SYNC", "Log not sync", "Internal Error", "Contact OceanBase Support"],
+    "-4066": ["OB_DIR_NOT_EXIST", "Directory not exist", "Internal Error", "Contact OceanBase Support"],
+    "-4067": ["OB_SESSION_NOT_FOUND", "RPC session not found", "Internal Error", "Contact OceanBase Support"],
+    "-4068": ["OB_INVALID_LOG", "Invalid log", "Internal Error", "Contact OceanBase Support"],
+    "-4070": ["OB_INVALID_DATA", "Invalid data", "Internal Error", "Contact OceanBase Support"],
+    "-4071": ["OB_ALREADY_DONE", "Already done", "Internal Error", "Contact OceanBase Support"],
+    "-4072": ["OB_CANCELED", "Operation canceled", "Internal Error", "Contact OceanBase Support"],
+    "-4073": ["OB_LOG_SRC_CHANGED", "Log source changed", "Internal Error", "Contact OceanBase Support"],
+    "-4074": ["OB_LOG_NOT_ALIGN", "Log not aligned", "Internal Error", "Contact OceanBase Support"],
+    "-4075": ["OB_LOG_MISSING", "Log entry missed", "Internal Error", "Contact OceanBase Support"],
+    "-4076": ["OB_NEED_WAIT", "Need wait", "Internal Error", "Contact OceanBase Support"],
+    "-4077": ["OB_NOT_IMPLEMENT", "Not implemented feature", "Internal Error", "Contact OceanBase Support"],
+    "-4078": ["OB_DIVISION_BY_ZERO", "Divided by zero", "Internal Error", "Contact OceanBase Support"],
+    "-4080": ["OB_EXCEED_MEM_LIMIT", "exceed memory limit", "Internal Error", "Contact OceanBase Support"],
+    "-4081": ["OB_RESULT_UNKNOWN", "Unknown result", "Internal Error", "Contact OceanBase Support"],
+    "-4084": ["OB_NO_RESULT", "No result", "Internal Error", "Contact OceanBase Support"],
+    "-4085": ["OB_QUEUE_OVERFLOW", "Queue overflow", "Internal Error", "Contact OceanBase Support"],
+    "-4097": ["OB_TERM_LAGGED", "Term lagged", "Internal Error", "Contact OceanBase Support"],
+    "-4098": ["OB_TERM_NOT_MATCH", "Term not match", "Internal Error", "Contact OceanBase Support"],
+    "-4099": ["OB_START_LOG_CURSOR_INVALID", "Invalid log cursor", "Internal Error", "Contact OceanBase Support"],
+    "-4100": ["OB_LOCK_NOT_MATCH", "Lock not match", "Internal Error", "Contact OceanBase Support"],
+    "-4101": ["OB_DEAD_LOCK", "deadlock detected while waiting for resource", "Internal Error", "Contact OceanBase Support"],
+    "-4102": ["OB_PARTIAL_LOG", "Incomplete log entry", "Internal Error", "Contact OceanBase Support"],
+    "-4103": ["OB_CHECKSUM_ERROR", "Data checksum error", "Internal Error", "Contact OceanBase Support"],
+    "-4104": ["OB_INIT_FAIL", "Initialize error", "Internal Error", "Contact OceanBase Support"],
+    "-4106": ["OB_NOT_ENOUGH_STORE", "not enough commitlog store", "Internal Error", "Contact OceanBase Support"],
+    "-4107": ["OB_BLOCK_SWITCHED", "block switched when fill commitlog", "Internal Error", "Contact OceanBase Support"],
+    "-4109": ["OB_STATE_NOT_MATCH", "Server state or role not the same as expected", "Internal Error", "Contact OceanBase Support"],
+    "-4110": ["OB_READ_ZERO_LOG", "Read zero log", "Internal Error", "Contact OceanBase Support"],
+    "-4111": ["OB_BLOCK_NEED_FREEZE", "block need freeze", "Internal Error", "Contact OceanBase Support"],
+    "-4112": ["OB_BLOCK_FROZEN", "block frozen", "Internal Error", "Contact OceanBase Support"],
+    "-4113": ["OB_IN_FATAL_STATE", "In FATAL state", "Internal Error", "Contact OceanBase Support"],
+    "-4114": ["OB_IN_STOP_STATE", "In STOP state", "Internal Error", "Contact OceanBase Support"],
+    "-4115": ["OB_UPS_MASTER_EXISTS", "Master UpdateServer already exists", "Internal Error", "Contact OceanBase Support"],
+    "-4116": ["OB_LOG_NOT_CLEAR", "Log not clear", "Internal Error", "Contact OceanBase Support"],
+    "-4117": ["OB_FILE_ALREADY_EXIST", "File already exist", "Internal Error", "Contact OceanBase Support"],
+    "-4118": ["OB_UNKNOWN_PACKET", "Unknown packet", "Internal Error", "Contact OceanBase Support"],
+    "-4119": ["OB_RPC_PACKET_TOO_LONG", "RPC packet to send too long", "Internal Error", "Contact OceanBase Support"],
+    "-4120": ["OB_LOG_TOO_LARGE", "Log too large", "Internal Error", "Contact OceanBase Support"],
+    "-4121": ["OB_RPC_SEND_ERROR", "RPC send error", "Internal Error", "Contact OceanBase Support"],
+    "-4122": ["OB_RPC_POST_ERROR", "RPC post error", "Internal Error", "Contact OceanBase Support"],
+    "-4123": ["OB_LIBEASY_ERROR", "Libeasy error", "Internal Error", "Contact OceanBase Support"],
+    "-4124": ["OB_CONNECT_ERROR", "Connect error", "Internal Error", "Contact OceanBase Support"],
+    "-4125": ["OB_NOT_FREE", "Not free", "Internal Error", "Contact OceanBase Support"],
+    "-4126": ["OB_INIT_SQL_CONTEXT_ERROR", "Init SQL context error", "Internal Error", "Contact OceanBase Support"],
+    "-4127": ["OB_SKIP_INVALID_ROW", "Skip invalid row", "Internal Error", "Contact OceanBase Support"],
+    "-4128": ["OB_RPC_PACKET_INVALID", "RPC packet is invalid", "Internal Error", "Contact OceanBase Support"],
+    "-4133": ["OB_NO_TABLET", "No tablets", "Internal Error", "Contact OceanBase Support"],
+    "-4138": ["OB_SNAPSHOT_DISCARDED", "Request to read too old versioned data", "Internal Error", "Contact OceanBase Support"],
+    "-4139": ["OB_DATA_NOT_UPTODATE", "State is stale", "Internal Error", "Contact OceanBase Support"],
+    "-4142": ["OB_ROW_MODIFIED", "Row modified", "Internal Error", "Contact OceanBase Support"],
+    "-4143": ["OB_VERSION_NOT_MATCH", "Version not match", "Internal Error", "Contact OceanBase Support"],
+    "-4144": ["OB_BAD_ADDRESS", "Bad address", "Internal Error", "Contact OceanBase Support"],
+    "-4146": ["OB_ENQUEUE_FAILED", "Enqueue error", "Internal Error", "Contact OceanBase Support"],
+    "-4147": ["OB_INVALID_CONFIG", "Invalid config", "Internal Error", "Contact OceanBase Support"],
+    "-4149": ["OB_STMT_EXPIRED", "Expired statement", "Internal Error", "Contact OceanBase Support"],
+    "-4150": ["OB_ERR_MIN_VALUE", "Min value", "Internal Error", "Contact OceanBase Support"],
+    "-4151": ["OB_ERR_MAX_VALUE", "Max value", "Internal Error", "Contact OceanBase Support"],
+    "-4152": ["OB_ERR_NULL_VALUE", "Null value", "Internal Error", "Contact OceanBase Support"],
+    "-4153": ["OB_RESOURCE_OUT", "Out of resource", "Internal Error", "Contact OceanBase Support"],
+    "-4154": ["OB_ERR_SQL_CLIENT", "Internal SQL client error", "Internal Error", "Contact OceanBase Support"],
+    "-4155": ["OB_META_TABLE_WITHOUT_USE_TABLE", "Meta table without use table", "Internal Error", "Contact OceanBase Support"],
+    "-4156": ["OB_DISCARD_PACKET", "Discard packet", "Internal Error", "Contact OceanBase Support"],
+    "-4157": ["OB_OPERATE_OVERFLOW", "value is out of range", "Internal Error", "Contact OceanBase Support"],
+    "-4158": ["OB_INVALID_DATE_FORMAT", "date format not recognized", "Internal Error", "Contact OceanBase Support"],
+    "-4159": ["OB_POOL_REGISTERED_FAILED", "register pool failed", "Internal Error", "Contact OceanBase Support"],
+    "-4160": ["OB_POOL_UNREGISTERED_FAILED", "unregister pool failed", "Internal Error", "Contact OceanBase Support"],
+    "-4161": ["OB_INVALID_ARGUMENT_NUM", "Invalid argument num", "Internal Error", "Contact OceanBase Support"],
+    "-4162": ["OB_LEASE_NOT_ENOUGH", "reserved lease not enough", "Internal Error", "Contact OceanBase Support"],
+    "-4163": ["OB_LEASE_NOT_MATCH", "ups lease not match with rs", "Internal Error", "Contact OceanBase Support"],
+    "-4164": ["OB_UPS_SWITCH_NOT_HAPPEN", "ups switch not happen", "Internal Error", "Contact OceanBase Support"],
+    "-4165": ["OB_EMPTY_RESULT", "Empty result", "Internal Error", "Contact OceanBase Support"],
+    "-4166": ["OB_CACHE_NOT_HIT", "Cache not hit", "Internal Error", "Contact OceanBase Support"],
+    "-4167": ["OB_NESTED_LOOP_NOT_SUPPORT", "Nested loop not support", "Internal Error", "Contact OceanBase Support"],
+    "-4168": ["OB_LOG_INVALID_MOD_ID", "Invalid log module id", "Internal Error", "Contact OceanBase Support"],
+    "-4169": ["OB_LOG_MODULE_UNKNOWN", "Unknown module name", "Internal Error", "Contact OceanBase Support"],
+    "-4170": ["OB_LOG_LEVEL_INVALID", "Invalid level", "Internal Error", "Contact OceanBase Support"],
+    "-4171": ["OB_LOG_PARSER_SYNTAX_ERR", "Syntax to set log_level error", "Internal Error", "Contact OceanBase Support"],
+    "-4172": ["OB_INDEX_OUT_OF_RANGE", "Index out of range", "Internal Error", "Contact OceanBase Support"],
+    "-4173": ["OB_INT_UNDERFLOW", "Int underflow", "Internal Error", "Contact OceanBase Support"],
+    "-4174": ["OB_UNKNOWN_CONNECTION", "Unknown thread id", "Internal Error", "Contact OceanBase Support"],
+    "-4175": ["OB_ERROR_OUT_OF_RANGE", "Out of range", "Internal Error", "Contact OceanBase Support"],
+    "-4176": ["OB_CACHE_SHRINK_FAILED", "shrink cache failed, no available cache", "Internal Error"],
+    "-4177": ["OB_OLD_SCHEMA_VERSION", "Schema version too old", "Internal Error", "Contact OceanBase Support"],
+    "-4178": ["OB_RELEASE_SCHEMA_ERROR", "Release schema error", "Internal Error", "Contact OceanBase Support"],
+    "-4179": ["OB_OP_NOT_ALLOW", "Operation not allowed now", "Internal Error", "Contact OceanBase Support"],
+    "-4180": ["OB_NO_EMPTY_ENTRY", "No empty entry", "Internal Error", "Contact OceanBase Support"],
+    "-4181": ["OB_ERR_ALREADY_EXISTS", "Already exist", "Internal Error", "Contact OceanBase Support"],
+    "-4182": ["OB_SEARCH_NOT_FOUND", "Value not found", "Internal Error", "Contact OceanBase Support"],
+    "-4183": ["OB_BEYOND_THE_RANGE", "Key out of range", "Internal Error", "Contact OceanBase Support"],
+    "-4184": ["OB_CS_OUTOF_DISK_SPACE", "ChunkServer out of disk space", "Internal Error", "Contact OceanBase Support"],
+    "-4185": ["OB_COLUMN_GROUP_NOT_FOUND", "Column group not found", "Internal Error", "Contact OceanBase Support"],
+    "-4186": ["OB_CS_COMPRESS_LIB_ERROR", "ChunkServer failed to get compress library", "Internal Error", "Contact OceanBase Support"],
+    "-4187": ["OB_ITEM_NOT_MATCH", "Item not match", "Internal Error", "Contact OceanBase Support"],
+    "-4188": ["OB_SCHEDULER_TASK_CNT_MISMATCH", "Running task cnt and unfinished task cnt not consistent", "Internal Error", "Contact OceanBase Support"],
+    "-4189": ["OB_INVALID_MACRO_BLOCK_TYPE", "the macro block type does not exist", "Internal Error", "Contact OceanBase Support"],
+    "-4190": ["OB_INVALID_DATE_FORMAT_END", "Incorrect value", "Internal Error", "Contact OceanBase Support"],
+    "-4200": ["OB_HASH_EXIST", "hash map/set entry exist", "Internal Error", "Contact OceanBase Support"],
+    "-4201": ["OB_HASH_NOT_EXIST", "hash map/set entry not exist", "Internal Error", "Contact OceanBase Support"],
+    "-4204": ["OB_HASH_GET_TIMEOUT", "hash map/set get timeout", "Internal Error", "Contact OceanBase Support"],
+    "-4205": ["OB_HASH_PLACEMENT_RETRY", "hash map/set retry", "Internal Error", "Contact OceanBase Support"],
+    "-4206": ["OB_HASH_FULL", "hash map/set full", "Internal Error", "Contact OceanBase Support"],
+    "-4207": ["OB_PACKET_PROCESSED", "packet processed", "Internal Error", "Contact OceanBase Support"],
+ "-4208": ["OB_WAIT_NEXT_TIMEOUT", "wait next packet timeout", "Internal Error", "Contact OceanBase Support"], + "-4209": ["OB_LEADER_NOT_EXIST", "partition has not leader", "Internal Error", "Contact OceanBase Support"], + "-4210": ["OB_PREPARE_MAJOR_FREEZE_FAILED", "prepare major freeze failed", "Internal Error", "Contact OceanBase Support"], + "-4211": ["OB_COMMIT_MAJOR_FREEZE_FAILED", "commit major freeze failed", "Internal Error", "Contact OceanBase Support"], + "-4212": ["OB_ABORT_MAJOR_FREEZE_FAILED", "abort major freeze failed", "Internal Error", "Contact OceanBase Support"], + "-4213": ["OB_MAJOR_FREEZE_NOT_FINISHED", "last major freeze not finish", "Internal Error", "Contact OceanBase Support"], + "-4214": ["OB_PARTITION_NOT_LEADER", "partition is not leader partition", "Internal Error", "Contact OceanBase Support"], + "-4215": ["OB_WAIT_MAJOR_FREEZE_RESPONSE_TIMEOUT", "wait major freeze response timeout", "Internal Error", "Contact OceanBase Support"], + "-4216": ["OB_CURL_ERROR", "curl error", "Internal Error", "Contact OceanBase Support"], + "-4217": ["OB_MAJOR_FREEZE_NOT_ALLOW", "Major freeze not allowed now", "Internal Error", "Contact OceanBase Support"], + "-4218": ["OB_PREPARE_FREEZE_FAILED", "prepare freeze failed", "Internal Error", "Contact OceanBase Support"], + "-4219": ["OB_INVALID_DATE_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], + "-4220": ["OB_INACTIVE_SQL_CLIENT", "Inactive sql client, only read allowed", "Internal Error"], + "-4221": ["OB_INACTIVE_RPC_PROXY", "Inactive rpc proxy, can not send RPC request", "Internal Error"], + "-4222": ["OB_INTERVAL_WITH_MONTH", "Interval with year or month can not be converted to microseconds", "Internal Error", "Contact OceanBase Support"], + "-4223": ["OB_TOO_MANY_DATETIME_PARTS", "Interval has too many datetime parts", "Internal Error", "Contact OceanBase Support"], + "-4224": ["OB_DATA_OUT_OF_RANGE", "Out of range value for column", "Internal Error", "Contact OceanBase Support"], + "-4225": ["OB_PARTITION_NOT_EXIST", "Partition entry not exists", "Internal Error", "Contact OceanBase Support"], + "-4226": ["OB_ERR_TRUNCATED_WRONG_VALUE_FOR_FIELD", "Incorrect integer value", "Internal Error", "Contact OceanBase Support"], + "-4227": ["OB_ERR_NO_DEFAULT_FOR_FIELD", "Field doesn't have a default value", "Internal Error", "Contact OceanBase Support"], + "-4228": ["OB_ERR_FIELD_SPECIFIED_TWICE", "Column specified twice", "Internal Error", "Contact OceanBase Support"], + "-4232": ["OB_NOT_FOLLOWER", "The observer or zone is not a follower", "Internal Error", "Contact OceanBase Support"], + "-4233": ["OB_ERR_OUT_OF_LOWER_BOUND", "smaller than container lower bound", "Internal Error", "Contact OceanBase Support"], + "-4234": ["OB_ERR_OUT_OF_UPPER_BOUND", "bigger than container upper bound", "Internal Error", "Contact OceanBase Support"], + "-4236": ["OB_OBCONFIG_RETURN_ERROR", "ObConfig return error code", "Internal Error", "Contact OceanBase Support"], + "-4237": ["OB_OBCONFIG_APPNAME_MISMATCH", "Appname mismatch with obconfig result", "Internal Error", "Contact OceanBase Support"], + "-4238": ["OB_ERR_VIEW_SELECT_DERIVED", "View's SELECT contains a subquery in the FROM clause", "Internal Error", "Contact OceanBase Support"], + "-4239": ["OB_CANT_MJ_PATH", "Can not use merge-join to join the tables without join conditions", "Internal Error", "Contact OceanBase Support"], + "-4240": ["OB_ERR_NO_JOIN_ORDER_GENERATED", "No join order generated", "Internal Error", "Contact OceanBase Support"], + "-4241": 
["OB_ERR_NO_PATH_GENERATED", "No join path generated", "Internal Error", "Contact OceanBase Support"], + "-4242": ["OB_ERR_WAIT_REMOTE_SCHEMA_REFRESH", "Schema error", "Internal Error", "Contact OceanBase Support"], + "-4243": ["OB_FILE_NOT_OPENED", "file not opened", "Internal Error", "Contact OceanBase Support"], + "-4244": ["OB_TIMER_TASK_HAS_SCHEDULED", "Timer task has been scheduled", "Internal Error", "Contact OceanBase Support"], + "-4245": ["OB_TIMER_TASK_HAS_NOT_SCHEDULED", "Timer task has not been scheduled", "Internal Error", "Contact OceanBase Support"], + "-4246": ["OB_PARSE_DEBUG_SYNC_ERROR", "parse debug sync string error", "Internal Error", "Contact OceanBase Support"], + "-4247": ["OB_UNKNOWN_DEBUG_SYNC_POINT", "unknown debug sync point", "Internal Error", "Contact OceanBase Support"], + "-4248": ["OB_ERR_INTERRUPTED", "task is interrupted while running", "Internal Error", "Contact OceanBase Support"], + "-4249": ["OB_ERR_DATA_TRUNCATED", "Data truncated for argument", "Internal Error", "Contact OceanBase Support"], + "-4250": ["OB_NOT_RUNNING", "module is not running", "Internal Error", "Contact OceanBase Support"], + "-4251": ["OB_INVALID_PARTITION", "partition not valid", "Internal Error", "Contact OceanBase Support"], + "-4252": ["OB_ERR_TIMEOUT_TRUNCATED", "Timeout value truncated to 102 years", "Internal Error", "Contact OceanBase Support"], + "-4254": ["OB_ERR_NET_PACKET_TOO_LARGE", "Got a packet bigger than 'max_allowed_packet' bytes", "Internal Error", "Contact OceanBase Support"], + "-4255": ["OB_TRACE_DESC_NOT_EXIST", "trace log title or key not exist describle", "Internal Error", "Contact OceanBase Support"], + "-4256": ["OB_ERR_NO_DEFAULT", "Variable doesn't have a default value", "Internal Error", "Contact OceanBase Support"], + "-4257": ["OB_ERR_COMPRESS_DECOMPRESS_DATA", "compress data or decompress data failed", "Internal Error", "Contact OceanBase Support"], + "-4258": ["OB_ERR_INCORRECT_STRING_VALUE", "Incorrect string value", "Incorrect string value", "Internal Error"], + "-4259": ["OB_ERR_DISTRIBUTED_NOT_SUPPORTED", "Not supported feature or function", "Internal Error", "Contact OceanBase Support"], + "-4260": ["OB_IS_CHANGING_LEADER", "the partition is changing leader", "Internal Error", "Contact OceanBase Support"], + "-4261": ["OB_DATETIME_FUNCTION_OVERFLOW", "Datetime overflow", "Internal Error", "Contact OceanBase Support"], + "-4262": ["OB_ERR_DOUBLE_TRUNCATED", "Truncated incorrect DOUBLE value", "Internal Error", "Contact OceanBase Support"], + "-4263": ["OB_MINOR_FREEZE_NOT_ALLOW", "Minor freeze not allowed now", "Internal Error", "Contact OceanBase Support"], + "-4264": ["OB_LOG_OUTOF_DISK_SPACE", "Log out of disk space", "Internal Error", "Contact OceanBase Support"], + "-4265": ["OB_RPC_CONNECT_ERROR", "Rpc connect error", "Internal Error", "Contact OceanBase Support"], + "-4266": ["OB_MINOR_MERGE_NOT_ALLOW", "minor merge not allow", "Internal Error", "Contact OceanBase Support"], + "-4267": ["OB_CACHE_INVALID", "Cache invalid", "Internal Error", "Contact OceanBase Support"], + "-4268": ["OB_REACH_SERVER_DATA_COPY_IN_CONCURRENCY_LIMIT", "reach server data copy in concurrency", "Internal Error", "Contact OceanBase Support"], + "-4269": ["OB_WORKING_PARTITION_EXIST", "Working partition entry already exists", "Internal Error", "Contact OceanBase Support"], + "-4270": ["OB_WORKING_PARTITION_NOT_EXIST", "Working partition entry does not exists", "Internal Error", "Contact OceanBase Support"], + "-4271": ["OB_LIBEASY_REACH_MEM_LIMIT", "LIBEASY 
reach memory limit", "Internal Error", "Contact OceanBase Support"], + "-4272": ["OB_MISS_ARGUMENT", "Miss argument", "Miss argument", "Internal Error"], + "-4273": ["OB_CACHE_FREE_BLOCK_NOT_ENOUGH", "free memblock in cache is not enough", "Internal Error", "Contact OceanBase Support"], + "-4274": ["OB_SYNC_WASH_MB_TIMEOUT", "sync wash memblock timeout", "Internal Error", "Contact OceanBase Support"], + "-4275": ["OB_NOT_ALLOW_MIGRATE_IN", "not allow migrate in", "Internal Error", "Contact OceanBase Support"], + "-4276": ["OB_SCHEDULER_TASK_CNT_MISTACH", "Scheduler task cnt does not match", "Internal Error", "Contact OceanBase Support"], + "-4277": ["OB_MISS_ARGUMENT", "Miss argument", "Internal Error", "Contact OceanBase Support"], + "-4278": ["OB_LAST_LOG_NOT_COMPLETE", "last log is not complete", "Internal Error", "Contact OceanBase Support"], + "-4279": ["OB_TABLE_IS_DELETED", "table is deleted", "Internal Error", "Contact OceanBase Support"], + "-4280": ["OB_VERSION_RANGE_NOT_CONTINUES", "version range not continues", "Internal Error", "Contact OceanBase Support"], + "-4281": ["OB_INVALID_IO_BUFFER", "io buffer is invalid", "Internal Error", "Contact OceanBase Support"], + "-4282": ["OB_PARTITION_IS_REMOVED", "partition is removed", "Internal Error", "Contact OceanBase Support"], + "-4283": ["OB_GTS_NOT_READY", "gts is not ready", "Internal Error", "Contact OceanBase Support"], + "-4284": ["OB_MAJOR_SSTABLE_NOT_EXIST", "major sstable not exist", "Internal Error", "Contact OceanBase Support"], + "-4285": ["OB_VERSION_RANGE_DISCARDED", "Request to read too old version range data", "Internal Error", "Contact OceanBase Support"], + "-4286": ["OB_MAJOR_SSTABLE_HAS_MERGED", "major sstable may has been merged", "Internal Error", "Contact OceanBase Support"], + "-4287": ["OB_MINOR_SSTABLE_RANGE_CROSS", "minor sstable version range cross", "Internal Error", "Contact OceanBase Support"], + "-4288": ["OB_MEMTABLE_CANNOT_MINOR_MERGE", "memtable cannot minor merge", "Internal Error", "Contact OceanBase Support"], + "-4289": ["OB_TASK_EXIST", "task exist", "Internal Error", "Contact OceanBase Support"], + "-4290": ["OB_ALLOCATE_DISK_SPACE_FAILED", "cannot allocate disk space", "Internal Error", "Contact OceanBase Support"], + "-4291": ["OB_CANT_FIND_UDF", "Can't load function", "Internal Error", "Contact OceanBase Support"], + "-4292": ["OB_CANT_INITIALIZE_UDF", "Can't initialize function", "Can not initialize function ", "Internal Error"], + "-4293": ["OB_UDF_NO_PATHS", "No paths allowed for shared library", "Internal Error", "Contact OceanBase Support"], + "-4294": ["OB_UDF_EXISTS", "Function already exists", "Function already exists", "Internal Error"], + "-4295": ["OB_CANT_OPEN_LIBRARY", "Can't open shared library", "Can not open shared library", "Internal Error"], + "-4296": ["OB_CANT_FIND_DL_ENTRY", "Can't find symbol in library", "Internal Error", "Contact OceanBase Support"], + "-4297": ["OB_OBJECT_NAME_EXIST", "name is already used by an existing object", "Internal Error", "Contact OceanBase Support"], + "-4298": ["OB_OBJECT_NAME_NOT_EXIST", "object does not exist", "Internal Error", "Contact OceanBase Support"], + "-4300": ["OB_ERR_INVALID_SEQUENCE_NAME", "invalid sequence name", "Internal Error", "Contact OceanBase Support"], + "-4301": ["OB_ERR_DUP_MAXVALUE_SPEC", "duplicate MAXVALUE/NOMAXVALUE specifications", "Internal Error", "Contact OceanBase Support"], + "-4302": ["OB_ERR_DUP_MINVALUE_SPEC", "duplicate MINVALUE/NOMINVALUE specifications", "Internal Error", "Contact OceanBase 
Support"], + "-4303": ["OB_ERR_DUP_CYCLE_SPEC", "duplicate CYCLE/NOCYCLE specifications", "Internal Error", "Contact OceanBase Support"], + "-4304": ["OB_ERR_DUP_CACHE_SPEC", "duplicate CACHE/NOCACHE specifications", "Internal Error", "Contact OceanBase Support"], + "-4305": ["OB_ERR_DUP_ORDER_SPEC", "duplicate ORDER/NOORDER specifications", "Internal Error", "Contact OceanBase Support"], + "-4306": ["OB_ERR_CONFL_MAXVALUE_SPEC", "conflicting MAXVALUE/NOMAXVALUE specifications", "Internal Error", "Contact OceanBase Support"], + "-4307": ["OB_ERR_CONFL_MINVALUE_SPEC", "conflicting MINVALUE/NOMINVALUE specifications", "Internal Error", "Contact OceanBase Support"], + "-4308": ["OB_ERR_CONFL_CYCLE_SPEC", "conflicting CYCLE/NOCYCLE specifications", "Internal Error", "Contact OceanBase Support"], + "-4309": ["OB_ERR_CONFL_CACHE_SPEC", "conflicting CACHE/NOCACHE specifications", "Internal Error", "Contact OceanBase Support"], + "-4310": ["OB_ERR_CONFL_ORDER_SPEC", "conflicting ORDER/NOORDER specifications", "Internal Error", "Contact OceanBase Support"], + "-4311": ["OB_ERR_ALTER_START_SEQ_NUMBER_NOT_ALLOWED", "cannot alter starting sequence number", "Internal Error", "Contact OceanBase Support"], + "-4312": ["OB_ERR_DUP_INCREMENT_BY_SPEC", "duplicate INCREMENT BY specifications", "Internal Error", "Contact OceanBase Support"], + "-4313": ["OB_ERR_DUP_START_WITH_SPEC", "duplicate START WITH specifications", "Internal Error", "Contact OceanBase Support"], + "-4314": ["OB_ERR_REQUIRE_ALTER_SEQ_OPTION", "no options specified for ALTER SEQUENCE", "Internal Error", "Contact OceanBase Support"], + "-4315": ["OB_ERR_SEQ_NOT_ALLOWED_HERE", "sequence number not allowed here", "Internal Error", "Contact OceanBase Support"], + "-4316": ["OB_ERR_SEQ_NOT_EXIST", "sequence does not exist", "Internal Error", "Contact OceanBase Support"], + "-4317": ["OB_ERR_SEQ_OPTION_MUST_BE_INTEGER", "sequence parameter must be an integer", "Internal Error", "Contact OceanBase Support"], + "-4318": ["OB_ERR_SEQ_INCREMENT_CAN_NOT_BE_ZERO", "INCREMENT must be a nonzero integer", "Internal Error", "Contact OceanBase Support"], + "-4319": ["OB_ERR_SEQ_OPTION_EXCEED_RANGE", "sequence parameter exceeds maximum size allowed", "Internal Error", "Contact OceanBase Support"], + "-4320": ["OB_ERR_MINVALUE_LARGER_THAN_MAXVALUE", "MINVALUE must be less than MAXVALUE", "Internal Error", "Contact OceanBase Support"], + "-4321": ["OB_ERR_SEQ_INCREMENT_TOO_LARGE", "INCREMENT must be less than MAXVALUE minus MINVALUE", "Internal Error", "Contact OceanBase Support"], + "-4322": ["OB_ERR_START_WITH_LESS_THAN_MINVALUE", "START WITH cannot be less than MINVALUE", "Internal Error", "Contact OceanBase Support"], + "-4323": ["OB_ERR_MINVALUE_EXCEED_CURRVAL", "MINVALUE cannot be made to exceed the current value", "Internal Error", "Contact OceanBase Support"], + "-4324": ["OB_ERR_START_WITH_EXCEED_MAXVALUE", "START WITH cannot be more than MAXVALUE", "Internal Error", "Contact OceanBase Support"], + "-4325": ["OB_ERR_MAXVALUE_EXCEED_CURRVAL", "MAXVALUE cannot be made to be less than the current value", "Internal Error", "Contact OceanBase Support"], + "-4326": ["OB_ERR_SEQ_CACHE_TOO_SMALL", "the number of values to CACHE must be greater than 1", "Internal Error", "Contact OceanBase Support"], + "-4327": ["OB_ERR_SEQ_OPTION_OUT_OF_RANGE", "sequence option value out of range", "Internal Error", "Contact OceanBase Support"], + "-4328": ["OB_ERR_SEQ_CACHE_TOO_LARGE", "number to CACHE must be less than one cycle", "Internal Error", "Contact OceanBase 
Support"], + "-4329": ["OB_ERR_SEQ_REQUIRE_MINVALUE", "descending sequences that CYCLE must specify MINVALUE", "Internal Error", "Contact OceanBase Support"], + "-4330": ["OB_ERR_SEQ_REQUIRE_MAXVALUE", "ascending sequences that CYCLE must specify MAXVALUE", "Internal Error", "Contact OceanBase Support"], + "-4331": ["OB_ERR_SEQ_NO_LONGER_EXIST", "sequence no longer exists", "Internal Error", "Contact OceanBase Support"], + "-4332": ["OB_ERR_SEQ_VALUE_EXCEED_LIMIT", "sequence exceeds limit and cannot be instantiated", "Internal Error", "Contact OceanBase Support"], + "-4333": ["OB_ERR_DIVISOR_IS_ZERO", "divisor is equal to zero", "Internal Error", "Contact OceanBase Support"], + "-4334": ["OB_ERR_AES_DECRYPT", "fail to decrypt data", "Internal Error", "Contact OceanBase Support"], + "-4335": ["OB_ERR_AES_ENCRYPT", "fail to encrypt data", "Internal Error", "Contact OceanBase Support"], + "-4336": ["OB_ERR_AES_IV_LENGTH", "The initialization vector supplied to aes_encrypt is too short. Must be at least 16 bytes long", "Internal Error", "Contact OceanBase Support"], + "-4505": ["OB_IMPORT_NOT_IN_SERVER", "Import not in service", "Internal Error", "Contact OceanBase Support"], + "-4507": ["OB_CONVERT_ERROR", "Convert error", "Internal Error", "Contact OceanBase Support"], + "-4510": ["OB_BYPASS_TIMEOUT", "Bypass timeout", "Internal Error", "Contact OceanBase Support"], + "-4512": ["OB_RS_STATE_NOT_ALLOW", "RootServer state error", "Internal Error", "Contact OceanBase Support"], + "-4515": ["OB_NO_REPLICA_VALID", "No replica is valid", "Internal Error", "Contact OceanBase Support"], + "-4517": ["OB_NO_NEED_UPDATE", "No need to update", "Internal Error", "Contact OceanBase Support"], + "-4518": ["OB_CACHE_TIMEOUT", "Cache timeout", "Internal Error", "Contact OceanBase Support"], + "-4519": ["OB_ITER_STOP", "Iteration was stopped", "Internal Error", "Contact OceanBase Support"], + "-4523": ["OB_ZONE_ALREADY_MASTER", "The zone is the master already", "Internal Error", "Contact OceanBase Support"], + "-4524": ["OB_IP_PORT_IS_NOT_SLAVE_ZONE", "Not slave zone", "Internal Error", "Contact OceanBase Support"], + "-4525": ["OB_ZONE_IS_NOT_SLAVE", "Not slave zone", "Internal Error", "Contact OceanBase Support"], + "-4526": ["OB_ZONE_IS_NOT_MASTER", "Not master zone", "Internal Error", "Contact OceanBase Support"], + "-4527": ["OB_CONFIG_NOT_SYNC", "Configuration not sync", "Internal Error", "Contact OceanBase Support"], + "-4528": ["OB_IP_PORT_IS_NOT_ZONE", "Not a zone address", "Internal Error", "Contact OceanBase Support"], + "-4529": ["OB_MASTER_ZONE_NOT_EXIST", "Master zone not exist", "Internal Error", "Contact OceanBase Support"], + "-4530": ["OB_ZONE_INFO_NOT_EXIST", "Zone info not exist", "Internal Error", "Contact OceanBase Support"], + "-4531": ["OB_GET_ZONE_MASTER_UPS_FAILED", "Failed to get master UpdateServer", "Internal Error", "Contact OceanBase Support"], + "-4532": ["OB_MULTIPLE_MASTER_ZONES_EXIST", "Multiple master zones", "Internal Error", "Contact OceanBase Support"], + "-4533": ["OB_INDEXING_ZONE_INVALID", "indexing zone is not exist anymore or not active", "Internal Error", "Contact OceanBase Support"], + "-4537": ["OB_ROOT_TABLE_RANGE_NOT_EXIST", "Tablet range not exist", "Internal Error", "Contact OceanBase Support"], + "-4538": ["OB_ROOT_MIGRATE_CONCURRENCY_FULL", "Migrate concurrency full", "Internal Error", "Contact OceanBase Support"], + "-4539": ["OB_ROOT_MIGRATE_INFO_NOT_FOUND", "Migrate info not found", "Internal Error", "Contact OceanBase Support"], + "-4540": 
["OB_NOT_DATA_LOAD_TABLE", "No data to load", "Internal Error", "Contact OceanBase Support"], + "-4541": ["OB_DATA_LOAD_TABLE_DUPLICATED", "Duplicated table data to load", "Internal Error", "Contact OceanBase Support"], + "-4542": ["OB_ROOT_TABLE_ID_EXIST", "Table ID exist", "Internal Error", "Contact OceanBase Support"], + "-4543": ["OB_INDEX_TIMEOUT", "Building index timeout", "Internal Error", "Contact OceanBase Support"], + "-4544": ["OB_ROOT_NOT_INTEGRATED", "Root not integrated", "Internal Error", "Contact OceanBase Support"], + "-4545": ["OB_INDEX_INELIGIBLE", "index data not unique", "Internal Error", "Contact OceanBase Support"], + "-4546": ["OB_REBALANCE_EXEC_TIMEOUT", "execute replication or migration task timeout", "Internal Error", "Contact OceanBase Support"], + "-4547": ["OB_MERGE_NOT_STARTED", "global merge not started", "Internal Error", "Contact OceanBase Support"], + "-4548": ["OB_MERGE_ALREADY_STARTED", "merge already started", "Internal Error", "Contact OceanBase Support"], + "-4549": ["OB_ROOTSERVICE_EXIST", "rootservice already exist", "Internal Error", "Contact OceanBase Support"], + "-4550": ["OB_RS_SHUTDOWN", "rootservice is shutdown", "Internal Error", "Contact OceanBase Support"], + "-4551": ["OB_SERVER_MIGRATE_IN_DENIED", "server migrate in denied", "Internal Error", "Contact OceanBase Support"], + "-4552": ["OB_REBALANCE_TASK_CANT_EXEC", "rebalance task can not executing now", "Internal Error", "Contact OceanBase Support"], + "-4553": ["OB_PARTITION_CNT_REACH_ROOTSERVER_LIMIT", "rootserver can not hold more partition", "Internal Error", "Contact OceanBase Support"], + "-4554": ["OB_REBALANCE_TASK_NOT_IN_PROGRESS", "rebalance task not in progress on observer", "Internal Error", "Contact OceanBase Support"], + "-4600": ["OB_DATA_SOURCE_NOT_EXIST", "Data source not exist", "Internal Error", "Contact OceanBase Support"], + "-4601": ["OB_DATA_SOURCE_TABLE_NOT_EXIST", "Data source table not exist", "Internal Error", "Contact OceanBase Support"], + "-4602": ["OB_DATA_SOURCE_RANGE_NOT_EXIST", "Data source range not exist", "Internal Error", "Contact OceanBase Support"], + "-4603": ["OB_DATA_SOURCE_DATA_NOT_EXIST", "Data source data not exist", "Internal Error", "Contact OceanBase Support"], + "-4604": ["OB_DATA_SOURCE_SYS_ERROR", "Data source sys error", "Internal Error", "Contact OceanBase Support"], + "-4605": ["OB_DATA_SOURCE_TIMEOUT", "Data source timeout", "Internal Error", "Contact OceanBase Support"], + "-4606": ["OB_DATA_SOURCE_CONCURRENCY_FULL", "Data source concurrency full", "Internal Error", "Contact OceanBase Support"], + "-4607": ["OB_DATA_SOURCE_WRONG_URI_FORMAT", "Data source wrong URI format", "Internal Error", "Contact OceanBase Support"], + "-4608": ["OB_SSTABLE_VERSION_UNEQUAL", "SSTable version not equal", "Internal Error", "Contact OceanBase Support"], + "-4609": ["OB_UPS_RENEW_LEASE_NOT_ALLOWED", "ups should not renew its lease", "Internal Error", "Contact OceanBase Support"], + "-4610": ["OB_UPS_COUNT_OVER_LIMIT", "ups count over limit", "Internal Error", "Contact OceanBase Support"], + "-4611": ["OB_NO_UPS_MAJORITY", "ups not form a majority", "Internal Error", "Contact OceanBase Support"], + "-4613": ["OB_INDEX_COUNT_REACH_THE_LIMIT", "created index tables count has reach the limit:128", "Internal Error", "Contact OceanBase Support"], + "-4614": ["OB_TASK_EXPIRED", "task expired", "Internal Error", "Contact OceanBase Support"], + "-4615": ["OB_TABLEGROUP_NOT_EMPTY", "tablegroup is not empty", "Internal Error", "Contact OceanBase Support"], + 
"-4620": ["OB_INVALID_SERVER_STATUS", "server status is not valid", "Internal Error", "Contact OceanBase Support"], + "-4621": ["OB_WAIT_ELEC_LEADER_TIMEOUT", "wait elect partition leader timeout", "Internal Error", "Contact OceanBase Support"], + "-4622": ["OB_WAIT_ALL_RS_ONLINE_TIMEOUT", "wait all rs online timeout", "Internal Error", "Contact OceanBase Support"], + "-4623": ["OB_ALL_REPLICAS_ON_MERGE_ZONE", "all replicas of partition group are on zones to merge", "Internal Error", "Contact OceanBase Support"], + "-4624": ["OB_MACHINE_RESOURCE_NOT_ENOUGH", "machine resource is not enough to hold a new unit", "Internal Error", "Contact OceanBase Support"], + "-4625": ["OB_NOT_SERVER_CAN_HOLD_SOFTLY", "not server can hole the unit and not over soft limit", "Internal Error", "Contact OceanBase Support"], + "-4626": ["OB_RESOURCE_POOL_ALREADY_GRANTED", "resource pool has already been granted to a tenant", "Internal Error", "Contact OceanBase Support"], + "-4628": ["OB_SERVER_ALREADY_DELETED", "server has already been deleted", "Internal Error", "Contact OceanBase Support"], + "-4629": ["OB_SERVER_NOT_DELETING", "server is not in deleting status", "Internal Error", "Contact OceanBase Support"], + "-4630": ["OB_SERVER_NOT_IN_WHITE_LIST", "server not in server white list", "Internal Error", "Contact OceanBase Support"], + "-4631": ["OB_SERVER_ZONE_NOT_MATCH", "server zone not match", "Internal Error", "Contact OceanBase Support"], + "-4632": ["OB_OVER_ZONE_NUM_LIMIT", "zone num has reach max zone num", "Internal Error", "Contact OceanBase Support"], + "-4633": ["OB_ZONE_STATUS_NOT_MATCH", "zone status not match", "Internal Error", "Contact OceanBase Support"], + "-4634": ["OB_RESOURCE_UNIT_IS_REFERENCED", "resource unit is referenced by resource pool", "Internal Error", "Contact OceanBase Support"], + "-4636": ["OB_DIFFERENT_PRIMARY_ZONE", "table schema primary zone different with other table in sampe tablegroup", "Internal Error", "Contact OceanBase Support"], + "-4637": ["OB_SERVER_NOT_ACTIVE", "server is not active", "Internal Error", "Contact OceanBase Support"], + "-4638": ["OB_RS_NOT_MASTER", "The RootServer is not the master", "Internal Error", "Contact OceanBase Support"], + "-4639": ["OB_CANDIDATE_LIST_ERROR", "The candidate list is invalid", "Internal Error", "Contact OceanBase Support"], + "-4640": ["OB_PARTITION_ZONE_DUPLICATED", "The chosen partition servers belong to same zone.", "Internal Error", "Contact OceanBase Support"], + "-4641": ["OB_ZONE_DUPLICATED", "Duplicated zone in zone list", "Internal Error", "Contact OceanBase Support"], + "-4642": ["OB_NOT_ALL_ZONE_ACTIVE", "Not all zone in zone list are active", "Internal Error", "Contact OceanBase Support"], + "-4643": ["OB_PRIMARY_ZONE_NOT_IN_ZONE_LIST", "primary zone not in zone list", "Internal Error", "Contact OceanBase Support"], + "-4644": ["OB_REPLICA_NUM_NOT_MATCH", "replica num not same with zone count", "Internal Error", "Contact OceanBase Support"], + "-4645": ["OB_ZONE_LIST_POOL_LIST_NOT_MATCH", "zone list not a subset of resource pool list", "Internal Error", "Contact OceanBase Support"], + "-4646": ["OB_INVALID_TENANT_NAME", "tenant name is too long", "Internal Error", "Contact OceanBase Support"], + "-4647": ["OB_EMPTY_RESOURCE_POOL_LIST", "resource pool list is empty", "Internal Error", "Contact OceanBase Support"], + "-4648": ["OB_RESOURCE_UNIT_NOT_EXIST", "resource unit not exist", "Internal Error", "Contact OceanBase Support"], + "-4649": ["OB_RESOURCE_UNIT_EXIST", "resource unit already exist", "Internal 
Error", "Contact OceanBase Support"], + "-4650": ["OB_RESOURCE_POOL_NOT_EXIST", "resource pool not exist", "Internal Error", "Contact OceanBase Support"], + "-4651": ["OB_RESOURCE_POOL_EXIST", "resource pool already exist", "Internal Error", "Contact OceanBase Support"], + "-4652": ["OB_WAIT_LEADER_SWITCH_TIMEOUT", "wait leader switch timeout", "Internal Error", "Contact OceanBase Support"], + "-4653": ["OB_LOCATION_NOT_EXIST", "location not exist", "Internal Error", "Contact OceanBase Support"], + "-4654": ["OB_LOCATION_LEADER_NOT_EXIST", "location leader not exist", "Internal Error", "Contact OceanBase Support"], + "-4655": ["OB_ZONE_NOT_ACTIVE", "zone not active", "Internal Error", "Contact OceanBase Support"], + "-4656": ["OB_UNIT_NUM_OVER_SERVER_COUNT", "resource pool unit num is bigger than zone server count", "Internal Error", "Contact OceanBase Support"], + "-4657": ["OB_POOL_SERVER_INTERSECT", "resource pool list unit server intersect", "Internal Error", "Contact OceanBase Support"], + "-4658": ["OB_NOT_SINGLE_RESOURCE_POOL", "create tenant only support single resource pool now", "Internal Error", "Contact OceanBase Support"], + "-4659": ["OB_INVALID_RESOURCE_UNIT", "invalid resource unit", "Internal Error", "Contact OceanBase Support"], + "-4660": ["OB_STOP_SERVER_IN_MULTIPLE_ZONES", "Can not stop server in multiple zones", "Internal Error", "Contact OceanBase Support"], + "-4661": ["OB_SESSION_ENTRY_EXIST", "Session already exist", "Internal Error", "Contact OceanBase Support"], + "-4662": ["OB_GOT_SIGNAL_ABORTING", "Got signal. Aborting!", "Internal Error", "Contact OceanBase Support"], + "-4663": ["OB_SERVER_NOT_ALIVE", "server is not alive", "Internal Error", "Contact OceanBase Support"], + "-4664": ["OB_GET_LOCATION_TIME_OUT", "Timeout", "Internal Error", "Contact OceanBase Support"], + "-4665": ["OB_UNIT_IS_MIGRATING", "Unit is migrating can not migrate again", "Internal Error", "Contact OceanBase Support"], + "-4666": ["OB_CLUSTER_NO_MATCH", "cluster name is not match", "Internal Error", "Contact OceanBase Support"], + "-4667": ["OB_CHECK_ZONE_MERGE_ORDER", "Please check new zone in zone_merge_order. 
You can show parameters like 'zone_merge_order'", "Internal Error", "Contact OceanBase Support"], + "-4668": ["OB_ERR_ZONE_NOT_EMPTY", "zone not empty", "Internal Error", "Contact OceanBase Support"], + "-4669": ["OB_DIFFERENT_LOCALITY", "locality not match check it", "Internal Error", "Contact OceanBase Support"], + "-4670": ["OB_EMPTY_LOCALITY", "locality is empty", "Internal Error", "Contact OceanBase Support"], + "-4671": ["OB_FULL_REPLICA_NUM_NOT_ENOUGH", "full replica num not enough", "Internal Error", "Contact OceanBase Support"], + "-4672": ["OB_REPLICA_NUM_NOT_ENOUGH", "replica num not enough", "Internal Error", "Contact OceanBase Support"], + "-4673": ["OB_DATA_SOURCE_NOT_VALID", "Data source not valid", "Internal Error", "Contact OceanBase Support"], + "-4674": ["OB_RUN_JOB_NOT_SUCCESS", "run job not success yet", "Internal Error", "Contact OceanBase Support"], + "-4675": ["OB_NO_NEED_REBUILD", "no need to rebuild", "Internal Error", "Contact OceanBase Support"], + "-4676": ["OB_NEED_REMOVE_UNNEED_TABLE", "need remove unneed table", "Internal Error", "Contact OceanBase Support"], + "-4677": ["OB_NO_NEED_MERGE", "no need to merge", "Internal Error", "Contact OceanBase Support"], + "-4678": ["OB_CONFLICT_OPTION", "conflicting specifications", "Internal Error", "Contact OceanBase Support"], + "-4679": ["OB_DUPLICATE_OPTION", "duplicate specifications", "Internal Error", "Contact OceanBase Support"], + "-4680": ["OB_INVALID_OPTION", "invalid specifications", "Internal Error", "Contact OceanBase Support"], + "-4681": ["OB_RPC_NEED_RECONNECT", "rpc need reconnect", "Internal Error", "Contact OceanBase Support"], + "-4682": ["OB_CANNOT_COPY_MAJOR_SSTABLE", "cannot copy major sstable now", "Internal Error", "Contact OceanBase Support"], + "-4683": ["OB_SRC_DO_NOT_ALLOWED_MIGRATE", "src do not allowed migrate", "Internal Error", "Contact OceanBase Support"], + "-4684": ["OB_TOO_MANY_TENANT_PARTITIONS_ERROR", "Too many partitions were defined for this tenant", "Internal Error", "Contact OceanBase Support"], + "-4685": ["OB_ACTIVE_MEMTBALE_NOT_EXSIT", "active memtable not exist", "Internal Error", "Contact OceanBase Support"], + "-5000": ["OB_ERR_PARSER_INIT", "Failed to init SQL parser", "Internal Error", "Contact OceanBase Support"], + "-5001": ["OB_ERR_PARSE_SQL", "Parse error", "Internal Error", "Contact OceanBase Support"], + "-5002": ["OB_ERR_RESOLVE_SQL", "Resolve error", "Internal Error", "Contact OceanBase Support"], + "-5003": ["OB_ERR_GEN_PLAN", "Generate plan error", "Internal Error", "Contact OceanBase Support"], + "-5006": ["OB_ERR_PARSER_SYNTAX", "You have an error in your SQL syntax; check the manual that corresponds to your OceanBase version for the right syntax to use", "Internal Error", "Please check your SQL"], + "-5007": ["OB_ERR_COLUMN_SIZE", "The used SELECT statements have a different number of columns", "Internal Error", "Contact OceanBase Support"], + "-5008": ["OB_ERR_COLUMN_DUPLICATE", "Duplicate column name", "Internal Error", "Contact OceanBase Support"], + "-5010": ["OB_ERR_OPERATOR_UNKNOWN", "Unknown operator", "Internal Error", "Contact OceanBase Support"], + "-5011": ["OB_ERR_STAR_DUPLICATE", "Duplicated star", "Internal Error", "Contact OceanBase Support"], + "-5012": ["OB_ERR_ILLEGAL_ID", "Illegal ID", "Internal Error", "Contact OceanBase Support"], + "-5014": ["OB_ERR_ILLEGAL_VALUE", "Illegal value", "Internal Error", "Contact OceanBase Support"], + "-5015": ["OB_ERR_COLUMN_AMBIGUOUS", "Ambiguous column", "Internal Error", "Contact OceanBase Support"], 
+ "-5016": ["OB_ERR_LOGICAL_PLAN_FAILD", "Generate logical plan error", "Internal Error", "Contact OceanBase Support"], + "-5017": ["OB_ERR_SCHEMA_UNSET", "Schema not set", "Internal Error", "Contact OceanBase Support"], + "-5018": ["OB_ERR_ILLEGAL_NAME", "Illegal name", "Internal Error", "Contact OceanBase Support"], + "-5020": ["OB_ERR_TABLE_EXIST", "Table already exists", "Internal Error", "Contact OceanBase Support"], + "-5019": ["OB_TABLE_NOT_EXIST", "Table doesn't exist", "Internal Error", "Contact OceanBase Support"], + "-5022": ["OB_ERR_EXPR_UNKNOWN", "Unknown expression", "Internal Error", "Contact OceanBase Support"], + "-5023": ["OB_ERR_ILLEGAL_TYPE", "Illegal type", "Internal Error", "Maybe you should use java.sql.Timestamp instead of java.util.Date."], + "-5024": ["OB_ERR_PRIMARY_KEY_DUPLICATE", "Duplicated primary key", "Internal Error", "Contact OceanBase Support"], + "-5025": ["OB_ERR_KEY_NAME_DUPLICATE", "Duplicated key name", "Internal Error", "Contact OceanBase Support"], + "-5026": ["OB_ERR_CREATETIME_DUPLICATE", "Duplicated createtime", "Internal Error", "Contact OceanBase Support"], + "-5027": ["OB_ERR_MODIFYTIME_DUPLICATE", "Duplicated modifytime", "Internal Error", "Contact OceanBase Support"], + "-5028": ["OB_ERR_ILLEGAL_INDEX", "Illegal index", "Internal Error", "Contact OceanBase Support"], + "-5029": ["OB_ERR_INVALID_SCHEMA", "Invalid schema", "Internal Error", "Contact OceanBase Support"], + "-5030": ["OB_ERR_INSERT_NULL_ROWKEY", "Insert null rowkey", "Internal Error", "Contact OceanBase Support"], + "-5031": ["OB_ERR_COLUMN_NOT_FOUND", "Column not found", "Internal Error", "Contact OceanBase Support"], + "-5032": ["OB_ERR_DELETE_NULL_ROWKEY", "Delete null rowkey", "Internal Error", "Contact OceanBase Support"], + "-5034": ["OB_ERR_USER_EMPTY", "No user", "Internal Error", "Contact OceanBase Support"], + "-5035": ["OB_ERR_USER_NOT_EXIST", "User not exist", "Internal Error", "Contact OceanBase Support"], + "-5038": ["OB_ERR_WRONG_PASSWORD", "Incorrect password", "Internal Error", "Contact OceanBase Support"], + "-5039": ["OB_ERR_USER_IS_LOCKED", "User locked", "Internal Error", "Contact OceanBase Support"], + "-5040": ["OB_ERR_UPDATE_ROWKEY_COLUMN", "Can not update rowkey column", "Internal Error", "Contact OceanBase Support"], + "-5041": ["OB_ERR_UPDATE_JOIN_COLUMN", "Can not update join column", "Internal Error", "Contact OceanBase Support"], + "-5043": ["OB_ERR_PREPARE_STMT_NOT_FOUND", "Unknown prepared statement", "Internal Error", "Contact OceanBase Support"], + "-5044": ["OB_ERR_SYS_VARIABLE_UNKNOWN", "Unknown system variable", "Internal Error", "Contact OceanBase Support"], + "-5046": ["OB_ERR_OLDER_PRIVILEGE_VERSION", "Older privilege version", "Internal Error", "Contact OceanBase Support"], + "-5050": ["OB_ERR_USER_EXIST", "User exists", "Internal Error", "Contact OceanBase Support"], + "-5051": ["OB_ERR_PASSWORD_EMPTY", "Empty password", "Internal Error", "Contact OceanBase Support"], + "-5052": ["OB_ERR_GRANT_PRIVILEGES_TO_CREATE_TABLE", "Failed to grant privelege", "Internal Error", "Contact OceanBase Support"], + "-5053": ["OB_ERR_WRONG_DYNAMIC_PARAM", "Wrong dynamic parameters", "Internal Error", "Contact OceanBase Support"], + "-5054": ["OB_ERR_PARAM_SIZE", "Incorrect parameter count", "Internal Error", "Contact OceanBase Support"], + "-5055": ["OB_ERR_FUNCTION_UNKNOWN", "FUNCTION does not exist", "Internal Error", "Contact OceanBase Support"], + "-5056": ["OB_ERR_CREAT_MODIFY_TIME_COLUMN", "CreateTime or ModifyTime column cannot be modified", 
"Internal Error", "Contact OceanBase Support"], + "-5057": ["OB_ERR_MODIFY_PRIMARY_KEY", "Primary key cannot be modified", "Internal Error", "Contact OceanBase Support"], + "-5058": ["OB_ERR_PARAM_DUPLICATE", "Duplicated parameters", "Internal Error", "Contact OceanBase Support"], + "-5059": ["OB_ERR_TOO_MANY_SESSIONS", "Too many sessions", "Internal Error", "Contact OceanBase Support"], + "-5061": ["OB_ERR_TOO_MANY_PS", "Too many prepared statements", "Internal Error", "Contact OceanBase Support"], + "-5063": ["OB_ERR_HINT_UNKNOWN", "Unknown hint", "Internal Error", "Contact OceanBase Support"], + "-5064": ["OB_ERR_WHEN_UNSATISFIED", "When condition not satisfied", "Internal Error", "Contact OceanBase Support"], + "-5065": ["OB_ERR_QUERY_INTERRUPTED", "Query execution was interrupted", "Internal Error", "Contact OceanBase Support"], + "-5066": ["OB_ERR_SESSION_INTERRUPTED", "OceanBase instance terminated. Disconnection forced", "Internal Error", "Contact OceanBase Support"], + "-5067": ["OB_ERR_UNKNOWN_SESSION_ID", "Unknown session ID", "Internal Error", "Contact OceanBase Support"], + "-5068": ["OB_ERR_PROTOCOL_NOT_RECOGNIZE", "Incorrect protocol", "Internal Error", "Contact OceanBase Support"], + "-5069": ["OB_ERR_WRITE_AUTH_ERROR", "Write auth packet error", "Internal Error", "Contact OceanBase Support"], + "-5070": ["OB_ERR_PARSE_JOIN_INFO", "Wrong join info", "Internal Error", "Contact OceanBase Support"], + "-5071": ["OB_ERR_ALTER_INDEX_COLUMN", "Cannot alter index column", "Internal Error", "Contact OceanBase Support"], + "-5072": ["OB_ERR_MODIFY_INDEX_TABLE", "Cannot modify index table", "Internal Error", "Contact OceanBase Support"], + "-5073": ["OB_ERR_INDEX_UNAVAILABLE", "Index unavailable", "Internal Error", "Contact OceanBase Support"], + "-5074": ["OB_ERR_NOP_VALUE", "NOP cannot be used here", "Internal Error", "Contact OceanBase Support"], + "-5080": ["OB_ERR_PS_TOO_MANY_PARAM", "Prepared statement contains too many placeholders", "Internal Error", "Contact OceanBase Support"], + "-5081": ["OB_ERR_READ_ONLY", "The server is read only now", "Internal Error", "Contact OceanBase Support"], + "-5083": ["OB_ERR_INVALID_TYPE_FOR_OP", "Invalid data type for the operation", "Internal Error", "Contact OceanBase Support"], + "-5084": ["OB_ERR_CAST_VARCHAR_TO_BOOL", "Can not cast varchar value to bool type", "Internal Error", "Contact OceanBase Support"], + "-5085": ["OB_ERR_CAST_VARCHAR_TO_NUMBER", "Not a number Can not cast varchar value to number type", "Internal Error", "Contact OceanBase Support"], + "-5086": ["OB_ERR_CAST_VARCHAR_TO_TIME", "Not timestamp Can not cast varchar value to timestamp type", "Internal Error", "Contact OceanBase Support"], + "-5087": ["OB_ERR_CAST_NUMBER_OVERFLOW", "Result value was out of range when cast to number", "Internal Error", "Contact OceanBase Support"], + "-5090": ["OB_SCHEMA_NUMBER_PRECISION_OVERFLOW", "Precision was out of range", "Internal Error", "Contact OceanBase Support"], + "-5091": ["OB_SCHEMA_NUMBER_SCALE_OVERFLOW", "Scale value was out of range", "Internal Error", "Contact OceanBase Support"], + "-5092": ["OB_ERR_INDEX_UNKNOWN", "Unknown index", "Internal Error", "Contact OceanBase Support"], + "-5093": ["OB_NUMERIC_OVERFLOW", "numeric overflow", "Internal Error", "Contact OceanBase Support"], + "-5094": ["OB_ERR_TOO_MANY_JOIN_TABLES", "too many joined tables", "Internal Error", "Contact OceanBase Support"], + "-5099": ["OB_ERR_SYS_CONFIG_UNKNOWN", "System config unknown", "Internal Error", "Contact OceanBase Support"], + "-5100": 
["OB_ERR_LOCAL_VARIABLE", "Local variable", "Internal Error", "Contact OceanBase Support"], + "-5101": ["OB_ERR_GLOBAL_VARIABLE", "Global variable", "Internal Error", "Contact OceanBase Support"], + "-5102": ["OB_ERR_VARIABLE_IS_READONLY", "variable is read only", "Internal Error", "Contact OceanBase Support"], + "-5103": ["OB_ERR_INCORRECT_GLOBAL_LOCAL_VAR", "incorrect global or local variable", "Internal Error", "Contact OceanBase Support"], + "-5107": ["OB_INVALID_ARGUMENT_FOR_IS", "Invalid argument for IS operator", "Internal Error", "Contact OceanBase Support"], + "-5112": ["OB_ERR_USER_VARIABLE_UNKNOWN", "Unknown user variable", "Internal Error", "Contact OceanBase Support"], + "-5114": ["OB_INVALID_NUMERIC", "Invalid numeric", "Internal Error", "Contact OceanBase Support"], + "-5116": ["OB_SQL_LOG_OP_SETCHILD_OVERFLOW", "Logical operator child index overflow", "Internal Error", "Contact OceanBase Support"], + "-5117": ["OB_SQL_EXPLAIN_FAILED", "fail to explain plan", "Internal Error", "Contact OceanBase Support"], + "-5118": ["OB_SQL_OPT_COPY_OP_FAILED", "fail to copy logical operator", "Internal Error", "Contact OceanBase Support"], + "-5119": ["OB_SQL_OPT_GEN_PLAN_FALIED", "fail to generate plan", "Internal Error", "Contact OceanBase Support"], + "-5120": ["OB_SQL_OPT_CREATE_RAWEXPR_FAILED", "fail to create raw expr", "Internal Error", "Contact OceanBase Support"], + "-5121": ["OB_SQL_OPT_JOIN_ORDER_FAILED", "fail to generate join order", "Internal Error", "Contact OceanBase Support"], + "-5122": ["OB_SQL_OPT_ERROR", "optimizer general error", "Internal Error", "Contact OceanBase Support"], + "-5130": ["OB_SQL_RESOLVER_NO_MEMORY", "sql resolver no memory", "Internal Error", "Contact OceanBase Support"], + "-5131": ["OB_SQL_DML_ONLY", "plan cache support dml only", "Internal Error", "Contact OceanBase Support"], + "-5133": ["OB_ERR_NO_GRANT", "No such grant defined", "Internal Error", "Contact OceanBase Support"], + "-5134": ["OB_ERR_NO_DB_SELECTED", "No database selected", "Internal Error", "Contact OceanBase Support"], + "-5135": ["OB_SQL_PC_OVERFLOW", "plan cache is overflow", "Internal Error", "Contact OceanBase Support"], + "-5136": ["OB_SQL_PC_PLAN_DUPLICATE", "plan exists in plan cache already", "Internal Error", "Contact OceanBase Support"], + "-5137": ["OB_SQL_PC_PLAN_EXPIRE", "plan is expired", "Internal Error", "Contact OceanBase Support"], + "-5138": ["OB_SQL_PC_NOT_EXIST", "no plan exist", "Internal Error", "Contact OceanBase Support"], + "-5139": ["OB_SQL_PARAMS_LIMIT", "too many params plan cache not support", "Internal Error", "Contact OceanBase Support"], + "-5140": ["OB_SQL_PC_PLAN_SIZE_LIMIT", "plan is too big to add to plan cache", "Internal Error", "Contact OceanBase Support"], + "-5142": ["OB_ERR_UNKNOWN_CHARSET", "Unknown character set", "Internal Error", "Contact OceanBase Support"], + "-5143": ["OB_ERR_UNKNOWN_COLLATION", "Unknown collation", "Internal Error", "Contact OceanBase Support"], + "-5144": ["OB_ERR_COLLATION_MISMATCH", "The collation is not valid for the character set", "Internal Error", "Contact OceanBase Support"], + "-5145": ["OB_ERR_WRONG_VALUE_FOR_VAR", "Variable can't be set to the value", "Internal Error", "Contact OceanBase Support"], + "-5146": ["OB_UNKNOWN_PARTITION", "Unknown partition", "Internal Error", "Contact OceanBase Support"], + "-5147": ["OB_PARTITION_NOT_MATCH", "Found a row not matching the given partition set", "Internal Error", "Contact OceanBase Support"], + "-5148": ["OB_ER_PASSWD_LENGTH", " Password hash should be a 
40-digit hexadecimal number", "Internal Error", "Contact OceanBase Support"], + "-5149": ["OB_ERR_INSERT_INNER_JOIN_COLUMN", "Insert inner join column error", "Internal Error", "Contact OceanBase Support"], + "-5150": ["OB_TENANT_NOT_IN_SERVER", "Tenant not in this server", "Internal Error", "Contact OceanBase Support"], + "-5151": ["OB_TABLEGROUP_NOT_EXIST", "tablegroup not exist", "Internal Error", "Contact OceanBase Support"], + "-5153": ["OB_SUBQUERY_TOO_MANY_ROW", "Subquery returns more than 1 row", "Internal Error", "Contact OceanBase Support"], + "-5154": ["OB_ERR_BAD_DATABASE", "Unknown database", "Internal Error", "Contact OceanBase Support"], + "-5155": ["OB_CANNOT_USER", "User operation failed", "Internal Error", "Contact OceanBase Support"], + "-5156": ["OB_TENANT_EXIST", "tenant already exist", "Internal Error", "Contact OceanBase Support"], + "-5157": ["OB_TENANT_NOT_EXIST", "Unknown tenant", "Internal Error", "Contact OceanBase Support"], + "-5158": ["OB_DATABASE_EXIST", "Can't create database;database exists", "Internal Error", "Contact OceanBase Support"], + "-5159": ["OB_TABLEGROUP_EXIST", "tablegroup already exist", "Internal Error", "Contact OceanBase Support"], + "-5160": ["OB_ERR_INVALID_TENANT_NAME", "invalid tenant name specified in connection string", "Internal Error", "Contact OceanBase Support"], + "-5161": ["OB_EMPTY_TENANT", "tenant is empty", "Internal Error", "Contact OceanBase Support"], + "-5162": ["OB_WRONG_DB_NAME", "Incorrect database name", "Internal Error", "Contact OceanBase Support"], + "-5163": ["OB_WRONG_TABLE_NAME", "Incorrect table name", "Internal Error", "Contact OceanBase Support"], + "-5164": ["OB_WRONG_COLUMN_NAME", "Incorrect column name", "Internal Error", "Contact OceanBase Support"], + "-5165": ["OB_ERR_COLUMN_SPEC", "Incorrect column specifier", "Internal Error", "Contact OceanBase Support"], + "-5166": ["OB_ERR_DB_DROP_EXISTS", "Can't drop database;database doesn't exist", "Internal Error", "Contact OceanBase Support"], + "-5167": ["OB_ERR_DATA_TOO_LONG", "Data too long for column", "Internal Error", "Contact OceanBase Support"], + "-5168": ["OB_ERR_WRONG_VALUE_COUNT_ON_ROW", "column count does not match value count", "Internal Error", "Contact OceanBase Support"], + "-5169": ["OB_ERR_CREATE_USER_WITH_GRANT", "You are not allowed to create a user with GRANT", "Internal Error", "Contact OceanBase Support"], + "-5170": ["OB_ERR_NO_DB_PRIVILEGE", "Access denied for user to database", "Internal Error", "Contact OceanBase Support"], + "-5171": ["OB_ERR_NO_TABLE_PRIVILEGE", "Command denied to user for table", "Internal Error", "Contact OceanBase Support"], + "-5172": ["OB_INVALID_ON_UPDATE", "Invalid ON UPDATE clause", "Internal Error", "Contact OceanBase Support"], + "-5173": ["OB_INVALID_DEFAULT", "Invalid default value", "Internal Error", "Contact OceanBase Support"], + "-5174": ["OB_ERR_UPDATE_TABLE_USED", "Update table used", "Internal Error", "Contact OceanBase Support"], + "-5175": ["OB_ERR_COULUMN_VALUE_NOT_MATCH", "Column count doesn't match value count", "Internal Error", "Contact OceanBase Support"], + "-5176": ["OB_ERR_INVALID_GROUP_FUNC_USE", "Invalid use of group function", "Internal Error", "Contact OceanBase Support"], + "-5177": ["OB_CANT_AGGREGATE_2COLLATIONS", "Illegal mix of collations", "Internal Error", "Contact OceanBase Support"], + "-5178": ["OB_ERR_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD", "Field is of a not allowed type for this type of partitioning", "Internal Error", "Contact OceanBase Support"], + "-5179": 
["OB_ERR_TOO_LONG_IDENT", "Identifier name is too long", "Internal Error", "Contact OceanBase Support"], + "-5180": ["OB_ERR_WRONG_TYPE_FOR_VAR", "Incorrect argument type to variable", "Internal Error", "Contact OceanBase Support"], + "-5182": ["OB_ERR_PRIV_USAGE", "Incorrect usage of DB GRANT and GLOBAL PRIVILEGES", "Internal Error", "Contact OceanBase Support"], + "-5183": ["OB_ILLEGAL_GRANT_FOR_TABLE", "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", "Internal Error", "Contact OceanBase Support"], + "-5184": ["OB_ERR_REACH_AUTOINC_MAX", "Failed to read auto-increment value from storage engine", "Internal Error", "Contact OceanBase Support"], + "-5185": ["OB_ERR_NO_TABLES_USED", "No tables used", "Internal Error", "Contact OceanBase Support"], + "-5187": ["OB_CANT_REMOVE_ALL_FIELDS", "You can't delete all columns with ALTER TABLE; use DROP TABLE instead", "Internal Error", "Contact OceanBase Support"], + "-5189": ["OB_NO_PARTS_ERROR", "Number of partitions = 0 is not an allowed value", "Internal Error", "Contact OceanBase Support"], + "-5190": ["OB_WRONG_SUB_KEY", "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys"], + "-5191": ["OB_KEY_PART_0", "Key part length cannot be 0", "Internal Error", "Contact OceanBase Support"], + "-5192": ["OB_ERR_UNKNOWN_TIME_ZONE", "Unknown or incorrect time zone", "Internal Error", "Contact OceanBase Support"], + "-5193": ["OB_ERR_WRONG_AUTO_KEY", "Incorrect table definition; there can be only one auto column", "Internal Error", "Contact OceanBase Support"], + "-5194": ["OB_ERR_TOO_MANY_KEYS", "Too many keys specified", "Internal Error", "Contact OceanBase Support"], + "-5195": ["OB_ERR_TOO_MANY_ROWKEY_COLUMNS", "Too many key parts specified", "Internal Error", "Contact OceanBase Support"], + "-5196": ["OB_ERR_TOO_LONG_KEY_LENGTH", "Specified key was too long", "Internal Error", "Contact OceanBase Support"], + "-5197": ["OB_ERR_TOO_MANY_COLUMNS", "Too many columns", "Internal Error", "Contact OceanBase Support"], + "-5199": ["OB_ERR_TOO_BIG_ROWSIZE", "Row size too large", "Internal Error", "Contact OceanBase Support"], + "-5200": ["OB_ERR_UNKNOWN_TABLE", "Unknown table", "Internal Error", "Contact OceanBase Support"], + "-5201": ["OB_ERR_BAD_TABLE", "Unknown table", "Internal Error", "Contact OceanBase Support"], + "-5202": ["OB_ERR_TOO_BIG_SCALE", "Too big scale specified for column", "Internal Error", "Contact OceanBase Support"], + "-5203": ["OB_ERR_TOO_BIG_PRECISION", "Too big precision specified for column", "Internal Error", "Contact OceanBase Support"], + "-5206": ["OB_WRONG_GROUP_FIELD", "Can't group on column", "Internal Error", "Contact OceanBase Support"], + "-5207": ["OB_NON_UNIQ_ERROR", "Column is ambiguous", "Internal Error", "Contact OceanBase Support"], + "-5208": ["OB_ERR_NONUNIQ_TABLE", "Not unique table/alias", "Internal Error", "Contact OceanBase Support"], + "-5209": ["OB_ERR_CANT_DROP_FIELD_OR_KEY", "Can't DROP Column; check that column/key exists", "Internal Error", "Contact OceanBase Support"], + "-5210": ["OB_ERR_MULTIPLE_PRI_KEY", "Multiple primary key defined", "Internal Error", "Contact OceanBase Support"], + "-5211": ["OB_ERR_KEY_COLUMN_DOES_NOT_EXITS", "Key column doesn't exist in table", "Internal Error", "Contact OceanBase Support"], + "-5212": ["OB_ERR_AUTO_PARTITION_KEY", "auto-increment column should not be part of partition key", "Internal Error", "Contact OceanBase 
Support"], + "-5213": ["OB_ERR_CANT_USE_OPTION_HERE", "Incorrect usage/placement", "Internal Error", "Contact OceanBase Support"], + "-5214": ["OB_ERR_WRONG_OBJECT", "Wrong object", "Internal Error", "Contact OceanBase Support"], + "-5215": ["OB_ERR_ON_RENAME", "Error on rename table", "Internal Error", "Contact OceanBase Support"], + "-5216": ["OB_ERR_WRONG_KEY_COLUMN", "The used storage engine can't index column", "Internal Error", "Contact OceanBase Support"], + "-5217": ["OB_ERR_BAD_FIELD_ERROR", "Unknown column", "Internal Error", "Contact OceanBase Support"], + "-5218": ["OB_ERR_WRONG_FIELD_WITH_GROUP", "column is not in GROUP BY", "Internal Error", "Contact OceanBase Support"], + "-5219": ["OB_ERR_CANT_CHANGE_TX_CHARACTERISTICS", "Transaction characteristics can't be changed while a transaction is in progress", "Internal Error", "Contact OceanBase Support"], + "-5220": ["OB_ERR_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION", "Cannot execute statement in a READ ONLY transaction.", "Internal Error", "Contact OceanBase Support"], + "-5222": ["OB_ERR_TRUNCATED_WRONG_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], + "-5223": ["OB_ERR_WRONG_IDENT_NAME", "wrong ident name", "Internal Error", "Contact OceanBase Support"], + "-5224": ["OB_WRONG_NAME_FOR_INDEX", "Incorrect index name", "Internal Error", "Contact OceanBase Support"], + "-5226": ["OB_REACH_MEMORY_LIMIT", "plan cache memory used reach the high water mark.", "Internal Error", "Contact OceanBase Support"], + "-5228": ["OB_ERR_NON_UPDATABLE_TABLE", "The target table is not updatable", "Internal Error", "Contact OceanBase Support"], + "-5229": ["OB_ERR_WARN_DATA_OUT_OF_RANGE", "Out of range value for column", "Internal Error", "Contact OceanBase Support"], + "-5233": ["OB_ERR_OPTION_PREVENTS_STATEMENT", "The MySQL server is running with the --read-only option so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], + "-5234": ["OB_ERR_DB_READ_ONLY", "The database is read only so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], + "-5235": ["OB_ERR_TABLE_READ_ONLY", "The table is read only so it cannot execute this statement", "Internal Error", "Contact OceanBase Support"], + "-5236": ["OB_ERR_LOCK_OR_ACTIVE_TRANSACTION", "Can't execute the given command because you have active locked tables or an active transaction", "Internal Error", "Contact OceanBase Support"], + "-5237": ["OB_ERR_SAME_NAME_PARTITION_FIELD", "Duplicate partition field name", "Internal Error", "Contact OceanBase Support"], + "-5238": ["OB_ERR_TABLENAME_NOT_ALLOWED_HERE", "Table from one of the SELECTs cannot be used in global ORDER clause", "Internal Error", "Contact OceanBase Support"], + "-5239": ["OB_ERR_VIEW_RECURSIVE", "view contains recursion", "Internal Error", "Contact OceanBase Support"], + "-5240": ["OB_ERR_QUALIFIER", "Column part of USING clause cannot have qualifier", "Internal Error", "Contact OceanBase Support"], + "-5241": ["OB_ERR_WRONG_VALUE", "Incorrect value", "Internal Error", "Contact OceanBase Support"], + "-5242": ["OB_ERR_VIEW_WRONG_LIST", "View's SELECT and view's field list have different column counts", "Internal Error", "Contact OceanBase Support"], + "-5243": ["OB_SYS_VARS_MAYBE_DIFF_VERSION", "system variables' version maybe different", "Internal Error", "Contact OceanBase Support"], + "-5244": ["OB_ERR_AUTO_INCREMENT_CONFLICT", "Auto-increment value in UPDATE conflicts with internally generated values", "Internal Error", "Contact OceanBase Support"], + 
"-5245": ["OB_ERR_TASK_SKIPPED", "some tasks are skipped", "Internal Error", "Contact OceanBase Support"], + "-5246": ["OB_ERR_NAME_BECOMES_EMPTY", "Name has become ''", "Internal Error", "Contact OceanBase Support"], + "-5247": ["OB_ERR_REMOVED_SPACES", "Leading spaces are removed from name ", "Internal Error", "Contact OceanBase Support"], + "-5248": ["OB_WARN_ADD_AUTOINCREMENT_COLUMN", "Alter table add auto_increment column is dangerous", "Internal Error", "Contact OceanBase Support"], + "-5249": ["OB_WARN_CHAMGE_NULL_ATTRIBUTE", "Alter table change nullable column to not nullable is dangerous", "Internal Error", "Contact OceanBase Support"], + "-5250": ["OB_ERR_INVALID_CHARACTER_STRING", "Invalid character string", "Internal Error", "Contact OceanBase Support"], + "-5251": ["OB_ERR_KILL_DENIED", "You are not owner of thread", "Internal Error", "Contact OceanBase Support"], + "-5252": ["OB_ERR_COLUMN_DEFINITION_AMBIGUOUS", "Column definition is ambiguous. Column has both NULL and NOT NULL attributes", "Internal Error", "Contact OceanBase Support"], + "-5253": ["OB_ERR_EMPTY_QUERY", "Query was empty", "Internal Error", "Contact OceanBase Support"], + "-5255": ["OB_ERR_FIELD_NOT_FOUND_PART", "Field in list of fields for partition function not found in table", "Internal Error", "Contact OceanBase Support"], + "-5256": ["OB_ERR_PRIMARY_CANT_HAVE_NULL", "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", "Internal Error", "Contact OceanBase Support"], + "-5257": ["OB_ERR_PARTITION_FUNC_NOT_ALLOWED_ERROR", "The PARTITION function returns the wrong type", "Internal Error", "Contact OceanBase Support"], + "-5258": ["OB_ERR_INVALID_BLOCK_SIZE", "Invalid block size, block size should between 16384 and 1048576", "Internal Error", "Contact OceanBase Support"], + "-5259": ["OB_ERR_UNKNOWN_STORAGE_ENGINE", "Unknown storage engine", "Internal Error", "Contact OceanBase Support"], + "-5260": ["OB_ERR_TENANT_IS_LOCKED", "Tenant is locked", "Internal Error", "Contact OceanBase Support"], + "-5261": ["OB_EER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF", "A UNIQUE INDEX/PRIMARY KEY must include all columns in the table's partitioning function", "Internal Error", "Contact OceanBase Support"], + "-5262": ["OB_ERR_PARTITION_FUNCTION_IS_NOT_ALLOWED", "This partition function is not allowed", "Internal Error", "Contact OceanBase Support"], + "-5263": ["OB_ERR_AGGREGATE_ORDER_FOR_UNION", "aggregate order for union", "Internal Error", "Contact OceanBase Support"], + "-5264": ["OB_ERR_OUTLINE_EXIST", "Outline exists", "Internal Error", "Contact OceanBase Support"], + "-5265": ["OB_OUTLINE_NOT_EXIST", "Outline not exists", "Internal Error", "Contact OceanBase Support"], + "-5266": ["OB_WARN_OPTION_BELOW_LIMIT", "The value should be no less than the limit", "Internal Error", "Contact OceanBase Support"], + "-5267": ["OB_INVALID_OUTLINE", "invalid outline", "Internal Error", "Contact OceanBase Support"], + "-5268": ["OB_REACH_MAX_CONCURRENT_NUM", "SQL reach max concurrent num", "Internal Error", "Contact OceanBase Support"], + "-5269": ["OB_ERR_OPERATION_ON_RECYCLE_OBJECT", "can not perform DDL/DML over objects in Recycle Bin", "Internal Error", "Contact OceanBase Support"], + "-5270": ["OB_ERR_OBJECT_NOT_IN_RECYCLEBIN", "object not in RECYCLE BIN", "Internal Error", "Contact OceanBase Support"], + "-5271": ["OB_ERR_CON_COUNT_ERROR", "Too many connections", "Internal Error", "Contact OceanBase Support"], + "-5272": ["OB_ERR_OUTLINE_CONTENT_EXIST", "Outline content already exists when 
added", "Internal Error", "Contact OceanBase Support"], + "-5273": ["OB_ERR_OUTLINE_MAX_CONCURRENT_EXIST", "Max concurrent already exists when added", "Internal Error", "Contact OceanBase Support"], + "-5274": ["OB_ERR_VALUES_IS_NOT_INT_TYPE_ERROR", "VALUES value for partition must have type INT", "Internal Error", "Contact OceanBase Support"], + "-5275": ["OB_ERR_WRONG_TYPE_COLUMN_VALUE_ERROR", "Partition column values of incorrect type", "Internal Error", "Contact OceanBase Support"], + "-5276": ["OB_ERR_PARTITION_COLUMN_LIST_ERROR", "Inconsistency in usage of column lists for partitioning", "Internal Error", "Contact OceanBase Support"], + "-5277": ["OB_ERR_TOO_MANY_VALUES_ERROR", "Cannot have more than one value for this type of RANGE partitioning", "Internal Error", "Contact OceanBase Support"], + "-5278": ["OB_ERR_PARTITION_VALUE_ERROR", "This partition value with incorrect charset type", "Internal Error", "Contact OceanBase Support"], + "-5279": ["OB_ERR_PARTITION_INTERVAL_ERROR", "Partition interval must have type INT", "Internal Error", "Contact OceanBase Support"], + "-5280": ["OB_ERR_SAME_NAME_PARTITION", "Duplicate partition name", "Internal Error", "Contact OceanBase Support"], + "-5281": ["OB_ERR_RANGE_NOT_INCREASING_ERROR", "VALUES LESS THAN value must be strictly increasing for each partition", "Internal Error", "Contact OceanBase Support"], + "-5282": ["OB_ERR_PARSE_PARTITION_RANGE", "Wrong number of partitions defined, mismatch with previous setting", "Internal Error", "Contact OceanBase Support"], + "-5283": ["OB_ERR_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF", "A PRIMARY KEY must include all columns in the table's partitioning function", "Internal Error", "Contact OceanBase Support"], + "-5284": ["OB_NO_PARTITION_FOR_GIVEN_VALUE", "Table has no partition for value", "Internal Error", "Contact OceanBase Support"], + "-5285": ["OB_EER_NULL_IN_VALUES_LESS_THAN", "Not allowed to use NULL value in VALUES LESS THAN", "Internal Error", "Contact OceanBase Support"], + "-5286": ["OB_ERR_PARTITION_CONST_DOMAIN_ERROR", "Partition constant is out of partition function domain", "Internal Error", "Contact OceanBase Support"], + "-5287": ["OB_ERR_TOO_MANY_PARTITION_FUNC_FIELDS", "Too many fields in 'list of partition fields'", "Internal Error", "Contact OceanBase Support"], + "-5288": ["OB_ERR_BAD_FT_COLUMN", "Column cannot be part of FULLTEXT index", "Internal Error", "Contact OceanBase Support"], + "-5289": ["OB_ERR_KEY_DOES_NOT_EXISTS", "key does not exist in table", "Internal Error", "Contact OceanBase Support"], + "-5290": ["OB_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN", "non-default value for generated column is not allowed", "Internal Error", "Contact OceanBase Support"], + "-5291": ["OB_ERR_BAD_CTXCAT_COLUMN", "The CTXCAT column must be contiguous in the index column list", "Internal Error", "Contact OceanBase Support"], + "-5292": ["OB_ERR_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN", "not supported for generated columns", "Internal Error", "Contact OceanBase Support"], + "-5293": ["OB_ERR_DEPENDENT_BY_GENERATED_COLUMN", "Column has a generated column dependency", "Internal Error", "Contact OceanBase Support"], + "-5294": ["OB_ERR_TOO_MANY_ROWS", "Result consisted of more than one row", "Internal Error", "Contact OceanBase Support"], + "-5295": ["OB_WRONG_FIELD_TERMINATORS", "Field separator argument is not what is expected; check the manual", "Internal Error", "Contact OceanBase Support"], + "-5296": ["OB_NO_READABLE_REPLICA", "there has no readable replica", "Internal Error", "Contact 
OceanBase Support"], + "-5297": ["OB_ERR_UNEXPECTED_TZ_TRANSITION", "unexpected time zone info transition", "Internal Error", "Contact OceanBase Support"], + "-5298": ["OB_ERR_SYNONYM_EXIST", "synonym exists", "Internal Error", "Contact OceanBase Support"], + "-5299": ["OB_SYNONYM_NOT_EXIST", "synonym not exists", "Internal Error", "Contact OceanBase Support"], + "-5300": ["OB_ERR_MISS_ORDER_BY_EXPR", "missing ORDER BY expression in the window specification", "Internal Error", "Contact OceanBase Support"], + "-5301": ["OB_ERR_NOT_CONST_EXPR", "The argument of the window function should be a constant for a partition", "Internal Error", "Contact OceanBase Support"], + "-5302": ["OB_ERR_PARTITION_MGMT_ON_NONPARTITIONED", "Partition management on a not partitioned table is not possible", "Internal Error", "Contact OceanBase Support"], + "-5303": ["OB_ERR_DROP_PARTITION_NON_EXISTENT", "Error in list of partitions", "Internal Error", "Contact OceanBase Support"], + "-5304": ["OB_ERR_PARTITION_MGMT_ON_TWOPART_TABLE", "Partition management on a two-part table is not possible", "Internal Error", "Contact OceanBase Support"], + "-5305": ["OB_ERR_ONLY_ON_RANGE_LIST_PARTITION", "can only be used on RANGE/LIST partitions", "Internal Error", "Contact OceanBase Support"], + "-5306": ["OB_ERR_DROP_LAST_PARTITION", "Cannot remove all partitions, use DROP TABLE instead", "Internal Error"], + "-5307": ["OB_ERR_SCHEDULER_THREAD_NOT_ENOUGH", "Scheduler thread number is not enough", "Internal Error", "Contact OceanBase Support"], + "-5308": ["OB_ERR_IGNORE_USER_HOST_NAME", "Ignore the host name", "Internal Error", "Contact OceanBase Support"], + "-5309": ["OB_IGNORE_SQL_IN_RESTORE", "Ignore sql in restore process", "Internal Error", "Contact OceanBase Support"], + "-5310": ["OB_ERR_TEMPORARY_TABLE_WITH_PARTITION", "Cannot create temporary table with partitions", "Internal Error", "Contact OceanBase Support"], + "-5311": ["OB_ERR_INVALID_COLUMN_ID", "Invalid column id", "Internal Error", "Contact OceanBase Support"], + "-5312": ["OB_SYNC_DDL_DUPLICATE", "Duplicated ddl id", "Internal Error", "Contact OceanBase Support"], + "-5313": ["OB_SYNC_DDL_ERROR", "Failed to sync ddl", "Internal Error", "Contact OceanBase Support"], + "-5314": ["OB_ERR_ROW_IS_REFERENCED", "Cannot delete or update a parent row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"], + "-5315": ["OB_ERR_NO_REFERENCED_ROW", "Cannot add or update a child row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"], + "-5317": ["OB_ERR_CANNOT_ADD_FOREIGN", "Cannot add foreign key constraint", "Internal Error", "Contact OceanBase Support"], + "-5318": ["OB_ERR_WRONG_FK_DEF", "Incorrect foreign key definition: Key reference and table reference don't match", "Internal Error", "Contact OceanBase Support"], + "-5319": ["OB_ERR_INVALID_CHILD_COLUMN_LENGTH_FK", "Invalid child column length", "Internal Error", "Contact OceanBase Support"], + "-5320": ["OB_ERR_ALTER_COLUMN_FK", "Cannot alter foreign key column", "Internal Error", "Contact OceanBase Support"], + "-5321": ["OB_ERR_CONNECT_BY_REQUIRED", "CONNECT BY clause required in this query block", "Internal Error", "Contact OceanBase Support"], + "-5322": ["OB_ERR_INVALID_PSEUDO_COLUMN_PLACE", "Specified pseudocolumn, operator or function not allowed here", "Internal Error", "Contact OceanBase Support"], + "-5323": ["OB_ERR_NOCYCLE_REQUIRED", "NOCYCLE keyword is required with CONNECT_BY_ISCYCLE pseudocolumn", "Internal Error", "Contact OceanBase 
Support"], + "-5324": ["OB_ERR_CONNECT_BY_LOOP", "CONNECT BY loop in user data", "Internal Error", "Contact OceanBase Support"], + "-5325": ["OB_ERR_INVALID_SIBLINGS", "ORDER SIBLINGS BY clause not allowed here", "Internal Error", "Contact OceanBase Support"], + "-5326": ["OB_ERR_INVALID_SEPARATOR", "when using SYS_CONNECT_BY_PATH function, cannot have separator as part of column value", "Internal Error", "Contact OceanBase Support"], + "-5327": ["OB_ERR_INVALID_SYNONYM_NAME", "Database can not be specified in public synonym", "Internal Error", "Contact OceanBase Support"], + "-5328": ["OB_ERR_LOOP_OF_SYNONYM", "Looping chain of synonyms", "Internal Error", "Contact OceanBase Support"], + "-5329": ["OB_ERR_SYNONYM_SAME_AS_OBJECT", "Cannot create a synonym with same name as object", "Internal Error", "Contact OceanBase Support"], + "-5330": ["OB_ERR_SYNONYM_TRANSLATION_INVALID", "Synonym translation is no longer valid", "Internal Error", "Contact OceanBase Support"], + "-5331": ["OB_ERR_EXIST_OBJECT", "Name is already used by an existing object", "Internal Error", "Contact OceanBase Support"], + "-5332": ["OB_ERR_ILLEGAL_VALUE_FOR_TYPE", "Illegal value found during parsing", "Internal Error", "Contact OceanBase Support"], + "-5333": ["OB_ER_TOO_LONG_SET_ENUM_VALUE", "Too long enumeration/set value for column.", "Internal Error", "Contact OceanBase Support"], + "-5334": ["OB_ER_DUPLICATED_VALUE_IN_TYPE", "Column has duplicated value", "Internal Error", "Contact OceanBase Support"], + "-5335": ["OB_ER_TOO_BIG_ENUM", "Too many enumeration values for column", "Internal Error", "Contact OceanBase Support"], + "-5336": ["OB_ERR_TOO_BIG_SET", "Too many strings for column", "Internal Error", "Contact OceanBase Support"], + "-5337": ["OB_ERR_WRONG_ROWID", "rowid is wrong", "Internal Error", "Contact OceanBase Support"], + "-5338": ["OB_ERR_INVALID_WINDOW_FUNCTION_PLACE", "Window Function not allowed here", "Internal Error", "Contact OceanBase Support"], + "-5339": ["OB_ERR_PARSE_PARTITION_LIST", "Fail to parse list partition", "Internal Error", "Contact OceanBase Support"], + "-5340": ["OB_ERR_MULTIPLE_DEF_CONST_IN_LIST_PART", "Multiple definition of same constant in list partitioning", "Internal Error", "Contact OceanBase Support"], + "-5341": ["OB_ERR_INVALID_TIMEZONE_REGION_ID", "timezone region ID is invalid", "Internal Error", "Contact OceanBase Support"], + "-5342": ["OB_ERR_INVALID_HEX_NUMBER", "invalid hex number", "Internal Error", "Contact OceanBase Support"], + "-5343": ["OB_ERR_WRONG_FUNC_ARGUMENTS_TYPE", "wrong number or types of arguments in function", "Internal Error", "Contact OceanBase Support"], + "-5344": ["OB_ERR_MULTI_UPDATE_KEY_CONFLICT", "Primary key/partition key update is not allowed", "Internal Error", "Contact OceanBase Support"], + "-5345": ["OB_ERR_INSUFFICIENT_PX_WORKER", "insufficient parallel query worker available", "Internal Error", "Contact OceanBase Support"], + "-5346": ["OB_ERR_FOR_UPDATE_EXPR_NOT_ALLOWED", "FOR UPDATE of this query expression is not allowed", "Internal Error", "Contact OceanBase Support"], + "-5347": ["OB_ERR_WIN_FUNC_ARG_NOT_IN_PARTITION_BY", "argument should be a function of expressions in PARTITION BY", "Internal Error", "Contact OceanBase Support"], + "-5348": ["OB_ERR_TOO_LONG_STRING_IN_CONCAT", "result of string concatenation is too long", "Internal Error", "Contact OceanBase Support"], + "-5350": ["OB_ERR_UPD_CAUSE_PART_CHANGE", "updating partition key column would cause a partition change", "Internal Error", "Contact OceanBase 
Support"], + "-5541": ["OB_ERR_SP_ALREADY_EXISTS", "procedure/function already exists", "Internal Error", "Contact OceanBase Support"], + "-5542": ["OB_ERR_SP_DOES_NOT_EXIST", "procedure/function does not exist", "Internal Error", "Contact OceanBase Support"], + "-5543": ["OB_ERR_SP_UNDECLARED_VAR", "Undeclared variable", "Internal Error", "Contact OceanBase Support"], + "-5544": ["OB_ERR_SP_UNDECLARED_TYPE", "Undeclared type", "Internal Error", "Contact OceanBase Support"], + "-5545": ["OB_ERR_SP_COND_MISMATCH", "Undefined CONDITION", "Internal Error", "Contact OceanBase Support"], + "-5546": ["OB_ERR_SP_LILABEL_MISMATCH", "no matching label", "Internal Error", "Contact OceanBase Support"], + "-5547": ["OB_ERR_SP_CURSOR_MISMATCH", "Undefined CURSOR", "Internal Error", "Contact OceanBase Support"], + "-5548": ["OB_ERR_SP_DUP_PARAM", "Duplicate parameter", "Internal Error", "Contact OceanBase Support"], + "-5549": ["OB_ERR_SP_DUP_VAR", "Duplicate variable", "Internal Error", "Contact OceanBase Support"], + "-5550": ["OB_ERR_SP_DUP_TYPE", "Duplicate type", "Internal Error", "Contact OceanBase Support"], + "-5551": ["OB_ERR_SP_DUP_CONDITION", "Duplicate condition", "Internal Error", "Contact OceanBase Support"], + "-5552": ["OB_ERR_SP_DUP_LABEL", "Duplicate label", "Internal Error", "Contact OceanBase Support"], + "-5553": ["OB_ERR_SP_DUP_CURSOR", "Duplicate cursor", "Internal Error", "Contact OceanBase Support"], + "-5554": ["OB_ERR_SP_INVALID_FETCH_ARG", "Incorrect number of FETCH variables", "Internal Error", "Contact OceanBase Support"], + "-5555": ["OB_ERR_SP_WRONG_ARG_NUM", "Incorrect number of arguments", "Internal Error", "Contact OceanBase Support"], + "-5556": ["OB_ERR_SP_UNHANDLED_EXCEPTION", "Unhandled exception has occurred in PL", "Internal Error", "Contact OceanBase Support"], + "-5557": ["OB_ERR_SP_BAD_CONDITION_TYPE", "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE", "Internal Error", "Contact OceanBase Support"], + "-5558": ["OB_ERR_PACKAGE_ALREADY_EXISTS", "package already exists", "Internal Error", "Contact OceanBase Support"], + "-5559": ["OB_ERR_PACKAGE_DOSE_NOT_EXIST", "package does not exist", "Internal Error", "Contact OceanBase Support"], + "-5560": ["OB_EER_UNKNOWN_STMT_HANDLER", "Unknown prepared statement handle", "Internal Error", "Contact OceanBase Support"], + "-5561": ["OB_ERR_INVALID_WINDOW_FUNC_USE", "Invalid use of window function", "Internal Error", "Contact OceanBase Support"], + "-5563": ["OB_ERR_CONTRAINT_NOT_FOUND", "Constraint not found", "Internal Error", "Contact OceanBase Support"], + "-5564": ["OB_ERR_ALTER_TABLE_ALTER_DUPLICATED_INDEX", "Duplicate alter index operations", "Internal Error", "Contact OceanBase Support"], + "-5565": ["OB_EER_INVALID_ARGUMENT_FOR_LOGARITHM", "Invalid argument for logarithm", "Internal Error", "Contact OceanBase Support"], + "-5566": ["OB_ERR_REORGANIZE_OUTSIDE_RANGE", "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range", "Internal Error", "Contact OceanBase Support"], + "-5568": ["OB_ER_UNSUPPORTED_PS", "This command is not supported in the prepared statement protocol yet", "Internal Error", "Contact OceanBase Support"], + "-5569": ["OB_ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG", "stmt is not allowed in stored function", "Internal Error", "Contact OceanBase Support"], + "-5570": ["OB_ER_SP_NO_RECURSION", "Recursive stored functions are not allowed.", "Internal Error", "Contact OceanBase Support"], + "-5571": ["OB_ER_SP_CASE_NOT_FOUND", "Case not 
found for CASE statement", "Internal Error", "Contact OceanBase Support"], + "-5572": ["OB_ERR_INVALID_SPLIT_COUNT", "a partition may be split into exactly two new partitions", "Internal Error", "Contact OceanBase Support"], + "-5573": ["OB_ERR_INVALID_SPLIT_GRAMMAR", "this physical attribute may not be specified for a table partition", "Internal Error", "Contact OceanBase Support"], + "-5574": ["OB_ERR_MISS_VALUES", "missing VALUES keyword", "Internal Error", "Contact OceanBase Support"], + "-5575": ["OB_ERR_MISS_AT_VALUES", "missing AT or VALUES keyword", "Internal Error", "Contact OceanBase Support"], + "-5576": ["OB_ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG", "Explicit or implicit commit is not allowed in stored function.", "Internal Error", "Contact OceanBase Support"], + "-5577": ["OB_PC_GET_LOCATION_ERROR", "Plan cache get location failed", "Internal Error", "Contact OceanBase Support"], + "-5578": ["OB_PC_LOCK_CONFLICT", "Plan cache lock conflict", "Internal Error", "Contact OceanBase Support"], + "-5579": ["OB_ER_SP_NO_RETSET", "Not allowed to return a result set in pl function", "Internal Error", "Contact OceanBase Support"], + "-5580": ["OB_ER_SP_NORETURNEND", "FUNCTION ended without RETURN", "Internal Error", "Contact OceanBase Support"], + "-5581": ["OB_ERR_SP_DUP_HANDLER", "Duplicate handler declared in the same block", "Internal Error", "Contact OceanBase Support"], + "-5582": ["OB_ER_SP_NO_RECURSIVE_CREATE", "Can't create a routine from within another routine", "Internal Error", "Contact OceanBase Support"], + "-5583": ["OB_ER_SP_BADRETURN", "RETURN is only allowed in a FUNCTION", "Internal Error", "Contact OceanBase Support"], + "-5584": ["OB_ER_SP_BAD_CURSOR_SELECT", "Cursor SELECT must not have INTO", "Internal Error", "Contact OceanBase Support"], + "-5585": ["OB_ER_SP_BAD_SQLSTATE", "Bad SQLSTATE", "Internal Error", "Contact OceanBase Support"], + "-5586": ["OB_ER_SP_VARCOND_AFTER_CURSHNDLR", "Variable or condition declaration after cursor or handler declaration", "Internal Error", "Contact OceanBase Support"], + "-5587": ["OB_ER_SP_CURSOR_AFTER_HANDLER", "Cursor declaration after handler declaration", "Internal Error", "Contact OceanBase Support"], + "-5588": ["OB_ER_SP_WRONG_NAME", "Incorrect routine name", "Internal Error", "Contact OceanBase Support"], + "-5589": ["OB_ER_SP_CURSOR_ALREADY_OPEN", "Cursor is already open", "Internal Error", "Contact OceanBase Support"], + "-5590": ["OB_ER_SP_CURSOR_NOT_OPEN", "Cursor is not open", "Internal Error", "Contact OceanBase Support"], + "-5591": ["OB_ER_SP_CANT_SET_AUTOCOMMIT", "Not allowed to set autocommit from a stored function", "Internal Error", "Contact OceanBase Support"], + "-5592": ["OB_ER_SP_NOT_VAR_ARG", "OUT or INOUT argument for routine is not a variable", "Internal Error", "Contact OceanBase Support"], + "-5593": ["OB_ER_SP_LILABEL_MISMATCH", "with no matching label", "Internal Error", "Contact OceanBase Support"], + "-5594": ["OB_ERR_TRUNCATE_ILLEGAL_FK", "Cannot truncate a table referenced in a foreign key constraint", "Internal Error", "Contact OceanBase Support"], + "-5596": ["OB_ER_INVALID_USE_OF_NULL", "Invalid use of NULL value", "Internal Error", "Contact OceanBase Support"], + "-5597": ["OB_ERR_SPLIT_LIST_LESS_VALUE", "last resulting partition cannot contain bounds", "Internal Error", "Contact OceanBase Support"], + "-5598": ["OB_ERR_ADD_PARTITION_TO_DEFAULT_LIST", "cannot add partition when DEFAULT partition exists", "Internal Error", "Contact OceanBase Support"], + "-5599": 
["OB_ERR_SPLIT_INTO_ONE_PARTITION", "cannot split partition into one partition, use rename instead", "Internal Error"], + "-5600": ["OB_ERR_NO_TENANT_PRIVILEGE", "can not create user in sys tenant", "Internal Error", "Contact OceanBase Support"], + "-5601": ["OB_ERR_INVALID_PERCENTAGE", "Percentage should between 1 and 99", "Internal Error", "Contact OceanBase Support"], + "-5602": ["OB_ERR_COLLECT_HISTOGRAM", "Should collect histogram after major freeze", "Internal Error", "Contact OceanBase Support"], + "-5603": ["OB_ER_TEMP_TABLE_IN_USE", "Attempt to create, alter or drop an index on temporary table already in use", "Internal Error"], + "-5604": ["OB_ERR_INVALID_NLS_PARAMETER_STRING", "invalid NLS parameter string used in SQL function", "Internal Error", "Contact OceanBase Support"], + "-5605": ["OB_ERR_DATETIME_INTERVAL_PRECISION_OUT_OF_RANGE", "datetime/interval precision is out of range", "Internal Error", "Contact OceanBase Support"], + "-5606": ["OB_ERR_INVALID_NUMBER_FORMAT_MODEL", "Invalid number format model", "Internal Error", "Contact OceanBase Support"], + "-5607": ["OB_ERR_CMD_NOT_PROPERLY_ENDED", "SQL command not properly ended", "Internal Error", "Contact OceanBase Support"], + "-5608": ["OB_ERR_INVALID_NUMBER_FORMAT_MODEL", "invalid number format model", "Internal Error", "Contact OceanBase Support"], + "-5609": ["OB_WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED", "Non-ASCII separator arguments are not fully supported", "Internal Error", "Contact OceanBase Support"], + "-5610": ["OB_WARN_AMBIGUOUS_FIELD_TERM", "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY", "Internal Error", "Contact OceanBase Support"], + "-5611": ["OB_WARN_TOO_FEW_RECORDS", "Row doesn't contain data for all columns", "Internal Error", "Contact OceanBase Support"], + "-5612": ["OB_WARN_TOO_MANY_RECORDS", "Row was truncated; it contained more data than there were input columns", "Internal Error", "Contact OceanBase Support"], + "-5613": ["OB_ERR_TOO_MANY_VALUES", "too many values", "Internal Error", "Contact OceanBase Support"], + "-5614": ["OB_ERR_NOT_ENOUGH_VALUES", "not enough values", "Internal Error", "Contact OceanBase Support"], + "-5615": ["OB_ERR_MORE_THAN_ONE_ROW", "single-row subquery returns more than one row", "Internal Error", "Contact OceanBase Support"], + "-5616": ["OB_ERR_NOT_SUBQUERY", "UPDATE ... 
SET expression must be a subquery", "Internal Error", "Contact OceanBase Support"], + "-5617": ["OB_INAPPROPRIATE_INTO", "inappropriate INTO", "Internal Error", "Contact OceanBase Support"], + "-5618": ["OB_ERR_TABLE_IS_REFERENCED", "Cannot delete or update a parent row: a foreign key constraint fails", "Internal Error", "Contact OceanBase Support"], + "-5619": ["OB_ERR_QUALIFIER_EXISTS_FOR_USING_COLUMN", "Column part of using clause can not have qualifier", "Internal Error", "Contact OceanBase Support"], + "-5620": ["OB_ERR_OUTER_JOIN_NESTED", "two tables cannot be outer-joined to each other", "Internal Error", "Contact OceanBase Support"], + "-5621": ["OB_ERR_MULTI_OUTER_JOIN_TABLE", "a predicate may reference only one outer-joined table", "Internal Error", "Contact OceanBase Support"], + "-5622": ["OB_ERR_OUTER_JOIN_ON_CORRELATION_COLUMN", "an outer join cannot be specified on a correlation column", "Internal Error", "Contact OceanBase Support"], + "-5624": ["OB_ERR_OUTER_JOIN_WITH_SUBQUERY", "a column may not be outer-joined to a subquery", "Internal Error", "Contact OceanBase Support"], + "-5627": ["OB_SCHEMA_EAGAIN", "Schema try again", "Internal Error", "Contact OceanBase Support"], + "-5628": ["OB_ERR_ZERO_LEN_COL", "zero-length columns are not allowed", "Internal Error", "Contact OceanBase Support"], + "-5629": ["OB_ERR_YEAR_CONFLICTS_WITH_JULIAN_DATE", "year conflicts with Julian date", "Internal Error", "Contact OceanBase Support"], + "-5630": ["OB_ERR_DAY_OF_YEAR_CONFLICTS_WITH_JULIAN_DATE", "day of year conflicts with Julian date", "Internal Error", "Contact OceanBase Support"], + "-5631": ["OB_ERR_MONTH_CONFLICTS_WITH_JULIAN_DATE", "month conflicts with Julian date", "Internal Error", "Contact OceanBase Support"], + "-5632": ["OB_ERR_DAY_OF_MONTH_CONFLICTS_WITH_JULIAN_DATE", "day of month conflicts with Julian date", "Internal Error", "Contact OceanBase Support"], + "-5633": ["OB_ERR_DAY_OF_WEEK_CONFLICTS_WITH_JULIAN_DATE", "day of week conflicts with Julian date", "Internal Error", "Contact OceanBase Support"], + "-5634": ["OB_ERR_HOUR_CONFLICTS_WITH_SECONDS_IN_DAY", "hour conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"], + "-5635": ["OB_ERR_MINUTES_OF_HOUR_CONFLICTS_WITH_SECONDS_IN_DAY", "minutes of hour conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"], + "-5636": ["OB_ERR_SECONDS_OF_MINUTE_CONFLICTS_WITH_SECONDS_IN_DAY", "seconds of minute conflicts with seconds in day", "Internal Error", "Contact OceanBase Support"], + "-5637": ["OB_ERR_DATE_NOT_VALID_FOR_MONTH_SPECIFIED", "date not valid for month specified", "Internal Error", "Contact OceanBase Support"], + "-5638": ["OB_ERR_INPUT_VALUE_NOT_LONG_ENOUGH", "input value not long enough for date format", "Internal Error", "Contact OceanBase Support"], + "-5640": ["OB_ERR_INVALID_QUARTER_VALUE", "quarter must be between 1 and 4", "Internal Error", "Contact OceanBase Support"], + "-5641": ["OB_ERR_INVALID_MONTH", "not a valid month", "Internal Error", "Contact OceanBase Support"], + "-5642": ["OB_ERR_INVALID_DAY_OF_THE_WEEK", "not a valid day of the week", "Internal Error", "Contact OceanBase Support"], + "-5644": ["OB_ERR_INVALID_HOUR12_VALUE", "hour must be between 1 and 12", "Internal Error", "Contact OceanBase Support"], + "-5645": ["OB_ERR_INVALID_HOUR24_VALUE", "hour must be between 0 and 23", "Internal Error", "Contact OceanBase Support"], + "-5646": ["OB_ERR_INVALID_MINUTES_VALUE", "minutes must be between 0 and 59", "Internal Error", "Contact OceanBase 
Support"], + "-5647": ["OB_ERR_INVALID_SECONDS_VALUE", "seconds must be between 0 and 59", "Internal Error", "Contact OceanBase Support"], + "-5648": ["OB_ERR_INVALID_SECONDS_IN_DAY_VALUE", "seconds in day must be between 0 and 86399", "Internal Error", "Contact OceanBase Support"], + "-5649": ["OB_ERR_INVALID_JULIAN_DATE_VALUE", "julian date must be between 1 and 5373484", "Internal Error", "Contact OceanBase Support"], + "-5650": ["OB_ERR_AM_OR_PM_REQUIRED", "AM/A.M. or PM/P.M. required", "Internal Error", "Contact OceanBase Support"], + "-5651": ["OB_ERR_BC_OR_AD_REQUIRED", "BC/B.C. or AD/A.D. required", "Internal Error", "Contact OceanBase Support"], + "-5652": ["OB_ERR_FORMAT_CODE_APPEARS_TWICE", "format code appears twice", "Internal Error", "Contact OceanBase Support"], + "-5653": ["OB_ERR_DAY_OF_WEEK_SPECIFIED_MORE_THAN_ONCE", "day of week may only be specified once", "Internal Error", "Contact OceanBase Support"], + "-5654": ["OB_ERR_SIGNED_YEAR_PRECLUDES_USE_OF_BC_AD", "signed year precludes use of BC/AD", "Internal Error", "Contact OceanBase Support"], + "-5655": ["OB_ERR_JULIAN_DATE_PRECLUDES_USE_OF_DAY_OF_YEAR", "Julian date precludes use of day of year", "Internal Error", "Contact OceanBase Support"], + "-5656": ["OB_ERR_YEAR_MAY_ONLY_BE_SPECIFIED_ONCE", "year may only be specified once", "Internal Error", "Contact OceanBase Support"], + "-5657": ["OB_ERR_HOUR_MAY_ONLY_BE_SPECIFIED_ONCE", "hour may only be specified once", "Internal Error", "Contact OceanBase Support"], + "-5658": ["OB_ERR_AM_PM_CONFLICTS_WITH_USE_OF_AM_DOT_PM_DOT", "AM/PM conflicts with use of A.M./P.M.", "Internal Error", "Contact OceanBase Support"], + "-5659": ["OB_ERR_BC_AD_CONFLICT_WITH_USE_OF_BC_DOT_AD_DOT", "BC/AD conflicts with use of B.C./A.D.", "Internal Error", "Contact OceanBase Support"], + "-5660": ["OB_ERR_MONTH_MAY_ONLY_BE_SPECIFIED_ONCE", "month may only be specified once", "Internal Error", "Contact OceanBase Support"], + "-5661": ["OB_ERR_DAY_OF_WEEK_MAY_ONLY_BE_SPECIFIED_ONCE", "day of week may only be specified once", "Internal Error", "Contact OceanBase Support"], + "-5662": ["OB_ERR_FORMAT_CODE_CANNOT_APPEAR", "format code cannot appear in date input format", "Internal Error", "Contact OceanBase Support"], + "-5663": ["OB_ERR_NON_NUMERIC_CHARACTER_VALUE", "a non-numeric character was found where a numeric was expected", "Internal Error", "Contact OceanBase Support"], + "-5664": ["OB_INVALID_MERIDIAN_INDICATOR_USE", "'HH24' precludes use of meridian indicator", "Internal Error", "Contact OceanBase Support"], + "-5665": ["OB_ERR_INVALID_CHAR_FOLLOWING_ESCAPE_CHAR", "missing or illegal character following the escape character", "Internal Error", "Contact OceanBase Support"], + "-5666": ["OB_ERR_INVALID_ESCAPE_CHAR_LENGTH", "escape character must be character string of length 1", "Internal Error", "Contact OceanBase Support"], + "-5667": ["OB_ERR_DAY_OF_MONTH_RANGE", "day of month must be between 1 and last day of month", "Internal Error", "Contact OceanBase Support"], + "-5668": ["OB_ERR_NOT_SELECTED_EXPR", "not a SELECTed expression", "Internal Error", "Contact OceanBase Support"], + "-5671": ["OB_ERR_UK_PK_DUPLICATE", "such unique or primary key already exists in the table", "Internal Error", "Contact OceanBase Support"], + "-5672": ["OB_ERR_COLUMN_LIST_ALREADY_INDEXED", "such column list already indexed", "Internal Error", "Contact OceanBase Support"], + "-5673": ["OB_ERR_BUSHY_TREE_NOT_SUPPORTED", "PX does not support processing a bushy tree", "Internal Error", "Contact OceanBase 
Support"], + "-5674": ["OB_ERR_ARGUMENT_OUT_OF_RANGE", "argument is out of range", "Internal Error", "Contact OceanBase Support"], + "-5675": ["OB_ERR_ORDER_BY_ITEM_NOT_IN_SELECT_LIST", "ORDER BY item must be the number of a SELECT-list expression", "Internal Error", "Contact OceanBase Support"], + "-5676": ["OB_ERR_INTERVAL_INVALID", "the interval is invalid", "Internal Error", "Contact OceanBase Support"], + "-5677": ["OB_ERR_NUMERIC_OR_VALUE_ERROR", "PL/SQL: numeric or value error", "Internal Error", "Contact OceanBase Support"], + "-5678": ["OB_ERR_CONSTRAINT_NAME_DUPLICATE", "name already used by an existing constraint", "Internal Error", "Contact OceanBase Support"], + "-5679": ["OB_ERR_ONLY_HAVE_INVISIBLE_COL_IN_TABLE", "table must have at least one column that is not invisible", "Internal Error", "Contact OceanBase Support"], + "-5680": ["OB_ERR_INVISIBLE_COL_ON_UNSUPPORTED_TABLE_TYPE", "Invisible column is not supported on this type of table.", "Internal Error", "Contact OceanBase Support"], + "-5681": ["OB_ERR_MODIFY_COL_VISIBILITY_COMBINED_WITH_OTHER_OPTION", "Column visibility modifications cannot be combined with any other modified column DDL option.", "Internal Error", "Contact OceanBase Support"], + "-5682": ["OB_ERR_MODIFY_COL_VISIBILITY_BY_SYS_USER", "The visibility of a column from a table owned by a SYS user cannot be changed.", "Internal Error", "Contact OceanBase Support"], + "-5683": ["OB_ERR_TOO_MANY_ARGS_FOR_FUN", "too many arguments for function", "too many arguments for function", "Internal Error"], + "-6001": ["OB_TRANSACTION_SET_VIOLATION", "Transaction set changed during the execution", "Internal Error", "Contact OceanBase Support"], + "-6002": ["OB_TRANS_ROLLBACKED", "Transaction rollbacked", "Internal Error", "Contact OceanBase Support"], + "-6003": ["OB_ERR_EXCLUSIVE_LOCK_CONFLICT", "Lock wait timeout exceeded; try restarting transaction", "Internal Error", "Contact OceanBase Support"], + "-6004": ["OB_ERR_SHARED_LOCK_CONFLICT", "Shared lock conflict", "Internal Error", "Contact OceanBase Support"], + "-6005": ["OB_TRY_LOCK_ROW_CONFLICT", "Try lock row conflict", "Internal Error", "Contact OceanBase Support"], + "-6006": ["OB_ERR_EXCLUSIVE_LOCK_CONFLICT_NOWAIT", "Lock wait timeout exceeded; try restarting transaction", "Internal Error", "Contact OceanBase Support"], + "-6201": ["OB_CLOCK_OUT_OF_ORDER", "Clock out of order", "Internal Error", "Contact OceanBase Support"], + "-6203": ["OB_MASK_SET_NO_NODE", "Mask set has no node", "Internal Error", "Contact OceanBase Support"], + "-6204": ["OB_TRANS_HAS_DECIDED", "Transaction has been decided", "Internal Error", "Contact OceanBase Support"], + "-6205": ["OB_TRANS_INVALID_STATE", "Transaction state invalid", "Internal Error", "Contact OceanBase Support"], + "-6206": ["OB_TRANS_STATE_NOT_CHANGE", "Transaction state not changed", "Internal Error", "Contact OceanBase Support"], + "-6207": ["OB_TRANS_PROTOCOL_ERROR", "Transaction protocol error", "Internal Error", "Contact OceanBase Support"], + "-6208": ["OB_TRANS_INVALID_MESSAGE", "Transaction message invalid", "Internal Error", "Contact OceanBase Support"], + "-6209": ["OB_TRANS_INVALID_MESSAGE_TYPE", "Transaction message type invalid", "Internal Error", "Contact OceanBase Support"], + "-6210": ["OB_TRANS_TIMEOUT", "Transaction is timeout", "Internal Error", "Contact OceanBase Support"], + "-6211": ["OB_TRANS_KILLED", "Transaction is killed", "Transaction is killed", "Internal Error", "Contact OceanBase Support"], + "-6212": ["OB_TRANS_STMT_TIMEOUT", "Statement 
is timeout", "Internal Error", "Contact OceanBase Support"], + "-6213": ["OB_TRANS_CTX_NOT_EXIST", "Transaction context does not exist", "Internal Error", "Contact OceanBase Support"], + "-6214": ["OB_PARTITION_IS_FROZEN", "Partition is frozen", "Internal Error", "Contact OceanBase Support"], + "-6215": ["OB_PARTITION_IS_NOT_FROZEN", "Partition is not frozen", "Internal Error", "Contact OceanBase Support"], + "-6219": ["OB_TRANS_INVALID_LOG_TYPE", "Transaction invalid log type", "Internal Error", "Contact OceanBase Support"], + "-6220": ["OB_TRANS_SQL_SEQUENCE_ILLEGAL", "SQL sequence illegal", "Internal Error", "Contact OceanBase Support"], + "-6221": ["OB_TRANS_CANNOT_BE_KILLED", "Transaction context cannot be killed", "Internal Error", "Contact OceanBase Support"], + "-6222": ["OB_TRANS_STATE_UNKNOWN", "Transaction state unknown", "Internal Error", "Contact OceanBase Support"], + "-6223": ["OB_TRANS_IS_EXITING", "Transaction exiting", "Internal Error", "Contact OceanBase Support"], + "-6224": ["OB_TRANS_NEED_ROLLBACK", "transaction need rollback", "Internal Error", "Contact OceanBase Support"], + "-6225": ["OB_TRANS_UNKNOWN", "Transaction result is unknown", "Internal Error", "Contact OceanBase Support"], + "-6226": ["OB_ERR_READ_ONLY_TRANSACTION", "Cannot execute statement in a READ ONLY transaction", "Internal Error", "Contact OceanBase Support"], + "-6227": ["OB_PARTITION_IS_NOT_STOPPED", "Partition is not stopped", "Internal Error", "Contact OceanBase Support"], + "-6228": ["OB_PARTITION_IS_STOPPED", "Partition has been stopped", "Internal Error", "Contact OceanBase Support"], + "-6229": ["OB_PARTITION_IS_BLOCKED", "Partition has been blocked", "Internal Error", "Contact OceanBase Support"], + "-6230": ["OB_TRANS_RPC_TIMEOUT", "transaction rpc timeout", "Internal Error", "Contact OceanBase Support"], + "-6231": ["OB_REPLICA_NOT_READABLE", "replica is not readable", "Internal Error", "Contact OceanBase Support"], + "-6232": ["OB_PARTITION_IS_SPLITTING", "Partition is splitting", "Internal Error", "Contact OceanBase Support"], + "-6233": ["OB_TRANS_COMMITED", "Transaction has been commited", "Internal Error", "Contact OceanBase Support"], + "-6234": ["OB_TRANS_CTX_COUNT_REACH_LIMIT", "transaction context count reach limit", "Internal Error", "Contact OceanBase Support"], + "-6301": ["OB_LOG_ID_NOT_FOUND", "log id not found", "Internal Error", "Contact OceanBase Support"], + "-6302": ["OB_LSR_THREAD_STOPPED", "log scan runnable thread stop", "Internal Error", "Contact OceanBase Support"], + "-6303": ["OB_NO_LOG", "no log ever scanned", "Internal Error", "Contact OceanBase Support"], + "-6304": ["OB_LOG_ID_RANGE_ERROR", "log id range error", "Internal Error", "Contact OceanBase Support"], + "-6305": ["OB_LOG_ITER_ENOUGH", "iter scans enough files", "Internal Error", "Contact OceanBase Support"], + "-6306": ["OB_CLOG_INVALID_ACK", "invalid ack msg", "Internal Error", "Contact OceanBase Support"], + "-6307": ["OB_CLOG_CACHE_INVALID", "clog cache invalid", "Internal Error", "Contact OceanBase Support"], + "-6308": ["OB_EXT_HANDLE_UNFINISH", "external executor handle do not finish", "Internal Error", "Contact OceanBase Support"], + "-6309": ["OB_CURSOR_NOT_EXIST", "cursor not exist", "Internal Error", "Contact OceanBase Support"], + "-6310": ["OB_STREAM_NOT_EXIST", "stream not exist", "Internal Error", "Contact OceanBase Support"], + "-6311": ["OB_STREAM_BUSY", "stream busy", "Internal Error", "Contact OceanBase Support"], + "-6312": ["OB_FILE_RECYCLED", "file recycled", "Internal Error", 
"Contact OceanBase Support"], + "-6313": ["OB_REPLAY_EAGAIN_TOO_MUCH_TIME", "replay eagain cost too much time", "Internal Error", "Contact OceanBase Support"], + "-6314": ["OB_MEMBER_CHANGE_FAILED", "member change log sync failed", "Internal Error", "Contact OceanBase Support"], + "-6315": ["OB_NO_NEED_BATCH_CTX", "no need batch ctx", "Internal Error", "Contact OceanBase Support"], + "-6316": ["OB_TOO_LARGE_LOG_ID", "too large log id", "Internal Error", "Contact OceanBase Support"], + "-6317": ["OB_ALLOC_LOG_ID_NEED_RETRY", "alloc log id need retry", "Internal Error", "Contact OceanBase Support"], + "-6318": ["OB_TRANS_ONE_PC_NOT_ALLOWED", "transaction one pc not allowed", "Internal Error", "Contact OceanBase Support"], + "-6319": ["OB_LOG_NEED_REBUILD", "need rebuild", "Internal Error", "Contact OceanBase Support"], + "-6320": ["OB_TOO_MANY_LOG_TASK", "too many log tasks", "Internal Error", "Contact OceanBase Support"], + "-6321": ["OB_INVALID_BATCH_SIZE", "ob invalid batch size", "Internal Error", "Contact OceanBase Support"], + "-6322": ["OB_CLOG_SLIDE_TIMEOUT", "ob clog slide timeout", "Internal Error", "Contact OceanBase Support"], + "-7000": ["OB_ELECTION_WARN_LOGBUF_FULL", "The log buffer is full", "Internal Error", "Contact OceanBase Support"], + "-7001": ["OB_ELECTION_WARN_LOGBUF_EMPTY", "The log buffer is empty", "Internal Error", "Contact OceanBase Support"], + "-7002": ["OB_ELECTION_WARN_NOT_RUNNING", "The object is not running", "Internal Error", "Contact OceanBase Support"], + "-7003": ["OB_ELECTION_WARN_IS_RUNNING", "The object is running", "Internal Error", "Contact OceanBase Support"], + "-7004": ["OB_ELECTION_WARN_NOT_REACH_MAJORITY", "Election does not reach majority", "Internal Error", "Contact OceanBase Support"], + "-7005": ["OB_ELECTION_WARN_INVALID_SERVER", "The server is not valid", "Internal Error", "Contact OceanBase Support"], + "-7006": ["OB_ELECTION_WARN_INVALID_LEADER", "The leader is not valid", "Internal Error", "Contact OceanBase Support"], + "-7007": ["OB_ELECTION_WARN_LEADER_LEASE_EXPIRED", "The leader lease is expired", "Internal Error", "Contact OceanBase Support"], + "-7010": ["OB_ELECTION_WARN_INVALID_MESSAGE", "The message is not valid", "Internal Error", "Contact OceanBase Support"], + "-7011": ["OB_ELECTION_WARN_MESSAGE_NOT_INTIME", "The message is not intime", "Internal Error", "Contact OceanBase Support"], + "-7012": ["OB_ELECTION_WARN_NOT_CANDIDATE", "The server is not candidate", "Internal Error", "Contact OceanBase Support"], + "-7013": ["OB_ELECTION_WARN_NOT_CANDIDATE_OR_VOTER", "The server is not candidate or voter", "Internal Error", "Contact OceanBase Support"], + "-7014": ["OB_ELECTION_WARN_PROTOCOL_ERROR", "Election protocol error", "Internal Error", "Contact OceanBase Support"], + "-7015": ["OB_ELECTION_WARN_RUNTIME_OUT_OF_RANGE", "The task run time out of range", "Internal Error", "Contact OceanBase Support"], + "-7021": ["OB_ELECTION_WARN_LAST_OPERATION_NOT_DONE", "Last operation has not done", "Internal Error", "Contact OceanBase Support"], + "-7022": ["OB_ELECTION_WARN_CURRENT_SERVER_NOT_LEADER", "Current server is not leader", "Internal Error", "Contact OceanBase Support"], + "-7024": ["OB_ELECTION_WARN_NO_PREPARE_MESSAGE", "There is not prepare message", "Internal Error", "Contact OceanBase Support"], + "-7025": ["OB_ELECTION_ERROR_MULTI_PREPARE_MESSAGE", "There is more than one prepare message", "Internal Error", "Contact OceanBase Support"], + "-7026": ["OB_ELECTION_NOT_EXIST", "Election does not exist", "Internal Error", 
"Contact OceanBase Support"], + "-7027": ["OB_ELECTION_MGR_IS_RUNNING", "Election manager is running", "Internal Error", "Contact OceanBase Support"], + "-7029": ["OB_ELECTION_WARN_NO_MAJORITY_PREPARE_MESSAGE", "Election msg pool not have majority prepare message", "Internal Error", "Contact OceanBase Support"], + "-7030": ["OB_ELECTION_ASYNC_LOG_WARN_INIT", "Election async log init error", "Internal Error", "Contact OceanBase Support"], + "-7031": ["OB_ELECTION_WAIT_LEADER_MESSAGE", "Election waiting leader message", "Internal Error", "Contact OceanBase Support"], + "-7032": ["OB_ELECTION_GROUP_NOT_EXIST", "Election group not exist", "Internal Error", "Contact OceanBase Support"], + "-7033": ["OB_UNEXPECT_EG_VERSION", "unexpected eg_version", "Internal Error", "Contact OceanBase Support"], + "-7034": ["OB_ELECTION_GROUP_MGR_IS_RUNNING", "election_group_mgr is running", "Internal Error", "Contact OceanBase Support"], + "-7035": ["OB_ELECTION_MGR_NOT_RUNNING", "Election manager is not running", "Internal Error", "Contact OceanBase Support"], + "-7100": ["OB_TRANSFER_TASK_COMPLETED", "transfer task completed", "Internal Error", "Contact OceanBase Support"], + "-7101": ["OB_TOO_MANY_TRANSFER_TASK", "too many transfer tasks", "Internal Error", "Contact OceanBase Support"], + "-7102": ["OB_TRANSFER_TASK_EXIST", "transfer task exist", "Internal Error", "Contact OceanBase Support"], + "-7103": ["OB_TRANSFER_TASK_NOT_EXIST", "transfer task not exist", "Internal Error", "Contact OceanBase Support"], + "-7104": ["OB_NOT_ALLOW_TO_REMOVE", "not allow to remove", "Internal Error", "Contact OceanBase Support"], + "-7105": ["OB_RG_NOT_MATCH", "replication group not match", "Internal Error", "Contact OceanBase Support"], + "-7106": ["OB_TRANSFER_TASK_ABORTED", "transfer task aborted", "Internal Error", "Contact OceanBase Support"], + "-7107": ["OB_TRANSFER_INVALID_MESSAGE", "transfer invalid message", "Internal Error", "Contact OceanBase Support"], + "-7108": ["OB_TRANSFER_CTX_TS_NOT_MATCH", "transfer ctx_ts not match", "Internal Error", "Contact OceanBase Support"], + "-8001": ["OB_SERVER_IS_INIT", "Server is initializing", "Internal Error", "Contact OceanBase Support"], + "-8002": ["OB_SERVER_IS_STOPPING", "Server is stopping", "Internal Error", "Contact OceanBase Support"], + "-8003": ["OB_PACKET_CHECKSUM_ERROR", "Packet checksum error", "Internal Error", "Contact OceanBase Support"], + "-8004": ["OB_PACKET_CLUSTER_ID_NOT_MATCH", "Packet cluster_id not match", "Internal Error", "Contact OceanBase Support"], + "-9001": ["OB_URI_ERROR", "URI error", "Internal Error", "Contact OceanBase Support"], + "-9002": ["OB_FINAL_MD5_ERROR", "OSS file MD5 error", "Internal Error", "Contact OceanBase Support"], + "-9003": ["OB_OSS_ERROR", "OSS error", "Internal Error", "Contact OceanBase Support"], + "-9004": ["OB_INIT_MD5_ERROR", "Init MD5 fail", "Internal Error", "Contact OceanBase Support"], + "-9005": ["OB_OUT_OF_ELEMENT", "Out of element", "Internal Error", "Contact OceanBase Support"], + "-9006": ["OB_UPDATE_MD5_ERROR", "Update MD5 fail", "Internal Error", "Contact OceanBase Support"], + "-9007": ["OB_FILE_LENGTH_INVALID", "Invalid OSS file length", "Internal Error", "Contact OceanBase Support"], + "-9008": ["OB_NOT_READ_ALL_DATA", "Read all data fail", "Internal Error", "Contact OceanBase Support"], + "-9009": ["OB_BUILD_MD5_ERROR", "Build MD5 fail", "Internal Error", "Contact OceanBase Support"], + "-9010": ["OB_MD5_NOT_MATCH", "OSS file MD5 not match", "Internal Error", "Contact OceanBase Support"], + 
"-9011": ["OB_OSS_FILE_NOT_EXIST", "Can not find oss file", "Internal Error", "Contact OceanBase Support"], + "-9012": ["OB_OSS_DATA_VERSION_NOT_MATCHED", "Can not get data version from timestamp", "Internal Error", "Contact OceanBase Support"], + "-9013": ["OB_OSS_WRITE_ERROR", "Write OSS file error", "Internal Error", "Contact OceanBase Support"], + "-9014": ["OB_RESTORE_IN_PROGRESS", "Another restore is in progress", "Internal Error", "Contact OceanBase Support"], + "-9015": ["OB_AGENT_INITING_BACKUP_COUNT_ERROR", "agent initing backup count error", "Internal Error", "Contact OceanBase Support"], + "-9016": ["OB_CLUSTER_NAME_NOT_EQUAL", "ob cluster name not equal", "Internal Error", "Contact OceanBase Support"], + "-9017": ["OB_RS_LIST_INVAILD", "rs list invalid", "Internal Error", "Contact OceanBase Support"], + "-9018": ["OB_AGENT_HAS_FAILED_TASK", "agent has failed task", "Internal Error", "Contact OceanBase Support"], + "-9019": ["OB_RESTORE_PARTITION_IS_COMPELETE", "restore partition is compelete", "Internal Error", "Contact OceanBase Support"], + "-9020": ["OB_RESTORE_PARTITION_TWICE", "restore partition twice", "Internal Error", "Contact OceanBase Support"], + "-32031": ["OB_ERR_CTE_ILLEGAL_QUERY_NAME", "illegal reference of a query name in WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32033": ["OB_ERR_CTE_UNSUPPORTED_COLUMN_ALIASING", "unsupported column aliasing", "Internal Error", "Contact OceanBase Support"], + "-32034": ["OB_ERR_UNSUPPORTED_USE_OF_CTE", "unsupported use of WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32038": ["OB_ERR_CTE_COLUMN_NUMBER_NOT_MATCH", "number of WITH clause column names does not match number of elements in select list", "Internal Error", "Contact OceanBase Support"], + "-32039": ["OB_ERR_NEED_COLUMN_ALIAS_LIST_IN_RECURSIVE_CTE", "recursive WITH clause must have column alias list", "Internal Error", "Contact OceanBase Support"], + "-32040": ["OB_ERR_NEED_UNION_ALL_IN_RECURSIVE_CTE", "recursive WITH clause must use a UNION ALL operation", "Internal Error", "Contact OceanBase Support"], + "-32041": ["OB_ERR_NEED_ONLY_TWO_BRANCH_IN_RECURSIVE_CTE", "UNION ALL operation in recursive WITH clause must have only two branches", "Internal Error", "Contact OceanBase Support"], + "-32042": ["OB_ERR_NEED_REFERENCE_ITSELF_DIRECTLY_IN_RECURSIVE_CTE", "recursive WITH clause must reference itself directly in one of the UNION ALL branches", "Internal Error", "Contact OceanBase Support"], + "-32043": ["OB_ERR_NEED_INIT_BRANCH_IN_RECURSIVE_CTE", "recursive WITH clause needs an initialization branch", "Internal Error", "Contact OceanBase Support"], + "-32044": ["OB_ERR_CYCLE_FOUND_IN_RECURSIVE_CTE", "cycle detected while executing recursive WITH query", "Internal Error", "Contact OceanBase Support"], + "-32045": ["OB_ERR_CTE_REACH_MAX_LEVEL_RECURSION", "maximum level of recursion reached while executing recursive WITH query", "Internal Error", "Contact OceanBase Support"], + "-32046": ["OB_ERR_CTE_ILLEGAL_SEARCH_PSEUDO_NAME", "sequence column name for SEARCH clause must not be part of the column alias list", "Internal Error", "Contact OceanBase Support"], + "-32047": ["OB_ERR_CTE_ILLEGAL_CYCLE_NON_CYCLE_VALUE", "cycle mark value and non-cycle mark value must be one byte character string values", "Internal Error", "Contact OceanBase Support"], + "-32048": ["OB_ERR_CTE_ILLEGAL_CYCLE_PSEUDO_NAME", "cycle mark column name for CYCLE clause must not be part of the column alias list", "Internal Error", "Contact OceanBase Support"], + 
"-32049": ["OB_ERR_CTE_COLUMN_ALIAS_DUPLICATE", "duplicate name found in column alias list for WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32480": ["OB_ERR_CTE_ILLEGAL_SEARCH_CYCLE_CLAUSE", "SEARCH and CYCLE clauses can only be specified for recursive WITH clause elements", "Internal Error", "Contact OceanBase Support"], + "-32481": ["OB_ERR_CTE_DUPLICATE_CYCLE_NON_CYCLE_VALUE", "cycle value for CYCLE clause must be different from the non-cycle value", "Internal Error", "Contact OceanBase Support"], + "-32482": ["OB_ERR_CTE_DUPLICATE_SEQ_NAME_CYCLE_COLUMN", "sequence column for SEARCH clause must be different from the cycle mark column for CYCLE clause", "Internal Error", "Contact OceanBase Support"], + "-32483": ["OB_ERR_CTE_DUPLICATE_NAME_IN_SEARCH_CLAUSE", "duplicate name found in sort specification list for SEARCH clause of WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32484": ["OB_ERR_CTE_DUPLICATE_NAME_IN_CYCLE_CLAUSE", "duplicate name found in cycle column list for CYCLE clause of WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32485": ["OB_ERR_CTE_ILLEGAL_COLUMN_IN_CYCLE_CLAUSE", "element in cycle column list of CYCLE clause must appear in the column alias list of the WITH clause element", "Internal Error", "Contact OceanBase Support"], + "-32486": ["OB_ERR_CTE_ILLEGAL_RECURSIVE_BRANCH", "unsupported operation in recursive branch of recursive WITH clause", "Internal Error", "Contact OceanBase Support"], + "-32487": ["OB_ERR_ILLEGAL_JOIN_IN_RECURSIVE_CTE", "unsupported join in recursive WITH query", "Internal Error", "Contact OceanBase Support"], + "-32488": ["OB_ERR_CTE_NEED_COLUMN_ALIAS_LIST", "WITH clause element did not have a column alias list", "Internal Error", "Contact OceanBase Support"], + "-32489": ["OB_ERR_CTE_ILLEGAL_COLUMN_IN_SERACH_CALUSE", "element in sort specification list of SEARCH clause did not appear in the column alias list of the WITH clause element", "Internal Error", "Contact OceanBase Support"], + "-32490": ["OB_ERR_CTE_RECURSIVE_QUERY_NAME_REFERENCED_MORE_THAN_ONCE", "recursive query name referenced more than once in recursive branch of recursive WITH clause element", "Internal Error", "Contact OceanBase Support"], + "-976": ["OB_ERR_CBY_PSEUDO_COLUMN_NOT_ALLOWED", "Specified pseudo column or operator not allowed here", "Internal Error", "Contact OceanBase Support"], + "-1436": ["OB_ERR_CBY_LOOP", "CONNECT BY loop in user data", "Internal Error", "Contact OceanBase Support"], + "-1437": ["OB_ERR_CBY_JOIN_NOT_ALLOWED", "cannot have join with CONNECT BY", "Internal Error", "Contact OceanBase Support"], + "-1788": ["OB_ERR_CBY_CONNECT_BY_REQUIRED", "CONNECT BY clause required in this query block", "Internal Error", "Contact OceanBase Support"], + "-30002": ["OB_ERR_CBY_CONNECT_BY_PATH_NOT_ALLOWED", "SYS_CONNECT_BY_PATH function is not allowed here", "Internal Error", "Contact OceanBase Support"], + "-30003": ["OB_ERR_CBY_CONNECT_BY_PATH_ILLEGAL_PARAM", "illegal parameter in SYS_CONNECT_BY_PATH function", "Internal Error", "Contact OceanBase Support"], + "-30004": ["OB_ERR_CBY_CONNECT_BY_PATH_INVALID_SEPARATOR", "A column value contained the string that the SYS_CONNECT_BY_PATH function was to use to separate column values", "Internal Error", "Contact OceanBase Support"], + "-30007": ["OB_ERR_CBY_CONNECT_BY_ROOT_ILLEGAL_USED", "CONNECT BY ROOT operator is not supported in the START WITH or in the CONNECT BY condition", "Internal Error", "Contact OceanBase Support"], + "-30929": 
["OB_ERR_CBY_OREDER_SIBLINGS_BY_NOT_ALLOWED", "ORDER SIBLINGS BY clause not allowed here", "Internal Error", "Contact OceanBase Support"], + "-30930": ["OB_ERR_CBY_NOCYCLE_REQUIRED", "NOCYCLE keyword is required with CONNECT_BY_ISCYCLE pseudo column", "Internal Error", "Contact OceanBase Support"], +} diff --git a/handler/meta/sql_meta.py b/handler/meta/sql_meta.py index f98b45d1..7538e7cf 100644 --- a/handler/meta/sql_meta.py +++ b/handler/meta/sql_meta.py @@ -62,7 +62,7 @@ def rm_value(self, key): svr_port from oceanbase.##REPLACE_SQL_AUDIT_TABLE_NAME## where query_sql != '' and is_inner_sql=0 and trace_id='##REPLACE_TRACE_ID##' order by REQUEST_TIME desc limit 1 - ''' + ''', ) sql_dict.set_value( @@ -86,22 +86,22 @@ def rm_value(self, key): svr_port from sys.##REPLACE_SQL_AUDIT_TABLE_NAME##, V$VERSION where length(query_sql)>4 and trace_id='##REPLACE_TRACE_ID##' order by REQUEST_TIME desc) where rownum < 2 - ''' + ''', ) sql_dict.set_value( - "sql_audit_item_mysql", - '`SVR_IP`,`SVR_PORT`,`REQUEST_ID`,`SQL_EXEC_ID`,`TRACE_ID`,`SID`,`CLIENT_IP`,`CLIENT_PORT`,`TENANT_ID`,' - '`EFFECTIVE_TENANT_ID`,`TENANT_NAME`,`USER_ID`,`USER_NAME`,`USER_CLIENT_IP`,`DB_ID`,`DB_NAME`,`SQL_ID`,' - '`QUERY_SQL`,`PLAN_ID`,`AFFECTED_ROWS`,`RETURN_ROWS`,`PARTITION_CNT`,`RET_CODE`,`QC_ID`,`DFO_ID`,`SQC_ID`,' - '`WORKER_ID`,`EVENT`,`P1TEXT`,`P1`,`P2TEXT`,`P2`,`P3TEXT`,`P3`,`LEVEL`,`WAIT_CLASS_ID`,`WAIT_CLASS`,`STATE`,' - '`WAIT_TIME_MICRO`,`TOTAL_WAIT_TIME_MICRO`,`TOTAL_WAITS`,`RPC_COUNT`,`PLAN_TYPE`,`IS_INNER_SQL`,' - '`IS_EXECUTOR_RPC`,`IS_HIT_PLAN`,`REQUEST_TIME`,`ELAPSED_TIME`,`NET_TIME`,`NET_WAIT_TIME`,`QUEUE_TIME`,' - '`DECODE_TIME`,`GET_PLAN_TIME`,`EXECUTE_TIME`,`APPLICATION_WAIT_TIME`,`CONCURRENCY_WAIT_TIME`,' - '`USER_IO_WAIT_TIME`,`SCHEDULE_TIME`,`ROW_CACHE_HIT`,`BLOOM_FILTER_CACHE_HIT`,`BLOCK_CACHE_HIT`,' - '`BLOCK_INDEX_CACHE_HIT`,`DISK_READS`,`RETRY_CNT`,`TABLE_SCAN`,`CONSISTENCY_LEVEL`,`MEMSTORE_READ_ROW_COUNT`,' - '`SSSTORE_READ_ROW_COUNT`,`REQUEST_MEMORY_USED`,`EXPECTED_WORKER_COUNT`,`USED_WORKER_COUNT`,`PS_STMT_ID`,' - '`TRANSACTION_HASH`,`REQUEST_TYPE`,`IS_BATCHED_MULTI_STMT`,`OB_TRACE_INFO`,`PLAN_HASH` ' + "sql_audit_item_mysql", + '`SVR_IP`,`SVR_PORT`,`REQUEST_ID`,`SQL_EXEC_ID`,`TRACE_ID`,`SID`,`CLIENT_IP`,`CLIENT_PORT`,`TENANT_ID`,' + '`EFFECTIVE_TENANT_ID`,`TENANT_NAME`,`USER_ID`,`USER_NAME`,`USER_CLIENT_IP`,`DB_ID`,`DB_NAME`,`SQL_ID`,' + '`QUERY_SQL`,`PLAN_ID`,`AFFECTED_ROWS`,`RETURN_ROWS`,`PARTITION_CNT`,`RET_CODE`,`QC_ID`,`DFO_ID`,`SQC_ID`,' + '`WORKER_ID`,`EVENT`,`P1TEXT`,`P1`,`P2TEXT`,`P2`,`P3TEXT`,`P3`,`LEVEL`,`WAIT_CLASS_ID`,`WAIT_CLASS`,`STATE`,' + '`WAIT_TIME_MICRO`,`TOTAL_WAIT_TIME_MICRO`,`TOTAL_WAITS`,`RPC_COUNT`,`PLAN_TYPE`,`IS_INNER_SQL`,' + '`IS_EXECUTOR_RPC`,`IS_HIT_PLAN`,`REQUEST_TIME`,`ELAPSED_TIME`,`NET_TIME`,`NET_WAIT_TIME`,`QUEUE_TIME`,' + '`DECODE_TIME`,`GET_PLAN_TIME`,`EXECUTE_TIME`,`APPLICATION_WAIT_TIME`,`CONCURRENCY_WAIT_TIME`,' + '`USER_IO_WAIT_TIME`,`SCHEDULE_TIME`,`ROW_CACHE_HIT`,`BLOOM_FILTER_CACHE_HIT`,`BLOCK_CACHE_HIT`,' + '`BLOCK_INDEX_CACHE_HIT`,`DISK_READS`,`RETRY_CNT`,`TABLE_SCAN`,`CONSISTENCY_LEVEL`,`MEMSTORE_READ_ROW_COUNT`,' + '`SSSTORE_READ_ROW_COUNT`,`REQUEST_MEMORY_USED`,`EXPECTED_WORKER_COUNT`,`USED_WORKER_COUNT`,`PS_STMT_ID`,' + '`TRANSACTION_HASH`,`REQUEST_TYPE`,`IS_BATCHED_MULTI_STMT`,`OB_TRACE_INFO`,`PLAN_HASH` ', ) sql_dict.set_value( @@ -116,22 +116,22 @@ def rm_value(self, key): '"USER_IO_WAIT_TIME","SCHEDULE_TIME","ROW_CACHE_HIT","BLOOM_FILTER_CACHE_HIT","BLOCK_CACHE_HIT",' 
'"BLOCK_INDEX_CACHE_HIT","DISK_READS","RETRY_CNT","TABLE_SCAN","CONSISTENCY_LEVEL","MEMSTORE_READ_ROW_COUNT",' '"SSSTORE_READ_ROW_COUNT","REQUEST_MEMORY_USED","EXPECTED_WORKER_COUNT","USED_WORKER_COUNT","PS_STMT_ID",' - '"TRANSACTION_HASH","REQUEST_TYPE","IS_BATCHED_MULTI_STMT","OB_TRACE_INFO","PLAN_HASH" ' + '"TRANSACTION_HASH","REQUEST_TYPE","IS_BATCHED_MULTI_STMT","OB_TRACE_INFO","PLAN_HASH" ', ) sql_dict.set_value( - "sql_audit_item_mysql_obversion4", - '`SVR_IP`,`SVR_PORT`,`REQUEST_ID`,`SQL_EXEC_ID`,`TRACE_ID`,`SID`,`CLIENT_IP`,`CLIENT_PORT`,`TENANT_ID`,' - '`EFFECTIVE_TENANT_ID`,`TENANT_NAME`,`USER_ID`,`USER_NAME`,`USER_CLIENT_IP`,`DB_ID`,`DB_NAME`,`SQL_ID`,' - '`QUERY_SQL`,`PLAN_ID`,`AFFECTED_ROWS`,`RETURN_ROWS`,`PARTITION_CNT`,`RET_CODE`,`QC_ID`,`DFO_ID`,`SQC_ID`,' - '`WORKER_ID`,`EVENT`,`P1TEXT`,`P1`,`P2TEXT`,`P2`,`P3TEXT`,`P3`,`LEVEL`,`WAIT_CLASS_ID`,`WAIT_CLASS`,`STATE`,' - '`WAIT_TIME_MICRO`,`TOTAL_WAIT_TIME_MICRO`,`TOTAL_WAITS`,`RPC_COUNT`,`PLAN_TYPE`,`IS_INNER_SQL`,' - '`IS_EXECUTOR_RPC`,`IS_HIT_PLAN`,`REQUEST_TIME`,`ELAPSED_TIME`,`NET_TIME`,`NET_WAIT_TIME`,`QUEUE_TIME`,' - '`DECODE_TIME`,`GET_PLAN_TIME`,`EXECUTE_TIME`,`APPLICATION_WAIT_TIME`,`CONCURRENCY_WAIT_TIME`,' - '`USER_IO_WAIT_TIME`,`SCHEDULE_TIME`,`ROW_CACHE_HIT`,`BLOOM_FILTER_CACHE_HIT`,`BLOCK_CACHE_HIT`,' - '`DISK_READS`,`RETRY_CNT`,`TABLE_SCAN`,`CONSISTENCY_LEVEL`,`MEMSTORE_READ_ROW_COUNT`,' - '`SSSTORE_READ_ROW_COUNT`,`REQUEST_MEMORY_USED`,`EXPECTED_WORKER_COUNT`,`USED_WORKER_COUNT`,' - '`TX_ID`,`REQUEST_TYPE`,`IS_BATCHED_MULTI_STMT`,`OB_TRACE_INFO`,`PLAN_HASH` ' + "sql_audit_item_mysql_obversion4", + '`SVR_IP`,`SVR_PORT`,`REQUEST_ID`,`SQL_EXEC_ID`,`TRACE_ID`,`SID`,`CLIENT_IP`,`CLIENT_PORT`,`TENANT_ID`,' + '`EFFECTIVE_TENANT_ID`,`TENANT_NAME`,`USER_ID`,`USER_NAME`,`USER_CLIENT_IP`,`DB_ID`,`DB_NAME`,`SQL_ID`,' + '`QUERY_SQL`,`PLAN_ID`,`AFFECTED_ROWS`,`RETURN_ROWS`,`PARTITION_CNT`,`RET_CODE`,`QC_ID`,`DFO_ID`,`SQC_ID`,' + '`WORKER_ID`,`EVENT`,`P1TEXT`,`P1`,`P2TEXT`,`P2`,`P3TEXT`,`P3`,`LEVEL`,`WAIT_CLASS_ID`,`WAIT_CLASS`,`STATE`,' + '`WAIT_TIME_MICRO`,`TOTAL_WAIT_TIME_MICRO`,`TOTAL_WAITS`,`RPC_COUNT`,`PLAN_TYPE`,`IS_INNER_SQL`,' + '`IS_EXECUTOR_RPC`,`IS_HIT_PLAN`,`REQUEST_TIME`,`ELAPSED_TIME`,`NET_TIME`,`NET_WAIT_TIME`,`QUEUE_TIME`,' + '`DECODE_TIME`,`GET_PLAN_TIME`,`EXECUTE_TIME`,`APPLICATION_WAIT_TIME`,`CONCURRENCY_WAIT_TIME`,' + '`USER_IO_WAIT_TIME`,`SCHEDULE_TIME`,`ROW_CACHE_HIT`,`BLOOM_FILTER_CACHE_HIT`,`BLOCK_CACHE_HIT`,' + '`DISK_READS`,`RETRY_CNT`,`TABLE_SCAN`,`CONSISTENCY_LEVEL`,`MEMSTORE_READ_ROW_COUNT`,' + '`SSSTORE_READ_ROW_COUNT`,`REQUEST_MEMORY_USED`,`EXPECTED_WORKER_COUNT`,`USED_WORKER_COUNT`,' + '`TX_ID`,`REQUEST_TYPE`,`IS_BATCHED_MULTI_STMT`,`OB_TRACE_INFO`,`PLAN_HASH` ', ) sql_dict.set_value( @@ -146,7 +146,7 @@ def rm_value(self, key): '"USER_IO_WAIT_TIME","SCHEDULE_TIME","ROW_CACHE_HIT","BLOOM_FILTER_CACHE_HIT","BLOCK_CACHE_HIT",' '"DISK_READS","RETRY_CNT","TABLE_SCAN","CONSISTENCY_LEVEL","MEMSTORE_READ_ROW_COUNT",' '"SSSTORE_READ_ROW_COUNT","REQUEST_MEMORY_USED","EXPECTED_WORKER_COUNT","USED_WORKER_COUNT",' - '"TX_ID","REQUEST_TYPE","IS_BATCHED_MULTI_STMT","OB_TRACE_INFO","PLAN_HASH" ' + '"TX_ID","REQUEST_TYPE","IS_BATCHED_MULTI_STMT","OB_TRACE_INFO","PLAN_HASH" ', ) sql_dict.set_value( @@ -250,7 +250,7 @@ def rm_value(self, key): plan_monitor.PLAN_LINE_ID, PLAN_OPERATION, plan_monitor.PLAN_DEPTH ORDER BY plan_monitor.PLAN_LINE_ID ASC; - ''' + ''', ) sql_dict.set_value( @@ -324,7 +324,7 @@ def rm_value(self, key): plan_monitor.PLAN_LINE_ID, plan_monitor.PLAN_OPERATION ORDER BY 
plan_monitor.PLAN_LINE_ID ASC -    ''' +    ''', ) sql_dict.set_value( @@ -406,7 +406,7 @@ def rm_value(self, key): PLAN_LINE_ID,PLAN_OPERATION,PLAN_DEPTH,SVR_IP,SVR_PORT ORDER BY ##REPLACE_ORDER_BY## -    ''' +    ''', ) sql_dict.set_value( @@ -461,7 +461,7 @@ def rm_value(self, key): PLAN_LINE_ID,PLAN_OPERATION,SVR_IP,SVR_PORT ORDER BY ##REPLACE_ORDER_BY## -    ''' +    ''', ) sql_dict.set_value( @@ -504,7 +504,7 @@ def rm_value(self, key): trace_id = '##REPLACE_TRACE_ID##' ORDER BY ##REPLACE_ORDER_BY## -    ''' +    ''', ) sql_dict.set_value( @@ -547,7 +547,7 @@ def rm_value(self, key): trace_id = '##REPLACE_TRACE_ID##' ORDER BY ##REPLACE_ORDER_BY## -    ''' +    ''', ) sql_dict.set_value( @@ -656,7 +656,7 @@ def rm_value(self, key): plan_monitor.PLAN_LINE_ID, PLAN_OPERATION, plan_monitor.PLAN_DEPTH ORDER BY plan_monitor.PLAN_LINE_ID ASC; -    ''' +    ''', ) sql_dict.set_value( @@ -734,7 +734,7 @@ def rm_value(self, key): plan_monitor.PLAN_LINE_ID, plan_monitor.PLAN_OPERATION ORDER BY plan_monitor.PLAN_LINE_ID ASC; -    ''' +    ''', ) sql_dict.set_value( @@ -822,7 +822,7 @@ def rm_value(self, key): PLAN_LINE_ID,PLAN_OPERATION,PLAN_DEPTH,SVR_IP,SVR_PORT ORDER BY ##REPLACE_ORDER_BY##; -    ''' +    ''', ) sql_dict.set_value( @@ -881,7 +881,7 @@ def rm_value(self, key): PLAN_LINE_ID,PLAN_OPERATION,SVR_IP,SVR_PORT ORDER BY ##REPLACE_ORDER_BY##; -    ''' +    ''', ) sql_dict.set_value( @@ -895,7 +895,7 @@ def rm_value(self, key): WHERE trace_id = '##REPLACE_TRACE_ID##' GROUP BY PLAN_LINE_ID, PLAN_OPERATION, PLAN_DEPTH ORDER BY PLAN_LINE_ID -    ''' +    ''', ) sql_dict.set_value( @@ -909,7 +909,7 @@ def rm_value(self, key): WHERE trace_id = '##REPLACE_TRACE_ID##' GROUP BY PLAN_LINE_ID, PLAN_OPERATION, PLAN_DEPTH ORDER BY PLAN_LINE_ID -    ''' +    ''', ) sql_dict.set_value( @@ -955,7 +955,7 @@ def rm_value(self, key): trace_id = '##REPLACE_TRACE_ID##' ORDER BY ##REPLACE_ORDER_BY##; -    ''' +    ''', ) sql_dict.set_value( @@ -1001,5 +1001,5 @@ def rm_value(self, key): trace_id = '##REPLACE_TRACE_ID##' ORDER BY ##REPLACE_ORDER_BY##; -    ''' +    ''', ) diff --git a/handler/rca/__init__.py b/handler/rca/__init__.py index eea6dbce..d037c710 100644 --- a/handler/rca/__init__.py +++ b/handler/rca/__init__.py @@ -15,4 +15,3 @@ @file: __init__.py @desc: """ - diff --git a/handler/rca/plugins/__init__.py b/handler/rca/plugins/__init__.py index eea6dbce..d037c710 100644 --- a/handler/rca/plugins/__init__.py +++ b/handler/rca/plugins/__init__.py @@ -15,4 +15,3 @@ @file: __init__.py @desc: """ - diff --git a/handler/rca/plugins/gather.py b/handler/rca/plugins/gather.py index 0bf1caf1..7fc6c620 100644 --- a/handler/rca/plugins/gather.py +++ b/handler/rca/plugins/gather.py @@ -23,7 +23,7 @@ from handler.gather.gather_obproxy_log import GatherObProxyLogHandler -class Gather_log(): +class Gather_log: def __init__(self, context): self.conf_map = {} self.context = context @@ -72,12 +72,12 @@ def execute(self, save_path=""): if not self.conf_map["filter_nodes_list"] or len(self.conf_map["filter_nodes_list"]) == 0: self.context.set_variable("filter_nodes_list", self.conf_map["filter_nodes_list"]) # execute on all nodes_list -        handle=None +        handle = None for conf in self.conf_map: self.context.set_variable(conf, self.conf_map[conf]) if self.conf_map["gather_target"] == 'observer': all_node = self.context.cluster_config.get("servers") -                if self.conf_map["filter_nodes_list"] and len(self.conf_map["filter_nodes_list"]>0): +                if self.conf_map["filter_nodes_list"] and len(self.conf_map["filter_nodes_list"]) > 0: # execute on specific nodes_list for gather_node in 
self.conf_map["filter_nodes_list"]: for node in all_node: @@ -85,7 +85,7 @@ def execute(self, save_path=""): nodes_list.append(node) self.stdio.verbose("{0} is in the nodes list".format(node.get("ip"))) self.conf_map["filter_nodes_list"] = nodes_list - handle=GatherLogHandler(self.context) + handle = GatherLogHandler(self.context) elif self.conf_map["gather_target"] == 'obproxy': all_node = self.context.get_variable('obproxy_nodes') if self.conf_map["filter_nodes_list"]: @@ -97,31 +97,31 @@ def execute(self, save_path=""): else: nodes_list.append(node) self.conf_map["filter_nodes_list"] = nodes_list - handle=GatherObProxyLogHandler(self.context) + handle = GatherObProxyLogHandler(self.context) if handle is None: self.stdio.error("rca gather handle the target cannot be empty!") raise Exception("rca gather handle the target cannot be empty!") else: handle.handle() - gather_result=handle.pack_dir_this_command + gather_result = handle.pack_dir_this_command zip_files = os.listdir(gather_result) - result_log_files=[] + result_log_files = [] for zip_file in zip_files: if "zip" not in zip_file: continue # open zip file - self.stdio.verbose("open zip file: {0}".format(os.path.join(gather_result,zip_file))) - with zipfile.ZipFile(os.path.join(gather_result,zip_file), 'r') as zip_ref: + self.stdio.verbose("open zip file: {0}".format(os.path.join(gather_result, zip_file))) + with zipfile.ZipFile(os.path.join(gather_result, zip_file), 'r') as zip_ref: # Extract all files to the current directory zip_ref.extractall(gather_result) for file_name in os.listdir(gather_result): if "zip" not in file_name and "result_summary.txt" not in file_name: - log_dir=os.path.join(gather_result,file_name) + log_dir = os.path.join(gather_result, file_name) for log_file in os.listdir(log_dir): - result_log_files.append(os.path.join(log_dir,log_file)) - self.stdio.verbose("result_log_files add {0}".format(os.path.join(log_dir,log_file))) + result_log_files.append(os.path.join(log_dir, log_file)) + self.stdio.verbose("result_log_files add {0}".format(os.path.join(log_dir, log_file))) self.reset() diff --git a/handler/rca/rca_exception.py b/handler/rca/rca_exception.py index 7cfcff95..a18301e1 100644 --- a/handler/rca/rca_exception.py +++ b/handler/rca/rca_exception.py @@ -30,7 +30,6 @@ def __str__(self): return repr(self) - class RCAExecuteException(Exception): def __init__(self, msg=None, obj=None): self.msg, self.obj = msg, obj @@ -42,7 +41,6 @@ def __str__(self): return repr(self) - class RCANotNeedExecuteException(Exception): def __init__(self, msg=None, obj=None): self.msg, self.obj = msg, obj diff --git a/handler/rca/rca_handler.py b/handler/rca/rca_handler.py index 94d4d017..d7ecb71f 100644 --- a/handler/rca/rca_handler.py +++ b/handler/rca/rca_handler.py @@ -43,12 +43,7 @@ def __init__(self, context): context_observer_nodes = [] if observer_nodes is not None: for node in observer_nodes: - ssh = SshHelper(True, node.get("ip"), - node.get("ssh_username"), - node.get("ssh_password"), - node.get("ssh_port"), - node.get("ssh_key_file"), - node) + ssh = SshHelper(True, node.get("ip"), node.get("ssh_username"), node.get("ssh_password"), node.get("ssh_port"), node.get("ssh_key_file"), node) node["ssher"] = ssh context_observer_nodes.append(node) self.context.set_variable("observer_nodes", context_observer_nodes) @@ -57,12 +52,7 @@ def __init__(self, context): context_obproxy_nodes = [] if obproxy_nodes is not None: for node in obproxy_nodes: - ssh = SshHelper(True, node.get("ip"), - node.get("ssh_username"), - 
node.get("ssh_password"), - node.get("ssh_port"), - node.get("ssh_key_file"), - node) + ssh = SshHelper(True, node.get("ip"), node.get("ssh_username"), node.get("ssh_password"), node.get("ssh_port"), node.get("ssh_key_file"), node) node["ssher"] = ssh context_obproxy_nodes.append(node) self.context.set_variable("obproxy_nodes", context_obproxy_nodes) @@ -70,12 +60,9 @@ def __init__(self, context): # build ob_connector try: if self.ob_cluster is not None: - ob_connector = OBConnector(ip=self.ob_cluster.get("db_host"), - port=self.ob_cluster.get("db_port"), - username=self.ob_cluster.get("tenant_sys").get("user"), - password=self.ob_cluster.get("tenant_sys").get("password"), - stdio=self.stdio, - timeout=10000) + ob_connector = OBConnector( + ip=self.ob_cluster.get("db_host"), port=self.ob_cluster.get("db_port"), username=self.ob_cluster.get("tenant_sys").get("user"), password=self.ob_cluster.get("tenant_sys").get("password"), stdio=self.stdio, timeout=10000 + ) self.context.set_variable("ob_connector", ob_connector) except Exception as e: self.stdio.warn("RCAHandler init ob_connector failed: {0}. If the scene need it, please check the conf.yaml".format(str(e))) @@ -94,8 +81,7 @@ def __init__(self, context): observer_version = get_observer_version_by_sql(self.ob_cluster, self.stdio) except Exception as e: if len(context_observer_nodes) > 0: - observer_version = get_observer_version(True, context_observer_nodes[0]["ssher"], - context_observer_nodes[0]["home_path"],self.stdio) + observer_version = get_observer_version(True, context_observer_nodes[0]["ssher"], context_observer_nodes[0]["home_path"], self.stdio) else: self.stdio.warn("RCAHandler Failed to get observer version:{0}".format(e)) self.stdio.verbose("RCAHandler.init get observer version: {0}".format(observer_version)) @@ -112,8 +98,7 @@ def __init__(self, context): obproxy_version = "" try: if len(context_obproxy_nodes) > 0: - obproxy_version = get_obproxy_version(True, context_obproxy_nodes[0]["ssher"], - context_obproxy_nodes[0]["home_path"],self.stdio) + obproxy_version = get_obproxy_version(True, context_obproxy_nodes[0]["ssher"], context_obproxy_nodes[0]["home_path"], self.stdio) except Exception as e: self.stdio.warn("RCAHandler.init Failed to get obproxy version. Error:{0}".format(e)) if obproxy_version != "": @@ -144,20 +129,19 @@ def __init__(self, context): rca_scene_parameters = Util.get_option(self.options, 'input_parameters', "") if rca_scene_parameters != "": try: - rca_scene_parameters=json.loads(rca_scene_parameters) + rca_scene_parameters = json.loads(rca_scene_parameters) except Exception as e: raise Exception("Failed to parse input_parameters. 
Please check the option is json:{0}".format(rca_scene_parameters)) else: - rca_scene_parameters= {} + rca_scene_parameters = {} self.context.set_variable("input_parameters", rca_scene_parameters) self.store_dir = Util.get_option(self.options, 'store_dir', "./rca/") self.context.set_variable("store_dir", self.store_dir) self.stdio.verbose( "RCAHandler init.cluster:{0}, init.nodes:{1}, init.obproxy_nodes:{2}, init.store_dir:{3}".format( - self.cluster.get( - "ob_cluster_name") or self.cluster.get( - "obproxy_cluster_name"), StringUtils.node_cut_passwd_for_log(self.nodes), - StringUtils.node_cut_passwd_for_log(self.obproxy_nodes), self.store_dir)) + self.cluster.get("ob_cluster_name") or self.cluster.get("obproxy_cluster_name"), StringUtils.node_cut_passwd_for_log(self.nodes), StringUtils.node_cut_passwd_for_log(self.obproxy_nodes), self.store_dir + ) + ) def get_result_path(self): return self.store_dir @@ -172,9 +156,7 @@ def handle(self): if self.rca_scene is None: raise Exception("rca_scene :{0} is not exist".format(scene_name)) - self.store_dir = os.path.expanduser("{0}/{1}_{2}".format(self.store_dir, scene_name, - datetime.datetime.now().strftime( - '%Y%m%d%H%M%S'))) + self.store_dir = os.path.expanduser("{0}/{1}_{2}".format(self.store_dir, scene_name, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))) if not os.path.exists(self.store_dir): os.mkdir(self.store_dir) @@ -204,8 +186,7 @@ def execute(self): self.rca_scene.export_result() except Exception as e: raise Exception("rca_scene.export_result err: {0}".format(e)) - self.stdio.print( "rca finished. For more details, the result on '" + Fore.YELLOW + self.get_result_path() + Style.RESET_ALL + "' \nYou can get the suggest by '" + Fore.YELLOW + "cat " + self.get_result_path() + "/record" + Style.RESET_ALL + "'") - + self.stdio.print("rca finished. For more details, the result on '" + Fore.YELLOW + self.get_result_path() + Style.RESET_ALL + "' \nYou can get the suggest by '" + Fore.YELLOW + "cat " + self.get_result_path() + "/record" + Style.RESET_ALL + "'") class RcaScene: @@ -234,7 +215,7 @@ def init(self, context): self.report = context.get_variable('report') self.obproxy_version = context.get_variable('obproxy_version', default="") self.observer_version = context.get_variable('observer_version', default="") - self.ob_connector = context.get_variable('ob_connector',default=None) + self.ob_connector = context.get_variable('ob_connector', default=None) self.store_dir = context.get_variable('store_dir') self.ob_cluster = context.get_variable('ob_cluster') self.input_parameters = context.get_variable('input_parameters') or {} @@ -250,13 +231,15 @@ def get_result(self): def get_scene_info(self): raise Exception("rca ({0}) scene.get_scene_info() undefined".format(type(self).__name__)) + def export_result(self): return self.Result.export() + def get_all_tenants_id(self): try: if self.ob_connector is None: raise Exception("ob_connector is None") - all_tenant_id_data=self.ob_connector.execute_sql("select tenant_id from oceanbase.__all_tenant;")[0] + all_tenant_id_data = self.ob_connector.execute_sql("select tenant_id from oceanbase.__all_tenant;")[0] return all_tenant_id_data except Exception as e: raise Exception("run rca's get_all_tenants_id. 
Exception: {0}".format(e)) @@ -302,6 +285,7 @@ def add_record(self, record): def add_suggest(self, suggest): self.suggest += suggest + def suggest_is_empty(self): return self.suggest == "The suggest: " diff --git a/handler/rca/rca_list.py b/handler/rca/rca_list.py index c03ef872..71b94be4 100644 --- a/handler/rca/rca_list.py +++ b/handler/rca/rca_list.py @@ -31,8 +31,7 @@ def __init__(self, context, work_path=const.RCA_WORK_PATH): if os.path.exists(os.path.expanduser(work_path)): self.work_path = os.path.expanduser(work_path) else: - self.stdio.warn( - "input rca work_path not exists: {0}, use default path {1}".format(work_path, const.RCA_WORK_PATH)) + self.stdio.warn("input rca work_path not exists: {0}, use default path {1}".format(work_path, const.RCA_WORK_PATH)) self.work_path = const.RCA_WORK_PATH def get_all_scenes(self): @@ -56,17 +55,13 @@ def get_all_scenes(self): for scene_name, scene in scene_list.items(): scene_info = scene.get_scene_info() - scene_info_list[scene_name] = {"name": scene_name, - "command": "obdiag rca run --scene={0}".format(scene_name), - "info_en": scene_info["info_en"], - "info_cn": scene_info["info_cn"] - } - return scene_info_list,scene_list + scene_info_list[scene_name] = {"name": scene_name, "command": "obdiag rca run --scene={0}".format(scene_name), "info_en": scene_info["info_en"], "info_cn": scene_info["info_cn"]} + return scene_info_list, scene_list def handle(self): try: self.stdio.verbose("list rca scenes") - scene_info_list,scene_itme_list = self.get_all_scenes() + scene_info_list, scene_itme_list = self.get_all_scenes() Util.print_scene(scene_info_list) except Exception as e: self.stdio.error("RcaScenesListHandler Exception: {0}".format(e)) diff --git a/handler/rca/scene/ddl_disk_full_scene.py b/handler/rca/scene/ddl_disk_full_scene.py index b45db4ac..0659d3b8 100644 --- a/handler/rca/scene/ddl_disk_full_scene.py +++ b/handler/rca/scene/ddl_disk_full_scene.py @@ -55,7 +55,7 @@ def init(self, context): if table_name is None or table_name == "" or tenant_name is None or tenant_name == "": raise RCAInitException("table_name or tenant_name is None. Please check the input parameters.") if action_type is not None: - if action_type=="add_index": + if action_type == "add_index": self.action_type = action_type self.verbose("action type is {0}.".format(action_type)) if index_name is not None and index_name.strip() != "": @@ -67,25 +67,19 @@ def init(self, context): else: self.stdio.error("action type is {0}, but only support add_index now.".format(action_type)) - tenant_data = self.ob_connector.execute_sql( - "select tenant_id from oceanbase.__all_tenant where tenant_name = '{0}';".format(tenant_name)) + tenant_data = self.ob_connector.execute_sql("select tenant_id from oceanbase.__all_tenant where tenant_name = '{0}';".format(tenant_name)) if len(tenant_data) == 0: - raise RCAInitException( - "can not find tenant id by tenant name: {0}. Please check the tenant name.".format(tenant_name)) + raise RCAInitException("can not find tenant id by tenant name: {0}. Please check the tenant name.".format(tenant_name)) self.tenant_id = tenant_data[0][0] if self.tenant_id is None: - raise RCAInitException( - "can not find tenant id by tenant name: {0}. Please check the tenant name.".format(tenant_name)) + raise RCAInitException("can not find tenant id by tenant name: {0}. 
Please check the tenant name.".format(tenant_name)) - table_id_data = self.ob_connector.execute_sql( - "select table_id from oceanbase.__all_virtual_table where table_name = '{0}';".format(table_name)) + table_id_data = self.ob_connector.execute_sql("select table_id from oceanbase.__all_virtual_table where table_name = '{0}';".format(table_name)) if len(table_id_data) == 0: - raise RCAInitException( - "can not find table id by table name: {0}. Please check the table name.".format(table_name)) + raise RCAInitException("can not find table id by table name: {0}. Please check the table name.".format(table_name)) self.table_id = table_id_data[0][0] if self.table_id is None: - raise RCAInitException( - "can not find table id by table name: {0}. Please check the table name.".format(table_name)) + raise RCAInitException("can not find table id by table name: {0}. Please check the table name.".format(table_name)) self.verbose("table_id is {0}, tenant_id is {1}.".format(self.table_id, self.tenant_id)) def verbose(self, info): @@ -100,15 +94,14 @@ def execute(self): self.verbose("start to get estimated_data_size...") ## if the action is not add_index sql = "select svr_ip, svr_port, sum(original_size) as estimated_data_size from oceanbase.__all_virtual_tablet_sstable_macro_info where tablet_id in (select tablet_id from oceanbase.__all_virtual_tablet_to_table_history where table_id = {0}) and (svr_ip, svr_port) in (select svr_ip, svr_port from oceanbase.__all_virtual_ls_meta_table where role = 1) group by svr_ip, svr_port;".format( - self.table_id) + self.table_id + ) self.verbose("execute_sql is {0}".format(sql)) tablet_size_data = self.ob_connector.execute_sql(sql) self.verbose("tablet_size_data is {0}".format(tablet_size_data)) record.add_record("tablet_size_data is {0}".format(tablet_size_data)) if len(tablet_size_data) <= 0 or tablet_size_data[0][2] is None: - raise RCAExecuteException( - "can not find tablet size info or estimated_data_size. please check the data:{0}.".format( - tablet_size_data)) + raise RCAExecuteException("can not find tablet size info or estimated_data_size. 
please check the data:{0}.".format(tablet_size_data)) self.estimated_size = tablet_size_data self.verbose("estimated_size is {0}".format(self.estimated_size)) record.add_record("estimated_size is {0}".format(self.estimated_size)) @@ -119,24 +112,21 @@ def execute(self): record.add_record("index_name is {0}".format(self.index_name)) record.add_record("action_type is {0}".format(self.action_type)) ## if the action is add_index - sql = "select table_id from oceanbase.__all_virtual_table_history where tenant_id = '{0}' and data_table_id = '{1}' and table_name like '%{2}%';".format( - self.tenant_id, self.table_id, self.index_name) + sql = "select table_id from oceanbase.__all_virtual_table_history where tenant_id = '{0}' and data_table_id = '{1}' and table_name like '%{2}%';".format(self.tenant_id, self.table_id, self.index_name) self.verbose("execute_sql is {0}".format(sql)) self.index_table_id = self.ob_connector.execute_sql(sql)[0][0] self.verbose("index_table_id is {0}".format(self.index_table_id)) record.add_record("index_table_id is {0}".format(self.index_table_id)) # Query the sum of the lengths of all columns in the main table - sql = "select table_id, sum(data_length) from oceanbase.__all_virtual_column_history where tenant_id = '{0}' and table_id = '{1}';".format( - self.tenant_id, self.table_id) + sql = "select table_id, sum(data_length) from oceanbase.__all_virtual_column_history where tenant_id = '{0}' and table_id = '{1}';".format(self.tenant_id, self.table_id) self.verbose("execute_sql is {0}".format(sql)) main_table_sum_of_data_length = int(self.ob_connector.execute_sql(sql)[0][1]) self.verbose("main_table_sum_of_data_length is {0}".format(main_table_sum_of_data_length)) record.add_record("main_table_sum_of_data_length is {0}".format(main_table_sum_of_data_length)) # The sum of the lengths of all columns in the query index - sql = "select table_id, sum(data_length) from oceanbase.__all_virtual_column_history where tenant_id = '{0}' and table_id = '{1}';".format( - self.tenant_id, self.index_table_id) + sql = "select table_id, sum(data_length) from oceanbase.__all_virtual_column_history where tenant_id = '{0}' and table_id = '{1}';".format(self.tenant_id, self.index_table_id) self.verbose("execute_sql is {0}".format(sql)) index_table_sum_of_data_length = int(self.ob_connector.execute_sql(sql)[0][1]) self.verbose("index_table_sum_of_data_length is {0}".format(index_table_sum_of_data_length)) @@ -146,8 +136,7 @@ def execute(self): new_estimated_size = [] for node_estimated_size in self.estimated_size: new_node_estimated_size = [node_estimated_size[0], node_estimated_size[1]] - estimiated_index_size = int(index_table_sum_of_data_length / main_table_sum_of_data_length / 1024 / 1024 * int( - node_estimated_size[2])) + estimiated_index_size = int(index_table_sum_of_data_length / main_table_sum_of_data_length / 1024 / 1024 * int(node_estimated_size[2])) new_node_estimated_size.append(estimiated_index_size) new_estimated_size.append(new_node_estimated_size) @@ -162,9 +151,7 @@ def execute(self): record.add_record("On target_server_ip is {0}, target_server_port is {1}, target_server_estimated_size is {2}".format(target_server_ip, target_server_port, target_server_estimated_size)) # get target_server_total_size and target_server_used_size - target_server_data = self.ob_connector.execute_sql( - "select total_size, used_size from oceanbase.__all_virtual_disk_stat where svr_ip = '{0}' and svr_port = {1};".format( - target_server_ip, target_server_port)) + target_server_data = 
self.ob_connector.execute_sql("select total_size, used_size from oceanbase.__all_virtual_disk_stat where svr_ip = '{0}' and svr_port = {1};".format(target_server_ip, target_server_port)) target_server_total_size = int(target_server_data[0][0]) self.verbose("target_server_total_size is {0}".format(target_server_total_size)) record.add_record("target_server_total_size is {0}".format(target_server_total_size)) @@ -174,36 +161,29 @@ def execute(self): record.add_record("target_server_used_size is {0}".format(target_server_used_size)) # get data_disk_usage_limit_percentage - sql = "SELECT VALUE FROM oceanbase.GV$OB_PARAMETERS WHERE SVR_IP='{0}' and SVR_PORT='{1}' and NAME LIKE \"data_disk_usage_limit_percentage\"".format( - target_server_ip, target_server_port) + sql = "SELECT VALUE FROM oceanbase.GV$OB_PARAMETERS WHERE SVR_IP='{0}' and SVR_PORT='{1}' and NAME LIKE \"data_disk_usage_limit_percentage\"".format(target_server_ip, target_server_port) self.verbose("execute_sql is {0}".format(sql)) data_disk_usage_limit_percentage = int(self.ob_connector.execute_sql(sql)[0][0]) # data_disk_usage_limit_percentage is a Cluster level configuration items self.verbose("data_disk_usage_limit_percentage is {0}".format(data_disk_usage_limit_percentage)) record.add_record("data_disk_usage_limit_percentage is {0}".format(data_disk_usage_limit_percentage)) - if self.observer_version == "4.3.0.0" or StringUtils.compare_versions_greater(self.observer_version, - "4.3.0.0"): - target_server_estimated_size =int(target_server_estimated_size * 15/10) + if self.observer_version == "4.3.0.0" or StringUtils.compare_versions_greater(self.observer_version, "4.3.0.0"): + target_server_estimated_size = int(target_server_estimated_size * 15 / 10) else: - target_server_estimated_size =int(target_server_estimated_size * 55/10) + target_server_estimated_size = int(target_server_estimated_size * 55 / 10) self.verbose("target_server_estimated_size is {0}".format(target_server_estimated_size)) record.add_record("target_server_estimated_size is {0}".format(target_server_estimated_size)) - - available_disk_space=int(target_server_total_size/100*data_disk_usage_limit_percentage-target_server_used_size) + available_disk_space = int(target_server_total_size / 100 * data_disk_usage_limit_percentage - target_server_used_size) self.verbose("available_disk_space is {0}".format(available_disk_space)) record.add_record("available_disk_space is {0}".format(available_disk_space)) if target_server_estimated_size - available_disk_space > 0: record.add_record("target_server_estimated_size - available_disk_space is {0}".format(target_server_estimated_size - available_disk_space)) - record.add_suggest( - "the disk space of server({0}:{1}) disk is not enough. please add the server disk".format( - target_server_ip, target_server_port)) + record.add_suggest("the disk space of server({0}:{1}) disk is not enough. please add the server disk".format(target_server_ip, target_server_port)) else: record.add_record("target_server_estimated_size - available_disk_space is {0}".format(target_server_estimated_size - available_disk_space)) - record.add_suggest( - "the disk space of server({0}:{1}) is enough. Don't warn ".format(target_server_ip, - target_server_port)) + record.add_suggest("the disk space of server({0}:{1}) is enough. 
Don't warn ".format(target_server_ip, target_server_port)) self.Result.records.append(record) except Exception as e: raise RCAExecuteException("DDlDiskFullScene execute error: {0}".format(e)) @@ -215,10 +195,11 @@ def export_result(self): def get_scene_info(self): - return {"name": "ddl_disk_full", - "info_en": "Insufficient disk space reported during DDL process. ", - "info_cn": 'DDL过程中报磁盘空间不足的问题', - } + return { + "name": "ddl_disk_full", + "info_en": "Insufficient disk space reported during DDL process. ", + "info_cn": 'DDL过程中报磁盘空间不足的问题', + } -ddl_disk_full = DDlDiskFullScene() \ No newline at end of file +ddl_disk_full = DDlDiskFullScene() diff --git a/handler/rca/scene/disconnection_scene.py b/handler/rca/scene/disconnection_scene.py index 754224a0..0b46212e 100644 --- a/handler/rca/scene/disconnection_scene.py +++ b/handler/rca/scene/disconnection_scene.py @@ -19,6 +19,7 @@ from handler.rca.rca_handler import RcaScene, RCA_ResultRecord from common.tool import StringUtils + class DisconnectionScene(RcaScene): def __init__(self): super().__init__() @@ -52,10 +53,11 @@ def export_result(self): def get_scene_info(self): # 设定场景分析的返回场景使用说明,需要的参数等等 - return {"name": "disconnection", - "info_en": "root cause analysis of disconnection", - "info_cn": "针对断链接场景的根因分析", - } + return { + "name": "disconnection", + "info_en": "root cause analysis of disconnection", + "info_cn": "针对断链接场景的根因分析", + } def __execute_obproxy_one_node(self, node): self.gather_log.grep("CONNECTION](trace_type") @@ -63,11 +65,11 @@ def __execute_obproxy_one_node(self, node): self.gather_log.set_parameters("target", "obproxy") self.gather_log.set_parameters("scope", "obproxy_diagnosis") if self.input_parameters.get("since") is not None: - since=self.input_parameters.get("since") + since = self.input_parameters.get("since") self.gather_log.set_parameters("since", since) self.work_path = self.store_dir - logs_name=self.gather_log.execute() - if len(logs_name)==0: + logs_name = self.gather_log.execute() + if len(logs_name) == 0: self.stdio.warn("not found log about disconnection. 
On node: {0}".format(node["ip"])) return self.stdio.verbose("logs_name:{0}".format(logs_name)) @@ -79,9 +81,8 @@ def __execute_obproxy_one_node(self, node): for line in log_list: try: record = RCA_ResultRecord() - record.add_record( - "node:{1} obproxy_diagnosis_log:{0}".format(line, node.get("ip"))) - log_check = DisconnectionLog(self.context,line, record) + record.add_record("node:{1} obproxy_diagnosis_log:{0}".format(line, node.get("ip"))) + log_check = DisconnectionLog(self.context, line, record) suggest = log_check.execute() record.add_suggest(suggest) self.stdio.verbose("suggest:{0}".format(suggest)) @@ -92,7 +93,7 @@ def __execute_obproxy_one_node(self, node): class DisconnectionLog: - def __init__(self,context, log, record): + def __init__(self, context, log, record): self.context = context self.stdio = context.stdio self.record = record @@ -105,14 +106,7 @@ def __init__(self,context, log, record): try: self.log = log - pattern = re.compile( - r'trace_type="(.*?)".*' - r'cs_id:(\d+).*' - r'server_session_id:(\d+).*' - r'error_code:([-0-9]+).*' - r'error_msg:"(.*?)"' - - ) + pattern = re.compile(r'trace_type="(.*?)".*' r'cs_id:(\d+).*' r'server_session_id:(\d+).*' r'error_code:([-0-9]+).*' r'error_msg:"(.*?)"') # 搜索日志条目 matches = pattern.search(log) @@ -143,7 +137,7 @@ def __init__(self,context, log, record): def execute(self): # self.get_suggest() try: - suggest = get_disconnectionSuggest(self.context,self.trace_type, self.error_code, self.error_msg, self.record) + suggest = get_disconnectionSuggest(self.context, self.trace_type, self.error_code, self.error_msg, self.record) return suggest except Exception as e: raise Exception("DisconnectionLog execute err: {0}".format(e)) @@ -155,30 +149,20 @@ def execute(self): "does not exist": "Ensure the existence of the corresponding cluster, which can be confirmed by directly connecting to ObServer", "cluster info is empty": "Directly connect to the Observer to execute the sql statement in the internal_sql field to confirm whether the cluster information returned by the Observer is empty", }, - "-4043": { - "dummy entry is empty, please check if the tenant exists": "Ensure the existence of the corresponding tenant, which can be confirmed by directly connecting to ObServer" - }, - "-8205": { - "can not pass white list": "Confirm whether the ObProxy whitelist is configured correctly through OCP" - }, - "-1227": { - "Access denied": "Confirm if the ObServer whitelist is configured correctly" - }, + "-4043": {"dummy entry is empty, please check if the tenant exists": "Ensure the existence of the corresponding tenant, which can be confirmed by directly connecting to ObServer"}, + "-8205": {"can not pass white list": "Confirm whether the ObProxy whitelist is configured correctly through OCP"}, + "-1227": {"Access denied": "Confirm if the ObServer whitelist is configured correctly"}, "-5059": { "too many sessions": "You can adjust the global configuration client_max_connections of ObProxy to temporarily avoid it.", "hold too many connections": "Need to contact the public cloud platform to adjust the connection limit for cloud tenants", - }, "-8004": { "obproxy is configured to use ssl connection": "Modify the SSL protocol configuration enable_client_ssl, or use SSL protocol access", - }, - "-10021": { "user proxyro is rejected while proxyro_check on": "Should not be used directly proxyro@sys Accessing databases", "connection with cluster name and tenant name is rejected while cloud_full_user_name_check off": "Should not be used directly 
proxyro@sys Accessing databases", "cluster name and tenant name is required while full_username_check on": "When non-cloud users turn off enable_full_user_name, ObProxy will restrict non-three-segment access", - }, "-10018": { "fail to check observer version, proxyro@sys access denied, error resp": "The password for deploying proxyro by default is not a problem. If you manually change the password for proxyro user, please ensure that the configuration of the ObProxy startup parameter is correct", @@ -186,13 +170,11 @@ def execute(self): "fail to check observer version": "Directly connect to the Observer to execute the sql statement in the internal_sql field to confirm whether the cluster information returned by the Observer is empty", "fail to check cluster info": "Directly connect to the Observer to execute the sql statement in the internal_sql field to confirm whether the cluster information returned by the Observer is empty", "fail to init server state": "Directly connect to the Observer to execute the sql statement in the internal_sql field to confirm whether the cluster information returned by the Observer is empty", - }, "-10301": { "fail to fetch root server list from config server " "fail to fetch root server list from local": "You can manually pull the url of the config_server configured at startup to confirm whether the information returned by the config server is normal", }, - }, "TIMEOUT_TRACE": { "-10022": { @@ -208,15 +190,9 @@ def execute(self): }, }, "SERVER_VC_TRACE": { - "-10013": { - "Fail to build connection to observer": "Need the cooperation of the observer for diagnosis" - }, - "-10014": { - " received while proxy transferring request": "Need the cooperation of the observer for diagnosis" - }, - "-10016": { - " received while proxy reading response": "Need the cooperation of the observer for diagnosis" - } + "-10013": {"Fail to build connection to observer": "Need the cooperation of the observer for diagnosis"}, + "-10014": {" received while proxy transferring request": "Need the cooperation of the observer for diagnosis"}, + "-10016": {" received while proxy reading response": "Need the cooperation of the observer for diagnosis"}, }, "CLIENT_VC_TRACE": { "-10010": { @@ -245,21 +221,15 @@ def execute(self): "ora fatal error": "Unexpected error scenario", "primary cluster switchover to standby, disconnect": "The possible connection loss problem during the switch between the primary and secondary databases, which is consistent with the expected scenario", }, - "-5065": { - "connection was killed by user self, cs_id": "In line with the expected scenario, the diagnostic log is recorded", - "connection was killed by user session": "In line with the expected scenario, the diagnostic log is recorded" - }, + "-5065": {"connection was killed by user self, cs_id": "In line with the expected scenario, the diagnostic log is recorded", "connection was killed by user session": "In line with the expected scenario, the diagnostic log is recorded"}, }, - } -def get_disconnectionSuggest(context,trace_type, error_code, error_msg, record): - stdio=context.stdio +def get_disconnectionSuggest(context, trace_type, error_code, error_msg, record): + stdio = context.stdio if trace_type == "" or error_code == "" or error_msg == "": - raise Exception( - "not find the suggest. Please contact the community and upload the exception information.. trace_type:{0}, error_code:{1}, error_msg:{2}".format( - trace_type, error_code, error_msg)) + raise Exception("not find the suggest. 
Please contact the community and upload the exception information.. trace_type:{0}, error_code:{1}, error_msg:{2}".format(trace_type, error_code, error_msg))
     Suggest_trace_type = DisconnectionAllSuggest.get(trace_type)
     record.add_record('trace_type:{0}'.format(trace_type))
     if Suggest_trace_type:
@@ -271,22 +241,17 @@ def get_disconnectionSuggest(context,trace_type, error_code, error_msg, record):
             for suggest_error_msg in error_msgs:
                 # substring match
                 if suggest_error_msg in error_msg:
-                    stdio.verbose(
-                        "find the suggest. trace_type:{0}, error_code:{1}, error_msg:{2}".format(trace_type, error_code,
-                                                                                                 error_msg))
+                    stdio.verbose("find the suggest. trace_type:{0}, error_code:{1}, error_msg:{2}".format(trace_type, error_code, error_msg))
                     suggest += "\n"
                     suggest += Suggest_error_code.get(suggest_error_msg)
             if suggest.strip() != "":
-                stdio.verbose(
-                    "find the suggest. trace_type:{0}, error_code:{1}, error_msg:{2}, suggest:{3}".format(trace_type,
-                                                                                                          error_code,
-                                                                                                          error_msg,
-                                                                                                          suggest.strip()))
+                stdio.verbose("find the suggest. trace_type:{0}, error_code:{1}, error_msg:{2}, suggest:{3}".format(trace_type, error_code, error_msg, suggest.strip()))
                 return suggest.strip()
             else:
                 suggest = "not find the suggest. Please contact the community and upload the exception information.. trace_type:{0}, error_code:{1}, error_msg:{2}. The suggestions are as follows. You can try using the following suggestions or submit the logs to the Oceanbase community.".format(
-                    trace_type, error_code, error_msg)
+                    trace_type, error_code, error_msg
+                )
                 suggest += "\n"

                 for error_msg_by_Suggest_error_code in Suggest_error_code:
@@ -296,4 +261,6 @@ def get_disconnectionSuggest(context,trace_type, error_code, error_msg, record):
             raise Exception("the disconnection error_code :{0} ,not support.".format(error_code))
     else:
         raise Exception("the disconnection trace_type :{0} ,not support.".format(trace_type))
-disconnection=DisconnectionScene()
\ No newline at end of file
+
+
+disconnection = DisconnectionScene()
diff --git a/handler/rca/scene/lock_conflict_scene.py b/handler/rca/scene/lock_conflict_scene.py
index 6e0ab71a..5736ecbf 100644
--- a/handler/rca/scene/lock_conflict_scene.py
+++ b/handler/rca/scene/lock_conflict_scene.py
@@ -43,49 +43,51 @@ def execute(self):

     def __execute_4_2(self):
         first_record = RCA_ResultRecord()
         # get trans_id
-        cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-            'select * from oceanbase.GV$OB_LOCKS where BLOCK=1 and TYPE="TX" limit 50;')
+        cursor = self.ob_connector.execute_sql_return_cursor_dictionary('select * from oceanbase.GV$OB_LOCKS where BLOCK=1 and TYPE="TX" limit 50;')
         data = cursor.fetchall()
         if len(data) == 0:
             first_record.add_record("on GV$OB_LOCKS result is null")
             first_record.add_suggest("No block lock found. 
Not Need Execute") self.Result.records.append(first_record) raise RCANotNeedExecuteException("No block lock found.") - first_record.add_record( - "by select * from oceanbase.GV$OB_LOCKS where BLOCK=1; the len is {0}".format(len(data))) + first_record.add_record("by select * from oceanbase.GV$OB_LOCKS where BLOCK=1; the len is {0}".format(len(data))) for OB_LOCKS_data in data: trans_record = RCA_ResultRecord() first_record_records = first_record.records.copy() trans_record.records.extend(first_record_records) self.Result.records.append(trans_record) try: - if OB_LOCKS_data.get('ID1') is None:# Holding lock session id + if OB_LOCKS_data.get('ID1') is None: # Holding lock session id trans_record.add_record("Holding lock trans_id is null") trans_record.add_suggest("Holding lock trans_id is null. can not do next") continue else: trans_id = OB_LOCKS_data['ID1'] trans_record.add_record("holding lock trans_id is {0}".format(trans_id)) - wait_lock_trans_id=OB_LOCKS_data['TRANS_ID'] - cursor_by_trans_id = self.ob_connector.execute_sql_return_cursor_dictionary( - 'select * from oceanbase.V$OB_TRANSACTION_PARTICIPANTS where TX_ID="{0}";'.format(wait_lock_trans_id)) + wait_lock_trans_id = OB_LOCKS_data['TRANS_ID'] + cursor_by_trans_id = self.ob_connector.execute_sql_return_cursor_dictionary('select * from oceanbase.V$OB_TRANSACTION_PARTICIPANTS where TX_ID="{0}";'.format(wait_lock_trans_id)) self.stdio.verbose("get SESSION_ID by trans_id:{0}".format(trans_id)) trans_record.add_record("wait_lock_trans_id is {0}".format(wait_lock_trans_id)) session_datas = cursor_by_trans_id.fetchall() - trans_record.add_record( - "get SESSION_ID by wait_lock_trans_id:{0}. get data:{0}".format(trans_id, session_datas)) + trans_record.add_record("get SESSION_ID by wait_lock_trans_id:{0}. get data:{0}".format(trans_id, session_datas)) if len(session_datas) != 1: - trans_record.add_suggest("wait_lock_session_id is not get. The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id)) + trans_record.add_suggest( + "wait_lock_session_id is not get. The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id) + ) continue if session_datas[0].get("SESSION_ID") is not None: trans_record.add_record("get SESSION_ID:{0}".format(session_datas[0].get("SESSION_ID"))) - trans_record.add_suggest("Sessions corresponding to lock transactions. The ID is {0}, " - "which may be a lock conflict issue.You can be accessed through kill " - "session to rollback the corresponding transaction with ID. Please " - "note that this will result in corresponding transaction regression! " - "".format(session_datas[0].get("SESSION_ID"))) + trans_record.add_suggest( + "Sessions corresponding to lock transactions. The ID is {0}, " + "which may be a lock conflict issue.You can be accessed through kill " + "session to rollback the corresponding transaction with ID. Please " + "note that this will result in corresponding transaction regression! " + "".format(session_datas[0].get("SESSION_ID")) + ) else: - trans_record.add_record("wait_lock_session_id is not get. The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id)) + trans_record.add_record( + "wait_lock_session_id is not get. 
The holding lock trans_id is {0}. You can resolve lock conflicts by killing this locked session, but this may cause business exceptions. Please use with caution.".format(trans_id) + ) except Exception as e: trans_record.add_record("get SESSION_ID panic. OB_LOCKS_data:{0} error: {1}".format(OB_LOCKS_data, e)) @@ -95,17 +97,14 @@ def __execute_4_2(self): def __execute_old(self): first_record = RCA_ResultRecord() - cursor = self.ob_connector.execute_sql_return_cursor_dictionary( - "select * from oceanbase.__all_virtual_lock_wait_stat order by try_lock_times limit 50;") + cursor = self.ob_connector.execute_sql_return_cursor_dictionary("select * from oceanbase.__all_virtual_lock_wait_stat order by try_lock_times limit 50;") virtual_lock_wait_stat_datas = cursor.fetchall() if len(virtual_lock_wait_stat_datas) == 0: first_record.add_record("on __all_virtual_trans_stat result is null") first_record.add_suggest("No block lock found. Not Need Execute") self.Result.records.append(first_record) raise RCANotNeedExecuteException("No block lock found.") - first_record.add_record( - "by select * from oceanbase.__all_virtual_lock_wait_stat order by try_lock_times limit 50; the len is {0}".format( - len(virtual_lock_wait_stat_datas))) + first_record.add_record("by select * from oceanbase.__all_virtual_lock_wait_stat order by try_lock_times limit 50; the len is {0}".format(len(virtual_lock_wait_stat_datas))) for trans_lock_data in virtual_lock_wait_stat_datas: trans_id = trans_lock_data["block_session_id"] @@ -115,19 +114,22 @@ def __execute_old(self): self.Result.records.append(trans_record) trans_record.add_record("block_data is {0}".format(trans_lock_data)) trans_record.add_record("block_session_id is {0}".format(trans_id)) - trans_record.add_suggest("Sessions corresponding to lock transactions. The ID is {0}, " - "which may be a lock conflict issue.You can be accessed through kill " - "session_Roll back the corresponding transaction with ID. Please " - "note that this will result in corresponding transaction regression! " - "".format(trans_lock_data.get("block_session_id"))) + trans_record.add_suggest( + "Sessions corresponding to lock transactions. The ID is {0}, " + "which may be a lock conflict issue.You can be accessed through kill " + "session_Roll back the corresponding transaction with ID. Please " + "note that this will result in corresponding transaction regression! " + "".format(trans_lock_data.get("block_session_id")) + ) return def get_scene_info(self): - return {"name": "lock_conflict", - "info_en": "root cause analysis of lock conflict", - "info_cn": "针对锁冲突的根因分析", - } + return { + "name": "lock_conflict", + "info_en": "root cause analysis of lock conflict", + "info_cn": "针对锁冲突的根因分析", + } def export_result(self): return self.Result.export() diff --git a/handler/rca/scene/log_error_scene.py b/handler/rca/scene/log_error_scene.py index 39ab8929..e4d8f2df 100644 --- a/handler/rca/scene/log_error_scene.py +++ b/handler/rca/scene/log_error_scene.py @@ -89,30 +89,20 @@ def execute_by_tenant_id(self, tenant_id, record): return record.add_record("start step2") step_next_tag = True - ls_ids = self.ob_connector.execute_sql( - "select distinct (ls_id) from oceanbase.__all_virtual_log_stat where tenant_id={0};".format(tenant_id)) + ls_ids = self.ob_connector.execute_sql("select distinct (ls_id) from oceanbase.__all_virtual_log_stat where tenant_id={0};".format(tenant_id)) if ls_ids is None or len(ls_ids) <= 0: self.stdio.warn("not found log about election_leader. 
tenant_id: {0}".format(tenant_id)) - record.add_suggest( - "not found log on oceanbase.__all_virtual_log_stat. tenant_id: {0}".format(tenant_id)) + record.add_suggest("not found log on oceanbase.__all_virtual_log_stat. tenant_id: {0}".format(tenant_id)) return for ls_id in ls_ids: ls_id = ls_id[0] - leader_ls_id_bool = self.ob_connector.execute_sql( - 'select count(0) from oceanbase.__all_virtual_log_stat where role="LEADER" and tenant_id={0} and ls_id="{1}";'.format( - tenant_id, ls_id)) + leader_ls_id_bool = self.ob_connector.execute_sql('select count(0) from oceanbase.__all_virtual_log_stat where role="LEADER" and tenant_id={0} and ls_id="{1}";'.format(tenant_id, ls_id)) leader_ls_id_bool = leader_ls_id_bool[0] if leader_ls_id_bool <= 0: - record.add_record( - "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, - ls_id)) - record.add_suggest( - "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, - ls_id)) - self.stdio.warn( - "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, - ls_id)) + record.add_record("tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, ls_id)) + record.add_suggest("tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, ls_id)) + self.stdio.warn("tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_log_stat no LEADER".format(tenant_id, ls_id)) step_next_tag = False if step_next_tag is False: @@ -120,7 +110,6 @@ def execute_by_tenant_id(self, tenant_id, record): return return - except Exception as e: self.stdio.warn("execute_by_tenant_id:{0} Exception:{1}".format(tenant_id, e)) @@ -155,7 +144,7 @@ def execute_421(self): self.verbose("tenant_diagnose_data:{0}".format(tenant_diagnose_data)) self.stdio.start_loading("no_leader scene start analyzing...") for tenant_id in tenant_diagnose_data: - record_one_tenant=self.execute_421_no_leader_by_tenant_id(tenant_id, tenant_diagnose_data[tenant_id]) + record_one_tenant = self.execute_421_no_leader_by_tenant_id(tenant_id, tenant_diagnose_data[tenant_id]) self.Result.records.append(record_one_tenant) self.stdio.stop_loading('no_leader scene end') return @@ -163,39 +152,39 @@ def execute_421(self): except Exception as e: raise RCAExecuteException("execute_421 execute error: {0}".format(e)) - def execute_421_no_leader_by_tenant_id(self, tenant_id,diagnose_data): + def execute_421_no_leader_by_tenant_id(self, tenant_id, diagnose_data): record = RCA_ResultRecord() try: self.stdio.verbose("start execute_421_no_leader_by_tenant_id") record.add_record("tenant_id: {0}.".format(tenant_id)) - leader_nu={} + leader_nu = {} record.add_record("start step1") for diagnose_data_by_tenant_id in diagnose_data: if diagnose_data_by_tenant_id["election_role"].upper() == "LEADER": - leader_nu[diagnose_data_by_tenant_id["ls_id"]] = leader_nu.get( - diagnose_data_by_tenant_id["ls_id"], 0) + 1 + leader_nu[diagnose_data_by_tenant_id["ls_id"]] = leader_nu.get(diagnose_data_by_tenant_id["ls_id"], 0) + 1 else: - leader_nu[diagnose_data_by_tenant_id["ls_id"]] = leader_nu.get( - diagnose_data_by_tenant_id["ls_id"], 0) + leader_nu[diagnose_data_by_tenant_id["ls_id"]] = leader_nu.get(diagnose_data_by_tenant_id["ls_id"], 0) record.add_record("all ls_id:{0}".format(list(leader_nu.keys()))) self.verbose("all ls_id:{0}".format(list(leader_nu.keys()))) - scene_1_tag=True + scene_1_tag = True for ls_id in leader_nu: record.add_record("on ls_id: {1} ".format(tenant_id, 
ls_id)) self.verbose("on tenant_id: {0}, ls_id: {1} ".format(tenant_id, ls_id)) if leader_nu[ls_id] > 1: self.stdio.warn("the leader number > 1") record.add_record("the ls_id's leader number > 1") - record.add_suggest( - "tenant_id: {0}, ls_id: {1} .the ls_id's leader number > 1".format(tenant_id, ls_id)) + record.add_suggest("tenant_id: {0}, ls_id: {1} .the ls_id's leader number > 1".format(tenant_id, ls_id)) scene_1_tag = False continue elif leader_nu[ls_id] == 0: self.stdio.warn( - "the leader number = 0,The election layer is unable to select a new owner, and a common problem in this scenario is that the message delay is too large. You can continue to troubleshoot the problem of message delay or backlog in the log") + "the leader number = 0,The election layer is unable to select a new owner, and a common problem in this scenario is that the message delay is too large. You can continue to troubleshoot the problem of message delay or backlog in the log" + ) record.add_suggest( "tenant_id: {0}, ls_id: {1} .the leader number = 0. The election layer is unable to select a new owner, and a common problem in this scenario is that the message delay is too large. You can continue to troubleshoot the problem of message delay or backlog in the log".format( - tenant_id, ls_id)) + tenant_id, ls_id + ) + ) scene_1_tag = False continue else: @@ -213,35 +202,29 @@ def execute_421_no_leader_by_tenant_id(self, tenant_id,diagnose_data): for tenant_diagnose_data_by_tenant_id in diagnose_data: ls_id = tenant_diagnose_data_by_tenant_id["ls_id"] record.add_record("on ls_id: {1} ".format(tenant_id, ls_id)) - if tenant_diagnose_data_by_tenant_id["election_role"].upper() == "LEADER" and \ - tenant_diagnose_data_by_tenant_id["palf_role"].upper() != "LEADER" and \ - tenant_diagnose_data_by_tenant_id["palf_state"].upper() != "ACTIVE": + if tenant_diagnose_data_by_tenant_id["election_role"].upper() == "LEADER" and tenant_diagnose_data_by_tenant_id["palf_role"].upper() != "LEADER" and tenant_diagnose_data_by_tenant_id["palf_state"].upper() != "ACTIVE": self.stdio.warn( "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_ha_diagnose election_role is LEADER but palf_role is {2} and palf_state is {3}".format( - tenant_id, - ls_id, - tenant_diagnose_data_by_tenant_id["palf_role"], - tenant_diagnose_data_by_tenant_id["palf_state"])) + tenant_id, ls_id, tenant_diagnose_data_by_tenant_id["palf_role"], tenant_diagnose_data_by_tenant_id["palf_state"] + ) + ) record.add_record( "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_ha_diagnose election_role is LEADER but palf_role is {2} and palf_state is {3}".format( - tenant_id, - ls_id, - tenant_diagnose_data_by_tenant_id["palf_role"], - tenant_diagnose_data_by_tenant_id["palf_state"])) + tenant_id, ls_id, tenant_diagnose_data_by_tenant_id["palf_role"], tenant_diagnose_data_by_tenant_id["palf_state"] + ) + ) record.add_suggest( "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_ha_diagnose election_role is LEADER but palf_role is {2} and palf_state is {3}. 
The newly elected leader failed to take office in the palf layer, and the palf_state can be used to determine at which stage the palf failed to take office.".format( - tenant_id, - ls_id, - tenant_diagnose_data_by_tenant_id["palf_role"], - tenant_diagnose_data_by_tenant_id["palf_state"])) + tenant_id, ls_id, tenant_diagnose_data_by_tenant_id["palf_role"], tenant_diagnose_data_by_tenant_id["palf_state"] + ) + ) scene_2_tag = False else: self.verbose( "tenant_id: {0}, ls_id: {1} on oceanbase.__all_virtual_ha_diagnose election_role is LEADER , palf_role is {2} and palf_state is {3}".format( - tenant_id, - ls_id, - tenant_diagnose_data_by_tenant_id["palf_role"], - tenant_diagnose_data_by_tenant_id["palf_state"])) + tenant_id, ls_id, tenant_diagnose_data_by_tenant_id["palf_role"], tenant_diagnose_data_by_tenant_id["palf_state"] + ) + ) record.add_record("Normal. Unable to find a replica where both election_role and palf_role are leaders, but log_handler_role is follower") continue if scene_2_tag is False: @@ -251,44 +234,30 @@ def execute_421_no_leader_by_tenant_id(self, tenant_id,diagnose_data): record.add_record("start step3") for tenant_diagnose_data_by_tenant_id in diagnose_data: - record.add_record( - "tenant_id: {0}, ls_id: {1} ".format(tenant_diagnose_data_by_tenant_id["tenant_id"], - tenant_diagnose_data_by_tenant_id["ls_id"])) - if tenant_diagnose_data_by_tenant_id["election_role"].upper() == "LEADER" and \ - tenant_diagnose_data_by_tenant_id["palf_role"].upper() == "LEADER" and \ - tenant_diagnose_data_by_tenant_id["log_handler_role"].upper() == "follower": + record.add_record("tenant_id: {0}, ls_id: {1} ".format(tenant_diagnose_data_by_tenant_id["tenant_id"], tenant_diagnose_data_by_tenant_id["ls_id"])) + if tenant_diagnose_data_by_tenant_id["election_role"].upper() == "LEADER" and tenant_diagnose_data_by_tenant_id["palf_role"].upper() == "LEADER" and tenant_diagnose_data_by_tenant_id["log_handler_role"].upper() == "follower": record.add_record("election_role:LEADER , palf_role: LEADER, log_handler_role: follower") - log_handler_takeover_state = tenant_diagnose_data_by_tenant_id[ - "log_handler_takeover_state"].lower() + log_handler_takeover_state = tenant_diagnose_data_by_tenant_id["log_handler_takeover_state"].lower() record.add_record("log_handler_takeover_state: {0}".format(log_handler_takeover_state)) if log_handler_takeover_state == "wait_replay_done": - record.add_suggest( - "Previous stuck waiting for replay steps. Please check the issue about replay") + record.add_suggest("Previous stuck waiting for replay steps. Please check the issue about replay") elif log_handler_takeover_state == "unknown": - record.add_suggest( - "Please check observe whether the remaining log streams of this tenant also have the issue of log handler failure in taking over") + record.add_suggest("Please check observe whether the remaining log streams of this tenant also have the issue of log handler failure in taking over") elif log_handler_takeover_state == "wait_rc_handler_done": - log_handler_takeover_log_type = tenant_diagnose_data_by_tenant_id[ - "log_handler_takeover_log_type"] - record.add_record( - "log_handler_takeover_log_type: {0}".format(log_handler_takeover_log_type)) - record.add_suggest( - "log_handler_takeover_log_type is {0}. 
Please report oceanbase's community".format( - log_handler_takeover_log_type)) + log_handler_takeover_log_type = tenant_diagnose_data_by_tenant_id["log_handler_takeover_log_type"] + record.add_record("log_handler_takeover_log_type: {0}".format(log_handler_takeover_log_type)) + record.add_suggest("log_handler_takeover_log_type is {0}. Please report oceanbase's community".format(log_handler_takeover_log_type)) else: record.add_record("Normal.Unable to find a replica where the selection_role is a leader, but the palf_role and palf_state are not leaders or active, respectively") if record.suggest_is_empty(): record.add_suggest("Normal. Not find the reason of the log handler failure in taking over.") except Exception as e: - raise RCAExecuteException("tenant_id: {0}. execute_421_no_leader_by_tenant_id execute error: {1}".format(tenant_id,e)) + raise RCAExecuteException("tenant_id: {0}. execute_421_no_leader_by_tenant_id execute error: {1}".format(tenant_id, e)) finally: return record - - - def check_election_leader_by_tenant_id(self, tenant_id): try: self.stdio.verbose("start check_election_leader_by_tenant_id") @@ -297,12 +266,9 @@ def check_election_leader_by_tenant_id(self, tenant_id): self.work_path = self.store_dir logs_name = self.gather_log.execute() if len(logs_name) == 0: - self.stdio.warn( - "check_election_leader_by_tenant_id not found log about election_leader. tenant_id: {0}".format( - tenant_id)) + self.stdio.warn("check_election_leader_by_tenant_id not found log about election_leader. tenant_id: {0}".format(tenant_id)) return "" - self.stdio.verbose( - "check_election_leader_by_tenant_id tenant_id: {0}, logs_name:{1}".format(tenant_id, logs_name)) + self.stdio.verbose("check_election_leader_by_tenant_id tenant_id: {0}, logs_name:{1}".format(tenant_id, logs_name)) for name in logs_name: self.stdio.verbose("read the log file: {0}".format(name)) with open(name, 'rb') as file: @@ -318,18 +284,18 @@ def check_election_leader_by_tenant_id(self, tenant_id): else: return "" except Exception as e: - raise RCAExecuteException( - "check_election_leader_by_tenant_id: {1}. execute error: {0}".format(e, tenant_id)) + raise RCAExecuteException("check_election_leader_by_tenant_id: {1}. execute error: {0}".format(e, tenant_id)) def export_result(self): super().export_result() def get_scene_info(self): - return {"name": "log_error", - "info_en": "Troubleshooting log related issues. Currently supported scenes: no_leader.", - "info_cn": '日志相关问题排查。目前支持:无主场景。', - } + return { + "name": "log_error", + "info_en": "Troubleshooting log related issues. 
Currently supported scenes: no_leader.", + "info_cn": '日志相关问题排查。目前支持:无主场景。', + } -log_error = LogErrorScene() \ No newline at end of file +log_error = LogErrorScene() diff --git a/handler/rca/scene/major_hold_scene.py b/handler/rca/scene/major_hold_scene.py index 7ebfb93d..48300b6b 100644 --- a/handler/rca/scene/major_hold_scene.py +++ b/handler/rca/scene/major_hold_scene.py @@ -49,8 +49,7 @@ def execute(self): err_tenant_ids = [] # 合并任务是否有报错 try: - COMPACTING_data = self.ob_connector.execute_sql( - 'select * from oceanbase.CDB_OB_MAJOR_COMPACTION where IS_ERROR="YES";') + COMPACTING_data = self.ob_connector.execute_sql('select * from oceanbase.CDB_OB_MAJOR_COMPACTION where IS_ERROR="YES";') if len(COMPACTING_data) == 0: first_record.add_record("CDB_OB_MAJOR_COMPACTION is not exist IS_ERROR='YES'") else: @@ -59,8 +58,7 @@ def execute(self): for data in COMPACTING_data: CDB_OB_MAJOR_COMPACTION_err_tenant_ids.append(str(data[0])) - first_record.add_record( - "CDB_OB_MAJOR_COMPACTION have IS_ERROR='YES',the tenant_ids are {0}".format(err_tenant_ids)) + first_record.add_record("CDB_OB_MAJOR_COMPACTION have IS_ERROR='YES',the tenant_ids are {0}".format(err_tenant_ids)) err_tenant_ids.extend(CDB_OB_MAJOR_COMPACTION_err_tenant_ids) except Exception as e: @@ -68,8 +66,7 @@ def execute(self): raise RCAExecuteException("MajorHoldScene execute CDB_OB_MAJOR_COMPACTION panic: {0}".format(e)) # __all_virtual_compaction_diagnose_info里存在status=FAILED的记录 try: - diagnose_data = self.ob_connector.execute_sql( - 'select * from oceanbase.__all_virtual_compaction_diagnose_info where status="FAILED";') + diagnose_data = self.ob_connector.execute_sql('select * from oceanbase.__all_virtual_compaction_diagnose_info where status="FAILED";') if len(diagnose_data) == 0: first_record.add_record('__all_virtual_compaction_diagnose_info is not exist status="FAILED";') else: @@ -78,17 +75,14 @@ def execute(self): for data in COMPACTING_data: __all_virtual_compaction_diagnose_info_err_tenant_ids.append(str(data[0])) - first_record.add_record( - "__all_virtual_compaction_diagnose_info have status='FAILED',the tenant is {0}".format( - __all_virtual_compaction_diagnose_info_err_tenant_ids)) + first_record.add_record("__all_virtual_compaction_diagnose_info have status='FAILED',the tenant is {0}".format(__all_virtual_compaction_diagnose_info_err_tenant_ids)) err_tenant_ids.extend(__all_virtual_compaction_diagnose_info_err_tenant_ids) except Exception as e: self.stdio.error("MajorHoldScene execute CDB_OB_MAJOR_COMPACTION panic: {0}".format(e)) raise RCAExecuteException("MajorHoldScene execute CDB_OB_MAJOR_COMPACTION panic: {0}".format(e)) # GV$OB_COMPACTION_PROGRESS表中,根据上一次合并记录中的data_size/(estimated_finish_time-start_time)与当前合并版本记录中(data_size-unfinished_data_size)/(当前时间-start_time)相比,如果差距过大(当前合并比上一次合并慢很多,以5倍为指标) try: - running_data = self.ob_connector.execute_sql( - "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where STATUS <> 'FINISH' and START_TIME <= NOW() - INTERVAL 20 minute GROUP BY COMPACTION_SCN DESC;") + running_data = self.ob_connector.execute_sql("select * from oceanbase.GV$OB_COMPACTION_PROGRESS where STATUS <> 'FINISH' and START_TIME <= NOW() - INTERVAL 20 minute GROUP BY COMPACTION_SCN DESC;") if len(running_data) == 0: first_record.add_record('No merge tasks that have not ended beyond the expected time') else: @@ -97,12 +91,8 @@ def execute(self): need_tag = True for data in running_data: time_out_merge_err_tenant_ids.append(str(data[2])) - first_record.add_record( - "merge tasks that have not ended beyond the 
expected time,the tenant_id is {0}".format( - time_out_merge_err_tenant_ids)) - self.stdio.verbose( - "merge tasks that have not ended beyond the expected time,the tenant_id is {0}".format( - time_out_merge_err_tenant_ids)) + first_record.add_record("merge tasks that have not ended beyond the expected time,the tenant_id is {0}".format(time_out_merge_err_tenant_ids)) + self.stdio.verbose("merge tasks that have not ended beyond the expected time,the tenant_id is {0}".format(time_out_merge_err_tenant_ids)) err_tenant_ids.extend(time_out_merge_err_tenant_ids) except Exception as e: self.stdio.error("MajorHoldScene execute GV$OB_COMPACTION_PROGRESS panic: {0}".format(e)) @@ -125,19 +115,13 @@ def execute(self): tenant_record.add_record("tenant_id is {0}".format(err_tenant_id)) # 1 try: - cursor = self.ob_connector.execute_sql_return_cursor_dictionary( - 'SELECT * FROM oceanbase.CDB_OB_MAJOR_COMPACTION WHERE TENANT_ID= "{0}" AND (IS_ERROR = "NO" OR IS_SUSPENDED = "NO");'.format( - err_tenant_id)) + cursor = self.ob_connector.execute_sql_return_cursor_dictionary('SELECT * FROM oceanbase.CDB_OB_MAJOR_COMPACTION WHERE TENANT_ID= "{0}" AND (IS_ERROR = "NO" OR IS_SUSPENDED = "NO");'.format(err_tenant_id)) OB_MAJOR_COMPACTION_data = cursor.fetchall() if len(OB_MAJOR_COMPACTION_data) == 0: - tenant_record.add_record( - "on CDB_OB_MAJOR_COMPACTION where status='COMPACTING'; " - "result:{0} , need not next step".format(str(OB_MAJOR_COMPACTION_data))) + tenant_record.add_record("on CDB_OB_MAJOR_COMPACTION where status='COMPACTING'; " "result:{0} , need not next step".format(str(OB_MAJOR_COMPACTION_data))) else: - tenant_record.add_record( - "on CDB_OB_MAJOR_COMPACTION where status='COMPACTING';" - "result:{0}".format(str(OB_MAJOR_COMPACTION_data))) + tenant_record.add_record("on CDB_OB_MAJOR_COMPACTION where status='COMPACTING';" "result:{0}".format(str(OB_MAJOR_COMPACTION_data))) except Exception as e: tenant_record.add_record("#1 on CDB_OB_MAJOR_COMPACTION get data failed") @@ -145,16 +129,12 @@ def execute(self): pass # 2 try: - compaction_diagnose_info = self.ob_connector.execute_sql( - 'SELECT * FROM oceanbase.__all_virtual_compaction_diagnose_info WHERE status="FAILED";') + compaction_diagnose_info = self.ob_connector.execute_sql('SELECT * FROM oceanbase.__all_virtual_compaction_diagnose_info WHERE status="FAILED";') if len(compaction_diagnose_info) == 0: - tenant_record.add_record( - "on __all_virtual_compaction_diagnose_info no data status=FAILED") + tenant_record.add_record("on __all_virtual_compaction_diagnose_info no data status=FAILED") else: - tenant_record.add_record( - "on __all_virtual_compaction_diagnose_info;" - "result:{0}".format(str(compaction_diagnose_info))) + tenant_record.add_record("on __all_virtual_compaction_diagnose_info;" "result:{0}".format(str(compaction_diagnose_info))) for COMPACTING_data in compaction_diagnose_info: self.diagnose_info_switch(COMPACTING_data, tenant_record) @@ -166,39 +146,26 @@ def execute(self): # 4 try: - global_broadcast_scn = self.ob_connector.execute_sql( - "select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(err_tenant_id))[ - 0][3] + global_broadcast_scn = self.ob_connector.execute_sql("select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(err_tenant_id))[0][3] tenant_record.add_record("global_broadcast_scn is {0}".format(global_broadcast_scn)) - last_scn = self.ob_connector.execute_sql( - "select LAST_SCN from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format( - err_tenant_id))[0] 
+ last_scn = self.ob_connector.execute_sql("select LAST_SCN from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(err_tenant_id))[0] tenant_record.add_record("last_scn is {0}".format(last_scn)) - sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format( - err_tenant_id, global_broadcast_scn) + sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format(err_tenant_id, global_broadcast_scn) OB_COMPACTION_PROGRESS_data_global_broadcast_scn = self.ob_connector.execute_sql(sql) - file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_global_broadcast_scn".format( - self.local_path, err_tenant_id) + file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_global_broadcast_scn".format(self.local_path, err_tenant_id) with open(file_name, 'w') as f: f.write(str(OB_COMPACTION_PROGRESS_data_global_broadcast_scn)) - tenant_record.add_record( - "tenant_id:{0} OB_COMPACTION_PROGRESS_data_global_broadcast_scn save on {1}".format(err_tenant_id, - file_name)) + tenant_record.add_record("tenant_id:{0} OB_COMPACTION_PROGRESS_data_global_broadcast_scn save on {1}".format(err_tenant_id, file_name)) - sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format( - err_tenant_id, last_scn) + sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and COMPACTION_SCN='{1}';".format(err_tenant_id, last_scn) OB_COMPACTION_PROGRESS_data_last_scn = self.ob_connector.execute_sql(sql) - file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_last_scn".format( - self.local_path, err_tenant_id) + file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_PROGRESS_data_last_scn".format(self.local_path, err_tenant_id) with open(file_name, 'w') as f: f.write(str(OB_COMPACTION_PROGRESS_data_last_scn)) - tenant_record.add_record( - "tenant_id:{0} OB_COMPACTION_PROGRESS_data_last_scn save on {1}".format(err_tenant_id, - file_name)) + tenant_record.add_record("tenant_id:{0} OB_COMPACTION_PROGRESS_data_last_scn save on {1}".format(err_tenant_id, file_name)) - sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and STATUS<>'FINISH';".format( - err_tenant_id, global_broadcast_scn) + sql = "select * from oceanbase.GV$OB_COMPACTION_PROGRESS where TENANT_ID='{0}' and STATUS<>'FINISH';".format(err_tenant_id, global_broadcast_scn) finish_data = self.ob_connector.execute_sql(sql) if len(finish_data) == 0: tenant_record.add_record("sql:{0},len of result is 0;result:{1}".format(sql, finish_data)) @@ -213,15 +180,11 @@ def execute(self): node = observer_node ssh_helper = observer_node["ssher"] if node == None: - self.stdio.error( - "can not find ls_svr by TENANT_ID:{2} ip:{0},port:{1}".format(svr_ip, svr_port, - err_tenant_id)) + self.stdio.error("can not find ls_svr by TENANT_ID:{2} ip:{0},port:{1}".format(svr_ip, svr_port, err_tenant_id)) break log_name = "/tmp/major_hold_scene_4_major_merge_progress_checker_{0}.log".format(err_tenant_id) - ssh_helper.ssh_exec_cmd( - 'grep "major_merge_progress_checker" {0}/log/rootservice.log* | grep T{1} -m500 >{2}'.format( - node.get("home_path"), err_tenant_id, log_name)) + ssh_helper.ssh_exec_cmd('grep "major_merge_progress_checker" {0}/log/rootservice.log* | grep T{1} -m500 >{2}'.format(node.get("home_path"), err_tenant_id, log_name)) ssh_helper.download(log_name, self.local_path) tenant_record.add_record("download {0} to {1}".format(log_name, self.local_path)) 
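# A minimal, self-contained sketch (not part of this patch) of the slowdown
# heuristic described by the comment at the top of execute(): compare the
# previous merge round's speed, data_size/(estimated_finish_time-start_time),
# with the current round's, (data_size-unfinished_data_size)/(now-start_time),
# and flag the tenant when the current round is over 5x slower. The dict keys
# below are assumed stand-ins for GV$OB_COMPACTION_PROGRESS columns (bytes and
# epoch seconds), not the literal schema.
def merge_looks_stuck(prev_round, cur_round, now_s, slow_factor=5):
    # Previous, finished round: bytes merged per second.
    prev_speed = prev_round["data_size"] / max(prev_round["estimated_finish_time"] - prev_round["start_time"], 1)
    # Current, still-running round: completed bytes per elapsed second.
    done = cur_round["data_size"] - cur_round["unfinished_data_size"]
    cur_speed = done / max(now_s - cur_round["start_time"], 1)
    # True when the current merge is more than slow_factor times slower.
    return cur_speed * slow_factor < prev_speed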
                    ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name))
@@ -231,18 +194,14 @@ def execute(self):
 
             # 5
             try:
-                cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-                    'select * from oceanbase.GV$OB_COMPACTION_SUGGESTIONS where tenant_id="{0}";'.format(err_tenant_id))
+                cursor = self.ob_connector.execute_sql_return_cursor_dictionary('select * from oceanbase.GV$OB_COMPACTION_SUGGESTIONS where tenant_id="{0}";'.format(err_tenant_id))
                 columns = [column[0] for column in cursor.description]
                 OB_COMPACTION_SUGGESTIONS_data = cursor.fetchall()
                 OB_COMPACTION_SUGGESTIONS_info = json.dumps(OB_COMPACTION_SUGGESTIONS_data, cls=DateTimeEncoder)
-                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_SUGGESTIONS_info".format(
-                    self.local_path, err_tenant_id)
+                file_name = "{0}/rca_major_hold_{1}_OB_COMPACTION_SUGGESTIONS_info".format(self.local_path, err_tenant_id)
                 with open(file_name, 'w') as f:
                     f.write(str(OB_COMPACTION_SUGGESTIONS_info))
-                tenant_record.add_record(
-                    "tenant_id:{0} OB_COMPACTION_PROGRESS_data_last_scn save on {1}".format(err_tenant_id,
-                                                                                            file_name))
+                tenant_record.add_record("tenant_id:{0} OB_COMPACTION_SUGGESTIONS_info save on {1}".format(err_tenant_id, file_name))
             except Exception as e:
                 self.stdio.warn("MajorHoldScene execute 5 exception: {0}".format(e))
@@ -251,21 +210,16 @@ def execute(self):
 
     def get_info__all_virtual_compaction_diagnose_info(self, tenant_record):
         try:
-            COMPACTING_datas = self.ob_connector.execute_sql(
-                "SELECT * FROM oceanbase.__all_virtual_compaction_diagnose_info WHERE IS_ERROR = 'NO' OR IS_SUSPENDED = 'NO';")
+            COMPACTING_datas = self.ob_connector.execute_sql("SELECT * FROM oceanbase.__all_virtual_compaction_diagnose_info WHERE IS_ERROR = 'NO' OR IS_SUSPENDED = 'NO';")
             if len(COMPACTING_datas) == 0:
-                tenant_record.add_record(
-                    "sql:select * from oceanbase.__all_virtual_compaction_diagnose_info; no data")
+                tenant_record.add_record("sql:select * from oceanbase.__all_virtual_compaction_diagnose_info; no data")
                 return
             else:
-                tenant_record.add_record(
-                    "sql:select * from oceanbase.CDB_OB_MAJOR_COMPACTION where status=COMPACTING; "
-                    "result:{0}".format(str(COMPACTING_datas)))
+                tenant_record.add_record("sql:select * from oceanbase.__all_virtual_compaction_diagnose_info where IS_ERROR = 'NO' or IS_SUSPENDED = 'NO'; result:{0}".format(str(COMPACTING_datas)))
                 for index, COMPACTING_data in COMPACTING_datas:
                     self.diagnose_info_switch(COMPACTING_data)
         except Exception as e:
-            raise RCAExecuteException(
-                "MajorHoldScene execute get_info__all_virtual_compaction_diagnose_info exception: {0}".format(e))
+            raise RCAExecuteException("MajorHoldScene execute get_info__all_virtual_compaction_diagnose_info exception: {0}".format(e))
 
     def diagnose_info_switch(self, sql_data, tenant_record):
         svr_ip = sql_data[0]
@@ -285,15 +239,9 @@ def diagnose_info_switch(self, sql_data, tenant_record):
             if node is None:
                 raise RCAExecuteException("can not find observer node by ip:{0}, port:{1}".format(svr_ip, svr_port))
-            log_name = "/tmp/rca_major_hold_schedule_medium_failed_{1}_{2}_{0}.txt".format(tenant_id, svr_ip,
-                                                                                           svr_port)
-            tenant_record.add_record(
-                "diagnose_info type is 'schedule medium failed'. time is {0},observer is {1}:{2},the log is {3}".format(
-                    create_time, svr_ip, svr_port, log_name))
-            ssh_helper.ssh_exec_cmd(
-                'grep "schedule_medium_failed" {1}/log/observer.log* |grep -P "\[\d+\]" -m 1 -o >{0}'.format(log_name,
-                                                                                                             node.get(
-                                                                                                                 "home_path")))
+            log_name = "/tmp/rca_major_hold_schedule_medium_failed_{1}_{2}_{0}.txt".format(tenant_id, svr_ip, svr_port)
+            tenant_record.add_record("diagnose_info type is 'schedule medium failed'. time is {0},observer is {1}:{2},the log is {3}".format(create_time, svr_ip, svr_port, log_name))
+            ssh_helper.ssh_exec_cmd('grep "schedule_medium_failed" {1}/log/observer.log* |grep -P "\[\d+\]" -m 1 -o >{0}'.format(log_name, node.get("home_path")))
             ssh_helper.download(log_name, local_path=self.local_path)
             tenant_record.add_record("download {0} to {1}".format(log_name, self.local_path))
             ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name))
@@ -302,20 +250,21 @@ def diagnose_info_switch(self, sql_data, tenant_record):
             err_no = re.search("\berror_no=(\d+)\b", diagnose_info).group(1)
             err_trace = re.search("\berror_trace=(.+)\b", diagnose_info).group(1)
 
-            global_broadcast_scn = self.ob_connector.execute_sql(
-                "select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(tenant_id))[0][3]
-            compaction_scn = self.ob_connector.execute_sql(
-                "select * from oceanbase.__all_virtual_tablet_meta_table where tablet_id='{0}' and tenant_id='{1}';".format(
-                    table_id, tenant_id))[0][7]
+            global_broadcast_scn = self.ob_connector.execute_sql("select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(tenant_id))[0][3]
+            compaction_scn = self.ob_connector.execute_sql("select * from oceanbase.__all_virtual_tablet_meta_table where tablet_id='{0}' and tenant_id='{1}';".format(table_id, tenant_id))[0][7]
             if compaction_scn > global_broadcast_scn:
                 tenant_record.add_record(
                     "diagnose_info type is error_no. error_no: {0}, err_trace: {1} , table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn>global_broadcast_scn".format(
-                        err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn))
+                        err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn
+                    )
+                )
                 return
             else:
                 tenant_record.add_record(
                     "diagnose_info type is error_no. error_no: {0}, err_trace:{1}, table_id:{2}, tenant_id:{3}, compaction_scn: {4}, global_broadcast_scn: {5}. compaction_scn<global_broadcast_scn".format(
-                        err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn))
-                log_name = "/tmp/rca_error_no_{1}_{2}_{0}.txt".format(tenant_id, svr_ip,
-                                                                      svr_port)
-                ssh_helper.ssh_exec_cmd(
-                    "grep \"{0}\" {1}/log/observer.log* >{2}".format(err_trace, node.get("home_path"), log_name))
+                        err_no, err_trace, table_id, tenant_id, compaction_scn, global_broadcast_scn
+                    )
+                )
+                log_name = "/tmp/rca_error_no_{1}_{2}_{0}.txt".format(tenant_id, svr_ip, svr_port)
+                ssh_helper.ssh_exec_cmd("grep \"{0}\" {1}/log/observer.log* >{2}".format(err_trace, node.get("home_path"), log_name))
                 ssh_helper.download(log_name, local_path=self.local_path)
                 tenant_record.add_record("download {0} to {1}".format(log_name, self.local_path))
                 ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name))
@@ -342,80 +288,60 @@ def diagnose_info_switch(self, sql_data, tenant_record):
             if node is None:
                 raise RCAExecuteException("can not find observer node by ip:{0}, port:{1}".format(svr_ip, svr_port))
-            tenant_record.add_record(
-                "diagnose_info type is 'error_no'. time is {0},observer is {1}:{2},the log is {3}".format(
-                    create_time, svr_ip, svr_port, log_name))
+            tenant_record.add_record("diagnose_info type is 'error_no'. time is {0},observer is {1}:{2},the log is {3}".format(create_time, svr_ip, svr_port, log_name))
             ssh_helper.ssh_exec_cmd('cat observer.log* |grep "{1}" > /tmp/{0}'.format(log_name, err_trace))
             ssh_helper.download(log_name, local_path=self.local_path)
             tenant_record.add_record("download {0} to {1}".format(log_name, self.local_path))
             ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name))
             return
         elif "weak read ts is not ready" in diagnose_info:
-            cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-                "select * from oceanbase.__all_virtual_ls_info where tenant_id='{0}' and ls_id='{1}';".format(tenant_id,
-                                                                                                              ls_id))
+            cursor = self.ob_connector.execute_sql_return_cursor_dictionary("select * from oceanbase.__all_virtual_ls_info where tenant_id='{0}' and ls_id='{1}';".format(tenant_id, ls_id))
             columns = [column[0] for column in cursor.description]
             all_virtual_ls_info_data = cursor.fetchall()
             self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
-            tenant_record.add_record(
-                "sql:" + "select * from oceanbase.__all_virtual_ls_info where tenant_id='{0}' and ls_id='{1}';".format(
-                    tenant_id, ls_id) +
-                "result:{0}".format(str(self.all_virtual_ls_info)))
+            tenant_record.add_record("sql:" + "select * from oceanbase.__all_virtual_ls_info where tenant_id='{0}' and ls_id='{1}';".format(tenant_id, ls_id) + "result:{0}".format(str(self.all_virtual_ls_info)))
             return
         elif "memtable can not create dag successfully" in diagnose_info:
             tenant_record.add_record("diagnose_info type is memtable can not create dag successfully.")
-            global_broadcast_scn = self.ob_connector.execute_sql(
-                "select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(tenant_id))[0][3]
-            compaction_scn = self.ob_connector.execute_sql(
-                "select * from oceanbase.__all_virtual_tablet_meta_table where tablet_id='{0}' and tenant_id='{1}';".format(
-                    table_id, tenant_id))[0][7]
+            global_broadcast_scn = self.ob_connector.execute_sql("select * from oceanbase.CDB_OB_MAJOR_COMPACTION where TENANT_ID='{0}';".format(tenant_id))[0][3]
+            compaction_scn = self.ob_connector.execute_sql("select * from oceanbase.__all_virtual_tablet_meta_table where tablet_id='{0}' and tenant_id='{1}';".format(table_id, tenant_id))[0][7]
             if compaction_scn > global_broadcast_scn:
                 tenant_record.add_record(
                     "diagnose_info type is memtable can not create dag successfully. table_id:{0}, tenant_id:{1}, compaction_scn: {2}, global_broadcast_scn: {3}. compaction_scn>global_broadcast_scn".format(
-                        table_id, tenant_id, compaction_scn, global_broadcast_scn))
+                        table_id, tenant_id, compaction_scn, global_broadcast_scn
+                    )
+                )
                 return
             else:
-                cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-                    "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
-                        svr_ip, svr_port, tenant_id))
+                cursor = self.ob_connector.execute_sql_return_cursor_dictionary("select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(svr_ip, svr_port, tenant_id))
                 columns = [column[0] for column in cursor.description]
                 all_virtual_ls_info_data = cursor.fetchall()
                 self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
-                tenant_record.add_record(
-                    "sql:" +
-                    "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
-                        svr_ip, svr_port, tenant_id) +
-                    "result:{0}".format(str(self.all_virtual_ls_info)))
+                tenant_record.add_record("sql:" + "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(svr_ip, svr_port, tenant_id) + "result:{0}".format(str(self.all_virtual_ls_info)))
                 return
         elif "medium wait for freeze" in diagnose_info or "major wait for freeze" in diagnose_info:
             tenant_record.add_record("diagnose_info type is medium wait for freeze or major wait for freeze.")
-            cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-                "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
-                    svr_ip, svr_port, tenant_id))
+            cursor = self.ob_connector.execute_sql_return_cursor_dictionary("select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(svr_ip, svr_port, tenant_id))
             columns = [column[0] for column in cursor.description]
             all_virtual_ls_info_data = cursor.fetchall()
             self.all_virtual_ls_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
-            tenant_record.add_record(
-                "sql:" +
-                "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(
-                    svr_ip, svr_port, tenant_id) +
-                "result:{0}".format(str(self.all_virtual_ls_info)))
+            tenant_record.add_record("sql:" + "select * from oceanbase.__all_virtual_dag_scheduler where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}';".format(svr_ip, svr_port, tenant_id) + "result:{0}".format(str(self.all_virtual_ls_info)))
             return
         elif "major not schedule for long time" in diagnose_info:
             tenant_record.add_record("diagnose_info type is major not schedule for long time")
             cursor = self.ob_connector.execute_sql_return_cursor_dictionary(
-                "select * from oceanbase.__all_virtual_tablet_compaction_info where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}' and ls_id='{3}' and tablet_id='{4}';".format(
-                    svr_ip, svr_port, tenant_id, ls_id, table_id))
+                "select * from oceanbase.__all_virtual_tablet_compaction_info where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}' and ls_id='{3}' and tablet_id='{4}';".format(svr_ip, svr_port, tenant_id, ls_id, table_id)
+            )
             columns = [column[0] for column in cursor.description]
             all_virtual_ls_info_data = cursor.fetchall()
             all_virtual_tablet_compaction_info = json.dumps(all_virtual_ls_info_data, cls=DateTimeEncoder)
             tenant_record.add_record(
-                "sql:" +
-                "select * from oceanbase.__all_virtual_tablet_compaction_info where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}' and ls_id='{3}' and tablet_id='{4}';".format(
-                    svr_ip, svr_port, tenant_id, ls_id, table_id) +
-                "result:{0}".format(str(all_virtual_tablet_compaction_info)))
+                "sql:"
+                + "select * from oceanbase.__all_virtual_tablet_compaction_info where svr_ip='{0}' and svr_port='{1}' and tenant_id='{2}' and ls_id='{3}' and tablet_id='{4}';".format(svr_ip, svr_port, tenant_id, ls_id, table_id)
+                + "result:{0}".format(str(all_virtual_tablet_compaction_info))
+            )
             node = None
             ssh_helper = None
             for observer_node in self.observer_nodes:
@@ -425,17 +351,10 @@ def diagnose_info_switch(self, sql_data, tenant_record):
             if node is None:
                 raise RCAExecuteException("can not find observer node by ip:{0}, port:{1}".format(svr_ip, svr_port))
-            log_name = "/tmp/rca_major_hold_major_not_schedule_for_long_time_{1}_{2}_{0}.txt".format(create_time,
-                                                                                                     svr_ip,
-                                                                                                     svr_port)
-            tenant_record.add_record(
-                "diagnose_info type is 'major not schedule for long time'. time is {0},observer is {1}:{2},the log is {3}".format(
-                    create_time, svr_ip, svr_port, log_name))
-            thread_id = ssh_helper.ssh_exec_cmd(
-                'cat {0}/log/observer.log* |grep "MediumLoo" -m 1 |grep -P "\[\d+\]" -m 1 -o | grep -oP "\d+"'.format(
-                    node["home_path"], tenant_id)).strip()
-            ssh_helper.ssh_exec_cmd(
-                'cat {0}/log/observer.log | grep "{1}" -m 100> {2}'.format(node["home_path"], thread_id, log_name))
+            log_name = "/tmp/rca_major_hold_major_not_schedule_for_long_time_{1}_{2}_{0}.txt".format(create_time, svr_ip, svr_port)
+            tenant_record.add_record("diagnose_info type is 'major not schedule for long time'. time is {0},observer is {1}:{2},the log is {3}".format(create_time, svr_ip, svr_port, log_name))
+            thread_id = ssh_helper.ssh_exec_cmd('cat {0}/log/observer.log* |grep "MediumLoo" -m 1 |grep -P "\[\d+\]" -m 1 -o | grep -oP "\d+"'.format(node["home_path"])).strip()
+            ssh_helper.ssh_exec_cmd('cat {0}/log/observer.log | grep "{1}" -m 100> {2}'.format(node["home_path"], thread_id, log_name))
             ssh_helper.download(log_name, local_path=self.local_path)
             tenant_record.add_record("download {0} to {1}".format(log_name, self.local_path))
             ssh_helper.ssh_exec_cmd("rm -rf {0}".format(log_name))
@@ -445,11 +364,13 @@ def diagnose_info_switch(self, sql_data, tenant_record):
 
     def export_result(self):
         return self.Result.export()
 
+
     def get_scene_info(self):
-        return {"name": "major_hold",
-                "info_en": "root cause analysis of major hold",
-                "info_cn": "针对卡合并场景的根因分析",
-                }
+        return {
+            "name": "major_hold",
+            "info_en": "root cause analysis of major hold",
+            "info_cn": "针对卡合并场景的根因分析",
+        }
 
 
 major_hold = MajorHoldScene()
diff --git a/main.py b/main.py
index 62cadf87..3fb4cf41 100644
--- a/main.py
+++ b/main.py
@@ -34,4 +34,4 @@
     if MainCommand().init(sys.argv[0], sys.argv[1:]).do_command():
         ROOT_IO.exit(0)
     else:
-        ROOT_IO.exit(1)
\ No newline at end of file
+        ROOT_IO.exit(1)
diff --git a/stdio.py b/stdio.py
index ea56165a..2a0f86cf 100644
--- a/stdio.py
+++ b/stdio.py
@@ -40,7 +40,7 @@
 
 
 class BufferIO(object):
-    
+
     def __init__(self, auto_clear=True):
         self._buffer = []
         self.auto_clear = auto_clear
@@ -244,13 +244,13 @@ def __init__(self, text='', color='cyan', text_color=None, spinner='line', anima
         super(IOHalo, self).__init__(text=text, color=color, text_color=text_color, spinner=spinner, animation=animation, placement=placement, interval=interval, enabled=enabled, stream=stream)
 
     def start(self, text=None):
-        if getattr(self._stream, 'isatty', lambda : False)():
+        if getattr(self._stream, 'isatty', lambda: False)():
             return super(IOHalo, self).start(text=text)
         else:
             text and self._stream.write(text)
 
     def stop_and_persist(self, symbol=' ', text=None):
-        if getattr(self._stream, 'isatty', lambda : False)():
+        if getattr(self._stream, 'isatty', lambda: False)():
             return super(IOHalo, self).stop_and_persist(symbol=symbol, text=text)
         else:
             self._stream.write(' %s\n' % symbol.format(istty=False))
@@ -275,7 +275,7 @@ def _get_widgets(widget_type, text, istty=True):
         if istty is False:
             return [text]
         elif widget_type == 'download':
-            return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', ETA(), ' ', FileTransferSpeed()] 
+            return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', ETA(), ' ', FileTransferSpeed()]
         elif widget_type == 'timer':
             return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', AdaptiveETA()]
         elif widget_type == 'simple_progress':
@@ -284,7 +284,7 @@ def _get_widgets(widget_type, text, istty=True):
             return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']')]
 
     def __init__(self, maxval=None, text='', term_width=None, poll=1, left_justify=True, stream=None, widget_type='download'):
-        self.stream_isatty = getattr(stream, 'isatty', lambda : False)()
+        self.stream_isatty = getattr(stream, 'isatty', lambda: False)()
         super(IOProgressBar, self).__init__(maxval=maxval, widgets=self._get_widgets(widget_type, text, self.stream_isatty), term_width=term_width, poll=poll, left_justify=left_justify, fd=stream)
 
     def start(self):
@@ -328,14 +328,12 @@ def _check_stream(self):
             return True
 
     def _hide_cursor(self):
-        """Disable the user's blinking cursor
-        """
+        """Disable the user's blinking cursor"""
         if self._check_stream() and self.stream_isatty:
             cursor.hide(stream=self.fd)
 
     def _show_cursor(self):
-        """Re-enable the user's blinking cursor
-        """
+        """Re-enable the user's blinking cursor"""
         if self._check_stream() and self.stream_isatty:
             cursor.show(stream=self.fd)
 
@@ -360,15 +358,7 @@ class IO(object):
     WARNING_PREV = FormtatText.warning('[WARN]')
     ERROR_PREV = FormtatText.error('[ERROR]')
 
-    def __init__(self,
-                 level,
-                 msg_lv=MsgLevel.DEBUG,
-                 use_cache=False,
-                 track_limit=0,
-                 root_io=None,
-                 input_stream=SysStdin,
-                 output_stream=sys.stdout
-                 ):
+    def __init__(self, level, msg_lv=MsgLevel.DEBUG, use_cache=False, track_limit=0, root_io=None, input_stream=SysStdin, output_stream=sys.stdout):
         self.level = level
         self.msg_lv = msg_lv
         self.default_confirm = False
@@ -472,7 +462,7 @@ def log_cache(self):
     def before_close(self):
         if self._before_critical:
             try:
-                self._before_critical(self) 
+                self._before_critical(self)
             except:
                 pass
 
@@ -505,7 +495,7 @@ def _cache_off(self):
             self._flush_log()
             self._log_cache = None
             return True
-    
+
     def get_input_stream(self):
         if self._root_io:
             return self._root_io.get_input_stream()
@@ -630,12 +620,7 @@ def interrupt_progressbar(self):
     def sub_io(self, msg_lv=None):
         if msg_lv is None:
             msg_lv = self.msg_lv
-        return self.__class__(
-            self.level + 1,
-            msg_lv=msg_lv,
-            track_limit=self.track_limit,
-            root_io=self._root_io if self._root_io else self
-        )
+        return self.__class__(self.level + 1, msg_lv=msg_lv, track_limit=self.track_limit, root_io=self._root_io if self._root_io else self)
 
     def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, (list, tuple)) else [x], show_index=False, start=0, **kwargs):
         if not ary:
@@ -749,12 +734,14 @@ def verbose(self, msg, *args, **kwargs):
         self._print(MsgLevel.VERBOSE, '%s %s' % (self._verbose_prefix, msg), *args, **kwargs)
 
     if sys.version_info.major == 2:
+
         def exception(self, msg='', *args, **kwargs):
             import linecache
+
             exception_msg = []
             ei = sys.exc_info()
             exception_msg.append('Traceback (most recent call last):')
-            stack = traceback.extract_stack()[self.track_limit:-2]
+            stack = traceback.extract_stack()[self.track_limit : -2]
             tb = ei[2]
             while tb is not None:
                 f = tb.tb_frame
@@ -768,7 +755,8 @@ def exception(self, msg='', *args, **kwargs):
                 stack.append((filename, lineno, name, line))
             for line in stack:
                 exception_msg.append('  File "%s", line %d, in %s' % line[:3])
-                if line[3]: exception_msg.append('    ' + line[3].strip())
+                if line[3]:
+                    exception_msg.append('    ' + line[3].strip())
             lines = []
             for line in traceback.format_exception_only(ei[0], ei[1]):
                 lines.append(line)
@@ -780,11 +768,13 @@ def exception(self, msg='', *args, **kwargs):
             print_stack = lambda m: self.log(MsgLevel.ERROR, m)
             msg and self.error(msg)
             print_stack('\n'.join(exception_msg))
+
     else:
+
         def exception(self, msg='', *args, **kwargs):
             ei = sys.exc_info()
             traceback_e = traceback.TracebackException(type(ei[1]), ei[1], ei[2], limit=None)
-            pre_stach = traceback.extract_stack()[self.track_limit:-2]
+            pre_stach = traceback.extract_stack()[self.track_limit : -2]
             pre_stach.reverse()
             for summary in pre_stach:
                 traceback_e.stack.insert(0, summary)
@@ -839,7 +829,7 @@ def __getattr__(self, item):
             if attr is not EMPTY:
                 self._attrs[item] = attr
             else:
-                is_tty = getattr(self._stream, 'isatty', lambda : False)()
+                is_tty = getattr(self._stream, 'isatty', lambda: False)()
                 self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)).format(is_tty))
                 self._attrs[item] = FAKE_RETURN
         return self._attrs[item]
@@ -884,9 +874,11 @@ def func_wrapper(*args, **kwargs):
                 stdio = get_stdio(kwargs.get("stdio", _default_stdio))
                 kwargs["stdio"] = stdio
                 return func(*args, **kwargs)
+
             return _type(func_wrapper) if is_bond_method else func_wrapper
         else:
             return _type(func) if is_bond_method else func
+
     return decorated
diff --git a/telemetry/__init__.py b/telemetry/__init__.py
index 94ea97df..a00cb50f 100644
--- a/telemetry/__init__.py
+++ b/telemetry/__init__.py
@@ -14,4 +14,4 @@
 @time: 2023/11/24
 @file: __init__.py
 @desc:
-"""
\ No newline at end of file
+"""
diff --git a/telemetry/telemetry.py b/telemetry/telemetry.py
index 8543d02b..a8a0a236 100644
--- a/telemetry/telemetry.py
+++ b/telemetry/telemetry.py
@@ -31,7 +31,9 @@
 from stdio import IO
 
 ssl._create_default_https_context = ssl._create_unverified_context
-class Telemetry():
+
+
+class Telemetry:
     def __init__(self):
         self.obversion = None
         self.ob_connector = None
@@ -44,7 +46,7 @@ def __init__(self):
         self.threads = []
         self.work_tag = True
         self.version = get_obdiag_version()
-        self.stdio=IO(1)
+        self.stdio = IO(1)
 
     def set_cluster_conn(self, obcluster):
         try:
@@ -58,14 +60,9 @@ def set_cluster_conn(self, obcluster):
 
             if obcluster is not None:
                 try:
-                    self.cluster_conn = OBConnector(ip=obcluster.get("db_host"),
-                                                    port=obcluster.get("db_port"),
-                                                    username=obcluster.get("tenant_sys").get("user"),
-                                                    password=obcluster.get("tenant_sys").get("password"),
-                                                    stdio=self.stdio,
-                                                    timeout=10000)
+                    self.cluster_conn = OBConnector(ip=obcluster.get("db_host"), port=obcluster.get("db_port"), username=obcluster.get("tenant_sys").get("user"), password=obcluster.get("tenant_sys").get("password"), stdio=self.stdio, timeout=10000)
                     self.threads.append(threading.Thread(None, self.get_cluster_info()))
-                    # self.threads.append(threading.Thread(None, self.get_tenant_info()))
+                    # self.threads.append(threading.Thread(None, self.get_tenant_info()))
                     for thread in self.threads:
                         thread.start()
                 except Exception as e:
@@ -73,27 +70,24 @@ def set_cluster_conn(self, obcluster):
         except Exception as e:
             pass
 
-
     def get_cluster_info(self):
         if self.cluster_conn is not None:
             try:
                 data = None
                 version = str(self.cluster_conn.execute_sql("select version();")[0][0])
                 if "-v4" in version:
-                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary(
-                        "select * from oceanbase.GV$OB_SERVERS;")
+                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary("select * from oceanbase.GV$OB_SERVERS;")
                     columns = [column[0] for column in cursor.description]
                     data = cursor.fetchall()
                     for data_one in data:
                         data_one["SVR_IP"] = ip_mix_by_sha256(data_one["SVR_IP"])
                 elif version.startswith("3."):
-                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary(
-                        "select *from oceanbase.gv$unit u, oceanbase.__all_virtual_server_stat s where s.svr_ip=u.svr_ip and s.svr_port=u.svr_port")
+                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary("select *from oceanbase.gv$unit u, oceanbase.__all_virtual_server_stat s where s.svr_ip=u.svr_ip and s.svr_port=u.svr_port")
                     columns = [column[0] for column in cursor.description]
                     data = cursor.fetchall()
                     for data_one in data:
                         data_one["svr_ip"] = ip_mix_by_sha256(data_one["svr_ip"])
-                self.obversion=version
+                self.obversion = version
                 self.cluster_info = json.dumps(data)
                 self.cluster_info["obversion"] = version
             except Exception as e:
@@ -107,15 +101,15 @@ def get_tenant_info(self):
                 version = str(self.cluster_conn.execute_sql("select version();")[0][0])
                 if "-v4" in version:
                     cursor = self.cluster_conn.execute_sql_return_cursor_dictionary(
-                        "SELECT * FROM OCEANBASE.DBA_OB_TENANTS t1,OCEANBASE.DBA_OB_UNITS t2,OCEANBASE.DBA_OB_UNIT_CONFIGS t3,OCEANBASE.DBA_OB_RESOURCE_POOLS t4 where t1.tenant_id = t4.tenant_id AND t4.resource_pool_id=t2.resource_pool_id AND t4.unit_config_id=t3.unit_config_id ORDER BY t1.tenant_name;")
+                        "SELECT * FROM OCEANBASE.DBA_OB_TENANTS t1,OCEANBASE.DBA_OB_UNITS t2,OCEANBASE.DBA_OB_UNIT_CONFIGS t3,OCEANBASE.DBA_OB_RESOURCE_POOLS t4 where t1.tenant_id = t4.tenant_id AND t4.resource_pool_id=t2.resource_pool_id AND t4.unit_config_id=t3.unit_config_id ORDER BY t1.tenant_name;"
+                    )
                     columns = [column[0] for column in cursor.description]
                     data = cursor.fetchall()
                     for data_one in data:
                         if "SVR_IP" in data_one:
                             data_one["SVR_IP"] = ip_mix_by_sha256(data_one.get("SVR_IP"))
                 elif version.startswith("3."):
-                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary(
-                        "SELECT * FROM OCEANBASE.gv$tenant t1,OCEANBASE.gv$unit t2 where t1.tenant_id = t2.tenant_id;")
+                    cursor = self.cluster_conn.execute_sql_return_cursor_dictionary("SELECT * FROM OCEANBASE.gv$tenant t1,OCEANBASE.gv$unit t2 where t1.tenant_id = t2.tenant_id;")
                     columns = [column[0] for column in cursor.description]
                     data = cursor.fetchall()
                     for data_one in data:
@@ -155,9 +149,9 @@ def put_data(self):
             if self.check_info is not None:
                 report_data["check_info"] = self.check_info
             if self.obversion is not None:
-                report_data["obversion"]=self.obversion
+                report_data["obversion"] = self.obversion
 
-            re = {"content": report_data,"component":"obdiag"}
+            re = {"content": report_data, "component": "obdiag"}
 
             # put to /tmp
             with open(const.OBDIAG_TELEMETRY_FILE_NAME, 'w', encoding="utf8") as f:
@@ -175,22 +169,21 @@ def put_info_to_oceanbase(self):
             conn = http.client.HTTPSConnection(const.TELEMETRY_URL, timeout=(5))
             with open(const.OBDIAG_TELEMETRY_FILE_NAME, 'rb') as file:
                 payload = file.read()
-            headers = {
-                'Content-Encoding': 'application/gzip',
-                'Content-Type': 'application/json'
-            }
+            headers = {'Content-Encoding': 'application/gzip', 'Content-Type': 'application/json'}
             conn.request("POST", const.TELEMETRY_PATH, payload, headers)
             res = conn.getresponse()
-        except :
+        except:
             pass
 
 
+key = "********"
 
-key="********"
 
 def ip_mix_by_sha256(ip):
     ip = ip.encode('utf-8')
     return hmac.new(key.encode('utf-8'), ip, digestmod=hashlib.sha256).hexdigest().upper()
 
+
 def ip_mix_by_sha1(ip=""):
     sha1 = hashlib.sha1()
     sha1.update(ip.encode())
@@ -198,5 +191,3 @@ def ip_mix_by_sha1(ip=""):
 
 
 telemetry = Telemetry()
-
-
diff --git a/test/analyzer/test_tree.py b/test/analyzer/test_tree.py
index 28845044..f26520a9 100644
--- a/test/analyzer/test_tree.py
+++ b/test/analyzer/test_tree.py
@@ -17,23 +17,48 @@
 """
 from handler.analyzer.log_parser.tree import Tree
 
-file_datas = [{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id":"1","name":"open1","id":"1","parent_id":"00000000-0000-0000-0000-000000000000","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open2","id":"2","parent_id":"00000000-0000-0000-0000-000000000000","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open3","id":"3","parent_id":"1","is_follow":"false","start_ts":1662107166232204,"end_ts":1662117166233214,"logs": "test log", "tags": "just a test"}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open6","id":"6","parent_id":"3","is_follow":"false","start_ts":1662107166232204,"end_ts":1662108166233214,"logs": "null", "tags": [{"sql_text": "select * from t where c1=1"}, {"hit_plan": "false"}, {"sql_id": "XXXXXXXXXXXXXXXXXXXXXX"}, {"database_id": 111111111}]}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open7","id":"7","parent_id":"6","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166433214,"logs": [{"end_ts":1662107166433214}]}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data':{"trace_id":"x","name":"open11","id":"11","parent_id":"1","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107167233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data':{"trace_id":"x","name":"open22","id":"22","parent_id":"2","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107173233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data':{"trace_id":"x","name":"open12","id":"12","parent_id":"1","is_follow":"false","start_ts":1662107166232204,"end_ts":1662117166233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data':{"trace_id":"x","name":"open13","id":"13","parent_id":"1","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166233314}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open23","id":"23","parent_id":"2","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166233314}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open32","id":"32","parent_id":"11","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166235214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open33","id":"33","parent_id":"11","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166283214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open41","id":"41","parent_id":"12","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166293214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open55","id":"55","parent_id":"32","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107166291214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open56","id":"56","parent_id":"32","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107167233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open66","id":"66","parent_id":"41","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107266233214}},
-{'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data':{"trace_id":"x","name":"open67","id":"67","parent_id":"999999","is_follow":"false","start_ts":1662107166232204,"end_ts":1662107966233214, "logs": "test log", "tags": "just a test"}}]
+
+file_datas = [
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "1", "name": "open1", "id": "1", "parent_id": "00000000-0000-0000-0000-000000000000", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166233214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open2", "id": "2", "parent_id": "00000000-0000-0000-0000-000000000000", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166233214}},
+    {
+        'host_ip': '192.168.1.1',
+        'host_type': 'OBSERVER',
+        'trace_data': {"trace_id": "x", "name": "open3", "id": "3", "parent_id": "1", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662117166233214, "logs": "test log", "tags": "just a test"},
+    },
+    {
+        'host_ip': '192.168.1.1',
+        'host_type': 'OBSERVER',
+        'trace_data': {
+            "trace_id": "x",
+            "name": "open6",
+            "id": "6",
+            "parent_id": "3",
+            "is_follow": "false",
+            "start_ts": 1662107166232204,
+            "end_ts": 1662108166233214,
+            "logs": "null",
+            "tags": [{"sql_text": "select * from t where c1=1"}, {"hit_plan": "false"}, {"sql_id": "XXXXXXXXXXXXXXXXXXXXXX"}, {"database_id": 111111111}],
+        },
+    },
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open7", "id": "7", "parent_id": "6", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166433214, "logs": [{"end_ts": 1662107166433214}]}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data': {"trace_id": "x", "name": "open11", "id": "11", "parent_id": "1", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107167233214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data': {"trace_id": "x", "name": "open22", "id": "22", "parent_id": "2", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107173233214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data': {"trace_id": "x", "name": "open12", "id": "12", "parent_id": "1", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662117166233214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBPROXY', 'trace_data': {"trace_id": "x", "name": "open13", "id": "13", "parent_id": "1", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166233314}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open23", "id": "23", "parent_id": "2", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166233314}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open32", "id": "32", "parent_id": "11", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166235214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open33", "id": "33", "parent_id": "11", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166283214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open41", "id": "41", "parent_id": "12", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166293214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open55", "id": "55", "parent_id": "32", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107166291214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open56", "id": "56", "parent_id": "32", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107167233214}},
+    {'host_ip': '192.168.1.1', 'host_type': 'OBSERVER', 'trace_data': {"trace_id": "x", "name": "open66", "id": "66", "parent_id": "41", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107266233214}},
+    {
+        'host_ip': '192.168.1.1',
+        'host_type': 'OBSERVER',
+        'trace_data': {"trace_id": "x", "name": "open67", "id": "67", "parent_id": "999999", "is_follow": "false", "start_ts": 1662107166232204, "end_ts": 1662107966233214, "logs": "test log", "tags": "just a test"},
+    },
+]
 
 
 def output(tree):
@@ -48,4 +73,4 @@ def output(tree):
     tree = Tree()
     tree.build(file_datas)
     tree.traverse(5, 5)
-    output(tree)
\ No newline at end of file
+    output(tree)
diff --git a/update/__init__.py b/update/__init__.py
index 213f0f28..a7bbc654 100644
--- a/update/__init__.py
+++ b/update/__init__.py
@@ -14,4 +14,4 @@
 @time: 2024/01/30
 @file: __init__.py
 @desc:
-"""
\ No newline at end of file
+"""
diff --git a/update/update.py b/update/update.py
index f9dd97bf..125a230b 100644
--- a/update/update.py
+++ b/update/update.py
@@ -29,28 +29,25 @@
 
 # for update obdiag files without obdiag
 class UpdateHandler:
-    def __init__(self,context):
+    def __init__(self, context):
         self.context = context
         self.stdio = context.stdio
         self.local_update_file_sha = ""
         self.local_obdiag_version = OBDIAG_VERSION
         self.remote_obdiag_version = ""
         self.remote_tar_sha = ""
-        self.options=self.context.options
-        self.file_path=""
-        self.force=False
+        self.options = self.context.options
+        self.file_path = ""
+        self.force = False
         # on obdiag update command
-        if context.namespace.spacename =="update":
-            self.file_path=Util.get_option(self.options, 'file',default="")
-            self.force=Util.get_option(self.options, 'force',default=False)
-
-
-
+        if context.namespace.spacename == "update":
+            self.file_path = Util.get_option(self.options, 'file', default="")
+            self.force = Util.get_option(self.options, 'force', default=False)
 
     def execute(self):
         try:
-            file_path=self.file_path
-            force=self.force
+            file_path = self.file_path
+            force = self.force
             remote_server = const.UPDATE_REMOTE_SERVER
             remote_version_file_name = const.UPDATE_REMOTE_VERSION_FILE_NAME
             local_version_file_name = os.path.expanduser('~/.obdiag/remote_version.yaml')
@@ -75,8 +72,8 @@ def execute(self):
                     self.stdio.warn(
                         "remote_obdiag_version is {0}. local_obdiag_version is {1}. "
                         "remote_obdiag_version>local_obdiag_version. Unable to update dependency files, please upgrade "
-                        "obdiag. Do not perform the upgrade process.".format(
-                            self.remote_obdiag_version, self.local_obdiag_version))
+                        "obdiag. Do not perform the upgrade process.".format(self.remote_obdiag_version, self.local_obdiag_version)
+                    )
                     return
                 if remote_data.get("remote_tar_sha") is None:
                     self.stdio.warn("remote_tar_sha is None. Do not perform the upgrade process.")
                     return
@@ -89,13 +86,11 @@ def execute(self):
             if os.path.exists(os.path.expanduser(local_update_log_file_name)):
                 with open(os.path.expanduser(local_update_log_file_name), 'r') as file:
                     local_data = yaml.safe_load(file)
-                if local_data.get("remote_tar_sha") is not None and local_data.get(
-                        "remote_tar_sha") == self.remote_tar_sha:
+                if local_data.get("remote_tar_sha") is not None and local_data.get("remote_tar_sha") == self.remote_tar_sha:
                     self.stdio.warn("[update] remote_tar_sha as local_tar_sha. No need to update.")
                     return
                 # get data_update_time
-                if local_data.get("data_update_time") is not None and time.time() - local_data[
-                    "data_update_time"] < 3600 * 24 * 7:
+                if local_data.get("data_update_time") is not None and time.time() - local_data["data_update_time"] < 3600 * 24 * 7:
                     self.stdio.warn("[update] data_update_time No need to update.")
                     return
             # download_update_files
@@ -103,9 +98,7 @@ def execute(self):
             # check_sha
             self.local_update_file_sha = FileUtil.calculate_sha256(local_update_file_name)
             if self.remote_tar_sha != self.local_update_file_sha:
-                self.stdio.warn(
-                    "remote_tar_sha is {0}, but local_tar_sha is {1}. Unable to update dependency files. Do not perform the upgrade process.".format(
-                        self.remote_tar_sha, self.local_update_file_sha))
+                self.stdio.warn("remote_tar_sha is {0}, but local_tar_sha is {1}. Unable to update dependency files. Do not perform the upgrade process.".format(self.remote_tar_sha, self.local_update_file_sha))
                 return
             # move old files
             ## check_old_files

From b1a02bc32da0ea838f9edbf0a13e53453a01ae8d Mon Sep 17 00:00:00 2001
From: Teingi
Date: Mon, 27 May 2024 14:27:56 +0800
Subject: [PATCH 3/4] format code check

---
 .github/workflows/format_check.yml | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 .github/workflows/format_check.yml

diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml
new file mode 100644
index 00000000..135e379d
--- /dev/null
+++ b/.github/workflows/format_check.yml
@@ -0,0 +1,29 @@
+name: Black Code Formatter Check
+
+on:
+  pull_request:
+    branches: "*"
+  push:
+    branches: "*"
+
+jobs:
+  format_check:
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout Code
+      uses: actions/checkout@v3
+
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: 3.8
+
+    - name: Install Black
+      run: |
+        python -m pip install --upgrade pip
+        pip install black
+
+    - name: Run Black Formatter Check
+      run: black --check .
+      # The `--check` option makes Black verify the code without modifying it; if the formatting does not conform to Black's rules, this step fails
\ No newline at end of file

From 50b6518f72d6822e0349e5bb08e416f0591145d9 Mon Sep 17 00:00:00 2001
From: Teingi
Date: Mon, 27 May 2024 14:34:36 +0800
Subject: [PATCH 4/4] format code check

---
 .github/workflows/format_check.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml
index 135e379d..50b6bd78 100644
--- a/.github/workflows/format_check.yml
+++ b/.github/workflows/format_check.yml
@@ -25,5 +25,5 @@ jobs:
         pip install black
 
     - name: Run Black Formatter Check
-      run: black --check .
+      run: black --check -S -l 256 .
       # The `--check` option makes Black verify the code without modifying it; if the formatting does not conform to Black's rules, this step fails
\ No newline at end of file
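
A note for contributors (an editorial sketch, not part of the patches themselves): the check that PATCH 3/4 adds and PATCH 4/4 tunes can be reproduced locally before pushing, assuming Black is installed from PyPI:

    pip install black
    black --check -S -l 256 .

`--check` reports formatting violations without rewriting files, `-S` (`--skip-string-normalization`) leaves existing quote styles alone, and `-l 256` raises Black's line-length limit to 256, which matches the long single-line calls produced throughout PATCH 1/4.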