diff --git a/_cmd.py b/_cmd.py
index 4e488ac..7396a9b 100644
--- a/_cmd.py
+++ b/_cmd.py
@@ -30,7 +30,7 @@
 from optparse import OptionParser, BadOptionError, Option, IndentedHelpFormatter
 
 from core import ObdHome
-from _stdio import IO, FormtatText
+from _stdio import IO, FormatText
 from _lock import LockMode
 from _types import Capacity
 from tool import DirectoryUtil, FileUtil, NetUtil, COMMAND_ENV
@@ -226,17 +226,22 @@ def parse_command(self):
         self.parser.allow_undefine = self.dev_mode
         return super(ObdCommand, self).parse_command()
 
+    def _init_log(self):
+        trace_id = uuid()
+        log_dir = os.path.join(self.OBD_PATH, 'log')
+        DirectoryUtil.mkdir(log_dir)
+        log_path = os.path.join(log_dir, 'obd')
+        ROOT_IO.init_trace_logger(log_path, 'obd', trace_id)
+        ROOT_IO.exit_msg = '''Trace ID: {trace_id}
+If you want to view detailed obd logs, please run: obd display-trace {trace_id}'''.format(trace_id=trace_id)
+
     def do_command(self):
         self.parse_command()
         self.init_home()
-        trace_id = uuid()
         ret = False
         try:
-            log_dir = os.path.join(self.OBD_PATH, 'log')
-            DirectoryUtil.mkdir(log_dir)
-            log_path = os.path.join(log_dir, 'obd')
-            if self.enable_log:
-                ROOT_IO.init_trace_logger(log_path, 'obd', trace_id)
+            if self.has_trace and self.enable_log:
+                self._init_log()
             ROOT_IO.track_limit += 1
             ROOT_IO.verbose('cmd: %s' % self.cmds)
             ROOT_IO.verbose('opts: %s' % self.opts)
@@ -245,7 +250,7 @@ def do_command(self):
             obd.set_cmds(self.cmds)
             ret = self._do_command(obd)
             if not ret:
-                ROOT_IO.print(DOC_LINK_MSG)
+                ROOT_IO.exit_msg = DOC_LINK_MSG + "\n" + ROOT_IO.exit_msg
         except NotImplementedError:
             ROOT_IO.exception('command \'%s\' is not implemented' % self.prev_cmd)
         except LockError:
@@ -257,9 +262,6 @@ def do_command(self):
         except:
             e = sys.exc_info()[1]
             ROOT_IO.exception('Running Error: %s' % e)
-        if self.has_trace:
-            ROOT_IO.print('Trace ID: %s' % trace_id)
-            ROOT_IO.print('If you want to view detailed obd logs, please run: obd display-trace %s' % trace_id)
         return ret
 
     def _do_command(self, obd):
@@ -859,7 +861,7 @@ def _do_command(self, obd):
             res = obd.deploy_cluster(self.cmds[0])
             self.background_telemetry_task(obd)
             if res:
-                obd.stdio.print(FormtatText.success('Please execute ` obd cluster start %s ` to start' % self.cmds[0]))
+                obd.stdio.print(FormatText.success('Please execute ` obd cluster start %s ` to start' % self.cmds[0]))
             return res
         else:
             return self._show_help()
@@ -900,6 +902,7 @@ class ClusterComponentDeleteCommand(ClusterMirrorCommand):
 
     def __init__(self):
         super(ClusterComponentDeleteCommand, self).__init__('del', 'Add components for cluster')
+        self.parser.add_option('-f', '--force', action='store_true', help="Force delete components.", default=False)
         self.parser.add_option('--ignore-standby', '--igs', action='store_true', help="Force kill the observer while standby tenant in others cluster exists.")
 
     def init(self, cmd, args):
@@ -1129,6 +1132,7 @@ def __init__(self):
         self.parser.add_option('--tablegroup', type='string', help="Tenant tablegroup.")
         self.parser.add_option('--primary-zone', type='string', help="Tenant primary zone. [RANDOM].", default='RANDOM')
         self.parser.add_option('--locality', type='string', help="Tenant locality.")
+        self.parser.add_option('--time-zone', type='string', help="Tenant time zone. The default tenant time_zone is [+08:00].")
         self.parser.add_option('-s', '--variables', type='string', help="Set the variables for the system tenant. [ob_tcp_invited_nodes='%'].", default="ob_tcp_invited_nodes='%'")
         self.parser.add_option('-o', '--optimize', type='string', help="Specify scenario optimization when creating a tenant, the default is consistent with the cluster dimension.\n{express_oltp, complex_oltp, olap, htap, kv}\nSupported since version 4.3.")
@@ -1406,7 +1410,7 @@ def __init__(self):
         self.parser.add_option('--sysbench-script-dir', type='string', help='The directory of the sysbench lua script file. [/usr/sysbench/share/sysbench]', default='/usr/sysbench/share/sysbench')
         self.parser.add_option('--table-size', type='int', help='Number of data initialized per table. [20000]', default=20000)
         self.parser.add_option('--tables', type='int', help='Number of initialization tables. [30]', default=30)
-        self.parser.add_option('--threads', type='int', help='Number of threads to use. [16]', default=16)
+        self.parser.add_option('--threads', type='string', help='Number of threads to use. [16]', default='16')
         self.parser.add_option('--time', type='int', help='Limit for total execution time in seconds. [60]', default=60)
         self.parser.add_option('--interval', type='int', help='Periodically report intermediate statistics with a specified time interval in seconds. 0 disables intermediate reports. [10]', default=10)
         self.parser.add_option('--events', type='int', help='Limit for total number of events.')
@@ -1447,6 +1451,8 @@ def __init__(self):
         self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1)
         self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.')
         self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False)
+        self.parser.add_option('--direct-load', action='store_true', help="Enable the direct load feature to load data.")
+        self.parser.add_option('--parallel', type='int', help='The degree of parallelism for loading data. [max_cpu * unit_count]')
 
     def _do_command(self, obd):
         if self.cmds:
@@ -1605,9 +1611,10 @@ def __init__(self):
         super(CommandsCommand, self).__init__('command', 'Common tool commands')
         self.parser.add_option('-c', '--components', type='string', help='The components used by the command. The first component in the configuration will be used by default in interactive commands, and all available components will be used by default in non-interactive commands.')
         self.parser.add_option('-s', '--servers', type='string', help='The servers used by the command. The first server in the configuration will be used by default in interactive commands, and all available servers will be used by default in non-interactive commands.')
+        self.parser.undefine_warn = False
 
     def _do_command(self, obd):
-        if len(self.cmds) == 2:
+        if len(self.cmds) in [2, 3]:
             return obd.commands(self.cmds[0], self.cmds[1], self.opts)
         else:
             return self._show_help()
diff --git a/_deploy.py b/_deploy.py
index 824f573..4b13e51 100644
--- a/_deploy.py
+++ b/_deploy.py
@@ -28,6 +28,7 @@
 import hashlib
 from copy import deepcopy
 from enum import Enum
+from datetime import datetime
 
 from ruamel.yaml.comments import CommentedMap
@@ -628,9 +629,15 @@ def del_depend(self, name, component_name):
 
     def get_depend_servers(self, name):
         if name not in self._depends:
-            return None
+            return []
         cluster_config = self._depends[name]
         return deepcopy(cluster_config.original_servers)
+
+    def get_be_depend_servers(self, name):
+        if name not in self._be_depends:
+            return []
+        cluster_config = self._be_depends[name]
+        return deepcopy(cluster_config.original_servers)
 
     def get_depend_added_servers(self, name):
         if name not in self._depends:
@@ -1033,11 +1040,12 @@ class DeployInstallMode(object):
 
 class DeployInfo(object):
 
-    def __init__(self, name, status, components=None, config_status=DeployConfigStatus.UNCHNAGE):
+    def __init__(self, name, status, components=None, config_status=DeployConfigStatus.UNCHNAGE, create_date=None):
        self.status = status
        self.name = name
        self.components = components if components else {}
        self.config_status = config_status
+        self.create_date = create_date
 
     def __str__(self):
         info = ['%s (%s)' % (self.name, self.status.value)]
@@ -1218,19 +1226,23 @@ def scale_out(self, config_path):
                 source_data = self.yaml_loader.load(f)
                 for key in source_data:
                     if key in ['user', 'unuse_lib_repository', 'auto_create_tenant']:
-                        self.stdio.error(err.EC_COMPONENT_CHANGE_CONFIG.format(key))
+                        self.stdio.error(err.EC_COMPONENT_CHANGE_CONFIG.format(message=key))
                         ret = False
                     elif issubclass(type(source_data[key]), dict):
                         new_depends = source_data[key].get('depends', [])
+                        if key not in self.components:
+                            self.stdio.error(err.EC_COMPONENT_NOT_EXISTS.format(component=key))
+                            ret = False
+                            break
                         if new_depends and new_depends != self.components[key].depends:
                             self.stdio.error(err.EC_COMPONENT_CHANGE_CONFIG.format(message='depends:{}'.format(key)))
                             ret = False
                         # temp _depends
                         depends[key] = self.components[key].depends
-                        
+
                         if not self._merge_component(key, source_data[key]):
                             ret = False
-            
+
             for comp in depends:
                 conf = self.components[comp]
                 for name in depends[comp]:
@@ -1243,7 +1255,7 @@ def scale_out(self, config_path):
             ret = False
         return ret
 
-    def add_components(self, config_path):
+    def add_components(self, config_path, ignore_exist=False):
         ret = True
         depends = {}
         try:
@@ -1255,6 +1267,8 @@ def add_components(self, config_path):
                         ret = False
                     elif issubclass(type(source_data[key]), dict):
                         if key in self.components:
+                            if ignore_exist:
+                                continue
                             self.stdio.error(err.EC_COMPONENT_EXISTS.format(component=key))
                             ret = False
                             continue
@@ -1578,6 +1592,7 @@ def deploy_info(self):
                         getattr(DeployStatus, data['status'], DeployStatus.STATUS_CONFIGURED),
                         ConfigUtil.get_value_from_dict(data, 'components', OrderedDict()),
                         getattr(DeployConfigStatus, ConfigUtil.get_value_from_dict(data, 'config_status', '_'), DeployConfigStatus.UNCHNAGE),
+                        ConfigUtil.get_value_from_dict(data, 'create_date', None),
                     )
                 except:
                     self._info = DeployInfo(self.name, DeployStatus.STATUS_CONFIGURED)
@@ -1654,6 +1669,7 @@ def dump_deploy_info(self):
                 'components': self.deploy_info.components,
                 'status': self.deploy_info.status.name,
                 'config_status': self.deploy_info.config_status.name,
+                'create_date': self.deploy_info.create_date,
             }
             yaml.dump(data, f)
             return True
@@ -1664,6 +1680,8 @@ def dump_deploy_info(self):
 
     def _update_deploy_status(self, status):
         old = self.deploy_info.status
         self.deploy_info.status = status
+        if old == DeployStatus.STATUS_DEPLOYED and status == DeployStatus.STATUS_RUNNING:
+            self.deploy_info.create_date = datetime.now().strftime('%Y-%m-%d')
         if self.dump_deploy_info():
             return True
         self.deploy_info.status = old
diff --git a/_errno.py b/_errno.py
index 39a00ee..d8b7713 100644
--- a/_errno.py
+++ b/_errno.py
@@ -139,6 +139,8 @@ class InitDirFailedErrorMessage(object):
 EC_COMPONENT_FAILED_TO_MERGE_CONFIG = OBDErrorCodeTemplate(1023, 'Failed to merge config: {message}')
 EC_COMPONENT_NO_REMAINING_COMPS = OBDErrorCodeTemplate(1024, 'The cluster will have no remaining components. If you are absolutely sure about DELETING ALL COMPONENTS, please use "obd cluster destroy " command to completely destroy the cluster')
 EC_COMPONENT_PASSWD_ERROR = OBDErrorCodeTemplate(1025, '({ip}) {component} {key} invalid. (Rule: {rule})')
+EC_RUNNING_CLUSTER_NO_REDEPLOYED = OBDErrorCodeTemplate(1026, 'Could not modify {key} when the cluster is in the working status (`production_mode` is True; this operation is not supported).')
+EC_COMPONENT_DIR_NOT_EMPTY = OBDErrorCodeTemplate(1027, 'If you are sure the directory can be emptied, run `obd cluster deploy -f {deploy_name}` to perform forced deployment.')
 
 WC_ULIMIT_CHECK = OBDErrorCodeTemplate(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})')
 WC_AIO_NOT_ENOUGH = OBDErrorCodeTemplate(1011, '({ip}) The recommended value of fs.aio-max-nr is 1048576 (Current value: {current})')
@@ -160,6 +162,8 @@ class InitDirFailedErrorMessage(object):
 EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS = OBDErrorCodeTemplate(2005, 'Failed to register cluster. {appname} may have been registered in {obconfig_url}.')
 EC_OBSERVER_MULTI_NET_DEVICE = OBDErrorCodeTemplate(2006, '{ip} has more than one network interface. Please set `devname` for ({server})')
 EC_OBSERVER_PING_FAILED = OBDErrorCodeTemplate(2007, '{ip1} {devname} fail to ping {ip2}. Please check configuration `devname`')
+EC_OBSERVER_PING_NOT_FOUND = OBDErrorCodeTemplate(2007, '/usr/bin/ping: No such file or directory. You can run `sudo yum install iputils` or `sudo apt-get install iputils-ping`.')
+EC_OBSERVER_PING_FAILED_SUID = OBDErrorCodeTemplate(2007, 'If the error message `operation not permitted` appears, please check the ping file permissions. You can try running `sudo chmod u+s /usr/bin/ping`')
 EC_OBSERVER_PING_FAILED_WITH_NO_DEVNAME = OBDErrorCodeTemplate(2007, '{ip1} fail to ping {ip2}. Please check your network')
 EC_OBSERVER_TIME_OUT_OF_SYNC = OBDErrorCodeTemplate(2008, 'Cluster clocks are out of sync')
 EC_OBSERVER_PRODUCTION_MODE_LIMIT = OBDErrorCodeTemplate(2009, '({server}): when production_mode is True, {key} can not be less then {limit}')
@@ -180,6 +184,7 @@ class InitDirFailedErrorMessage(object):
 # obagent
 EC_OBAGENT_RELOAD_FAILED = OBDErrorCodeTemplate(4000, 'Fail to reload {server}')
 EC_OBAGENT_SEND_CONFIG_FAILED = OBDErrorCodeTemplate(4001, 'Fail to send config file to {server}')
+WC_OBAGENT_SERVER_NAME_ERROR = OBDErrorCodeTemplate(4002, '{servers}: Failed to obtain the configuration of the OceanBase database component. \nPlease ensure that the server configurations are consistent between the OBAgent and OceanBase database components.')
 
 # obproxy
 EC_OBPROXY_NEED_CONFIG = OBDErrorCodeTemplate(4100, '{server} need config "rs_list" or "obproxy_config_server_url"')
@@ -220,8 +225,9 @@ class InitDirFailedErrorMessage(object):
 EC_OCP_SERVER_NOT_ENOUGH_MEMORY_CACHED = OBDErrorCodeTemplate(4364, '({ip}) not enough memory. (Free: {free}, Buff/Cache: {cached}, Need: {need})')
 EC_OCP_SERVER_NOT_ENOUGH_MEMORY = OBDErrorCodeTemplate(4364, '({ip}) not enough memory. (Free: {free}, Need: {need})')
 EC_OCP_SERVER_NOT_ENOUGH_DISK = OBDErrorCodeTemplate(4365, '({ip}) {disk} not enough disk space. (Avail: {avail}, Need: {need})')
-EC_OCP_SERVER_RESOURCE_NOT_ENOUGH = OBDErrorCodeTemplate(4366, 'There is not enough {resource}. (Avail: {avail}, need: {need})')
-
+EC_OCP_SERVER_RESOURCE_NOT_ENOUGH = OBDErrorCodeTemplate(4366, 'There is not enough {resource}. (Avail: {avail}, Need: {need})')
+EC_OCP_SERVER_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH = OBDErrorCodeTemplate(4367, 'The allocated memory for the provided meta database is currently insufficient for creating a tenant. Available: {avail}, Need: {need}.')
+EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH = OBDErrorCodeTemplate(4368, 'The allocated memory for the provided meta database is currently insufficient for creating a tenant. Available: {avail}, Need: {need}(Available = memory_limit [{memory_limit}] - system_memory [{system_memory}] - sys tenant memory [{sys_tenant_memory}]. Need = ocp meta tenant memory [{ocp_meta_tenant_memory}] + ocp_monitor_tenant_memory [{ocp_monitor_tenant_memory}]).')
 
 WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO = OBDErrorCodeTemplate(4303, '({ip}) failed to get disk information, skip disk space check')
@@ -300,7 +306,10 @@ class InitDirFailedErrorMessage(object):
 SUG_OCP_SERVER_REDUCE_MEM = OBDErrorSuggestionTemplate('Please reduce the `memory_size`', fix_eval=[FixEval(FixEval.DEL, 'memory_size')])
 SUG_OCP_SERVER_REDUCE_DISK = OBDErrorSuggestionTemplate('Please reduce the `logging_file_total_size_cap`', fix_eval=[FixEval(FixEval.DEL, 'logging_file_total_size_cap')])
 SUG_OCP_SERVER_EDIT_ADMIN_PASSWD_ERROR = OBDErrorSuggestionTemplate('Please edit the `admin_password`, must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/)', fix_eval=[FixEval(FixEval.DEL, 'admin_password')], auto_fix=True)
+SUG_OCP_SERVER_MACHINE_TIME = OBDErrorSuggestionTemplate('Please ensure that the machine time is synchronized with the ob time')
 SUG_SUDO_NOPASSWD = OBDErrorSuggestionTemplate('Please execute `bash -c \'echo "{user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers`\' as root in {ip}.')
+SUG_OCP_SERVER_EXIST_METADB_TENANT_NOT_ENOUGH = OBDErrorSuggestionTemplate('Please reduce the ocp meta tenant memory or ocp monitor tenant memory')
+SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH = OBDErrorSuggestionTemplate('Please increase the meta db memory_limit and reduce the ocp meta tenant memory or ocp monitor tenant memory')
 SUG_OB_SYS_USERNAME = OBDErrorSuggestionTemplate('Please delete the "ob_sys_username" parameter.')
 SUG_OB_SYS_PASSWORD = OBDErrorSuggestionTemplate('''Please set the "ob_sys_password" for oblogproxy by configuring the "cdcro_password" parameter in the "oceanbase" or "oceanbase-ce" component.''')
 SUG_OBAGENT_EDIT_HTTP_BASIC_AUTH_PASSWORD = OBDErrorSuggestionTemplate('Please edit the `http_basic_auth_password`, cannot contain characters other than uppercase letters, lowercase characters, digits, special characters:~^*{{}}[]_-+', fix_eval=[FixEval(FixEval.DEL, 'http_basic_auth_password')], auto_fix=True)
diff --git a/_mirror.py b/_mirror.py
index 6c35b0f..042e1c9 100644
--- a/_mirror.py
+++ b/_mirror.py
@@ -537,6 +537,8 @@ def get_exact_pkg_info(self, **pattern):
         self.stdio and getattr(self.stdio, 'verbose', print)('min_version is %s' % min_version)
         max_version = ConfigUtil.get_value_from_dict(pattern, 'max_version', transform_func=Version)
         self.stdio and getattr(self.stdio, 'verbose', print)('max_version is %s' % max_version)
+        only_download = pattern['only_download'] if 'only_download' in pattern else False
+        self.stdio and getattr(self.stdio, 'verbose', print)('only_download is %s' % only_download)
         pkgs = []
         for key in self.db:
             info = self.db[key]
@@ -552,6 +554,8 @@ def get_exact_pkg_info(self, **pattern):
                 continue
             if max_version and max_version <= info.version:
                 continue
+            if only_download and not self.is_download(info):
+                continue
             pkgs.append(info)
         if pkgs:
             pkgs.sort()
@@ -619,6 +623,11 @@ def match_score(self, info, name, arch, version=None, min_version=None, max_vers
             c = [len(name) / len(info.name), lse_score, info]
         return c
 
+    def is_download(self, pkg_info):
+        file_name = pkg_info.location[1]
+        file_path = os.path.join(self.mirror_path, file_name)
+        return os.path.exists(file_path)
+
     @staticmethod
     def validate_repoid(repoid):
@@ -755,7 +764,7 @@ def add_pkg(self, pkg):
             self.stdio and getattr(self.stdio, 'print', print)('add %s to local mirror', src_path)
             return pkg
         except IOError:
-            self.self.stdio and getattr(self.self.stdio, 'exception', print)('')
+            self.stdio and getattr(self.stdio, 'exception', print)('')
             self.stdio and getattr(self.stdio, 'error', print)('Set local mirror failed. %s IO Error' % pkg.file_name)
         except:
             self.stdio and getattr(self.stdio, 'exception', print)('')
@@ -1022,6 +1031,7 @@ def _get_section(self, section_name):
             return None
         return repo_conf.sections.get(section_name)
 
+
     def get_remote_mirrors(self, is_enabled=True):
         self._lock()
         mirrors = []
diff --git a/_plugin.py b/_plugin.py
index 492524e..c5ca633 100644
--- a/_plugin.py
+++ b/_plugin.py
@@ -91,7 +91,7 @@ def get_return(self, plugin_name):
         ret = self._return.get(plugin_name)
         if isinstance(ret, PluginReturn):
             return ret
-        return None
+        return PluginReturn()
 
     def set_return(self, plugin_name, plugin_return):
         self._return[plugin_name] = plugin_return
@@ -99,7 +99,7 @@ def set_return(self, plugin_name, plugin_return):
 
 class PluginReturn(object):
 
-    def __init__(self, value=False, *arg, **kwargs):
+    def __init__(self, value=None, *arg, **kwargs):
         self._return_value = value
         self._return_args = arg
         self._return_kwargs = kwargs
diff --git a/_repository.py b/_repository.py
index 9c47948..8d124be 100644
--- a/_repository.py
+++ b/_repository.py
@@ -354,6 +354,21 @@ def _dump(self):
     def need_load(self, pkg, plugin):
         return self.hash != pkg.md5 or not self.install_time > plugin.check_value or not self.file_check(plugin)
 
+    def rpm_headers_list(self, rpm_headers):
+        def ensure_list(param):
+            if isinstance(param, (list, tuple)):
+                return param
+            return [param] if param is not None else []
+
+        dirnames = ensure_list(rpm_headers.get("dirnames"))
+        basenames = ensure_list(rpm_headers.get("basenames"))
+        dirindexes = ensure_list(rpm_headers.get("dirindexes"))
+        filelinktos = ensure_list(rpm_headers.get("filelinktos"))
+        filemd5s = ensure_list(rpm_headers.get("filemd5s"))
+        filemodes = ensure_list(rpm_headers.get("filemodes"))
+
+        return dirnames, basenames, dirindexes, filelinktos, filemd5s, filemodes
+
     def load_pkg(self, pkg, plugin):
         if self.is_shadow_repository():
             self.stdio and getattr(self.stdio, 'print', '%s is a shadow repository' % self)
@@ -374,12 +389,7 @@ def load_pkg(self, pkg, plugin):
                     need_files[src_path] = file_item.target_path
             files = {}
             links = {}
-            dirnames = rpm.headers.get("dirnames")
-            basenames = rpm.headers.get("basenames")
-            dirindexes = rpm.headers.get("dirindexes")
-            filelinktos = rpm.headers.get("filelinktos")
-            filemd5s = rpm.headers.get("filemd5s")
-            filemodes = rpm.headers.get("filemodes")
+            dirnames, basenames, dirindexes, filelinktos, filemd5s, filemodes = self.rpm_headers_list(rpm.headers)
             dirs = sorted(need_dirs.keys(), reverse=True)
             format_str = lambda s: s.decode(errors='replace') if isinstance(s, bytes) else s
             for i in range(len(basenames)):
@@ -685,6 +695,8 @@ def delete_repositories(self, repositories):
             if not repository.path.startswith(self.path):
                 self.stdio.error("The path of the %s file does not start with %s." % (repository.path, self.path))
                 return False
+            if os.path.basename(repository.path) == repository.name and not DirectoryUtil.rm(os.path.join(os.path.dirname(repository.path), repository.md5), self.stdio):
+                return False
             if not DirectoryUtil.rm(repository.path, self.stdio):
                 return False
         return True
diff --git a/_stdio.py b/_stdio.py
index 3ad6983..6057b68 100644
--- a/_stdio.py
+++ b/_stdio.py
@@ -21,6 +21,7 @@
 from __future__ import absolute_import, division, print_function
 
 import os
+import re
 import signal
 import sys
 import fcntl
@@ -89,6 +90,13 @@ def clear(self):
     def flush(self):
         self.auto_clear and self.clear()
         return True
+
+
+class SetBufferIO(BufferIO):
+
+    def write(self, s):
+        if s not in self._buffer:
+            return super(SetBufferIO, self).write(s)
 
 
 class SysStdin(object):
@@ -178,7 +186,7 @@ def _readlines(cls):
             return sys.stdin.readlines()
 
 
-class FormtatText(object):
+class FormatText(object):
 
     def __init__(self, text, color):
         self.text = text
@@ -192,27 +200,27 @@ def __str__(self):
 
     @staticmethod
     def info(text):
-        return FormtatText(text, Fore.BLUE)
+        return FormatText(text, Fore.BLUE)
 
     @staticmethod
     def success(text):
-        return FormtatText(text, Fore.GREEN)
+        return FormatText(text, Fore.GREEN)
 
     @staticmethod
     def warning(text):
-        return FormtatText(text, Fore.YELLOW)
+        return FormatText(text, Fore.YELLOW)
 
     @staticmethod
     def error(text):
-        return FormtatText(text, Fore.RED)
+        return FormatText(text, Fore.RED)
 
 
 class LogSymbols(Enum):
 
-    INFO = FormtatText.info('!')
-    SUCCESS = FormtatText.success('ok')
-    WARNING = FormtatText.warning('!!')
-    ERROR = FormtatText.error('x')
+    INFO = FormatText.info('!')
+    SUCCESS = FormatText.success('ok')
+    WARNING = FormatText.warning('!!')
+    ERROR = FormatText.error('x')
 
 
 class IOTable(PrettyTable):
@@ -366,8 +374,8 @@ class IO(object):
 
     WIDTH = 64
     VERBOSE_LEVEL = 0
-    WARNING_PREV = FormtatText.warning('[WARN]')
-    ERROR_PREV = FormtatText.error('[ERROR]')
+    WARNING_PREV = FormatText.warning('[WARN]')
+    ERROR_PREV = FormatText.error('[ERROR]')
 
     def __init__(self,
                  level,
@@ -394,8 +402,10 @@ def __init__(self,
         self._out_obj = None
         self._cur_out_obj = None
         self._before_critical = None
+        self._exit_msg = ""
         self._output_is_tty = False
         self._input_is_tty = False
+        self._exit_buffer = SetBufferIO()
         self.set_input_stream(input_stream)
         self.set_output_stream(output_stream)
@@ -485,8 +495,20 @@ def before_close(self):
             except:
                 pass
 
+    @property
+    def exit_msg(self):
+        return self._exit_msg
+
+    @exit_msg.setter
+    def exit_msg(self, msg):
+        self._exit_msg = msg
+
     def _close(self):
         self.before_close()
+        self._flush_cache()
+        if self.exit_msg:
+            self.print(self.exit_msg)
+            self.exit_msg = ""
         self._flush_log()
 
     def __del__(self):
@@ -524,6 +546,11 @@ def get_cur_out_obj(self):
         if self._root_io:
             return self._root_io.get_cur_out_obj()
         return self._cur_out_obj
+
+    def get_exit_buffer(self):
+        if self._root_io:
+            return self._root_io.get_exit_buffer()
+        return self._exit_buffer
 
     def _start_buffer_io(self):
         if self._root_io:
@@ -708,12 +735,25 @@ def _print(self, msg_lv, msg, *args, **kwargs):
             del kwargs['prev_msg']
         else:
             print_msg = msg
-        kwargs['file'] = self.get_cur_out_obj()
+
+        if kwargs.get('_on_exit'):
+            kwargs['file'] = self.get_exit_buffer()
+            del kwargs['_on_exit']
+        else:
+            kwargs['file'] = self.get_cur_out_obj()
+
+        if '_disable_log' in kwargs:
+            enable_log = not kwargs['_disable_log']
+            del kwargs['_disable_log']
+        else:
+            enable_log = True
+
         kwargs['file'] and print(self._format(print_msg, *args), **kwargs)
         del kwargs['file']
-        self.log(msg_lv, msg, *args, **kwargs)
+        enable_log and self.log(msg_lv, msg, *args, **kwargs)
 
     def log(self, levelno, msg, *args, **kwargs):
+        msg = self.log_masking(msg)
         self._cache_log(levelno, msg, *args, **kwargs)
 
     def _cache_log(self, levelno, msg, *args, **kwargs):
@@ -735,6 +775,12 @@ def _flush_log(self):
     def _log(self, levelno, msg, *args, **kwargs):
         if self.trace_logger:
             self.trace_logger.log(levelno, msg, *args, **kwargs)
+
+    def _flush_cache(self):
+        if not self._root_io:
+            text = self._exit_buffer.read()
+            if text:
+                self.print(text, _disable_log=True)
 
     def print(self, msg, *args, **kwargs):
         self._print(MsgLevel.INFO, msg, *args, **kwargs)
@@ -755,6 +801,17 @@ def critical(self, msg, *args, **kwargs):
         if not self._root_io:
             self.exit(code)
 
+    def contains_keys(self, msg):
+        keywords = ["IDENTIFIED", "PASSWORD", "CONNECT", "EXECUTER", "CLIENT"]
+        return any(keyword in msg.upper() for keyword in keywords)
+
+    def log_masking(self, msg):
+        regex = r"(-P\s*\S+\s+.*?-p\s*['\"]?|_PASSWORD\s*(=|to)\s*['\"]*|IDENTIFIED BY \S+.*args:\s*\[['\"]?|_password \S+.*args:\s*\[['\"]?)([^\s'\"']+)(['\"]*)"
+        pattern = re.compile(regex)
+        if isinstance(msg, str) and self.contains_keys(msg):
+            msg = pattern.sub(r"\1******\4", msg)
+        return msg
+
     def verbose(self, msg, *args, **kwargs):
         if self.level > self.VERBOSE_LEVEL:
             self.log(MsgLevel.VERBOSE, '%s %s' % (self._verbose_prefix, msg), *args, **kwargs)
@@ -853,7 +910,7 @@ def __getattr__(self, item):
                 self._attrs[item] = attr
             else:
                 is_tty = getattr(self._stream, 'isatty', lambda : False)()
-                self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)).format(is_tty))
+                self._warn_func(FormatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)).format(is_tty))
             self._attrs[item] = FAKE_RETURN
         return self._attrs[item]
diff --git a/_tool.py b/_tool.py
index 2e09c84..6cb88fb 100644
--- a/_tool.py
+++ b/_tool.py
@@ -26,12 +26,12 @@
 from _rpm import PackageInfo
 from _stdio import SafeStdio
 from tool import YamlLoader, DirectoryUtil
-from const import COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBDIAG
+from const import COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBDIAG, TOOL_TPCH, TOOL_TPCC, TOOL_SYSBENCH, COMP_JRE
 
 yaml = YamlLoader()
 
-TOOLS = [COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL]
+TOOLS = [COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL, TOOL_TPCH, TOOL_TPCC, TOOL_SYSBENCH, COMP_JRE]
 
 TOOL_ALIAS = {
     COMP_OBDIAG: COMP_OCEANBASE_DIAGNOSTIC_TOOL,
 }
diff --git a/_types.py b/_types.py
index 80f62a1..4db4aac 100644
--- a/_types.py
+++ b/_types.py
@@ -47,7 +47,9 @@ def __init__(self, s):
             self._format()
             if self.value == self.NULL:
                 self.value = self._origin
-        except:
+        except Exception as e:
+            if str(e):
+                raise Exception("%s" % str(e))
             raise Exception("'%s' is not %s" % (self._origin, self._type_str))
 
     @property
@@ -108,7 +110,7 @@ def _format(self):
                 if 0 <= h <= 23 and 0 <= m <= 60:
                     self._value = h * 60 + m
                 else:
-                    raise Exception('Invalid Value')
+                    raise Exception('Invalid Value(Please use the format like 20:00)')
             else:
                 self._value = 0
 
@@ -138,7 +140,7 @@ def _format(self):
                 if unit:
                     self._value = int(n) * unit
                 else:
-                    raise Exception('Invalid Value')
+                    raise Exception('{} is Invalid Value(Please use the format like 20m、20h or 20s)'.format(self._origin))
             else:
                 self._value = 0
 
@@ -182,8 +184,6 @@ class Capacity(ConfigItemType):
 
     UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}
 
-    LENGTHS = {"B": 4, "K": 8, "M": 12, "G": 16, "T": 20, "P": 24}
-
     def __init__(self, s, precision = 0):
         self.precision = precision
         super(Capacity, self).__init__(s)
 
@@ -192,7 +192,7 @@ def __str__(self):
         return str(self.value)
 
     @property
-    def btyes(self):
+    def bytes(self):
         return self._value
 
     def _format(self):
@@ -201,15 +201,15 @@ def _format(self):
             self._origin = int(float(self._origin))
             n = self._origin
             unit = self.UNITS['B']
-            for u in self.LENGTHS:
-                if len(str(self._origin)) < self.LENGTHS[u]:
+            for u, v in sorted(self.UNITS.items(), key=lambda item: item[1], reverse=True):
+                if n >= v:
+                    n /= v
                     break
-            else:
-                u = 'P'
+            n = self._origin
         else:
             groups = re.match("^(\d+)\s*([BKMGTP])((IB)|B)?\s*$", self._origin.upper())
             if not groups:
-                raise ValueError("Invalid capacity string: %s" % self._origin)
+                raise ValueError("Invalid capacity string: %s(Please use the format like 20G/20GB/20GIB)" % self._origin)
             n, u, _, _ = groups.groups()
             unit = self.UNITS.get(u.upper())
             if unit:
@@ -258,7 +258,7 @@ class Dict(ConfigItemType):
 
     def _format(self):
         if self._origin:
             if not isinstance(self._origin, dict):
-                raise Exception("Invalid Value")
+                raise Exception("Invalid Value: {} is not a dict.".format(self._origin))
             self._value = self._origin
         else:
             self._value = self.value = {}
@@ -341,11 +341,11 @@ def _format(self):
 
 # this type is used to ensure the parameter is a valid oceanbase user
 class OBUser(ConfigItemType):
 
-    OB_USER_PATTERN = re.compile("^[a-zA-Z0-9_\.-]+(@[a-zA-Z0-9_\.-]+)?(#[a-zA-Z0-9_\.-]+)?$")
+    OB_USER_PATTERN = re.compile("^[a-zA-Z0-9_.-]+(@[a-zA-Z0-9_.-]+)?(#[a-zA-Z0-9_.-]+)?$")
 
     def _format(self):
         if not self.OB_USER_PATTERN.match(str(self._origin)):
-            raise Exception("%s is not a valid config" % self._origin)
+            raise Exception("%s is not a valid config(Please use the format like root@sys#obcluster)" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
@@ -356,7 +356,7 @@ class SafeString(ConfigItemType):
 
     def _format(self):
         if not self.SAFE_STRING_PATTERN.match(str(self._origin)):
-            raise Exception("%s is not a valid config" % self._origin)
+            raise Exception("%s is not a valid string(Support: a-z、A-Z、0-9、chinese characters、- _ : @ / .)" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
@@ -371,7 +371,7 @@ def _format(self):
             self._value = self._origin.split(';')
             for v in self._value:
                 if not self.SAFE_STRING_PATTERN.match(v):
-                    raise Exception("%s is not a valid config" % v)
+                    raise Exception("%s is not a valid string(Support: a-z、A-Z、0-9、chinese characters、- _ : @ / .)" % v)
         else:
             self._value = []
 
@@ -387,7 +387,7 @@ def _format(self):
             normalized_path = os.path.normpath(absolute_path)
             if not (self.PATH_PATTERN.match(str(self._origin)) and normalized_path.startswith(parent_path)):
-                raise Exception("%s is not a valid path" % self._origin)
+                raise Exception("%s is not a valid path(Support: a-z、A-Z、0-9、chinese characters、- _ : @ / .)" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
@@ -405,7 +405,7 @@ def _format(self):
                 absolute_path = "/".join([parent_path, v])
                 normalized_path = os.path.normpath(absolute_path)
                 if not (self.PATH_PATTERN.match(v) and normalized_path.startswith(parent_path)):
-                    raise Exception("%s is not a valid path" % v)
+                    raise Exception("%s is not a valid path(Support: a-z、A-Z、0-9、chinese characters、- _ : @ / .)" % v)
         else:
             self._value = []
 
@@ -417,7 +417,7 @@ class DBUrl(ConfigItemType):
 
     def _format(self):
         if not self.DBURL_PATTERN.match(str(self._origin)):
-            raise Exception("%s is not a valid config" % self._origin)
+            raise Exception("%s is not a valid config(Please use the format like jdbc:mysql://root:123456@127.0.0.1:2883/test)" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
@@ -428,5 +428,5 @@ class WebUrl(ConfigItemType):
 
     def _format(self):
         if not self.WEBURL_PATTERN.match(str(self._origin)):
-            raise Exception("%s is not a valid config" % self._origin)
+            raise Exception("%s is not a valid config(Please use the format like https://127.0.0.1:8680/test)" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
diff --git a/const.py b/const.py
index 1f16dde..70d5dad 100644
--- a/const.py
+++ b/const.py
@@ -67,3 +67,15 @@
 PKG_REPO_FILE = 'repository'
 
 RSA_KEY_SIZE = 512
+
+
+# test tool
+TOOL_TPCH = 'obtpch'
+TOOL_TPCC = 'obtpcc'
+TOOL_SYSBENCH = 'ob-sysbench'
+TEST_TOOLS = {
+    TOOL_TPCH: 'tpch',
+    TOOL_TPCC: 'tpcc',
+    TOOL_SYSBENCH: 'sysbench',
+}
+TOOL_TPCC_BENCHMARKSQL = 'OB-BenchmarkSQL-5.0.jar'
\ No newline at end of file
diff --git a/core.py b/core.py
index 9966254..e22e288 100644
--- a/core.py
+++ b/core.py
@@ -23,6 +23,7 @@
 import re
 import os
 import time
+import signal
 from optparse import Values
 from copy import deepcopy, copy
 from collections import defaultdict
@@ -32,7 +33,7 @@
 from ssh import SshClient, SshConfig
 from tool import FileUtil, DirectoryUtil, YamlLoader, timeout, COMMAND_ENV, OrderedDict
-from _stdio import MsgLevel, FormtatText
+from _stdio import MsgLevel, FormatText
 from _rpm import Version
 from _mirror import MirrorRepositoryManager, PackageInfo, RemotePackageInfo
 from _plugin import PluginManager, PluginType, InstallPlugin, PluginContextNamespace
@@ -44,7 +45,7 @@
 from _optimize import OptimizeManager
 from _environ import ENV_REPO_INSTALL_MODE, ENV_BASE_DIR
 from _types import Capacity
-from const import COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBCLIENT, PKG_RPM_FILE, PKG_REPO_FILE
+from const import COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBCLIENT, PKG_RPM_FILE, TEST_TOOLS, COMPS_OB, PKG_REPO_FILE, TOOL_TPCC, TOOL_TPCH, TOOL_SYSBENCH
 from ssh import LocalClient
@@ -278,9 +279,24 @@ def get_clients_with_connect_status(self, deploy_config, repositories, fail_exit
             connect_status.update(self.ssh_clients_connect(servers, ssh_clients, user_config, fail_exit))
         return ssh_clients, connect_status
 
+    def get_clients_with_connect_servers(self, deploy_config, repositories, fail_exit=False):
+        ssh_clients, connect_status = self.get_clients_with_connect_status(deploy_config, repositories, fail_exit)
+
+        for repository in repositories:
+            cluster_config = deploy_config.components[repository.name]
+            cluster_config.servers = [server for server in cluster_config.servers if server in ssh_clients]
+
+        failed_servers = []
+        for k, v in connect_status.items():
+            if v.status == v.FAIL:
+                failed_servers.append(k.ip)
+        for server in failed_servers:
+            self._call_stdio('warn', '%s connect failed' % server)
+        return ssh_clients
+
     def ssh_clients_connect(self, servers, ssh_clients, user_config, fail_exit=False):
         self._call_stdio('start_loading', 'Open ssh connection')
-        connect_io = self.stdio if fail_exit else self.stdio.sub_io()
+        connect_io = self.stdio if fail_exit else self.stdio.sub_io(msg_lv=MsgLevel.CRITICAL)
         connect_status = {}
         success = True
         for server in servers:
@@ -288,15 +304,15 @@ def ssh_clients_connect(self, servers, ssh_clients, user_config, fail_exit=False
                 client = SshClient(
                     SshConfig(
                         server.ip,
-                        user_config.username,
-                        user_config.password,
-                        user_config.key_file,
-                        user_config.port,
+                        user_config.username, 
+                        user_config.password, 
+                        user_config.key_file, 
+                        user_config.port, 
                         user_config.timeout
                     ),
                     self.stdio
                 )
-                error = client.connect(stdio=connect_io)
+                error = client.connect(stdio=connect_io, exit=fail_exit)
                 connect_status[server] = status = err.CheckStatus()
                 if error is not True:
                     success = False
@@ -354,11 +370,11 @@ def search_py_script_plugin(self, repositories, script_name, no_found_act='exit'
                 self._call_stdio(msg_lv, 'No such %s plugin for %s-%s' % (script_name, repository.name, repository.version))
         return plugins
 
-    def search_images(self, component_name, version=None, min_version=None, max_version=None, release=None, disable=[], 
+    def search_images(self, component_name, version=None, min_version=None, max_version=None, release=None, disable=[],
                       usable=[], release_first=False, print_match=True):
         matchs = {}
         usable_matchs = []
-        for pkg in self.mirror_manager.get_pkgs_info(component_name, version=version, min_version=min_version, 
+        for pkg in self.mirror_manager.get_pkgs_info(component_name, version=version, min_version=min_version,
                                                      max_version=max_version, release=release):
             if pkg.md5 in disable:
                 self._call_stdio('verbose', 'Disable %s' % pkg.md5)
@@ -402,7 +418,7 @@ def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_
                 errors.append('No such component name: {}'.format(component))
                 continue
             config = deploy_config.components[component]
-            
+
             # First, check if the component exists in the repository. If exists, check if the version is available. If so, use the repository directly.
             self._call_stdio('verbose', 'Get %s repository' % component)
             repository = self.repository_manager.get_repository(name=component, version=config.version, tag=config.tag, release=config.release, package_hash=config.package_hash)
@@ -420,7 +436,7 @@ def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_
             if repository:
                 if repository >= pkg or (
                     (
-                        update_if_need is None and 
+                        update_if_need is None and
                         not self._call_stdio('confirm', 'Found a higher version\n%s\nDo you want to use it?' % pkg)
                     ) or update_if_need is False
                 ):
@@ -517,6 +533,11 @@ def is_server_list_change(deploy_config):
 
         self._call_stdio('verbose', 'Get Deploy by name')
         deploy = self.deploy_manager.get_deploy_config(name)
+
+        if deploy and deploy.deploy_info.status == DeployStatus.STATUS_UPRADEING:
+            self._call_stdio('error', 'Deploy "%s" is %s. You cannot edit an upgrading cluster' % (name, deploy.deploy_info.status.value))
+            return False
+
         self.set_deploy(deploy)
         param_plugins = {}
         repositories, pkgs = [], []
@@ -576,6 +597,7 @@ def is_server_list_change(deploy_config):
             tf.flush()
             self.lock_manager.set_try_times(-1)
             config_status = DeployConfigStatus.UNCHNAGE
+            diff_need_redeploy_keys = []
             while True:
                 if not user_input:
                     tf.seek(0)
@@ -607,7 +629,7 @@ def is_server_list_change(deploy_config):
                     config_status = DeployConfigStatus.UNCHNAGE
                 elif is_deployed:
                     if deploy_config.components.keys() != deploy.deploy_config.components.keys() or is_server_list_change(deploy_config):
-                        if not self._call_stdio('confirm', 'Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? '):
+                        if not self._call_stdio('confirm', FormatText.warning('Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? ')):
                             if user_input:
                                 return False
                             continue
@@ -618,15 +640,14 @@ def is_server_list_change(deploy_config):
                     for component_name in deploy_config.components:
                         old_cluster_config = deploy.deploy_config.components[component_name]
                         new_cluster_config = deploy_config.components[component_name]
-                        if new_cluster_config.version != old_cluster_config.config_version \
-                                or new_cluster_config.package_hash != old_cluster_config.config_package_hash \
-                                or new_cluster_config.release != old_cluster_config.config_release \
-                                or new_cluster_config.tag != old_cluster_config.tag:
-                            comp_attr_changed = True
-                            config_status = DeployConfigStatus.NEED_REDEPLOY
-                            break
+                        comp_attr_map = {'version': 'config_version', 'package_hash': 'config_package_hash', 'release': 'config_release', 'tag': 'tag'}
+                        for key, value in comp_attr_map.items():
+                            if getattr(new_cluster_config, key) != getattr(old_cluster_config, value):
+                                comp_attr_changed = True
+                                diff_need_redeploy_keys.append(key)
+                                config_status = DeployConfigStatus.NEED_REDEPLOY
                     if comp_attr_changed:
-                        if not self._call_stdio('confirm', 'Modifications to the version, release or hash of the component take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
+                        if not self._call_stdio('confirm', FormatText.warning('Modifications to the version, release or hash of the component take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? ')):
                             if user_input:
                                 return False
                             continue
@@ -641,7 +662,7 @@ def is_server_list_change(deploy_config):
                             rsync_conf_changed = True
                             break
                     if rsync_conf_changed:
-                        if not self._call_stdio('confirm', 'Modifications to the rsync config of a deployed cluster take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
+                        if not self._call_stdio('confirm', FormatText.warning('Modifications to the rsync config of a deployed cluster take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? ')):
                             if user_input:
                                 return False
                             continue
@@ -706,7 +727,7 @@ def is_server_list_change(deploy_config):
                         self._call_stdio('print', '\n'.join(errors))
                         if user_input:
                             return False
-                        if self._call_stdio('confirm', 'Modifications take effect after a redeployment. Are you sure that you want to start a redeployment?'):
+                        if self._call_stdio('confirm', FormatText.warning('Modifications take effect after a redeployment. Are you sure that you want to start a redeployment?')):
                             config_status = DeployConfigStatus.NEED_REDEPLOY
                         elif self._call_stdio('confirm', 'Continue to edit?'):
                             continue
@@ -723,7 +744,10 @@ def is_server_list_change(deploy_config):
                         if config_status == DeployConfigStatus.UNCHNAGE:
                             config_status = DeployConfigStatus.NEED_RELOAD
                     for server in old_cluster_config.servers:
-                        if old_cluster_config.get_need_redeploy_items(server) != new_cluster_config.get_need_redeploy_items(server):
+                        new_redeploy_items = new_cluster_config.get_need_redeploy_items(server)
+                        old_redeploy_items = old_cluster_config.get_need_redeploy_items(server)
+                        if new_redeploy_items != old_redeploy_items:
+                            diff_need_redeploy_keys = [key for key in list(set(old_redeploy_items) | set(new_redeploy_items)) if new_redeploy_items.get(key, '') != old_redeploy_items.get(key, '')]
                             config_status = DeployConfigStatus.NEED_REDEPLOY
                             break
                         if old_cluster_config.get_need_restart_items(server) != new_cluster_config.get_need_restart_items(server):
@@ -732,6 +756,16 @@ def is_server_list_change(deploy_config):
                             config_status = DeployConfigStatus.UNCHNAGE
                             break
 
+        if config_status == DeployConfigStatus.NEED_REDEPLOY:
+            for comp in set(COMPS_OB) & set(list(deploy.deploy_config.components.keys())):
+                cluster_config = deploy.deploy_config.components[comp]
+                default_config = cluster_config.get_global_conf_with_default()
+                if default_config.get('production_mode', True):
+                    diff_need_redeploy_keys = [f'`{key}`' for key in diff_need_redeploy_keys]
+                    diff_need_redeploy_keys = list(set(diff_need_redeploy_keys))
+                    self._call_stdio('error', err.EC_RUNNING_CLUSTER_NO_REDEPLOYED.format(key=', '.join(diff_need_redeploy_keys)))
+                    return False
+
         self._call_stdio('verbose', 'Set deploy configuration status to %s' % config_status)
         self._call_stdio('verbose', 'Save new configuration yaml file')
         if config_status == DeployConfigStatus.UNCHNAGE:
@@ -760,9 +794,9 @@ def list_deploy(self):
         self._call_stdio('verbose', 'Get deploy list')
         deploys = self.deploy_manager.get_deploy_configs()
         if deploys:
-            self._call_stdio('print_list', deploys, 
-                             ['Name', 'Configuration Path', 'Status (Cached)'], 
-                             lambda x: [x.name, x.config_dir, x.deploy_info.status.value], 
+            self._call_stdio('print_list', deploys,
+                             ['Name', 'Configuration Path', 'Status (Cached)'],
+                             lambda x: [x.name, x.config_dir, x.deploy_info.status.value],
                              title='Cluster List',
             )
         else:
@@ -902,7 +936,7 @@ def servers_repository_lib_check(self, ssh_clients, servers, repository, install
             remote_repository_path = repository.repository_dir.replace(self.home_path, remote_home_path)
             remote_repository_data_path = repository.data_file_path.replace(self.home_path, remote_home_path)
             client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_repository_path, True)
-            
+
             for file_path in repository.bin_list(install_plugin):
                 remote_file_path = file_path.replace(self.home_path, remote_home_path)
                 libs = client.execute_command('ldd %s' % remote_file_path).stdout
@@ -924,7 +958,7 @@ def servers_apply_lib_repository_and_check(self, ssh_clients, deploy_config, rep
             lib_repository = repositories_lib_map[repository]['repositories']
             install_plugin = repositories_lib_map[repository]['install_plugin']
             self._call_stdio('print', 'Use %s for %s' % (lib_repository, repository))
-            
+
             for server in cluster_config.servers:
                 client = ssh_clients[server]
                 if server not in servers_obd_home:
@@ -1084,7 +1118,7 @@ def genconfig(self, name):
                     self._call_stdio('error', 'Deploying multiple %s instances on the same server is not supported.' % repository.name)
                     return False
                 real_servers.add(server.ip)
-            
+
         self._call_stdio('start_loading', 'Cluster param config check')
         # Check whether the components have the parameter plugins and apply the plugins
         self.search_param_plugin_and_apply(repositories, deploy_config)
@@ -1164,7 +1198,7 @@ def export_to_ocp(self, name):
         takeover_plugins = self.search_py_script_plugin(repositories, "takeover")
         self._call_stdio('verbose', 'successfully get takeover plugin.')
 
-        ret = self.call_plugin(connect_plugin, mock_ocp_repository)
+        ret = self.call_plugin(connect_plugin, mock_ocp_repository, cluster_config=cluster_config, clients=ssh_clients)
         if not ret or not ret.get_return('connect'):
             return False
@@ -1517,10 +1551,10 @@ def _deploy_cluster(self, deploy, repositories, scale_out=False, dump=True):
                 return False
 
         self._call_stdio(
-            'print_list', 
-            repositories, 
-            ['Repository', 'Version', 'Release', 'Md5'], 
-            lambda repository: [repository.name, repository.version, repository.release, repository.hash], 
+            'print_list',
+            repositories,
+            ['Repository', 'Version', 'Release', 'Md5'],
+            lambda repository: [repository.name, repository.version, repository.release, repository.hash],
             title='Packages'
         )
@@ -1712,7 +1746,7 @@ def scale_out(self, name):
         if not deploy:
             self._call_stdio('error', 'No such deploy: %s.' % name)
             return False
-        
+
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Deploy status judge')
         if deploy_info.status not in [DeployStatus.STATUS_RUNNING]:
@@ -1757,7 +1791,7 @@ def scale_out(self, name):
                     check_pass = False
         if not check_pass:
             return False
-        
+
         self._call_stdio('verbose', 'Start to deploy additional servers')
         if not self._deploy_cluster(deploy, repositories, scale_out=True, dump=False):
             return False
@@ -1765,7 +1799,7 @@ def scale_out(self, name):
         self._call_stdio('verbose', 'Start to start additional servers')
         if not self._start_cluster(deploy, repositories, scale_out=True):
             return False
-        
+
         for repository in repositories:
             if repository not in scale_out_plugins:
                 continue
@@ -1775,7 +1809,7 @@ def scale_out(self, name):
                 return False
             if not self.call_plugin(scale_out_plugins[repository], repository):
                 return False
-        
+
         succeed = True
         # prepare for added components
         for repository in all_repositories:
@@ -1789,12 +1823,12 @@ def scale_out(self, name):
             succeed = succeed and self.call_plugin(plugin[repository], repository)
         if not succeed:
             return False
-        
+
         deploy_config.set_dumpable()
         if not deploy_config.dump():
             self._call_stdio('error', 'Failed to dump new deploy config')
             return False
-        
+
         errors = []
         need_start = []
         need_reload = []
@@ -1812,12 +1846,12 @@ def scale_out(self, name):
         # todo: need_reload use need_start tips,supoort later
         if need_start or need_reload:
             self._call_stdio('print', 'Use `obd cluster restart %s --wp` to make changes take effect.' % name)
-        
+
         if errors:
             self._call_stdio('warn', err.WC_FAIL_TO_RESTART_OR_RELOAD_AFTER_SCALE_OUT.format(detail='\n -'.join(errors)))
             return False
-        
-        self._call_stdio('print', FormtatText.success('Execute ` obd cluster display %s ` to view the cluster status' % name))
+
+        self._call_stdio('print', FormatText.success('Execute ` obd cluster display %s ` to view the cluster status' % name))
         return True
 
     def add_components(self, name):
@@ -1956,7 +1990,7 @@ def delete_components(self, name, components):
         if not components:
             self._call_stdio('error', 'Components is required.')
             return False
-        
+
         deploy_config = deploy.deploy_config
         for component in components:
             if component not in deploy_config.components:
@@ -1969,12 +2003,17 @@ def delete_components(self, name, components):
         repositories = self.get_component_repositories(deploy_info, components)
         self.search_param_plugin_and_apply(all_repositories, deploy_config)
         self._call_stdio('stop_loading', 'succeed')
+        force = getattr(self.options, 'force', False)
         if not self.cluster_server_status_check():
-            self._call_stdio('error', 'Some of the servers in the cluster is not running')
-            return False
-        setattr(self.options, 'skip_cluster_status_check', True)
+            if not force:
+                self._call_stdio('error', 'Some of the servers in the cluster are not running; you can use `obd cluster component del %s %s -f`' % (name, ','.join(components)))
+                return False
+
+        self.get_clients_with_connect_servers(deploy_config, repositories, fail_exit=not force)
+        self._call_stdio('start_loading', f"force del components({','.join(components)})")
+        self.set_deploy(deploy)
         scale_in_check_plugins = self.search_py_script_plugin(all_repositories, 'scale_in_check', no_found_act='ignore')
         reload_plugins = self.search_py_script_plugin(all_repositories, 'reload')
         restart_plugins = self.search_py_script_plugin(all_repositories, 'restart')
@@ -2052,7 +2091,7 @@ def start_cluster(self, name):
         if deploy_info.config_status == DeployConfigStatus.NEED_REDEPLOY:
             self._call_stdio('error', 'Deploy needs redeploy')
             return False
-        if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and not getattr(self.options, 'without_parameter', False):
+        if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and deploy_info.status != DeployStatus.STATUS_STOPPED and not getattr(self.options, 'without_parameter', False):
             self._call_stdio('error', 'Deploy %s.%s\nIf you still need to start the cluster, use the `obd cluster start %s --wop` option to start the cluster without loading parameters. ' % (deploy_info.config_status.value, deploy.effect_tip(), name))
             return False
@@ -3006,7 +3045,7 @@ def redeploy_cluster(self, name, search_repo=True, need_confirm=False):
             self._call_stdio('error', 'No such deploy: %s.' % name)
             return False
 
-        if need_confirm and not self._call_stdio('confirm', FormtatText.warning('Are you sure to destroy the "%s" cluster and rebuild it?' % name)):
+        if need_confirm and not self._call_stdio('confirm', FormatText.warning('Are you sure to destroy the "%s" cluster and rebuild it?' % name)):
             return False
 
         deploy_info = deploy.deploy_info
@@ -3075,7 +3114,7 @@ def destroy_cluster(self, name, need_confirm=False):
         if not deploy:
             self._call_stdio('error', 'No such deploy: %s.' % name)
             return False
-        if need_confirm and not self._call_stdio('confirm', FormtatText.warning('Are you sure to destroy the "%s" cluster ?' % name)):
+        if need_confirm and not self._call_stdio('confirm', FormatText.warning('Are you sure to destroy the "%s" cluster ?' % name)):
             return False
 
         deploy_info = deploy.deploy_info
@@ -3172,6 +3211,8 @@ def _destroy_cluster(self, deploy, repositories, dump=True):
             return True
         self._call_stdio('verbose', 'Set %s deploy status to destroyed' % deploy.name)
         if deploy.update_deploy_status(DeployStatus.STATUS_DESTROYED):
+            if deploy.deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
+                deploy.apply_temp_deploy_config()
             self._call_stdio('print', '%s destroyed' % deploy.name)
             return True
         return False
@@ -3278,7 +3319,7 @@ def reinstall(self, name):
         if need_restart and deploy_info.status == DeployStatus.STATUS_RUNNING:
             setattr(self.options, 'without_parameter', True)
             obd = self.fork(options=self.options)
-            if not obd.call_plugin(start_plugins[current_repository], current_repository, home_path=self.home_path) and getattr(self.options, 'force', False) is False:
+            if not obd.call_plugin(start_plugins[current_repository], current_repository, home_path=self.home_path, is_reinstall=True) and getattr(self.options, 'force', False) is False:
                 self.install_repositories_to_servers(deploy_config, [current_repository, ], install_plugins)
                 return False
 
@@ -3301,6 +3342,10 @@ def upgrade_cluster(self, name):
             self._call_stdio('error', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
             return False
 
+        if deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
+            self._call_stdio('error', 'The current config is modified. Deploy "%s" %s.' % (name, deploy_info.config_status.value))
+            return False
+
         deploy_config = deploy.deploy_config
         self._call_stdio('start_loading', 'Get local repositories and plugins')
@@ -3541,6 +3586,14 @@ def upgrade_cluster(self, name):
         script_query_timeout = getattr(self.options, 'script_query_timeout', '')
 
         n = len(upgrade_repositories)
+
+        def signal_handler(sig, frame):
+            deploy.update_upgrade_ctx(**upgrade_ctx)
+            signal.signal(signal.SIGINT, signal.SIG_DFL)
+            raise KeyboardInterrupt
+
+        signal.signal(signal.SIGINT, signal_handler)
+
         while upgrade_ctx['index'] < n:
             repository = upgrade_repositories[upgrade_ctx['index']]
             repositories = [repository]
@@ -3777,7 +3830,7 @@ def mysqltest(self, name, opts):
         else:
             status = [DeployStatus.STATUS_RUNNING]
         if deploy_info.status not in status:
-            self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
+            self._call_stdio('print', '`{name}` deployment is not running. Please execute the command `obd cluster start {name}` to start the deployment first'.format(name=name))
             return False
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
@@ -4047,7 +4100,8 @@ def sysbench(self, name, opts):
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Check deploy status')
         if deploy_info.status != DeployStatus.STATUS_RUNNING:
-            self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
+            if deploy_info.status.value == DeployStatus.STATUS_DEPLOYED.value:
+                self._call_stdio('print', '`{name}` deployment is not running. Please execute the command `obd cluster start {name}` to start the deployment first'.format(name=name))
             return False
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
@@ -4112,6 +4166,12 @@ def sysbench(self, name, opts):
                 self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
                 return False
 
+        if not self.install_tool(TOOL_SYSBENCH):
+            return False
+
+        if not self.install_tool(COMP_OBCLIENT):
+            return False
+
         ob_repository = None
         repository = None
         connect_namespaces = []
@@ -4174,6 +4234,7 @@ def sysbench(self, name, opts):
             if optimization and optimization_init:
                 self._test_optimize_operation(repository=repository, ob_repository=ob_repository, connect_namespaces=connect_namespaces, connect_plugin=connect_plugin, optimize_envs=kwargs, operation='recover')
 
+
     def tpch(self, name, opts):
         self._call_stdio('verbose', 'Get Deploy by name')
         deploy = self.deploy_manager.get_deploy_config(name)
@@ -4185,7 +4246,8 @@ def tpch(self, name, opts):
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Check deploy status')
         if deploy_info.status != DeployStatus.STATUS_RUNNING:
-            self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
+            if deploy_info.status.value == DeployStatus.STATUS_DEPLOYED.value:
+                self._call_stdio('print', '`{name}` deployment is not running. Please execute the command `obd cluster start {name}` to start the deployment first'.format(name=name))
             return False
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
@@ -4243,6 +4305,13 @@ def tpch(self, name, opts):
             if cluster_status[server] == 0:
                 self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
                 return False
+
+        if not self.install_tool(TOOL_TPCH):
+            return False
+
+        if not self.install_tool(COMP_OBCLIENT):
+            return False
+
         repository = repositories[0]
         namespace = self.get_namespace(repository.name)
         namespace.set_variable('target_server', opts.test_server)
@@ -4318,7 +4387,8 @@ def tpcds(self, name, opts):
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Check deploy status')
         if deploy_info.status != DeployStatus.STATUS_RUNNING:
-            self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
+            if deploy_info.status.value == DeployStatus.STATUS_DEPLOYED.value:
+                self._call_stdio('print', '`{name}` deployment is not running. Please execute the command `obd cluster start {name}` to start the deployment first'.format(name=name))
             return False
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
@@ -4410,7 +4480,8 @@ def tpcc(self, name, opts):
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Check deploy status')
         if deploy_info.status != DeployStatus.STATUS_RUNNING:
-            self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
+            if deploy_info.status.value == DeployStatus.STATUS_DEPLOYED.value:
+                self._call_stdio('print', '`{name}` deployment is not running. Please execute the command `obd cluster start {name}` to start the deployment first'.format(name=name))
             return False
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
@@ -4469,6 +4540,12 @@ def tpcc(self, name, opts):
                 self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
                 return False
 
+        if not self.install_tool(TOOL_TPCC):
+            return False
+
+        if not self.install_tool(COMP_OBCLIENT):
+            return False
+
         ob_repository = None
         repository = None
         odp_cursor = None
@@ -4644,6 +4721,14 @@ def commands(self, name, cmd_name, opts):
         self.set_deploy(deploy)
         self._call_stdio('verbose', 'Get deploy configuration')
         deploy_config = deploy.deploy_config
+        added_components = []
+        config_path = getattr(opts, 'config', '')
+        if config_path:
+            deploy_config.set_undumpable()
+            if not deploy_config.add_components(config_path, ignore_exist=True):
+                self._call_stdio('error', 'Failed to add components configuration for %s' % name)
+                return False
+            added_components = deploy_config.added_components
         deploy_info = deploy.deploy_info
 
         if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
@@ -4653,6 +4738,8 @@ def commands(self, name, cmd_name, opts):
         self._call_stdio('start_loading', 'Get local repositories and plugins')
         # Get the repository
         repositories = self.load_local_repositories(deploy_info)
+        if added_components:
+            repositories += self.get_local_repositories({key: value for key, value in deploy_config.components.items() if key in added_components})
         repositories = self.sort_repositories_by_depends(deploy_config, repositories)
         self.set_repositories(repositories)
         # Check whether the components have the parameter plugins and apply the plugins
@@ -4990,7 +5077,7 @@ def clean_pkg(self, opts):
         if not delete_pkgs + delete_repositories:
             self._call_stdio('print', 'No Package need deleted')
             return False
-        if not opts.confirm and not self._call_stdio('confirm', FormtatText.warning('Are you sure to delete the files listed above ?')):
+        if not opts.confirm and not self._call_stdio('confirm', FormatText.warning('Are you sure to delete the files listed above ?')):
            return False
         if not self.mirror_manager.delete_pkgs(delete_pkgs) or not self.repository_manager.delete_repositories(delete_repositories):
             return False
@@ -5018,7 +5105,10 @@ def list_tools(self):
     def check_requirement(self, tool_name, repository, package, file_map, requirement_map, install_path):
         obd = self.fork()
         obd.set_deploy(deploy=None)
-        check_requirement_plugin = self.plugin_manager.get_best_py_script_plugin('check_requirement', tool_name, package.version)
+        if tool_name in TEST_TOOLS:
+            check_requirement_plugin = self.plugin_manager.get_best_py_script_plugin('check_requirement', TEST_TOOLS[tool_name], package.version)
+        else:
+            check_requirement_plugin = self.plugin_manager.get_best_py_script_plugin('check_requirement', tool_name, package.version)
         if not check_requirement_plugin:
             self._call_stdio('verbose', '%s check_requirement plugin not found' % tool_name)
             return True
@@ -5073,8 +5163,10 @@ def _install_tool(self, tool_name, version, force, install_path):
             package_info = '%s-%s' % (tool_name, version) if version else tool_name
             self._call_stdio('critical', 'No such package: %s' % package_info)
             return False
-
-        plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, tool_name, pkg.version)
+        if tool_name in TEST_TOOLS:
+            plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, TEST_TOOLS[tool_name], pkg.version)
+        else:
+            plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, tool_name, pkg.version)
         if not plugin:
             self._call_stdio('critical', 'Not support tool %s of version %s' % (tool_name, pkg.version))
             return False
@@ -5468,11 +5560,16 @@ def filter_pkgs(self, pkgs, basic_condition, **pattern):
             if pkg.name in ['oceanbase', 'oceanbase-ce'] and (pkg in hit_pkgs or pkg in hash_hit_pkgs or pkg in component_hit_pkgs):
                 for sub_pkg in pkgs:
                     if (sub_pkg.name == '%s-libs' % pkg.name or sub_pkg.name == '%s-utils' % pkg.name) and sub_pkg.release == pkg.release:
-                        hit_pkgs.append(sub_pkg)
-                        hash_hit_pkgs.append(sub_pkg)
-                        component_hit_pkgs.append(sub_pkg)
-                        used_pkgs.append(sub_pkg)
-                        max_version_pkgs.append(sub_pkg)
+                        if pkg in hit_pkgs:
+                            hit_pkgs.append(sub_pkg)
+                        if pkg in hash_hit_pkgs:
+                            hash_hit_pkgs.append(sub_pkg)
+                        if pkg in component_hit_pkgs:
+                            component_hit_pkgs.append(sub_pkg)
+                        if pkg in used_pkgs:
+                            used_pkgs.append(sub_pkg)
+                        if pkg in max_version_pkgs:
+                            max_version_pkgs.append(sub_pkg)
 
         # filter the pkg that meets the deletion criteria.
         if basic_condition == 'DELETE':
diff --git a/example/all-components-min.yaml b/example/all-components-min.yaml
index 698e8ff..0fd3294 100644
--- a/example/all-components-min.yaml
+++ b/example/all-components-min.yaml
@@ -31,7 +31,6 @@ oceanbase-ce:
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
-    enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
     # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
     # appname: obcluster
@@ -104,6 +103,7 @@ obproxy-ce:
 obagent:
   depends:
     - oceanbase-ce
+  # The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce.
   servers:
     - name: server1
       # Please don't use hostname, only IP can be supported
diff --git a/example/all-components.yaml b/example/all-components.yaml
index b3bb627..1bcc9d6 100644
--- a/example/all-components.yaml
+++ b/example/all-components.yaml
@@ -25,11 +25,10 @@ oceanbase-ce:
     # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment.
     memory_limit: 64G # The maximum running memory for an observer
     # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    system_memory: 30G
+    system_memory: 10G
     datafile_size: 192G # Size of the data file.
     log_disk_size: 192G # The size of disk space used by the clog files.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
-    enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
     # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
     # appname: obcluster
@@ -105,9 +104,9 @@ obproxy-ce:
 obagent:
   depends:
     - oceanbase-ce
+  # The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce.
servers: - name: server1 - # Please don't use hostname, only IP can be supported ip: 172.19.33.2 - name: server2 ip: 172.19.33.3 diff --git a/example/autodeploy/all-components.yaml b/example/autodeploy/all-components.yaml index 80fd6b2..846b8a6 100644 --- a/example/autodeploy/all-components.yaml +++ b/example/autodeploy/all-components.yaml @@ -47,8 +47,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/default-example.yaml b/example/autodeploy/default-example.yaml index 1aac397..1b9fcd4 100644 --- a/example/autodeploy/default-example.yaml +++ b/example/autodeploy/default-example.yaml @@ -47,8 +47,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/distributed-example.yaml b/example/autodeploy/distributed-example.yaml index 84905f3..0f453c0 100644 --- a/example/autodeploy/distributed-example.yaml +++ b/example/autodeploy/distributed-example.yaml @@ -47,8 +47,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml index 9161649..072536b 100644 --- a/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml @@ -47,8 +47,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. 
# enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/distributed-with-obproxy-example.yaml b/example/autodeploy/distributed-with-obproxy-example.yaml index 811f3fc..7d4a9f8 100644 --- a/example/autodeploy/distributed-with-obproxy-example.yaml +++ b/example/autodeploy/distributed-with-obproxy-example.yaml @@ -47,8 +47,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/single-example.yaml b/example/autodeploy/single-example.yaml index 20ae863..0ab1b56 100644 --- a/example/autodeploy/single-example.yaml +++ b/example/autodeploy/single-example.yaml @@ -42,8 +42,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/autodeploy/single-with-obproxy-example.yaml b/example/autodeploy/single-with-obproxy-example.yaml index bdc4b6d..290b88e 100644 --- a/example/autodeploy/single-with-obproxy-example.yaml +++ b/example/autodeploy/single-with-obproxy-example.yaml @@ -42,8 +42,6 @@ oceanbase-ce: # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. # enable_syslog_wf: false - # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. - # enable_syslog_recycle: true # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. # max_syslog_file_count: 4 # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. 
# appname: obcluster diff --git a/example/default-components-min.yaml b/example/default-components-min.yaml index 4ab876b..e1cb2aa 100644 --- a/example/default-components-min.yaml +++ b/example/default-components-min.yaml @@ -29,7 +29,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/default-components.yaml b/example/default-components.yaml index 33f51b2..a92fea7 100644 --- a/example/default-components.yaml +++ b/example/default-components.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/distributed-example.yaml b/example/distributed-example.yaml index 9a03bd8..59696ee 100644 --- a/example/distributed-example.yaml +++ b/example/distributed-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password # In this example , support multiple ob process in single node, so different process use different ports. 
diff --git a/example/distributed-with-obproxy-example.yaml b/example/distributed-with-obproxy-example.yaml index bfb4a28..25eeefc 100644 --- a/example/distributed-with-obproxy-example.yaml +++ b/example/distributed-with-obproxy-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/grafana/all-components-with-prometheus-and-grafana.yaml b/example/grafana/all-components-with-prometheus-and-grafana.yaml index 5e1dd3a..9376b08 100644 --- a/example/grafana/all-components-with-prometheus-and-grafana.yaml +++ b/example/grafana/all-components-with-prometheus-and-grafana.yaml @@ -35,7 +35,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. # appname: obcluster diff --git a/example/local-example.yaml b/example/local-example.yaml index c464204..5d0e99d 100644 --- a/example/local-example.yaml +++ b/example/local-example.yaml @@ -21,10 +21,9 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. 
# root_password: # root user password, can be empty \ No newline at end of file diff --git a/example/mini-distributed-example.yaml b/example/mini-distributed-example.yaml index cc50fd3..78e2fd1 100644 --- a/example/mini-distributed-example.yaml +++ b/example/mini-distributed-example.yaml @@ -31,7 +31,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty server1: diff --git a/example/mini-distributed-with-obproxy-example.yaml b/example/mini-distributed-with-obproxy-example.yaml index 5b24de1..d5d5d3e 100644 --- a/example/mini-distributed-with-obproxy-example.yaml +++ b/example/mini-distributed-with-obproxy-example.yaml @@ -31,7 +31,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/mini-local-example.yaml b/example/mini-local-example.yaml index 4ae0f7c..90de7dc 100755 --- a/example/mini-local-example.yaml +++ b/example/mini-local-example.yaml @@ -29,6 +29,5 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty diff --git a/example/mini-single-example.yaml b/example/mini-single-example.yaml index 1e7f429..579f7bb 100755 --- a/example/mini-single-example.yaml +++ b/example/mini-single-example.yaml @@ -36,6 +36,5 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty diff --git a/example/mini-single-with-obproxy-example.yaml b/example/mini-single-with-obproxy-example.yaml index 6b67898..01a0218 100644 --- a/example/mini-single-with-obproxy-example.yaml +++ b/example/mini-single-with-obproxy-example.yaml @@ -36,7 +36,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. 
# observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/ob-configserver/distributed-with-obproxy-and-configserver-example.yaml b/example/ob-configserver/distributed-with-obproxy-and-configserver-example.yaml index a05f8b0..f36acfd 100644 --- a/example/ob-configserver/distributed-with-obproxy-and-configserver-example.yaml +++ b/example/ob-configserver/distributed-with-obproxy-and-configserver-example.yaml @@ -38,7 +38,6 @@ oceanbase-ce: cpu_count: 16 production_mode: false enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. appname: obcluster diff --git a/example/obagent/distributed-with-obproxy-and-obagent-example.yaml b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml index ee817f3..9a50490 100644 --- a/example/obagent/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password # In this example , support multiple ob process in single node, so different process use different ports. diff --git a/example/oblogproxy/distributed-with-obproxy-and-oblogproxy-example.yaml b/example/oblogproxy/distributed-with-obproxy-and-oblogproxy-example.yaml index 8991cc3..d289389 100644 --- a/example/oblogproxy/distributed-with-obproxy-and-oblogproxy-example.yaml +++ b/example/oblogproxy/distributed-with-obproxy-and-oblogproxy-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. 
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/obproxy/distributed-with-obproxy-example.yaml b/example/obproxy/distributed-with-obproxy-example.yaml index bd34782..fbd176f 100644 --- a/example/obproxy/distributed-with-obproxy-example.yaml +++ b/example/obproxy/distributed-with-obproxy-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/oceanbase-3.x/distributed-example.yaml b/example/oceanbase-3.x/distributed-example.yaml index eb07fba..a4db086 100644 --- a/example/oceanbase-3.x/distributed-example.yaml +++ b/example/oceanbase-3.x/distributed-example.yaml @@ -24,7 +24,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. skip_proxy_sys_private_check: true enable_strict_kernel_release: false diff --git a/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml b/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml index fde624f..b398425 100644 --- a/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml @@ -24,7 +24,6 @@ oceanbase-ce: datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password # In this example , support multiple ob process in single node, so different process use different ports. 
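The oceanbase-3.x examples above likewise drop enable_syslog_recycle, so log recycling in these configs is governed by max_syslog_file_count alone (the retained comments note that 0 disables deletion). A small sketch, assuming the Cursor helper from tool (which this patch imports in plugins/ob-configserver/1.0.0/start.py) and hypothetical local connection details, to confirm the effective value on a running cluster:

from tool import Cursor

# Hypothetical connection details for a local observer; adjust ip, port
# and password to match the deployment. tenant='' mirrors the sys-tenant
# connections made elsewhere in this patch; stdio=None is an assumption.
cursor = Cursor(ip='127.0.0.1', port=2881, tenant='', password='', stdio=None)
row = cursor.fetchone("show parameters where name = 'max_syslog_file_count'")
print(row and row['value'])  # expect '4' with these trimmed examples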
diff --git a/example/oceanbase-3.x/distributed-with-obproxy-example.yaml b/example/oceanbase-3.x/distributed-with-obproxy-example.yaml index ef783d1..06e94a0 100644 --- a/example/oceanbase-3.x/distributed-with-obproxy-example.yaml +++ b/example/oceanbase-3.x/distributed-with-obproxy-example.yaml @@ -24,7 +24,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/oceanbase-3.x/local-example.yaml b/example/oceanbase-3.x/local-example.yaml index 720aa37..f7d27b8 100644 --- a/example/oceanbase-3.x/local-example.yaml +++ b/example/oceanbase-3.x/local-example.yaml @@ -21,6 +21,5 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty \ No newline at end of file diff --git a/example/oceanbase-3.x/mini-distributed-example.yaml b/example/oceanbase-3.x/mini-distributed-example.yaml index d7a7704..1a14c1e 100644 --- a/example/oceanbase-3.x/mini-distributed-example.yaml +++ b/example/oceanbase-3.x/mini-distributed-example.yaml @@ -36,7 +36,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty server1: diff --git a/example/oceanbase-3.x/mini-distributed-with-obproxy-example.yaml b/example/oceanbase-3.x/mini-distributed-with-obproxy-example.yaml index d2056e3..8e550ea 100644 --- a/example/oceanbase-3.x/mini-distributed-with-obproxy-example.yaml +++ b/example/oceanbase-3.x/mini-distributed-with-obproxy-example.yaml @@ -36,7 +36,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. 
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/oceanbase-3.x/mini-local-example.yaml b/example/oceanbase-3.x/mini-local-example.yaml index 037fa37..70cc34e 100755 --- a/example/oceanbase-3.x/mini-local-example.yaml +++ b/example/oceanbase-3.x/mini-local-example.yaml @@ -33,6 +33,5 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty diff --git a/example/oceanbase-3.x/mini-single-example.yaml b/example/oceanbase-3.x/mini-single-example.yaml index 09f2959..badf882 100755 --- a/example/oceanbase-3.x/mini-single-example.yaml +++ b/example/oceanbase-3.x/mini-single-example.yaml @@ -40,6 +40,5 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty diff --git a/example/oceanbase-3.x/mini-single-with-obproxy-example.yaml b/example/oceanbase-3.x/mini-single-with-obproxy-example.yaml index f5b5dad..e2cee8e 100644 --- a/example/oceanbase-3.x/mini-single-with-obproxy-example.yaml +++ b/example/oceanbase-3.x/mini-single-with-obproxy-example.yaml @@ -40,7 +40,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. 
# observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/oceanbase-3.x/single-example.yaml b/example/oceanbase-3.x/single-example.yaml index eaf1ee5..ef44512 100644 --- a/example/oceanbase-3.x/single-example.yaml +++ b/example/oceanbase-3.x/single-example.yaml @@ -28,6 +28,5 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty \ No newline at end of file diff --git a/example/oceanbase-3.x/single-with-obproxy-example.yaml b/example/oceanbase-3.x/single-with-obproxy-example.yaml index a7f481a..879a428 100644 --- a/example/oceanbase-3.x/single-with-obproxy-example.yaml +++ b/example/oceanbase-3.x/single-with-obproxy-example.yaml @@ -28,7 +28,6 @@ oceanbase-ce: datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. syslog_level: INFO # System log level. The default value is INFO. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/ocp/distributed-with-obproxy-and-ocp-example.yaml b/example/ocp/distributed-with-obproxy-and-ocp-example.yaml index 5c1b752..a508c68 100644 --- a/example/ocp/distributed-with-obproxy-and-ocp-example.yaml +++ b/example/ocp/distributed-with-obproxy-and-ocp-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. 
# observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml b/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml index 409a042..93135fe 100644 --- a/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml +++ b/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml @@ -23,11 +23,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password # In this example , support multiple ob process in single node, so different process use different ports. diff --git a/example/single-example.yaml b/example/single-example.yaml index 26c0175..794cc99 100644 --- a/example/single-example.yaml +++ b/example/single-example.yaml @@ -28,10 +28,9 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # root_password: # root user password, can be empty \ No newline at end of file diff --git a/example/single-with-obproxy-example.yaml b/example/single-with-obproxy-example.yaml index 4295831..5597fc0 100644 --- a/example/single-with-obproxy-example.yaml +++ b/example/single-with-obproxy-example.yaml @@ -28,11 +28,10 @@ oceanbase-ce: # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. - system_memory: 30G + system_memory: 10G datafile_size: 192G # Size of the data file. log_disk_size: 192G # The size of disk space used by the clog files. enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. - enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. 
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. # observer cluster name, consistent with obproxy's cluster_name appname: obcluster diff --git a/executer27-requirements.txt b/executer27-requirements.txt index bc7364e..2b5a9c1 100644 --- a/executer27-requirements.txt +++ b/executer27-requirements.txt @@ -1,2 +1,3 @@ protobuf==3.18.0 mysql-connector-python==8.0.21 +argparse==1.4.0 diff --git a/executer27.py b/executer27.py index 7aa373b..0218e88 100644 --- a/executer27.py +++ b/executer27.py @@ -32,6 +32,8 @@ import ConfigParser import socket import platform +import ctypes +import mmap if __name__ == '__main__': defaultencoding = 'utf-8' diff --git a/optimize/obproxy/3.1.0/sysbench.yaml b/optimize/obproxy/3.1.0/sysbench.yaml index fd81564..e541fdf 100644 --- a/optimize/obproxy/3.1.0/sysbench.yaml +++ b/optimize/obproxy/3.1.0/sysbench.yaml @@ -3,7 +3,7 @@ test: - name: proxy_mem_limited value: Capacity(min(max(threads * (8 << 10), 2 << 30), 4 << 30), 0) expression: true - condition: "lambda n, o: Capacity(n).btyes > Capacity(o).btyes" + condition: "lambda n, o: Capacity(n).bytes > Capacity(o).bytes" - name: enable_prometheus value: false value_type: BOOL diff --git a/optimize/oceanbase-ce/4.3.0/tpch.yaml b/optimize/oceanbase-ce/4.3.0/tpch.yaml index 21e6312..4108212 100644 --- a/optimize/oceanbase-ce/4.3.0/tpch.yaml +++ b/optimize/oceanbase-ce/4.3.0/tpch.yaml @@ -13,7 +13,7 @@ test: value: 'false' variables: - name: ob_sql_work_area_percentage - value: 80 + value: 30 - name: ob_query_timeout value: 36000000000 - name: ob_trx_timeout diff --git a/plugins/commands/0.1/check_opt.py b/plugins/commands/0.1/check_opt.py index 87e63ed..4e8316f 100644 --- a/plugins/commands/0.1/check_opt.py +++ b/plugins/commands/0.1/check_opt.py @@ -102,11 +102,14 @@ def get_option(key, default=''): servers = list(clients.keys()) else: server_names = servers.split(',') - servers = [] + servers = set() for server in clients: + if server.ip in server_names: + server_names.remove(server.ip) + servers.add(server) if server.name in server_names: server_names.remove(server.name) - servers.append(server) + servers.add(server) if server_names: stdio.error("Server {} not found in current deployment".format(','.join(server_names))) return @@ -118,5 +121,5 @@ def get_option(key, default=''): if failed_components: stdio.error('{} not support. 
{} is allowed'.format(','.join(failed_components), deployed_components)) return plugin_context.return_false() - context.update(components=components, servers=servers, command_config=command_config) + context.update(components=components, servers=list(servers), command_config=command_config) return plugin_context.return_true(context=context) diff --git a/plugins/commands/0.1/command_template.yaml b/plugins/commands/0.1/command_template.yaml index c413c9b..9c43b5e 100644 --- a/plugins/commands/0.1/command_template.yaml +++ b/plugins/commands/0.1/command_template.yaml @@ -2,14 +2,14 @@ variables: ssh: - name: host config_key: host - components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus'] + components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus', 'ob-configserver', 'oblogproxy'] - name: user config_key: username - components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus'] + components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus', 'ob-configserver', 'oblogproxy'] server: - name: home_path config_key: home_path - components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus'] + components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus', 'ob-configserver', 'oblogproxy'] - name: mysql_port config_key: mysql_port components: ['oceanbase', 'oceanbase-ce'] @@ -31,12 +31,12 @@ wrappers: commands: - name: ssh - components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus'] + components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus', 'ob-configserver', 'oblogproxy'] command: "cd {home_path}/log; echo 'ssh {user}@{host}'; bash --login" wrapper: "ssh" interactive: true - name: log - components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus'] + components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce', 'obagent', 'ocp-express', 'grafana', 'prometheus', 'ob-configserver', 'oblogproxy'] command: "cd {home_path}/log; echo 'ssh {user}@{host}'; ls -l; bash --login" wrapper: "ssh" interactive: true diff --git a/plugins/commands/0.1/prepare_variables.py b/plugins/commands/0.1/prepare_variables.py index 7019145..c926476 100644 --- a/plugins/commands/0.1/prepare_variables.py +++ b/plugins/commands/0.1/prepare_variables.py @@ -54,6 +54,7 @@ def get_value_from_context(key, default=None): cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio clients = plugin_context.clients + cluster_servers = cluster_config.servers components = get_value_from_context("components", []) servers = get_value_from_context("servers", []) @@ -61,7 +62,9 @@ def get_value_from_context(key, default=None): loading_env = {} if server is None: - server = cluster_config.servers[0] + server = cluster_servers[0] + if server not in cluster_servers and getattr(server, 'ip', server) in [s.ip for s in cluster_servers]: + server = [s for s in cluster_servers if s.ip == getattr(server, 'ip', server)][0] # find command template command_template = None interactive = None @@ -94,7 +97,7 @@ def get_value_from_context(key, default=None): return cmd_input = None - if server not in cluster_config.servers: + if 
server not in cluster_servers: if interactive: stdio.error("{} is not a server in {}".format(server, component)) return plugin_context.return_false() diff --git a/plugins/grafana/7.5.17/destroy.py b/plugins/grafana/7.5.17/destroy.py index 1c343d0..9847a6c 100644 --- a/plugins/grafana/7.5.17/destroy.py +++ b/plugins/grafana/7.5.17/destroy.py @@ -24,10 +24,24 @@ global_ret = True + +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + def destroy(plugin_context, *args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/grafana/7.5.17/display.py b/plugins/grafana/7.5.17/display.py index 4a7373a..c6d585a 100644 --- a/plugins/grafana/7.5.17/display.py +++ b/plugins/grafana/7.5.17/display.py @@ -53,7 +53,7 @@ def display(plugin_context, cursor, *args, **kwargs): 'status': 'active' if api_cursor and api_cursor.connect(stdio) else 'inactive' }) - stdio.print_list(results, [ 'url', 'user', 'password', 'status'], lambda x: [x['url'], x['user'], x['password'], x['status']], title='grafana') + stdio.print_list(results, [ 'url', 'user', 'password', 'status'], lambda x: [x['url'], x['user'], x['password'], x['status']], title=cluster_config.name) active_result = [r for r in results if r['status'] == 'active'] info_dict = active_result[0] if len(active_result) > 0 else None if info_dict is not None: diff --git a/plugins/grafana/7.5.17/init.py b/plugins/grafana/7.5.17/init.py index febb2ba..196d2a7 100644 --- a/plugins/grafana/7.5.17/init.py +++ b/plugins/grafana/7.5.17/init.py @@ -20,7 +20,7 @@ from __future__ import absolute_import, division, print_function import os from tool import OrderedDict -from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY stdio = None force = False @@ -37,6 +37,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name servers_dirs = {} force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -107,6 +108,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % target_path) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key=k, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=target_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) mkdir_ret = False continue client.execute_command("if [ ! 
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (target_path, link_path, target_path, link_path)) diff --git a/plugins/mysqltest/3.1.0/init.py b/plugins/mysqltest/3.1.0/init.py index 2ba45e1..cb0dc0e 100644 --- a/plugins/mysqltest/3.1.0/init.py +++ b/plugins/mysqltest/3.1.0/init.py @@ -35,7 +35,7 @@ def get_memory_limit(cursor, client): try: memory_limit = cursor.fetchone('show parameters where name = \'memory_limit\'') if memory_limit and 'value' in memory_limit and memory_limit['value']: - return Capacity(memory_limit['value']).btyes + return Capacity(memory_limit['value']).bytes ret = client.execute_command('free -b') if ret: ret = client.execute_command("cat /proc/meminfo | grep 'MemTotal:' | awk -F' ' '{print $2}'") @@ -117,7 +117,7 @@ def exec_sql(cmd): exec_init_user_for_oracle = 'init_user_oracle.sql|SYS@oracle|SYS' client = plugin_context.clients[server] memory_limit = get_memory_limit(cursor, client) - is_mini = memory_limit and Capacity(memory_limit).btyes < (16<<30) + is_mini = memory_limit and Capacity(memory_limit).bytes < (16<<30) if env['is_business']: init_sql = [exec_mini_init if is_mini else exec_init, exec_init_user_for_oracle, exec_init_user] else: diff --git a/plugins/mysqltest/3.1.0/run_test.py b/plugins/mysqltest/3.1.0/run_test.py index c407f06..4f9d0b0 100644 --- a/plugins/mysqltest/3.1.0/run_test.py +++ b/plugins/mysqltest/3.1.0/run_test.py @@ -30,7 +30,7 @@ from copy import deepcopy from ssh import LocalClient from tool import DirectoryUtil -from _stdio import FormtatText +from _stdio import FormatText inner_dir = os.path.split(__file__)[0] inner_test_dir = os.path.join(inner_dir, 't') @@ -220,7 +220,7 @@ def return_true(**kw): need_reboot = False return return_true(reboot=True) retry_msg = "in auto retry mode" if is_retry else "" - label = FormtatText.info("[ RUN ]") + label = FormatText.info("[ RUN ]") stdio.start_loading('%sRunning case: %s ( %s / %s ) %s' % (label, test, index+1, total_test_count, retry_msg)) test_name = test opt = {} @@ -428,9 +428,9 @@ def return_true(**kw): if retcode: # verbose_msg += ', error output:\n%s' % errput stdio.print(errput) - case_status = FormtatText.error("[ FAILED ]") + case_status = FormatText.error("[ FAILED ]") else: - case_status = FormtatText.success('[ OK ]') + case_status = FormatText.success('[ OK ]') stdio.print("%s%s" % (case_status, case_info)) if retcode == 0 and slb_host and exec_id: slb_request(test_name, exec_id=exec_id, slb_host=slb_host, op='success', stdio=stdio) diff --git a/plugins/ob-configserver/1.0.0/destroy.py b/plugins/ob-configserver/1.0.0/destroy.py index 54698b5..6bb1916 100644 --- a/plugins/ob-configserver/1.0.0/destroy.py +++ b/plugins/ob-configserver/1.0.0/destroy.py @@ -23,6 +23,17 @@ from _errno import EC_CLEAN_PATH_FAILED +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients @@ -35,7 +46,10 @@ def destroy(plugin_context, *args, **kwargs): stdio.verbose('%s work path cleaning' % server) client = clients[server] home_path = server_config['home_path'] - ret = client.execute_command('rm -fr %s/' % (home_path), timeout=-1) + if check_mount_path(client, 
home_path, stdio): + ret = client.execute_command('rm -fr %s/*' % home_path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % home_path, timeout=-1) if not ret: stdio.warn(EC_CLEAN_PATH_FAILED.format(server=server, path=home_path)) global_ret = False diff --git a/plugins/ob-configserver/1.0.0/display.py b/plugins/ob-configserver/1.0.0/display.py index 191a209..0e57f44 100644 --- a/plugins/ob-configserver/1.0.0/display.py +++ b/plugins/ob-configserver/1.0.0/display.py @@ -52,7 +52,7 @@ def display(plugin_context, cursor, *args, **kwargs): stdio.print_list(result, ['server', 'port', 'vip_address', 'vip_port', 'status', 'pid'], lambda x: [x['server'], x['port'], x['vip_address'], x['vip_port'], x['status'], x['pid']], - title='ob-configserver') + title=cluster_config.name) if result: cmd = "curl -s 'http://{0}:{1}/services?Action=GetObProxyConfig'".format(result[0]['server'], result[0]['port']) stdio.print(cmd) diff --git a/plugins/ob-configserver/1.0.0/init.py b/plugins/ob-configserver/1.0.0/init.py index 9a0b89a..9a41998 100644 --- a/plugins/ob-configserver/1.0.0/init.py +++ b/plugins/ob-configserver/1.0.0/init.py @@ -21,7 +21,7 @@ import os.path -from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY def check_home_path(home_path, client): @@ -49,6 +49,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) stdio.start_loading('Initializes ob-configserver work home') @@ -71,8 +72,8 @@ def init(plugin_context, *args, **kwargs): stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr)) else: global_ret = False - err_msg = ' {} is not empty'.format(home_path) - stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg)) + stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) if global_ret and not client.execute_command(f"""bash -c 'mkdir -p {os.path.join(home_path, '{run,bin,conf,log}')}'"""): stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path',msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path))) diff --git a/plugins/ob-configserver/1.0.0/start.py b/plugins/ob-configserver/1.0.0/start.py index f69df4e..a08fe54 100644 --- a/plugins/ob-configserver/1.0.0/start.py +++ b/plugins/ob-configserver/1.0.0/start.py @@ -21,8 +21,7 @@ import time from _errno import EC_OBC_PROGRAM_START_ERROR -from const import CONST_OBD_HOME -from tool import YamlLoader +from tool import YamlLoader, NetUtil, Cursor def check_home_path_pid(home_path, client): @@ -83,17 +82,60 @@ def get_config_dict(home_path, server_config, ip): return config_dict +def manual_register(added_components, cluster_config, config_dict, stdio): + if 'ob-configserver' not in added_components or not config_dict: + return True + ip = config_dict['vip']['address'] + port = config_dict['vip']['port'] + + for comp in ["oceanbase-ce", "oceanbase"]: + if comp not in added_components and cluster_config.get_be_depend_servers(comp): + stdio.verbose('add_component(observer register in ob-configserver)') + server = 
cluster_config.get_be_depend_servers(comp)[0] + server_config = cluster_config.get_be_depend_config(comp, server) + mysql_port = server_config['mysql_port'] + root_password = server_config['root_password'] + appname = server_config['appname'] + cursor = Cursor(ip=server.ip, port=mysql_port, tenant='', password=root_password, stdio=stdio) + cfg_url = 'http://%s:%s/services?Action=ObRootServiceInfo&ObCluster=%s' % (ip, port, appname) + try: + cursor.execute("alter system set obconfig_url = '%s'" % cfg_url) + cursor.execute("alter system reload server") + except: + stdio.error('Failed to register obconfig_url') + return False + + for comp in ["obproxy-ce", "obproxy"]: + if comp not in added_components and cluster_config.get_be_depend_servers(comp): + stdio.verbose('add_component(cfg_url register in proxyconfig)') + for server in cluster_config.get_be_depend_servers(comp): + server_config = cluster_config.get_be_depend_config(comp, server) + password = server_config.get('obproxy_sys_password', '') + proxy_port = server_config['listen_port'] + cursor = Cursor(ip=server.ip, user='root@proxysys', port=proxy_port, tenant='', password=password, stdio=stdio) + obproxy_config_server_url = "http://{0}:{1}/services?Action=GetObProxyConfig".format(ip, port) + try: + cursor.execute("alter proxyconfig set obproxy_config_server_url='%s'" % obproxy_config_server_url) + except: + stdio.error('Failed to register obproxy_config_server_url') + return False + return True + + + def start(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config + added_components = cluster_config.get_deploy_added_components() clients = plugin_context.clients stdio = plugin_context.stdio stdio.start_loading('Start ob-configserver') + config_dict = {} for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) client = clients[server] home_path = server_config['home_path'] - ip = server.ip + ip = NetUtil.get_host_ip() if client.is_localhost() else server.ip if check_home_path_pid(home_path, client): stdio.verbose('%s is runnning, skip' % server) @@ -112,7 +154,6 @@ def start(plugin_context, *args, **kwargs): if not client.execute_command(cmd): stdio.error('Failed to start ob-configserver') stdio.stop_loading('failed') - plugin_context.return_false() return stdio.stop_loading('succeed') @@ -152,5 +193,8 @@ def start(plugin_context, *args, **kwargs): stdio.error(EC_OBC_PROGRAM_START_ERROR.format(server=server)) plugin_context.return_false() else: + if not manual_register(added_components, cluster_config, config_dict, stdio): + stdio.stop_loading('failed') + return stdio.stop_loading('succeed') plugin_context.return_true() diff --git a/plugins/obagent/0.1/destroy.py b/plugins/obagent/0.1/destroy.py index db501d2..26fe9a8 100644 --- a/plugins/obagent/0.1/destroy.py +++ b/plugins/obagent/0.1/destroy.py @@ -25,10 +25,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + 
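Every destroy plugin touched by this patch applies the same mount-aware rule: `rm -fr` can delete the contents of a mount point but not the mount point itself (the kernel reports it busy), so paths found in /proc/mounts are emptied with `rm -fr path/*` while ordinary directories are removed outright. A standalone sketch of the rule, assuming a local shell in place of OBD's remote execute_command:

# Minimal sketch of the mount-aware cleanup used by the destroy plugins.
# Assumes a local shell; OBD runs the same commands through its SSH client.
import subprocess

def is_mount_point(path):
    # /proc/mounts lines read "device mountpoint fstype ..."; anchoring the
    # path between whitespace keeps /data from matching /data1.
    cmd = "grep '\\s%s\\s' /proc/mounts" % path
    return subprocess.run(cmd, shell=True, capture_output=True).returncode == 0

def clean(path):
    if is_mount_point(path):
        # A mount point cannot be removed; empty it instead.
        cmd = 'rm -fr %s/*' % path
    else:
        cmd = 'rm -fr %s' % path
    return subprocess.run(cmd, shell=True).returncode == 0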
else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/obagent/0.1/display.py b/plugins/obagent/0.1/display.py index f04f76d..2a45ee3 100644 --- a/plugins/obagent/0.1/display.py +++ b/plugins/obagent/0.1/display.py @@ -47,5 +47,5 @@ def display(plugin_context, cursor, *args, **kwargs): }) stdio.print_list(result, ['ip', 'server_port', 'pprof_port', 'status'], - lambda x: [x['ip'], x['server_port'], x['pprof_port'], x['status']], title='obagent') + lambda x: [x['ip'], x['server_port'], x['pprof_port'], x['status']], title=cluster_config.name) plugin_context.return_true() diff --git a/plugins/obagent/0.1/generate_config.py b/plugins/obagent/0.1/generate_config.py index 777fafb..0e166ee 100644 --- a/plugins/obagent/0.1/generate_config.py +++ b/plugins/obagent/0.1/generate_config.py @@ -62,6 +62,8 @@ def generate_config(plugin_context, auto_depend=False, return_generate_keys=Fals break else: cluster_config.update_server_conf(server, 'ob_monitor_status', 'inactive', False) + if generate_configs.get(server) is None: + generate_configs[server] = {} generate_configs[server]['ob_monitor_status'] = 'inactive' else: cluster_config.update_global_conf('ob_monitor_status', 'inactive', False) diff --git a/plugins/obagent/0.1/init.py b/plugins/obagent/0.1/init.py index 379dd1e..5b9a606 100644 --- a/plugins/obagent/0.1/init.py +++ b/plugins/obagent/0.1/init.py @@ -20,13 +20,14 @@ from __future__ import absolute_import, division, print_function -from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -69,6 +70,7 @@ def init(plugin_context, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False diff --git a/plugins/obagent/0.1/parameter.yaml b/plugins/obagent/0.1/parameter.yaml index 02a7544..f1e9525 100644 --- a/plugins/obagent/0.1/parameter.yaml +++ b/plugins/obagent/0.1/parameter.yaml @@ -266,7 +266,7 @@ description_local: 是否禁用 debug 接口的basic auth 认证,true 表示禁用,false 表示不禁用 - name: target_sync_configs require: false - type: PARAM_LIST + type: LIST need_restart: true description_en: description_local: '''将地址同步至指定远端目录 diff --git a/plugins/obagent/0.1/start.py b/plugins/obagent/0.1/start.py index ac4e42b..994535f 100644 --- a/plugins/obagent/0.1/start.py +++ b/plugins/obagent/0.1/start.py @@ -190,6 +190,17 @@ def start(plugin_context, *args, **kwargs): if key and isinstance(key, dict): key = list(key.keys())[0] need_encrypted.append(key) + + for comp in ["oceanbase", "oceanbase-ce"]: + if cluster_config.get_depend_config(comp) and plugin_context.get_return('start', comp).get_return('need_bootstrap'): + error_servers_list = [] + for server in cluster_config.servers: + if not cluster_config.get_depend_config(comp, server): + error_servers_list.append(server) + if error_servers_list: + error_servers_msg = ', '.join(map(lambda x: 
str(x), error_servers_list)) + stdio.warn(WC_OBAGENT_SERVER_NAME_ERROR.format(servers=error_servers_msg)) + targets = [] for server in cluster_config.servers: client = clients[server] diff --git a/plugins/obagent/1.1.0/start.py b/plugins/obagent/1.1.0/start.py index d156930..5568772 100644 --- a/plugins/obagent/1.1.0/start.py +++ b/plugins/obagent/1.1.0/start.py @@ -191,6 +191,17 @@ def start(plugin_context, local_home_path, *args, **kwargs): if key and isinstance(key, dict): key = list(key.keys())[0] need_encrypted.append(key) + + for comp in ["oceanbase", "oceanbase-ce"]: + if cluster_config.get_depend_config(comp) and plugin_context.get_return('start', comp).get_return('need_bootstrap'): + error_servers_list = [] + for server in cluster_config.servers: + if not cluster_config.get_depend_config(comp, server): + error_servers_list.append(server) + if error_servers_list: + error_servers_msg = ', '.join(map(lambda x: str(x), error_servers_list)) + stdio.warn(WC_OBAGENT_SERVER_NAME_ERROR.format(servers=error_servers_msg)) + targets = [] for server in cluster_config.servers: client = clients[server] diff --git a/plugins/obagent/1.3.0/display.py b/plugins/obagent/1.3.0/display.py index 2b82766..ded797d 100644 --- a/plugins/obagent/1.3.0/display.py +++ b/plugins/obagent/1.3.0/display.py @@ -42,5 +42,5 @@ def display(plugin_context, cursor, *args, **kwargs): }) stdio.print_list(result, ['ip', 'mgragent_http_port', 'monagent_http_port', 'status'], - lambda x: [x['ip'], x['mgragent_http_port'], x['monagent_http_port'], x['status']], title='obagent') + lambda x: [x['ip'], x['mgragent_http_port'], x['monagent_http_port'], x['status']], title=cluster_config.name) plugin_context.return_true() diff --git a/plugins/obagent/1.3.0/init.py b/plugins/obagent/1.3.0/init.py index 8c13780..0f42203 100644 --- a/plugins/obagent/1.3.0/init.py +++ b/plugins/obagent/1.3.0/init.py @@ -20,13 +20,14 @@ from __future__ import absolute_import, division, print_function -from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -67,6 +68,7 @@ def init(plugin_context, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False diff --git a/plugins/obagent/1.3.0/parameter.yaml b/plugins/obagent/1.3.0/parameter.yaml index ca3d789..86c2c2b 100644 --- a/plugins/obagent/1.3.0/parameter.yaml +++ b/plugins/obagent/1.3.0/parameter.yaml @@ -229,7 +229,7 @@ description_local: OceanBase 监控指标采集状态,active 表示开启,inactive 表示关闭 - name: target_sync_configs require: false - type: PARAM_LIST + type: LIST need_restart: true description_en: description_local: '''将地址同步至指定远端目录 diff --git a/plugins/obagent/1.3.0/start.py b/plugins/obagent/1.3.0/start.py index d11c25a..6ad8f0b 100644 --- a/plugins/obagent/1.3.0/start.py +++ b/plugins/obagent/1.3.0/start.py @@ -32,6 +32,7 @@ from Crypto import Random from Crypto.Cipher import AES +from _errno import 
WC_OBAGENT_SERVER_NAME_ERROR from ssh import SshClient, SshConfig from tool import YamlLoader, FileUtil @@ -186,7 +187,7 @@ def prepare_parameters(cluster_config): return env -def start(plugin_context, *args, **kwargs): +def start(plugin_context, is_reinstall=False, *args, **kwargs): global stdio cluster_config = plugin_context.cluster_config clients = plugin_context.clients @@ -209,6 +210,16 @@ def start(plugin_context, *args, **kwargs): config_mapper = yaml.load(f).get('config_mapper', {}) stdio.start_loading('Start obagent') + for comp in ["oceanbase", "oceanbase-ce"]: + if cluster_config.get_depend_config(comp) and plugin_context.get_return('start', comp).get_return('need_bootstrap'): + error_servers_list = [] + for server in cluster_config.servers: + if not cluster_config.get_depend_config(comp, server): + error_servers_list.append(server) + if error_servers_list: + error_servers_msg = ', '.join(map(lambda x: str(x), error_servers_list)) + stdio.warn(WC_OBAGENT_SERVER_NAME_ERROR.format(servers=error_servers_msg)) + targets = [] for server in cluster_config.servers: client = clients[server] @@ -227,6 +238,9 @@ def start(plugin_context, *args, **kwargs): if getattr(options, 'without_parameter', False) and client.execute_command('ls %s' % config_flag): use_parameter = False + if is_reinstall: + use_parameter = True + if use_parameter: # todo: set agent secret key mgr_conf = os.path.join(home_path, 'conf/mgragent.yaml') diff --git a/plugins/obagent/4.2.2/parameter.yaml b/plugins/obagent/4.2.2/parameter.yaml new file mode 100644 index 0000000..6b1b073 --- /dev/null +++ b/plugins/obagent/4.2.2/parameter.yaml @@ -0,0 +1,251 @@ +- name: home_path + name_local: 工作目录 + require: true + essential: true + type: PATH + need_restart: true + need_redeploy: true + description_en: working directory for obagent + description_local: Obagent工作目录 +- name: log_path + require: true + type: PATH + default: log/monagent.log + need_restart: true + need_redeploy: true + description_en: log path + description_local: 日志路径 +- name: http_basic_auth_user + name_local: 用户名 + require: true + essential: true + type: SAFE_STRING + default: admin + need_restart: true + description_en: username for HTTP authentication + description_local: HTTP 服务认证用户名 +- name: http_basic_auth_password + name_local: 密码 + require: true + essential: true + type: STRING + default: root + need_restart: true + need_redeploy: false + description_en: password for HTTP authentication + description_local: HTTP 服务认证密码 +- name: mgragent_http_port + name_local: 管理服务端口 + require: true + essential: true + type: INT + default: 8089 + need_restart: true + need_redeploy: false + description_en: The port of manager agent + description_local: OBAgent 管理服务端口 +- name: mgragent_log_level + require: false + type: SAFE_STRING + need_restart: true + need_redeploy: false + description_en: The log level of manager agent. + description_local: ob_mgragent 日志级别 +- name: mgragent_log_max_size + require: false + type: INT + default: 30 + need_restart: true + need_redeploy: false + description_en: The total size of manager agent.Log size is measured in Megabytes. + description_local: ob_mgragent 日志文件大小(单位:mb) +- name: mgragent_log_max_days + require: false + type: INT + need_restart: true + need_redeploy: false + description_en: Expiration time for manager agent logs. The default value is 30 days. 
+  description_local: ob_mgragent 日志文件最大保留天数
+- name: mgragent_log_max_backups
+  require: false
+  type: INT
+  need_restart: true
+  need_redeploy: false
+  description_en: The maximum number of manager agent log backups.
+  description_local: ob_mgragent 日志文件最大备份数
+- name: mgragent_log_compress
+  require: false
+  type: BOOL
+  need_restart: true
+  need_redeploy: false
+  description_en: ob_mgragent log compression switch
+  description_local: ob_mgragent 日志压缩开关
+- name: monagent_http_port
+  name_local: 监控服务端口
+  require: true
+  essential: true
+  type: INT
+  default: 8088
+  need_restart: true
+  need_redeploy: false
+  description_en: The port of the monitor agent.
+  description_local: OBAgent 监控服务端口
+- name: monagent_host_ip
+  require: false
+  type: SAFE_STRING
+  need_restart: true
+  need_redeploy: false
+  description_en: ob_monagent host ip
+  description_local: ob_monagent 主机 ip
+- name: monitor_password
+  require: false
+  type: STRING
+  need_restart: true
+  need_redeploy: false
+  description_en: monitor password for OceanBase Database
+  default: ''
+  description_local: OceanBase 数据库监控数据采集用户密码
+- name: monitor_user
+  require: false
+  type: STRING
+  default: ocp_monitor
+  need_restart: true
+  need_redeploy: false
+  description_en: The username used to collect OceanBase Database monitoring data
+  description_local: OceanBase 数据库监控数据采集用户名
+- name: sql_port
+  require: false
+  type: INT
+  need_restart: true
+  need_redeploy: false
+  description_en: SQL port for observer
+  default: 2881
+  min_value: 1025
+  max_value: 65535
+  description_local: observer 的 SQL 端口
+- name: rpc_port
+  require: false
+  type: INT
+  need_restart: true
+  need_redeploy: false
+  description_en: the RPC port for observer
+  default: 2882
+  min_value: 1025
+  max_value: 65535
+  description_local: observer 的 RPC 端口
+- name: cluster_name
+  require: false
+  type: SAFE_STRING
+  need_restart: true
+  need_redeploy: false
+  description_en: cluster name for OceanBase Database
+  default: obcluster
+  description_local: OceanBase Database 集群名
+- name: cluster_id
+  require: false
+  type: INT
+  need_restart: true
+  need_redeploy: false
+  description_en: cluster ID for OceanBase Database
+  default: 1
+  min_value: 1
+  max_value: 4294901759
+  description_local: OceanBase 集群 ID
+- name: zone_name
+  require: false
+  type: SAFE_STRING
+  need_restart: true
+  need_redeploy: false
+  description_en: zone name for your observer
+  default: zone1
+  min_value:
+  max_value:
+  description_local: observer 所在的 zone 名字
+- name: ob_log_path
+  require: false
+  type: PATH
+  need_restart: true
+  need_redeploy: false
+  description_en: observer log path
+  description_local: observer 日志盘路径
+- name: ob_data_path
+  require: false
+  type: PATH
+  need_restart: true
+  need_redeploy: false
+  description_en: observer data path
+  description_local: observer 数据盘路径
+- name: ob_install_path
+  require: false
+  type: PATH
+  need_restart: true
+  need_redeploy: false
+  description_en: observer install path
+  description_local: observer 安装目录
+- name: observer_log_path
+  require: false
+  type: PATH
+  need_restart: true
+  need_redeploy: false
+  description_en: log path under the observer install path
+  description_local: observer 安装目录下日志路径
+- name: monagent_log_level
+  require: false
+  type: SAFE_STRING
+  default: info
+  need_restart: true
+  need_redeploy: false
+  description_en: The log level of the monitor agent.
+  description_local: ob_monagent 日志级别
+- name: monagent_log_max_size
+  require: false
+  type: INT
+  need_restart: true
+  need_redeploy: false
+  description_en: The maximum size of the monitor agent log file. Log size is measured in megabytes.
+ description_local: ob_monagent 日志文件大小(单位:mb) +- name: monagent_log_max_days + require: false + type: INT + need_restart: true + need_redeploy: false + description_en: Expiration time for monitor agent logs. + description_local: ob_monagent 日志文件最大保留天数 +- name: monagent_log_max_backups + require: false + type: INT + need_restart: true + need_redeploy: false + description_en: The maximum number for monitor agent log files. + description_local: ob_monagent 日志文件最大备份数 +- name: monagent_log_compress + require: false + type: BOOL + need_restart: true + need_redeploy: false + description_en: ob_monagent log compression switch + description_local: ob_monagent 日志压缩开关 +- name: ob_monitor_status + name_local: OceanBase 指标监控采集 + require: true + essential: true + type: SAFE_STRING + default: active + need_restart: true + need_redeploy: false + description_en: monitor status for OceanBase Database. Active is to enable. Inactive is to disable. + description_local: OceanBase 监控指标采集状态,active 表示开启,inactive 表示关闭 +- name: target_sync_configs + require: false + type: LIST + need_restart: true + description_en: + description_local: '''将地址同步至指定远端目录 + target_sync_configs: + - host: 192.168.1.1 + target_dir: /data/prometheus/targets + user: user1 + port: 22 + # password: ***** + key_file: xxxxx + ''' diff --git a/plugins/obagent/4.2.2/start.py b/plugins/obagent/4.2.2/start.py new file mode 100644 index 0000000..ad81973 --- /dev/null +++ b/plugins/obagent/4.2.2/start.py @@ -0,0 +1,352 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
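For orientation before the implementation below: the plugin carries a small AES-CBC helper (with separate py2/py3 variants) whose generate_key XOR-folds an arbitrary-length key down to 16 bytes; the cipher prepends a random IV and base64-encodes the result. A py3-only roundtrip sketch, assuming pycryptodome supplies Crypto.Cipher.AES; the decrypt side does not exist in the plugin and is added here purely for illustration:

# Roundtrip sketch of the AES helper defined below (py3 variant only).
import base64
from Crypto import Random
from Crypto.Cipher import AES

def generate_key(key):
    # XOR-fold an arbitrary-length key into 16 bytes.
    gen = [0] * 16
    for i in range(min(16, len(key))):
        gen[i] = key[i]
    i = 16
    while i < len(key):
        j = 0
        while j < 16 and i < len(key):
            gen[j] ^= key[i]
            j, i = j + 1, i + 1
    return bytes(gen)

def encrypt(key, message):
    bs = AES.block_size                 # 16
    pad = bs - len(message) % bs
    message = message + chr(pad) * pad  # PKCS#7-style padding, as in _pad below
    iv = Random.new().read(bs)
    cipher = AES.new(generate_key(key), AES.MODE_CBC, iv)
    return base64.b64encode(iv + cipher.encrypt(message.encode())).decode()

def decrypt(key, token):
    raw = base64.b64decode(token)
    cipher = AES.new(generate_key(key), AES.MODE_CBC, raw[:16])
    plain = cipher.decrypt(raw[16:])
    return plain[:-plain[-1]].decode()  # strip the pad-count bytes

assert decrypt(b'secret', encrypt(b'secret', 'hello')) == 'hello'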
+ + +from __future__ import absolute_import, division, print_function + +import os +import re +import sys +import time +import random +import base64 +import tempfile +from copy import deepcopy + +from Crypto import Random +from Crypto.Cipher import AES + +from _errno import WC_OBAGENT_SERVER_NAME_ERROR +from ssh import SshClient, SshConfig +from tool import YamlLoader, FileUtil + +stdio = None +OBAGNET_CONFIG_MAP = { + "monitor_password": "{ocp_agent_monitor_password}", + "monitor_user": "{ocp_agent_monitor_username}", + "sql_port": "{mysql_port}", + "rpc_port": "{rpc_port}", + "cluster_name": "{appname}", + "cluster_id": "{cluster_id}", + "zone_name": "{zone}", + "ob_log_path": "{home_path}/store", + "ob_data_path": "{home_path}/store", + "ob_install_path": "{home_path}", + "observer_log_path": "{home_path}/log", +} + +if sys.version_info.major == 2: + + def generate_key(key): + genKey = [chr(0)] * 16 + for i in range(min(16, len(key))): + genKey[i] = key[i] + i = 16 + while i < len(key): + j = 0 + while j < 16 and i < len(key): + genKey[j] = chr(ord(genKey[j]) ^ ord(key[i])) + j, i = j + 1, i + 1 + return "".join(genKey) + + + class AESCipher: + bs = AES.block_size + + def __init__(self, key): + self.key = generate_key(key) + + def encrypt(self, message): + message = self._pad(message) + iv = Random.new().read(AES.block_size) + cipher = AES.new(self.key, AES.MODE_CBC, iv) + return base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8') + + def _pad(self, s): + return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs) + +else: + def generate_key(key): + genKey = [0] * 16 + for i in range(min(16, len(key))): + genKey[i] = key[i] + i = 16 + while i < len(key): + j = 0 + while j < 16 and i < len(key): + genKey[j] = genKey[j] ^ key[i] + j, i = j + 1, i + 1 + genKey = [chr(k) for k in genKey] + return bytes("".join(genKey), encoding="utf-8") + + + class AESCipher: + bs = AES.block_size + + def __init__(self, key): + self.key = generate_key(key) + + def encrypt(self, message): + message = self._pad(message) + iv = Random.new().read(AES.block_size) + cipher = AES.new(self.key, AES.MODE_CBC, iv) + return str(base64.b64encode(iv + cipher.encrypt(bytes(message, encoding='utf-8'))), encoding="utf-8") + + def _pad(self, s): + return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs) + + +def encrypt(key, data): + key = base64.b64decode(key) + cipher = AESCipher(key) + return cipher.encrypt(data) + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def confirm_port(client, pid, port): + socket_inodes = get_port_socket_inode(client, port) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def generate_aes_b64_key(): + n = random.randint(1, 3) * 8 + key = [] + c = 0 + while c < n: + key += chr(random.randint(33, 127)) + c += 1 + key = ''.join(key) + return base64.b64encode(key.encode('utf-8')) + + +def get_missing_required_parameters(parameters): + results = [] + for key in OBAGNET_CONFIG_MAP: + if parameters.get(key) is None: + results.append(key) + return results + + +def 
prepare_parameters(cluster_config): + env = {} + depend_info = {} + ob_servers_config = {} + depends_keys = ["ocp_agent_monitor_username", "ocp_agent_monitor_password", "appname", "cluster_id"] + for comp in ["oceanbase", "oceanbase-ce"]: + if comp in cluster_config.depends: + observer_globals = cluster_config.get_depend_config(comp) + for key in depends_keys: + value = observer_globals.get(key) + if value is not None: + depend_info[key] = value + ob_servers = cluster_config.get_depend_servers(comp) + for server in ob_servers: + ob_servers_config[server] = cluster_config.get_depend_config(comp, server) + + for server in cluster_config.servers: + server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + user_server_config = deepcopy(cluster_config.get_server_conf(server)) + if 'monagent_host_ip' not in user_server_config: + server_config['monagent_host_ip'] = server.ip + missed_keys = get_missing_required_parameters(user_server_config) + if missed_keys and server in ob_servers_config: + for key in depend_info: + ob_servers_config[server][key] = depend_info[key] + for key in missed_keys: + server_config[key] = OBAGNET_CONFIG_MAP[key].format(server_ip=server.ip, **ob_servers_config[server]) + env[server] = server_config + return env + + +def start(plugin_context, is_reinstall=False, *args, **kwargs): + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + options = plugin_context.options + stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name + pid_path = {} + yaml = YamlLoader(stdio) + start_env = plugin_context.get_variable('start_env') + + if not start_env: + start_env = prepare_parameters(cluster_config) + + repository_dir = None + for repository in plugin_context.repositories: + if repository.name == cluster_config.name: + repository_dir = repository.repository_dir + break + with FileUtil.open(os.path.join(repository_dir, 'conf/obd_agent_mapper.yaml')) as f: + config_mapper = yaml.load(f).get('config_mapper', {}) + stdio.start_loading('Start obagent') + + for comp in ["oceanbase", "oceanbase-ce"]: + if cluster_config.get_depend_config(comp) and plugin_context.get_return('start', comp).get_return('need_bootstrap'): + error_servers_list = [] + for server in cluster_config.servers: + if not cluster_config.get_depend_config(comp, server): + error_servers_list.append(server) + if error_servers_list: + error_servers_msg = ', '.join(map(lambda x: str(x), error_servers_list)) + stdio.warn(WC_OBAGENT_SERVER_NAME_ERROR.format(servers=error_servers_msg)) + + targets = [] + for server in cluster_config.servers: + client = clients[server] + server_config = start_env[server] + home_path = server_config['home_path'] + pid_path[server] = '%s/run/ob_agentd.pid' % home_path + mgragent_http_port = int(server_config['mgragent_http_port']) + targets.append('{}:{}'.format(server.ip, mgragent_http_port)) + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + continue + + home_path = server_config['home_path'] + use_parameter = True + config_flag = os.path.join(home_path, '.configured') + if getattr(options, 'without_parameter', False) and client.execute_command('ls %s' % config_flag): + use_parameter = False + + if is_reinstall: + use_parameter = True + + if use_parameter: + # todo: set agent secret key + mgr_conf = os.path.join(home_path, 'conf/mgragent.yaml') + mon_conf = os.path.join(home_path, 'conf/monagent.yaml') + 
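Below, the per-server settings are flattened through the config_mapper loaded from conf/obd_agent_mapper.yaml into a single `ob_agentctl config -u key=value,...` invocation. A condensed sketch of that assembly; the two mapper entries here are made-up stand-ins, not the real mapping:

# Condensed sketch of the ob_agentctl update assembled below.
config_mapper = {
    'monagent_http_port': 'monagent.server.port',  # hypothetical mapped key
    'monitor_user': 'monagent.ob.monitor.user',    # hypothetical mapped key
}
server_config = {'monagent_http_port': 8088, 'monitor_user': 'ocp_monitor'}
home_path = '/home/admin/obagent'                  # example home_path

cmds = ['%s=%s' % (config_mapper[key], value)
        for key, value in server_config.items() if key in config_mapper]
cmd = 'cd %s;%s/bin/ob_agentctl config -u %s' % (home_path, home_path, ','.join(cmds))
print(cmd)  # one remote call updates every mapped setting at once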
agent_conf = os.path.join(home_path, 'conf/agentctl.yaml') + for conf in [mgr_conf, mon_conf, agent_conf]: + ret = client.execute_command('cat {}'.format(conf)) + if ret: + content = ret.stdout + content = re.sub(r"cryptoMethod:\s+aes", "cryptoMethod: plain", content) + client.write_file(content, conf) + client.execute_command('chmod 755 {}'.format(conf)) + for key in server_config: + if server_config[key] is None: + server_config[key] = '' + if isinstance(server_config[key], bool): + server_config[key] = str(server_config[key]).lower() + + cmds = [] + for key, value in server_config.items(): + if key in config_mapper: + cmds.append("%s=%s" % (config_mapper[key], value)) + cmd = 'cd %s;%s/bin/ob_agentctl config -u %s && touch %s' % (home_path, home_path, ','.join(cmds), config_flag) + res = client.execute_command(cmd) + if not res: + stdio.error('failed to set config to {} obagent.'.format(server)) + return plugin_context.return_false() + + if not client.execute_command('cd %s;%s/bin/ob_agentctl start' % (home_path, home_path)): + stdio.error('failed to start {} obagent.'.format(server)) + return plugin_context.return_false() + + stdio.stop_loading('succeed') + stdio.start_loading('obagent program health check') + time.sleep(1) + failed = [] + servers = cluster_config.servers + count = 20 + while servers and count: + count -= 1 + tmp_servers = [] + for server in servers: + client = clients[server] + server_config = start_env[server] + home_path = server_config['home_path'] + stdio.verbose('%s program health check' % server) + pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if pid: + mgr_pid = client.execute_command("cat %s" % os.path.join(home_path, 'run/ob_mgragent.pid')).stdout.strip() + if mgr_pid and confirm_port(client, mgr_pid, int(server_config["mgragent_http_port"])): + stdio.verbose('%s obagent[pid: %s] started', server, pid) + elif count: + tmp_servers.append(server) + else: + failed.append('failed to start %s obagent' % server) + else: + failed.append('failed to start %s obagent' % server) + servers = tmp_servers + if servers and count: + time.sleep(1) + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + plugin_context.return_false() + else: + global_config = cluster_config.get_global_conf() + target_sync_configs = global_config.get('target_sync_configs', []) + stdio.verbose('start to sync target config') + data = [{'targets': targets}] + default_ssh_config = None + for client in clients.values(): + default_ssh_config = client.config + break + for target_sync_config in target_sync_configs: + host = None + target_dir = None + try: + host = target_sync_config.get('host') + target_dir = target_sync_config.get('target_dir') + if not host or not target_dir: + continue + ssh_config_keys = ['username', 'password', 'port', 'key_file', 'timeout'] + auth_keys = ['username', 'password', 'key_file'] + for key in auth_keys: + if key in target_sync_config: + config = SshConfig(host) + break + else: + config = deepcopy(default_ssh_config) + for key in ssh_config_keys: + if key in target_sync_config: + setattr(config, key, target_sync_config[key]) + with tempfile.NamedTemporaryFile(suffix='.yaml') as f: + yaml.dump(data, f) + f.flush() + file_name = '{}.yaml'.format(deploy_name or hash(cluster_config)) + file_path = os.path.join(target_dir, file_name) + remote_client = SshClient(config) + remote_client.connect() + remote_client.put_file(f.name, file_path) + except: + stdio.warn('failed to sync target to {}:{}'.format(host, target_dir)) + 
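The sync loop above writes a Prometheus file_sd-style target list and pushes it to every configured target_dir over SSH. A local-only sketch of the file it produces, assuming PyYAML in place of OBD's YamlLoader and a plain directory write in place of SshClient.put_file:

# Local-only sketch of the target file produced by the sync loop above.
import os
import yaml

targets = ['192.168.1.2:8089', '192.168.1.3:8089']  # example mgragent endpoints
data = [{'targets': targets}]                        # Prometheus file_sd format

target_dir = './prometheus-targets'                  # real deployments use e.g. /data/prometheus/targets
os.makedirs(target_dir, exist_ok=True)
with open(os.path.join(target_dir, 'demo.yaml'), 'w') as f:
    yaml.safe_dump(data, f)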
stdio.exception('') + stdio.stop_loading('succeed') + plugin_context.return_true(need_bootstrap=False) + + diff --git a/plugins/obagent/4.2.2/start_check.py b/plugins/obagent/4.2.2/start_check.py new file mode 100644 index 0000000..e037d25 --- /dev/null +++ b/plugins/obagent/4.2.2/start_check.py @@ -0,0 +1,265 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function + +import os +import re +from copy import deepcopy + +import _errno as err +from tool import YamlLoader, FileUtil + + +stdio = None +success = True + +OBAGNET_CONFIG_MAP = { + "monitor_password": "{ocp_agent_monitor_password}", + "monitor_user": "{ocp_agent_monitor_username}", + "sql_port": "{mysql_port}", + "rpc_port": "{rpc_port}", + "cluster_name": "{appname}", + "cluster_id": "{cluster_id}", + "zone_name": "{zone}", + "ob_log_path": "{home_path}/store", + "ob_data_path": "{home_path}/store", + "ob_install_path": "{home_path}", + "observer_log_path": "{home_path}/log", +} + + +def get_missing_required_parameters(parameters): + results = [] + for key in OBAGNET_CONFIG_MAP: + if parameters.get(key) is None: + results.append(key) + return results + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{if($4==\"0A\") print $2,$4,$10}' | grep ':%s' | awk -F' ' '{print $3}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def prepare_parameters(cluster_config): + env = {} + depend_info = {} + ob_servers_config = {} + depends_keys = ["ocp_agent_monitor_username", "ocp_agent_monitor_password", "appname", "cluster_id"] + for comp in ["oceanbase", "oceanbase-ce"]: + if comp in cluster_config.depends: + observer_globals = cluster_config.get_depend_config(comp) + for key in depends_keys: + value = observer_globals.get(key) + if value is not None: + depend_info[key] = value + ob_servers = cluster_config.get_depend_servers(comp) + for server in ob_servers: + ob_servers_config[server] = cluster_config.get_depend_config(comp, server) + + for server in cluster_config.servers: + server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + user_server_config = deepcopy(cluster_config.get_server_conf(server)) + if 'monagent_host_ip' not in user_server_config: + server_config['monagent_host_ip'] = server.ip + missed_keys = get_missing_required_parameters(user_server_config) + if missed_keys and server in ob_servers_config: + for key in depend_info: + ob_servers_config[server][key] = depend_info[key] + for key in missed_keys: + server_config[key] = OBAGNET_CONFIG_MAP[key].format(server_ip=server.ip, **ob_servers_config[server]) + env[server] = server_config + return 
env + + +def password_check(password): + if not re.match(r'^[\w~^*{}\[\]_\-+]+$', password): + return False + return True + + +def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, precheck=False, *args, **kwargs): + def check_fail(item, error, suggests=[]): + status = check_status[server][item] + if status.status == err.CheckStatus.WAIT: + status.error = error + status.suggests = suggests + status.status = err.CheckStatus.FAIL + + def alert(item, error, suggests=[]): + global success + if strict_check: + success = False + check_fail(item, error, suggests) + stdio.error(error) + else: + stdio.warn(error) + + def critical(item, error, suggests=[]): + global success + success = False + status = check_status.get(server).get(item) + status.status = err.CheckStatus.FAIL + status.error = error + status.suggests = suggests + stdio.error(error) + + def check_pass(item): + status = check_status.get(server).get(item).status + if status == err.CheckStatus.WAIT: + check_status.get(server).get(item).status = err.CheckStatus.PASS + + def wait_2_pass(): + status = check_status[server] + for key in status: + if status[key].status == err.CheckStatus.WAIT: + status[key].status = err.CheckStatus.PASS + + global stdio, success + success = True + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + servers_port = {} + servers_dirs = {} + servers_check_dirs = {} + check_status = {} + plugin_context.set_variable('start_check_status', check_status) + + for server in cluster_config.servers: + check_status[server] = { + 'port': err.CheckStatus(), + 'parameter': err.CheckStatus(), + 'password': err.CheckStatus() + } + if work_dir_check: + check_status[server]['dir'] = err.CheckStatus() + + if init_check_status: + return plugin_context.return_true(start_check_status=check_status) + + stdio.start_loading('Check before start obagent') + env = prepare_parameters(cluster_config) + for server in cluster_config.servers: + ip = server.ip + client = clients[server] + server_config = cluster_config.get_server_conf(server) + if not precheck: + remote_pid_path = "%s/run/ob_agentd.pid" % server_config['home_path'] + remote_pid = client.execute_command("cat %s" % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s' % remote_pid): + stdio.verbose('%s is runnning, skip' % server) + wait_2_pass() + continue + check_pass('parameter') + + # http_basic_auth_password check + http_basic_auth_password = server_config.get('http_basic_auth_password') + if http_basic_auth_password: + if not password_check(http_basic_auth_password): + critical('password', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='obagent', key='http_basic_auth_password', rule='^[\w~^*{}\[\]_\-+]+$'), suggests=[err.SUG_OBAGENT_EDIT_HTTP_BASIC_AUTH_PASSWORD.format()]) + + if work_dir_check: + stdio.verbose('%s dir check' % server) + if ip not in servers_dirs: + servers_dirs[ip] = {} + servers_check_dirs[ip] = {} + dirs = servers_dirs[ip] + check_dirs = servers_check_dirs[ip] + key = 'home_path' + path = server_config.get(key) + suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)] + if path in dirs and dirs[path]: + critical('dir', err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests) + dirs[path] = { + 'server': server, + 'key': key, + } + empty_check = work_dir_empty_check + while True: + if path in 
check_dirs: + if check_dirs[path] != True: + critical('dir', check_dirs[path], suggests) + break + + if client.execute_command('bash -c "[ -a %s ]"' % path): + is_dir = client.execute_command('[ -d {} ]'.format(path)) + has_write_permission = client.execute_command('[ -w {} ]'.format(path)) + if is_dir and has_write_permission: + if empty_check: + ret = client.execute_command('ls %s' % path) + if not ret or ret.stdout.strip(): + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)) + else: + check_dirs[path] = True + else: + check_dirs[path] = True + else: + if not is_dir: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path)) + else: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path)) + else: + path = os.path.dirname(path) + empty_check = False + + if ip not in servers_port: + servers_port[ip] = {} + ports = servers_port[ip] + + stdio.verbose('%s port check' % server) + for key in ['mgragent_http_port', 'monagent_http_port']: + port = int(server_config[key]) + if port in ports: + critical( + 'port', + err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], key=ports[port]['key']), + [err.SUG_PORT_CONFLICTS.format()] + ) + continue + ports[port] = { + 'server': server, + 'key': key + } + if get_port_socket_inode(client, port): + critical( + 'port', + err.EC_CONFLICT_PORT.format(server=ip, port=port), + [err.SUG_USE_OTHER_PORT.format()] + ) + check_pass('port') + plugin_context.set_variable('start_env', env) + + for server in cluster_config.servers: + wait_2_pass() + + + if success: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/oblogproxy/2.0.0/destroy.py b/plugins/oblogproxy/2.0.0/destroy.py index f4703d4..3308648 100644 --- a/plugins/oblogproxy/2.0.0/destroy.py +++ b/plugins/oblogproxy/2.0.0/destroy.py @@ -23,11 +23,25 @@ from _errno import EC_CLEAN_PATH_FAILED +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): global_ret = True def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/oblogproxy/2.0.0/display.py b/plugins/oblogproxy/2.0.0/display.py index 9fddac5..824e81b 100644 --- a/plugins/oblogproxy/2.0.0/display.py +++ b/plugins/oblogproxy/2.0.0/display.py @@ -41,7 +41,7 @@ def display(plugin_context, *args, **kwargs): 'url': 'obclient -h%s -P%s' % (server.ip, server_config['service_port']), 'status': 'active' }) - stdio.print_list(results, ['ip', 'port', 'status'], lambda x: [x['ip'], x['port'], x['status']], title='oblogproxy') + stdio.print_list(results, ['ip', 'port', 'status'], lambda x: [x['ip'], x['port'], 
x['status']], title=cluster_config.name) stdio.print(results[0]['url'] if results else '') stdio.print('') return plugin_context.return_true() \ No newline at end of file diff --git a/plugins/oblogproxy/2.0.0/init.py b/plugins/oblogproxy/2.0.0/init.py index b90f888..6fac256 100644 --- a/plugins/oblogproxy/2.0.0/init.py +++ b/plugins/oblogproxy/2.0.0/init.py @@ -19,7 +19,7 @@ from __future__ import absolute_import, division, print_function -from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY def init(plugin_context, *args, **kwargs): @@ -31,6 +31,7 @@ def critical(*arg, **kwargs): global_ret = True cluster_config = plugin_context.cluster_config clients = plugin_context.clients + deploy_name = plugin_context.deploy_name stdio = plugin_context.stdio force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -76,6 +77,7 @@ def critical(*arg, **kwargs): if not ret or ret.stdout.strip(): global_ret = False critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) diff --git a/plugins/obproxy/3.1.0/destroy.py b/plugins/obproxy/3.1.0/destroy.py index 75bb758..b442ca4 100644 --- a/plugins/obproxy/3.1.0/destroy.py +++ b/plugins/obproxy/3.1.0/destroy.py @@ -25,10 +25,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: # pring stderror global global_ret diff --git a/plugins/obproxy/3.1.0/display.py b/plugins/obproxy/3.1.0/display.py index b79cb73..1185464 100644 --- a/plugins/obproxy/3.1.0/display.py +++ b/plugins/obproxy/3.1.0/display.py @@ -46,7 +46,7 @@ def display(plugin_context, cursor, *args, **kwargs): continue result.append(data) stdio.print_list(result, ['ip', 'port', 'prometheus_port', 'status'], - lambda x: [x['ip'], x['listen_port'], x['prometheus_listen_port'], x['status']], title='obproxy') + lambda x: [x['ip'], x['listen_port'], x['prometheus_listen_port'], x['status']], title=cluster_config.name) server = servers[0] with_observer = False server_config = cluster_config.get_server_conf(server) diff --git a/plugins/obproxy/3.1.0/init.py b/plugins/obproxy/3.1.0/init.py index ae019a9..2770016 100644 --- a/plugins/obproxy/3.1.0/init.py +++ b/plugins/obproxy/3.1.0/init.py @@ -19,13 +19,14 @@ from __future__ import absolute_import, division, print_function -from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage +from _errno import EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY def 
init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -40,10 +41,11 @@ def init(plugin_context, *args, **kwargs): if clean and not force: if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)): owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip() - global_ret = False - err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner) - stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg)) - continue + if owner != client.config.username: + global_ret = False + err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner) + stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg)) + continue need_clean = True if need_clean: @@ -55,16 +57,19 @@ def init(plugin_context, *args, **kwargs): err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner) stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg)) continue - need_clean = True - - if need_clean: - client.execute_command("pkill -9 -u `whoami` -f '^bash {home_path}/obproxyd.sh {home_path} {ip} {port} daemon$'".format(home_path=home_path, ip=server.ip, port=server_config.get('listen_port'))) - client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/obproxy --listen_port %s'" % (home_path, server_config.get('listen_port'))) ret = client.execute_command('rm -fr %s' % home_path, timeout=-1) if not ret: global_ret = False stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr)) continue + + if not (clean or force): + if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" ]]; then exit 0; else exit 1; fi\''.format(home_path)): + global_ret = False + stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) + continue + if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % home_path): global_ret = False stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path))) diff --git a/plugins/obproxy/3.1.0/scale_out_check.py b/plugins/obproxy/3.1.0/scale_out_check.py index 1533725..58a306f 100644 --- a/plugins/obproxy/3.1.0/scale_out_check.py +++ b/plugins/obproxy/3.1.0/scale_out_check.py @@ -21,6 +21,11 @@ from __future__ import absolute_import, division, print_function +def add_plugin(component_name, plugins): + if component_name not in plugins: + plugins.append(component_name) + + def scale_out_check(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config added_components = cluster_config.get_deploy_added_components() @@ -29,8 +34,14 @@ def scale_out_check(plugin_context, *args, **kwargs): need_restart = False if 'ob-configserver' in added_components: cluster_config.add_depend_component('ob-configserver') + + if 'obproxy-ce' in added_components or 'obproxy' in added_components: + plugin_context.set_variable('auto_depend', True) + add_plugin('generate_config', plugins) + + if 'ob-configserver' in 
added_components and 'obproxy-ce' not in added_components and 'obproxy' not in added_components: need_restart = True - + plugin_context.stdio.verbose('scale_out_check plugins: %s' % plugins) plugin_context.stdio.verbose('added_components: %s' % added_components) return plugin_context.return_true(plugins=plugins, need_restart=need_restart) diff --git a/plugins/obproxy/3.1.0/start.py b/plugins/obproxy/3.1.0/start.py index a6f6c52..4081c49 100644 --- a/plugins/obproxy/3.1.0/start.py +++ b/plugins/obproxy/3.1.0/start.py @@ -172,15 +172,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): stdio.start_loading('Start obproxy') - for server in cluster_config.servers: - client = clients[server] - server_config = cluster_config.get_server_conf(server) - home_path = server_config['home_path'] - if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): - need_bootstrap = True - break - - if getattr(options, 'without_parameter', False) and need_bootstrap is False: + if getattr(options, 'without_parameter', False): use_parameter = False else: # Bootstrap is required when starting with parameter, ensure the passwords are correct. @@ -194,6 +186,9 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): if not server_config.get('obproxy_config_server_url') and obproxy_config_server_url: server_config['obproxy_config_server_url'] = obproxy_config_server_url + if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): + need_bootstrap = True + pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"]) if use_parameter: diff --git a/plugins/obproxy/4.2.3/scale_out_check.py b/plugins/obproxy/4.2.3/scale_out_check.py index 1533725..58a306f 100644 --- a/plugins/obproxy/4.2.3/scale_out_check.py +++ b/plugins/obproxy/4.2.3/scale_out_check.py @@ -21,6 +21,11 @@ from __future__ import absolute_import, division, print_function +def add_plugin(component_name, plugins): + if component_name not in plugins: + plugins.append(component_name) + + def scale_out_check(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config added_components = cluster_config.get_deploy_added_components() @@ -29,8 +34,14 @@ def scale_out_check(plugin_context, *args, **kwargs): need_restart = False if 'ob-configserver' in added_components: cluster_config.add_depend_component('ob-configserver') + + if 'obproxy-ce' in added_components or 'obproxy' in added_components: + plugin_context.set_variable('auto_depend', True) + add_plugin('generate_config', plugins) + + if 'ob-configserver' in added_components and 'obproxy-ce' not in added_components and 'obproxy' not in added_components: need_restart = True - + plugin_context.stdio.verbose('scale_out_check plugins: %s' % plugins) plugin_context.stdio.verbose('added_components: %s' % added_components) return plugin_context.return_true(plugins=plugins, need_restart=need_restart) diff --git a/plugins/obproxy/4.2.3/start.py b/plugins/obproxy/4.2.3/start.py index f7d223b..afca5e0 100644 --- a/plugins/obproxy/4.2.3/start.py +++ b/plugins/obproxy/4.2.3/start.py @@ -173,15 +173,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): stdio.start_loading('Start obproxy') - for server in cluster_config.servers: - client = clients[server] - server_config = cluster_config.get_server_conf(server) - home_path = server_config['home_path'] - if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): - need_bootstrap = True - break - - if getattr(options, 
'without_parameter', False) and need_bootstrap is False: + if getattr(options, 'without_parameter', False): use_parameter = False else: # Bootstrap is required when starting with parameter, ensure the passwords are correct. @@ -192,6 +184,10 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): client = clients[server] server_config = cluster_config.get_server_conf(server) home_path = server_config['home_path'] + + if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): + need_bootstrap = True + if not server_config.get('obproxy_config_server_url') and obproxy_config_server_url: server_config['obproxy_config_server_url'] = obproxy_config_server_url diff --git a/plugins/obproxy/4.2.3/upgrade.py b/plugins/obproxy/4.2.3/upgrade.py index f78f097..305071e 100644 --- a/plugins/obproxy/4.2.3/upgrade.py +++ b/plugins/obproxy/4.2.3/upgrade.py @@ -75,8 +75,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install apply_param_plugin(dest_repository) if not start_plugin(namespace, namespaces, deploy_name,deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs): - return - + return + ret = connect_plugin(namespace, namespaces, deploy_name,deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs) if ret: if bootstrap_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs) and display_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs): diff --git a/plugins/obproxy/4.3.0/obproxyd.sh b/plugins/obproxy/4.3.0/obproxyd.sh new file mode 100644 index 0000000..bd9b126 --- /dev/null +++ b/plugins/obproxy/4.3.0/obproxyd.sh @@ -0,0 +1,45 @@ + +path=$1 +ip=$2 +port=$3 + +function start() { + obproxyd_path=$path/run/obproxyd-$ip-$port.pid + obproxy_path=$path/run/obproxy-$ip-$port.pid + + cat $obproxyd_path | xargs kill -9 + + echo $$ > $obproxyd_path + if [ $? != 0 ]; then + exit $? + fi + + pid=`cat $obproxy_path` + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + exit $? + fi + kill -9 $pid + + while [ 1 ]; + do + sleep 1 + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + cd $path + $path/bin/obproxy --listen_port $port + pid=`ps -aux | egrep "$path/bin/obproxy --listen_port $port$" | grep -v grep | awk '{print $2}'` + echo $pid > $obproxy_path + if [ $? != 0 ]; then + exit $? 
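obproxyd.sh here is a minimal supervisor: the daemonized copy kills any previous watchdog and obproxy, then polls /proc/<pid> once a second and respawns obproxy whenever the process disappears. The same loop, re-sketched in Python for readability; the pid-file name is simplified:

# The obproxyd.sh watchdog, re-sketched in Python for clarity.
import os
import time
import subprocess

def watchdog(home_path, port, pid):
    while True:
        time.sleep(1)
        if not os.path.exists('/proc/%d' % pid):
            # Assumes obproxy stays in the foreground; the shell script
            # instead greps ps for the pid because obproxy daemonizes.
            proc = subprocess.Popen(
                [os.path.join(home_path, 'bin/obproxy'), '--listen_port', str(port)],
                cwd=home_path)
            pid = proc.pid
            with open(os.path.join(home_path, 'run/obproxy.pid'), 'w') as f:
                f.write(str(pid))  # script uses obproxy-<ip>-<port>.pid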
+ fi + fi + done +} + +if [ "$4" == "daemon" ] +then + start +else + nohup bash $0 $path $ip $port daemon > /dev/null 2>&1 & +fi \ No newline at end of file diff --git a/plugins/obproxy/4.3.0/parameter.yaml b/plugins/obproxy/4.3.0/parameter.yaml new file mode 100644 index 0000000..51cbb8b --- /dev/null +++ b/plugins/obproxy/4.3.0/parameter.yaml @@ -0,0 +1,483 @@ +- name: home_path + name_local: 工作目录 + require: true + essential: true + type: PATH + need_redeploy: true + description_en: the working directory of ObProxy + description_local: ObProxy工作目录 +- name: listen_port + name_local: 服务端口 + require: true + essential: true + type: INT + default: 2883 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: port number for mysql connection + description_local: SQL服务协议端口号 +- name: prometheus_listen_port + name_local: Exporter 端口 + require: true + essential: true + type: INT + default: 2884 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: obproxy prometheus listen port + description_local: 提供prometheus服务端口号 +- name: rpc_listen_port + name_local: RPC 端口 + require: true + essential: true + type: INT + default: 2885 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: the customized rpc service port of obproxy + description_local: RPC 服务端口 +- name: enable_obproxy_rpc_service + require: true + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: enable obproxy rpc service + description_local: 启用 obproxy rpc 服务 +- name: appname + require: false + type: SAFE_STRING + need_restart: true + description_en: application name + description_local: 应用名 +- name: cluster_name + require: false + type: SAFE_STRING + need_restart: true + description_en: observer cluster name + description_local: 代理的observer集群名 +- name: rs_list + type: ARRAY + need_restart: true + description_en: root server list(format ip:sql_port) + description_local: observer列表(格式 ip:sql_port) +- name: proxy_mem_limited + name_local: 最大运行内存 + essential: true + type: CAPACITY + default: 2G + min_value: 100MB + max_value: 100GB + description_en: The upper limit of ODP runtime memory. If the ODP exceeds the upper limit, it will exit automatically. Please enter a capacity, such as 2G
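+ # if the limit is exceeded obproxy exits on its own; the obproxyd.sh watchdog above then restarts it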
+ description_local: ODP 运行时内存上限。超过上限 ODP 即自动退出。请输入带容量带单位的整数,如2G +- name: refresh_json_config + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: force update json info if refresh_json_config is true +- name: refresh_rslist + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when refresh config server, update all rslist if refresh_rslist is true +- name: refresh_idc_list + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when refresh config server, update all idc list if refresh_idc_list is true +- name: refresh_config + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when table processor do check work, update all proxy config if refresh_config is true +- name: proxy_info_check_interval + type: TIME + default: 60s + min_value: 1s + max_value: 1h + need_restart: false + description_en: proxy info check task interval, [1s, 1h] +- name: cache_cleaner_clean_interval + type: TIME + default: 20s + min_value: 1s + max_value: 1d + need_restart: false + description_en: the interval for cache cleaner to clean cache, [1s, 1d] +- name: server_state_refresh_interval + type: TIME + default: 20s + min_value: 10ms + max_value: 1h + need_restart: false + description_en: the interval to refresh server state for getting zone or server newest state, [10ms, 1h] +- name: metadb_server_state_refresh_interval + type: TIME + default: 60s + min_value: 10ms + max_value: 1h + need_restart: false + description_en: the interval to refresh metadb server state for getting zone or server newest state, [10ms, 1h] +- name: config_server_refresh_interval + type: TIME + default: 60s + min_value: 10s + max_value: 1d + need_restart: false + description_en: config server info refresh task interval, [10s, 1d] +- name: idc_list_refresh_interval + type: TIME + default: 2h + min_value: 10s + max_value: 1d + need_restart: false + description_en: the interval to refresh idc list for getting newest region-idc, [10s, 1d] +- name: stat_table_sync_interval + type: TIME + default: 60s + min_value: 0s + max_value: 1d + need_restart: false + description_en: interval for syncing statistics to the ob_all_proxy_stat table, [0s, 1d], 0 means disable; a negative value is treated as 0 +- name: stat_dump_interval + type: TIME + default: 6000s + min_value: 0s + max_value: 1d + need_restart: false + description_en: interval for dumping statistics to the log, [0s, 1d], 0 means disable; a negative value is treated as 0 +- name: partition_location_expire_relative_time + type: INT + default: 0 + min_value: -36000000 + max_value: 36000000 + need_restart: false + description_en: the unit is ms; 0 means do not expire, other values expire partition locations based on the relative time +- name: cluster_count_high_water_mark + type: INT + default: 256 + min_value: 2 + max_value: 102400 + need_restart: false + description_en: if the cluster count is greater than this water mark, clusters will be kicked out by LRU +- name: cluster_expire_time + type: TIME + default: 1d + min_value: 0 + max_value: + need_restart: false + description_en: cluster resource expire time, 0 means never expire; a cluster is deleted if it has not been accessed for more than this time, [0, ] +- name: fetch_proxy_bin_random_time + type: TIME + default: 300s + min_value: 1s + max_value: 1h + need_restart: false + description_en: max random
waiting time of fetching proxy bin in hot upgrade, [1s, 1h] +- name: fetch_proxy_bin_timeout + type: TIME + default: 120s + min_value: 1s + max_value: 1200s + need_restart: false + description_en: default hot upgrade fetch binary timeout; the proxy stops fetching after this time, [1s, 1200s] +- name: hot_upgrade_failure_retries + type: INT + default: 5 + min_value: 1 + max_value: 20 + need_restart: false + description_en: default hot upgrade failure retries; the proxy stops handling hot_upgrade commands after this many retries, [1, 20] +- name: hot_upgrade_rollback_timeout + type: TIME + default: 24h + min_value: 1s + max_value: 30d + need_restart: false + description_en: default hot upgrade rollback timeout; the proxy rolls back if no rollback command is received within this time, [1s, 30d] +- name: hot_upgrade_graceful_exit_timeout + type: TIME + default: 120s + min_value: 0s + max_value: 30d + need_restart: false + description_en: graceful exit timeout, [0s, 30d]; a value <= 0 is treated as 0 +- name: delay_exit_time + type: TIME + default: 100ms + min_value: 100ms + max_value: 500ms + need_restart: false + description_en: delay exit time, [100ms, 500ms] +- name: log_file_percentage + type: INT + default: 80 + min_value: 0 + max_value: 100 + need_restart: false + description_en: max percentage of available size occupied by proxy log files, [0, 90]; 0 means ignore this limit +- name: log_cleanup_interval + type: TIME + default: 10m + min_value: 5s + max_value: 30d + need_restart: false + description_en: log file clean up task schedule interval, set 1 day or longer, [5s, 30d] +- name: log_dir_size_threshold + type: CAPACITY + default: 64GB + min_value: 256M + max_value: 1T + need_restart: false + description_en: max usable space size of the log dir, used to decide whether log files should be cleaned up, [256MB, 1T] +- name: need_convert_vip_to_tname + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: convert vip to tenant name, which is useful in cloud +- name: long_async_task_timeout + type: TIME + default: 60s + min_value: 1s + max_value: 1h + need_restart: false + description_en: long async task timeout, [1s, 1h] +- name: short_async_task_timeout + type: TIME + default: 5s + min_value: 1s + max_value: 1h + need_restart: false + description_en: short async task timeout, [1s, 1h] +- name: username_separator + type: SAFE_STRING_LIST + default: :;-;.
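+ # the SAFE_STRING_LIST value is ';'-separated, so this default defines three separators: ':', '-' and '.'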
+ min_value: + max_value: + need_restart: false + description_en: username separator +- name: enable_client_connection_lru_disconnect + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: if client connections reach the throttle, true means a new connection is accepted and the least recently used client connection is evicted, false means the new connection is disconnected and an error packet is returned +- name: max_connections + type: INT + default: 60000 + min_value: 0 + max_value: 65535 + need_restart: false + description_en: max number of fds the proxy can use +- name: client_max_connections + type: INT + default: 8192 + min_value: 0 + max_value: 65535 + need_restart: false + description_en: client max connections for one obproxy, [0, 65535] +- name: observer_query_timeout_delta + type: TIME + default: 20s + min_value: 1s + max_value: 30s + need_restart: false + description_en: the delta value for @@ob_query_timeout, to cover net round trip time(proxy<->server) and task schedule time(server), [1s, 30s] +- name: enable_cluster_checkout + type: BOOL + default: true + min_value: false + max_value: true + need_restart: false + description_en: if enabled, the proxy sends the cluster name at login and the server checks it +- name: enable_proxy_scramble + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: if enabled, the proxy sends its own variable scramble number to the client; not supported by old observers +- name: enable_client_ip_checkout + type: BOOL + default: true + min_value: false + max_value: true + need_restart: false + description_en: if enabled, the proxy sends the client ip at login +- name: connect_observer_max_retries + type: INT + default: 3 + min_value: 2 + max_value: 5 + need_restart: false + description_en: max retries for connecting to the observer +- name: frequent_accept + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: frequent accept +- name: net_accept_threads + type: INT + default: 2 + min_value: 0 + max_value: 8 + need_restart: true + description_en: net accept threads num, [0, 8] +- name: stack_size + type: CAPACITY + default: 1MB + min_value: 1MB + max_value: 10MB + need_restart: true + description_en: stack size of one thread, [1MB, 10MB] +- name: work_thread_num + type: INT + default: 128 + min_value: 1 + max_value: 128 + need_restart: true + description_en: proxy work thread num, or the max work thread num when automatic matching is enabled, [1, 128] +- name: task_thread_num + type: INT + default: 2 + min_value: 1 + max_value: 4 + need_restart: true + description_en: proxy task thread num, [1, 4] +- name: block_thread_num + type: INT + default: 1 + min_value: 1 + max_value: 4 + need_restart: true + description_en: proxy block thread num, [1, 4] +- name: grpc_thread_num + type: INT + default: 8 + min_value: 8 + max_value: 16 + need_restart: true + description_en: proxy grpc thread num, [8, 16] +- name: grpc_client_num + type: INT + default: 9 + min_value: 9 + max_value: 16 + need_restart: true + description_en: proxy grpc client num, [9, 16] +- name: automatic_match_work_thread + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: ignore the work_thread_num configuration item and use the CPU count as the proxy work thread num
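+# note: the thread-count parameters above take effect only on restart; with automatic_match_work_thread enabled, work_thread_num acts as the upper bound for the auto-detected CPU count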
+- name: enable_strict_kernel_release + require: true + type: BOOL + default: false + min_value: false + max_value: true + need_restart: true + description_en: if true, the proxy only supports 5u/6u/7u RedHat kernels; otherwise the kernel release is not checked and the proxy may be unstable +- name: enable_cpu_topology + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: enable cpu topology, work threads bind to cpu +- name: local_bound_ip + type: SAFE_STRING + default: 0.0.0.0 + need_restart: true + description_en: local bound ip(any) +- name: obproxy_config_server_url + type: WEB_URL + default: '' + need_restart: true + description_en: url of config info(rs list and so on) +- name: proxy_service_mode + type: SAFE_STRING + default: '' + need_restart: true + description_en: "proxy deploy and service mode: 1.client(default); 2.server" +- name: client_session_id_version + type: INT + default: 2 + max_value: 2 + min_value: 1 + need_reload: true + description_en: This parameter is used to specify whether to use the new logic to generate the client session ID. The parameter type is integer. The value range is [1, 2] and the default value is 2 (use the new logic). +- name: proxy_id + type: INT + default: 0 + max_value: 8191 + min_value: 0 + need_reload: true + description_en: This parameter is used to set the ID for an ODP. The parameter type is integer. The default value is 0 and the value range is [0, 8191]. +- name: app_name + type: SAFE_STRING + default: undefined + need_restart: true + description_en: the application name the proxy serves; must be defined, and can only be modified on restart +- name: enable_metadb_used + type: BOOL + default: true + max_value: true + min_value: false + need_restart: true + description_en: use the MetaDataBase when the proxy runs +- name: rootservice_cluster_name + type: SAFE_STRING + default: undefined + need_restart: true + description_en: default cluster name for rootservice_list +- name: prometheus_cost_ms_unit + type: BOOL + default: true + max_value: true + min_value: false + need_restart: true + description_en: update sync metrics to prometheus exposer interval, [1s, 1h], 0 means disable; a negative value is treated as 0 +- name: bt_retry_times + type: INT + default: 3 + min_value: 0 + max_value: 100 + need_restart: true + description_en: beyond trust sdk retry times +- name: obproxy_sys_password + name_local: 密码 + essential: true + type: STRING + default: '' + need_restart: false + description_en: password of obproxy sys user +- name: observer_sys_password + type: STRING + default: '' + need_restart: false + description_en: password of observer proxyro user +- name: observer_root_password + type: STRING + default: '' + need_restart: false + description_en: password of observer root user diff --git a/plugins/obproxy/4.3.0/start.py b/plugins/obproxy/4.3.0/start.py new file mode 100644 index 0000000..54c0416 --- /dev/null +++ b/plugins/obproxy/4.3.0/start.py @@ -0,0 +1,302 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <http://www.gnu.org/licenses/>.
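+# +# This plugin composes the obproxy startup command from each server's configuration, skips servers whose recorded pid still owns the listen port, launches obproxy under the obproxyd.sh watchdog, and health-checks the listen port for up to 300 seconds.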
+ + +from __future__ import absolute_import, division, print_function + +import os +import time +import random +import hashlib +from copy import deepcopy + +import re + +from _errno import EC_CONFLICT_PORT +from tool import NetUtil + +stdio = None + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def confirm_port(client, pid, port): + socket_inodes = get_port_socket_inode(client, port) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def confirm_command(client, pid, command): + command = command.replace(' ', '').strip() + if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi\'' % (pid, command)): + return True + return False + + +def confirm_home_path(client, pid, home_path): + if client.execute_command('path=`ls -l /proc/%s | grep cwd | awk -F\'-> \' \'{print $2}\'`; bash -c \'if [ "$path" != "%s" ]; then exit 1; fi\'' % (pid, home_path)): + return True + return False + + +def is_started(client, remote_bin_path, port, home_path, command): + username = client.config.username + ret = client.execute_command('pgrep -u %s -f "^%s"' % (username, remote_bin_path)) + if not ret: + return False + pids = ret.stdout.strip() + if not pids: + return False + pids = pids.split('\n') + for pid in pids: + if confirm_port(client, pid, port): + break + else: + return False + return confirm_home_path(client, pid, home_path) and confirm_command(client, pid, command) + + +def obproxyd(home_path, client, ip, port): + path = os.path.join(os.path.split(__file__)[0], 'obproxyd.sh') + remote_path = os.path.join(home_path, 'obproxyd.sh') + if os.path.exists(path): + shell = '''bash %s %s %s %s''' % (remote_path, home_path, ip, port) + return client.put_file(path, remote_path) and client.execute_command(shell) + return False + + +class EnvVariables(object): + + def __init__(self, environments, client): + self.environments = environments + self.client = client + self.env_done = {} + + def __enter__(self): + for env_key, env_value in self.environments.items(): + self.env_done[env_key] = self.client.get_env(env_key) + self.client.add_env(env_key, env_value, True) + + def __exit__(self, *args, **kwargs): + for env_key, env_value in self.env_done.items(): + if env_value is not None: + self.client.add_env(env_key, env_value, True) + else: + self.client.del_env(env_key) + + +def start(plugin_context, need_bootstrap=False, *args, **kwargs): + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + options = plugin_context.options + clusters_cmd = {} + real_cmd = {} + pid_path = {} + obproxy_config_server_url = '' + + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depend_config(comp) + if not ob_config: + continue + odp_config = cluster_config.get_global_conf() + for server in cluster_config.get_depend_servers(comp): + config = cluster_config.get_depend_config(comp, server) + zone = config['zone'] + if zone not in
root_servers: + root_servers[zone] = '%s:%s' % (server.ip, config['mysql_port']) + depend_rs_list = ';'.join([root_servers[zone] for zone in root_servers]) + cluster_config.update_global_conf('rs_list', depend_rs_list, save=False) + + config_map = { + 'observer_sys_password': 'proxyro_password', + 'cluster_name': 'appname' + } + for key in config_map: + ob_key = config_map[key] + if key not in odp_config and ob_key in ob_config: + cluster_config.update_global_conf(key, ob_config.get(ob_key), save=False) + break + + obc_cluster_config = cluster_config.get_depend_config('ob-configserver') + if obc_cluster_config: + vip_address = obc_cluster_config.get('vip_address') + if vip_address: + obc_ip = vip_address + obc_port = obc_cluster_config.get('vip_port') + else: + server = cluster_config.get_depend_servers('ob-configserver')[0] + client = clients[server] + obc_ip = NetUtil.get_host_ip() if client.is_localhost() else server.ip + obc_port = obc_cluster_config.get('listen_port') + obproxy_config_server_url = "http://{0}:{1}/services?Action=GetObProxyConfig".format(obc_ip, obc_port) + + error = False + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + if 'rs_list' not in server_config and 'obproxy_config_server_url' not in server_config and not obproxy_config_server_url: + error = True + stdio.error('%s need config "rs_list" or "obproxy_config_server_url"' % server) + if error: + return plugin_context.return_false() + + stdio.start_loading('Start obproxy') + + if getattr(options, 'without_parameter', False): + use_parameter = False + else: + # Bootstrap is required when starting with parameter, ensure the passwords are correct. + need_bootstrap = True + use_parameter = True + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + + if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): + need_bootstrap = True + + if not server_config.get('obproxy_config_server_url') and obproxy_config_server_url: + server_config['obproxy_config_server_url'] = obproxy_config_server_url + + pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"]) + + if use_parameter: + not_opt_str = [ + 'listen_port', + 'prometheus_listen_port', + 'rs_list', + 'cluster_name', + 'rpc_listen_port' + ] + start_unuse = ['home_path', 'observer_sys_password', 'obproxy_sys_password', 'observer_root_password'] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + opt_str = [] + if server_config.get('obproxy_sys_password'): + obproxy_sys_password = hashlib.sha1(server_config['obproxy_sys_password'].encode("utf-8")).hexdigest() + else: + obproxy_sys_password = '' + if server_config.get('proxy_id'): + opt_str.append("client_session_id_version=%s,proxy_id=%s" % (server_config.get('client_session_id_version', 2), server_config.get('proxy_id'))) + opt_str.append("obproxy_sys_password='%s'" % obproxy_sys_password) + for key in server_config: + if key not in start_unuse and key not in not_opt_str: + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + cmd = ['-o %s' % ','.join(opt_str)] + for key in not_opt_str: + if key in server_config: + if key == 'rpc_listen_port' and not server_config['enable_obproxy_rpc_service']: + continue + value = get_value(key) + cmd.append('--%s %s' % (key, value)) + else: + cmd = ['--listen_port %s' % 
server_config.get('listen_port')] + + real_cmd[server] = '%s/bin/obproxy %s' % (home_path, ' '.join(cmd)) + clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server]) + + for server in clusters_cmd: + environments = deepcopy(cluster_config.get_environments()) + client = clients[server] + server_config = cluster_config.get_server_conf(server) + port = int(server_config["listen_port"]) + prometheus_port = int(server_config["prometheus_listen_port"]) + stdio.verbose('%s port check' % server) + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + cmd = real_cmd[server].replace('\'', '') + if remote_pid: + ret = client.execute_command('ls /proc/%s/' % remote_pid) + if ret: + if confirm_port(client, remote_pid, port): + continue + stdio.stop_loading('fail') + stdio.error(EC_CONFLICT_PORT.format(server=server.ip, port=port)) + return plugin_context.return_false() + + stdio.verbose('starting %s obproxy', server) + if 'LD_LIBRARY_PATH' not in environments: + environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path'] + with EnvVariables(environments, client): + ret = client.execute_command(clusters_cmd[server]) + if not ret: + stdio.stop_loading('fail') + stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr)) + return plugin_context.return_false() + client.execute_command('''ps -aux | grep -e '%s$' | grep -v grep | awk '{print $2}' > %s''' % (cmd, pid_path[server])) + stdio.stop_loading('succeed') + + stdio.start_loading('obproxy program health check') + failed = [] + servers = cluster_config.servers + count = 300 + while servers and count: + count -= 1 + tmp_servers = [] + for server in servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + stdio.verbose('%s program health check' % server) + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if remote_pid: + for pid in re.findall('\d+',remote_pid): + confirm = confirm_port(client, pid, int(server_config["listen_port"])) + if confirm: + proxyd_Pid_path = os.path.join(server_config["home_path"], 'run/obproxyd-%s-%d.pid' % (server.ip, server_config["listen_port"])) + if client.execute_command("pid=`cat %s` && ls /proc/$pid" % proxyd_Pid_path): + stdio.verbose('%s obproxy[pid: %s] started', server, pid) + else: + client.execute_command('echo %s > %s' % (pid, pid_path[server])) + obproxyd(server_config["home_path"], client, server.ip, server_config["listen_port"]) + tmp_servers.append(server) + break + stdio.verbose('failed to start %s obproxy, remaining retries: %d' % (server, count)) + if count: + tmp_servers.append(server) + else: + failed.append('failed to start %s obproxy' % server) + else: + failed.append('failed to start %s obproxy' % server) + servers = tmp_servers + if servers and count: + time.sleep(1) + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + plugin_context.return_false() + else: + stdio.stop_loading('succeed') + plugin_context.return_true(need_bootstrap=need_bootstrap) diff --git a/plugins/obproxy/4.3.0/start_check.py b/plugins/obproxy/4.3.0/start_check.py new file mode 100644 index 0000000..1cd0f31 --- /dev/null +++ b/plugins/obproxy/4.3.0/start_check.py @@ -0,0 +1,218 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. 
+# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <http://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import os +import _errno as err + + +stdio = None +success = True + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{if($4==\"0A\") print $2,$4,$10}' | grep ':%s' | awk -F' ' '{print $3}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, precheck=False, *args, **kwargs): + def check_pass(item): + status = check_status[server] + if status[item].status == err.CheckStatus.WAIT: + status[item].status = err.CheckStatus.PASS + def check_fail(item, error, suggests=[]): + status = check_status[server][item] + if status.status == err.CheckStatus.WAIT: + status.error = error + status.suggests = suggests + status.status = err.CheckStatus.FAIL + def wait_2_pass(): + status = check_status[server] + for item in status: + check_pass(item) + def alert(item, error, suggests=[]): + global success + if strict_check: + success = False + check_fail(item, error, suggests) + stdio.error(error) + else: + stdio.warn(error) + def critical(item, error, suggests=[]): + global success + success = False + check_fail(item, error, suggests) + stdio.error(error) + + global stdio, success + success = True + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + servers_port = {} + check_status = {} + servers_dirs = {} + servers_check_dirs = {} + + plugin_context.set_variable('start_check_status', check_status) + for server in cluster_config.servers: + check_status[server] = { + 'port': err.CheckStatus(), + } + if work_dir_check: + check_status[server]['dir'] = err.CheckStatus() + + for comp in ["oceanbase", "oceanbase-ce"]: + if comp in cluster_config.depends: + check_status[server]['password'] = err.CheckStatus() + check_status[server]['proxy_id'] = err.CheckStatus() + + if init_check_status: + return plugin_context.return_true(start_check_status=check_status) + + stdio.start_loading('Check before start obproxy') + + global_config = cluster_config.get_original_global_conf() + key = 'observer_sys_password' + for comp in ["oceanbase", "oceanbase-ce"]: + if comp in cluster_config.depends: + if key in global_config: + alert('password', + err.WC_PARAM_USELESS.format(key=key, current_comp=cluster_config.name, comp=comp), + [err.SUG_OB_SYS_PASSWORD.format()] + ) + break + + for server in cluster_config.servers: + ip = server.ip + client = clients[server] + server_config = cluster_config.get_server_conf(server) + port = int(server_config["listen_port"]) + if not precheck:
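+ # reuse check: when the recorded pid still has a live /proc entry, the server is treated as already running and its checks are skipped +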
remote_pid_path = "%s/run/obproxy-%s-%s.pid" % (server_config['home_path'], server.ip, server_config["listen_port"]) + remote_pid = client.execute_command("cat %s" % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s/fd' % remote_pid): + stdio.verbose('%s is running, skip' % server) + wait_2_pass() + continue + + if work_dir_check: + stdio.verbose('%s dir check' % server) + if ip not in servers_dirs: + servers_dirs[ip] = {} + servers_check_dirs[ip] = {} + dirs = servers_dirs[ip] + check_dirs = servers_check_dirs[ip] + key = 'home_path' + path = server_config.get(key) + suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)] + if path in dirs and dirs[path]: + critical('dir', err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests) + dirs[path] = { + 'server': server, + 'key': key, + } + empty_check = work_dir_empty_check + while True: + if path in check_dirs: + if check_dirs[path] != True: + critical('dir', check_dirs[path], suggests) + break + + if client.execute_command('bash -c "[ -a %s ]"' % path): + is_dir = client.execute_command('[ -d {} ]'.format(path)) + has_write_permission = client.execute_command('[ -w {} ]'.format(path)) + if is_dir and has_write_permission: + if empty_check: + ret = client.execute_command('ls %s' % path) + if not ret or ret.stdout.strip(): + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)) + else: + check_dirs[path] = True + else: + check_dirs[path] = True + else: + if not is_dir: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path)) + else: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path)) + else: + path = os.path.dirname(path) + empty_check = False + + if ip not in servers_port: + servers_port[ip] = {} + ports = servers_port[ip] + server_config = cluster_config.get_server_conf_with_default(server) + stdio.verbose('%s port check' % server) + for key in ['listen_port', 'prometheus_listen_port', 'rpc_listen_port']: + if key == 'rpc_listen_port' and not server_config.get('enable_obproxy_rpc_service'): + continue + port = int(server_config[key]) + alert_f = alert if key == 'prometheus_listen_port' else critical + if port in ports: + alert_f( + 'port', + err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], key=ports[port]['key']), + [err.SUG_PORT_CONFLICTS.format()] + ) + continue + ports[port] = { + 'server': server, + 'key': key + } + if get_port_socket_inode(client, port): + alert_f( + 'port', + err.EC_CONFLICT_PORT.format(server=ip, port=port), + [err.SUG_USE_OTHER_PORT.format()] + ) + + new_cluster_config = kwargs.get('new_cluster_config', None) + if new_cluster_config: + server_config = new_cluster_config.get_server_conf_with_default(server) + client_session_id_version = server_config.get('client_session_id_version') + proxy_id = server_config.get('proxy_id') + proxy_id_limits = { + 1: [1, 255], + 2: [1, 8191], + } + if proxy_id: + limit_range = proxy_id_limits.get(client_session_id_version) + if limit_range: + min_limit, max_limit = limit_range + if not (min_limit <= proxy_id <= max_limit): + critical('proxy_id', err.EC_OBPROXY_ID_OVER_LIMIT.format(id=client_session_id_version, limit=str(limit_range))) + + for server in cluster_config.servers: + wait_2_pass()
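+ + # any critical() above flips success to False; alert() does so only when strict_check is set +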
if success: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/obproxy/4.3.0/upgrade.py b/plugins/obproxy/4.3.0/upgrade.py new file mode 100644 index 0000000..7737f56 --- /dev/null +++ b/plugins/obproxy/4.3.0/upgrade.py @@ -0,0 +1,85 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <http://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import random + + +def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs): + namespace = plugin_context.namespace + namespaces = plugin_context.namespaces + deploy_name = plugin_context.deploy_name + deploy_status = plugin_context.deploy_status + repositories = plugin_context.repositories + plugin_name = plugin_context.plugin_name + + components = plugin_context.components + clients = plugin_context.clients + cluster_config = plugin_context.cluster_config + cmds = plugin_context.cmds + options = plugin_context.options + dev_mode = plugin_context.dev_mode + stdio = plugin_context.stdio + + upgrade_ctx = kwargs.get('upgrade_ctx') + local_home_path = kwargs.get('local_home_path') + upgrade_repositories = kwargs.get('upgrade_repositories') + + cur_repository = upgrade_repositories[0] + dest_repository = upgrade_repositories[-1] + repository_dir = dest_repository.repository_dir + kwargs['repository_dir'] = repository_dir + + stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository] + start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository] + connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository] + display_plugin = search_py_script_plugin([dest_repository], 'display')[dest_repository] + bootstrap_plugin = search_py_script_plugin([dest_repository], 'bootstrap')[dest_repository] + + apply_param_plugin(cur_repository) + if not stop_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs): + return + install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients) + + random_num = random.randint(1, 8191 - len(cluster_config.servers)) + num = 0 + global_config = cluster_config.get_original_global_conf() + if 'enable_obproxy_rpc_service' not in global_config: + cluster_config.update_global_conf('enable_obproxy_rpc_service', False, False) + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + client_session_id_version = server_config.get('client_session_id_version', None) + + if client_session_id_version in [None, 2]: + cluster_config.update_server_conf(server, 'client_session_id_version', 2, False) + if server_config.get('proxy_id', None) is None:
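+ # each eligible server gets a distinct proxy_id derived from the random base +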
cluster_config.update_server_conf(server, 'proxy_id', random_num + num, False) + num += 1 + + apply_param_plugin(dest_repository) + if not start_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs): + return + + ret = connect_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs) + if ret: + if bootstrap_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs) and display_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs): + upgrade_ctx['index'] = len(upgrade_repositories) + return plugin_context.return_true() diff --git a/plugins/oceanbase-ce-libs/3.1.0/file_map.yaml b/plugins/oceanbase-ce-libs/3.1.0/file_map.yaml index 146b9ce..8afde67 100644 --- a/plugins/oceanbase-ce-libs/3.1.0/file_map.yaml +++ b/plugins/oceanbase-ce-libs/3.1.0/file_map.yaml @@ -7,4 +7,7 @@ - src_path: ./home/admin/oceanbase/lib/libmariadb.so target_path: libmariadb.so - src_path: ./home/admin/oceanbase/lib/libmariadb.so.3 - target_path: libmariadb.so.3 \ No newline at end of file + target_path: libmariadb.so.3 +- src_path: ./home/admin/oceanbase/lib + target_path: '' + type: dir \ No newline at end of file diff --git a/plugins/oceanbase-ce-utils/3.1.0/file_map.yaml b/plugins/oceanbase-ce-utils/3.1.0/file_map.yaml index 07ed0f6..e642f93 100644 --- a/plugins/oceanbase-ce-utils/3.1.0/file_map.yaml +++ b/plugins/oceanbase-ce-utils/3.1.0/file_map.yaml @@ -1,3 +1,4 @@ - src_path: ./usr/bin - target_path: '' - type: dir \ No newline at end of file + target_path: bin + type: dir + install_method: cp \ No newline at end of file diff --git a/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py index 0330f06..ecce619 100644 --- a/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py +++ b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py @@ -24,7 +24,7 @@ from tool import TimeUtils import _errno as err from datetime import datetime -from _stdio import FormtatText +from _stdio import FormatText def gather_scene_run(plugin_context, *args, **kwargs): @@ -56,7 +56,7 @@ def run(): obdiag_cmd = get_obdiag_cmd() stdio.verbose('execute cmd: {}'.format(obdiag_cmd)) run_result = LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio) - stdio.warn(FormtatText.warning("\nGather all result stored in this directory: {0}\n".format(store_dir_option))) + stdio.warn(FormatText.warning("\nGather all result stored in this directory: {0}\n".format(store_dir_option))) return run_result options = plugin_context.options diff --git a/plugins/oceanbase/3.1.0/create_tenant.py b/plugins/oceanbase/3.1.0/create_tenant.py index 1717202..d92b7a6 100644 --- a/plugins/oceanbase/3.1.0/create_tenant.py +++ b/plugins/oceanbase/3.1.0/create_tenant.py @@ -20,11 +20,13 @@ from __future__ import absolute_import, division, print_function - +import os import time +import const from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN from _types import Capacity +from tool import Exector tenant_cursor = None @@ -53,7 +55,7 @@ def get_option(key, default=''): def get_parsed_option(key, default=''): value = get_option(key=key, default=default) try: - 
parsed_value = Capacity(value).btyes + parsed_value = Capacity(value).bytes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -180,11 +182,11 @@ def error(*arg, **kwargs): MIN_IOPS = 128 MIN_SESSION_NUM = 64 if cpu_total < MIN_CPU: - return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) + return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_total, need=MIN_CPU)) if mem_total < MIN_MEMORY: - return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY))) + return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_total), need=Capacity(MIN_MEMORY))) if disk_total < MIN_DISK_SIZE: - return error('%s: resource not enough: disk space less than %s' % (zone_list, Capacity(MIN_DISK_SIZE))) + return error('{zone} not enough disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(disk_total), need=Capacity(MIN_DISK_SIZE))) try: max_memory = get_parsed_option('max_memory', mem_total) @@ -290,14 +292,36 @@ def error(*arg, **kwargs): db_username = get_option('db_username') db_password = get_option('db_password', '') if db_username: + create_sql, grant_sql = "", "" if mode == "mysql": - sql = """create user if not exists '{username}' IDENTIFIED BY %s; - grant all on *.* to '{username}' WITH GRANT OPTION;""".format( - username=db_username) + create_sql = "create user if not exists '{username}' IDENTIFIED BY %s;".format(username=db_username) + grant_sql = "grant all on *.* to '{username}' WITH GRANT OPTION;".format(username=db_username) else: error("Create user in oracle tenant is not supported") - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio): + if not exec_sql_in_tenant(sql=create_sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio): stdio.error('failed to create user {}'.format(db_username)) return + if not exec_sql_in_tenant(sql=grant_sql, cursor=cursor, tenant=name, mode=mode, stdio=stdio): + stdio.error('Failed to grant privileges to user {}'.format(db_username)) + return + clients = plugin_context.clients + repositories = plugin_context.repositories + client = clients[plugin_context.cluster_config.servers[0]] + cluster_config = plugin_context.cluster_config + global_config = cluster_config.get_global_conf() + + time_zone = get_option('time_zone', client.execute_command('date +%:z').stdout.strip()) + exec_sql_in_tenant(sql="SET GLOBAL time_zone='%s';" % time_zone, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio) + + exector_path = get_option('exector_path', '/usr/obd/lib/executer') + exector = Exector(tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user, tenant_cursor.password, exector_path, stdio) + for repository in repositories: + if repository.name in const.COMPS_OB: + time_zone_info_param = os.path.join(repository.repository_dir, 'etc', 'timezone_V1.log') + if not exector.exec_script('import_time_zone_info.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), time_zone_info_param)): + stdio.warn('execute import_time_zone_info.py failed') + break + cmd = 'obclient -h%s -P%s -u%s -Doceanbase -A\n' % (tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user) + stdio.print(cmd) return plugin_context.return_true() diff --git 
a/plugins/oceanbase/3.1.0/destroy.py b/plugins/oceanbase/3.1.0/destroy.py index 2cef1d6..8f8a98c 100644 --- a/plugins/oceanbase/3.1.0/destroy.py +++ b/plugins/oceanbase/3.1.0/destroy.py @@ -25,10 +25,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: # print stderror global global_ret diff --git a/plugins/oceanbase/3.1.0/display.py b/plugins/oceanbase/3.1.0/display.py index 47bae19..b603abb 100644 --- a/plugins/oceanbase/3.1.0/display.py +++ b/plugins/oceanbase/3.1.0/display.py @@ -63,7 +63,7 @@ def display(plugin_context, cursor, *args, **kwargs): servers = cursor.fetchall('select * from oceanbase.__all_server', raise_exception=True, exc_level='verbose') if servers: stdio.print_list(servers, ['ip', 'version', 'port', 'zone', 'status'], - lambda x: [x['svr_ip'], x['build_version'].split('_')[0], x['inner_port'], x['zone'], x['status']], title='observer') + lambda x: [x['svr_ip'], x['build_version'].split('_')[0], x['inner_port'], x['zone'], x['status']], title=cluster_config.name) user = 'root' password = cluster_config.get_global_conf().get('root_password', '') cmd = 'obclient -h%s -P%s -u%s %s-Doceanbase -A' % (servers[0]['svr_ip'], servers[0]['inner_port'], user, '-p%s ' % passwd_format(password) if password else '') diff --git a/plugins/oceanbase/3.1.0/generate_config.py b/plugins/oceanbase/3.1.0/generate_config.py index 6846649..13522f3 100644 --- a/plugins/oceanbase/3.1.0/generate_config.py +++ b/plugins/oceanbase/3.1.0/generate_config.py @@ -46,7 +46,7 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals if not only_generate_password: generate_keys += [ 'memory_limit', 'datafile_size', 'clog_disk_utilization_threshold', 'clog_disk_usage_limit_percentage', - 'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', + 'syslog_level', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'devname', 'system_memory', 'cpu_count' ] if generate_password: @@ -59,7 +59,7 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals cluster_config.update_global_conf('appname', plugin_context.deploy_name) if original_global_conf.get('cluster_id') is None: cluster_config.update_global_conf('cluster_id', round(time.time()) % 4294901759, False) - if generate_password: + if generate_password or only_generate_password: generate_random_password(cluster_config) if only_generate_password: return plugin_context.return_true() @@ -101,8 +101,6 @@ def summit_config(): max_syslog_file_count_default = 4 if global_config.get('syslog_level') is None: update_global_conf('syslog_level', 'INFO') - if global_config.get('enable_syslog_recycle') is None: - update_global_conf('enable_syslog_recycle', True) if global_config.get('enable_syslog_wf') is None: update_global_conf('enable_syslog_wf', False) if 
global_config.get('max_syslog_file_count') is None: @@ -153,7 +151,7 @@ def summit_config(): total_memory = 0 for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k == 'MemTotal': - total_memory = Capacity(str(v)).btyes + total_memory = Capacity(str(v)).bytes memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100) else: if not server_config.get('memory_limit'): @@ -172,7 +170,7 @@ def summit_config(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes if generate_check: if server_memory_stats['available'] < START_NEED_MEMORY: @@ -194,7 +192,7 @@ def summit_config(): success = False continue else: - memory_limit = Capacity(server_config.get('memory_limit')).btyes + memory_limit = Capacity(server_config.get('memory_limit')).bytes auto_set_system_memory = False if not user_server_config.get('system_memory'): @@ -258,10 +256,10 @@ def summit_config(): disk_free = data_dir_disk['avail'] real_disk_total = data_dir_disk['total'] if mounts[dirs['home_path']] == data_dir_mount: - if user_server_config.get('enable_syslog_recycle') is False: - log_size = real_disk_total * 0.1 - else: + if int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) != 0: log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4 + else: + log_size = real_disk_total * 0.1 else: log_size = 0 clog_padding_size = int(real_disk_total * (1 - clog_disk_utilization_threshold_max / 100.0 * 0.8)) @@ -280,13 +278,13 @@ def summit_config(): if auto_set_system_memory: min_size = MIN_MEMORY * 7 else: - min_size = max(MIN_MEMORY, Capacity(user_server_config.get('system_memory')).btyes * 2) * 7 + min_size = max(MIN_MEMORY, Capacity(user_server_config.get('system_memory')).bytes * 2) * 7 min_need = padding_size + min_size if min_need <= disk_free: memory_limit = (disk_free - padding_size) / 7 server_config['memory_limit'] = str(Capacity(memory_limit, 0)) update_server_conf(server, 'memory_limit', server_config['memory_limit']) - memory_limit = Capacity(server_config['memory_limit']).btyes + memory_limit = Capacity(server_config['memory_limit']).bytes clog_disk_size = memory_limit * 4 clog_size = int(round(clog_disk_size * 0.64)) if auto_set_system_memory: @@ -301,7 +299,7 @@ def summit_config(): continue datafile_size_format = str(Capacity(disk_total - clog_disk_size - disk_used, 0)) - datafile_size = Capacity(datafile_size_format).btyes + datafile_size = Capacity(datafile_size_format).bytes clog_disk_utilization_threshold = max(80, int(100.0 * (disk_used + datafile_size + padding_size + clog_disk_size * 0.8) / real_disk_total)) clog_disk_utilization_threshold = min(clog_disk_utilization_threshold, clog_disk_utilization_threshold_max) clog_disk_usage_limit_percentage = min(int(clog_disk_utilization_threshold / 80.0 * 95), clog_disk_usage_limit_percentage_max) @@ -330,7 +328,7 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(Capacity(value).btyes if is_capacity_key else value) + values.append(Capacity(value).bytes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue diff --git a/plugins/oceanbase/3.1.0/init.py b/plugins/oceanbase/3.1.0/init.py index f7e1742..4006159 100644 --- 
a/plugins/oceanbase/3.1.0/init.py +++ b/plugins/oceanbase/3.1.0/init.py @@ -21,8 +21,7 @@ from __future__ import absolute_import, division, print_function import os -from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage - +from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY stdio = None force = False @@ -35,7 +34,7 @@ def critical(*arg, **kwargs): stdio.error(*arg, **kwargs) -def init_dir(server, client, key, path, link_path=None): +def init_dir(server, client, key, path, deploy_name, link_path=None): if force: ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: @@ -46,6 +45,7 @@ def init_dir(server, client, key, path, link_path=None): ret = client.execute_command('ls %s' % (path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) return False else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=path))) @@ -64,6 +64,7 @@ def init(plugin_context, *args, **kwargs): global stdio, force cluster_config = plugin_context.cluster_config clients = plugin_context.clients + deploy_name = plugin_context.deploy_name stdio = plugin_context.stdio servers_dirs = {} force = getattr(plugin_context.options, 'force', False) @@ -135,6 +136,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (home_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) @@ -151,6 +153,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (data_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=data_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=data_path))) @@ -159,7 +162,7 @@ def init(plugin_context, *args, **kwargs): link_path = '%s/store' % home_path client.execute_command("if [ ! 
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (data_path, link_path, data_path, link_path)) for key in ['clog', 'ilog', 'slog']: - # init_dir(server, client, key, server_config['%s_dir' % key], os.path.join(data_path, key)) + # init_dir(server, client, key, server_config['%s_dir' % key], deploy_name, os.path.join(data_path, key)) log_dir = server_config['%s_dir' % key] if force: ret = client.execute_command('rm -fr %s/*' % log_dir, timeout=-1) @@ -171,6 +174,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (log_dir)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=log_dir))) diff --git a/plugins/oceanbase/3.1.0/ocp_check.py b/plugins/oceanbase/3.1.0/ocp_check.py index 7aebcaf..1bf3bda 100644 --- a/plugins/oceanbase/3.1.0/ocp_check.py +++ b/plugins/oceanbase/3.1.0/ocp_check.py @@ -47,7 +47,7 @@ def ocp_check(plugin_context, ocp_version, cursor, new_cluster_config=None, new_ if is_admin and client.config.username != 'admin' and ocp_version < ocp_version_420: is_admin = False stdio.error('The current user must be the admin user. Run the edit-config command to modify the user.username field') - if can_sudo and not client.execute_command('sudo whoami'): + if can_sudo and not (client.execute_command('[ `id -u` == "0" ]') or client.execute_command('sudo whoami')): can_sudo = False stdio.error('The user must have the privilege to run sudo commands without a password.') if not client.execute_command('bash -c "if [ `pgrep observer | wc -l` -gt 1 ]; then exit 1; else exit 0;fi;"'): diff --git a/plugins/oceanbase/3.1.0/parameter.yaml b/plugins/oceanbase/3.1.0/parameter.yaml index aacd008..0bc4859 100644 --- a/plugins/oceanbase/3.1.0/parameter.yaml +++ b/plugins/oceanbase/3.1.0/parameter.yaml @@ -796,15 +796,14 @@ description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 - name: enable_syslog_recycle require: false - essential: true type: BOOL default: false min_value: NULL max_value: NULL section: OBSERVER need_restart: false - description_en: specifies whether log file recycling is turned on - description_local: 是否自动回收系统日志 + description_en: specifies whether log files generated before a restart are included in the recycling space + description_local: 是否将重启前的日志文件纳入回收空间 - name: max_syslog_file_count require: false essential: true diff --git a/plugins/oceanbase/3.1.0/reload.py b/plugins/oceanbase/3.1.0/reload.py index 65b3b5a..eac200b 100644 --- a/plugins/oceanbase/3.1.0/reload.py +++ b/plugins/oceanbase/3.1.0/reload.py @@ -20,10 +20,38 @@ from __future__ import absolute_import, division, print_function +from urllib.parse import urlparse + +import requests + from _deploy import InnerConfigItem from _errno import EC_OBSERVER_INVALID_MODFILY_GLOBAL_KEY +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + stdio.verbose('Configserver url check failed: 
request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?' + else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname) + return cfg_url + + def reload(plugin_context, new_cluster_config, *args, **kwargs): stdio = plugin_context.stdio cluster_config = plugin_context.cluster_config @@ -54,6 +82,10 @@ def reload(plugin_context, new_cluster_config, *args, **kwargs): stdio.verbose('%s is not a oceanbase parameter. skip' % key) continue n_value = new_config[key] + if key == 'obconfig_url': + cfg_url = get_ob_configserver_cfg_url(n_value, cluster_config.name, stdio) + if cfg_url: + n_value = cfg_url if key not in config or config[key] != n_value: if isinstance(key, InnerConfigItem) and key in inner_keys: zone = config['zone'] diff --git a/plugins/oceanbase/3.1.0/restart.py b/plugins/oceanbase/3.1.0/restart.py index 9ef6501..938cbca 100644 --- a/plugins/oceanbase/3.1.0/restart.py +++ b/plugins/oceanbase/3.1.0/restart.py @@ -202,7 +202,7 @@ def _restart(self): clients = self.new_clients cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config - if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository): + if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=self.cluster_config, new_cluster_config=self.new_cluster_config, local_home_path=self.local_home_path, repository=self.repository): self.stdio.stop_loading('stop_loading', 'fail') return False self.close() diff --git a/plugins/oceanbase/3.1.0/scale_out_check.py b/plugins/oceanbase/3.1.0/scale_out_check.py index 40aeee9..c5fd9da 100644 --- a/plugins/oceanbase/3.1.0/scale_out_check.py +++ b/plugins/oceanbase/3.1.0/scale_out_check.py @@ -42,6 +42,8 @@ def scale_out_check(plugin_context, *args, **kwargs): add_plugin('connect', plugins) add_plugin('bootstrap', plugins) add_plugin('create_tenant', plugins) + if 'ob-configserver' in added_components: + cluster_config.add_depend_component('ob-configserver') plugin_context.stdio.verbose('scale_out_check plugins: %s' % plugins) plugin_context.stdio.verbose('added_components: %s' % added_components) diff --git a/plugins/oceanbase/3.1.0/start.py b/plugins/oceanbase/3.1.0/start.py index 7b5da23..ac38fdc 100644 --- a/plugins/oceanbase/3.1.0/start.py +++ b/plugins/oceanbase/3.1.0/start.py @@ -24,6 +24,7 @@ import time import requests from copy import deepcopy +from urllib.parse import urlparse from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS @@ -31,6 +32,29 @@ from tool import NetUtil +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?' 
diff --git a/plugins/oceanbase/3.1.0/restart.py b/plugins/oceanbase/3.1.0/restart.py
index 9ef6501..938cbca 100644
--- a/plugins/oceanbase/3.1.0/restart.py
+++ b/plugins/oceanbase/3.1.0/restart.py
@@ -202,7 +202,7 @@ def _restart(self):
         clients = self.new_clients

         cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config
-        if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository):
+        if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=self.cluster_config, new_cluster_config=self.new_cluster_config, local_home_path=self.local_home_path, repository=self.repository):
             self.stdio.stop_loading('stop_loading', 'fail')
             return False
         self.close()
diff --git a/plugins/oceanbase/3.1.0/scale_out_check.py b/plugins/oceanbase/3.1.0/scale_out_check.py
index 40aeee9..c5fd9da 100644
--- a/plugins/oceanbase/3.1.0/scale_out_check.py
+++ b/plugins/oceanbase/3.1.0/scale_out_check.py
@@ -42,6 +42,8 @@ def scale_out_check(plugin_context, *args, **kwargs):
         add_plugin('connect', plugins)
         add_plugin('bootstrap', plugins)
         add_plugin('create_tenant', plugins)
+    if 'ob-configserver' in added_components:
+        cluster_config.add_depend_component('ob-configserver')

     plugin_context.stdio.verbose('scale_out_check plugins: %s' % plugins)
     plugin_context.stdio.verbose('added_components: %s' % added_components)
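The restart hunk above no longer pre-merges the configs; it hands both the current and the new cluster_config to the start plugin, which then computes the changed subset itself (the param_config block in the start.py hunk below). A minimal sketch of that diffing step, with plain dicts standing in for cluster_config objects:

    def changed_params(old_config, new_config):
        # Keep only keys that are new or whose values differ; on restart these
        # are the only options worth passing to observer via -o.
        return {k: v for k, v in new_config.items()
                if k not in old_config or old_config[k] != v}

    print(changed_params({'memory_limit': '16G', 'zone': 'zone1'},
                         {'memory_limit': '32G', 'zone': 'zone1'}))
    # {'memory_limit': '32G'}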
diff --git a/plugins/oceanbase/3.1.0/start.py b/plugins/oceanbase/3.1.0/start.py
index 7b5da23..ac38fdc 100644
--- a/plugins/oceanbase/3.1.0/start.py
+++ b/plugins/oceanbase/3.1.0/start.py
@@ -24,6 +24,7 @@ import time
 import requests
 from copy import deepcopy
+from urllib.parse import urlparse

 from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
@@ -31,6 +32,29 @@
 from tool import NetUtil


+def get_ob_configserver_cfg_url(obconfig_url, appname, stdio):
+    parsed_url = urlparse(obconfig_url)
+    host = parsed_url.netloc
+    stdio.verbose('obconfig_url host: %s' % host)
+    url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host)
+    try:
+        response = requests.get(url, allow_redirects=False)
+        if response.status_code != 200:
+            stdio.verbose('request %s status_code: %s' % (url, str(response.status_code)))
+            return None
+    except Exception:
+        stdio.verbose('Configserver url check failed: request %s failed' % url)
+        return None
+
+    if obconfig_url[-1] == '?':
+        link_char = ''
+    elif obconfig_url.find('?') == -1:
+        link_char = '?'
+    else:
+        link_char = '&'
+    cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname)
+    return cfg_url
+

 def config_url(ocp_config_server, appname, cid):
     if ocp_config_server[-1] == '?':
@@ -87,9 +111,51 @@ def __exit__(self, *args, **kwargs):
         else:
             self.client.del_env(env_key)

+def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap):
+    not_opt_str = OrderedDict({
+        'mysql_port': '-p',
+        'rpc_port': '-P',
+        'zone': '-z',
+        'nodaemon': '-N',
+        'appname': '-n',
+        'cluster_id': '-c',
+        'data_dir': '-d',
+        'devname': '-i',
+        'syslog_level': '-l',
+        'ipv6': '-6',
+        'mode': '-m',
+        'scn': '-f',
+        'local_ip': '-I'
+    })
+    not_cmd_opt = [
+        'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario',
+        'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
+        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
+        'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port'
+    ]
+    get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
+
+    opt_str = []
+    for key in param_list:
+        if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
+            value = get_value(key)
+            opt_str.append('%s=%s' % (key, value))
+    if need_bootstrap:
+        if cfg_url:
+            opt_str.append('obconfig_url=\'%s\'' % cfg_url)
+        else:
+            cmd.append(rs_list_opt)

-def start(plugin_context, *args, **kwargs):
-    cluster_config = plugin_context.cluster_config
+    param_list['mysql_port'] = server_config['mysql_port']
+    for key in not_opt_str:
+        if key in param_list:
+            value = get_value(key)
+            cmd.append('%s %s' % (not_opt_str[key], value))
+    if len(opt_str) > 0:
+        cmd.append('-o %s' % ','.join(opt_str))
+
+def start(plugin_context, new_cluster_config=None, *args, **kwargs):
+    cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config
     options = plugin_context.options
     clients = plugin_context.clients
     stdio = plugin_context.stdio
@@ -106,12 +172,14 @@ def start(plugin_context, *args, **kwargs):
         if not appname or not cluster_id:
             stdio.error('need appname and cluster_id')
             return
-        try:
-            cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
-            if not cfg_url:
-                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
-        except:
-            stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
+        cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio)
+        if not cfg_url:
+            try:
+                cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
+                if not cfg_url:
+                    stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
+            except:
+                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
     elif 'ob-configserver' in cluster_config.depends and appname:
         obc_cluster_config = cluster_config.get_depend_config('ob-configserver')
         vip_address = obc_cluster_config.get('vip_address')
@@ -138,6 +206,17 @@ def start(plugin_context, *args, **kwargs):
         server_config = cluster_config.get_server_conf(server)
         home_path = server_config['home_path']

+        param_config = {}
+        if new_cluster_config:
+            old_config = plugin_context.cluster_config.get_server_conf_with_default(server)
+            new_config = new_cluster_config.get_server_conf_with_default(server)
+            for key in new_config:
+                param_value = new_config[key]
+                if key not in old_config or old_config[key] != param_value:
+                    param_config[key] = param_value
+        else:
+            param_config = server_config
+
         if not server_config.get('data_dir'):
             server_config['data_dir'] = '%s/store' % home_path

@@ -158,41 +237,7 @@ def start(plugin_context, *args, **kwargs):
         cmd = []
         if use_parameter:
-            not_opt_str = OrderedDict({
-                'mysql_port': '-p',
-                'rpc_port': '-P',
-                'zone': '-z',
-                'nodaemon': '-N',
-                'appname': '-n',
-                'cluster_id': '-c',
-                'data_dir': '-d',
-                'devname': '-i',
-                'syslog_level': '-l',
-                'ipv6': '-6',
-                'mode': '-m',
-                'scn': '-f'
-            })
-            not_cmd_opt = [
-                'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
-                'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc',
-                'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
-                'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password'
-            ]
-            get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
-            opt_str = []
-            for key in server_config:
-                if key not in not_cmd_opt and key not in not_opt_str:
-                    value = get_value(key)
-                    opt_str.append('%s=%s' % (key, value))
-            for key in not_opt_str:
-                if key in server_config:
-                    value = get_value(key)
-                    cmd.append('%s %s' % (not_opt_str[key], value))
-            if cfg_url:
-                opt_str.append('obconfig_url=\'%s\'' % cfg_url)
-            else:
-                cmd.append(rs_list_opt)
-            cmd.append('-o %s' % ','.join(opt_str))
+            construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap)
         else:
             cmd.append('-p %s' % server_config['mysql_port'])
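construct_opts folds the former inline option-building into a reusable helper. A trimmed-down, runnable sketch of the mapping it applies; only three of the flag mappings are kept here, and the sample server_config is hypothetical (the real lists are in the hunk above):

    from collections import OrderedDict

    # Hypothetical server_config for illustration; keys mirror the patch's mapping.
    server_config = {'mysql_port': 2881, 'rpc_port': 2882, 'zone': 'zone1',
                     'memory_limit': '16G', 'home_path': '/root/observer'}

    NOT_OPT_STR = OrderedDict([('mysql_port', '-p'), ('rpc_port', '-P'), ('zone', '-z')])
    NOT_CMD_OPT = ['home_path']  # never passed on the command line

    def build_observer_cmd(config):
        cmd, opt_str = [], []
        get_value = lambda k: "'%s'" % config[k] if isinstance(config[k], str) else config[k]
        for key in config:
            # Anything without a dedicated flag becomes a -o key=value option.
            if key not in NOT_CMD_OPT and key not in NOT_OPT_STR:
                opt_str.append('%s=%s' % (key, get_value(key)))
        for key in NOT_OPT_STR:
            # Well-known keys map to short observer flags.
            if key in config:
                cmd.append('%s %s' % (NOT_OPT_STR[key], get_value(key)))
        if opt_str:
            cmd.append('-o %s' % ','.join(opt_str))
        return cmd

    print(build_observer_cmd(server_config))
    # ["-p 2881", "-P 2882", "-z 'zone1'", "-o memory_limit='16G'"]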
diff --git a/plugins/oceanbase/3.1.0/start_check.py b/plugins/oceanbase/3.1.0/start_check.py
index b4bd2e5..89594ca 100644
--- a/plugins/oceanbase/3.1.0/start_check.py
+++ b/plugins/oceanbase/3.1.0/start_check.py
@@ -48,7 +48,7 @@ def time_delta(client):
     time_ed = time.time() * 1000

     time_it = time_ed - time_st
-    time_srv -= time_it
+    time_srv -= time_it/2
     return time_srv - time_st

@@ -148,7 +148,7 @@ def system_memory_check():
             factor = 0.7
             suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
             suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
-            if memory_limit < server_memory_config[server]['system_memory']:
+            if memory_limit <= server_memory_config[server]['system_memory']:
                 critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
             elif memory_limit * factor < server_memory_config[server]['system_memory']:
                 alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
@@ -327,7 +327,7 @@ def system_memory_check():
         memory_limit = 0
         percentage = 0
         if server_config.get('memory_limit'):
-            memory_limit = Capacity(server_config['memory_limit']).btyes
+            memory_limit = Capacity(server_config['memory_limit']).bytes
             memory['num'] += memory_limit
         elif 'memory_limit_percentage' in server_config:
             percentage = server_config['memory_limit_percentage']
@@ -338,7 +338,7 @@ def system_memory_check():
         memory['servers'][server] = {
             'num': memory_limit,
             'percentage': percentage,
-            'system_memory': Capacity(server_config.get('system_memory', 0)).btyes
+            'system_memory': Capacity(server_config.get('system_memory', 0)).bytes
         }

         data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
@@ -509,7 +509,7 @@ def system_memory_check():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes

             server_memory_stat = servers_memory[ip]
             min_start_need = server_num * START_NEED_MEMORY
@@ -548,7 +548,7 @@ def system_memory_check():
                 if isinstance(need, int):
                     disk[kp]['need'] += disk[kp]['total'] * need / 100
                 else:
-                    disk[kp]['need'] += Capacity(need).btyes
+                    disk[kp]['need'] += Capacity(need).bytes

             for path in servers_clog_mount[ip]:
                 kp = '/'
@@ -613,7 +613,17 @@ def system_memory_check():
     if success:
         for ip in servers_net_interface:
             client = servers_clients[ip]
+            is_check_ping_permission = False
             for devname in servers_net_interface[ip]:
+                if not is_check_ping_permission:
+                    ret = client.execute_command('ping -W 1 -c 1 127.0.0.1')
+                    if ret.code == 127:
+                        critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format())
+                        break
+                    if not ret:
+                        critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format())
+                        break
+                    is_check_ping_permission = True
                 if client.is_localhost() and devname != 'lo' or (not client.is_localhost() and devname == 'lo'):
                     suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
                     suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config
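The small-looking time_delta change is worth spelling out: subtracting the full round trip over-corrects the server timestamp, because the server stamped its clock roughly mid-flight. Halving the RTT is the usual NTP-style assumption of symmetric request and response latency. A worked example with made-up timestamps:

    # Clock-offset estimate, as adjusted by the patch: the server timestamp is
    # observed about half a round trip after the request was sent, so only
    # half of the measured RTT is subtracted before comparing clocks.
    time_st = 1_000_000.0          # local send time (ms)
    time_srv = 1_000_040.0         # timestamp reported by the server (ms)
    time_ed = 1_000_060.0          # local receive time (ms)

    rtt = time_ed - time_st        # 60 ms round trip
    offset = (time_srv - rtt / 2) - time_st
    print(offset)                  # 10.0 -> server clock is ~10 ms ahead

The ping hunk at the end of the same file is a permission probe rather than a reachability test: exit code 127 means the ping binary is missing, while any other failure on 127.0.0.1 points at a missing setuid bit or capability on ping.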
diff --git a/plugins/oceanbase/3.1.0/stop.py b/plugins/oceanbase/3.1.0/stop.py
index 557a75c..f5854fd 100644
--- a/plugins/oceanbase/3.1.0/stop.py
+++ b/plugins/oceanbase/3.1.0/stop.py
@@ -23,10 +23,27 @@
 import json
 import time
 import requests
+from urllib.parse import urlparse

 from tool import NetUtil


+def is_ob_configserver(obconfig_url, stdio):
+    parsed_url = urlparse(obconfig_url)
+    host = parsed_url.netloc
+    stdio.verbose('obconfig_url host: %s' % host)
+    url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host)
+    try:
+        response = requests.get(url, allow_redirects=False)
+        if response.status_code == 404:
+            stdio.verbose('request %s status_code: 404' % url)
+            return False
+    except Exception:
+        stdio.verbose('Configserver url check failed: request %s failed' % url)
+        return False
+    return True
+
+
 def config_url(ocp_config_server, appname, cid):
     cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
     proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
@@ -71,14 +88,15 @@ def stop(plugin_context, *args, **kwargs):
     obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
     stdio.start_loading('Stop observer')
     if obconfig_url and appname and cluster_id:
-        try:
-            cfg_url, cleanup_config_url_content, register_to_config_url = config_url(obconfig_url, appname, cluster_id)
-            stdio.verbose('post %s' % cleanup_config_url_content)
-            response = requests.post(cleanup_config_url_content)
-            if response.status_code != 200:
-                stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code))
-        except:
-            stdio.warn('failed to clean up the configuration url content')
+        if not is_ob_configserver(obconfig_url, stdio):
+            try:
+                cfg_url, cleanup_config_url_content, register_to_config_url = config_url(obconfig_url, appname, cluster_id)
+                stdio.verbose('post %s' % cleanup_config_url_content)
+                response = requests.post(cleanup_config_url_content)
+                if response.status_code != 200:
+                    stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code))
+            except:
+                stdio.warn('failed to clean up the configuration url content')
     servers = {}
     for server in cluster_config.servers:
         server_config = cluster_config.get_server_conf(server)
diff --git a/plugins/oceanbase/3.1.0/upgrade.py b/plugins/oceanbase/3.1.0/upgrade.py
index 101de52..50c348e 100644
--- a/plugins/oceanbase/3.1.0/upgrade.py
+++ b/plugins/oceanbase/3.1.0/upgrade.py
@@ -25,39 +25,14 @@
 import tool
 import datetime
 from ssh import LocalClient
+from tool import Exector as BaseExector


-class Exector(object):
+class Exector(BaseExector):

     def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio):
+        super(Exector, self).__init__(host, port, user, pwd, exector_path, stdio)
         self.tmp_prefix = tmp_prefix
-        self._host = host
-        self._port = port
-        self._user = user
-        self._pwd = pwd
-        self._cmd = None
-        self.stdio = stdio
-        self._exector = os.path.join(exector_path, 'executer27/bin/executer')
-
-    @property
-    def host(self):
-        return self._host
-
-    @property
-    def port(self):
-        return self._port
-
-    @property
-    def user(self):
-        return self._user
-
-    @property
-    def pwd(self):
-        return self._pwd
-
-    @property
-    def exector(self):
-        return self._exector

     @property
     def cmd(self):
@@ -65,43 +40,6 @@ def cmd(self):
             self._cmd = '%s %%s -h %s -P %s -u %s %s' % (self._exector, self.host, self.port, self.user, '-p %s' % tool.ConfigUtil.passwd_format(self.pwd) if self.pwd else '')
         return self._cmd

-    @host.setter
-    def host(self, value):
-        self._host = value
-        self._cmd = None
-
-    @port.setter
-    def port(self, value):
-        self._port = value
-        self._cmd = None
-
-    @user.setter
-    def user(self, value):
-        self._user = value
-        self._cmd = None
-
-    @pwd.setter
-    def pwd(self, value):
-        self._pwd = value
-        self._cmd = None
-
-    @pwd.setter
-    def exector(self, exector_path):
-        self._exector = os.path.join(exector_path, 'bin/executer27')
-        self._cmd = None
-
-    def create_temp(self, repository, direct_upgrade=False):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5)
-        if not os.path.exists(tmp_path):
-            relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc'
-            script_dir = os.path.join(repository.repository_dir, relative_dir)
-            LocalClient.put_dir(script_dir, tmp_path)
-        return tmp_path
-
-    def clear_temp(self):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix)
-        tool.DirectoryUtil.rm(tmp_path)
-
     def exec_script(self, name, repository, direct_upgrade=False, can_skip=False):
         script_dir = self.create_temp(repository, direct_upgrade)
         path = os.path.join(script_dir, name)
@@ -214,6 +152,7 @@ def run(self):
             total = len(self.route)
             self.apply_param_plugin(self.repositories[self.route_index - 1])
             while self.route_index < total:
+                setattr(self.plugin_context.options, 'without_parameter', True)
                 self.call_plugin(self.start_plugin, local_home_path=None, repository_dir=None)
                 self.close()
                 if not self.connect():
@@ -222,14 +161,14 @@ def run(self):
                 while self.process_index < self.process_total:
                     try:
                         if not self.process[self.process_index]():
-                            self._dump()
                             return False
                         self.process_index += 1
                         self.process_route_index = self.route_index
                     except Exception as e:
-                        self._dump()
                         self.stdio.exception(str(e))
                         return False
+                    finally:
+                        self._dump()
                 self.process_index = 0
                 self.route_index = self.next_stage + 1
             self.exector.clear_temp()
@@ -251,8 +190,8 @@ def close(self):
             self.db = None
             self.exector = None

-    def connect(self):
-        if self.cursor is None or self.execute_sql('select version()', error=False) is False:
+    def connect(self, cache=True):
+        if self.cursor is None or not cache or self.execute_sql('select version()', error=False) is False:
             ret = self.call_plugin(self.connect_plugin)
             if not ret:
                 return False
@@ -349,6 +288,7 @@ def broken_sql(self, sql, sleep_time=3):
             if ret is None:
                 break
             time.sleep(sleep_time)
+            self.connect(cache=False)

     def wait(self):
         if not self.connect():
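Two of these upgrade.py changes cooperate: connect() gains a cache flag, and broken_sql() reconnects after every sleep, so a connection dropped by a restarting observer cannot strand the polling loop on a dead cached cursor. Restated as one standalone loop over injected callables (the function and parameter names here are mine, not OBD's):

    import time

    def wait_until_clear(execute_sql, reconnect, sql, sleep_time=3):
        # Sketch of the patched broken_sql loop: poll until the query returns
        # no row, re-establishing the session after each sleep so a server
        # restart mid-upgrade does not make every later probe fail.
        while execute_sql(sql) is not None:
            time.sleep(sleep_time)
            reconnect(cache=False)  # force a fresh cursor instead of trusting the cached one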
diff --git a/plugins/oceanbase/4.0.0.0/bootstrap.py b/plugins/oceanbase/4.0.0.0/bootstrap.py
index ec64581..417b79f 100644
--- a/plugins/oceanbase/4.0.0.0/bootstrap.py
+++ b/plugins/oceanbase/4.0.0.0/bootstrap.py
@@ -118,10 +118,11 @@ def is_bootstrap():
         has_obagent = "obagent" in added_components and "obagent" in be_depend
         if has_obagent or ('ocp_agent_monitor_password' in global_conf and "obagent" not in changed_components):
             value = global_conf['ocp_agent_monitor_password'] if global_conf.get('ocp_agent_monitor_password') is not None else ''
-            sql = 'create user if not exists "ocp_monitor" IDENTIFIED BY %s'
+            agent_user = cluster_config.get_global_conf_with_default().get('ocp_agent_monitor_username')
+            sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user)
             stdio.verbose(sql)
             raise_cursor.execute(sql, [value])
-            sql = 'grant select on oceanbase.* to ocp_monitor IDENTIFIED BY %s'
+            sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user)
             stdio.verbose(sql)
             raise_cursor.execute(sql, [value])
diff --git a/plugins/oceanbase/4.0.0.0/create_tenant.py b/plugins/oceanbase/4.0.0.0/create_tenant.py
index 86cd967..c472918 100644
--- a/plugins/oceanbase/4.0.0.0/create_tenant.py
+++ b/plugins/oceanbase/4.0.0.0/create_tenant.py
@@ -20,10 +20,12 @@
 from __future__ import absolute_import, division, print_function

-
+import os
 import time
 from collections import defaultdict

+import const
+from tool import Exector
 from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
 from _types import Capacity

@@ -31,6 +33,7 @@

 def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[], stdio=None):
+    global tenant_cursor
     if not user:
         user = 'SYS' if mode == 'oracle' else 'root'
     # find tenant ip, port
@@ -67,7 +70,7 @@ def get_parsed_option(key, default=''):
         if value is None:
             return value
         try:
-            parsed_value = Capacity(value).btyes
+            parsed_value = Capacity(value).bytes
         except:
             stdio.exception("")
             raise Exception("Invalid option {}: {}".format(key, value))
@@ -120,6 +123,7 @@ def error(msg='', *arg, **kwargs):
             return
         elif res is False:
             return
+
     if not tenant_exists:
         stdio.start_loading('Create tenant %s' % name)
         zone_list = get_option('zone_list', set())
@@ -183,17 +187,17 @@ def error(msg='', *arg, **kwargs):
         MIN_IOPS = 1024

         if cpu_available < MIN_CPU:
-            return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU))
+            return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=MIN_CPU))
         if mem_available < MIN_MEMORY:
-            return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(MIN_MEMORY)))
         if log_disk_available < MIN_LOG_DISK_SIZE:
-            return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            return error('{zone} not enough log_disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(MIN_LOG_DISK_SIZE)))

         # cpu options
         max_cpu = get_option('max_cpu', cpu_available)
         min_cpu = get_option('min_cpu', max_cpu)
         if cpu_available < max_cpu:
-            return error('resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu))
+            return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=max_cpu))
         if max_cpu < min_cpu:
             return error('min_cpu must less then max_cpu')

@@ -207,11 +211,11 @@ def error(msg='', *arg, **kwargs):
             log_disk_size = log_disk_available

         if mem_available < memory_size:
-            return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size)))
+            return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(memory_size)))

         # log disk size options
         if log_disk_size is not None and log_disk_available < log_disk_size:
-            return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(disk_available), Capacity(log_disk_size)))
+            return error('{zone} not enough log_disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(log_disk_size)))

         # iops options
         max_iops = get_option('max_iops', None)
@@ -307,13 +311,38 @@ def error(msg='', *arg, **kwargs):
         db_username = get_option('db_username')
         db_password = get_option('db_password', '')
         if db_username:
+            create_sql, grant_sql = "", ""
             if mode == "mysql":
-                sql = """create user if not exists '{username}' IDENTIFIED BY %s;
-                grant all on *.* to '{username}' WITH GRANT OPTION;""".format(
-                    username=db_username)
+                create_sql = "create user if not exists '{username}' IDENTIFIED BY %s;".format(username=db_username)
+                grant_sql = "grant all on *.* to '{username}' WITH GRANT OPTION;".format(username=db_username)
             else:
                 error("Create user in oracle tenant is not supported")
-            if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]):
+            if not exec_sql_in_tenant(sql=create_sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio):
                 stdio.error('failed to create user {}'.format(db_username))
                 return
+            if not exec_sql_in_tenant(sql=grant_sql, cursor=cursor, tenant=name, mode=mode, stdio=stdio):
+                stdio.error('Failed to grant privileges to user {}'.format(db_username))
+                return
+
+        clients = plugin_context.clients
+        client = clients[plugin_context.cluster_config.servers[0]]
+        repositories = plugin_context.repositories
+        global_config = cluster_config.get_global_conf()
+
+        time_zone = get_option('time_zone', client.execute_command('date +%:z').stdout.strip())
+        exec_sql_in_tenant(sql="SET GLOBAL time_zone='%s';" % time_zone, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '')
+
+        exector_path = get_option('exector_path', '/usr/obd/lib/executer')
+        exector = Exector(tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user, tenant_cursor.password, exector_path, stdio)
+        for repository in repositories:
+            if repository.name in const.COMPS_OB:
+                time_zone_info_param = os.path.join(repository.repository_dir, 'etc', 'timezone_V1.log')
+                srs_data_param = os.path.join(repository.repository_dir, 'etc', 'default_srs_data_mysql.sql')
+                if not exector.exec_script('import_time_zone_info.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), time_zone_info_param)):
+                    stdio.warn('execute import_time_zone_info.py failed')
+                if not exector.exec_script('import_srs_data.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), srs_data_param)):
+                    stdio.warn('execute import_srs_data.py failed')
+                break
+        cmd = 'obclient -h%s -P%s -u%s -Doceanbase -A\n' % (tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user)
+        stdio.print(cmd)
     return plugin_context.return_true()
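create_tenant now seeds the tenant's time_zone from the deploy host when the new --time-zone option is omitted. A local sketch of that fallback, with subprocess standing in for OBD's client.execute_command:

    import subprocess

    def default_tenant_time_zone() -> str:
        # When --time-zone is not given, the patch falls back to the first
        # server's own UTC offset from `date +%:z` (e.g. '+08:00') and applies
        # it via SET GLOBAL time_zone inside the new tenant.
        return subprocess.run(['date', '+%:z'], capture_output=True, text=True).stdout.strip()

    print("SET GLOBAL time_zone='%s';" % default_tenant_time_zone())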
diff --git a/plugins/oceanbase/4.0.0.0/generate_config.py b/plugins/oceanbase/4.0.0.0/generate_config.py
index 2a02784..fc79b8f 100644
--- a/plugins/oceanbase/4.0.0.0/generate_config.py
+++ b/plugins/oceanbase/4.0.0.0/generate_config.py
@@ -23,7 +23,6 @@

 import re, os
 import time
-from math import sqrt

 from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
 import _errno as err
@@ -31,23 +30,24 @@
 from tool import ConfigUtil


-def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
-    if generate_config_mini and memory_limit <= 6 << 30:
+def get_system_memory(memory_limit):
+    if memory_limit < 12 << 30:
         system_memory = 1 << 30
-    elif memory_limit <= 8 << 30:
-        system_memory = 2 << 30
-    elif memory_limit <= 16 << 30:
-        system_memory = 3 << 30
-    elif memory_limit <= 32 << 30:
+    elif memory_limit < 20 << 30:
         system_memory = 5 << 30
-    elif memory_limit <= 48 << 30:
+    elif memory_limit < 40 << 30:
+        system_memory = 6 << 30
+    elif memory_limit < 60 << 30:
         system_memory = 7 << 30
-    elif memory_limit <= 64 << 30:
+    elif memory_limit < 80 << 30:
+        system_memory = 8 << 30
+    elif memory_limit < 100 << 30:
+        system_memory = 9 << 30
+    elif memory_limit < 130 << 30:
         system_memory = 10 << 30
     else:
-        memory_limit_gb = memory_limit >> 30
-        system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
-    return max(system_memory, min_pool_memory)
+        system_memory = int(memory_limit * 0.08)
+    return system_memory


 def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
@@ -56,7 +56,7 @@
     if not only_generate_password:
         generate_keys += [
             'memory_limit', 'datafile_size', 'log_disk_size', 'devname', 'system_memory', 'cpu_count', 'production_mode',
-            'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size'
+            'syslog_level', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size'
         ]
     if generate_password:
         generate_keys += ['root_password', 'proxyro_password', 'ocp_meta_password', 'ocp_agent_monitor_password']
@@ -68,8 +68,8 @@
         cluster_config.update_global_conf('appname', plugin_context.deploy_name)
     if original_global_conf.get('cluster_id') is None:
         cluster_config.update_global_conf('cluster_id', round(time.time()) % 4294901759, False)
-    if generate_password:
-        generate_random_password(cluster_config)
+    if generate_password or only_generate_password:
+        generate_random_password(cluster_config, auto_depend)
     if only_generate_password:
         return plugin_context.return_true()

@@ -101,8 +101,6 @@ def summit_config():
     global_config = cluster_config.get_global_conf()
     max_syslog_file_count_default = 4
-    if global_config.get('enable_syslog_recycle') is None:
-        update_global_conf('enable_syslog_recycle', True)
     if global_config.get('enable_syslog_wf') is None:
         update_global_conf('enable_syslog_wf', False)
     if global_config.get('max_syslog_file_count') is None:
@@ -151,7 +149,7 @@ def summit_config():
         auto_set_min_pool_memory = False
         system_memory = 0
         if user_server_config.get('system_memory'):
-            system_memory = Capacity(user_server_config.get('system_memory')).btyes
+            system_memory = Capacity(user_server_config.get('system_memory')).bytes
         if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config:
             auto_set_min_pool_memory = True
         min_pool_memory = server_config['__min_full_resource_pool_memory']
@@ -172,11 +170,11 @@ def summit_config():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes

         if user_server_config.get('memory_limit_percentage'):
             if ip in ip_server_memory_info:
-                total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes
+                total_memory = Capacity(ip_server_memory_info[ip]['total']).bytes
                 memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
             elif generate_check:
                 stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
@@ -215,11 +213,11 @@ def summit_config():
                 else:
                     memory_limit = MIN_MEMORY
         else:
-            memory_limit = Capacity(server_config.get('memory_limit')).btyes
+            memory_limit = Capacity(server_config.get('memory_limit')).bytes

         if system_memory == 0:
             auto_set_system_memory = True
-            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+            system_memory = get_system_memory(memory_limit)
             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))

         # cpu
@@ -236,8 +234,8 @@ def summit_config():
             stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT))

         # disk
-        datafile_size = Capacity(server_config.get('datafile_size', 0)).btyes
-        log_disk_size = Capacity(server_config.get('log_disk_size', 0)).btyes
+        datafile_size = Capacity(server_config.get('datafile_size', 0)).bytes
+        log_disk_size = Capacity(server_config.get('log_disk_size', 0)).bytes
         if not server_config.get('datafile_size') or not server_config.get('log_disk_size'):
             disk = {'/': 0}
             ret = client.execute_command('df --block-size=1024')
@@ -303,10 +301,11 @@ def summit_config():
                 else:
                     auto_set_log_disk_size = True

-                if user_server_config.get('enable_syslog_recycle') is False:
-                    log_size = 1 << 30  # 默认先给1G普通日志空间
-                else:
+                if int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) != 0:
                     log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4
+                else:
+                    log_size = 1 << 30  # 默认先给1G普通日志空间
+
                 if clog_dir_mount == data_dir_mount:
                     min_log_size = log_size if clog_dir_mount == home_path_mount else 0
@@ -316,14 +315,14 @@ def summit_config():
                         MIN_NEED += min_memory * 3
                     else:
                         min_datafile_size = datafile_size
-                        MIN_NEED += Capacity(datafile_size).btyes
+                        MIN_NEED += Capacity(datafile_size).bytes
                     if auto_set_log_disk_size:
                         min_log_disk_size = memory_limit * 3
                         MIN_NEED += min_memory * 3
                     else:
                         min_log_disk_size = log_disk_size
-                        MIN_NEED += Capacity(min_log_disk_size).btyes
-                    min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes
+                        MIN_NEED += Capacity(min_log_disk_size).bytes
+                    min_need = min_log_size + Capacity(min_datafile_size).bytes + Capacity(min_log_disk_size).bytes

                     disk_free = data_dir_disk['avail']
                     if MIN_NEED > disk_free:
@@ -340,7 +339,7 @@ def summit_config():
                             memory_limit = MIN_MEMORY
                             update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                         if auto_set_system_memory:
-                            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                            system_memory = get_system_memory(memory_limit)
                             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     elif min_need > disk_free:
                         if generate_check and not auto_set_memory:
@@ -358,9 +357,9 @@ def summit_config():
                             memory_factor -= 3
                         memory_limit = str(Capacity(disk_free / max(1, memory_factor), 0))
                         update_server_conf(server, 'memory_limit', memory_limit)
-                        memory_limit = Capacity(memory_limit).btyes
+                        memory_limit = Capacity(memory_limit).bytes
                         if auto_set_system_memory:
-                            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                            system_memory = get_system_memory(memory_limit)
                             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     log_disk_size = memory_limit * 3
                     datafile_size = max(disk_free - log_disk_size, log_disk_size)
@@ -389,7 +388,7 @@ def summit_config():
                         stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                         success = False
                         continue
-                    datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes
+                    datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).bytes
                 datafile_size = datafile_min_memory_limit * 3

                 log_disk_min_memory_limit = memory_limit
@@ -408,13 +407,13 @@ def summit_config():
                         stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                         success = False
                         continue
-                    log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).btyes
+                    log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).bytes
                 log_disk_size = log_disk_min_memory_limit * 3

             if auto_set_memory:
                 update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                 if auto_set_system_memory:
-                    system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                    system_memory = get_system_memory(memory_limit)
                     update_server_conf(server, 'system_memory', system_memory)

             if auto_set_datafile_size:
@@ -453,10 +452,10 @@ def summit_config():
             server_info = servers_info.get(server)
             if not server_info:
                 continue
-            memory_limit = Capacity(server_info['memory_limit']).btyes
-            system_memory = Capacity(server_info['system_memory']).btyes
-            log_disk_size = Capacity(server_info['log_disk_size']).btyes
-            min_pool_memory = Capacity(server_info['min_pool_memory']).btyes
+            memory_limit = Capacity(server_info['memory_limit']).bytes
+            system_memory = Capacity(server_info['system_memory']).bytes
+            log_disk_size = Capacity(server_info['log_disk_size']).bytes
+            min_pool_memory = Capacity(server_info['min_pool_memory']).bytes
             if not sys_log_disk_size:
                 if not sys_memory_size:
                     sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30))
@@ -472,7 +471,7 @@ def summit_config():
             update_global_conf('ocp_meta_tenant_memory_size', '1536M')

     if generate_password:
-        generate_random_password(cluster_config)
+        generate_random_password(cluster_config, auto_depend)

     if generate_consistent_config:
         generate_global_config = generate_configs['global']
@@ -486,7 +485,7 @@ def summit_config():
                 if key in generate_configs.get(server, {}):
                     value = generate_configs[server][key]
                     servers.append(server)
-                    values.append(Capacity(value).btyes if is_capacity_key else value)
+                    values.append(Capacity(value).bytes if is_capacity_key else value)
             if values:
                 if len(values) != server_num and key in generate_global_config:
                     continue
@@ -549,19 +548,33 @@ def summit_config():
     stdio.stop_loading('fail')


-def generate_random_password(cluster_config):
+def generate_random_password(cluster_config, auto_depend):
     add_components = cluster_config.get_deploy_added_components()
     be_depend = cluster_config.be_depends
     global_config = cluster_config.get_original_global_conf()
-    if cluster_config.name in add_components and 'root_password' not in global_config:
+    be_depends = {
+        component: (auto_depend or component in be_depend)
+        for component in ['obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express']
+    }
+    added_components = {
+        component: component in add_components
+        for component in ['oceanbase', 'oceanbase-ce', 'obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express']
+    }
+
+    if added_components[cluster_config.name] and 'root_password' not in global_config:
         cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False)
-    if 'obagent' in add_components and 'obagent' in be_depend and 'ocp_agent_monitor_password' not in global_config:
+
+    if added_components['obagent'] and be_depends['obagent'] and 'ocp_agent_monitor_password' not in global_config:
         cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length(), False)
-
+
     if 'proxyro_password' not in global_config:
         for component_name in ['obproxy', 'obproxy-ce']:
-            if component_name in add_components and component_name in be_depend:
+            if added_components[component_name] and be_depends[component_name]:
                 cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length(), False)
-    if 'ocp-express' in add_components and 'ocp-express' in be_depend and 'ocp_meta_password' not in global_config:
-        cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length(), False)
\ No newline at end of file
+    if (added_components['ocp-express'] and be_depends['ocp-express'] and 'ocp_meta_password' not in global_config) or \
+            any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]):
+        if 'ocp_root_password' not in global_config:
+            cluster_config.update_global_conf('ocp_root_password', ConfigUtil.get_random_pwd_by_total_length(), False)  # 不支持在配置文件中配置
+        if 'ocp_meta_password' not in global_config :
+            cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length(), False)
\ No newline at end of file
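The new get_system_memory drops the old sqrt-based curve for a step table plus an 8% tail above 130G. A table-driven restatement with a few sample points, useful for sanity-checking the boundaries:

    def get_system_memory(memory_limit: int) -> int:
        # Restatement of the patched tier table: fixed steps up to 130G, then 8%.
        GB = 1 << 30
        tiers = [(12 * GB, 1 * GB), (20 * GB, 5 * GB), (40 * GB, 6 * GB),
                 (60 * GB, 7 * GB), (80 * GB, 8 * GB), (100 * GB, 9 * GB),
                 (130 * GB, 10 * GB)]
        for limit, system_memory in tiers:
            if memory_limit < limit:
                return system_memory
        return int(memory_limit * 0.08)

    for gb in (8, 16, 64, 256):
        print(gb, get_system_memory(gb << 30) >> 30)
    # 8 -> 1, 16 -> 5, 64 -> 8, 256 -> 20 (8% of 256G, truncated to whole GB here)

Note the new function no longer clamps to min_pool_memory; both generate_config.py and start_check.py now apply that floor (where needed) at the call sites instead.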
diff --git a/plugins/oceanbase/4.0.0.0/init.py b/plugins/oceanbase/4.0.0.0/init.py
index 668b601..b0bf3ec 100644
--- a/plugins/oceanbase/4.0.0.0/init.py
+++ b/plugins/oceanbase/4.0.0.0/init.py
@@ -21,8 +21,7 @@
 from __future__ import absolute_import, division, print_function

 import os
-from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage
-
+from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY

 stdio = None
 force = False
@@ -35,7 +34,7 @@ def critical(*arg, **kwargs):
     stdio.error(*arg, **kwargs)


-def init_dir(server, client, key, path, link_path=None):
+def init_dir(server, client, key, path, deploy_name, link_path=None):
     if force:
         ret = client.execute_command('rm -fr %s' % path, timeout=-1)
         if not ret:
@@ -46,6 +45,7 @@ def init_dir(server, client, key, path, link_path=None):
         ret = client.execute_command('ls %s' % (path))
         if not ret or ret.stdout.strip():
             critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)))
+            critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True)
             return False
     else:
         critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=path)))
@@ -65,6 +65,7 @@ def init(plugin_context, *args, **kwargs):
     cluster_config = plugin_context.cluster_config
     clients = plugin_context.clients
     stdio = plugin_context.stdio
+    deploy_name = plugin_context.deploy_name
     servers_dirs = {}
     force = getattr(plugin_context.options, 'force', False)
     clean = getattr(plugin_context.options, 'clean', False)
@@ -123,6 +124,7 @@ def init(plugin_context, *args, **kwargs):
             ret = client.execute_command('ls %s' % (home_path))
             if not ret or ret.stdout.strip():
                 critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path)))
+                critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True)
                 continue
         else:
             critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path)))
@@ -139,6 +141,7 @@ def init(plugin_context, *args, **kwargs):
                 ret = client.execute_command('ls %s' % (data_path))
                 if not ret or ret.stdout.strip():
                     critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=data_path)))
+                    critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True)
                     continue
             else:
                 critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=data_path)))
@@ -147,7 +150,7 @@ def init(plugin_context, *args, **kwargs):
             link_path = '%s/store' % home_path
             client.execute_command("if [ ! '%s' -ef '%s' ]; then ln -sf %s %s; fi" % (data_path, link_path, data_path, link_path))
             for key in ['clog', 'slog']:
-                # init_dir(server, client, key, server_config['%s_dir' % key], os.path.join(data_path, key))
+                # init_dir(server, client, key, server_config['%s_dir' % key], deploy_name, os.path.join(data_path, key))
                 log_dir = server_config['%s_dir' % key]
                 if force:
                     ret = client.execute_command('rm -fr %s/*' % log_dir, timeout=-1)
@@ -159,6 +162,7 @@ def init(plugin_context, *args, **kwargs):
                 ret = client.execute_command('ls %s' % (log_dir))
                 if not ret or ret.stdout.strip():
                     critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir)))
+                    critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True)
                     continue
             else:
                 critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=log_dir)))
diff --git a/plugins/oceanbase/4.0.0.0/parameter.yaml b/plugins/oceanbase/4.0.0.0/parameter.yaml
index f7a10c2..25de063 100644
--- a/plugins/oceanbase/4.0.0.0/parameter.yaml
+++ b/plugins/oceanbase/4.0.0.0/parameter.yaml
@@ -634,15 +634,14 @@
   description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件
 - name: enable_syslog_recycle
   require: false
-  essential: true
   type: BOOL
   default: false
   min_value: NULL
   max_value: NULL
   section: OBSERVER
   need_restart: false
-  description_en: specifies whether log file recycling is turned on
-  description_local: 是否自动回收系统日志
+  description_en: specifies whether to include the log files before restarting into the recycling space
+  description_local: 是否将重启前的日志文件纳入回收空间
 - name: max_syslog_file_count
   require: false
   essential: true
@@ -1907,6 +1906,13 @@
   need_redeploy: true
   description_en: The password for obagent monitor user
   description_local: obagent 监控用户的密码
+- name: ocp_agent_monitor_username
+  require: false
+  type: STRING
+  default: ocp_monitor
+  need_redeploy: true
+  description_en: The username for the obagent monitor user
+  description_local: obagent 监控用户名
 - name: ocp_monitor_tenant
   require: false
   type: DICT
@@ -1919,7 +1925,6 @@
   description_local: ocp 的监控数据库使用的租户定义
 - name: ocp_monitor_tenant_max_cpu
   name_local: OCP 监控数据库租户的CPU数
-  essential: true
   require: false
   type: INT
   default: 1
@@ -1928,7 +1933,6 @@
   description_local: ocp 监控数据库使用的CPU数量
 - name: ocp_monitor_tenant_memory_size
   name_local: OCP 监控数据库租户内存
-  essential: true
   require: false
   type: CAPACITY_MB
   default: 2G
@@ -1937,7 +1941,6 @@
   description_local: ocp 监控数据库使用的租户内存大小
 - name: ocp_monitor_tenant_log_disk_size
   name_local: OCP 监控数据库租户日志磁盘大小
-  essential: true
   require: false
   type: CAPACITY_MB
   default: 6656M
diff --git a/plugins/oceanbase/4.0.0.0/restart.py b/plugins/oceanbase/4.0.0.0/restart.py
index e339773..adecfb0 100644
--- a/plugins/oceanbase/4.0.0.0/restart.py
+++ b/plugins/oceanbase/4.0.0.0/restart.py
@@ -203,7 +203,7 @@ def _restart(self):
         clients = self.new_clients

         cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config
-        if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository):
+        if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=self.cluster_config, new_cluster_config=self.new_cluster_config, local_home_path=self.local_home_path, repository=self.repository):
             self.stdio.stop_loading('stop_loading', 'fail')
             return False
         self.close()
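The ocp_agent_monitor_username parameter added above is what the 4.0.0.0 bootstrap.py hunk earlier in this patch interpolates into its CREATE USER and GRANT statements. A sketch of the shape of those statements; the user name has to be string-formatted because SQL identifiers cannot be bound as placeholders, while the password stays a bound parameter (both values below are hypothetical):

    agent_user = 'ocp_monitor'   # normally cluster_config.get_global_conf_with_default()['ocp_agent_monitor_username']
    value = 's3cret'             # hypothetical password, normally from global_conf

    create_sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user)
    grant_sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user)
    # cursor.execute(create_sql, [value]); cursor.execute(grant_sql, [value])
    print(create_sql)
    # create user if not exists 'ocp_monitor' IDENTIFIED BY %s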
diff --git a/plugins/oceanbase/4.0.0.0/start.py b/plugins/oceanbase/4.0.0.0/start.py
index 8b0492b..e4172b6 100644
--- a/plugins/oceanbase/4.0.0.0/start.py
+++ b/plugins/oceanbase/4.0.0.0/start.py
@@ -24,6 +24,7 @@ import time
 import requests
 from copy import deepcopy
+from urllib.parse import urlparse

 from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
@@ -32,6 +33,30 @@
 from tool import NetUtil


+def get_ob_configserver_cfg_url(obconfig_url, appname, stdio):
+    parsed_url = urlparse(obconfig_url)
+    host = parsed_url.netloc
+    stdio.verbose('obconfig_url host: %s' % host)
+    url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host)
+    try:
+        response = requests.get(url, allow_redirects=False)
+        if response.status_code != 200:
+            stdio.verbose('request %s status_code: %s' % (url, str(response.status_code)))
+            return None
+    except Exception:
+        stdio.verbose('Configserver url check failed: request %s failed' % url)
+        return None
+
+    if obconfig_url[-1] == '?':
+        link_char = ''
+    elif obconfig_url.find('?') == -1:
+        link_char = '?'
+    else:
+        link_char = '&'
+    cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname)
+    return cfg_url
+
+
 def config_url(ocp_config_server, appname, cid):
     if ocp_config_server[-1] == '?':
         link_char = ''
@@ -87,9 +112,51 @@ def __exit__(self, *args, **kwargs):
         else:
             self.client.del_env(env_key)

+def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap):
+    not_opt_str = OrderedDict({
+        'mysql_port': '-p',
+        'rpc_port': '-P',
+        'zone': '-z',
+        'nodaemon': '-N',
+        'appname': '-n',
+        'cluster_id': '-c',
+        'data_dir': '-d',
+        'devname': '-i',
+        'syslog_level': '-l',
+        'ipv6': '-6',
+        'mode': '-m',
+        'scn': '-f',
+        'local_ip': '-I'
+    })
+    not_cmd_opt = [
+        'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario',
+        'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
+        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
+        'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port'
+    ]
+    get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
+
+    opt_str = []
+    for key in param_list:
+        if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
+            value = get_value(key)
+            opt_str.append('%s=%s' % (key, value))
+    if need_bootstrap:
+        if cfg_url:
+            opt_str.append('obconfig_url=\'%s\'' % cfg_url)
+        else:
+            cmd.append(rs_list_opt)

-def start(plugin_context, *args, **kwargs):
-    cluster_config = plugin_context.cluster_config
+    param_list['mysql_port'] = server_config['mysql_port']
+    for key in not_opt_str:
+        if key in param_list:
+            value = get_value(key)
+            cmd.append('%s %s' % (not_opt_str[key], value))
+    if len(opt_str) > 0:
+        cmd.append('-o %s' % ','.join(opt_str))
+
+def start(plugin_context, new_cluster_config=None, *args, **kwargs):
+    cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config
     options = plugin_context.options
     clients = plugin_context.clients
     stdio = plugin_context.stdio
@@ -105,12 +172,15 @@ def start(plugin_context, *args, **kwargs):
         if not appname or not cluster_id:
             stdio.error('need appname and cluster_id')
             return
-        try:
-            cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
-            if not cfg_url:
-                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
-        except:
-            stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
+        cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio)
+        if not cfg_url:
+            try:
+                cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False),
+                                             stdio)
+                if not cfg_url:
+                    stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
+            except:
+                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
     elif 'ob-configserver' in cluster_config.depends and appname:
         obc_cluster_config = cluster_config.get_depend_config('ob-configserver')
         vip_address = obc_cluster_config.get('vip_address')
@@ -137,6 +207,18 @@ def start(plugin_context, *args, **kwargs):
         server_config = cluster_config.get_server_conf(server)
         home_path = server_config['home_path']

+        param_config = {}
+        if new_cluster_config:
+            old_config = plugin_context.cluster_config.get_server_conf_with_default(server)
+            new_config = new_cluster_config.get_server_conf_with_default(server)
+            for key in new_config:
+                param_value = new_config[key]
+                if key not in old_config or old_config[key] != param_value:
+                    param_config[key] = param_value
+        else:
+            param_config = server_config
+
+
         if not server_config.get('data_dir'):
             server_config['data_dir'] = '%s/store' % home_path

@@ -157,41 +239,7 @@ def start(plugin_context, *args, **kwargs):
         cmd = []
         if use_parameter:
-            not_opt_str = OrderedDict({
-                'mysql_port': '-p',
-                'rpc_port': '-P',
-                'zone': '-z',
-                'nodaemon': '-N',
-                'appname': '-n',
-                'cluster_id': '-c',
-                'data_dir': '-d',
-                'devname': '-i',
-                'syslog_level': '-l',
-                'ipv6': '-6',
-                'mode': '-m',
-                'scn': '-f'
-            })
-            not_cmd_opt = [
-                'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
-                'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
-                'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
-                'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password'
-            ]
-            get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
-            opt_str = []
-            for key in server_config:
-                if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
-                    value = get_value(key)
-                    opt_str.append('%s=%s' % (key, value))
-            for key in not_opt_str:
-                if key in server_config:
-                    value = get_value(key)
-                    cmd.append('%s %s' % (not_opt_str[key], value))
-            if cfg_url:
-                opt_str.append('obconfig_url=\'%s\'' % cfg_url)
-            else:
-                cmd.append(rs_list_opt)
-            cmd.append('-o %s' % ','.join(opt_str))
+            construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap)
         else:
             cmd.append('-p %s' % server_config['mysql_port'])
diff --git a/plugins/oceanbase/4.0.0.0/start_check.py b/plugins/oceanbase/4.0.0.0/start_check.py
index b6d816b..a7425dd 100644
--- a/plugins/oceanbase/4.0.0.0/start_check.py
+++ b/plugins/oceanbase/4.0.0.0/start_check.py
@@ -50,7 +50,7 @@ def time_delta(client):
     time_ed = time.time() * 1000

     time_it = time_ed - time_st
-    time_srv -= time_it
+    time_srv -= time_it/2
     return time_srv - time_st

@@ -63,21 +63,24 @@ def get_mount_path(disk, _path):
     return _mount_path


-def get_system_memory(memory_limit, min_pool_memory):
-    if memory_limit <= 8 << 30:
-        system_memory = 2 << 30
-    elif memory_limit <= 16 << 30:
-        system_memory = 3 << 30
-    elif memory_limit <= 32 << 30:
+def get_system_memory(memory_limit):
+    if memory_limit < 12 << 30:
+        system_memory = 1 << 30
+    elif memory_limit < 20 << 30:
         system_memory = 5 << 30
-    elif memory_limit <= 48 << 30:
+    elif memory_limit < 40 << 30:
+        system_memory = 6 << 30
+    elif memory_limit < 60 << 30:
         system_memory = 7 << 30
-    elif memory_limit <= 64 << 30:
+    elif memory_limit < 80 << 30:
+        system_memory = 8 << 30
+    elif memory_limit < 100 << 30:
+        system_memory = 9 << 30
+    elif memory_limit < 130 << 30:
         system_memory = 10 << 30
     else:
-        memory_limit_gb = memory_limit >> 30
-        system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
-    return max(system_memory, min_pool_memory)
+        system_memory = int(memory_limit * 0.08)
+    return system_memory


 def get_disk_info_by_path(path, client, stdio):
@@ -175,7 +178,7 @@ def system_memory_check():
             factor = 0.75
             suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
             suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
-            if memory_limit < server_memory_config[server]['system_memory']:
+            if memory_limit <= server_memory_config[server]['system_memory']:
                 critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
             elif memory_limit * factor < server_memory_config[server]['system_memory']:
                 alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
@@ -365,7 +368,7 @@ def system_memory_check():
         memory_limit = 0
         percentage = 0
         if server_config.get('memory_limit'):
-            memory_limit = Capacity(server_config['memory_limit']).btyes
+            memory_limit = Capacity(server_config['memory_limit']).bytes
             if production_mode and memory_limit < PRO_MEMORY_MIN:
                 error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=str(Capacity(PRO_MEMORY_MIN))), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
             memory['num'] += memory_limit
@@ -378,7 +381,7 @@ def system_memory_check():
         memory['servers'][server] = {
             'num': memory_limit,
             'percentage': percentage,
-            'system_memory': Capacity(server_config.get('system_memory', 0)).btyes
+            'system_memory': Capacity(server_config.get('system_memory', 0)).bytes
         }

         data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
@@ -553,7 +556,7 @@ def system_memory_check():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes

             ip_server_memory_info[ip] = server_memory_stats
             server_memory_stat = servers_memory[ip]
@@ -601,7 +604,7 @@ def system_memory_check():
                     # slog need 10G
                     disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100
                 else:
-                    disk[mount_path]['need'] += Capacity(need).btyes
+                    disk[mount_path]['need'] += Capacity(need).bytes

                 disk[mount_path]['need'] += slog_size
                 disk[mount_path]['is_data_disk'] = True
@@ -621,7 +624,7 @@ def system_memory_check():
                     log_disk_size = disk[mount_path]['total'] * need / 100
                 else:
                     # log_disk_size
-                    log_disk_size = Capacity(need).btyes
+                    log_disk_size = Capacity(need).bytes
                 servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size
                 disk[mount_path]['need'] += log_disk_size
                 disk[mount_path]['is_clog_disk'] = True
@@ -683,9 +686,9 @@ def system_memory_check():
                 global_conf_with_default['ocp_%s_tenant' % tenant][key.replace(prefix, '', 1)] = global_conf_with_default[key]
         if set(list(plugin_context.components)) & set(component_list):
             tenant_memory_default = global_conf_with_default[tenant_key].get('memory_size', '0')
-            tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).btyes
+            tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).bytes
             tenant_log_disk_default = global_conf_with_default[tenant_key].get('log_disk_size', '0')
-            tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).btyes
+            tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).bytes

     servers_sys_memory = {}
     if tenant_memory:
@@ -700,13 +703,15 @@ def system_memory_check():
                 system_memory = servers_memory[server.ip]['servers'][server]['system_memory']
                 min_pool_memory = servers_min_pool_memory[server]
                 if system_memory == 0:
-                    system_memory = get_system_memory(memory_limit, min_pool_memory)
+                    system_memory = get_system_memory(memory_limit)
                 if not sys_memory_size:
-                    sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes))
+                    sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').bytes))
                 if tenant_memory + system_memory + sys_memory_size <= memory_limit:
                     break
             else:
-                critical('ocp tenant memory', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='mem', avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory)))
+                ocp_meta_tenant_mem = original_global_conf.get('ocp_meta_tenant', {}).get('memory_size', global_conf_with_default['ocp_meta_tenant'].get('memory_size', '0'))
+                ocp_monitor_tenant_mem = original_global_conf.get('ocp_monitor_tenant', {}).get('memory_size', global_conf_with_default['ocp_monitor_tenant'].get('memory_size', '0'))
+                critical('ocp tenant memory', err.EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory), memory_limit=Capacity(memory_limit), system_memory=Capacity(system_memory), sys_tenant_memory=Capacity(sys_memory_size), ocp_meta_tenant_memory=Capacity(ocp_meta_tenant_mem), ocp_monitor_tenant_memory=Capacity(ocp_monitor_tenant_mem)), [err.SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH.format()])

     if tenant_log_disk:
         for server in cluster_config.servers:
@@ -739,7 +744,17 @@ def system_memory_check():
     if success:
         for ip in servers_net_interface:
             client = servers_clients[ip]
+            is_check_ping_permission = False
             for devname in servers_net_interface[ip]:
+                if not is_check_ping_permission:
+                    ret = client.execute_command('ping -W 1 -c 1 127.0.0.1')
+                    if ret.code == 127:
+                        critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format())
+                        break
+                    if not ret:
+                        critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format())
+                        break
+                    is_check_ping_permission = True
                 if client.is_localhost() and devname != 'lo' or (not client.is_localhost() and devname == 'lo'):
                     suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
                     suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config
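The meta-tenant check above sizes the sys tenant as a quarter of the headroom left after system_memory, clamped between min_pool_memory and 16G. Restated with sample numbers (the 2G floor is an assumed min_pool_memory, not a value from the patch):

    GB = 1 << 30

    def sys_tenant_memory(memory_limit, system_memory, min_pool_memory=2 * GB):
        # A quarter of (memory_limit - system_memory), clamped to [min_pool_memory, 16G].
        return max(min_pool_memory, min((memory_limit - system_memory) * 0.25, 16 * GB))

    print(sys_tenant_memory(32 * GB, 6 * GB) / GB)    # 6.5
    print(sys_tenant_memory(128 * GB, 10 * GB) / GB)  # 16.0 (capped)

When even that allocation cannot fit the OCP meta and monitor tenants, the new error path reports the full memory breakdown (memory_limit, system_memory, sys tenant, and both tenant sizes) instead of a bare avail/need pair, which makes the failure actionable.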
diff --git a/plugins/oceanbase/4.0.0.0/upgrade.py b/plugins/oceanbase/4.0.0.0/upgrade.py
index 0502d3b..fff6e6a 100644
--- a/plugins/oceanbase/4.0.0.0/upgrade.py
+++ b/plugins/oceanbase/4.0.0.0/upgrade.py
@@ -26,39 +26,14 @@
 import tool
 from _rpm import Version
 from ssh import LocalClient
+from tool import Exector as BaseExector


-class Exector(object):
+class Exector(BaseExector):

     def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio):
+        super(Exector, self).__init__(host, port, user, pwd, exector_path, stdio)
         self.tmp_prefix = tmp_prefix
-        self._host = host
-        self._port = port
-        self._user = user
-        self._pwd = pwd
-        self._cmd = None
-        self.stdio = stdio
-        self._exector = os.path.join(exector_path, 'executer27/bin/executer')
-
-    @property
-    def host(self):
-        return self._host
-
-    @property
-    def port(self):
-        return self._port
-
-    @property
-    def user(self):
-        return self._user
-
-    @property
-    def pwd(self):
-        return self._pwd
-
-    @property
-    def exector(self):
-        return self._exector

     @property
     def cmd(self):
@@ -66,43 +41,6 @@ def cmd(self):
             self._cmd = '%s %%s -h %s -P %s -u %s %s' % (self._exector, self.host, self.port, self.user, '-p %s' % tool.ConfigUtil.passwd_format(self.pwd) if self.pwd else '')
         return self._cmd

-    @host.setter
-    def host(self, value):
-        self._host = value
-        self._cmd = None
-
-    @port.setter
-    def port(self, value):
-        self._port = value
-        self._cmd = None
-
-    @user.setter
-    def user(self, value):
-        self._user = value
-        self._cmd = None
-
-    @pwd.setter
-    def pwd(self, value):
-        self._pwd = value
-        self._cmd = None
-
-    @pwd.setter
-    def exector(self, exector_path):
-        self._exector = os.path.join(exector_path, 'bin/executer27')
-        self._cmd = None
-
-    def create_temp(self, repository, direct_upgrade=False):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5)
-        if not os.path.exists(tmp_path):
-            relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc'
-            script_dir = os.path.join(repository.repository_dir, relative_dir)
-            LocalClient.put_dir(script_dir, tmp_path)
-        return tmp_path
-
-    def clear_temp(self):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix)
-        tool.DirectoryUtil.rm(tmp_path)
-
     def exec_script(self, name, repository, direct_upgrade=False, can_skip=False):
         script_dir = self.create_temp(repository, direct_upgrade)
         path = os.path.join(script_dir, name)
@@ -216,6 +154,7 @@ def run(self):
             total = len(self.route)
             self.apply_param_plugin(self.repositories[self.route_index - 1])
             while self.route_index < total:
+                setattr(self.plugin_context.options, 'without_parameter', True)
                 self.call_plugin(self.start_plugin, local_home_path=None, repository_dir=None)
                 self.close()
                 if not self.connect():
@@ -224,14 +163,14 @@ def run(self):
                 while self.process_index < self.process_total:
                     try:
                         if not self.process[self.process_index]():
-                            self._dump()
                             return False
                         self.process_index += 1
                         self.process_route_index = self.route_index
                     except Exception as e:
-                        self._dump()
                         self.stdio.exception(str(e))
                         return False
+                    finally:
+                        self._dump()
                 self.process_index = 0
                 self.route_index = self.next_stage + 1
             self.exector.clear_temp()
@@ -253,8 +192,8 @@ def close(self):
             self.db = None
             self.exector = None

-    def connect(self):
-        if self.cursor is None or self.execute_sql('select version()', error=False) is False:
+    def connect(self, cache=True):
+        if self.cursor is None or not cache or self.execute_sql('select version()', error=False) is False:
             ret = self.call_plugin(self.connect_plugin)
             if not ret:
                 return False
@@ -351,6 +290,7 @@ def broken_sql(self, sql, sleep_time=3):
             if ret is None:
                 break
             time.sleep(sleep_time)
+            self.connect(cache=False)

     def wait(self):
         if not self.connect():
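The _dump() relocation in these upgrade runners is a classic try/except/finally fix: progress used to be persisted only on the explicit failure paths, so a clean exit or an unexpected control flow could lose the record of how far the upgrade got. A condensed sketch of the corrected flow (steps and dump are stand-in callables, not OBD names):

    def run_steps(steps, dump):
        # Progress is persisted whether a step fails, raises, or all succeed.
        i = 0
        try:
            while i < len(steps):
                if not steps[i]():
                    return False
                i += 1
        except Exception as e:
            print('step %d raised: %s' % (i, e))
            return False
        finally:
            dump()  # always record how far we got
        return True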
diff --git a/plugins/oceanbase/4.0.0.0/upgrade_route.py b/plugins/oceanbase/4.0.0.0/upgrade_route.py
index f2f126b..aea3ed5 100644
--- a/plugins/oceanbase/4.0.0.0/upgrade_route.py
+++ b/plugins/oceanbase/4.0.0.0/upgrade_route.py
@@ -164,6 +164,8 @@ def findShortestUpgradePath(self, current_repository, dest_repository, stdio):
 
 
 def format_route(routes, repository):
+    if not routes:
+        return routes
     route_res = []
     from_version = repository.version
     from_release = repository.release
diff --git a/plugins/oceanbase/4.1.0.0/upgrade.py b/plugins/oceanbase/4.1.0.0/upgrade.py
index c879ed7..e03952c 100644
--- a/plugins/oceanbase/4.1.0.0/upgrade.py
+++ b/plugins/oceanbase/4.1.0.0/upgrade.py
@@ -27,40 +27,15 @@
 import tool
 from _rpm import Version
 from ssh import LocalClient
+from tool import Exector as BaseExector
 
 
-class Exector(object):
+class Exector(BaseExector):
 
     def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio, script_query_timeout=''):
-        self.tmp_prefix = tmp_prefix
-        self._host = host
-        self._port = port
-        self._user = user
-        self._pwd = pwd
-        self._cmd = None
-        self.stdio = stdio
-        self._exector = os.path.join(exector_path, 'executer27/bin/executer')
+        super(Exector, self).__init__(host, port, user, pwd, exector_path, stdio)
         self.script_query_timeout = script_query_timeout
-
-    @property
-    def host(self):
-        return self._host
-
-    @property
-    def port(self):
-        return self._port
-
-    @property
-    def user(self):
-        return self._user
-
-    @property
-    def pwd(self):
-        return self._pwd
-
-    @property
-    def exector(self):
-        return self._exector
+        self.tmp_prefix = tmp_prefix
 
     @property
     def cmd(self):
@@ -68,43 +43,6 @@ def cmd(self):
             self._cmd = '%s %%s -h %s -P %s -u %s %s' % (self._exector, self.host, self.port, self.user, '-p %s' % tool.ConfigUtil.passwd_format(self.pwd) if self.pwd else '')
         return self._cmd
 
-    @host.setter
-    def host(self, value):
-        self._host = value
-        self._cmd = None
-
-    @port.setter
-    def port(self, value):
-        self._port = value
-        self._cmd = None
-
-    @user.setter
-    def user(self, value):
-        self._user = value
-        self._cmd = None
-
-    @pwd.setter
-    def pwd(self, value):
-        self._pwd = value
-        self._cmd = None
-
-    @pwd.setter
-    def exector(self, exector_path):
-        self._exector = os.path.join(exector_path, 'bin/executer27')
-        self._cmd = None
-
-    def create_temp(self, repository, direct_upgrade=False):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5)
-        if not os.path.exists(tmp_path):
-            relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc'
-            script_dir = os.path.join(repository.repository_dir, relative_dir)
-            LocalClient.put_dir(script_dir, tmp_path)
-        return tmp_path
-
-    def clear_temp(self):
-        tmp_path = os.path.join('/tmp', self.tmp_prefix)
-        tool.DirectoryUtil.rm(tmp_path)
-
     def exec_script(self, name, repository, direct_upgrade=False, can_skip=False, param=''):
         script_dir = self.create_temp(repository, direct_upgrade)
         path = os.path.join(script_dir, name)
@@ -220,6 +158,7 @@ def run(self):
             total = len(self.route)
             self.apply_param_plugin(self.repositories[self.route_index - 1])
             while self.route_index < total:
+                setattr(self.plugin_context.options, 'without_parameter', True)
                 self.call_plugin(self.start_plugin, local_home_path=None, repository_dir=None)
                 self.close()
                 if not self.connect():
@@ -228,14 +167,14 @@ def run(self):
                 while self.process_index < self.process_total:
                     try:
                         if not self.process[self.process_index]():
-                            self._dump()
                             return False
                         self.process_index += 1
                         self.process_route_index = self.route_index
                     except Exception as e:
-                        self._dump()
                         self.stdio.exception(str(e))
                         return False
+                    finally:
+                        self._dump()
                 self.process_index = 0
                 self.route_index = self.next_stage + 1
                 self.exector.clear_temp()
@@ -258,8 +197,8 @@ def close(self):
             self.db = None
             self.exector = None
 
-    def connect(self):
-        if self.cursor is None or self.execute_sql('select version()', error=False) is False:
+    def connect(self, cache=True):
+        if self.cursor is None or not cache or self.execute_sql('select version()', error=False) is False:
             ret = self.call_plugin(self.connect_plugin)
             if not ret:
                 return False
@@ -416,6 +355,7 @@ def broken_sql(self, sql, sleep_time=3):
             if ret is None:
                 break
             time.sleep(sleep_time)
+            self.connect(cache=False)
 
     def wait(self):
         if not self.connect():
diff --git a/plugins/oceanbase/4.2.0.0/check_exit_standby.py b/plugins/oceanbase/4.2.0.0/check_exit_standby.py
index aa9ac08..cdff15d 100644
--- a/plugins/oceanbase/4.2.0.0/check_exit_standby.py
+++ b/plugins/oceanbase/4.2.0.0/check_exit_standby.py
@@ -18,7 +18,7 @@
 # along with OceanBase Deploy. If not, see .
 
 from copy import deepcopy
-from _stdio import FormtatText
+from _stdio import FormatText
 
 
 def check_exit_standby(plugin_context, standby_tenants, no_primary_cursor=False, relation_deploy_names=[], *args, **kwargs):
@@ -29,14 +29,14 @@ def check_exit_standby(plugin_context, standby_tenants, no_primary_cursor=False,
     if not ignore_standby:
         if standby_tenants:
             stdio.warn('Found standby tenant in {0}, drop current {1} the standby tenants will become unavailable'.format([v[0] for v in standby_tenants], 'tenant' if primary_tenant else 'cluster'))
-            stdio.warn(FormtatText.success('Recommendation: you can switchover/decouple/drop the standby tenant then rerun, or rerun with "--ignore-standby" option if you want to proceed despite the risks'))
+            stdio.warn(FormatText.success('Recommendation: you can switchover/decouple/drop the standby tenant then rerun, or rerun with "--ignore-standby" option if you want to proceed despite the risks'))
             stdio.error('Exists standby tenant, current operation is not supported.')
             return
         elif no_primary_cursor and len(relation_deploy_names) > 1:
             relation_deploy_names_cp = deepcopy(relation_deploy_names)
             deploy_name = plugin_context.cluster_config.deploy_name
             stdio.warn('The current cluster is unconnectable, please check if tenants in clusters {} use any tenant in the current cluster as a source by executing command "obd cluster tenant show {{deployment_name}}" '.format([v for v in relation_deploy_names_cp if v != deploy_name]))
-            stdio.warn(FormtatText.success('Recommendation: you can failover/decouple/drop the standby tenant then rerun, or rerun with "--ignore-standby" option if you want to proceed despite the risks'))
+            stdio.warn(FormatText.success('Recommendation: you can failover/decouple/drop the standby tenant then rerun, or rerun with "--ignore-standby" option if you want to proceed despite the risks'))
             stdio.error('There may be standby tenants present, need to confirm.')
             return
     return plugin_context.return_true()
diff --git a/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py b/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py
index a297c5a..2ca3ab7 100644
--- a/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py
+++ b/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py
@@ -21,7 +21,7 @@
 from collections import defaultdict
 
 from tool import ConfigUtil
-from _stdio import FormtatText
+from _stdio import FormatText
 
 tenant_cursor_cache = defaultdict(dict)
 
@@ -144,7 +144,7 @@ def get_option(key, default=''):
     res = primary_cursor.fetchone(sql, (primary_tenant_info['tenant_id'], ))
     if not res or res['max_begin_lsn'] is None:
         error('Check primary tenant have full log failed.')
-        stdio.print(FormtatText.success('Please try again in a moment.'))
+        stdio.print(FormatText.success('Please try again in a moment.'))
         return
     if res['max_begin_lsn'] > 0:
         error('Primary cluster have not full log, not support create standby cluster.')
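The 4.0.0.0 and 4.1.0.0 upgrade plugins above now share their connection state and the create_temp/clear_temp helpers through tool.Exector instead of carrying duplicated property boilerplate; each subclass keeps only its version-specific fields. A compressed sketch of the pattern (the base-class body here is an illustrative assumption, not the real tool.Exector):

class BaseExector(object):
    # stands in for tool.Exector: connection fields plus temp-dir helpers
    def __init__(self, host, port, user, pwd, exector_path, stdio):
        self.host, self.port, self.user, self.pwd = host, port, user, pwd
        self.stdio = stdio

class Exector(BaseExector):
    def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio,
                 script_query_timeout=''):
        super(Exector, self).__init__(host, port, user, pwd, exector_path, stdio)
        self.script_query_timeout = script_query_timeout   # 4.1.0.0-only field
        self.tmp_prefix = tmp_prefix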
diff --git a/plugins/oceanbase/4.2.0.0/create_tenant.py b/plugins/oceanbase/4.2.0.0/create_tenant.py
index c2c9ca8..ff7ad73 100644
--- a/plugins/oceanbase/4.2.0.0/create_tenant.py
+++ b/plugins/oceanbase/4.2.0.0/create_tenant.py
@@ -20,10 +20,13 @@
 
 from __future__ import absolute_import, division, print_function
 
+import os
 import time
 from collections import defaultdict
 from copy import deepcopy
 
+import const
+from tool import Exector
 from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
 from _types import Capacity
 
@@ -32,6 +35,7 @@
 
 
 def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[], stdio=None):
+    global tenant_cursor
     if not user:
         user = 'SYS' if mode == 'oracle' else 'root'
     # find tenant ip, port
@@ -114,7 +118,7 @@ def get_parsed_option(key, default=''):
     if value is None:
         return value
     try:
-        parsed_value = Capacity(value).btyes
+        parsed_value = Capacity(value).bytes
     except:
         stdio.exception("")
         raise Exception("Invalid option {}: {}".format(key, value))
@@ -246,11 +250,11 @@ def error(msg='', *arg, **kwargs):
         STANDBY_MIN_LOG_DISK_SIZE = 1073741824 * 4
 
         if cpu_available < MIN_CPU:
-            return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU))
+            return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=MIN_CPU))
         if mem_available < MIN_MEMORY:
-            return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(MIN_MEMORY)))
         if log_disk_available < MIN_LOG_DISK_SIZE:
-            return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            return error('{zone} not enough log_disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(MIN_LOG_DISK_SIZE)))
 
         if primary_tenant_info:
             recreate_cmd = ''
@@ -271,7 +275,7 @@ def error(msg='', *arg, **kwargs):
         max_cpu = get_option('max_cpu', cpu_available)
         min_cpu = get_option('min_cpu', max_cpu)
         if cpu_available < max_cpu:
-            return error('Resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu))
+            return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=max_cpu))
         if max_cpu < min_cpu:
             return error('min_cpu must less then max_cpu')
         if min_cpu < MIN_CPU:
@@ -287,20 +291,20 @@ def error(msg='', *arg, **kwargs):
             log_disk_size = log_disk_available
 
         if mem_available < memory_size:
-            return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size)))
+            return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(memory_size)))
         if memory_size < MIN_MEMORY:
             return error('memory must greater then %s' % Capacity(MIN_MEMORY))
 
         # log disk size options
         if log_disk_size is not None and log_disk_available < log_disk_size:
-            return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(disk_available), Capacity(log_disk_size)))
+            return error('{zone} not enough log_disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(log_disk_size)))
 
         if primary_tenant_info:
-            if Capacity(primary_memory_size).btyes < STANDBY_MIN_MEMORY:
+            if Capacity(primary_memory_size).bytes < STANDBY_MIN_MEMORY:
                 return error('Primary tenant memory_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_memory_size, STANDBY_MIN_MEMORY))
-            if Capacity(primary_memory_size).btyes < STANDBY_WARN_MEMORY:
+            if Capacity(primary_memory_size).bytes < STANDBY_WARN_MEMORY:
                 stdio.warn('Primary tenant memory_size: {}B , suggestion: {}B'.format(primary_memory_size, STANDBY_WARN_MEMORY))
-            if Capacity(primary_log_disk_size).btyes < STANDBY_MIN_LOG_DISK_SIZE:
+            if Capacity(primary_log_disk_size).bytes < STANDBY_MIN_LOG_DISK_SIZE:
                 return error('Primary tenant log_disk_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_log_disk_size, STANDBY_MIN_LOG_DISK_SIZE))
 
         # iops options
@@ -401,15 +405,41 @@ def error(msg='', *arg, **kwargs):
             db_username = get_option('db_username')
             db_password = get_option('db_password', '')
             if db_username:
+                create_sql, grant_sql = "", ""
                 if mode == "mysql":
-                    sql = """create user if not exists '{username}' IDENTIFIED BY %s;
-                    grant all on *.* to '{username}' WITH GRANT OPTION;""".format(
-                        username=db_username)
+                    create_sql = "create user if not exists '{username}' IDENTIFIED BY %s;".format(username=db_username)
+                    grant_sql = "grant all on *.* to '{username}' WITH GRANT OPTION;".format(username=db_username)
                 else:
                     error("Create user in oracle tenant is not supported")
-                if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]):
+                if not exec_sql_in_tenant(sql=create_sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio):
                     stdio.error('failed to create user {}'.format(db_username))
                     return
+                if not exec_sql_in_tenant(sql=grant_sql, cursor=cursor, tenant=name, mode=mode, stdio=stdio):
+                    stdio.error('Failed to grant privileges to user {}'.format(db_username))
+                    return
+
+            clients = plugin_context.clients
+            client = clients[plugin_context.cluster_config.servers[0]]
+            repositories = plugin_context.repositories
+            cluster_config = plugin_context.cluster_config
+            global_config = cluster_config.get_global_conf()
+
+            time_zone = get_option('time_zone', client.execute_command('date +%:z').stdout.strip())
+            exec_sql_in_tenant(sql="SET GLOBAL time_zone='%s';" % time_zone, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '')
+
+            exector_path = get_option('exector_path', '/usr/obd/lib/executer')
+            exector = Exector(tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user, tenant_cursor.password, exector_path, stdio)
+            for repository in repositories:
+                if repository.name in const.COMPS_OB:
+                    time_zone_info_param = os.path.join(repository.repository_dir, 'etc', 'timezone_V1.log')
+                    srs_data_param = os.path.join(repository.repository_dir, 'etc', 'default_srs_data_mysql.sql')
+                    if not exector.exec_script('import_time_zone_info.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), time_zone_info_param)):
+                        stdio.warn('execute import_time_zone_info.py failed')
+                    if not exector.exec_script('import_srs_data.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), srs_data_param)):
+                        stdio.warn('execute import_srs_data.py failed')
+                    break
+            cmd = 'obclient -h%s -P%s -u%s -Doceanbase -A\n' % (tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user)
+            stdio.print(cmd)
         else:
             # create standby tenant
             # query ip_list
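create_tenant now derives the tenant's default time_zone from the deploy host instead of assuming +08:00: date +%:z prints the host's UTC offset in the [+-]HH:MM form that SET GLOBAL time_zone accepts. A self-contained sketch of the fallback:

import subprocess

def default_tenant_time_zone(requested=None):
    # the --time-zone option wins; otherwise use the host offset, e.g. '+08:00'
    if requested:
        return requested
    out = subprocess.run(['date', '+%:z'], capture_output=True, text=True)
    return out.stdout.strip()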
diff --git a/plugins/oceanbase/4.2.0.0/generate_config.py b/plugins/oceanbase/4.2.0.0/generate_config.py
index 900ee01..9437248 100644
--- a/plugins/oceanbase/4.2.0.0/generate_config.py
+++ b/plugins/oceanbase/4.2.0.0/generate_config.py
@@ -23,7 +23,6 @@
 
 import re, os
 import time
-from math import sqrt
 
 from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
 import _errno as err
@@ -31,23 +30,24 @@
 from _types import Capacity
 
 
-def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
-    if generate_config_mini and memory_limit <= 6 << 30:
+def get_system_memory(memory_limit):
+    if memory_limit < 12 << 30:
         system_memory = 1 << 30
-    elif memory_limit <= 8 << 30:
-        system_memory = 2 << 30
-    elif memory_limit <= 16 << 30:
-        system_memory = 3 << 30
-    elif memory_limit <= 32 << 30:
+    elif memory_limit < 20 << 30:
         system_memory = 5 << 30
-    elif memory_limit <= 48 << 30:
+    elif memory_limit < 40 << 30:
+        system_memory = 6 << 30
+    elif memory_limit < 60 << 30:
        system_memory = 7 << 30
-    elif memory_limit <= 64 << 30:
+    elif memory_limit < 80 << 30:
+        system_memory = 8 << 30
+    elif memory_limit < 100 << 30:
+        system_memory = 9 << 30
+    elif memory_limit < 130 << 30:
         system_memory = 10 << 30
     else:
-        memory_limit_gb = memory_limit >> 30
-        system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
-    return max(system_memory, min_pool_memory)
+        system_memory = int(memory_limit * 0.08)
+    return system_memory
 
 
 def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
@@ -56,7 +56,7 @@
     if not only_generate_password:
         generate_keys += [
             'memory_limit', 'datafile_size', 'log_disk_size', 'system_memory', 'cpu_count', 'production_mode',
-            'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size',
+            'syslog_level', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size',
             'datafile_next', 'datafile_maxsize'
         ]
     if generate_password:
@@ -69,8 +69,8 @@
         cluster_config.update_global_conf('appname', plugin_context.deploy_name)
     if original_global_conf.get('cluster_id') is None:
         cluster_config.update_global_conf('cluster_id', round(time.time()) % 4294901759, False)
-    if generate_password:
-        generate_random_password(cluster_config)
+    if generate_password or only_generate_password:
+        generate_random_password(cluster_config, auto_depend)
     if only_generate_password:
         return plugin_context.return_true()
@@ -104,8 +104,6 @@ def summit_config():
     global_config = cluster_config.get_global_conf()
     max_syslog_file_count_default = 4
-    if global_config.get('enable_syslog_recycle') is None:
-        update_global_conf('enable_syslog_recycle', True)
     if global_config.get('enable_syslog_wf') is None:
         update_global_conf('enable_syslog_wf', False)
     if global_config.get('max_syslog_file_count') is None:
@@ -123,6 +121,9 @@ def summit_config():
     MINI_DATA_FILE_MAX_SIZE = 8 << 30
     MINI_LOG_DISK_SIZE = 14 << 30
 
+    DATA_RESERVED = 0.95
+    DATA_NEXT = 0.1
+
     has_ocp = 'ocp-express' in added_components and 'ocp-express' in be_depend
     if any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]):
         has_ocp = True
@@ -145,7 +146,7 @@ def summit_config():
         auto_set_min_pool_memory = False
         system_memory = 0
         if user_server_config.get('system_memory'):
-            system_memory = Capacity(user_server_config.get('system_memory')).btyes
+            system_memory = Capacity(user_server_config.get('system_memory')).bytes
         if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config:
             auto_set_min_pool_memory = True
             min_pool_memory = server_config['__min_full_resource_pool_memory']
@@ -166,11 +167,11 @@ def summit_config():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes
 
         if user_server_config.get('memory_limit_percentage'):
             if ip in ip_server_memory_info:
-                total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes
+                total_memory = Capacity(ip_server_memory_info[ip]['total']).bytes
                 memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
             elif generate_check:
                 stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
@@ -209,11 +210,11 @@ def summit_config():
             else:
                 memory_limit = MIN_MEMORY
         else:
-            memory_limit = Capacity(server_config.get('memory_limit')).btyes
+            memory_limit = Capacity(server_config.get('memory_limit')).bytes
 
         if system_memory == 0:
             auto_set_system_memory = True
-            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+            system_memory = get_system_memory(memory_limit)
             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
 
         # cpu
@@ -231,7 +232,11 @@ def summit_config():
 
         # disk
         datafile_size = server_config.get('datafile_size', 0)
+        if datafile_size:
+            datafile_size = Capacity(user_server_config.get('datafile_size')).bytes
         log_disk_size = server_config.get('log_disk_size', 0)
+        if log_disk_size:
+            log_disk_size = Capacity(user_server_config.get('log_disk_size')).bytes
         if not server_config.get('datafile_size') or not server_config.get('log_disk_size'):
             disk = {'/': 0}
             ret = client.execute_command('df --block-size=1024')
@@ -288,6 +293,7 @@ def summit_config():
                         update_server_conf(server, 'datafile_maxsize', str(Capacity(MINI_DATA_FILE_MAX_SIZE, 0)))
                     if 'datafile_next' not in user_server_config:
                         update_server_conf(server, 'datafile_next', str(Capacity(MINI_DATA_FILE_NEXT, 0)))
+
             else:
                 auto_set_datafile_size = True
 
@@ -301,14 +307,14 @@ def summit_config():
             else:
                 auto_set_log_disk_size = True
 
-            if user_server_config.get('enable_syslog_recycle') is False:
-                log_size = 1 << 30  # 默认先给1G普通日志空间
-            else:
+            if int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) != 0:
                 log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4
+            else:
+                log_size = 1 << 30  # reserve 1G of plain log space by default
 
             datafile_maxsize = datafile_next = 0
-            DATA_RESERVED = 0.95
-            DATA_NEXT = 0.1
+            if user_server_config.get('datafile_maxsize'):
+                datafile_maxsize = Capacity(user_server_config.get('datafile_maxsize')).bytes
             if clog_dir_mount == data_dir_mount:
                 min_log_size = log_size if clog_dir_mount == home_path_mount else 0
                 MIN_NEED = min_log_size + SLOG_SIZE
@@ -316,13 +322,13 @@ def summit_config():
                     datafile_size =min_datafile_size = MINI_DATA_FILE_SIZE
                 else:
                     min_datafile_size = datafile_size
-                MIN_NEED += Capacity(min_datafile_size).btyes
+                MIN_NEED += Capacity(min_datafile_size).bytes
                 if auto_set_log_disk_size:
                     log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                 else:
                     min_log_disk_size = log_disk_size
-                MIN_NEED += Capacity(min_log_disk_size).btyes
-                min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes
+                MIN_NEED += Capacity(min_log_disk_size).bytes
+                min_need = min_log_size + Capacity(min_datafile_size).bytes + Capacity(min_log_disk_size).bytes
 
                 disk_free = data_dir_disk['avail']
                 if MIN_NEED > disk_free:
@@ -335,7 +341,7 @@ def summit_config():
                         memory_limit = MIN_MEMORY
                         update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                         if auto_set_system_memory:
-                            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                            system_memory = get_system_memory(memory_limit)
                             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     if auto_set_datafile_size:
                         datafile_size = MINI_DATA_FILE_SIZE
@@ -359,27 +365,31 @@ def summit_config():
                         update_server_conf(server, 'memory_limit', memory_limit)
                         memory_limit = Capacity(memory_limit).bytes
                     if auto_set_system_memory:
-                        system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                        system_memory = get_system_memory(memory_limit)
                         update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     if auto_set_log_disk_size:
                         log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                     if auto_set_datafile_size:
-                        datafile_size = min(disk_free - log_disk_size, memory_limit * 3)
-                        datafile_maxsize = max(disk_free - log_disk_size, memory_limit * 3)
-                        datafile_next = DATA_NEXT * datafile_maxsize
+                        datafile_size = min(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory)
+                        if not datafile_maxsize:
+                            datafile_maxsize = max(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory)
             else:
                 if auto_set_log_disk_size:
                     log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                 if auto_set_datafile_size:
-                    datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3)
-                    datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3)
-                    datafile_next = DATA_NEXT * datafile_maxsize
+                    datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory)
+                    if not datafile_maxsize:
+                        datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory)
 
             if auto_set_datafile_size:
+                datafile_next = max(MINI_DATA_FILE_NEXT, DATA_NEXT * datafile_maxsize)
+                datafile_size = min(datafile_maxsize, datafile_size)
                 update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
                 if datafile_maxsize > datafile_size:
-                    update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
-                    update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
+                    if 'datafile_maxsize' not in user_server_config:
+                        update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
+                    if 'datafile_next' not in user_server_config:
+                        update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
             if auto_set_log_disk_size:
                 update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
         else:
@@ -400,14 +410,17 @@ def summit_config():
                     stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                     success = False
                     continue
-                datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes
+                datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).bytes
                 datafile_size = datafile_min_memory_limit * 3
-                datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED
+                if datafile_maxsize:
+                    datafile_size = min(datafile_size, datafile_maxsize)
+                else:
+                    datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED
                 datafile_next = DATA_NEXT * datafile_maxsize
 
             log_disk_min_memory_limit = memory_limit
             if auto_set_log_disk_size:
-                log_disk_size = 3 * memory_limit
+                log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                 min_log_size = log_size if clog_dir_mount == home_path_mount else 0
                 disk_free = clog_dir_disk['avail']
                 min_need = min_log_size + log_disk_size
@@ -427,14 +440,16 @@ def summit_config():
             if auto_set_memory:
                 update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
             if auto_set_system_memory:
-                system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                system_memory = get_system_memory(memory_limit)
                 update_server_conf(server, 'system_memory', system_memory)
             if auto_set_datafile_size:
                 update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
                 if datafile_maxsize > datafile_size:
-                    update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
-                    update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
+                    if 'datafile_maxsize' not in user_server_config:
+                        update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
+                    if 'datafile_next' not in user_server_config:
+                        update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
             if auto_set_log_disk_size:
                 update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
 
@@ -469,10 +484,10 @@ def summit_config():
             server_info = servers_info.get(server)
             if not server_info:
                 continue
-            memory_limit = Capacity(server_info['memory_limit']).btyes
-            system_memory = Capacity(server_info['system_memory']).btyes
-            log_disk_size = Capacity(server_info['log_disk_size']).btyes
-            min_pool_memory = Capacity(server_info['min_pool_memory']).btyes
+            memory_limit = Capacity(server_info['memory_limit']).bytes
+            system_memory = Capacity(server_info['system_memory']).bytes
+            log_disk_size = Capacity(server_info['log_disk_size']).bytes
+            min_pool_memory = Capacity(server_info['min_pool_memory']).bytes
             if not sys_log_disk_size:
                 if not sys_memory_size:
                     sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30))
@@ -488,7 +503,7 @@ def summit_config():
             update_global_conf('ocp_meta_tenant_memory_size', '1536M')
 
     if generate_password:
-        generate_random_password(cluster_config)
+        generate_random_password(cluster_config, auto_depend)
 
     if generate_consistent_config:
         generate_global_config = generate_configs['global']
@@ -502,7 +517,7 @@ def summit_config():
                 if key in generate_configs.get(server, {}):
                     value = generate_configs[server][key]
                     servers.append(server)
-                    values.append(Capacity(value).btyes if is_capacity_key else value)
+                    values.append(Capacity(value).bytes if is_capacity_key else value)
             if values:
                 if len(values) != server_num and key in generate_global_config:
                     continue
@@ -565,23 +580,33 @@ def summit_config():
     stdio.stop_loading('fail')
 
 
-def generate_random_password(cluster_config):
+def generate_random_password(cluster_config, auto_depend):
     add_components = cluster_config.get_deploy_added_components()
     be_depend = cluster_config.be_depends
     global_config = cluster_config.get_original_global_conf()
-    if cluster_config.name in add_components and 'root_password' not in global_config:
-        cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False)
-    if 'obagent' in add_components and 'obagent' in be_depend and 'ocp_agent_monitor_password' not in global_config:
+    be_depends = {
+        component: (auto_depend or component in be_depend)
+        for component in ['obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express']
+    }
+    added_components = {
+        component: component in add_components
+        for component in ['oceanbase', 'oceanbase-ce', 'obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express']
+    }
+
+    if added_components[cluster_config.name] and 'root_password' not in global_config:
+        cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False)
+
+    if added_components['obagent'] and be_depends['obagent'] and 'ocp_agent_monitor_password' not in global_config:
         cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length(), False)
-    
+
     if 'proxyro_password' not in global_config:
         for component_name in ['obproxy', 'obproxy-ce']:
-            if component_name in add_components and component_name in be_depend:
+            if added_components[component_name] and be_depends[component_name]:
                 cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length(), False)
 
-    if ('ocp-express' in add_components and 'ocp-express' in be_depend and 'ocp_meta_password' not in global_config) or \
-            (any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]])):
-        if 'ocp_root_password' not in global_config :
+    if (added_components['ocp-express'] and be_depends['ocp-express'] and 'ocp_meta_password' not in global_config) or \
+            any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]):
+        if 'ocp_root_password' not in global_config:
            cluster_config.update_global_conf('ocp_root_password', ConfigUtil.get_random_pwd_by_total_length(), False)  # not configurable via the config file
        if 'ocp_meta_password' not in global_config :
            cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length(), False)
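The rewritten get_system_memory drops the sqrt curve and the min_pool_memory floor in favour of fixed steps with an 8% tail. A compact restatement of the new mapping, in GiB for readability (the real function works in bytes):

def system_memory_gib(memory_limit_gib):
    tiers = [(12, 1), (20, 5), (40, 6), (60, 7), (80, 8), (100, 9), (130, 10)]
    for upper_bound, reserved in tiers:
        if memory_limit_gib < upper_bound:
            return reserved
    return memory_limit_gib * 0.08   # above 130G: reserve 8%

assert system_memory_gib(64) == 8    # old sqrt rule gave int(3 * (sqrt(64) - 3)) == 15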
diff --git a/plugins/oceanbase/4.2.0.0/parameter.yaml b/plugins/oceanbase/4.2.0.0/parameter.yaml
index ce64655..60493fe 100644
--- a/plugins/oceanbase/4.2.0.0/parameter.yaml
+++ b/plugins/oceanbase/4.2.0.0/parameter.yaml
@@ -641,15 +641,14 @@
   description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件
 - name: enable_syslog_recycle
   require: false
-  essential: true
   type: BOOL
   default: false
   min_value: NULL
   max_value: NULL
   section: OBSERVER
   need_restart: false
-  description_en: specifies whether log file recycling is turned on
-  description_local: 是否自动回收系统日志
+  description_en: specifies whether to include the log files before restarting into the recycling space
+  description_local: 是否将重启前的日志文件纳入回收空间
 - name: max_syslog_file_count
   require: false
   essential: true
@@ -1871,6 +1870,13 @@
   need_redeploy: true
   description_en: The password for obagent monitor user
   description_local: obagent 监控用户的密码
+- name: ocp_agent_monitor_username
+  require: false
+  type: STRING
+  default: ocp_monitor
+  need_redeploy: true
+  description_en: The username for obagent monitor user
+  description_local: obagent 监控用户名
 - name: ocp_monitor_tenant
   require: false
   type: DICT
@@ -1883,7 +1889,6 @@
   description_local: ocp 的监控数据库使用的租户定义
 - name: ocp_monitor_tenant_max_cpu
   name_local: OCP 监控数据库租户的CPU数
-  essential: true
   require: false
   type: INT
   default: 1
@@ -1892,7 +1897,6 @@
   description_local: ocp 监控数据库使用的CPU数量
 - name: ocp_monitor_tenant_memory_size
   name_local: OCP 监控数据库租户内存
-  essential: true
   require: false
   type: CAPACITY_MB
   default: 2G
@@ -1901,7 +1905,6 @@
   description_local: ocp 监控数据库使用的租户内存大小
 - name: ocp_monitor_tenant_log_disk_size
   name_local: OCP 监控数据库租户日志磁盘大小
-  essential: true
   require: false
   type: CAPACITY_MB
   default: 6656M
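With enable_syslog_recycle no longer driving sizing, the reserved syslog space keys off max_syslog_file_count instead (see the generate_config hunks above): a non-zero count reserves four times the per-file footprint, a zero count falls back to a flat 1G. A sketch of the rule:

def syslog_reserve_bytes(max_syslog_file_count=4):
    # 256M per syslog file, with the 4x factor used in the diff
    if int(max_syslog_file_count) != 0:
        return (256 << 20) * int(max_syslog_file_count) * 4
    return 1 << 30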
diff --git a/plugins/oceanbase/4.2.0.0/start.py b/plugins/oceanbase/4.2.0.0/start.py
index 2518026..a080cb3 100644
--- a/plugins/oceanbase/4.2.0.0/start.py
+++ b/plugins/oceanbase/4.2.0.0/start.py
@@ -24,6 +24,7 @@
 import time
 import requests
 from copy import deepcopy
+from urllib.parse import urlparse
 
 from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
@@ -32,6 +33,30 @@
 from tool import NetUtil
 
 
+def get_ob_configserver_cfg_url(obconfig_url, appname, stdio):
+    parsed_url = urlparse(obconfig_url)
+    host = parsed_url.netloc
+    stdio.verbose('obconfig_url host: %s' % host)
+    url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host)
+    try:
+        response = requests.get(url, allow_redirects=False)
+        if response.status_code != 200:
+            stdio.verbose('request %s status_code: %s' % (url, str(response.status_code)))
+            return None
+    except Exception:
+        stdio.verbose('Configserver url check failed: request %s failed' % url)
+        return None
+
+    if obconfig_url[-1] == '?':
+        link_char = ''
+    elif obconfig_url.find('?') == -1:
+        link_char = '?'
+    else:
+        link_char = '&'
+    cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname)
+    return cfg_url
+
+
 def config_url(ocp_config_server, appname, cid):
     if ocp_config_server[-1] == '?':
         link_char = ''
@@ -88,8 +113,51 @@ def __exit__(self, *args, **kwargs):
         self.client.del_env(env_key)
 
 
-def start(plugin_context, *args, **kwargs):
-    cluster_config = plugin_context.cluster_config
+def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap):
+    not_opt_str = OrderedDict({
+        'mysql_port': '-p',
+        'rpc_port': '-P',
+        'zone': '-z',
+        'nodaemon': '-N',
+        'appname': '-n',
+        'cluster_id': '-c',
+        'data_dir': '-d',
+        'devname': '-i',
+        'syslog_level': '-l',
+        'ipv6': '-6',
+        'mode': '-m',
+        'scn': '-f',
+        'local_ip': '-I'
+    })
+    not_cmd_opt = [
+        'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario',
+        'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
+        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
+        'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port'
+    ]
+    get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
+
+    opt_str = []
+    for key in param_list:
+        if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
+            value = get_value(key)
+            opt_str.append('%s=%s' % (key, value))
+    if need_bootstrap:
+        if cfg_url:
+            opt_str.append('obconfig_url=\'%s\'' % cfg_url)
+        else:
+            cmd.append(rs_list_opt)
+
+    param_list['mysql_port'] = server_config['mysql_port']
+    for key in not_opt_str:
+        if key in param_list:
+            value = get_value(key)
+            cmd.append('%s %s' % (not_opt_str[key], value))
+    if len(opt_str) > 0:
+        cmd.append('-o %s' % ','.join(opt_str))
+
+
+def start(plugin_context, new_cluster_config=None, *args, **kwargs):
+    cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config
     options = plugin_context.options
     clients = plugin_context.clients
     stdio = plugin_context.stdio
@@ -105,12 +173,15 @@ def start(plugin_context, *args, **kwargs):
         if not appname or not cluster_id:
             stdio.error('need appname and cluster_id')
             return
-        try:
-            cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
-            if not cfg_url:
-                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
-        except:
-            stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
+        cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio)
+        if not cfg_url:
+            try:
+                cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False),
+                                             stdio)
+                if not cfg_url:
+                    stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
+            except:
+                stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format())
     elif 'ob-configserver' in cluster_config.depends and appname:
         obc_cluster_config = cluster_config.get_depend_config('ob-configserver')
         vip_address = obc_cluster_config.get('vip_address')
@@ -137,6 +208,17 @@ def start(plugin_context, *args, **kwargs):
         server_config = cluster_config.get_server_conf(server)
         home_path = server_config['home_path']
 
+        param_config = {}
+        if new_cluster_config:
+            old_config = plugin_context.cluster_config.get_server_conf_with_default(server)
+            new_config = new_cluster_config.get_server_conf_with_default(server)
+            for key in new_config:
+                param_value = new_config[key]
+                if key not in old_config or old_config[key] != param_value:
+                    param_config[key] = param_value
+        else:
+            param_config = server_config
+
         if not server_config.get('data_dir'):
             server_config['data_dir'] = '%s/store' % home_path
 
@@ -160,42 +242,7 @@ def start(plugin_context, *args, **kwargs):
 
         cmd = []
         if use_parameter:
-            not_opt_str = OrderedDict({
-                'mysql_port': '-p',
-                'rpc_port': '-P',
-                'zone': '-z',
-                'nodaemon': '-N',
-                'appname': '-n',
-                'cluster_id': '-c',
-                'data_dir': '-d',
-                'devname': '-i',
-                'syslog_level': '-l',
-                'ipv6': '-6',
-                'mode': '-m',
-                'scn': '-f',
-                'local_ip': '-I'
-            })
-            not_cmd_opt = [
-                'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
-                'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
-                'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
-                'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password','ocp_root_password'
-            ]
-            get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
-            opt_str = []
-            for key in server_config:
-                if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
-                    value = get_value(key)
-                    opt_str.append('%s=%s' % (key, value))
-            if cfg_url:
-                opt_str.append('obconfig_url=\'%s\'' % cfg_url)
-            else:
-                cmd.append(rs_list_opt)
-            for key in not_opt_str:
-                if key in server_config:
-                    value = get_value(key)
-                    cmd.append('%s %s' % (not_opt_str[key], value))
-            cmd.append('-o %s' % ','.join(opt_str))
+            construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap)
         else:
             cmd.append('-p %s' % server_config['mysql_port'])
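When start() is driven by a new_cluster_config (a restart after a parameter change), construct_opts receives only the keys whose values differ from the running configuration, so the observer is relaunched with just the changed -o options; a fresh start still passes the whole server config. A toy version of that difference computation:

def changed_params(old_config, new_config):
    # mirrors the param_config loop in start(): new or modified keys only
    return {key: value for key, value in new_config.items()
            if key not in old_config or old_config[key] != value}

old = {'memory_limit': '8G', 'syslog_level': 'INFO'}
new = {'memory_limit': '16G', 'syslog_level': 'INFO', 'cpu_count': 8}
assert changed_params(old, new) == {'memory_limit': '16G', 'cpu_count': 8}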
diff --git a/plugins/oceanbase/4.2.0.0/start_check.py b/plugins/oceanbase/4.2.0.0/start_check.py
index 5d41dde..74d275d 100644
--- a/plugins/oceanbase/4.2.0.0/start_check.py
+++ b/plugins/oceanbase/4.2.0.0/start_check.py
@@ -51,7 +51,7 @@ def time_delta(client):
     time_ed = time.time() * 1000
 
     time_it = time_ed - time_st
-    time_srv -= time_it
+    time_srv -= time_it/2
     return time_srv - time_st
 
@@ -64,21 +64,24 @@ def get_mount_path(disk, _path):
     return _mount_path
 
 
-def get_system_memory(memory_limit, min_pool_memory):
-    if memory_limit <= 8 << 30:
-        system_memory = 2 << 30
-    elif memory_limit <= 16 << 30:
-        system_memory = 3 << 30
-    elif memory_limit <= 32 << 30:
+def get_system_memory(memory_limit):
+    if memory_limit < 12 << 30:
+        system_memory = 1 << 30
+    elif memory_limit < 20 << 30:
         system_memory = 5 << 30
-    elif memory_limit <= 48 << 30:
+    elif memory_limit < 40 << 30:
+        system_memory = 6 << 30
+    elif memory_limit < 60 << 30:
         system_memory = 7 << 30
-    elif memory_limit <= 64 << 30:
+    elif memory_limit < 80 << 30:
+        system_memory = 8 << 30
+    elif memory_limit < 100 << 30:
+        system_memory = 9 << 30
+    elif memory_limit < 130 << 30:
         system_memory = 10 << 30
     else:
-        memory_limit_gb = memory_limit >> 30
-        system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
-    return max(system_memory, min_pool_memory)
+        system_memory = int(memory_limit * 0.08)
+    return system_memory
 
 
 def get_disk_info_by_path(path, client, stdio):
@@ -176,7 +179,7 @@ def system_memory_check():
         factor = 0.75
         suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
         suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
-        if memory_limit < server_memory_config[server]['system_memory']:
+        if memory_limit <= server_memory_config[server]['system_memory']:
             critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
         elif memory_limit * factor < server_memory_config[server]['system_memory']:
             alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
@@ -366,7 +369,7 @@ def system_memory_check():
         memory_limit = 0
         percentage = 0
         if server_config.get('memory_limit'):
-            memory_limit = Capacity(server_config['memory_limit']).btyes
+            memory_limit = Capacity(server_config['memory_limit']).bytes
             if production_mode and memory_limit < PRO_MEMORY_MIN:
                 error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
             memory['num'] += memory_limit
@@ -379,7 +382,7 @@ def system_memory_check():
         memory['servers'][server] = {
             'num': memory_limit,
             'percentage': percentage,
-            'system_memory': Capacity(server_config.get('system_memory', 0)).btyes
+            'system_memory': Capacity(server_config.get('system_memory', 0)).bytes
         }
 
         data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
@@ -553,7 +556,7 @@ def system_memory_check():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes
 
             ip_server_memory_info[ip] = server_memory_stats
             server_memory_stat = servers_memory[ip]
@@ -601,7 +604,7 @@ def system_memory_check():
                     # slog need 4G
                     disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100
                 else:
-                    disk[mount_path]['need'] += Capacity(need).btyes
+                    disk[mount_path]['need'] += Capacity(need).bytes
 
             disk[mount_path]['need'] += slog_size
             disk[mount_path]['is_data_disk'] = True
@@ -621,7 +624,7 @@ def system_memory_check():
                     log_disk_size = disk[mount_path]['total'] * need / 100
                 else:
                     # log_disk_size
                    log_disk_size = Capacity(need).bytes
             servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size
             disk[mount_path]['need'] += log_disk_size
             disk[mount_path]['is_clog_disk'] = True
@@ -683,9 +686,9 @@ def system_memory_check():
                 global_conf_with_default['ocp_%s_tenant' % tenant][key.replace(prefix, '', 1)] = global_conf_with_default[key]
         if set(list(plugin_context.components)) & set(component_list):
             tenant_memory_default = global_conf_with_default[tenant_key].get('memory_size', '0')
-            tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).btyes
+            tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).bytes
             tenant_log_disk_default = global_conf_with_default[tenant_key].get('log_disk_size', '0')
-            tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).btyes
+            tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).bytes
 
     servers_sys_memory = {}
     if tenant_memory:
@@ -700,13 +703,15 @@ def system_memory_check():
             system_memory = servers_memory[server.ip]['servers'][server]['system_memory']
             min_pool_memory = servers_min_pool_memory[server]
             if system_memory == 0:
-                system_memory = get_system_memory(memory_limit, min_pool_memory)
+                system_memory = get_system_memory(memory_limit)
             if not sys_memory_size:
-                sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes))
+                sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').bytes))
             if tenant_memory + system_memory + sys_memory_size <= memory_limit:
                 break
         else:
-            critical('ocp tenant memory', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='memory', avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory)))
+            ocp_meta_tenant_mem = original_global_conf.get('ocp_meta_tenant', {}).get('memory_size', global_conf_with_default['ocp_meta_tenant'].get('memory_size', '0'))
+            ocp_monitor_tenant_mem = original_global_conf.get('ocp_monitor_tenant', {}).get('memory_size', global_conf_with_default['ocp_monitor_tenant'].get('memory_size', '0'))
+            critical('ocp tenant memory', err.EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory), memory_limit=Capacity(memory_limit), system_memory=Capacity(system_memory), sys_tenant_memory=Capacity(sys_memory_size), ocp_meta_tenant_memory=Capacity(ocp_meta_tenant_mem), ocp_monitor_tenant_memory=Capacity(ocp_monitor_tenant_mem)), [err.SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH.format()])
 
     if tenant_log_disk:
         for server in cluster_config.servers:
@@ -720,7 +725,17 @@ def system_memory_check():
     if success:
         for ip in servers_net_interface:
             client = servers_clients[ip]
+            is_check_ping_permission = False
             for devname in servers_net_interface[ip]:
+                if not is_check_ping_permission:
+                    ret = client.execute_command('ping -W 1 -c 1 127.0.0.1')
+                    if ret.code == 127:
+                        critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format())
+                        break
+                    if not ret:
+                        critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format())
+                        break
+                    is_check_ping_permission = True
                 if client.is_localhost() and (devname != 'lo' and devname is not None) or (not client.is_localhost() and devname == 'lo'):
                     suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
                     suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config
diff --git a/plugins/oceanbase/4.2.0.0/upgrade_check.py b/plugins/oceanbase/4.2.0.0/upgrade_check.py
index af1dad2..f94a87b 100644
--- a/plugins/oceanbase/4.2.0.0/upgrade_check.py
+++ b/plugins/oceanbase/4.2.0.0/upgrade_check.py
@@ -23,7 +23,7 @@
 import os
 
 from _rpm import Version
-from _stdio import FormtatText
+from _stdio import FormatText
 
 
 def upgrade_check(plugin_context, current_repository, upgrade_repositories, route, cursor, *args, **kwargs):
@@ -44,7 +44,7 @@ def upgrade_check(plugin_context, current_repository, upgrade_repositories, rout
                 need_upgrade_standbys.append(standby_deploy_name)
         if need_upgrade_standbys:
             stdio.warn('Found standby tenant in {0}, upgrade current cluster may cause data synchronization error with standby tenants'.format(need_upgrade_standbys))
-            stdio.warn(FormtatText.success('Recommendation: upgrade clusters {0} first or switchover standby tenant to primary tenant or you can rerun upgrade with "--ignore-standby" option if you want to proceed despite the risks'.format(need_upgrade_standbys)))
+            stdio.warn(FormatText.success('Recommendation: upgrade clusters {0} first or switchover standby tenant to primary tenant or you can rerun upgrade with "--ignore-standby" option if you want to proceed despite the risks'.format(need_upgrade_standbys)))
             stdio.error('Check standby tenant version error.')
             return False
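The time_delta fix halves the measured round trip before comparing clocks: the server reads its clock roughly mid-flight, so the offset estimate should compare it with the midpoint of the local send and receive timestamps (subtracting the full round trip, as before, skewed every estimate by half the RTT). A sketch of the estimator:

import time

def clock_offset_ms(read_server_time_ms):
    # read_server_time_ms is assumed to perform one query round trip
    t_send = time.time() * 1000
    t_server = read_server_time_ms()
    t_recv = time.time() * 1000
    rtt = t_recv - t_send
    return (t_server - rtt / 2) - t_send   # == t_server - (t_send + t_recv) / 2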
diff --git a/plugins/oceanbase/4.2.1.0/bootstrap.py b/plugins/oceanbase/4.2.1.0/bootstrap.py
index 732e517..067971e 100644
--- a/plugins/oceanbase/4.2.1.0/bootstrap.py
+++ b/plugins/oceanbase/4.2.1.0/bootstrap.py
@@ -127,10 +127,11 @@ def is_bootstrap():
         has_obagent = "obagent" in added_components and "obagent" in be_depend
         if has_obagent or ('ocp_agent_monitor_password' in global_conf and 'obagent' not in changed_components):
             value = global_conf['ocp_agent_monitor_password'] if global_conf.get('ocp_agent_monitor_password') is not None else ''
-            sql = 'create user if not exists "ocp_monitor" IDENTIFIED BY %s'
+            agent_user = cluster_config.get_global_conf_with_default().get('ocp_agent_monitor_username')
+            sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user)
             stdio.verbose(sql)
             raise_cursor.execute(sql, [value])
-            sql = 'grant select on oceanbase.* to ocp_monitor IDENTIFIED BY %s'
+            sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user)
             stdio.verbose(sql)
             raise_cursor.execute(sql, [value])
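The bootstrap hunk keeps the password as a bound %s parameter while the username, which MySQL's CREATE USER cannot take as a bind parameter, is interpolated from the defaulted ocp_agent_monitor_username config value rather than from free-form input. The same split, sketched against a DB-API style cursor:

def create_monitor_user(cursor, agent_user, password):
    # identifier via str.format (config-sourced), secret via parameter binding
    create_sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user)
    grant_sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user)
    cursor.execute(create_sql, [password])
    cursor.execute(grant_sql, [password])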
diff --git a/plugins/oceanbase/4.2.1.0/generate_config.py b/plugins/oceanbase/4.2.1.0/generate_config.py
index 604d871..49f7abe 100644
--- a/plugins/oceanbase/4.2.1.0/generate_config.py
+++ b/plugins/oceanbase/4.2.1.0/generate_config.py
@@ -23,7 +23,6 @@
 
 import re, os
 import time
-from math import sqrt
 
 from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
 import _errno as err
@@ -31,23 +30,24 @@
 from _types import Capacity
 
 
-def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
-    if generate_config_mini and memory_limit <= 6 << 30:
+def get_system_memory(memory_limit):
+    if memory_limit < 12 << 30:
         system_memory = 1 << 30
-    elif memory_limit <= 8 << 30:
-        system_memory = 2 << 30
-    elif memory_limit <= 16 << 30:
-        system_memory = 3 << 30
-    elif memory_limit <= 32 << 30:
+    elif memory_limit < 20 << 30:
         system_memory = 5 << 30
-    elif memory_limit <= 48 << 30:
+    elif memory_limit < 40 << 30:
+        system_memory = 6 << 30
+    elif memory_limit < 60 << 30:
         system_memory = 7 << 30
-    elif memory_limit <= 64 << 30:
+    elif memory_limit < 80 << 30:
+        system_memory = 8 << 30
+    elif memory_limit < 100 << 30:
+        system_memory = 9 << 30
+    elif memory_limit < 130 << 30:
         system_memory = 10 << 30
     else:
-        memory_limit_gb = memory_limit >> 30
-        system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
-    return max(system_memory, min_pool_memory)
+        system_memory = int(memory_limit * 0.08)
+    return system_memory
 
 
 def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
@@ -56,7 +56,7 @@
     if not only_generate_password:
         generate_keys += [
             'memory_limit', 'datafile_size', 'log_disk_size', 'system_memory', 'cpu_count', 'production_mode',
-            'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size',
+            'syslog_level', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size',
             'datafile_next', 'datafile_maxsize'
         ]
     if generate_password:
@@ -69,8 +69,8 @@
         cluster_config.update_global_conf('appname', plugin_context.deploy_name)
     if original_global_conf.get('cluster_id') is None:
         cluster_config.update_global_conf('cluster_id', round(time.time()) % 4294901759, False)
-    if generate_password:
-        generate_random_password(cluster_config)
+    if generate_password or only_generate_password:
+        generate_random_password(cluster_config, auto_depend)
     if only_generate_password:
         return plugin_context.return_true()
@@ -105,8 +105,6 @@ def summit_config():
     global_config = cluster_config.get_global_conf()
     max_syslog_file_count_default = 4
-    if global_config.get('enable_syslog_recycle') is None:
-        update_global_conf('enable_syslog_recycle', True)
     if global_config.get('enable_syslog_wf') is None:
         update_global_conf('enable_syslog_wf', False)
     if global_config.get('max_syslog_file_count') is None:
@@ -146,7 +144,7 @@ def summit_config():
         auto_set_min_pool_memory = False
         system_memory = 0
         if user_server_config.get('system_memory'):
-            system_memory = Capacity(user_server_config.get('system_memory')).btyes
+            system_memory = Capacity(user_server_config.get('system_memory')).bytes
         if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config:
             auto_set_min_pool_memory = True
             min_pool_memory = server_config['__min_full_resource_pool_memory']
@@ -167,11 +165,11 @@ def summit_config():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = Capacity(str(v)).btyes
+                    server_memory_stats[key] = Capacity(str(v)).bytes
 
         if user_server_config.get('memory_limit_percentage'):
             if ip in ip_server_memory_info:
-                total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes
+                total_memory = Capacity(ip_server_memory_info[ip]['total']).bytes
                 memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
             elif generate_check:
                 stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
@@ -209,7 +207,7 @@ def summit_config():
             if 'memory' in reserved_resource.keys():
                 for server_ip in reserved_resource['memory'].keys():
                     if server_ip == ip:
-                        available_memory = available_memory - Capacity(reserved_resource['memory'][server_ip]).btyes
+                        available_memory = available_memory - Capacity(reserved_resource['memory'][server_ip]).bytes
                 memory_limit = max(MIN_MEMORY, int(available_memory * 0.9))
                 update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                 auto_set_memory = True
@@ -220,11 +218,11 @@ def summit_config():
             else:
                 memory_limit = MIN_MEMORY
         else:
-            memory_limit = Capacity(server_config.get('memory_limit')).btyes
+            memory_limit = Capacity(server_config.get('memory_limit')).bytes
 
         if system_memory == 0:
             auto_set_system_memory = True
-            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+            system_memory = get_system_memory(memory_limit)
             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
 
         # cpu
@@ -241,8 +239,8 @@ def summit_config():
             stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT))
 
         # disk
-        datafile_size = Capacity(server_config.get('datafile_size', 0)).btyes
-        log_disk_size = Capacity(server_config.get('log_disk_size', 0)).btyes
+        datafile_size = Capacity(server_config.get('datafile_size', 0)).bytes
+        log_disk_size = Capacity(server_config.get('log_disk_size', 0)).bytes
         if not server_config.get('datafile_size') or not server_config.get('log_disk_size'):
             disk = {'/': 0}
             ret = client.execute_command('df --block-size=1024')
@@ -312,12 +310,13 @@ def summit_config():
             else:
                 auto_set_log_disk_size = True
 
-            if user_server_config.get('enable_syslog_recycle') is False:
-                log_size = 1 << 30  # 默认先给1G普通日志空间
-            else:
+            if int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) != 0:
                 log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4
 
             datafile_maxsize = datafile_next = 0
+            if user_server_config.get('datafile_maxsize'):
+                datafile_maxsize = Capacity(user_server_config.get('datafile_maxsize')).bytes
+
             DATA_RESERVED = 0.95
             DATA_NEXT = 0.1
             if clog_dir_mount == data_dir_mount:
@@ -327,13 +326,13 @@ def summit_config():
                     datafile_size =min_datafile_size = MINI_DATA_FILE_SIZE
                 else:
                     min_datafile_size = datafile_size
-                MIN_NEED += Capacity(min_datafile_size).btyes
+                MIN_NEED += Capacity(min_datafile_size).bytes
                 if auto_set_log_disk_size:
                     log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                 else:
                     min_log_disk_size = log_disk_size
-                MIN_NEED += Capacity(min_log_disk_size).btyes
-                min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes
+                MIN_NEED += Capacity(min_log_disk_size).bytes
+                min_need = min_log_size + Capacity(min_datafile_size).bytes + Capacity(min_log_disk_size).bytes
 
                 disk_free = data_dir_disk['avail']
                 if MIN_NEED > disk_free:
@@ -346,7 +345,7 @@ def summit_config():
                         memory_limit = MIN_MEMORY
                         update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                         if auto_set_system_memory:
-                            system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                            system_memory = get_system_memory(memory_limit)
                             update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     if auto_set_datafile_size:
                         datafile_size = MINI_DATA_FILE_SIZE
@@ -368,29 +367,32 @@ def summit_config():
                             memory_factor -= 3
                         memory_limit = str(Capacity(disk_free / max(1, memory_factor), 0))
                         update_server_conf(server, 'memory_limit', memory_limit)
-                        memory_limit = Capacity(memory_limit).btyes
+                        memory_limit = Capacity(memory_limit).bytes
                     if auto_set_system_memory:
-                        system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                        system_memory = get_system_memory(memory_limit)
                         update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     if auto_set_log_disk_size:
                         log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                     if auto_set_datafile_size:
-                        datafile_size = min(disk_free - log_disk_size, memory_limit * 3)
-                        if datafile_maxsize > datafile_size:
-                            datafile_maxsize = max(disk_free - log_disk_size, memory_limit * 3)
-                            datafile_next = DATA_NEXT * datafile_maxsize
+                        datafile_size = min(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory)
+                        if not datafile_maxsize:
+                            datafile_maxsize = max(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory)
             else:
                 if auto_set_log_disk_size:
                     log_disk_size = (memory_limit - system_memory) * 3 + system_memory
                 if auto_set_datafile_size:
-                    datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3)
-                    datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3)
-                    datafile_next = DATA_NEXT * datafile_maxsize
+                    datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory)
+                    if not datafile_maxsize:
+                        datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory)
 
             if auto_set_datafile_size:
+                datafile_size = min(datafile_maxsize, datafile_size)
+                datafile_next = max(MINI_DATA_FILE_NEXT, DATA_NEXT * datafile_maxsize)
                 update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
-                update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
-                update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
+                if 'datafile_maxsize' not in user_server_config:
+                    update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
+                if 'datafile_next' not in user_server_config:
+                    update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
             if auto_set_log_disk_size:
                 update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
         else:
@@ -410,9 +412,12 @@ def summit_config():
                     stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need)))
                     success = False
                     continue
-                datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes
+                datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).bytes
                 datafile_size = datafile_min_memory_limit * 3
-                datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED
+                if datafile_maxsize:
+                    datafile_size = min(datafile_size, datafile_maxsize)
+                else:
+                    datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED
                 datafile_next = DATA_NEXT * datafile_maxsize
 
             log_disk_min_memory_limit = memory_limit
@@ -431,20 +436,22 @@ def summit_config():
                     stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need)))
                     success = False
                     continue
-                log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).btyes
+                log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).bytes
                 log_disk_size = log_disk_min_memory_limit * 3
 
             if auto_set_memory:
                 update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
             if auto_set_system_memory:
-                system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
+                system_memory = get_system_memory(memory_limit)
                 update_server_conf(server, 'system_memory', system_memory)
             if auto_set_datafile_size:
                 update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
                 if datafile_maxsize > datafile_size:
-                    update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
-                    update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
+                    if 'datafile_maxsize' not in user_server_config:
+                        update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0)))
+                    if 'datafile_next' not in user_server_config:
+                        update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0)))
             if auto_set_log_disk_size:
                 update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
 
@@ -479,10 +486,10 @@ def summit_config():
             server_info = servers_info.get(server)
             if not server_info:
                 continue
-            memory_limit = Capacity(server_info['memory_limit']).btyes
-            system_memory = Capacity(server_info['system_memory']).btyes
-            log_disk_size = Capacity(server_info['log_disk_size']).btyes
-            min_pool_memory = Capacity(server_info['min_pool_memory']).btyes
+            memory_limit = Capacity(server_info['memory_limit']).bytes
+            system_memory = Capacity(server_info['system_memory']).bytes
+            log_disk_size = Capacity(server_info['log_disk_size']).bytes
+            min_pool_memory = Capacity(server_info['min_pool_memory']).bytes
             if
not sys_log_disk_size: if not sys_memory_size: sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30)) @@ -498,7 +505,7 @@ def summit_config(): update_global_conf('ocp_meta_tenant_memory_size', '1536M') if generate_password: - generate_random_password(cluster_config) + generate_random_password(cluster_config, auto_depend) if generate_consistent_config: generate_global_config = generate_configs['global'] @@ -512,7 +519,7 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(Capacity(value).btyes if is_capacity_key else value) + values.append(Capacity(value).bytes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue @@ -578,26 +585,36 @@ def summit_config(): stdio.stop_loading('fail') -def generate_random_password(cluster_config): +def generate_random_password(cluster_config, auto_depend): add_components = cluster_config.get_deploy_added_components() be_depend = cluster_config.be_depends global_config = cluster_config.get_original_global_conf() - if cluster_config.name in add_components and 'root_password' not in global_config: - cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False) - if 'obagent' in add_components and 'obagent' in be_depend and 'ocp_agent_monitor_password' not in global_config: + be_depends = { + component: (auto_depend or component in be_depend) + for component in ['obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express'] + } + added_components = { + component: component in add_components + for component in ['oceanbase', 'oceanbase-ce', 'obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express'] + } + + if added_components[cluster_config.name] and 'root_password' not in global_config: + cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False) + + if added_components['obagent'] and be_depends['obagent'] and 'ocp_agent_monitor_password' not in global_config: cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length(), False) - + if 'proxyro_password' not in global_config: for component_name in ['obproxy', 'obproxy-ce']: - if component_name in add_components and component_name in be_depend: + if added_components[component_name] and be_depends[component_name]: cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length(), False) - - if 'oblogproxy' in add_components and 'oblogproxy' in be_depend and 'cdcro_password' not in global_config: + + if added_components['oblogproxy'] and be_depends['oblogproxy'] and 'cdcro_password' not in global_config: cluster_config.update_global_conf('cdcro_password', ConfigUtil.get_random_pwd_by_total_length(), False) - if ('ocp-express' in add_components and 'ocp-express' in be_depend and 'ocp_meta_password' not in global_config) or \ - (any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]])): - if 'ocp_root_password' not in global_config : + if (added_components['ocp-express'] and be_depends['ocp-express'] and 'ocp_meta_password' not in global_config) or \ + any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]): + if 'ocp_root_password' not in global_config: cluster_config.update_global_conf('ocp_root_password', 
ConfigUtil.get_random_pwd_by_total_length(), False) # cannot be set in the configuration file if 'ocp_meta_password' not in global_config : cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length(), False) diff --git a/plugins/oceanbase/4.2.1.0/parameter.yaml b/plugins/oceanbase/4.2.1.0/parameter.yaml index f06ca39..f1baa5c 100644 --- a/plugins/oceanbase/4.2.1.0/parameter.yaml +++ b/plugins/oceanbase/4.2.1.0/parameter.yaml @@ -641,15 +641,14 @@ description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 - name: enable_syslog_recycle require: false - essential: true type: BOOL default: false min_value: NULL max_value: NULL section: OBSERVER need_restart: false - description_en: specifies whether log file recycling is turned on - description_local: 是否自动回收系统日志 + description_en: specifies whether to include log files generated before restart in the recycling space + description_local: 是否将重启前的日志文件纳入回收空间 - name: max_syslog_file_count require: false essential: true @@ -1864,7 +1863,6 @@ description_local: ocp express的元数据库使用的租户日志磁盘大小 - name: ocp_monitor_tenant_max_cpu name_local: OCP 监控数据库租户的CPU数 - essential: true require: false type: INT default: 1 @@ -1873,7 +1871,6 @@ description_local: ocp 监控数据库使用的CPU数量 - name: ocp_monitor_tenant_memory_size name_local: OCP 监控数据库租户内存 - essential: true require: false type: CAPACITY_MB default: 2G @@ -1882,7 +1879,6 @@ description_local: ocp 监控数据库使用的租户内存大小 - name: ocp_monitor_tenant_log_disk_size name_local: OCP 监控数据库租户日志磁盘大小 - essential: true require: false type: CAPACITY_MB default: 6656M @@ -1938,3 +1934,10 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_agent_monitor_username + require: false + type: STRING + default: ocp_monitor + need_redeploy: true + description_en: The username for the obagent monitor user + description_local: obagent 监控用户名 diff --git a/plugins/oceanbase/4.2.1.4/bootstrap.py b/plugins/oceanbase/4.2.1.4/bootstrap.py index fb1859a..f2d17f6 100644 --- a/plugins/oceanbase/4.2.1.4/bootstrap.py +++ b/plugins/oceanbase/4.2.1.4/bootstrap.py @@ -240,10 +240,11 @@ def bootstrap(plugin_context, need_bootstrap=True, *args, **kwargs): has_obagent = "obagent" in added_components and "obagent" in be_depend if has_obagent or ('ocp_agent_monitor_password' in global_conf and not 'obagent' not in changed_components): value = global_conf['ocp_agent_monitor_password'] if global_conf.get('ocp_agent_monitor_password') is not None else '' - sql = 'create user if not exists "ocp_monitor" IDENTIFIED BY %s' + agent_user = cluster_config.get_global_conf_with_default().get('ocp_agent_monitor_username') + sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user) stdio.verbose(sql) raise_cursor.execute(sql, [value]) - sql = 'grant select on oceanbase.* to ocp_monitor IDENTIFIED BY %s' + sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user) stdio.verbose(sql) raise_cursor.execute(sql, [value]) diff --git a/plugins/oceanbase/4.2.1.4/destroy.py b/plugins/oceanbase/4.2.1.4/destroy.py index 2cef1d6..8f8a98c 100644 --- a/plugins/oceanbase/4.2.1.4/destroy.py +++ b/plugins/oceanbase/4.2.1.4/destroy.py @@ -25,10 +25,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio 
and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: # print stderror global global_ret diff --git a/plugins/oceanbase/4.2.1.4/init.py b/plugins/oceanbase/4.2.1.4/init.py index ea7d200..9894a78 100644 --- a/plugins/oceanbase/4.2.1.4/init.py +++ b/plugins/oceanbase/4.2.1.4/init.py @@ -21,8 +21,7 @@ from __future__ import absolute_import, division, print_function import os -from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage - +from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY stdio = None force = False @@ -35,7 +34,7 @@ def critical(*arg, **kwargs): stdio.error(*arg, **kwargs) -def init_dir(server, client, key, path, link_path=None): +def init_dir(server, client, key, path, deploy_name, link_path=None): if force: ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: @@ -46,6 +45,7 @@ def init_dir(server, client, key, path, link_path=None): ret = client.execute_command('ls %s' % (path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) return False else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=path))) @@ -65,6 +65,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name servers_dirs = {} force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -124,6 +125,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (home_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) @@ -144,6 +146,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (data_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=data_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=data_path))) @@ -152,7 +155,7 @@ def init(plugin_context, *args, **kwargs): link_path = '%s/store' % home_path client.execute_command("if [ ! 
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (data_path, link_path, data_path, link_path)) for key in ['clog', 'slog']: - # init_dir(server, client, key, server_config['%s_dir' % key], os.path.join(data_path, key)) + # init_dir(server, client, key, server_config['%s_dir' % key], deploy_name, os.path.join(data_path, key)) log_dir = server_config['%s_dir' % key] if force: ret = client.execute_command('rm -fr %s/*' % log_dir, timeout=-1) @@ -164,6 +167,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (log_dir)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=log_dir))) diff --git a/plugins/oceanbase/4.2.1.4/parameter.yaml b/plugins/oceanbase/4.2.1.4/parameter.yaml index 5f2a95f..69ffe0e 100644 --- a/plugins/oceanbase/4.2.1.4/parameter.yaml +++ b/plugins/oceanbase/4.2.1.4/parameter.yaml @@ -653,15 +653,14 @@ description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 - name: enable_syslog_recycle require: false - essential: true type: BOOL default: false min_value: NULL max_value: NULL section: OBSERVER need_restart: false - description_en: specifies whether log file recycling is turned on - description_local: 是否自动回收系统日志 + description_en: specifies whether to include log files generated before restart in the recycling space + description_local: 是否将重启前的日志文件纳入回收空间 - name: max_syslog_file_count require: false essential: true @@ -1881,6 +1880,13 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_agent_monitor_username + require: false + type: STRING + default: ocp_monitor + need_redeploy: true + description_en: The username for the obagent monitor user + description_local: obagent 监控用户名 - name: ocp_monitor_tenant require: false type: DICT @@ -1893,7 +1899,6 @@ description_local: ocp 的监控数据库使用的租户定义 - name: ocp_monitor_tenant_max_cpu name_local: OCP 监控数据库租户的CPU数 - essential: true require: false type: INT default: 1 @@ -1902,7 +1907,6 @@ description_local: ocp 监控数据库使用的CPU数量 - name: ocp_monitor_tenant_memory_size name_local: OCP 监控数据库租户内存 - essential: true require: false type: CAPACITY_MB default: 2G @@ -1911,7 +1915,6 @@ description_local: ocp 监控数据库使用的租户内存大小 - name: ocp_monitor_tenant_log_disk_size name_local: OCP 监控数据库租户日志磁盘大小 - essential: true require: false type: CAPACITY_MB default: 6656M diff --git a/plugins/oceanbase/4.2.1.4/reload.py b/plugins/oceanbase/4.2.1.4/reload.py index 8bb6459..078eb16 100644 --- a/plugins/oceanbase/4.2.1.4/reload.py +++ b/plugins/oceanbase/4.2.1.4/reload.py @@ -20,9 +20,36 @@ from __future__ import absolute_import, division, print_function +from urllib.parse import urlparse + +import requests + from _deploy import InnerConfigItem from _errno import EC_OBSERVER_INVALID_MODFILY_GLOBAL_KEY +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + 
stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?' + else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname) + return cfg_url + def reload(plugin_context, new_cluster_config, *args, **kwargs): stdio = plugin_context.stdio @@ -55,6 +82,10 @@ def reload(plugin_context, new_cluster_config, *args, **kwargs): stdio.verbose('%s is not a oceanbase parameter. skip' % key) continue n_value = new_config[key] + if key == 'obconfig_url': + cfg_url = get_ob_configserver_cfg_url(n_value, cluster_config.name, stdio) + if cfg_url: + n_value = cfg_url if key not in config or config[key] != n_value: if isinstance(key, InnerConfigItem) and key in inner_keys: zone = config['zone'] diff --git a/plugins/oceanbase/4.2.1.4/start.py b/plugins/oceanbase/4.2.1.4/start.py index a8b5152..6f15fc3 100644 --- a/plugins/oceanbase/4.2.1.4/start.py +++ b/plugins/oceanbase/4.2.1.4/start.py @@ -24,6 +24,7 @@ import time import requests from copy import deepcopy +from urllib.parse import urlparse from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS, EC_OBSERVER_FAIL_TO_START_OCS @@ -32,6 +33,30 @@ from tool import NetUtil, ConfigUtil +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?' 
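+        # Editor's illustration (not part of the original patch; host and appname invented):
+        # the link_char separator keeps the query string well formed in every case, e.g.
+        #   'http://cs:8080/services?'     -> 'http://cs:8080/services?Action=ObRootServiceInfo&ObCluster=obcluster'
+        #   'http://cs:8080/services'      -> 'http://cs:8080/services?Action=ObRootServiceInfo&ObCluster=obcluster'
+        #   'http://cs:8080/services?k=v'  -> 'http://cs:8080/services?k=v&Action=ObRootServiceInfo&ObCluster=obcluster'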
+ else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname) + return cfg_url + + def config_url(ocp_config_server, appname, cid): if ocp_config_server[-1] == '?': link_char = '' @@ -88,8 +113,51 @@ def __exit__(self, *args, **kwargs): self.client.del_env(env_key) -def start(plugin_context, start_obshell=True, *args, **kwargs): - cluster_config = plugin_context.cluster_config +def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap): + not_opt_str = OrderedDict({ + 'mysql_port': '-p', + 'rpc_port': '-P', + 'zone': '-z', + 'nodaemon': '-N', + 'appname': '-n', + 'cluster_id': '-c', + 'data_dir': '-d', + 'devname': '-i', + 'syslog_level': '-l', + 'ipv6': '-6', + 'mode': '-m', + 'scn': '-f', + 'local_ip': '-I' + }) + not_cmd_opt = [ + 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario', + 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', + 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port' + ] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + + opt_str = [] + for key in param_list: + if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + if need_bootstrap: + if cfg_url: + opt_str.append('obconfig_url=\'%s\'' % cfg_url) + else: + cmd.append(rs_list_opt) + + param_list['mysql_port'] = server_config['mysql_port'] + for key in not_opt_str: + if key in param_list: + value = get_value(key) + cmd.append('%s %s' % (not_opt_str[key], value)) + if len(opt_str) > 0: + cmd.append('-o %s' % ','.join(opt_str)) + +def start(plugin_context, new_cluster_config=None, start_obshell=True, *args, **kwargs): + cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio @@ -104,12 +172,15 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): if not appname or not cluster_id: stdio.error('need appname and cluster_id') return - try: - cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio) - if not cfg_url: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) - except: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) + cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio) + if not cfg_url: + try: + cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), + stdio) + if not cfg_url: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) + except: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) elif 'ob-configserver' in cluster_config.depends and appname: obc_cluster_config = cluster_config.get_depend_config('ob-configserver') vip_address = obc_cluster_config.get('vip_address') @@ -141,6 +212,17 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): server_config = cluster_config.get_server_conf(server) home_path = server_config['home_path'] + param_config = {} + if new_cluster_config: + old_config = plugin_context.cluster_config.get_server_conf_with_default(server) + new_config = 
new_cluster_config.get_server_conf_with_default(server) + for key in new_config: + param_value = new_config[key] + if key not in old_config or old_config[key] != param_value: + param_config[key] = param_value + else: + param_config = server_config + if not server_config.get('data_dir'): server_config['data_dir'] = '%s/store' % home_path @@ -163,42 +245,7 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): use_parameter = True cmd = [] if use_parameter: - not_opt_str = OrderedDict({ - 'mysql_port': '-p', - 'rpc_port': '-P', - 'zone': '-z', - 'nodaemon': '-N', - 'appname': '-n', - 'cluster_id': '-c', - 'data_dir': '-d', - 'devname': '-i', - 'syslog_level': '-l', - 'ipv6': '-6', - 'mode': '-m', - 'scn': '-f', - 'local_ip': '-I' - }) - not_cmd_opt = [ - 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', - 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', - 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', - 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password','ocp_root_password','obshell_port' - ] - get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] - opt_str = [] - for key in server_config: - if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): - value = get_value(key) - opt_str.append('%s=%s' % (key, value)) - if cfg_url: - opt_str.append('obconfig_url=\'%s\'' % cfg_url) - else: - cmd.append(rs_list_opt) - for key in not_opt_str: - if key in server_config: - value = get_value(key) - cmd.append('%s %s' % (not_opt_str[key], value)) - cmd.append('-o %s' % ','.join(opt_str)) + construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap) else: cmd.append('-p %s' % server_config['mysql_port']) diff --git a/plugins/oceanbase/4.2.1.4/start_check.py b/plugins/oceanbase/4.2.1.4/start_check.py index 9ede8e7..69e8495 100644 --- a/plugins/oceanbase/4.2.1.4/start_check.py +++ b/plugins/oceanbase/4.2.1.4/start_check.py @@ -52,7 +52,7 @@ def time_delta(client): time_ed = time.time() * 1000 time_it = time_ed - time_st - time_srv -= time_it + time_srv -= time_it/2 return time_srv - time_st @@ -65,21 +65,24 @@ def get_mount_path(disk, _path): return _mount_path -def get_system_memory(memory_limit, min_pool_memory): - if memory_limit <= 8 << 30: - system_memory = 2 << 30 - elif memory_limit <= 16 << 30: - system_memory = 3 << 30 - elif memory_limit <= 32 << 30: +def get_system_memory(memory_limit): + if memory_limit < 12 << 30: + system_memory = 1 << 30 + elif memory_limit < 20 << 30: system_memory = 5 << 30 - elif memory_limit <= 48 << 30: + elif memory_limit < 40 << 30: + system_memory = 6 << 30 + elif memory_limit < 60 << 30: system_memory = 7 << 30 - elif memory_limit <= 64 << 30: + elif memory_limit < 80 << 30: + system_memory = 8 << 30 + elif memory_limit < 100 << 30: + system_memory = 9 << 30 + elif memory_limit < 130 << 30: system_memory = 10 << 30 else: - memory_limit_gb = memory_limit >> 30 - system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30 - return max(system_memory, min_pool_memory) + system_memory = int(memory_limit * 0.08) + return system_memory def get_disk_info_by_path(path, client, stdio): @@ -181,7 +184,7 @@ def system_memory_check(): factor = 0.75 suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in 
generate_configs.get(server, {}) - if memory_limit < server_memory_config[server]['system_memory']: + if memory_limit <= server_memory_config[server]['system_memory']: critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest]) elif memory_limit * factor < server_memory_config[server]['system_memory']: alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest]) @@ -215,7 +218,8 @@ def system_memory_check(): 'aio': err.CheckStatus(), 'net': err.CheckStatus(), 'ntp': err.CheckStatus(), - 'ocp meta db': err.CheckStatus() + 'ocp tenant memory': err.CheckStatus(), + 'ocp tenant disk': err.CheckStatus() } check_status[server].update(kernel_check_status) @@ -379,7 +383,7 @@ def system_memory_check(): memory_limit = 0 percentage = 0 if server_config.get('memory_limit'): - memory_limit = Capacity(server_config['memory_limit']).btyes + memory_limit = Capacity(server_config['memory_limit']).bytes if production_mode and memory_limit < PRO_MEMORY_MIN: error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) memory['num'] += memory_limit @@ -392,7 +396,7 @@ def system_memory_check(): memory['servers'][server] = { 'num': memory_limit, 'percentage': percentage, - 'system_memory': Capacity(server_config.get('system_memory', 0)).btyes + 'system_memory': Capacity(server_config.get('system_memory', 0)).bytes } data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') @@ -566,7 +570,7 @@ def system_memory_check(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes ip_server_memory_info[ip] = server_memory_stats server_memory_stat = servers_memory[ip] @@ -614,7 +618,7 @@ def system_memory_check(): # slog need 4G disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100 else: - disk[mount_path]['need'] += Capacity(need).btyes + disk[mount_path]['need'] += Capacity(need).bytes disk[mount_path]['need'] += slog_size disk[mount_path]['is_data_disk'] = True @@ -634,7 +638,7 @@ def system_memory_check(): log_disk_size = disk[mount_path]['total'] * need / 100 else: # log_disk_size - log_disk_size = Capacity(need).btyes + log_disk_size = Capacity(need).bytes servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size disk[mount_path]['need'] += log_disk_size disk[mount_path]['is_clog_disk'] = True @@ -677,19 +681,32 @@ def system_memory_check(): critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=Capacity(avail), need=Capacity(need)), tmp_suggests + suggests) global_conf = cluster_config.get_global_conf() - has_ocp = 'ocp-express' in plugin_context.components + has_ocp = 'ocp-express' in plugin_context.components or 'ocp-server-ce' in plugin_context.components if not has_ocp and any([key.startswith('ocp_meta') for key in global_conf]): has_ocp = True + if has_ocp and need_bootstrap and parameter_check: global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default()) original_global_conf = cluster_config.get_original_global_conf() - ocp_meta_tenant_prefix = 'ocp_meta_tenant_' - for key in global_conf_with_default: - if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None): - 
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] - meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes + tenants_components_map = { + "meta": ["ocp-express", "ocp-server", "ocp-server-ce"], + "monitor": ["ocp-server", "ocp-server-ce"], + } + tenant_memory = tenant_log_disk = memory_limit = system_memory = log_disk_size = sys_log_disk_size = 0 + for tenant, component_list in tenants_components_map.items(): + prefix = "ocp_%s_tenant_" % tenant + tenant_key = "ocp_%s_tenant" % tenant + for key in global_conf_with_default: + if key.startswith(prefix) and not original_global_conf.get(key, None): + global_conf_with_default['ocp_%s_tenant' % tenant][key.replace(prefix, '', 1)] = global_conf_with_default[key] + if set(list(plugin_context.components)) & set(component_list): + tenant_memory_default = global_conf_with_default[tenant_key].get('memory_size', '0') + tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).bytes + tenant_log_disk_default = global_conf_with_default[tenant_key].get('log_disk_size', '0') + tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).bytes + servers_sys_memory = {} - if meta_db_memory_size: + if tenant_memory: sys_memory_size = None if 'sys_tenant' in global_conf and 'memory_size' in global_conf['sys_tenant']: sys_memory_size = global_conf['sys_tenant']['memory_size'] @@ -701,39 +718,39 @@ def system_memory_check(): system_memory = servers_memory[server.ip]['servers'][server]['system_memory'] min_pool_memory = servers_min_pool_memory[server] if system_memory == 0: - system_memory = get_system_memory(memory_limit, min_pool_memory) + system_memory = get_system_memory(memory_limit) if not sys_memory_size: - sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes)) - if meta_db_memory_size + system_memory + sys_memory_size <= memory_limit: + sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').bytes)) + if tenant_memory + system_memory + sys_memory_size <= memory_limit: break else: - suggest = err.SUG_OCP_EXPRESS_REDUCE_META_DB_MEM.format() - suggest.auto_fix = True - if 'ocp_meta_tenant_memory_size' in global_generate_config: - suggest.auto_fix = False - error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM.format(), [suggest]) - - meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size') - meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes if meta_db_log_disk_size else meta_db_log_disk_size - if not meta_db_log_disk_size and meta_db_memory_size: - meta_db_log_disk_size = meta_db_memory_size * 3 - if meta_db_log_disk_size: + ocp_meta_tenant_mem = original_global_conf.get('ocp_meta_tenant', {}).get('memory_size', global_conf_with_default['ocp_meta_tenant'].get('memory_size', '0')) + ocp_monitor_tenant_mem = original_global_conf.get('ocp_monitor_tenant', {}).get('memory_size', global_conf_with_default['ocp_monitor_tenant'].get('memory_size', '0')) + critical('ocp tenant memory', err.EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory), memory_limit=Capacity(memory_limit), system_memory=Capacity(system_memory), 
sys_tenant_memory=Capacity(sys_memory_size), ocp_meta_tenant_memory=Capacity(ocp_meta_tenant_mem), ocp_monitor_tenant_memory=Capacity(ocp_monitor_tenant_mem)), [err.SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH.format()]) + + if tenant_log_disk: for server in cluster_config.servers: log_disk_size = servers_log_disk_size[server] sys_log_disk_size = servers_sys_memory.get(server, 0) - if meta_db_log_disk_size + sys_log_disk_size <= log_disk_size: + if tenant_log_disk + sys_log_disk_size <= log_disk_size: break else: - suggest = err.SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK.format() - suggest.auto_fix = True - if 'ocp_meta_tenant_log_disk_size' in global_generate_config: - suggest.auto_fix = False - error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK.format(), [suggest]) + critical('ocp tenant disk', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='log_disk_size', avail=Capacity(log_disk_size - sys_log_disk_size), need=Capacity(tenant_log_disk))) if success: for ip in servers_net_interface: client = servers_clients[ip] + is_check_ping_permission = False for devname in servers_net_interface[ip]: + if not is_check_ping_permission: + ret = client.execute_command('ping -W 1 -c 1 127.0.0.1') + if ret.code == 127: + critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format()) + break + if not ret: + critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format()) + break + is_check_ping_permission = True if client.is_localhost() and (devname != 'lo' and devname is not None) or (not client.is_localhost() and devname == 'lo'): suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip) suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config diff --git a/plugins/oceanbase/4.2.1.4/stop.py b/plugins/oceanbase/4.2.1.4/stop.py index dd563ce..8c4838a 100644 --- a/plugins/oceanbase/4.2.1.4/stop.py +++ b/plugins/oceanbase/4.2.1.4/stop.py @@ -23,10 +23,27 @@ import json import time import requests +from urllib.parse import urlparse from tool import NetUtil +def is_ob_configserver(obconfig_url, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code == 404: + stdio.verbose('request %s status_code: 404' % url) + return False + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + return True + + def config_url(ocp_config_server, appname, cid): cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname) proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname) @@ -71,14 +88,15 @@ def stop(plugin_context, *args, **kwargs): obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None stdio.start_loading('Stop observer') if obconfig_url and appname and cluster_id: - try: - cfg_url, cleanup_config_url_content, register_to_config_url = config_url(obconfig_url, appname, cluster_id) - stdio.verbose('post %s' % cleanup_config_url_content) - response = requests.post(cleanup_config_url_content) - if response.status_code != 200: - stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code)) - except: - stdio.warn('failed to clean up the configuration url content') + if not is_ob_configserver(obconfig_url, stdio): + try: + cfg_url, cleanup_config_url_content, 
register_to_config_url = config_url(obconfig_url, appname, cluster_id) + stdio.verbose('post %s' % cleanup_config_url_content) + response = requests.post(cleanup_config_url_content) + if response.status_code != 200: + stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code)) + except: + stdio.warn('failed to clean up the configuration url content') servers = {} for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) diff --git a/plugins/oceanbase/4.2.1.4/takeover.py b/plugins/oceanbase/4.2.1.4/takeover.py index bbb03f9..f52d98b 100644 --- a/plugins/oceanbase/4.2.1.4/takeover.py +++ b/plugins/oceanbase/4.2.1.4/takeover.py @@ -210,7 +210,7 @@ def error(msg): for server in dict_servers: config = dict_servers[server] if 'memory_limit' in config: - dict_servers[server]['production_mode'] = CapacityMB(config['memory_limit']).btyes >= PRO_MEMORY_MIN + dict_servers[server]['production_mode'] = CapacityMB(config['memory_limit']).bytes >= PRO_MEMORY_MIN if 'cluster' in config: config['appname'] = config['cluster'] del config['cluster'] diff --git a/plugins/oceanbase/4.2.1.4/upgrade.py b/plugins/oceanbase/4.2.1.4/upgrade.py index b9d87ae..6d0d9c7 100644 --- a/plugins/oceanbase/4.2.1.4/upgrade.py +++ b/plugins/oceanbase/4.2.1.4/upgrade.py @@ -27,40 +27,15 @@ import tool from _rpm import Version from ssh import LocalClient +from tool import Exector as BaseExector -class Exector(object): +class Exector(BaseExector): def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio, script_query_timeout=''): - self.tmp_prefix = tmp_prefix - self._host = host - self._port = port - self._user = user - self._pwd = pwd - self._cmd = None - self.stdio = stdio - self._exector = os.path.join(exector_path, 'executer27/bin/executer') + super(Exector, self).__init__(host, port, user, pwd, exector_path, stdio) self.script_query_timeout = script_query_timeout - - @property - def host(self): - return self._host - - @property - def port(self): - return self._port - - @property - def user(self): - return self._user - - @property - def pwd(self): - return self._pwd - - @property - def exector(self): - return self._exector + self.tmp_prefix = tmp_prefix @property def cmd(self): @@ -68,43 +43,6 @@ def cmd(self): self._cmd = '%s %%s -h %s -P %s -u %s %s' % (self._exector, self.host, self.port, self.user, '-p %s' % tool.ConfigUtil.passwd_format(self.pwd) if self.pwd else '') return self._cmd - @host.setter - def host(self, value): - self._host = value - self._cmd = None - - @port.setter - def port(self, value): - self._port = value - self._cmd = None - - @user.setter - def user(self, value): - self._user = value - self._cmd = None - - @pwd.setter - def pwd(self, value): - self._pwd = value - self._cmd = None - - @pwd.setter - def exector(self, exector_path): - self._exector = os.path.join(exector_path, 'bin/executer27') - self._cmd = None - - def create_temp(self, repository, direct_upgrade=False): - tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5) - if not os.path.exists(tmp_path): - relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc' - script_dir = os.path.join(repository.repository_dir, relative_dir) - LocalClient.put_dir(script_dir, tmp_path) - return tmp_path - - def clear_temp(self): - tmp_path = os.path.join('/tmp', self.tmp_prefix) - tool.DirectoryUtil.rm(tmp_path) - def exec_script(self, name, repository, direct_upgrade=False, can_skip=False, param=''): script_dir = self.create_temp(repository, direct_upgrade) path 
= os.path.join(script_dir, name) @@ -237,6 +175,7 @@ def run(self): total = len(self.route) self.apply_param_plugin(self.repositories[self.route_index - 1]) while self.route_index < total: + setattr(self.plugin_context.options, 'without_parameter', True) start_plugin = self.search_py_script_plugin(self.route_index - 1, 'start') self.call_plugin(start_plugin, start_obshell=self.has_obshell) self.close() @@ -246,14 +185,14 @@ def run(self): while self.process_index < self.process_total: try: if not self.process[self.process_index](): - self._dump() return False self.process_index += 1 self.process_route_index = self.route_index except Exception as e: - self._dump() self.stdio.exception(str(e)) return False + finally: + self._dump() self.process_index = 0 self.route_index = self.next_stage + 1 self.exector.clear_temp() @@ -277,8 +216,8 @@ def close(self): self.db = None self.exector = None - def connect(self): - if self.cursor is None or self.execute_sql('show tables', error=False) is False: + def connect(self, cache=True): + if self.cursor is None or not cache or self.execute_sql('show tables', error=False) is False: ret = self.call_plugin(self.connect_plugin) if not ret: return False @@ -436,6 +375,7 @@ def broken_sql(self, sql, sleep_time=3): if ret is None: break time.sleep(sleep_time) + self.connect(cache=False) def wait(self): if not self.connect(): diff --git a/plugins/oceanbase/4.2.2.0/bootstrap.py b/plugins/oceanbase/4.2.2.0/bootstrap.py index 076050d..51971b4 100644 --- a/plugins/oceanbase/4.2.2.0/bootstrap.py +++ b/plugins/oceanbase/4.2.2.0/bootstrap.py @@ -48,6 +48,7 @@ def bootstrap(plugin_context, need_bootstrap=True, *args, **kwargs): be_depend = cluster_config.be_depends global_conf = cluster_config.get_global_conf() ocp_config = cluster_config.get_be_depend_config('ocp-server-ce', with_default=False) + obagent_config = cluster_config.get_be_depend_config('obagent', with_default=False) bootstrap = [] floor_servers = {} zones_config = {} @@ -240,10 +241,11 @@ def bootstrap(plugin_context, need_bootstrap=True, *args, **kwargs): has_obagent = "obagent" in added_components and "obagent" in be_depend if has_obagent or ('ocp_agent_monitor_password' in global_conf and 'obagent' not in changed_components): value = global_conf['ocp_agent_monitor_password'] if global_conf.get('ocp_agent_monitor_password') is not None else '' - sql = 'create user if not exists "ocp_monitor" IDENTIFIED BY %s' + agent_user = cluster_config.get_global_conf_with_default().get('ocp_agent_monitor_username') + sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=agent_user) stdio.verbose(sql) raise_cursor.execute(sql, [value]) - sql = 'grant select on oceanbase.* to ocp_monitor IDENTIFIED BY %s' + sql = "grant select on oceanbase.* to '{username}' IDENTIFIED BY %s".format(username=agent_user) stdio.verbose(sql) raise_cursor.execute(sql, [value]) diff --git a/plugins/oceanbase/4.2.2.0/destroy.py b/plugins/oceanbase/4.2.2.0/destroy.py index 2cef1d6..8f8a98c 100644 --- a/plugins/oceanbase/4.2.2.0/destroy.py +++ b/plugins/oceanbase/4.2.2.0/destroy.py @@ -25,10 +25,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, 
*args, **kwargs): def clean(server, path): client = clients[server] - ret = client.execute_command('rm -fr %s/' % (path), timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: # print stderror global global_ret diff --git a/plugins/oceanbase/4.2.2.0/init.py b/plugins/oceanbase/4.2.2.0/init.py index ea7d200..efc1be4 100644 --- a/plugins/oceanbase/4.2.2.0/init.py +++ b/plugins/oceanbase/4.2.2.0/init.py @@ -21,8 +21,7 @@ from __future__ import absolute_import, division, print_function import os -from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage - +from _errno import EC_CONFIG_CONFLICT_DIR, EC_FAIL_TO_INIT_PATH, InitDirFailedErrorMessage, EC_COMPONENT_DIR_NOT_EMPTY stdio = None force = False @@ -35,7 +34,7 @@ def critical(*arg, **kwargs): stdio.error(*arg, **kwargs) -def init_dir(server, client, key, path, link_path=None): +def init_dir(server, client, key, path, deploy_name, link_path=None): if force: ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: @@ -46,6 +45,7 @@ def init_dir(server, client, key, path, link_path=None): ret = client.execute_command('ls %s' % (path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) return False else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=path))) @@ -65,6 +65,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name servers_dirs = {} force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -124,6 +125,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (home_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) @@ -144,6 +146,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (data_path)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=data_path))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=data_path))) @@ -152,7 +155,7 @@ def init(plugin_context, *args, **kwargs): link_path = '%s/store' % home_path client.execute_command("if [ ! 
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (data_path, link_path, data_path, link_path)) for key in ['clog', 'slog']: - # init_dir(server, client, key, server_config['%s_dir' % key], os.path.join(data_path, key)) + # init_dir(server, client, key, server_config['%s_dir' % key], deploy_name, os.path.join(data_path, key)) log_dir = server_config['%s_dir' % key] if force: ret = client.execute_command('rm -fr %s/*' % log_dir, timeout=-1) @@ -164,6 +167,7 @@ def init(plugin_context, *args, **kwargs): ret = client.execute_command('ls %s' % (log_dir)) if not ret or ret.stdout.strip(): critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + critical(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=log_dir))) diff --git a/plugins/oceanbase/4.2.2.0/parameter.yaml b/plugins/oceanbase/4.2.2.0/parameter.yaml index 42d3b0d..5314272 100644 --- a/plugins/oceanbase/4.2.2.0/parameter.yaml +++ b/plugins/oceanbase/4.2.2.0/parameter.yaml @@ -653,15 +653,14 @@ description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 - name: enable_syslog_recycle require: false - essential: true type: BOOL default: false min_value: NULL max_value: NULL section: OBSERVER need_restart: false - description_en: specifies whether log file recycling is turned on - description_local: 是否自动回收系统日志 + description_en: specifies whether to include log files generated before restart in the recycling space + description_local: 是否将重启前的日志文件纳入回收空间 - name: max_syslog_file_count require: false essential: true @@ -1883,6 +1882,13 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_agent_monitor_username + require: false + type: STRING + default: ocp_monitor + need_redeploy: true + description_en: The username for the obagent monitor user + description_local: obagent 监控用户名 - name: ocp_monitor_tenant require: false type: DICT @@ -1895,7 +1901,6 @@ description_local: ocp 的监控数据库使用的租户定义 - name: ocp_monitor_tenant_max_cpu name_local: OCP 监控数据库租户的CPU数 - essential: true require: false type: INT default: 1 @@ -1904,7 +1909,6 @@ description_local: ocp 监控数据库使用的CPU数量 - name: ocp_monitor_tenant_memory_size name_local: OCP 监控数据库租户内存 - essential: true require: false type: CAPACITY_MB default: 2G @@ -1913,7 +1917,6 @@ description_local: ocp 监控数据库使用的租户内存大小 - name: ocp_monitor_tenant_log_disk_size name_local: OCP 监控数据库租户日志磁盘大小 - essential: true require: false type: CAPACITY_MB default: 6656M diff --git a/plugins/oceanbase/4.2.2.0/restart.py b/plugins/oceanbase/4.2.2.0/restart.py index 81bd0aa..f2b1ca7 100644 --- a/plugins/oceanbase/4.2.2.0/restart.py +++ b/plugins/oceanbase/4.2.2.0/restart.py @@ -203,7 +203,7 @@ def _restart(self): clients = self.new_clients cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config - if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository): + if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=self.cluster_config, new_cluster_config=self.new_cluster_config, local_home_path=self.local_home_path, repository=self.repository): self.stdio.stop_loading('stop_loading', 'fail') return False self.close() diff --git a/plugins/oceanbase/4.2.2.0/scale_out_check.py 
b/plugins/oceanbase/4.2.2.0/scale_out_check.py index 9af0fb6..a77298a 100644 --- a/plugins/oceanbase/4.2.2.0/scale_out_check.py +++ b/plugins/oceanbase/4.2.2.0/scale_out_check.py @@ -60,7 +60,6 @@ def scale_out_check(plugin_context, *args, **kwargs): add_plugin('bootstrap', plugins) if 'ob-configserver' in added_components: cluster_config.add_depend_component('ob-configserver') - need_restart = True if cluster_config.added_servers: add_plugin('connect', plugins) add_plugin('bootstrap', plugins) diff --git a/plugins/oceanbase/4.2.2.0/start.py b/plugins/oceanbase/4.2.2.0/start.py index a8b5152..30385c8 100644 --- a/plugins/oceanbase/4.2.2.0/start.py +++ b/plugins/oceanbase/4.2.2.0/start.py @@ -24,6 +24,7 @@ import time import requests from copy import deepcopy +from urllib.parse import urlparse from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS, EC_OBSERVER_FAIL_TO_START_OCS @@ -32,6 +33,30 @@ from tool import NetUtil, ConfigUtil +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?' + else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname) + return cfg_url + + def config_url(ocp_config_server, appname, cid): if ocp_config_server[-1] == '?': link_char = '' @@ -87,9 +112,51 @@ def __exit__(self, *args, **kwargs): else: self.client.del_env(env_key) +def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap): + not_opt_str = OrderedDict({ + 'mysql_port': '-p', + 'rpc_port': '-P', + 'zone': '-z', + 'nodaemon': '-N', + 'appname': '-n', + 'cluster_id': '-c', + 'data_dir': '-d', + 'devname': '-i', + 'syslog_level': '-l', + 'ipv6': '-6', + 'mode': '-m', + 'scn': '-f', + 'local_ip': '-I' + }) + not_cmd_opt = [ + 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario', + 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', + 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port' + ] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + + opt_str = [] + for key in param_list: + if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + if need_bootstrap: + if cfg_url: + opt_str.append('obconfig_url=\'%s\'' % cfg_url) + else: + cmd.append(rs_list_opt) -def start(plugin_context, start_obshell=True, *args, **kwargs): - cluster_config = plugin_context.cluster_config + param_list['mysql_port'] = server_config['mysql_port'] + for key in not_opt_str: + if key in param_list: + value = get_value(key) + cmd.append('%s %s' % (not_opt_str[key], value)) 
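+    # Reviewer's sketch (values invented, not part of the patch): with
+    # mysql_port=2881, rpc_port=2882, zone='zone1' and two generic parameters,
+    # the fragments built above combine into
+    #   cmd     == ['-p 2881', '-P 2882', "-z 'zone1'"]
+    #   opt_str == ["memory_limit='8G'", "datafile_size='20G'"]
+    # so the observer command line ends with
+    #   -p 2881 -P 2882 -z 'zone1' -o memory_limit='8G',datafile_size='20G'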
+ if len(opt_str) > 0: + cmd.append('-o %s' % ','.join(opt_str)) + +def start(plugin_context, new_cluster_config=None, start_obshell=True, *args, **kwargs): + cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio @@ -104,12 +171,15 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): if not appname or not cluster_id: stdio.error('need appname and cluster_id') return - try: - cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio) - if not cfg_url: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) - except: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) + cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio) + if not cfg_url: + try: + cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), + stdio) + if not cfg_url: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) + except: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) elif 'ob-configserver' in cluster_config.depends and appname: obc_cluster_config = cluster_config.get_depend_config('ob-configserver') vip_address = obc_cluster_config.get('vip_address') @@ -141,6 +211,17 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): server_config = cluster_config.get_server_conf(server) home_path = server_config['home_path'] + param_config = {} + if new_cluster_config: + old_config = plugin_context.cluster_config.get_server_conf_with_default(server) + new_config = new_cluster_config.get_server_conf_with_default(server) + for key in new_config: + param_value = new_config[key] + if key not in old_config or old_config[key] != param_value: + param_config[key] = param_value + else: + param_config = server_config + if not server_config.get('data_dir'): server_config['data_dir'] = '%s/store' % home_path @@ -163,42 +244,7 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): use_parameter = True cmd = [] if use_parameter: - not_opt_str = OrderedDict({ - 'mysql_port': '-p', - 'rpc_port': '-P', - 'zone': '-z', - 'nodaemon': '-N', - 'appname': '-n', - 'cluster_id': '-c', - 'data_dir': '-d', - 'devname': '-i', - 'syslog_level': '-l', - 'ipv6': '-6', - 'mode': '-m', - 'scn': '-f', - 'local_ip': '-I' - }) - not_cmd_opt = [ - 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', - 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', - 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', - 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password','ocp_root_password','obshell_port' - ] - get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] - opt_str = [] - for key in server_config: - if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): - value = get_value(key) - opt_str.append('%s=%s' % (key, value)) - if cfg_url: - opt_str.append('obconfig_url=\'%s\'' % cfg_url) - else: - cmd.append(rs_list_opt) - for key in not_opt_str: - if key in server_config: - value = get_value(key) - cmd.append('%s %s' % (not_opt_str[key], value)) - cmd.append('-o %s' % ','.join(opt_str)) + construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap) else: 
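+            # Reviewer note: in the non-parameterized branch below, only the mysql
+            # port is passed explicitly; the remaining settings are expected to come
+            # from the configuration the observer persisted during an earlier start.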
cmd.append('-p %s' % server_config['mysql_port']) diff --git a/plugins/oceanbase/4.2.2.0/start_check.py b/plugins/oceanbase/4.2.2.0/start_check.py index 3463bca..97e5df3 100644 --- a/plugins/oceanbase/4.2.2.0/start_check.py +++ b/plugins/oceanbase/4.2.2.0/start_check.py @@ -52,7 +52,7 @@ def time_delta(client): time_ed = time.time() * 1000 time_it = time_ed - time_st - time_srv -= time_it + time_srv -= time_it/2 return time_srv - time_st @@ -65,21 +65,24 @@ def get_mount_path(disk, _path): return _mount_path -def get_system_memory(memory_limit, min_pool_memory): - if memory_limit <= 8 << 30: - system_memory = 2 << 30 - elif memory_limit <= 16 << 30: - system_memory = 3 << 30 - elif memory_limit <= 32 << 30: +def get_system_memory(memory_limit): + if memory_limit < 12 << 30: + system_memory = 1 << 30 + elif memory_limit < 20 << 30: system_memory = 5 << 30 - elif memory_limit <= 48 << 30: + elif memory_limit < 40 << 30: + system_memory = 6 << 30 + elif memory_limit < 60 << 30: system_memory = 7 << 30 - elif memory_limit <= 64 << 30: + elif memory_limit < 80 << 30: + system_memory = 8 << 30 + elif memory_limit < 100 << 30: + system_memory = 9 << 30 + elif memory_limit < 130 << 30: system_memory = 10 << 30 else: - memory_limit_gb = memory_limit >> 30 - system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30 - return max(system_memory, min_pool_memory) + system_memory = int(memory_limit * 0.08) + return system_memory def get_disk_info_by_path(path, client, stdio): @@ -181,7 +184,7 @@ def system_memory_check(): factor = 0.75 suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {}) - if memory_limit < server_memory_config[server]['system_memory']: + if memory_limit <= server_memory_config[server]['system_memory']: critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest]) elif memory_limit * factor < server_memory_config[server]['system_memory']: alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest]) @@ -380,7 +383,7 @@ def system_memory_check(): memory_limit = 0 percentage = 0 if server_config.get('memory_limit'): - memory_limit = Capacity(server_config['memory_limit']).btyes + memory_limit = Capacity(server_config['memory_limit']).bytes if production_mode and memory_limit < PRO_MEMORY_MIN: error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) memory['num'] += memory_limit @@ -393,7 +396,7 @@ def system_memory_check(): memory['servers'][server] = { 'num': memory_limit, 'percentage': percentage, - 'system_memory': Capacity(server_config.get('system_memory', 0)).btyes + 'system_memory': Capacity(server_config.get('system_memory', 0)).bytes } data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') @@ -567,7 +570,7 @@ def system_memory_check(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes ip_server_memory_info[ip] = server_memory_stats server_memory_stat = servers_memory[ip] @@ -615,7 +618,7 @@ def system_memory_check(): # slog need 4G disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100 else: - 
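# (reviewer sketch, not part of the patch) the time_delta fix above subtracts
# half of the measured round trip instead of all of it: the server timestamp
# is sampled roughly mid-flight, so the one-way latency is ~rtt/2, assuming
# symmetric links (the usual NTP-style offset estimate). Standalone sketch
# with a local stand-in for the remote call:
import time

def estimate_offset(fetch_server_time_ms):
    t0 = time.time() * 1000
    server_ms = fetch_server_time_ms()  # remote call in the real plugin
    t1 = time.time() * 1000
    rtt = t1 - t0
    # server_ms was observed about rtt/2 after t0
    return (server_ms - rtt / 2) - t0

# With a local "server" the offset should be close to zero:
print(round(estimate_offset(lambda: time.time() * 1000), 1))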
disk[mount_path]['need'] += Capacity(need).btyes + disk[mount_path]['need'] += Capacity(need).bytes disk[mount_path]['need'] += slog_size disk[mount_path]['is_data_disk'] = True @@ -635,7 +638,7 @@ def system_memory_check(): log_disk_size = disk[mount_path]['total'] * need / 100 else: # log_disk_size - log_disk_size = Capacity(need).btyes + log_disk_size = Capacity(need).bytes servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size disk[mount_path]['need'] += log_disk_size disk[mount_path]['is_clog_disk'] = True @@ -698,9 +701,9 @@ def system_memory_check(): global_conf_with_default['ocp_%s_tenant' % tenant][key.replace(prefix, '', 1)] = global_conf_with_default[key] if set(list(plugin_context.components)) & set(component_list): tenant_memory_default = global_conf_with_default[tenant_key].get('memory_size', '0') - tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).btyes + tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).bytes tenant_log_disk_default = global_conf_with_default[tenant_key].get('log_disk_size', '0') - tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).btyes + tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).bytes servers_sys_memory = {} if tenant_memory: @@ -715,14 +718,15 @@ def system_memory_check(): system_memory = servers_memory[server.ip]['servers'][server]['system_memory'] min_pool_memory = servers_min_pool_memory[server] if system_memory == 0: - system_memory = get_system_memory(memory_limit, min_pool_memory) + system_memory = get_system_memory(memory_limit) if not sys_memory_size: - sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes)) + sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').bytes)) if tenant_memory + system_memory + sys_memory_size <= memory_limit: break else: - critical('ocp tenant memory', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='memory', avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory))) - + ocp_meta_tenant_mem = original_global_conf.get('ocp_meta_tenant', {}).get('memory_size', global_conf_with_default['ocp_meta_tenant'].get('memory_size', '0')) + ocp_monitor_tenant_mem = original_global_conf.get('ocp_monitor_tenant', {}).get('memory_size', global_conf_with_default['ocp_monitor_tenant'].get('memory_size', '0')) + critical('ocp tenant memory', err.EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory), memory_limit=Capacity(memory_limit), system_memory=Capacity(system_memory), sys_tenant_memory=Capacity(sys_memory_size), ocp_meta_tenant_memory=Capacity(ocp_meta_tenant_mem), ocp_monitor_tenant_memory=Capacity(ocp_monitor_tenant_mem)), [err.SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH.format()]) if tenant_log_disk: for server in cluster_config.servers: log_disk_size = servers_log_disk_size[server] @@ -735,7 +739,17 @@ def system_memory_check(): if success: for ip in servers_net_interface: client = servers_clients[ip] + is_check_ping_permission = False for devname in servers_net_interface[ip]: + if not is_check_ping_permission: + ret = client.execute_command('ping -W 1 
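# (reviewer sketch, not part of the patch) the sys-tenant sizing rule used in
# the OCP tenant memory check above: a quarter of the memory left after
# system_memory, clamped to [min_pool_memory, 16G]. Constants here are
# illustrative; the real values come from the cluster config:
G = 1 << 30

def sys_tenant_memory(memory_limit, system_memory, min_pool_memory):
    return max(min_pool_memory, min((memory_limit - system_memory) * 0.25, 16 * G))

assert sys_tenant_memory(32 * G, 6 * G, 2 * G) == 6.5 * G
assert sys_tenant_memory(8 * G, 1 * G, 2 * G) == 2 * G      # clamped up to min_pool_memory
assert sys_tenant_memory(128 * G, 10 * G, 2 * G) == 16 * G  # capped at 16G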
-c 1 127.0.0.1') + if ret.code == 127: + critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format()) + break + if not ret: + critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format()) + break + is_check_ping_permission = True if client.is_localhost() and (devname != 'lo' and devname is not None) or (not client.is_localhost() and devname == 'lo'): suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip) suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config diff --git a/plugins/oceanbase/4.2.2.0/stop.py b/plugins/oceanbase/4.2.2.0/stop.py index dd563ce..8c4838a 100644 --- a/plugins/oceanbase/4.2.2.0/stop.py +++ b/plugins/oceanbase/4.2.2.0/stop.py @@ -23,10 +23,27 @@ import json import time import requests +from urllib.parse import urlparse from tool import NetUtil +def is_ob_configserver(obconfig_url, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code == 404: + stdio.verbose('request %s status_code: 404' % url) + return False + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + return True + + def config_url(ocp_config_server, appname, cid): cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname) proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname) @@ -71,14 +88,15 @@ def stop(plugin_context, *args, **kwargs): obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None stdio.start_loading('Stop observer') if obconfig_url and appname and cluster_id: - try: - cfg_url, cleanup_config_url_content, register_to_config_url = config_url(obconfig_url, appname, cluster_id) - stdio.verbose('post %s' % cleanup_config_url_content) - response = requests.post(cleanup_config_url_content) - if response.status_code != 200: - stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code)) - except: - stdio.warn('failed to clean up the configuration url content') + if not is_ob_configserver(obconfig_url, stdio): + try: + cfg_url, cleanup_config_url_content, register_to_config_url = config_url(obconfig_url, appname, cluster_id) + stdio.verbose('post %s' % cleanup_config_url_content) + response = requests.post(cleanup_config_url_content) + if response.status_code != 200: + stdio.warn('%s status code %s' % (cleanup_config_url_content, response.status_code)) + except: + stdio.warn('failed to clean up the configuration url content') servers = {} for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) diff --git a/plugins/oceanbase/4.2.2.0/upgrade.py b/plugins/oceanbase/4.2.2.0/upgrade.py index b9d87ae..6d0d9c7 100644 --- a/plugins/oceanbase/4.2.2.0/upgrade.py +++ b/plugins/oceanbase/4.2.2.0/upgrade.py @@ -27,40 +27,15 @@ import tool from _rpm import Version from ssh import LocalClient +from tool import Exector as BaseExector -class Exector(object): +class Exector(BaseExector): def __init__(self, tmp_prefix, host, port, user, pwd, exector_path, stdio, script_query_timeout=''): - self.tmp_prefix = tmp_prefix - self._host = host - self._port = port - self._user = user - self._pwd = pwd - self._cmd = None - self.stdio = stdio - self._exector = os.path.join(exector_path, 'executer27/bin/executer') + super(Exector, 
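# (reviewer sketch, not part of the patch) is_ob_configserver() above is
# effectively tri-state: True (the URL answers the ob-configserver probe),
# False (the probe returned 404, i.e. an OCP-style config server), None (the
# probe was unreachable). stop() only branches on truthiness, so "unknown" is
# treated like "not ob-configserver" and the cleanup POST is still attempted:
def should_cleanup(probe_result):
    # probe_result: True / False / None, as returned by is_ob_configserver()
    return not probe_result

assert should_cleanup(False) is True   # OCP config server: clean up the URL content
assert should_cleanup(None) is True    # probe failed: fall back to cleanup
assert should_cleanup(True) is False   # ob-configserver: skip the cleanup POST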
self).__init__(host, port, user, pwd, exector_path, stdio) self.script_query_timeout = script_query_timeout - - @property - def host(self): - return self._host - - @property - def port(self): - return self._port - - @property - def user(self): - return self._user - - @property - def pwd(self): - return self._pwd - - @property - def exector(self): - return self._exector + self.tmp_prefix = tmp_prefix @property def cmd(self): @@ -68,43 +43,6 @@ def cmd(self): self._cmd = '%s %%s -h %s -P %s -u %s %s' % (self._exector, self.host, self.port, self.user, '-p %s' % tool.ConfigUtil.passwd_format(self.pwd) if self.pwd else '') return self._cmd - @host.setter - def host(self, value): - self._host = value - self._cmd = None - - @port.setter - def port(self, value): - self._port = value - self._cmd = None - - @user.setter - def user(self, value): - self._user = value - self._cmd = None - - @pwd.setter - def pwd(self, value): - self._pwd = value - self._cmd = None - - @pwd.setter - def exector(self, exector_path): - self._exector = os.path.join(exector_path, 'bin/executer27') - self._cmd = None - - def create_temp(self, repository, direct_upgrade=False): - tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5) - if not os.path.exists(tmp_path): - relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc' - script_dir = os.path.join(repository.repository_dir, relative_dir) - LocalClient.put_dir(script_dir, tmp_path) - return tmp_path - - def clear_temp(self): - tmp_path = os.path.join('/tmp', self.tmp_prefix) - tool.DirectoryUtil.rm(tmp_path) - def exec_script(self, name, repository, direct_upgrade=False, can_skip=False, param=''): script_dir = self.create_temp(repository, direct_upgrade) path = os.path.join(script_dir, name) @@ -237,6 +175,7 @@ def run(self): total = len(self.route) self.apply_param_plugin(self.repositories[self.route_index - 1]) while self.route_index < total: + setattr(self.plugin_context.options, 'without_parameter', True) start_plugin = self.search_py_script_plugin(self.route_index - 1, 'start') self.call_plugin(start_plugin, start_obshell=self.has_obshell) self.close() @@ -246,14 +185,14 @@ def run(self): while self.process_index < self.process_total: try: if not self.process[self.process_index](): - self._dump() return False self.process_index += 1 self.process_route_index = self.route_index except Exception as e: - self._dump() self.stdio.exception(str(e)) return False + finally: + self._dump() self.process_index = 0 self.route_index = self.next_stage + 1 self.exector.clear_temp() @@ -277,8 +216,8 @@ def close(self): self.db = None self.exector = None - def connect(self): - if self.cursor is None or self.execute_sql('show tables', error=False) is False: + def connect(self, cache=True): + if self.cursor is None or not cache or self.execute_sql('show tables', error=False) is False: ret = self.call_plugin(self.connect_plugin) if not ret: return False @@ -436,6 +375,7 @@ def broken_sql(self, sql, sleep_time=3): if ret is None: break time.sleep(sleep_time) + self.connect(cache=False) def wait(self): if not self.connect(): diff --git a/plugins/oceanbase/4.3.0.0/create_tenant.py b/plugins/oceanbase/4.3.0.0/create_tenant.py index a0b8bfb..87f3494 100644 --- a/plugins/oceanbase/4.3.0.0/create_tenant.py +++ b/plugins/oceanbase/4.3.0.0/create_tenant.py @@ -20,10 +20,13 @@ from __future__ import absolute_import, division, print_function +import os import time from collections import defaultdict from copy import deepcopy +import const +from tool import Exector from _errno 
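# (reviewer sketch, not part of the patch) the upgrade-loop change above moves
# _dump() into a finally block, so upgrade progress is persisted whether a
# step succeeds, returns False, or raises. Minimal illustration of the
# pattern; Runner and its methods are stand-ins, not OBD APIs:
class Runner:
    def __init__(self, steps):
        self.steps = steps
        self.index = 0

    def _dump(self):
        print('checkpoint at step %d' % self.index)  # persist progress here

    def run(self):
        while self.index < len(self.steps):
            try:
                if not self.steps[self.index]():
                    return False
                self.index += 1
            except Exception as e:
                print('step failed: %s' % e)
                return False
            finally:
                self._dump()  # always record how far we got
        return True

Runner([lambda: True, lambda: True]).run()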
import EC_OBSERVER_CAN_NOT_MIGRATE_IN from _types import Capacity @@ -32,6 +35,7 @@ def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[], stdio=None): + global tenant_cursor if not user: user = 'SYS' if mode == 'oracle' else 'root' # find tenant ip, port @@ -114,7 +118,7 @@ def get_parsed_option(key, default=''): if value is None: return value try: - parsed_value = Capacity(value).btyes + parsed_value = Capacity(value).bytes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -247,11 +251,11 @@ def error(msg='', *arg, **kwargs): STANDBY_MIN_LOG_DISK_SIZE = 1073741824 * 4 if cpu_available < MIN_CPU: - return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) + return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=MIN_CPU)) if mem_available < MIN_MEMORY: - return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY))) + return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(MIN_MEMORY))) if log_disk_available < MIN_LOG_DISK_SIZE: - return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_MEMORY))) + return error('{zone} not enough log_disk. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(MIN_LOG_DISK_SIZE))) if primary_tenant_info: recreate_cmd = '' @@ -272,7 +276,7 @@ def error(msg='', *arg, **kwargs): max_cpu = get_option('max_cpu', cpu_available) min_cpu = get_option('min_cpu', max_cpu) if cpu_available < max_cpu: - return error('Resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu)) + return error('{zone} not enough cpu. (Available: {available}, Need: {need})'.format(zone=zone_list, available=cpu_available, need=max_cpu)) if max_cpu < min_cpu: return error('min_cpu must less then max_cpu') if min_cpu < MIN_CPU: @@ -288,20 +292,20 @@ def error(msg='', *arg, **kwargs): log_disk_size = log_disk_available if mem_available < memory_size: - return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size))) + return error('{zone} not enough memory. (Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(mem_available), need=Capacity(memory_size))) if memory_size < MIN_MEMORY: return error('memory must greater then %s' % Capacity(MIN_MEMORY)) # log disk size options if log_disk_size is not None and log_disk_available < log_disk_size: - return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(disk_available), Capacity(log_disk_size))) + return error('{zone} not enough log_disk. 
(Available: {available}, Need: {need})'.format(zone=zone_list, available=Capacity(log_disk_available), need=Capacity(log_disk_size))) if primary_tenant_info: - if Capacity(primary_memory_size).btyes < STANDBY_MIN_MEMORY: + if Capacity(primary_memory_size).bytes < STANDBY_MIN_MEMORY: return error('Primary tenant memory_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_memory_size, STANDBY_MIN_MEMORY)) - if Capacity(primary_memory_size).btyes < STANDBY_WARN_MEMORY: + if Capacity(primary_memory_size).bytes < STANDBY_WARN_MEMORY: stdio.warn('Primary tenant memory_size: {}B , suggestion: {}B'.format(primary_memory_size, STANDBY_WARN_MEMORY)) - if Capacity(primary_log_disk_size).btyes < STANDBY_MIN_LOG_DISK_SIZE: + if Capacity(primary_log_disk_size).bytes < STANDBY_MIN_LOG_DISK_SIZE: return error('Primary tenant log_disk_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_log_disk_size, STANDBY_MIN_LOG_DISK_SIZE)) # iops options @@ -402,17 +406,43 @@ def error(msg='', *arg, **kwargs): db_username = get_option('db_username') db_password = get_option('db_password', '') if db_username: + create_sql, grant_sql = "", "" if mode == "mysql": - sql = """create user if not exists '{username}' IDENTIFIED BY %s; - grant all on *.* to '{username}' WITH GRANT OPTION;""".format( - username=db_username) + create_sql = "create user if not exists '{username}' IDENTIFIED BY %s;".format(username=db_username) + grant_sql = "grant all on *.* to '{username}' WITH GRANT OPTION;".format(username=db_username) else: error("Create user in oracle tenant is not supported") - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio): + if not exec_sql_in_tenant(sql=create_sql, cursor=cursor, tenant=name, mode=mode, args=[db_password], stdio=stdio): stdio.error('failed to create user {}'.format(db_username)) return - exec_sql_in_tenant(sql='show databases;', cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '', stdio=stdio) + if not exec_sql_in_tenant(sql=grant_sql, cursor=cursor, tenant=name, mode=mode, stdio=stdio): + stdio.error('Failed to grant privileges to user {}'.format(db_username)) + return + + clients = plugin_context.clients + client = clients[plugin_context.cluster_config.servers[0]] + repositories = plugin_context.repositories + cluster_config = plugin_context.cluster_config + global_config = cluster_config.get_global_conf() + + time_zone = get_option('time_zone', client.execute_command('date +%:z').stdout.strip()) + exec_sql_in_tenant(sql="SET GLOBAL time_zone='%s';" % time_zone, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '') + + exector_path = get_option('exector_path', '/usr/obd/lib/executer') + exector = Exector(tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user, tenant_cursor.password, exector_path, stdio) + for repository in repositories: + if repository.name in const.COMPS_OB: + time_zone_info_param = os.path.join(repository.repository_dir, 'etc', 'timezone_V1.log') + srs_data_param = os.path.join(repository.repository_dir, 'etc', 'default_srs_data_mysql.sql') + if not exector.exec_script('import_time_zone_info.py', repository, param="-h {} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), time_zone_info_param)): + stdio.warn('execute import_time_zone_info.py failed') + if not exector.exec_script('import_srs_data.py', repository, param="-h 
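# (reviewer sketch, not part of the patch) the tenant time_zone default above
# comes from the target server, not the OBD host: `date +%:z` prints the UTC
# offset (e.g. "+08:00" with GNU date), which is then applied via
# SET GLOBAL time_zone. Local sketch, with subprocess standing in for
# client.execute_command:
import subprocess

def local_utc_offset():
    out = subprocess.run(['date', '+%:z'], capture_output=True, text=True)
    return out.stdout.strip()  # e.g. '+08:00'

print("SET GLOBAL time_zone='%s';" % local_utc_offset())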
{} -P {} -t {} -p '{}' -f {}".format(tenant_cursor.ip, tenant_cursor.port, name, global_config.get("root_password", ''), srs_data_param)): + stdio.warn('execute import_srs_data.py failed') + break + cursors.append(tenant_cursor) + cmd = 'obclient -h%s -P%s -u%s -Doceanbase -A\n' % (tenant_cursor.ip, tenant_cursor.port, tenant_cursor.user) + stdio.print(cmd) else: # create standby tenant # query ip_list diff --git a/plugins/oceanbase/4.3.0.0/generate_config.py b/plugins/oceanbase/4.3.0.0/generate_config.py index 699b0f3..5e08ccb 100644 --- a/plugins/oceanbase/4.3.0.0/generate_config.py +++ b/plugins/oceanbase/4.3.0.0/generate_config.py @@ -21,13 +21,13 @@ from __future__ import absolute_import, division, print_function -import re, os +import re, os, json import time from math import sqrt from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL import _errno as err -from tool import ConfigUtil +from tool import ConfigUtil, FileUtil from _types import Capacity @@ -56,7 +56,7 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals if not only_generate_password: generate_keys += [ 'memory_limit', 'datafile_size', 'log_disk_size', 'system_memory', 'cpu_count', 'production_mode', - 'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size', + 'syslog_level', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size', 'datafile_next', 'datafile_maxsize' ] if generate_password: @@ -70,8 +70,8 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals if original_global_conf.get('cluster_id') is None: cluster_config.update_global_conf('cluster_id', round(time.time()) % 4294901759, False) generate_scenario(plugin_context, generate_config_mini) - if generate_password: - generate_random_password(cluster_config) + if generate_password or only_generate_password: + generate_random_password(cluster_config, auto_depend) if only_generate_password: return plugin_context.return_true() @@ -105,8 +105,6 @@ def summit_config(): global_config = cluster_config.get_global_conf() max_syslog_file_count_default = 4 - if global_config.get('enable_syslog_recycle') is None: - update_global_conf('enable_syslog_recycle', True) if global_config.get('enable_syslog_wf') is None: update_global_conf('enable_syslog_wf', False) if global_config.get('max_syslog_file_count') is None: @@ -146,7 +144,7 @@ def summit_config(): auto_set_min_pool_memory = False system_memory = 0 if user_server_config.get('system_memory'): - system_memory = Capacity(user_server_config.get('system_memory')).btyes + system_memory = Capacity(user_server_config.get('system_memory')).bytes if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config: auto_set_min_pool_memory = True min_pool_memory = server_config['__min_full_resource_pool_memory'] @@ -167,11 +165,11 @@ def summit_config(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes if user_server_config.get('memory_limit_percentage'): if ip in ip_server_memory_info: - total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes + total_memory = Capacity(ip_server_memory_info[ip]['total']).bytes memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100) 
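# (reviewer sketch, not part of the patch) the memory_limit_percentage path
# above derives the limit as a percentage of the host's total memory,
# truncated to an integer byte count; Capacity is OBD's size parser and a
# plain int stands in for it here:
G = 1 << 30

def memory_limit_from_percentage(total_memory_bytes, percentage):
    return int(total_memory_bytes * percentage / 100)

assert memory_limit_from_percentage(64 * G, 80) == int(51.2 * G)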
elif generate_check: stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server)) @@ -210,7 +208,7 @@ def summit_config(): else: memory_limit = MIN_MEMORY else: - memory_limit = Capacity(server_config.get('memory_limit')).btyes + memory_limit = Capacity(server_config.get('memory_limit')).bytes if system_memory == 0: auto_set_system_memory = True @@ -232,7 +230,11 @@ def summit_config(): # disk datafile_size = server_config.get('datafile_size', 0) + if datafile_size: + datafile_size = Capacity(user_server_config.get('datafile_size')).bytes log_disk_size = server_config.get('log_disk_size', 0) + if log_disk_size: + log_disk_size = Capacity(user_server_config.get('log_disk_size')).bytes if not server_config.get('datafile_size') or not server_config.get('log_disk_size'): disk = {'/': 0} ret = client.execute_command('df --block-size=1024') @@ -302,12 +304,13 @@ def summit_config(): else: auto_set_log_disk_size = True - if user_server_config.get('enable_syslog_recycle') is False: - log_size = 1 << 30 # 默认先给1G普通日志空间 - else: + if int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) != 0: log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4 datafile_maxsize = datafile_next = 0 + if user_server_config.get('datafile_maxsize'): + datafile_maxsize = Capacity(user_server_config.get('datafile_maxsize')).bytes + DATA_RESERVED = 0.95 DATA_NEXT = 0.1 if clog_dir_mount == data_dir_mount: @@ -317,13 +320,13 @@ def summit_config(): datafile_size =min_datafile_size = MINI_DATA_FILE_SIZE else: min_datafile_size = datafile_size - MIN_NEED += Capacity(min_datafile_size).btyes + MIN_NEED += Capacity(min_datafile_size).bytes if auto_set_log_disk_size: log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory else: min_log_disk_size = log_disk_size - MIN_NEED += Capacity(min_log_disk_size).btyes - min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes + MIN_NEED += Capacity(min_log_disk_size).bytes + min_need = min_log_size + Capacity(min_datafile_size).bytes + Capacity(min_log_disk_size).bytes disk_free = data_dir_disk['avail'] if MIN_NEED > disk_free: @@ -365,22 +368,26 @@ def summit_config(): if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = min(disk_free - log_disk_size, memory_limit * 3) - datafile_maxsize = max(disk_free - log_disk_size, memory_limit * 3) - datafile_next = DATA_NEXT * datafile_maxsize + datafile_size = min(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory) + if not datafile_maxsize: + datafile_maxsize = max(disk_free - log_disk_size, (memory_limit - system_memory) * 3 + system_memory) else: if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) - datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) - datafile_next = DATA_NEXT * datafile_maxsize + datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory) + if not datafile_maxsize: + datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, (memory_limit - system_memory) * 3 + system_memory) if auto_set_datafile_size: + 
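# (reviewer sketch, not part of the patch) the auto-sizing change above: both
# log_disk_size and the datafile defaults now scale with the memory actually
# available to tenants, (memory_limit - system_memory) * 3 + system_memory,
# instead of a flat memory_limit * 3. Illustrative numbers:
G = 1 << 30

def auto_disk_size(memory_limit, system_memory):
    return (memory_limit - system_memory) * 3 + system_memory

# 32G memory_limit with 6G system_memory: 84G instead of the old 96G
assert auto_disk_size(32 * G, 6 * G) == 84 * G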
datafile_next = max(MINI_DATA_FILE_NEXT, DATA_NEXT * datafile_maxsize) + datafile_size = min(datafile_maxsize, datafile_size) update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) if datafile_maxsize > datafile_size: - update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) - update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) + if 'datafile_maxsize' not in user_server_config: + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + if 'datafile_next' not in user_server_config: + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if auto_set_log_disk_size: update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) else: @@ -401,9 +408,12 @@ def summit_config(): stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue - datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes + datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).bytes datafile_size = datafile_min_memory_limit * 3 - datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED + if datafile_maxsize: + datafile_size = min(datafile_size, datafile_maxsize) + else: + datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED datafile_next = DATA_NEXT * datafile_maxsize log_disk_min_memory_limit = memory_limit @@ -434,8 +444,10 @@ def summit_config(): if auto_set_datafile_size: update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) if datafile_maxsize > datafile_size: - update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) - update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) + if 'datafile_maxsize' not in user_server_config: + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + if 'datafile_next' not in user_server_config: + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if auto_set_log_disk_size: update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) @@ -470,10 +482,10 @@ def summit_config(): server_info = servers_info.get(server) if not server_info: continue - memory_limit = Capacity(server_info['memory_limit']).btyes - system_memory = Capacity(server_info['system_memory']).btyes - log_disk_size = Capacity(server_info['log_disk_size']).btyes - min_pool_memory = Capacity(server_info['min_pool_memory']).btyes + memory_limit = Capacity(server_info['memory_limit']).bytes + system_memory = Capacity(server_info['system_memory']).bytes + log_disk_size = Capacity(server_info['log_disk_size']).bytes + min_pool_memory = Capacity(server_info['min_pool_memory']).bytes if not sys_log_disk_size: if not sys_memory_size: sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30)) @@ -489,7 +501,7 @@ def summit_config(): update_global_conf('ocp_meta_tenant_memory_size', '1536M') if generate_password: - generate_random_password(cluster_config) + generate_random_password(cluster_config, auto_depend) if generate_consistent_config: generate_global_config = generate_configs['global'] @@ -503,7 +515,7 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(Capacity(value).btyes if is_capacity_key else value) + 
values.append(Capacity(value).bytes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue @@ -566,30 +578,46 @@ def summit_config(): stdio.stop_loading('fail') -def generate_random_password(cluster_config): +def generate_random_password(cluster_config, auto_depend): add_components = cluster_config.get_deploy_added_components() be_depend = cluster_config.be_depends global_config = cluster_config.get_original_global_conf() - if cluster_config.name in add_components and 'root_password' not in global_config: - cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False) - if 'obagent' in add_components and 'obagent' in be_depend and 'ocp_agent_monitor_password' not in global_config: + be_depends = { + component: (auto_depend or component in be_depend) + for component in ['obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express'] + } + added_components = { + component: component in add_components + for component in ['oceanbase', 'oceanbase-ce', 'obagent', 'obproxy', 'obproxy-ce', 'oblogproxy', 'ocp-express'] + } + + if added_components[cluster_config.name] and 'root_password' not in global_config: + cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20), False) + + if added_components['obagent'] and be_depends['obagent'] and 'ocp_agent_monitor_password' not in global_config: cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length(), False) if 'proxyro_password' not in global_config: for component_name in ['obproxy', 'obproxy-ce']: - if component_name in add_components and component_name in be_depend: + if added_components[component_name] and be_depends[component_name]: cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length(), False) - if ('ocp-express' in add_components and 'ocp-express' in be_depend and 'ocp_meta_password' not in global_config) or \ - (any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]])): - if 'ocp_root_password' not in global_config : + if added_components['oblogproxy'] and be_depends['oblogproxy'] and 'cdcro_password' not in global_config: + cluster_config.update_global_conf('cdcro_password', ConfigUtil.get_random_pwd_by_total_length(), False) + + if (added_components['ocp-express'] and be_depends['ocp-express'] and 'ocp_meta_password' not in global_config) or \ + any([key in global_config for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]): + if 'ocp_root_password' not in global_config: cluster_config.update_global_conf('ocp_root_password', ConfigUtil.get_random_pwd_by_total_length(), False) # 不支持在配置文件中中配置 if 'ocp_meta_password' not in global_config : cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length(), False) + def generate_scenario(plugin_context, generate_config_mini): cluster_config = plugin_context.cluster_config + repositories = plugin_context.repositories stdio = plugin_context.stdio + scenarios = ['express_oltp', 'complex_oltp', 'olap', 'htap', 'kv'] scenario_check = lambda scenario: scenario in scenarios global_config = cluster_config.get_original_global_conf() diff --git a/plugins/oceanbase/4.3.0.0/parameter.yaml b/plugins/oceanbase/4.3.0.0/parameter.yaml index 681dcff..17a3fd1 100644 --- a/plugins/oceanbase/4.3.0.0/parameter.yaml +++ b/plugins/oceanbase/4.3.0.0/parameter.yaml 
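# (reviewer sketch, not part of the patch) generate_random_password() above
# now gates each password on two lookups: added_components (is the component
# part of this deploy?) and be_depends (is it depended on, or is auto_depend
# set?). A condensed version of the gating rule for one component/key pair:
def needs_password(component, add_components, be_depend, auto_depend, global_config, key):
    added = component in add_components
    depended = auto_depend or component in be_depend
    return added and depended and key not in global_config

assert needs_password('obagent', {'obagent'}, set(), True, {}, 'ocp_agent_monitor_password')
assert not needs_password('obagent', {'obagent'}, set(), False, {}, 'ocp_agent_monitor_password')
assert not needs_password('obagent', {'obagent'}, {'obagent'}, False, {'ocp_agent_monitor_password': 'x'}, 'ocp_agent_monitor_password')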
@@ -653,15 +653,14 @@ description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 - name: enable_syslog_recycle require: false - essential: true type: BOOL default: false min_value: NULL max_value: NULL section: OBSERVER need_restart: false - description_en: specifies whether log file recycling is turned on - description_local: 是否自动回收系统日志 + description_en: specifies whether the log files generated before a restart are included in the recycling space + description_local: 是否将重启前的日志文件纳入回收空间 - name: max_syslog_file_count require: false essential: true @@ -1883,6 +1882,13 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_agent_monitor_username + require: false + type: STRING + default: ocp_monitor + need_redeploy: true + description_en: The user name for obagent monitor user + description_local: obagent 监控用户名 - name: scenario require: true type: STRING @@ -1903,7 +1909,6 @@ description_local: ocp 的监控数据库使用的租户定义 - name: ocp_monitor_tenant_max_cpu name_local: OCP 监控数据库租户的CPU数 - essential: true require: false type: INT default: 1 @@ -1912,7 +1917,6 @@ description_local: ocp 监控数据库使用的CPU数量 - name: ocp_monitor_tenant_memory_size name_local: OCP 监控数据库租户内存 - essential: true require: false type: CAPACITY_MB default: 2G @@ -1921,7 +1925,6 @@ description_local: ocp 监控数据库使用的租户内存大小 - name: ocp_monitor_tenant_log_disk_size name_local: OCP 监控数据库租户日志磁盘大小 - essential: true require: false type: CAPACITY_MB default: 6656M diff --git a/plugins/oceanbase/4.3.0.0/scenario_check.py b/plugins/oceanbase/4.3.0.0/scenario_check.py index a1d8126..6e6a0e7 100644 --- a/plugins/oceanbase/4.3.0.0/scenario_check.py +++ b/plugins/oceanbase/4.3.0.0/scenario_check.py @@ -21,6 +21,7 @@ def scenario_check(plugin_context, scenario='', *args, **kwargs): cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio + scenarios = ['express_oltp', 'complex_oltp', 'olap', 'htap', 'kv'] scenario_check = lambda scenario: scenario in scenarios scenario = getattr(plugin_context.options, 'optimize', scenario) diff --git a/plugins/oceanbase/4.3.0.0/start.py b/plugins/oceanbase/4.3.0.0/start.py index 7745fbf..98937f3 100644 --- a/plugins/oceanbase/4.3.0.0/start.py +++ b/plugins/oceanbase/4.3.0.0/start.py @@ -25,6 +25,7 @@ import time import requests from copy import deepcopy +from urllib.parse import urlparse from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, \ EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS, EC_OBSERVER_FAIL_TO_START_OCS @@ -34,6 +35,30 @@ from tool import NetUtil, ConfigUtil, FileUtil +def get_ob_configserver_cfg_url(obconfig_url, appname, stdio): + parsed_url = urlparse(obconfig_url) + host = parsed_url.netloc + stdio.verbose('obconfig_url host: %s' % host) + url = '%s://%s/debug/pprof/cmdline' % (parsed_url.scheme, host) + try: + response = requests.get(url, allow_redirects=False) + if response.status_code != 200: + stdio.verbose('request %s status_code: %s' % (url, str(response.status_code))) + return None + except Exception: + stdio.verbose('Configserver url check failed: request %s failed' % url) + return None + + if obconfig_url[-1] == '?': + link_char = '' + elif obconfig_url.find('?') == -1: + link_char = '?'
+ else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (obconfig_url, link_char, appname) + return cfg_url + + def config_url(ocp_config_server, appname, cid): if ocp_config_server[-1] == '?': link_char = '' @@ -91,8 +116,51 @@ def __exit__(self, *args, **kwargs): self.client.del_env(env_key) -def start(plugin_context, start_obshell=True, *args, **kwargs): - cluster_config = plugin_context.cluster_config +def construct_opts(server_config, param_list, rs_list_opt, cfg_url, cmd, need_bootstrap): + not_opt_str = OrderedDict({ + 'mysql_port': '-p', + 'rpc_port': '-P', + 'zone': '-z', + 'nodaemon': '-N', + 'appname': '-n', + 'cluster_id': '-c', + 'data_dir': '-d', + 'devname': '-i', + 'syslog_level': '-l', + 'ipv6': '-6', + 'mode': '-m', + 'scn': '-f', + 'local_ip': '-I' + }) + not_cmd_opt = [ + 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario', + 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', + 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port' + ] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + + opt_str = [] + for key in param_list: + if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + if need_bootstrap: + if cfg_url: + opt_str.append('obconfig_url=\'%s\'' % cfg_url) + else: + cmd.append(rs_list_opt) + + param_list['mysql_port'] = server_config['mysql_port'] + for key in not_opt_str: + if key in param_list: + value = get_value(key) + cmd.append('%s %s' % (not_opt_str[key], value)) + if len(opt_str) > 0: + cmd.append('-o %s' % ','.join(opt_str)) + +def start(plugin_context, new_cluster_config=None, start_obshell=True, *args, **kwargs): + cluster_config = new_cluster_config if new_cluster_config else plugin_context.cluster_config options = plugin_context.options clients = plugin_context.clients repositories = plugin_context.repositories @@ -108,12 +176,15 @@ def start(plugin_context, start_obshell=True, *args, **kwargs): if not appname or not cluster_id: stdio.error('need appname and cluster_id') return - try: - cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio) - if not cfg_url: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) - except: - stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) + cfg_url = get_ob_configserver_cfg_url(obconfig_url, appname, stdio) + if not cfg_url: + try: + cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), + stdio) + if not cfg_url: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) + except: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) elif 'ob-configserver' in cluster_config.depends and appname: obc_cluster_config = cluster_config.get_depend_config('ob-configserver') vip_address = obc_cluster_config.get('vip_address') @@ -172,6 +243,17 @@ def _optimize(): server_config = cluster_config.get_server_conf(server) home_path = server_config['home_path'] + param_config = {} + if new_cluster_config: + old_config = plugin_context.cluster_config.get_server_conf_with_default(server) + new_config = 
new_cluster_config.get_server_conf_with_default(server) + for key in new_config: + param_value = new_config[key] + if key not in old_config or old_config[key] != param_value: + param_config[key] = param_value + else: + param_config = server_config + if not server_config.get('data_dir'): server_config['data_dir'] = '%s/store' % home_path @@ -194,45 +276,10 @@ def _optimize(): use_parameter = True cmd = [] if use_parameter: - not_opt_str = OrderedDict({ - 'mysql_port': '-p', - 'rpc_port': '-P', - 'zone': '-z', - 'nodaemon': '-N', - 'appname': '-n', - 'cluster_id': '-c', - 'data_dir': '-d', - 'devname': '-i', - 'syslog_level': '-l', - 'ipv6': '-6', - 'mode': '-m', - 'scn': '-f', - 'local_ip': '-I' - }) - not_cmd_opt = [ - 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario', - 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', - 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', - 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port' - ] optimize_scenario = ['express_oltp', 'complex_oltp', 'olap', 'htap', 'kv'] if scenario in optimize_scenario: _optimize() - get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] - opt_str = [] - for key in server_config: - if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): - value = get_value(key) - opt_str.append('%s=%s' % (key, value)) - if cfg_url: - opt_str.append('obconfig_url=\'%s\'' % cfg_url) - else: - cmd.append(rs_list_opt) - for key in not_opt_str: - if key in server_config: - value = get_value(key) - cmd.append('%s %s' % (not_opt_str[key], value)) - cmd.append('-o %s' % ','.join(opt_str)) + construct_opts(server_config, param_config, rs_list_opt, cfg_url, cmd, need_bootstrap) else: cmd.append('-p %s' % server_config['mysql_port']) diff --git a/plugins/oceanbase/4.3.0.0/start_check.py b/plugins/oceanbase/4.3.0.0/start_check.py index bee9f1c..73ae116 100644 --- a/plugins/oceanbase/4.3.0.0/start_check.py +++ b/plugins/oceanbase/4.3.0.0/start_check.py @@ -52,7 +52,7 @@ def time_delta(client): time_ed = time.time() * 1000 time_it = time_ed - time_st - time_srv -= time_it + time_srv -= time_it/2 return time_srv - time_st @@ -181,7 +181,7 @@ def system_memory_check(): factor = 0.75 suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {}) - if memory_limit < server_memory_config[server]['system_memory']: + if memory_limit <= server_memory_config[server]['system_memory']: critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest]) elif memory_limit * factor < server_memory_config[server]['system_memory']: alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest]) @@ -381,7 +381,7 @@ def system_memory_check(): memory_limit = 0 percentage = 0 if server_config.get('memory_limit'): - memory_limit = Capacity(server_config['memory_limit']).btyes + memory_limit = Capacity(server_config['memory_limit']).bytes if production_mode and memory_limit < PRO_MEMORY_MIN: error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) memory['num'] += memory_limit @@ -394,7 +394,7 @@ def 
system_memory_check(): memory['servers'][server] = { 'num': memory_limit, 'percentage': percentage, - 'system_memory': Capacity(server_config.get('system_memory', 0)).btyes + 'system_memory': Capacity(server_config.get('system_memory', 0)).bytes } data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') @@ -578,7 +578,7 @@ def system_memory_check(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes ip_server_memory_info[ip] = server_memory_stats server_memory_stat = servers_memory[ip] @@ -626,7 +626,7 @@ def system_memory_check(): # slog need 4G disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100 else: - disk[mount_path]['need'] += Capacity(need).btyes + disk[mount_path]['need'] += Capacity(need).bytes disk[mount_path]['need'] += slog_size disk[mount_path]['is_data_disk'] = True @@ -646,7 +646,7 @@ def system_memory_check(): log_disk_size = disk[mount_path]['total'] * need / 100 else: # log_disk_size - log_disk_size = Capacity(need).btyes + log_disk_size = Capacity(need).bytes servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size disk[mount_path]['need'] += log_disk_size disk[mount_path]['is_clog_disk'] = True @@ -709,9 +709,9 @@ def system_memory_check(): global_conf_with_default['ocp_%s_tenant' % tenant][key.replace(prefix, '', 1)] = global_conf_with_default[key] if set(list(plugin_context.components)) & set(component_list): tenant_memory_default = global_conf_with_default[tenant_key].get('memory_size', '0') - tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).btyes + tenant_memory += Capacity(original_global_conf.get(tenant_key, {}).get('memory_size', tenant_memory_default)).bytes tenant_log_disk_default = global_conf_with_default[tenant_key].get('log_disk_size', '0') - tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).btyes + tenant_log_disk += Capacity(original_global_conf.get(tenant_key, {}).get('log_disk_size', tenant_log_disk_default)).bytes servers_sys_memory = {} if tenant_memory: @@ -728,11 +728,13 @@ def system_memory_check(): if system_memory == 0: system_memory = get_system_memory(memory_limit, min_pool_memory) if not sys_memory_size: - sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes)) + sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').bytes)) if tenant_memory + system_memory + sys_memory_size <= memory_limit: break else: - critical('ocp tenant memory', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='memory', avail=Capacity(memory_limit - system_memory - sys_memory_size), need=Capacity(tenant_memory))) + ocp_meta_tenant_mem = original_global_conf.get('ocp_meta_tenant', {}).get('memory_size', global_conf_with_default['ocp_meta_tenant'].get('memory_size', '0')) + ocp_monitor_tenant_mem = original_global_conf.get('ocp_monitor_tenant', {}).get('memory_size', global_conf_with_default['ocp_monitor_tenant'].get('memory_size', '0')) + critical('ocp tenant memory', err.EC_OCP_SERVER_NOT_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(memory_limit - system_memory - sys_memory_size), 
need=Capacity(tenant_memory), memory_limit=Capacity(memory_limit), system_memory=Capacity(system_memory), sys_tenant_memory=Capacity(sys_memory_size), ocp_meta_tenant_memory=Capacity(ocp_meta_tenant_mem), ocp_monitor_tenant_memory=Capacity(ocp_monitor_tenant_mem)), [err.SUG_OCP_SERVER_NOT_EXIST_METADB_TENANT_NOT_ENOUGH.format()]) if tenant_log_disk: for server in cluster_config.servers: @@ -746,7 +748,17 @@ def system_memory_check(): if success: for ip in servers_net_interface: client = servers_clients[ip] + is_check_ping_permission = False for devname in servers_net_interface[ip]: + if not is_check_ping_permission: + ret = client.execute_command('ping -W 1 -c 1 127.0.0.1') + if ret.code == 127: + critical('net', err.EC_OBSERVER_PING_NOT_FOUND.format()) + break + if not ret: + critical('net', err.EC_OBSERVER_PING_FAILED_SUID.format()) + break + is_check_ping_permission = True if client.is_localhost() and (devname != 'lo' and devname is not None) or (not client.is_localhost() and devname == 'lo'): suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip) suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config diff --git a/plugins/ocp-express/1.0.1/display.py b/plugins/ocp-express/1.0.1/display.py index a78d0e0..f56a8a5 100644 --- a/plugins/ocp-express/1.0.1/display.py +++ b/plugins/ocp-express/1.0.1/display.py @@ -42,7 +42,7 @@ def display(plugin_context, cursor, *args, **kwargs): 'url': url, 'status': 'active' if api_cursor and api_cursor.status(stdio) else 'inactive' }) - stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title='ocp-express') + stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title=cluster_config.name) active_result = [r for r in results if r['status'] == 'active'] info_dict = active_result[0] if len(active_result) > 0 else None if info_dict is not None: diff --git a/plugins/ocp-express/1.0.1/generate_config.py b/plugins/ocp-express/1.0.1/generate_config.py index 6696ba9..0d61158 100644 --- a/plugins/ocp-express/1.0.1/generate_config.py +++ b/plugins/ocp-express/1.0.1/generate_config.py @@ -80,6 +80,6 @@ def generate_random_password(cluster_config): global_config = cluster_config.get_original_global_conf() if cluster_config.name in add_components and 'admin_passwd' not in global_config: cluster_config.update_global_conf('admin_passwd', ConfigUtil.get_random_pwd_by_rule(), False) - if cluster_config.name in add_components and 'ocp_root_password' not in global_config: + if cluster_config.name in add_components and 'oceanbase-ce' not in add_components and 'oceanbase' not in add_components and 'ocp_root_password' not in global_config: cluster_config.update_global_conf('ocp_root_password', ConfigUtil.get_random_pwd_by_rule(), False) diff --git a/plugins/ocp-express/1.0.1/start.py b/plugins/ocp-express/1.0.1/start.py index 034a159..b70dfde 100644 --- a/plugins/ocp-express/1.0.1/start.py +++ b/plugins/ocp-express/1.0.1/start.py @@ -344,6 +344,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): database = server_config.get('ocp_meta_db', '') connected = False retries = 300 + tenant_map = {'meta@ocp_meta': {'user': 'meta@ocp', 'database': 'ocp_express'}, 'meta@ocp': {'user': 'meta@ocp_meta', 'database': 'ocp_meta'}} while not connected and retries: for connect_info in connect_infos: retries -= 1 @@ -365,6 +366,9 @@ def 
start(plugin_context, start_env=None, *args, **kwargs): stdio.verbose("failed to update 'need_change_password' to true in user table") break except: + if tenant_map.get(jdbc_username, {}): + database = tenant_map.get(jdbc_username, {}).get('database') + jdbc_username = tenant_map.get(jdbc_username, {}).get('user') time.sleep(1) if not connected: success = False @@ -378,7 +382,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).bytes * 0.5, 0)).lower()) java_bin = server_config['java_bin'] client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ diff --git a/plugins/ocp-express/1.0.1/start_check.py b/plugins/ocp-express/1.0.1/start_check.py index 9854d3c..f6b2357 100644 --- a/plugins/ocp-express/1.0.1/start_check.py +++ b/plugins/ocp-express/1.0.1/start_check.py @@ -424,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = Capacity(server_config['memory_size']).btyes + memory_size = Capacity(server_config['memory_size']).bytes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = Capacity(server_config['logging_file_total_size_cap']).btyes + need_size = Capacity(server_config['logging_file_total_size_cap']).bytes ip = server.ip if ip not in servers_client: servers_client[ip] = client @@ -471,7 +471,7 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: @@ -501,7 +501,7 @@ def critical(item, error, suggests=[]): server_config = env[server] admin_passwd = server_config.get('admin_passwd') if not admin_passwd or not password_check(admin_passwd): - error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='The password must be 8 to 32 characters in length, containing at least 2 uppercase letters, 2 lowercase letters, 2 numbers, and 2 of the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-express/1.0/destroy.py b/plugins/ocp-express/1.0/destroy.py index 9ad83d2..acaed7a 100644 --- a/plugins/ocp-express/1.0/destroy.py +++ b/plugins/ocp-express/1.0/destroy.py @@ -24,11 +24,20 @@ from copy import deepcopy import _errno as err -from tool import Cursor global_ret = True +def check_mount_path(client, path, stdio): + 
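# (reviewer sketch, not part of the patch) check_mount_path greps /proc/mounts
# for the exact path, so destroy() can run 'rm -fr path/*' on a mount point
# instead of trying to remove the mount point itself. Standalone illustration
# of the match, with a local string standing in for client.execute_command:
def is_mount_point(path, mounts_text):
    # /proc/mounts fields: device mountpoint fstype options dump pass
    return any(fields[1] == path
               for fields in (line.split() for line in mounts_text.splitlines())
               if len(fields) > 1)

mounts = 'tmpfs /data tmpfs rw 0 0\n/dev/sda1 / ext4 rw 0 0\n'
assert is_mount_point('/data', mounts)
assert not is_mount_point('/data/ocp', mounts)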
stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + def get_missing_required_parameters(parameters): results = [] @@ -97,7 +106,10 @@ def clean_database(cursor, database): def clean(path): client = clients[server] - ret = client.execute_command('rm -fr %s' % path, timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/ocp-express/1.0/display.py b/plugins/ocp-express/1.0/display.py index e1db4ee..3ab3ffb 100644 --- a/plugins/ocp-express/1.0/display.py +++ b/plugins/ocp-express/1.0/display.py @@ -42,7 +42,7 @@ def display(plugin_context, cursor, *args, **kwargs): 'url': url, 'status': 'active' if api_cursor and api_cursor.status(stdio) else 'inactive' }) - stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title='ocp-express') + stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title=cluster_config.name) active_result = [r for r in results if r['status'] == 'active'] info_dict = active_result[0] if len(active_result) > 0 else None if info_dict is not None: diff --git a/plugins/ocp-express/1.0/init.py b/plugins/ocp-express/1.0/init.py index 0f3757a..885addc 100644 --- a/plugins/ocp-express/1.0/init.py +++ b/plugins/ocp-express/1.0/init.py @@ -39,7 +39,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio - + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -89,6 +89,7 @@ def init(plugin_context, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -104,6 +105,7 @@ def init(plugin_context, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='log dir', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -114,6 +116,7 @@ def init(plugin_context, *args, **kwargs): if not client.execute_command('mkdir -p %s' % log_dir): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='log dir', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue link_path = os.path.join(home_path, 'log') client.execute_command("if [ ! 
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (log_dir, link_path, log_dir, link_path)) diff --git a/plugins/ocp-express/1.0/scale_out_check.py b/plugins/ocp-express/1.0/scale_out_check.py index 7f730e2..665ed84 100644 --- a/plugins/ocp-express/1.0/scale_out_check.py +++ b/plugins/ocp-express/1.0/scale_out_check.py @@ -21,11 +21,21 @@ from __future__ import absolute_import, division, print_function +def add_plugin(component_name, plugins): + if component_name not in plugins: + plugins.append(component_name) + + def scale_out_check(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config + added_components = cluster_config.get_deploy_added_components() + plugins = [] # check if obagents has changed if 'obagent' in cluster_config.depends: servers = cluster_config.get_depend_added_servers('obagent') if len(servers) != 0: return plugin_context.return_true(need_restart=True) + if 'ocp-express' in added_components: + plugin_context.set_variable('auto_depend', True) + add_plugin('generate_config', plugins) return plugin_context.return_true(need_restart=False) diff --git a/plugins/ocp-express/1.0/start.py b/plugins/ocp-express/1.0/start.py index 814eed4..db9d693 100644 --- a/plugins/ocp-express/1.0/start.py +++ b/plugins/ocp-express/1.0/start.py @@ -345,6 +345,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): database = server_config.get('ocp_meta_db', '') connected = False retries = 300 + tenant_map = {'meta@ocp_meta': {'user': 'meta@ocp', 'database': 'ocp_express'}, 'meta@ocp': {'user': 'meta@ocp_meta', 'database': 'ocp_meta'}} while not connected and retries: for connect_info in connect_infos: retries -= 1 @@ -363,6 +364,9 @@ def start(plugin_context, start_env=None, *args, **kwargs): stdio.verbose("failed to update 'need_change_password' to true in iam_user table") break except: + if tenant_map.get(jdbc_username, {}): + database = tenant_map.get(jdbc_username, {}).get('database') + jdbc_username = tenant_map.get(jdbc_username, {}).get('user') time.sleep(1) if not connected: success = False @@ -377,7 +381,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).bytes * 0.5, 0)).lower()) extra_options = { "ocp.iam.encrypted-system-password": system_password } diff --git a/plugins/ocp-express/1.0/start_check.py b/plugins/ocp-express/1.0/start_check.py index 3468440..1d95b13 100644 --- a/plugins/ocp-express/1.0/start_check.py +++ b/plugins/ocp-express/1.0/start_check.py @@ -424,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = Capacity(server_config['memory_size']).btyes + memory_size = Capacity(server_config['memory_size']).bytes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = Capacity(server_config['logging_file_total_size_cap']).btyes + need_size = Capacity(server_config['logging_file_total_size_cap']).bytes ip = server.ip if ip not in servers_client: servers_client[ip] = client @@ -471,7 +471,7 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = 
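The `tenant_map` fallback that now appears in every `start.py` variant handles deployments whose OCP meta tenant was renamed across versions: each failed connection attempt flips between the `meta@ocp_meta`/`ocp_meta` and `meta@ocp`/`ocp_express` credential pairs before sleeping and retrying. A standalone sketch of that loop, with `connect` as a placeholder for the real `Cursor` constructor:

```python
import time

# Alternate credential pairs, covering deployments where the meta
# tenant and database were renamed between OCP Express versions.
TENANT_MAP = {
    'meta@ocp_meta': {'user': 'meta@ocp', 'database': 'ocp_express'},
    'meta@ocp': {'user': 'meta@ocp_meta', 'database': 'ocp_meta'},
}

def connect_with_fallback(connect, user, database, retries=300):
    # `connect` is a placeholder for the real Cursor constructor.
    while retries:
        retries -= 1
        try:
            return connect(user=user, database=database), user, database
        except Exception:
            # On failure, flip to the alternate tenant before retrying.
            alt = TENANT_MAP.get(user)
            if alt:
                user, database = alt['user'], alt['database']
            time.sleep(1)
    return None, user, database
```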
Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: diff --git a/plugins/ocp-express/4.2.1/start.py b/plugins/ocp-express/4.2.1/start.py index bdf5857..7373792 100644 --- a/plugins/ocp-express/4.2.1/start.py +++ b/plugins/ocp-express/4.2.1/start.py @@ -29,7 +29,7 @@ from copy import deepcopy from _types import Capacity, CapacityWithB -from tool import FileUtil, YamlLoader, ConfigUtil +from tool import FileUtil, YamlLoader, ConfigUtil, Cursor from Crypto import Random from Crypto.Hash import SHA @@ -44,62 +44,6 @@ PUB_KEY_FILE = '.ocp-express.pub' -if sys.version_info.major == 2: - import MySQLdb as mysql -else: - import pymysql as mysql -from _stdio import SafeStdio - - -class Cursor(SafeStdio): - - def __init__(self, ip, port, user='root', tenant='sys', password='', database=None, stdio=None): - self.stdio = stdio - self.ip = ip - self.port = port - self._user = user - self.tenant = tenant - self.password = password - self.database = database - self.cursor = None - self.db = None - self._connect() - - @property - def user(self): - if "@" in self._user: - return self._user - if self.tenant: - return "{}@{}".format(self._user, self.tenant) - else: - return self._user - - def _connect(self): - self.stdio.verbose('connect %s -P%s -u%s -p%s' % (self.ip, self.port, self.user, self.password)) - if sys.version_info.major == 2: - self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), passwd=str(self.password), database=self.database) - self.cursor = self.db.cursor(cursorclass=mysql.cursors.DictCursor) - else: - self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), database=self.database, - cursorclass=mysql.cursors.DictCursor) - self.cursor = self.db.cursor() - - def execute(self, sql, args=None, execute_func=None, raise_exception=False, exc_level='error', stdio=None): - try: - stdio.verbose('execute sql: %s. 
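Many hunks in this patch are the `btyes` to `bytes` rename on `_types.Capacity`, plus the JVM sizing line that gives ocp-express half of `memory_size` with `-Xms` pinned to `-Xmx`. A self-contained approximation of that arithmetic (the real `Capacity` class is not reproduced; `to_bytes` and `fmt` are stand-ins):

```python
UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}

def to_bytes(capacity):
    # '512M' -> 536870912; bare numbers are taken as bytes.
    s = str(capacity).strip().upper().rstrip('B') or '0'
    if s[-1] in UNITS:
        return int(float(s[:-1]) * UNITS[s[-1]])
    return int(float(s))

def fmt(n):
    # Render to the largest whole unit, like str(Capacity(n, 0)).
    for unit in ('T', 'G', 'M', 'K'):
        if n >= UNITS[unit]:
            return '%d%s' % (n // UNITS[unit], unit)
    return '%dB' % n

def jvm_memory_option(memory_size):
    # Half of memory_size, with -Xms pinned to -Xmx so the heap never
    # grows past what start_check budgeted for the component.
    half = fmt(to_bytes(memory_size) // 2).lower()
    return '-Xms{0} -Xmx{0}'.format(half)

assert jvm_memory_option('1G') == '-Xms512m -Xmx512m'
```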
args: %s' % (sql, args)) - self.cursor.execute(sql, args) - if not execute_func: - return self.cursor - return getattr(self.cursor, execute_func)() - except Exception as e: - getattr(stdio, exc_level)(EC_SQL_EXECUTE_FAILED.format(sql=sql)) - if raise_exception is None: - raise_exception = self._raise_exception - if raise_exception: - stdio.exception('') - raise e - return False - def generate_key(client, key_dir, stdio): rsa = RSA.generate(1024) private_key = rsa @@ -344,13 +288,14 @@ def start(plugin_context, start_env=None, *args, **kwargs): database = server_config.get('ocp_meta_db', '') connected = False retries = 300 + tenant_map = {'meta@ocp_meta': {'user': 'meta@ocp', 'database': 'ocp_express'}, 'meta@ocp': {'user': 'meta@ocp_meta', 'database': 'ocp_meta'}} while not connected and retries: for connect_info in connect_infos: retries -= 1 server_ip = connect_info[0] server_port = connect_info[-1] try: - ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) + ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, stdio=stdio) jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) connected = True if 'ocp-express' in added_components: @@ -362,6 +307,9 @@ def start(plugin_context, start_env=None, *args, **kwargs): stdio.verbose("failed to update 'need_change_password' to true in user table") break except: + if tenant_map.get(jdbc_username, {}): + database = tenant_map.get(jdbc_username, {}).get('database') + jdbc_username = tenant_map.get(jdbc_username, {}).get('user') time.sleep(1) if not connected: success = False @@ -375,7 +323,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).bytes * 0.5, 0)).lower()) java_bin = server_config['java_bin'] client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ diff --git a/plugins/ocp-express/4.2.1/start_check.py b/plugins/ocp-express/4.2.1/start_check.py index 0d2914a..b88e765 100644 --- a/plugins/ocp-express/4.2.1/start_check.py +++ b/plugins/ocp-express/4.2.1/start_check.py @@ -424,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = Capacity(server_config['memory_size']).btyes + memory_size = Capacity(server_config['memory_size']).bytes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = Capacity(server_config['logging_file_total_size_cap']).btyes + need_size = Capacity(server_config['logging_file_total_size_cap']).bytes ip = server.ip if ip not in servers_client: servers_client[ip] = client @@ -471,7 +471,7 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: @@ -501,7 +501,7 @@ def 
critical(item, error, suggests=[]): server_config = env[server] admin_passwd = server_config.get('admin_passwd') if not admin_passwd or not password_check(admin_passwd): - error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='The password must be 8 to 32 characters in length, containing at least 2 uppercase letters, 2 lowercase letters, 2 numbers, and 2 of the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-express/4.2.2/start.py b/plugins/ocp-express/4.2.2/start.py index c236551..ee03eb6 100644 --- a/plugins/ocp-express/4.2.2/start.py +++ b/plugins/ocp-express/4.2.2/start.py @@ -28,7 +28,8 @@ import sys from copy import deepcopy -from tool import FileUtil, YamlLoader, ConfigUtil +from tool import FileUtil, YamlLoader, ConfigUtil, Cursor +from _errno import EC_SQL_EXECUTE_FAILED from Crypto import Random from Crypto.Hash import SHA @@ -41,47 +42,6 @@ PUB_KEY_FILE = '.ocp-express.pub' -if sys.version_info.major == 2: - import MySQLdb as mysql -else: - import pymysql as mysql -from _stdio import SafeStdio - - -class Cursor(SafeStdio): - - def __init__(self, ip, port, user='root', tenant='sys', password='', database=None, stdio=None): - self.stdio = stdio - self.ip = ip - self.port = port - self._user = user - self.tenant = tenant - self.password = password - self.database = database - self.cursor = None - self.db = None - self._connect() - - @property - def user(self): - if "@" in self._user: - return self._user - if self.tenant: - return "{}@{}".format(self._user, self.tenant) - else: - return self._user - - def _connect(self): - self.stdio.verbose('connect %s -P%s -u%s -p%s' % (self.ip, self.port, self.user, self.password)) - if sys.version_info.major == 2: - self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), passwd=str(self.password), database=self.database) - self.cursor = self.db.cursor(cursorclass=mysql.cursors.DictCursor) - else: - self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), database=self.database, - cursorclass=mysql.cursors.DictCursor) - self.cursor = self.db.cursor() - - def generate_key(client, key_dir, stdio): rsa = RSA.generate(1024) private_key = rsa @@ -272,6 +232,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio + added_components = cluster_config.get_deploy_added_components() if not start_env: start_env = prepare_parameters(cluster_config, stdio) @@ -327,17 +288,28 @@ def start(plugin_context, start_env=None, *args, **kwargs): database = server_config.get('ocp_meta_db', '') connected = False retries = 300 + tenant_map = {'meta@ocp_meta': {'user': 'meta@ocp', 'database': 'ocp_express'}, 'meta@ocp': {'user': 'meta@ocp_meta', 'database': 'ocp_meta'}} while not connected and retries: for connect_info in connect_infos: retries -= 1 server_ip = connect_info[0] server_port = connect_info[-1] try: - Cursor(ip=server_ip, 
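The reworded `admin_passwd` rule is stricter than the old "at least 3 of 4 character classes" text. The diff does not show `password_check` itself; a sketch of a validator that matches the new message's stated rule:

```python
import re

SPECIAL = r'~!@#%^&*_\-+=|(){}\[\]:;,.?/'

def password_check(passwd):
    # 8-32 chars, >=2 uppercase, >=2 lowercase, >=2 digits, >=2 of the
    # allowed special characters, and nothing outside those classes.
    if not 8 <= len(passwd) <= 32:
        return False
    if re.search('[^A-Za-z0-9%s]' % SPECIAL, passwd):
        return False
    return all(len(re.findall(pattern, passwd)) >= 2
               for pattern in ('[A-Z]', '[a-z]', '[0-9]', '[%s]' % SPECIAL))

assert password_check('AAbb11~~')
assert not password_check('OnlyThreeClasses1')
```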
port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) + ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, stdio=stdio) jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) connected = True + if 'ocp-express' in added_components: + if ob_cursor.execute("select * from %s.config_properties limit 1" % database, exc_level='verbose'): + if not ob_cursor.execute("update %s.config_properties set `value`=NULL, default_value=NULL where `key`='ocp.version' or `key`='ocp.version.full'" % database, exc_level='verbose'): + stdio.verbose("failed to update 'ocp.version' and 'ocp.version.full' to NULL in config_properties table") + if ob_cursor.execute("select * from %s.user limit 1" % database, exc_level='verbose'): + if not ob_cursor.execute("update %s.user set need_change_password=true where id='100'" % database, exc_level='verbose'): + stdio.verbose("failed to update 'need_change_password' to true in user table") break except: + if tenant_map.get(jdbc_username, {}): + database = tenant_map.get(jdbc_username, {}).get('database') + jdbc_username = tenant_map.get(jdbc_username, {}).get('user') time.sleep(1) if not connected: success = False @@ -351,7 +323,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).bytes * 0.5, 0)).lower()) java_bin = server_config['java_bin'] client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ diff --git a/plugins/ocp-express/4.2.2/start_check.py b/plugins/ocp-express/4.2.2/start_check.py index 308200d..b8c6353 100644 --- a/plugins/ocp-express/4.2.2/start_check.py +++ b/plugins/ocp-express/4.2.2/start_check.py @@ -531,7 +531,7 @@ def critical(item, error, suggests=[]): server_config = env[server] admin_passwd = server_config.get('admin_passwd') if not admin_passwd or not password_check(admin_passwd): - error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='The password must be 8 to 32 characters in length, containing at least 2 uppercase letters, 2 lowercase letters, 2 numbers, and 2 of the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-server/4.2.1/connect.py b/plugins/ocp-server/4.2.1/connect.py index 96299ef..b436733 100644 --- a/plugins/ocp-server/4.2.1/connect.py +++ b/plugins/ocp-server/4.2.1/connect.py @@ -39,7 +39,7 @@ def __init__(self, code, content): def __bool__(self): return self.code == 200 - def __init__(self, ip, port, username=None, password=None, component_name=None, base_url=None): + def __init__(self, ip, port, username=None, password=None, component_name=None, base_url=None, stdio=None): self.auth 
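When `ocp-express` is among `added_components` but the meta database already holds state, the start plugins now blank the recorded `ocp.version`/`ocp.version.full` (forcing the bootstrap/migration path to re-run) and re-arm `need_change_password` for the built-in admin (id 100). The same two statements issued through PyMySQL, assuming a reachable metadb; connection parameters are placeholders:

```python
import pymysql

def reseed_ocp_meta(host, port, user, password, database):
    # Blanking the recorded version forces ocp-express to re-run its
    # bootstrap/migration path; need_change_password re-arms the
    # first-login password prompt for the built-in admin (id=100).
    conn = pymysql.connect(host=host, port=port, user=user,
                           password=password, database=database)
    try:
        with conn.cursor() as cur:
            cur.execute(
                "UPDATE config_properties SET `value`=NULL, default_value=NULL"
                " WHERE `key` IN ('ocp.version', 'ocp.version.full')")
            cur.execute(
                "UPDATE `user` SET need_change_password=true WHERE id='100'")
        conn.commit()
    finally:
        conn.close()
```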
= None self.ip = ip self.port = port @@ -49,6 +49,8 @@ def __init__(self, ip, port, username=None, password=None, component_name=None, self.component_name = component_name if self.username: self.auth = HTTPBasicAuth(username=username, password=password) + self.stdio = stdio + self.stdio.verbose('connect {} ({}:{} by user {})'.format(component_name, ip, port, username)) def status(self, stdio=None): @@ -181,8 +183,7 @@ def return_true(**kwargs): config = cluster_config.get_server_conf(server) username = 'admin' if not address else user password = config['admin_password'] if not address else password - stdio.verbose('connect {} ({}:{} by user {})'.format(cluster_config.name, server.ip, config['port'], username)) - cursor = OcpServerCursor(ip=server.ip, port=config['port'], username=username, password=password, component_name=cluster_config.name, base_url=address) + cursor = OcpServerCursor(ip=server.ip, port=config['port'] if not address else '', username=username, password=password, component_name=cluster_config.name, base_url=address, stdio=stdio) if cursor.status(stdio=stdio): cursors[server] = cursor if not cursors: diff --git a/plugins/ocp-server/4.2.1/destroy.py b/plugins/ocp-server/4.2.1/destroy.py index 5c2e389..9ab13db 100644 --- a/plugins/ocp-server/4.2.1/destroy.py +++ b/plugins/ocp-server/4.2.1/destroy.py @@ -29,6 +29,17 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def get_missing_required_parameters(parameters): results = [] for key in ["jdbc_url"]: @@ -105,7 +116,12 @@ def clean_database(cursor, database): def clean(path): client = clients[server] - ret = client.execute_command('sudo rm -fr %s/*' % path, timeout=-1) + cmd = 'rm -fr %s/' % path + if check_mount_path(client, path, stdio): + cmd = 'rm -fr %s/*' % path + if not client.execute_command('[ `id -u` == "0" ]') and server_config.get('launch_user', '') and client.execute_command('sudo -n true'): + cmd = 'sudo ' + cmd + ret = client.execute_command(cmd, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/ocp-server/4.2.1/init.py b/plugins/ocp-server/4.2.1/init.py index c90a912..7b9d033 100644 --- a/plugins/ocp-server/4.2.1/init.py +++ b/plugins/ocp-server/4.2.1/init.py @@ -24,8 +24,11 @@ from glob import glob import _errno as err +from _arch import getArchList +from core import ObdHome from const import CONST_OBD_HOME from ssh import LocalClient +from _mirror import MirrorRepositoryManager def _clean(server, client, path, stdio=None): @@ -48,27 +51,28 @@ def _ocp_lib(client, home_path, soft_dir='', stdio=None): client.execute_command('mkdir -p -m 775 %s/logs/ocp/' % home_path, timeout=-1) OBD_HOME = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd') - for rpm in glob(os.path.join(OBD_HOME, 'mirror/local/*ocp-agent-*.rpm')): - name = os.path.basename(rpm) - client.put_file(rpm, os.path.join(home_path, 'ocp-server/lib/', name)) + mirror_manager = MirrorRepositoryManager(OBD_HOME) + pkgs = [] + for arch in ['x86_64', 'aarch64']: + pkg = mirror_manager.get_exact_pkg(name="ocp-agent-ce", arch=arch, only_download=True) + if pkg: + pkgs.append(pkg) + for comp in ['oceanbase-ce', 'oceanbase-ce-libs', 'oceanbase-ce-utils', 
'obproxy-ce']: + pkg = mirror_manager.get_exact_pkg(name=comp, only_download=True) + if pkg: + pkgs.append(pkg) + for pkg in pkgs: + client.put_file(pkg.path, os.path.join(home_path, 'ocp-server/lib/', pkg.file_name)) if soft_dir: - client.put_file(rpm, os.path.join(soft_dir, name)) - max_ob_pkg = LocalClient.execute_command('find %s/mirror/ -type f -name "oceanbase-*.rpm" -exec readlink -f {} \; | grep -v "oceanbase.*libs" | grep -v "oceanbase.*utils" | sort -V | tail -n 1' % OBD_HOME, stdio=stdio).stdout.strip() - max_odp_pkg = LocalClient.execute_command('find %s/mirror/ -type f -name "obproxy-*.rpm" -exec readlink -f {} \; | sort -V | tail -n 1' % OBD_HOME, stdio=stdio).stdout.strip() - name = os.path.basename(max_ob_pkg) - client.put_file(max_ob_pkg, os.path.join(home_path, 'ocp-server/lib/', name)) - if soft_dir: - client.put_file(max_ob_pkg, os.path.join(soft_dir, name)) - name = os.path.basename(max_odp_pkg) - client.put_file(max_odp_pkg, os.path.join(home_path, 'ocp-server/lib/', name)) - if soft_dir: - client.put_file(max_odp_pkg, os.path.join(soft_dir, name)) + client.put_file(pkg.path, os.path.join(soft_dir, pkg.file_name)) + def init(plugin_context, upgrade=False, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) @@ -131,6 +135,7 @@ def init(plugin_context, upgrade=False, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -146,6 +151,7 @@ def init(plugin_context, upgrade=False, *args, **kwargs): if not ret or ret.stdout.strip(): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='log dir', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -156,6 +162,7 @@ def init(plugin_context, upgrade=False, *args, **kwargs): if not client.execute_command('mkdir -p -m 775 %s' % log_dir): global_ret = False stdio.error(err.EC_FAIL_TO_INIT_PATH.format(server=server, key='log dir', msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=log_dir))) + stdio.error(err.EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue link_path = os.path.join(home_path, 'log') client.execute_command("if [ ! 
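`_ocp_lib` now resolves packages through the local mirror manager instead of globbing rpm file names and sorting with `sort -V`. Only `get_exact_pkg(name=..., arch=..., only_download=True)` from the diff is assumed here; a sketch of the collection loop:

```python
def collect_ocp_lib_pkgs(mirror_manager):
    # ocp-agent must be shipped for every architecture OCP may manage,
    # so both x86_64 and aarch64 builds are requested explicitly.
    pkgs = []
    for arch in ('x86_64', 'aarch64'):
        pkg = mirror_manager.get_exact_pkg(name='ocp-agent-ce', arch=arch,
                                           only_download=True)
        if pkg:
            pkgs.append(pkg)
    # One package per server-side component; the mirror manager picks
    # the version, replacing the old find | sort -V | tail -n 1 pipeline.
    for comp in ('oceanbase-ce', 'oceanbase-ce-libs',
                 'oceanbase-ce-utils', 'obproxy-ce'):
        pkg = mirror_manager.get_exact_pkg(name=comp, only_download=True)
        if pkg:
            pkgs.append(pkg)
    return pkgs
```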
'%s' -ef '%s' ]; then ln -sf %s %s; fi" % (log_dir, link_path, log_dir, link_path)) diff --git a/plugins/ocp-server/4.2.1/restart.py b/plugins/ocp-server/4.2.1/restart.py index d164b20..565af18 100644 --- a/plugins/ocp-server/4.2.1/restart.py +++ b/plugins/ocp-server/4.2.1/restart.py @@ -122,11 +122,10 @@ def restart(self): cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config - need_bootstrap = self.bootstrap_plugin is not None if not self.call_plugin(self.start_check_plugin, clients=clients, cluster_config=cluster_config): self.stdio.stop_loading('stop_loading', 'fail') return False - if not self.call_plugin(self.start_plugin, source_option='restart', clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, need_bootstrap=need_bootstrap, repository_dir_map=self.repository_dir_map): + if not self.call_plugin(self.start_plugin, source_option='restart', clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository_dir_map=self.repository_dir_map): self.rollback() self.stdio.stop_loading('stop_loading', 'fail') return False diff --git a/plugins/ocp-server/4.2.1/start.py b/plugins/ocp-server/4.2.1/start.py index 543d2d3..35321cc 100644 --- a/plugins/ocp-server/4.2.1/start.py +++ b/plugins/ocp-server/4.2.1/start.py @@ -196,7 +196,7 @@ def get_parsed_option(key, default=''): if value is None: return value try: - parsed_value = Capacity(value).btyes + parsed_value = Capacity(value).bytes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -207,12 +207,12 @@ def error(*arg, **kwargs): stdio.stop_loading('fail') def start_cluster(times=0): - jdbc_host = jdbc_port = jdbc_url = jdbc_username = jdbc_password = jdbc_public_key = meta_user = meta_tenant = meta_password = monitor_user = monitor_tenant = monitor_password = monitor_db = '' server_config = start_env[cluster_config.servers[0]] # check meta db connect before start jdbc_url = server_config['jdbc_url'] jdbc_username = "{0}@{1}".format(server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']) jdbc_password = server_config['ocp_meta_password'] + jdbc_public_key = '' meta_user = server_config['ocp_meta_username'] meta_tenant = server_config['ocp_meta_tenant']['tenant_name'] meta_password = server_config['ocp_meta_password'] @@ -234,14 +234,30 @@ def start_cluster(times=0): site_url = global_config.get('ocp_site_url', '') soft_dir = global_config.get('soft_dir', '') - meta_cursor = Cursor(jdbc_host, jdbc_port, meta_user, meta_tenant, meta_password, stdio) - if meta_user != 'root': + retries = 10 + meta_cursor = monitor_cursor = '' + while retries: + retries -= 1 + try: + meta_cursor = Cursor(jdbc_host, jdbc_port, meta_user, meta_tenant, meta_password, stdio) + meta_cursor.execute("show databases;", raise_exception=False, exc_level='verbose') + monitor_cursor = Cursor(jdbc_host, jdbc_port, monitor_user, monitor_tenant, monitor_password, stdio) + monitor_cursor.execute("show databases;", raise_exception=False, exc_level='verbose') + stdio.verbose(f'meta tenant and monitor tenant connect successful') + break + except: + stdio.verbose(f'meta tenant or monitor tenant connect failed, retrying({retries})') + if retries == 0: + stdio.error('meta tenant or monitor tenant connect failed') + return False + time.sleep(1) + + if meta_cursor and meta_user != 'root': sql = f"""ALTER USER root IDENTIFIED BY %s""" meta_cursor.execute(sql, args=[meta_password], raise_exception=False, 
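`start_cluster` no longer assumes the meta and monitor tenants are connectable on the first try: both are probed with `show databases` for up to 10 one-second retries before the start is failed. A generic sketch of that wait loop, with `make_cursor` standing in for the `Cursor` constructor:

```python
import time

def wait_for_tenants(make_cursor, tenants, retries=10):
    # Each attempt must both connect and answer a trivial query, since
    # a tenant can accept connections before it is fully serviceable.
    while retries:
        retries -= 1
        try:
            cursors = [make_cursor(*args) for args in tenants]
            for cur in cursors:
                cur.execute('show databases;')
            return cursors
        except Exception:
            time.sleep(1)
    return None  # caller reports the connect failure and aborts start
```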
exc_level='verbose') plugin_context.set_variable('meta_cursor', meta_cursor) - monitor_cursor = Cursor(jdbc_host, jdbc_port, monitor_user, monitor_tenant, monitor_password, stdio) - if monitor_user != 'root': + if monitor_cursor and monitor_user != 'root': sql = f"""ALTER USER root IDENTIFIED BY %s""" monitor_cursor.execute(sql, args=[monitor_password], raise_exception=False, exc_level='verbose') plugin_context.set_variable('monitor_cursor', monitor_cursor) @@ -256,7 +272,6 @@ def start_cluster(times=0): home_path = server_config['home_path'] launch_user = server_config.get('launch_user', None) system_password = server_config["system_password"] - port = server_config['port'] pid_path = os.path.join(home_path, 'run/ocp-server.pid') pids = client.execute_command("cat %s" % pid_path).stdout.strip() if not times and pids and all([client.execute_command('ls /proc/%s' % pid) for pid in pids.split('\n')]): @@ -310,18 +325,19 @@ def start_cluster(times=0): cmd += ' --with-property=obsdk.ob.connection.mode:direct' cmd += ' --with-property=ocp.iam.login.client.max-attempts:60' cmd += ' --with-property=ocp.iam.login.client.lockout-minutes:1' - if server_config['admin_password'] != '********': - admin_password = server_config['admin_password'].replace("'", """'"'"'""") - environ_variable += "export OCP_INITIAL_ADMIN_PASSWORD=\'%s\';" % admin_password - cmd += f' --with-property=ocp.file.local.built-in.dir:{home_path}/ocp-server/lib' - cmd += f' --with-property=ocp.log.download.tmp.dir:{home_path}/logs/ocp' - cmd += ' --with-property=ocp.file.local.dir:{}'.format(soft_dir) if soft_dir else f' --with-property=ocp.file.local.dir:{home_path}/data/files' + if not without_parameter and not get_option('without_parameter', ''): + if server_config['admin_password'] != '********': + admin_password = server_config['admin_password'].replace("'", """'"'"'""") + environ_variable += "export OCP_INITIAL_ADMIN_PASSWORD=\'%s\';" % admin_password + cmd += f' --with-property=ocp.file.local.built-in.dir:{home_path}/ocp-server/lib' + cmd += f' --with-property=ocp.log.download.tmp.dir:{home_path}/logs/ocp' + cmd += ' --with-property=ocp.file.local.dir:{}'.format(soft_dir) if soft_dir else f' --with-property=ocp.file.local.dir:{home_path}/data/files' real_cmd = environ_variable + cmd execute_cmd = "cd {}; {} > /dev/null 2>&1 &".format(home_path, real_cmd) - if server_config.get('launch_user'): + if launch_user: cmd_file = os.path.join(home_path, 'cmd.sh') client.write_file(execute_cmd, cmd_file) - execute_cmd = "chmod +x {0};sudo chown -R {1} {0};sudo su - {1} -c '{0}' &".format(cmd_file, server_config['launch_user']) + execute_cmd = "chmod +x {0};sudo chown -R {1} {0};sudo su - {1} -c '{0}' &".format(cmd_file, launch_user) client.execute_command(execute_cmd, timeout=3600) ret = client.execute_command( "ps -aux | grep -F '%s' | grep -v grep | awk '{print $2}' " % jar_cmd) @@ -423,25 +439,13 @@ def stop_cluster(): return plugin_context.return_true() cluster_config = plugin_context.cluster_config - deploy_status = plugin_context.deploy_status options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio - namespace = plugin_context.namespace - namespaces = plugin_context.namespaces - deploy_name = plugin_context.deploy_name - repositories = plugin_context.repositories - plugin_name = plugin_context.plugin_name - - components = plugin_context.components clients = plugin_context.clients cluster_config = plugin_context.cluster_config - cmds = plugin_context.cmds options = plugin_context.options - 
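The `launch_user` path shows the pattern used whenever OCP runs as a dedicated user: the start command is written to a script, chowned to that user, and executed through `sudo su - <user> -c` so it inherits the user's login environment. A sketch of the wrapper, with `client` being the plugin's SSH client (its `write_file`/`execute_command` methods are used exactly as in the diff):

```python
def launch_as_user(client, home_path, launch_user, real_cmd):
    # The command is materialized as a script, handed over to the
    # launch user, and run through a login shell so the process picks
    # up that user's environment (PATH, ulimits, and so on).
    cmd_file = '%s/cmd.sh' % home_path
    client.write_file(real_cmd, cmd_file)
    return client.execute_command(
        "chmod +x {0};sudo chown -R {1} {0};"
        "sudo su - {1} -c '{0}' &".format(cmd_file, launch_user),
        timeout=3600)
```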
dev_mode = plugin_context.dev_mode stdio = plugin_context.stdio - create_if_not_exists = get_option('create_if_not_exists', True) - sys_cursor = kwargs.get('sys_cursor') global tenant_cursor tenant_cursor = None diff --git a/plugins/ocp-server/4.2.1/start_check.py b/plugins/ocp-server/4.2.1/start_check.py index a821d24..954fe67 100644 --- a/plugins/ocp-server/4.2.1/start_check.py +++ b/plugins/ocp-server/4.2.1/start_check.py @@ -293,10 +293,9 @@ def get_option(key, default=''): if not cluster_config.depends: # check meta db connect before start + cursor = None jdbc_url = server_config['jdbc_url'] matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) - cursor = getattr(options, 'metadb_cursor', '') - cursor = kwargs.get('metadb_cursor', '') if cursor == '' else cursor stdio.verbose('metadb connect check') if matched: jdbc_host = matched.group(1) @@ -324,9 +323,10 @@ def get_option(key, default=''): error('metadb connect', err.EC_OCP_SERVER_CONNECT_METADB, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) else: critical('metadb connect', err.EC_OCP_SERVER_ERROR_JDBC_URL, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) - client = clients[server] + # time check stdio.verbose('time check ') + client = clients[server] now = client.execute_command('date +"%Y-%m-%d %H:%M:%S"').stdout.strip() now = datetime.datetime.strptime(now, '%Y-%m-%d %H:%M:%S') stdio.verbose('now: %s' % now) @@ -334,10 +334,9 @@ def get_option(key, default=''): if cursor: ob_time = cursor.fetchone("SELECT NOW() now")['now'] stdio.verbose('ob_time: %s' % ob_time) - if not abs((now - ob_time).total_seconds()) < 180: - critical('time check', err.EC_OCP_SERVER_TIME_SHIFT.format(server=server)) - - if cursor and cursor.user == 'root@sys': + if not abs((now - ob_time).total_seconds()) < 60: + critical('time check', err.EC_OCP_SERVER_TIME_SHIFT.format(server=server), suggests=[err.SUG_OCP_SERVER_MACHINE_TIME]) + if cursor and cursor.user == 'root@sys' and source_option == 'start_check': stdio.verbose('tenant check ') zone_obs_num = {} sql = "select zone, count(*) num from oceanbase.DBA_OB_SERVERS where status = 'active' group by zone" @@ -386,20 +385,20 @@ def get_option(key, default=''): log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available) global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default()) - meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes - monitor_db_memory_size = Capacity(global_conf_with_default['ocp_monitor_tenant'].get('memory_size', 0)).btyes + meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).bytes + monitor_db_memory_size = Capacity(global_conf_with_default['ocp_monitor_tenant'].get('memory_size', 0)).bytes meta_db_max_cpu = global_conf_with_default['ocp_meta_tenant'].get('max_cpu') monitor_db_max_cpu = global_conf_with_default['ocp_monitor_tenant'].get('max_cpu', 0) meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size', 0) - meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes + meta_db_log_disk_size = Capacity(meta_db_log_disk_size).bytes monitor_db_log_disk_size = global_conf_with_default['ocp_monitor_tenant'].get('log_disk_size', 0) - monitor_db_log_disk_size = Capacity(monitor_db_log_disk_size).btyes + monitor_db_log_disk_size = Capacity(monitor_db_log_disk_size).bytes if meta_db_max_cpu and monitor_db_max_cpu: if int(meta_db_max_cpu) + int(monitor_db_max_cpu) > cpu_available: 
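The clock check now compares `date` on the OCP host against `SELECT NOW()` on the metadb and fails at 60 seconds of drift instead of 180, attaching a machine-time suggestion. The comparison in isolation:

```python
import datetime

MAX_DRIFT_SECONDS = 60  # tightened from the previous 180s window

def clocks_in_sync(host_now_str, ob_now):
    # host_now_str comes from `date +"%Y-%m-%d %H:%M:%S"` on the OCP
    # host, ob_now from `SELECT NOW()` on the metadb; both are naive
    # datetimes, so the two hosts are assumed to share a timezone.
    host_now = datetime.datetime.strptime(host_now_str, '%Y-%m-%d %H:%M:%S')
    return abs((host_now - ob_now).total_seconds()) < MAX_DRIFT_SECONDS
```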
critical('tenant cpu', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='cpu', avail=cpu_available, need=int(meta_db_max_cpu) + int(monitor_db_max_cpu))) if meta_db_memory_size and monitor_db_memory_size: if meta_db_memory_size + monitor_db_memory_size > mem_available: - critical('tenant mem', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='memory', avail=Capacity(mem_available), need=Capacity(meta_db_memory_size + monitor_db_memory_size))) + critical('tenant mem', err.EC_OCP_SERVER_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(mem_available), need=Capacity(meta_db_memory_size + monitor_db_memory_size)), suggests=[err.SUG_OCP_SERVER_EXIST_METADB_TENANT_NOT_ENOUGH.format()]) if meta_db_log_disk_size and monitor_db_log_disk_size: if meta_db_log_disk_size + monitor_db_log_disk_size > log_disk_available: critical('tenant clog', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='log_disk_size', avail=Capacity(log_disk_available), need=Capacity(meta_db_log_disk_size + monitor_db_log_disk_size))) @@ -524,8 +523,6 @@ def get_option(key, default=''): if client.execute_command(clockdiff_cmd): check_pass('clockdiff') else: - if not client.execute_command('sudo -n true'): - critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) ret = client.execute_command('sudo ' + clockdiff_cmd) if not ret: critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) @@ -544,12 +541,12 @@ def get_option(key, default=''): ip_servers = {} MIN_MEMORY_VALUE = 1073741824 - memory_size = Capacity(server_config.get('memory_size', '1G')).btyes + memory_size = Capacity(server_config.get('memory_size', '1G')).bytes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).btyes + need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).bytes ip = server.ip if ip not in servers_client: servers_client[ip] = client @@ -592,7 +589,7 @@ def get_option(key, default=''): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes mem_suggests = [err.SUG_OCP_SERVER_REDUCE_MEM.format()] if memory_needed > server_memory_stats['available']: for server in ip_servers[ip]: diff --git a/plugins/ocp-server/4.2.1/takeover.py b/plugins/ocp-server/4.2.1/takeover.py index 1ffd7af..d9dcafd 100644 --- a/plugins/ocp-server/4.2.1/takeover.py +++ b/plugins/ocp-server/4.2.1/takeover.py @@ -98,7 +98,7 @@ def takeover(plugin_context, cursors=None, *args, **kwargs): root_password = cluster_config.get_global_conf().get("root_password") takeover_data = {"switchConfigUrl": True, "connectionMode": "direct", "rootSysPassword": root_password, "address": server.ip, "port": mysql_port, - "hostInfo": {"kind": "DEDICATED_PHYSICAL_MACHINE", "hostTypeId": host_type_id, "sshPort": 22, + "hostInfo": {"kind": "DEDICATED_PHYSICAL_MACHINE", "hostTypeId": host_type_id, "sshPort": ssh_config.port, "credentialId": credential_id}} proxyro_password = cluster_config.get_global_conf().get("proxyro_password") if proxyro_password is not None and proxyro_password != "": diff --git a/plugins/ocp-server/4.2.2/start_check.py b/plugins/ocp-server/4.2.2/start_check.py index 765bf3d..8494bbf 100644 --- a/plugins/ocp-server/4.2.2/start_check.py +++ 
b/plugins/ocp-server/4.2.2/start_check.py @@ -311,11 +311,10 @@ def get_option(key, default=''): continue if not cluster_config.depends: + cursor = None # check meta db connect before start jdbc_url = server_config['jdbc_url'] matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) - cursor = getattr(options, 'metadb_cursor', '') - cursor = kwargs.get('metadb_cursor', '') if cursor == '' else cursor stdio.verbose('metadb connect check') if matched: jdbc_host = matched.group(1) @@ -343,9 +342,10 @@ def get_option(key, default=''): error('metadb connect', err.EC_OCP_SERVER_CONNECT_METADB, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) else: critical('metadb connect', err.EC_OCP_SERVER_ERROR_JDBC_URL, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) - client = clients[server] + # time check stdio.verbose('time check ') + client = clients[server] now = client.execute_command('date +"%Y-%m-%d %H:%M:%S"').stdout.strip() now = datetime.datetime.strptime(now, '%Y-%m-%d %H:%M:%S') stdio.verbose('now: %s' % now) @@ -353,10 +353,9 @@ def get_option(key, default=''): if cursor: ob_time = cursor.fetchone("SELECT NOW() now")['now'] stdio.verbose('ob_time: %s' % ob_time) - if not abs((now - ob_time).total_seconds()) < 180: - critical('time check', err.EC_OCP_SERVER_TIME_SHIFT.format(server=server)) - - if cursor and cursor.user == 'root@sys': + if not abs((now - ob_time).total_seconds()) < 60: + critical('time check', err.EC_OCP_SERVER_TIME_SHIFT.format(server=server), suggests=[err.SUG_OCP_SERVER_MACHINE_TIME]) + if cursor and cursor.user == 'root@sys' and source_option == 'start_check': stdio.verbose('tenant check ') zone_obs_num = {} sql = "select zone, count(*) num from oceanbase.DBA_OB_SERVERS where status = 'active' group by zone" @@ -405,20 +404,20 @@ def get_option(key, default=''): log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available) global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default()) - meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes - monitor_db_memory_size = Capacity(global_conf_with_default['ocp_monitor_tenant'].get('memory_size', 0)).btyes + meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).bytes + monitor_db_memory_size = Capacity(global_conf_with_default['ocp_monitor_tenant'].get('memory_size', 0)).bytes meta_db_max_cpu = global_conf_with_default['ocp_meta_tenant'].get('max_cpu') monitor_db_max_cpu = global_conf_with_default['ocp_monitor_tenant'].get('max_cpu', 0) meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size', 0) - meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes + meta_db_log_disk_size = Capacity(meta_db_log_disk_size).bytes monitor_db_log_disk_size = global_conf_with_default['ocp_monitor_tenant'].get('log_disk_size', 0) - monitor_db_log_disk_size = Capacity(monitor_db_log_disk_size).btyes + monitor_db_log_disk_size = Capacity(monitor_db_log_disk_size).bytes if meta_db_max_cpu and monitor_db_max_cpu: if int(meta_db_max_cpu) + int(monitor_db_max_cpu) > cpu_available: 
avail=Capacity(mem_available), need=Capacity(meta_db_memory_size + monitor_db_memory_size))) + critical('tenant mem', err.EC_OCP_SERVER_EXIST_METADB_TENANT_MEMORY_NOT_ENOUGH.format(avail=Capacity(mem_available), need=Capacity(meta_db_memory_size + monitor_db_memory_size)), suggests=[err.SUG_OCP_SERVER_EXIST_METADB_TENANT_NOT_ENOUGH.format()]) if meta_db_log_disk_size and monitor_db_log_disk_size: if meta_db_log_disk_size + monitor_db_log_disk_size > log_disk_available: critical('tenant log disk', err.EC_OCP_SERVER_RESOURCE_NOT_ENOUGH.format(resource='log_disk_size', avail=Capacity(log_disk_available), need=Capacity(meta_db_log_disk_size + monitor_db_log_disk_size))) @@ -543,8 +542,6 @@ def get_option(key, default=''): if client.execute_command(clockdiff_cmd): check_pass('clockdiff') else: - if not client.execute_command('sudo -n true'): - critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) ret = client.execute_command('sudo ' + clockdiff_cmd) if not ret: critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) @@ -563,12 +560,12 @@ def get_option(key, default=''): ip_servers = {} MIN_MEMORY_VALUE = 1073741824 - memory_size = Capacity(server_config.get('memory_size', '1G')).btyes + memory_size = Capacity(server_config.get('memory_size', '1G')).bytes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).btyes + need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).bytes ip = server.ip if ip not in servers_client: servers_client[ip] = client @@ -611,7 +608,7 @@ def get_option(key, default=''): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = Capacity(str(v)).btyes + server_memory_stats[key] = Capacity(str(v)).bytes mem_suggests = [err.SUG_OCP_SERVER_REDUCE_MEM.format()] if memory_needed > server_memory_stats['available']: for server in ip_servers[ip]: diff --git a/plugins/prometheus/2.37.1/destroy.py b/plugins/prometheus/2.37.1/destroy.py index 202b7cc..968978f 100644 --- a/plugins/prometheus/2.37.1/destroy.py +++ b/plugins/prometheus/2.37.1/destroy.py @@ -28,10 +28,24 @@ global_ret = True +def check_mount_path(client, path, stdio): + stdio and getattr(stdio, 'verbose', print)('check mount: %s' % path) + try: + if client.execute_command("grep '\\s%s\\s' /proc/mounts" % path): + return True + return False + except Exception as e: + stdio and getattr(stdio, 'exception', print)('') + stdio and getattr(stdio, 'error', print)('failed to check mount: %s' % path) + + def destroy(plugin_context, *args, **kwargs): def clean(path): client = clients[server] - ret = client.execute_command('rm -fr %s' % path, timeout=-1) + if check_mount_path(client, path, stdio): + ret = client.execute_command('rm -fr %s/*' % path, timeout=-1) + else: + ret = client.execute_command('rm -fr %s' % path, timeout=-1) if not ret: global global_ret global_ret = False diff --git a/plugins/prometheus/2.37.1/display.py b/plugins/prometheus/2.37.1/display.py index bff435a..ccf6468 100644 --- a/plugins/prometheus/2.37.1/display.py +++ b/plugins/prometheus/2.37.1/display.py @@ -48,7 +48,7 @@ def display(plugin_context, cursor, *args, **kwargs): 'password': password if password else '', 'status': 'active' if api_cursor and api_cursor.connect(stdio) else 'inactive' }) - stdio.print_list(results, ['url', 'user', 
'password', 'status'], lambda x: [x['url'], x['user'], x['password'], x['status']], title='prometheus') + stdio.print_list(results, ['url', 'user', 'password', 'status'], lambda x: [x['url'], x['user'], x['password'], x['status']], title=cluster_config.name) active_result = [r for r in results if r['status'] == 'active'] info_dict = active_result[0] if len(active_result) > 0 else None if info_dict is not None: diff --git a/plugins/prometheus/2.37.1/init.py b/plugins/prometheus/2.37.1/init.py index 61cc0b9..837f137 100644 --- a/plugins/prometheus/2.37.1/init.py +++ b/plugins/prometheus/2.37.1/init.py @@ -22,7 +22,8 @@ import os.path -from _errno import EC_FAIL_TO_INIT_PATH, EC_CLEAN_PATH_FAILED, InitDirFailedErrorMessage, EC_CONFIG_CONFLICT_DIR +from _errno import EC_FAIL_TO_INIT_PATH, EC_CLEAN_PATH_FAILED, InitDirFailedErrorMessage, EC_CONFIG_CONFLICT_DIR, \ + EC_COMPONENT_DIR_NOT_EMPTY def _clean(server, client, path, stdio=None): @@ -39,7 +40,7 @@ def init(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio - + deploy_name = plugin_context.deploy_name global_ret = True force = getattr(plugin_context.options, 'force', False) clean = getattr(plugin_context.options, 'clean', False) @@ -100,6 +101,7 @@ def init(plugin_context, *args, **kwargs): stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format( path=home_path))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -115,6 +117,7 @@ def init(plugin_context, *args, **kwargs): stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='data_dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format( path=data_dir))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False @@ -134,6 +137,7 @@ def init(plugin_context, *args, **kwargs): stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='data_dir', msg=InitDirFailedErrorMessage.NOT_EMPTY.format( path=data_dir))) + stdio.error(EC_COMPONENT_DIR_NOT_EMPTY.format(deploy_name=deploy_name), _on_exit=True) continue else: global_ret = False diff --git a/plugins/sysbench/1.0.20/file_map.yaml b/plugins/sysbench/1.0.20/file_map.yaml new file mode 100644 index 0000000..08c12ab --- /dev/null +++ b/plugins/sysbench/1.0.20/file_map.yaml @@ -0,0 +1,3 @@ +- src_path: './usr/sysbench' + target_path: 'sysbench' + type: dir \ No newline at end of file diff --git a/plugins/sysbench/3.1.0/pre_test.py b/plugins/sysbench/3.1.0/pre_test.py index 04886dc..a9aa729 100644 --- a/plugins/sysbench/3.1.0/pre_test.py +++ b/plugins/sysbench/3.1.0/pre_test.py @@ -25,9 +25,22 @@ from ssh import LocalClient from _types import Capacity +from const import TOOL_SYSBENCH, COMP_OBCLIENT stdio = None +def file_path_check(bin_path, tool_name, tool_path, cmd): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if not result: + continue + break + else: + return None, result + return path, None + def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): @@ -57,26 +70,42 @@ def get_option(key, default=''): stdio.error('DO NOT use sys tenant for testing.') return - ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio) - if not ret: + cmd = '%s --help' + path, result = 
file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd) + if result: stdio.error( '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( - ret.stderr, obclient_bin)) + result.stderr, obclient_bin)) return - ret = LocalClient.execute_command('%s --help' % sysbench_bin, stdio=stdio) - if not ret: + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) + + path, result = file_path_check(sysbench_bin, TOOL_SYSBENCH, 'sysbench/bin/sysbench', cmd) + if result: stdio.error( '%s\n%s is not an executable file. Please use `--sysbench-bin` to set.\nYou may not have ob-sysbench installed' % ( - ret.stderr, sysbench_bin)) + result.stderr, sysbench_bin)) return + setattr(options, 'sysbench_bin', path) - if not script_name.endswith('.lua'): - script_name += '.lua' - script_path = os.path.join(sysbench_script_dir, script_name) - if not os.path.exists(script_path): - stdio.error( - 'No such file %s. Please use `--sysbench-script-dir` to set sysbench scrpit dir.\nYou may not have ob-sysbench installed' % script_path) - return + if not os.path.exists(sysbench_script_dir): + sysbench_script_dir = os.path.join(os.getenv('HOME'), TOOL_SYSBENCH, 'sysbench/share/sysbench') + setattr(options, 'sysbench_script_dir', sysbench_script_dir) + + scripts = script_name.split(',') + for script in scripts: + if not script.endswith('.lua'): + script += '.lua' + script_path = os.path.join(sysbench_script_dir, script) + if not os.path.exists(script_path): + stdio.error( + 'No such file %s. Please use `--sysbench-script-dir` to set sysbench script dir.\nYou may not have ob-sysbench installed' % script_path) + return + + for thread in threads.split(","): + if not thread.isdecimal(): + stdio.error("Illegal characters in threads: %s" % thread) + return sql = "select * from oceanbase.gv$tenant where tenant_name = %s" tenant_meta = cursor.fetchone(sql, [tenant_name]) diff --git a/plugins/sysbench/3.1.0/run_test.py b/plugins/sysbench/3.1.0/run_test.py index 257fed6..a20abc9 100644 --- a/plugins/sysbench/3.1.0/run_test.py +++ b/plugins/sysbench/3.1.0/run_test.py @@ -79,17 +79,13 @@ def get_option(key, default=''): sysbench_bin = get_option('sysbench_bin', 'sysbench') sysbench_script_dir = get_option('sysbench_script_dir', '/usr/sysbench/share/sysbench') - try: - sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script_name, host, port, user, tenant_name, mysql_db) - + def generate_sysbench_cmd(sysbench_cmd): if password: sysbench_cmd += ' --mysql-password=%s' % password if table_size: sysbench_cmd += ' --table_size=%s' % table_size if tables: sysbench_cmd += ' --tables=%s' % tables - if threads: - sysbench_cmd += ' --threads=%s' % threads if time: sysbench_cmd += ' --time=%s' % time if interval: @@ -104,8 +100,24 @@ def get_option(key, default=''): sysbench_cmd += ' --percentile=%s' % percentile for opt_key in opt_keys: sysbench_cmd += ' --%s=%s' % (opt_key.replace('_', '-'), getattr(options, opt_key)) - if exec_cmd('%s cleanup' % sysbench_cmd) and exec_cmd('%s prepare' % sysbench_cmd) and exec_cmd('%s --db-ps-mode=disable run' % sysbench_cmd): - return plugin_context.return_true() + return sysbench_cmd + + try: + scripts = script_name.split(',') + user_threads = threads.split(',') + max_thread = max(user_threads) + for script in scripts: + stdio.print("\nStart executing %s" % (script if script.endswith('.lua') else script + '.lua')) + sysbench_cmd = "cd %s; %s %s 
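`file_path_check` is the new lookup used for both `obclient` and `sysbench`: try the user-supplied binary, then fall back to the copy installed under the per-user tool prefix in `$HOME`, returning the last failed result when neither candidate responds to the probe command. A standalone version using `subprocess` (paths here are illustrative):

```python
import os
import subprocess

def file_path_check(bin_path, tool_name, tool_rel_path, probe='%s --help'):
    # Try the user-supplied binary first, then the copy installed under
    # the per-user tool prefix ($HOME/<tool_name>/<tool_rel_path>).
    fallback = os.path.join(os.getenv('HOME'), tool_name, tool_rel_path)
    result = None
    for path in (bin_path, fallback):
        result = subprocess.run(probe % path, shell=True,
                                capture_output=True, text=True)
        if result.returncode == 0:
            return path, None   # usable binary found
    return None, result         # neither worked; surface the last error
```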
--mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script, host, port, user, tenant_name, mysql_db) + sysbench_cmd = generate_sysbench_cmd(sysbench_cmd) + base_cmd = f"{sysbench_cmd} --threads={max_thread}" + if not (exec_cmd('%s cleanup' % base_cmd) and exec_cmd('%s prepare' % base_cmd)): + return plugin_context.return_false() + for thread in user_threads: + sysbench_run_cmd = f"{sysbench_cmd} --threads={thread}" + if not exec_cmd('%s --db-ps-mode=disable run' % sysbench_run_cmd): + return plugin_context.return_false() + return plugin_context.return_true() except KeyboardInterrupt: pass except: diff --git a/plugins/sysbench/4.0.0.0/pre_test.py b/plugins/sysbench/4.0.0.0/pre_test.py index 43a7245..c39b37a 100644 --- a/plugins/sysbench/4.0.0.0/pre_test.py +++ b/plugins/sysbench/4.0.0.0/pre_test.py @@ -25,9 +25,21 @@ from ssh import LocalClient from _types import Capacity +from const import TOOL_SYSBENCH, COMP_OBCLIENT stdio = None +def file_path_check(bin_path, tool_name, tool_path, cmd): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if not result: + continue + break + else: + return None, result + return path, None def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): @@ -57,26 +69,42 @@ def get_option(key, default=''): stdio.error('DO NOT use sys tenant for testing.') return - ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio) - if not ret: + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd) + if result: stdio.error( '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( - ret.stderr, obclient_bin)) + result.stderr, obclient_bin)) return - ret = LocalClient.execute_command('%s --help' % sysbench_bin, stdio=stdio) - if not ret: + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) + + path, result = file_path_check(sysbench_bin, TOOL_SYSBENCH, 'sysbench/bin/sysbench', cmd) + if result: stdio.error( '%s\n%s is not an executable file. Please use `--sysbench-bin` to set.\nYou may not have ob-sysbench installed' % ( - ret.stderr, sysbench_bin)) + result.stderr, sysbench_bin)) return + setattr(options, 'sysbench_bin', path) - if not script_name.endswith('.lua'): - script_name += '.lua' - script_path = os.path.join(sysbench_script_dir, script_name) - if not os.path.exists(script_path): - stdio.error( - 'No such file %s. Please use `--sysbench-script-dir` to set sysbench scrpit dir.\nYou may not have ob-sysbench installed' % script_path) - return + if not os.path.exists(sysbench_script_dir): + sysbench_script_dir = os.path.join(os.getenv('HOME'), TOOL_SYSBENCH, 'sysbench/share/sysbench') + setattr(options, 'sysbench_script_dir', sysbench_script_dir) + + scripts = script_name.split(',') + for script in scripts: + if not script.endswith('.lua'): + script += '.lua' + script_path = os.path.join(sysbench_script_dir, script) + if not os.path.exists(script_path): + stdio.error( + 'No such file %s. 
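`run_test` now accepts comma-separated scripts and thread counts: data is prepared once per script using the largest thread count, then each requested concurrency is run against the same tables. One wrinkle worth noting: the plugin takes `max()` over the raw strings, which compares lexicographically (`max(['8', '16'])` is `'8'`), so the sketch below casts to `int` first; `exec_cmd` is a placeholder for the plugin's executor:

```python
def run_matrix(base_cmd, scripts, threads, exec_cmd):
    # Prepare once per script at the highest concurrency, then run each
    # requested thread count against the same data set.
    thread_list = [int(t) for t in threads.split(',')]
    max_thread = max(thread_list)  # int max; max over the raw strings
                                   # would be lexicographic ('8' > '16')
    for script in scripts.split(','):
        cmd = '%s %s' % (base_cmd, script)
        if not (exec_cmd('%s --threads=%d cleanup' % (cmd, max_thread))
                and exec_cmd('%s --threads=%d prepare' % (cmd, max_thread))):
            return False
        for t in thread_list:
            if not exec_cmd('%s --threads=%d --db-ps-mode=disable run'
                            % (cmd, t)):
                return False
    return True
```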
Please use `--sysbench-script-dir` to set sysbench script dir.\nYou may not have ob-sysbench installed' % script_path) + return + + for thread in threads.split(","): + if not thread.isdecimal(): + stdio.error("Illegal characters in threads: %s" % thread) + return sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s" max_cpu = 2 diff --git a/plugins/sysbench/4.0.0.0/run_test.py b/plugins/sysbench/4.0.0.0/run_test.py index 257fed6..a20abc9 100644 --- a/plugins/sysbench/4.0.0.0/run_test.py +++ b/plugins/sysbench/4.0.0.0/run_test.py @@ -79,17 +79,13 @@ def get_option(key, default=''): sysbench_bin = get_option('sysbench_bin', 'sysbench') sysbench_script_dir = get_option('sysbench_script_dir', '/usr/sysbench/share/sysbench') - try: - sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script_name, host, port, user, tenant_name, mysql_db) - + def generate_sysbench_cmd(sysbench_cmd): if password: sysbench_cmd += ' --mysql-password=%s' % password if table_size: sysbench_cmd += ' --table_size=%s' % table_size if tables: sysbench_cmd += ' --tables=%s' % tables - if threads: - sysbench_cmd += ' --threads=%s' % threads if time: sysbench_cmd += ' --time=%s' % time if interval: @@ -104,8 +100,24 @@ def get_option(key, default=''): sysbench_cmd += ' --percentile=%s' % percentile for opt_key in opt_keys: sysbench_cmd += ' --%s=%s' % (opt_key.replace('_', '-'), getattr(options, opt_key)) - if exec_cmd('%s cleanup' % sysbench_cmd) and exec_cmd('%s prepare' % sysbench_cmd) and exec_cmd('%s --db-ps-mode=disable run' % sysbench_cmd): - return plugin_context.return_true() + return sysbench_cmd + + try: + scripts = script_name.split(',') + user_threads = threads.split(',') + max_thread = max(user_threads) + for script in scripts: + stdio.print("\nStart executing %s" % (script if script.endswith('.lua') else script + '.lua')) + sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script, host, port, user, tenant_name, mysql_db) + sysbench_cmd = generate_sysbench_cmd(sysbench_cmd) + base_cmd = f"{sysbench_cmd} --threads={max_thread}" + if not (exec_cmd('%s cleanup' % base_cmd) and exec_cmd('%s prepare' % base_cmd)): + return plugin_context.return_false() + for thread in user_threads: + sysbench_run_cmd = f"{sysbench_cmd} --threads={thread}" + if not exec_cmd('%s --db-ps-mode=disable run' % sysbench_run_cmd): + return plugin_context.return_false() + return plugin_context.return_true() except KeyboardInterrupt: pass except: diff --git a/plugins/tpcc/3.1.0/check_requirement.py b/plugins/tpcc/3.1.0/check_requirement.py new file mode 100644 index 0000000..3585231 --- /dev/null +++ b/plugins/tpcc/3.1.0/check_requirement.py @@ -0,0 +1,31 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + +from __future__ import absolute_import, division, print_function +from ssh import LocalClient + +def check_requirement(plugin_context, file_map=None, requirement_map=None, *args, **kwargs): + lib_check = False + need_libs = set() + java_bin = getattr(plugin_context.options, 'java_bin', 'java') + cmd = '%s -version' % java_bin + if not LocalClient.execute_command(cmd, stdio=plugin_context.stdio): + for file_item in file_map.values(): + need_libs.add(requirement_map[file_item.require]) + return plugin_context.return_true(checked=lib_check, requirements=need_libs) \ No newline at end of file diff --git a/plugins/tpcc/3.1.0/file_map.yaml b/plugins/tpcc/3.1.0/file_map.yaml new file mode 100644 index 0000000..6af0f4f --- /dev/null +++ b/plugins/tpcc/3.1.0/file_map.yaml @@ -0,0 +1,4 @@ +- src_path: './usr/ob-benchmarksql' + target_path: 'tpcc' + type: dir + require: openjdk-jre \ No newline at end of file diff --git a/plugins/tpcc/3.1.0/pre_test.py b/plugins/tpcc/3.1.0/pre_test.py index 30ac968..54b535a 100644 --- a/plugins/tpcc/3.1.0/pre_test.py +++ b/plugins/tpcc/3.1.0/pre_test.py @@ -25,6 +25,7 @@ from ssh import LocalClient from tool import DirectoryUtil +from const import TOOL_TPCC, TOOL_TPCC_BENCHMARKSQL, COMP_OBCLIENT PROPS4OB_TEMPLATE = """ db=oceanbase @@ -49,7 +50,18 @@ osCollectorScript=./misc/os_collector_linux.py osCollectorInterval=1 """ - + +def file_path_check(bin_path, tool_name, tool_path, cmd, stdio): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if not result: + continue + break + else: + return None, result + return path, None def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs): def get_option(key, default=''): @@ -89,6 +101,16 @@ def local_execute_command(command, env=None, timeout=None): stdio.error('Create tmp dir failed') return + bmsql_jar_paths = [bmsql_jar, os.path.join(os.getenv('HOME'), TOOL_TPCC, "tpcc/%s" % TOOL_TPCC_BENCHMARKSQL)] + for jar_path in bmsql_jar_paths: + if os.path.exists(jar_path): + bmsql_jar = jar_path + setattr(options, 'bmsql_dir', bmsql_jar) + break + else: + stdio.error('BenchmarkSQL jar file not found at %s. Please use `--bmsql-jar` to set BenchmarkSQL jar file' % bmsql_jar) + return + if not os.path.exists(bmsql_jar): stdio.error( 'BenchmarkSQL jar file not found at %s. Please use `--bmsql-jar` to set BenchmarkSQL jar file' % bmsql_jar) @@ -105,20 +127,27 @@ def local_execute_command(command, env=None, timeout=None): bmsql_classpath = ':'.join(jars) obclient_bin = get_option('obclient_bin', 'obclient') - ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio) - if not ret: + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd, stdio) + if result: stdio.error( - '%s\n%s is not an executable file. please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( - ret.stderr, obclient_bin)) + '%s\n%s is not an executable file. 
Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + result.stderr, obclient_bin)) return + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) java_bin = get_option('java_bin', 'java') - ret = local_execute_command('{java_bin} -version'.format(java_bin=java_bin)) - if not ret: + cmd = '%s -version' + path, result = file_path_check(java_bin, TOOL_TPCC, 'lib/bin/java', cmd, stdio) + if result: stdio.error( '%s\n%s is not an executable file. please use `--java-bin` to set.\nYou may not have java installed' % ( - ret.stderr, java_bin)) + result.stderr, java_bin)) return + java_bin = path + setattr(options, 'java_bin', java_bin) + exec_classes = ['jTPCC', 'LoadData', 'ExecJDBC'] passed = True for exec_class in exec_classes: diff --git a/plugins/tpcc/3.1.0/requirement.yaml b/plugins/tpcc/3.1.0/requirement.yaml new file mode 100644 index 0000000..b84e4d5 --- /dev/null +++ b/plugins/tpcc/3.1.0/requirement.yaml @@ -0,0 +1 @@ +openjdk-jre: \ No newline at end of file diff --git a/plugins/tpcc/4.0.0.0/pre_test.py b/plugins/tpcc/4.0.0.0/pre_test.py index 6ea230b..c371729 100644 --- a/plugins/tpcc/4.0.0.0/pre_test.py +++ b/plugins/tpcc/4.0.0.0/pre_test.py @@ -25,6 +25,7 @@ from ssh import LocalClient from tool import DirectoryUtil +from const import TOOL_TPCC, TOOL_TPCC_BENCHMARKSQL, COMP_OBCLIENT PROPS4OB_TEMPLATE = """ db=oceanbase @@ -49,7 +50,18 @@ osCollectorScript=./misc/os_collector_linux.py osCollectorInterval=1 """ - + +def file_path_check(bin_path, tool_name, tool_path, cmd, stdio): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if not result: + continue + break + else: + return None, result + return path, None def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs): def get_option(key, default=''): @@ -89,9 +101,14 @@ def local_execute_command(command, env=None, timeout=None): stdio.error('Create tmp dir failed') return - if not os.path.exists(bmsql_jar): - stdio.error( - 'BenchmarkSQL jar file not found at %s. Please use `--bmsql-jar` to set BenchmarkSQL jar file' % bmsql_jar) + bmsql_jar_paths = [bmsql_jar, os.path.join(os.getenv('HOME'), TOOL_TPCC, "tpcc/%s" % TOOL_TPCC_BENCHMARKSQL)] + for jar_path in bmsql_jar_paths: + if os.path.exists(jar_path): + bmsql_jar = jar_path + setattr(options, 'bmsql_dir', bmsql_jar) + break + else: + stdio.error('BenchmarkSQL jar file not found at %s. Please use `--bmsql-jar` to set BenchmarkSQL jar file' % bmsql_jar) return jars = [os.path.join(bmsql_jar, '*') if os.path.isdir(bmsql_jar) else bmsql_jar] @@ -105,20 +122,27 @@ def local_execute_command(command, env=None, timeout=None): bmsql_classpath = ':'.join(jars) obclient_bin = get_option('obclient_bin', 'obclient') - ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio) - if not ret: + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd, stdio) + if result: stdio.error( - '%s\n%s is not an executable file. please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( - ret.stderr, obclient_bin)) + '%s\n%s is not an executable file. 
Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + result.stderr, obclient_bin)) return + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) java_bin = get_option('java_bin', 'java') - ret = local_execute_command('{java_bin} -version'.format(java_bin=java_bin)) - if not ret: + cmd = '%s -version' + path, result = file_path_check(java_bin, TOOL_TPCC, 'lib/bin/java', cmd, stdio) + if result: stdio.error( '%s\n%s is not an executable file. please use `--java-bin` to set.\nYou may not have java installed' % ( - ret.stderr, java_bin)) + result.stderr, java_bin)) return + java_bin = path + setattr(options, 'java_bin', java_bin) + exec_classes = ['jTPCC', 'LoadData', 'ExecJDBC'] passed = True for exec_class in exec_classes: diff --git a/plugins/tpch/3.0.0/file_map.yaml b/plugins/tpch/3.0.0/file_map.yaml new file mode 100644 index 0000000..6933fda --- /dev/null +++ b/plugins/tpch/3.0.0/file_map.yaml @@ -0,0 +1,3 @@ +- src_path: './usr/tpc-h-tools/tpc-h-tools' + target_path: 'tpch' + type: dir \ No newline at end of file diff --git a/plugins/tpch/3.1.0/pre_test.py b/plugins/tpch/3.1.0/pre_test.py index c905130..13ffbac 100644 --- a/plugins/tpch/3.1.0/pre_test.py +++ b/plugins/tpch/3.1.0/pre_test.py @@ -31,7 +31,19 @@ from ssh import LocalClient from tool import DirectoryUtil from _types import Capacity - +from const import TOOL_TPCH, COMP_OBCLIENT + +def file_path_check(bin_path, tool_name, tool_path, cmd, stdio): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if result.code > 1: + continue + break + else: + return None, result + return path, None def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): @@ -88,10 +100,15 @@ def local_execute_command(command, env=None, timeout=None): setattr(options, 'sql_path', sql_path) obclient_bin = get_option('obclient_bin', 'obclient') - ret = local_execute_command('%s --help' % obclient_bin) - if not ret: - stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd, stdio) + if result: + stdio.error( + '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + result.stderr, obclient_bin)) return + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) if not DirectoryUtil.mkdir(tmp_dir, stdio=stdio): return @@ -139,10 +156,17 @@ def local_execute_command(command, env=None, timeout=None): return else: if not tbl_path: - ret = local_execute_command('%s -h' % dbgen_bin) - if ret.code > 1: - stdio.error('%s\n%s is not an executable file. Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (ret.stderr, dbgen_bin)) + cmd = '%s -h' + path, result = file_path_check(dbgen_bin, TOOL_TPCH, 'tpch/bin/dbgen', cmd, stdio) + if result: + stdio.error('%s\n%s is not an executable file. 
Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (result.stderr, dbgen_bin)) return + dbgen_bin = path + setattr(options, 'dbgen_bin', dbgen_bin) + + if not os.path.exists(dss_config): + dss_config = os.path.join(os.getenv('HOME'), TOOL_TPCH, 'tpch') + setattr(options, 'dss_config', dss_config) dss_path = os.path.join(dss_config, 'dists.dss') if not os.path.exists(dss_path): diff --git a/plugins/tpch/3.1.0/run_test.py b/plugins/tpch/3.1.0/run_test.py index 9bd25a7..461ff98 100644 --- a/plugins/tpch/3.1.0/run_test.py +++ b/plugins/tpch/3.1.0/run_test.py @@ -80,6 +80,7 @@ def local_execute_command(command, env=None, timeout=None): tenant_id = kwargs.get('tenant_id') unit_count = kwargs.get('unit_count', 0) cpu_total = 0 + parallel_num = get_option('parallel', int(max_cpu * unit_count)) if not_test_only: sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') @@ -118,8 +119,6 @@ def local_execute_command(command, env=None, timeout=None): if not path.startswith(ret): stdio.error('Access denied. Please set `secure_file_priv` to "".') return - - parallel_num = int(max_cpu * unit_count) if not_test_only: # 替换并发数 diff --git a/plugins/tpch/4.0.0.0/pre_test.py b/plugins/tpch/4.0.0.0/pre_test.py index b547417..9e68ba2 100644 --- a/plugins/tpch/4.0.0.0/pre_test.py +++ b/plugins/tpch/4.0.0.0/pre_test.py @@ -31,7 +31,19 @@ from ssh import LocalClient from tool import DirectoryUtil from _types import Capacity - +from const import TOOL_TPCH, COMP_OBCLIENT + +def file_path_check(bin_path, tool_name, tool_path, cmd, stdio): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if result.code > 1: + continue + break + else: + return None, result + return path, None def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): @@ -90,10 +102,15 @@ def local_execute_command(command, env=None, timeout=None): setattr(options, 'sql_path', sql_path) obclient_bin = get_option('obclient_bin', 'obclient') - ret = local_execute_command('%s --help' % obclient_bin) - if not ret: - stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd, stdio) + if result: + stdio.error( + '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + result.stderr, obclient_bin)) return + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) if not DirectoryUtil.mkdir(tmp_dir, stdio=stdio): return @@ -143,10 +160,17 @@ def local_execute_command(command, env=None, timeout=None): return else: if not tbl_path: - ret = local_execute_command('%s -h' % dbgen_bin) - if ret.code > 1: - stdio.error('%s\n%s is not an executable file. Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (ret.stderr, dbgen_bin)) + cmd = '%s -h' + path, result = file_path_check(dbgen_bin, TOOL_TPCH, 'tpch/bin/dbgen', cmd, stdio) + if result: + stdio.error('%s\n%s is not an executable file. 
Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (result.stderr, dbgen_bin)) return + dbgen_bin = path + setattr(options, 'dbgen_bin', dbgen_bin) + + if not os.path.exists(dss_config): + dss_config = os.path.join(os.getenv('HOME'), TOOL_TPCH, 'tpch') + setattr(options, 'dss_config', dss_config) dss_path = os.path.join(dss_config, 'dists.dss') if not os.path.exists(dss_path): diff --git a/plugins/tpch/4.0.0.0/run_test.py b/plugins/tpch/4.0.0.0/run_test.py index 6eab67a..050b915 100644 --- a/plugins/tpch/4.0.0.0/run_test.py +++ b/plugins/tpch/4.0.0.0/run_test.py @@ -83,6 +83,7 @@ def local_execute_command(command, env=None, timeout=None): max_cpu = kwargs.get('max_cpu', 2) tenant_id = kwargs.get('tenant_id') unit_count = kwargs.get('unit_count', 0) + parallel_num = get_option('parallel', int(max_cpu * unit_count)) if not_test_only: sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') @@ -122,8 +123,6 @@ def local_execute_command(command, env=None, timeout=None): if not path.startswith(ret): stdio.error('Access denied. Please set `secure_file_priv` to "".') return - - parallel_num = int(max_cpu * unit_count) if not_test_only: # 替换并发数 diff --git a/plugins/tpch/4.2.0.0/run_test.py b/plugins/tpch/4.2.0.0/run_test.py index 439f76c..61a5740 100644 --- a/plugins/tpch/4.2.0.0/run_test.py +++ b/plugins/tpch/4.2.0.0/run_test.py @@ -83,6 +83,7 @@ def local_execute_command(command, env=None, timeout=None): max_cpu = kwargs.get('max_cpu', 2) tenant_id = kwargs.get('tenant_id') unit_count = kwargs.get('unit_count', 0) + parallel_num = get_option('parallel', int(max_cpu * unit_count)) if not_test_only: sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') @@ -123,8 +124,6 @@ def local_execute_command(command, env=None, timeout=None): stdio.error('Access denied. 
Please set `secure_file_priv` to "\\".') return - parallel_num = int(max_cpu * unit_count) - if not_test_only: # 替换并发数 stdio.start_loading('Format DDL') diff --git a/plugins/tpch/4.3.0.0/analyze.sql b/plugins/tpch/4.3.0.0/analyze.sql new file mode 100644 index 0000000..1e2c758 --- /dev/null +++ b/plugins/tpch/4.3.0.0/analyze.sql @@ -0,0 +1,8 @@ +call dbms_stats.gather_table_stats('{database}', 'lineitem', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'orders', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'partsupp', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'part', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'customer', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'supplier', degree=>{cpu_total}, granularity=>'GLOBAL', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'nation', degree=>{cpu_total}, granularity=>'AUTO', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); +call dbms_stats.gather_table_stats('{database}', 'region', degree=>{cpu_total}, granularity=>'AUTO', method_opt=>'FOR ALL COLUMNS SIZE AUTO'); \ No newline at end of file diff --git a/plugins/tpch/4.3.0.0/pre_test.py b/plugins/tpch/4.3.0.0/pre_test.py index f42457a..670f6e9 100644 --- a/plugins/tpch/4.3.0.0/pre_test.py +++ b/plugins/tpch/4.3.0.0/pre_test.py @@ -20,10 +20,10 @@ from __future__ import absolute_import, division, print_function - import re import os from glob import glob +from const import TOOL_TPCH, COMP_OBCLIENT try: import subprocess32 as subprocess except: @@ -31,6 +31,18 @@ from ssh import LocalClient from tool import DirectoryUtil +def file_path_check(bin_path, tool_name, tool_path, cmd, stdio): + result = None + tool_path = os.path.join(os.getenv('HOME'), tool_name, tool_path) + for path in [bin_path, tool_path]: + result = LocalClient.execute_command(cmd % path, stdio=stdio) + if result.code > 1: + continue + break + else: + return None, result + return path, None + def format_size(size, precision=1): units = ['B', 'K', 'M', 'G'] @@ -93,7 +105,7 @@ def local_execute_command(command, env=None, timeout=None): if tenant_name == 'sys': stdio.error('DO NOT use sys tenant for testing.') - return + return test_server = get_option('test_server') tmp_dir = os.path.abspath(get_option('tmp_dir', './tmp')) @@ -107,10 +119,15 @@ def local_execute_command(command, env=None, timeout=None): setattr(options, 'sql_path', sql_path) obclient_bin = get_option('obclient_bin', 'obclient') - ret = local_execute_command('%s --help' % obclient_bin) - if not ret: - stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) + cmd = '%s --help' + path, result = file_path_check(obclient_bin, COMP_OBCLIENT, 'bin/obclient', cmd, stdio) + if result: + stdio.error( + '%s\n%s is not an executable file. 
Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + result.stderr, obclient_bin)) return + obclient_bin = path + setattr(options, 'obclient_bin', obclient_bin) if not DirectoryUtil.mkdir(tmp_dir, stdio=stdio): return @@ -133,6 +150,7 @@ def local_execute_command(command, env=None, timeout=None): if tenant_unit is False: return max_cpu = tenant_unit['max_cpu'] + memory_size = tenant_unit['memory_size'] min_memory = MIN_MEMORY unit_count = pool['unit_count'] server_num = len(cluster_config.servers) @@ -151,7 +169,7 @@ def local_execute_command(command, env=None, timeout=None): if not remote_tbl_dir: stdio.error('Please use --remote-tbl-dir to set a dir for remote tbl files') return - + if disable_transfer: ret = clients[test_server].execute_command('ls %s' % (os.path.join(remote_tbl_dir, '*.tbl'))) tbl_path = ret.stdout.strip().split('\n') if ret else [] @@ -160,11 +178,18 @@ def local_execute_command(command, env=None, timeout=None): return else: if not tbl_path: - ret = local_execute_command('%s -h' % dbgen_bin) - if ret.code > 1: - stdio.error('%s\n%s is not an executable file. Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (ret.stderr, dbgen_bin)) + cmd = '%s -h' + path, result = file_path_check(dbgen_bin, TOOL_TPCH, 'tpch/bin/dbgen', cmd, stdio) + if result: + stdio.error('%s\n%s is not an executable file. Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (result.stderr, dbgen_bin)) return - + dbgen_bin = path + setattr(options, 'dbgen_bin', dbgen_bin) + + if not os.path.exists(dss_config): + dss_config = os.path.join(os.getenv('HOME'), TOOL_TPCH, 'tpch') + setattr(options, 'dss_config', dss_config) + dss_path = os.path.join(dss_config, 'dists.dss') if not os.path.exists(dss_path): stdio.error('No such file: %s' % dss_path) @@ -202,7 +227,7 @@ def local_execute_command(command, env=None, timeout=None): return plugin_context.return_true( obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db, max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, - tenant_id=tenant_meta['TENANT_ID'], format_size=format_size + tenant_id=tenant_meta['TENANT_ID'], format_size=format_size, memory_size=memory_size ) diff --git a/plugins/tpch/4.3.0.0/run_test.py b/plugins/tpch/4.3.0.0/run_test.py new file mode 100644 index 0000000..20672bd --- /dev/null +++ b/plugins/tpch/4.3.0.0/run_test.py @@ -0,0 +1,247 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
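Note on the load logic in the new plugins/tpch/4.3.0.0/run_test.py below: the degree of parallelism is resolved in three steps. An explicit --parallel always wins; with --direct-load it is derived from the tenant's memory_size (which is in bytes, so >> 20 gives MiB, * 0.001 keeps roughly 0.1% of that, halved); otherwise the classic max_cpu * unit_count is kept, floored at 1. A standalone restatement under those assumptions (the function name here is illustrative, not part of the patch):

def choose_parallel_num(input_parallel, direct_load, memory_size, max_cpu, unit_count):
    # An explicit --parallel value is used as-is (floored at 1).
    if input_parallel:
        return max(int(input_parallel), 1)
    if direct_load:
        # bytes >> 20 -> MiB; keep ~0.1% of that, halved. e.g. 64 GiB -> 65536 MiB -> 32.
        parallel_num = int((memory_size >> 20) * 0.001 / 2) if memory_size else 1
    else:
        # Classic load path: one degree of parallelism per tenant CPU across all units.
        parallel_num = int(max_cpu * unit_count)
    return max(parallel_num, 1)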
+ + +from __future__ import absolute_import, division, print_function + + +import re +import os +import time + +try: + import subprocess32 as subprocess +except: + import subprocess +from ssh import LocalClient +from tool import FileUtil + + +stdio = None + + +def exec_cmd(cmd): + stdio.verbose('execute: %s' % cmd) + process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + while process.poll() is None: + line = process.stdout.readline() + line = line.strip() + if line: + stdio.print(line.decode("utf8", 'ignore')) + return process.returncode == 0 + + +def run_test(plugin_context, db, cursor, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key, default) + if value is None: + value = default + return value + + def local_execute_command(command, env=None, timeout=None): + return LocalClient.execute_command(command, env, timeout, stdio) + + global stdio + cluster_config = plugin_context.cluster_config + stdio = plugin_context.stdio + clients = plugin_context.clients + options = plugin_context.options + + optimization = get_option('optimization') > 0 + not_test_only = not get_option('test_only') + + host = get_option('host', '127.0.0.1') + port = get_option('port', 2881) + mysql_db = get_option('database', 'test') + user = get_option('user', 'root') + tenant_name = get_option('tenant', 'test') + password = get_option('password', '') + ddl_path = get_option('ddl_path') + tbl_path = get_option('tbl_path') + sql_path = get_option('sql_path') + tmp_dir = get_option('tmp_dir') + direct_load = get_option('direct_load') + input_parallel = get_option('parallel') + obclient_bin = get_option('obclient_bin', 'obclient') + + sql_path = sorted(sql_path, key=lambda x: (len(x), x)) + + cpu_total = 0 + max_cpu = kwargs.get('max_cpu', 2) + tenant_id = kwargs.get('tenant_id') + unit_count = kwargs.get('unit_count', 0) + memory_size = kwargs.get('memory_size', kwargs.get('min_memory')) + if not_test_only: + sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') + ret = local_execute_command('%s -e "%s"' % (sql_cmd_prefix, 'create database if not exists %s' % mysql_db)) + sql_cmd_prefix += ' -D %s' % mysql_db + if not ret: + stdio.error(ret.stderr) + return + else: + sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -D %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '', mysql_db) + + ret = LocalClient.execute_command('%s -e "%s"' % (sql_cmd_prefix, 'select version();'), stdio=stdio) + if not ret: + stdio.error(ret.stderr) + return + + for server in cluster_config.servers: + client = clients[server] + ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l") + if ret and ret.stdout.strip().isdigit(): + cpu_total += int(ret.stdout) + else: + server_config = cluster_config.get_server_conf(server) + cpu_total += int(server_config.get('cpu_count', 0)) + + try: + sql = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = 'secure_file_priv'" % tenant_id + ret = cursor.fetchone(sql) + if ret is False: + return + ret = ret['value'] + if ret is None: + stdio.error('Access denied. Please set `secure_file_priv` to "\\".') + return + if ret: + for path in tbl_path: + if not path.startswith(ret): + stdio.error('Access denied. 
Please set `secure_file_priv` to "\\".') + return + if input_parallel: + parallel_num = input_parallel + else: + if direct_load: + parallel_num = int(((memory_size) >> 20) * 0.001 / 2) if memory_size else 1 + else: + parallel_num = int(max_cpu * unit_count) + parallel_num = max(parallel_num, 1) + + if not_test_only: + # replace the concurrency placeholder + stdio.start_loading('Format DDL') + n_ddl_path = [] + for fp in ddl_path: + _, fn = os.path.split(fp) + nfp = os.path.join(tmp_dir, fn) + ret = local_execute_command("sed %s -e 's/partitions cpu_num/partitions %d/' > %s" % (fp, cpu_total, nfp)) + if not ret: + raise Exception(ret.stderr) + n_ddl_path.append(nfp) + ddl_path = n_ddl_path + stdio.stop_loading('succeed') + + stdio.start_loading('Create table') + for path in ddl_path: + path = os.path.abspath(path) + stdio.verbose('load %s' % path) + ret = local_execute_command('%s < %s' % (sql_cmd_prefix, path)) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + stdio.start_loading('Load data') + for path in tbl_path: + _, fn = os.path.split(path) + stdio.verbose('load %s' % path) + ret = local_execute_command("""%s -c -e "load data /*+ parallel(%d) %s */ infile '%s' %s into table %s fields terminated by '|' enclosed BY ''ESCAPED BY'';" """ % (sql_cmd_prefix, parallel_num, 'direct(true, 0)' if direct_load else '', path, 'ignore' if direct_load else '', fn[:-4])) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + # Major freeze + stdio.start_loading('Merge') + sql_frozen_scn = "select FROZEN_SCN, LAST_SCN from oceanbase.CDB_OB_MAJOR_COMPACTION where tenant_id = %s" % tenant_id + merge_version = cursor.fetchone(sql_frozen_scn) + if merge_version is False: + return + merge_version = merge_version['FROZEN_SCN'] + if cursor.fetchone("alter system major freeze tenant = %s" % tenant_name) is False: + return + while True: + current_version = cursor.fetchone(sql_frozen_scn) + if current_version is False: + return + current_version = current_version['FROZEN_SCN'] + if int(current_version) > int(merge_version): + break + time.sleep(5) + while True: + ret = cursor.fetchone(sql_frozen_scn) + if ret is False: + return + if int(ret.get("FROZEN_SCN", 0)) / 1000 == int(ret.get("LAST_SCN", 0)) / 1000: + break + time.sleep(5) + # analyze + local_dir, _ = os.path.split(__file__) + analyze_path = os.path.join(local_dir, 'analyze.sql') + with FileUtil.open(analyze_path, stdio=stdio) as f: + content = f.read() + analyze_content = content.format(cpu_total=cpu_total, database=mysql_db) + ret = LocalClient.execute_command('%s -e """%s"""' % (sql_cmd_prefix, analyze_content), stdio=stdio) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + # replace the concurrency placeholder + stdio.start_loading('Format SQL') + n_sql_path = [] + for fp in sql_path: + _, fn = os.path.split(fp) + nfp = os.path.join(tmp_dir, fn) + ret = local_execute_command("sed %s -e 's/parallel(cpu_num)/parallel(%d)/' > %s" % (fp, cpu_total, nfp)) + if not ret: + raise Exception(ret.stderr) + n_sql_path.append(nfp) + sql_path = n_sql_path + stdio.stop_loading('succeed') + + # warmup + stdio.start_loading('Warmup') + for path in sql_path: + _, fn = os.path.split(path) + log_path = os.path.join(tmp_dir, '%s.log' % fn) + ret = local_execute_command('echo source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path)) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + total_cost = 0 + for path in sql_path: + start_time = time.time() + _, fn = os.path.split(path) + log_path =
os.path.join(tmp_dir, '%s.log' % fn) + stdio.print('[%s]: start %s' % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time)), path)) + ret = local_execute_command('echo source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path)) + end_time = time.time() + cost = end_time - start_time + total_cost += cost + stdio.print('[%s]: end %s, cost %.2fs' % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time)), path, cost)) + if not ret: + raise Exception(ret.stderr) + stdio.print('Total Cost: %.2fs' % total_cost) + return plugin_context.return_true() + except KeyboardInterrupt: + stdio.stop_loading('fail') + except Exception as e: + stdio.stop_loading('fail') + stdio.exception(str(e)) diff --git a/requirements.txt b/requirements.txt index 8711b56..e59a3b7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,4 @@ inspect2==0.1.2 six==1.16.0 pyinstaller==3.6 bcrypt==3.1.7 -zstandard==0.14.1 \ No newline at end of file +zstandard==0.14.1 diff --git a/rpm/ob-deploy.spec b/rpm/ob-deploy.spec index 2adcaf0..a3ced27 100644 --- a/rpm/ob-deploy.spec +++ b/rpm/ob-deploy.spec @@ -70,7 +70,7 @@ mkdir -p ${RPM_BUILD_ROOT}/usr/obd pip install -r plugins-requirements3.txt --target=$BUILD_DIR/SOURCES/site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com pip install -r service/service-requirements.txt --target=$BUILD_DIR/SOURCES/site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com # pyinstaller -y --clean -n obd-web -p $BUILD_DIR/SOURCES/site-packages -F service/app.py -pyinstaller --hidden-import=decimal -p $BUILD_DIR/SOURCES/site-packages --hidden-import service/app.py --hidden-import=configparser -F obd.py +pyinstaller --hidden-import=decimal -p $BUILD_DIR/SOURCES/site-packages --hidden-import service/app.py --hidden-import=configparser --hidden-import=Crypto.Hash.SHA --hidden-import=Crypto.PublicKey.RSA --hidden-import=Crypto.Signature.PKCS1_v1_5 --hidden-import=Crypto.Cipher.PKCS1_OAEP -F obd.py rm -f obd.py obd.spec \mkdir -p $BUILD_DIR/SOURCES/web \cp -rf $SRC_DIR/dist/obd ${RPM_BUILD_ROOT}/usr/bin/obd diff --git a/service/api/v1/common.py b/service/api/v1/common.py index 6dcb7bb..ea0b054 100644 --- a/service/api/v1/common.py +++ b/service/api/v1/common.py @@ -48,3 +48,16 @@ async def keep_alive(token: str = Query(None, description='token'), is_clear: bool = Query(False, description='is need clear token')): handler = handler_utils.new_common_handler() return response_utils.new_ok_response(handler.keep_alive(token, overwrite, is_clear)) + + +@router.get("/keys/rsa/public", + response_model=OBResponse, + description='rsa public key', + operation_id='rsaPublicKey', + tags=['Common']) +async def public_key(): + handler = handler_utils.new_rsa_handler() + key, err = handler.public_key_to_bytes() + if err: + return response_utils.new_internal_server_error_exception(Exception('get rsa public key failed')) + return response_utils.new_ok_response(key) diff --git a/service/api/v1/component_change.py b/service/api/v1/component_change.py new file mode 100644 index 0000000..1e03d2c --- /dev/null +++ b/service/api/v1/component_change.py @@ -0,0 +1,316 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy.
+# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + +from fastapi import APIRouter, Path, Query, BackgroundTasks, Body +from typing import List + +from service.api import response_utils +from service.api.response import OBResponse, DataList +from service.handler import handler_utils +from service.model.deployments import TaskInfo +from service.model.deployments import InstallLog, PreCheckResult +from service.model.metadb import RecoverChangeParameter +from service.model.service_info import DeployName +from service.model.component_change import ComponentChangeMode, ComponentChangeInfo, ComponentChangeConfig, ComponentsChangeInfoDisplay, ComponentsServer, ComponentDepends, ConfigPath + +router = APIRouter() + + +@router.get("/component_change/deployment", + response_model=OBResponse[DataList[DeployName]], + description='get scale_out/component_add deployments name', + operation_id='ComponentChangeDeploymentsName', + tags=['ComponentChange']) +async def get_deployments(): + try: + handler = handler_utils.new_component_change_handler() + deploy_names = handler.get_deployments_name() + return response_utils.new_ok_response(deploy_names) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + + +@router.get("/component_change/deployment/detail", + response_model=OBResponse[ComponentChangeInfo], + description='get scale_out/component_add deployments info', + operation_id='ComponentChangeDeploymentsInfo', + tags=['ComponentChange']) +async def get_deployments(name=Query(..., description='query deployment name')): + try: + handler = handler_utils.new_component_change_handler() + deploy_info = handler.get_deployment_info(name) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + if deploy_info: + return response_utils.new_ok_response(deploy_info) + else: + return response_utils.new_bad_request_exception(Exception(f'Component Change: {name} get deployment info failed')) + + +@router.get("/component_change/deployment/depends", + response_model=OBResponse[DataList[ComponentDepends]], + description='get scale_out/component_add deployments info', + operation_id='ComponentChangeDeploymentsInfo', + tags=['ComponentChange']) +async def get_deployment_depends(name=Query(..., description='query deployment name')): + try: + handler = handler_utils.new_component_change_handler() + deploy_info = handler.get_deployment_depends(name) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + if deploy_info: + return response_utils.new_ok_response(deploy_info) + else: + return response_utils.new_bad_request_exception(Exception(f'Component Change: {name} get deployment info failed')) + + +@router.post("/component_change/{name}/deployment", + response_model=OBResponse, + description='create scale_out/component_add config', + operation_id='ComponentChangeConfig', + tags=['ComponentChange']) +async def create_deployment( + name: str = 
Path(description='name'), + config: ComponentChangeConfig = ..., +): + handler = handler_utils.new_component_change_handler() + try: + path = handler.create_component_change_path(name, config) + ret = handler.create_component_change_deployment(name, path, config.mode) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + if ret: + return response_utils.new_ok_response(ret) + else: + return response_utils.new_bad_request_exception(Exception(f'Component Change: {name} generate config failed')) + + +@router.post("/component_change/{name}/precheck", + response_model=OBResponse, + description='precheck for scale_out/component_add deployment', + operation_id='PrecheckComponentChange', + tags=['ComponentChange']) +async def precheck_component_change_deployment( + background_tasks: BackgroundTasks, + name: str = Path(description="deployment name") +): + try: + handler = handler_utils.new_component_change_handler() + ret = handler.component_change_precheck(name, background_tasks) + if not isinstance(ret, TaskInfo) and ret: + return response_utils.new_internal_server_error_exception(str(ret[1].args[0])) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + return response_utils.new_ok_response(ret) + + +@router.get("/component_change/{name}/precheck", + response_model=OBResponse[PreCheckResult], + description='get result of scale_out/component_add precheck', + operation_id='PrecheckComponentChangeRes', + tags=['ComponentChange']) +async def get_component_change_precheck_task( + name: str = Path(description="deployment name") +): + handler = handler_utils.new_component_change_handler() + try: + precheck_result = handler.get_precheck_result(name) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + return response_utils.new_ok_response(precheck_result) + + +@router.post("/component_change/{name}/recover", + response_model=OBResponse[DataList[RecoverChangeParameter]], + description='recover scale_out/component_add config', + operation_id='RecoverComponentChange', + tags=['ComponentChange']) +async def recover_deployment( + name: str = Path(description="deployment name"), +): + handler = handler_utils.new_component_change_handler() + try: + recover_result = handler.recover(name) + return response_utils.new_ok_response(recover_result) + except Exception as ex: + return response_utils.new_internal_server_error_exception(ex) + + +@router.post("/component_change/{name}", + response_model=OBResponse, + description='component change', + operation_id='ComponentChange', + tags=['ComponentChange']) +async def component_change( + background_tasks: BackgroundTasks, + name: str = Path(description="deployment name"), + mode: ComponentChangeMode = Body(description="mode") +): + handler = handler_utils.new_component_change_handler() + if mode.mode == 'add_component': + handler.add_components(name, background_tasks) + if mode.mode == 'scale_out': + handler.scale_out(name, background_tasks) + return response_utils.new_ok_response(True) + + +@router.get("/component_change/{name}/component_change", + response_model=OBResponse[TaskInfo], + description='get task res of component change', + operation_id='ComponentChangeTask', + tags=['ComponentChange']) +async def get_component_change_task( + name: str = Path(description="deployment name") +): + handler = handler_utils.new_component_change_handler() + task_info = handler.get_component_change_task_info(name) + if not isinstance(task_info, TaskInfo): + return 
response_utils.new_internal_server_error_exception("task {0} not found".format(name)) + return response_utils.new_ok_response(task_info) + + +@router.get("/component_change/{name}/component_change/log", + response_model=OBResponse[InstallLog], + description='get log of component change', + operation_id='ComponentChangeLog', + tags=['ComponentChange']) +async def get_component_change_log( + name: str = Path(description="deployment name"), + offset: int = Query(0, description="offset to read task log"), + components: List[str] = Query(None, description='component name') +): + handler = handler_utils.new_component_change_handler() + task_info = handler.get_component_change_task_info(name) + if task_info is None: + return response_utils.new_internal_server_error_exception("task {0} not found".format(name)) + log_content = handler.buffer.read() if components is None else handler.get_component_change_log_by_component(components, 'add_component') + log_info = InstallLog(log=log_content[offset:], offset=len(log_content)) + return response_utils.new_ok_response(log_info) + + +@router.post("/component_change/{name}/display", + response_model=OBResponse[ComponentsChangeInfoDisplay], + description='del component with node check', + operation_id='ComponentChangeNodeCheck', + tags=['ComponentChange']) +async def get_component_change_detail( + name: str = Path(description="deployment name"), +): + handler = handler_utils.new_component_change_handler() + try: + info = handler.get_component_change_detail(name) + except Exception as ex: + raise response_utils.new_internal_server_error_exception(ex) + return response_utils.new_ok_response(info) + + +@router.post("/component_change/{name}/node/check", + response_model=OBResponse[ComponentsServer], + description='del component with node check', + operation_id='ComponentChangeNodeCheck', + tags=['ComponentChange']) +async def node_check( + name: str = Path(description="deployment name"), + components: List[str] = Query(description="component name"), +): + handler = handler_utils.new_component_change_handler() + try: + info = handler.node_check(name, components) + except Exception as ex: + raise response_utils.new_internal_server_error_exception(ex) + return response_utils.new_ok_response(info) + + +@router.delete("/component_change/{name}", + response_model=OBResponse, + description='del componnet', + operation_id='ComponentChangeDelComponent', + tags=['ComponentChange']) +async def del_component( + background_tasks: BackgroundTasks, + name: str = Path(description="deployment name"), + components: List[str] = Query(description="component name"), + force: bool = Query(description="force") +): + handler = handler_utils.new_component_change_handler() + try: + info = handler.del_component(name, components, force, background_tasks) + except Exception as ex: + raise response_utils.new_internal_server_error_exception(ex) + return response_utils.new_ok_response(info) + + +@router.get("/component_change/{name}/del_component", + response_model=OBResponse, + description='get task res of component change', + operation_id='ComponentChangeTask', + tags=['ComponentChange']) +async def get_del_component_change_task( + name: str = Path(description="deployment name"), +): + handler = handler_utils.new_component_change_handler() + task_info = handler.get_del_component_task_info(name) + if not isinstance(task_info, TaskInfo): + return response_utils.new_internal_server_error_exception("task {0} not found".format(name)) + return response_utils.new_ok_response(task_info) + + 
+@router.get("/component_change/{name}/del", + response_model=OBResponse, + description='get del component task', + operation_id='ComponentChangeDelComponentTask', + tags=['ComponentChange']) +async def get_del_component_log( + name: str = Path(description="deployment name"), + offset: int = Query(0, description="offset to read task log"), + components: List[str] = Query(description="component name"), +): + handler = handler_utils.new_component_change_handler() + task_info = handler.get_del_component_task_info(name) + if task_info is None: + return response_utils.new_internal_server_error_exception("task {0} not found".format(name)) + log_content = handler.buffer.read() if components is None else handler.get_component_change_log_by_component(components, 'del_component') + return response_utils.new_ok_response(log_content) + + +@router.post("/component_change/{name}/remove", + response_model=OBResponse, + description='remove component', + operation_id='RemoveComponent', + tags=['ComponentChange']) +async def remove_component( + name: str = Path(description="deployment name"), + components: List[str] = Query(description="component name List"), +): + handler = handler_utils.new_component_change_handler() + info = handler.remove_component(name, components) + return response_utils.new_ok_response(info) + + +@router.get("/component_change/{name}/path", + response_model=OBResponse[ConfigPath], + description='get config path', + operation_id='GetConfigPath', + tags=['ComponentChange']) +async def get_config_path( + name: str = Path(description="deployment name") +): + handler = handler_utils.new_component_change_handler() + info = handler.get_config_path(name) + return response_utils.new_ok_response(info) \ No newline at end of file diff --git a/service/api/v1/deployments.py b/service/api/v1/deployments.py index 53bc19e..2d33355 100644 --- a/service/api/v1/deployments.py +++ b/service/api/v1/deployments.py @@ -17,13 +17,13 @@ # You should have received a copy of the GNU General Public License # along with OceanBase Deploy. If not, see . 
-from fastapi import APIRouter, Path, Query, BackgroundTasks +from fastapi import APIRouter, Path, Query, BackgroundTasks, Request from service.api import response_utils from service.api.response import OBResponse, DataList from service.handler import handler_utils from service.model.deployments import DeploymentConfig, PreCheckResult, RecoverChangeParameter, TaskInfo, \ - ConnectionInfo, InstallLog, Deployment, DeploymentInfo, DeploymentReport, DeploymentStatus + ConnectionInfo, InstallLog, Deployment, DeploymentInfo, DeploymentReport, DeploymentStatus, ScenarioType router = APIRouter() @@ -203,6 +203,24 @@ async def get_destroy_task_info(name: str): handler = handler_utils.new_deployment_handler() info = handler.get_destroy_task_info(name) return response_utils.new_ok_response(info) + + +@router.get("/deployments/scenario/type", + response_model=OBResponse[DataList[ScenarioType]], + description='get scenario', + operation_id='getScenario', + tags=['Deployments']) +async def get_destroy_task_info( + request: Request, + version: str = Query(None, description='ob version') +): + headers = request.headers + language = headers.get('accept-language') + handler = handler_utils.new_deployment_handler() + info = handler.get_scenario_by_version(version, language) + return response_utils.new_ok_response(info) + + @router.get("/deployments_test", response_model=OBResponse, description='get destroy task info', diff --git a/service/api/v1/service_info.py b/service/api/v1/service_info.py index 2468328..2f149d7 100644 --- a/service/api/v1/service_info.py +++ b/service/api/v1/service_info.py @@ -22,7 +22,7 @@ from service.api import response_utils from service.api.response import OBResponse from service.handler import handler_utils -from service.model.service_info import ServiceInfo, DeployName +from service.model.service_info import ServiceInfo, DeployNames from service.model.database import DatabaseConnection from service.model.server import OcpServerInfo @@ -41,7 +41,7 @@ async def get_info(): @router.get("/deployment/names", - response_model=OBResponse[DeployName], + response_model=OBResponse[DeployNames], description='get deployment names', operation_id='getDeploymentNames', tags=['Info']) diff --git a/service/app.py b/service/app.py index 4b7ede7..5fe285c 100644 --- a/service/app.py +++ b/service/app.py @@ -40,6 +40,7 @@ from service.api.v1 import ocp_deployments from service.api.v1 import metadb from service.api.v1 import installer +from service.api.v1 import component_change from const import DISABLE_SWAGGER @@ -64,7 +65,9 @@ def __init__(self, obd, white_ip_list=None, resource_path="./"): self.app.include_router(ocp_deployments.router, prefix='/api/v1') self.app.include_router(metadb.router, prefix='/api/v1') self.app.include_router(installer.router, prefix='/api/v1') + self.app.include_router(component_change.router, prefix='/api/v1') self.app.add_middleware(IdleShutdownMiddleware, logger=log.get_logger(), idle_time_before_shutdown=IDLE_TIME_BEFORE_SHUTDOWN) + self.app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_credentials=True, allow_methods=['*'], allow_headers=['*']) self.app.add_middleware(IPBlockMiddleware, ips=white_ip_list) self.app.add_middleware(ProcessTimeMiddleware) self.app.add_middleware(RequestResponseLogMiddleware, logger=log.get_logger()) diff --git a/service/common/const.py b/service/common/const.py index 4fbbbf8..6d5c9b9 100644 --- a/service/common/const.py +++ b/service/common/const.py @@ -49,6 +49,8 @@ DESTROY_PLUGIN = "destroy" INIT_PLUGINS = ("init",) 
START_PLUGINS = ("start_check", "start", "connect", "bootstrap", "display") +DEL_COMPONENT_PLUGINS = ("stop", "destroy") +CHANGED_COMPONENTS = ('obproxy-ce', 'obagent', 'ocp-express', 'ob-configserver', 'prometheus', 'grafana') UPGRADE_PLUGINS = ("upgrade") # filter component of oceanbase and obproxy version above 4.0 VERSION_FILTER = { diff --git a/service/handler/component_change_handler.py b/service/handler/component_change_handler.py new file mode 100644 index 0000000..9c51a50 --- /dev/null +++ b/service/handler/component_change_handler.py @@ -0,0 +1,1000 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + +import copy +import json +import yaml +import tempfile +from optparse import Values +from uuid import uuid1 as uuid +from singleton_decorator import singleton + +from _rpm import Version +from _plugin import PluginType +from _errno import CheckStatus, FixEval +from collections import defaultdict +from const import COMP_JRE +from ssh import LocalClient +from _mirror import MirrorRepositoryType +from _deploy import DeployStatus, DeployConfigStatus +from service.handler.base_handler import BaseHandler +from service.handler.rsa_handler import RSAHandler +from service.common import log, task, const, util +from service.common.task import Serial as serial +from service.common.task import AutoRegister as auto_register +from service.model.service_info import DeployName +from service.common.task import TaskStatus, TaskResult +from service.model.components import ComponentInfo +from service.model.metadb import RecoverChangeParameter +from service.model.deployments import Parameter, PrecheckTaskResult, TaskInfo, PreCheckInfo, RecoverAdvisement, PreCheckResult, ComponentInfo as DeployComponentInfo +from service.model.component_change import ComponentChangeInfo, BestComponentInfo, ComponentChangeConfig, ComponentsChangeInfoDisplay, ComponentChangeInfoDisplay, ComponentServer, ComponentsServer, ComponentLog, ComponentDepends, ConfigPath + + +@singleton +class ComponentChangeHandler(BaseHandler): + + def get_components(self, component_filter=const.VERSION_FILTER): + local_packages = self.obd.mirror_manager.local_mirror.get_all_pkg_info() + remote_packages = list() + remote_mirrors = self.obd.mirror_manager.get_remote_mirrors() + for mirror in remote_mirrors: + remote_packages.extend(mirror.get_all_pkg_info()) + local_packages.sort() + remote_packages.sort() + local_pkg_idx = len(local_packages) - 1 + remote_pkg_idx = len(remote_packages) - 1 + component_dict = defaultdict(list) + while local_pkg_idx >= 0 and remote_pkg_idx >= 0: + local_pkg = local_packages[local_pkg_idx] + remote_pkg = remote_packages[remote_pkg_idx] + if local_pkg >= remote_pkg: + size = getattr(local_pkg, 'size', const.PKG_ESTIMATED_SIZE[local_pkg.name]) + size = const.PKG_ESTIMATED_SIZE[local_pkg.name] if not size else size + 
component_dict[local_pkg.name].append( + ComponentInfo(version=local_pkg.version, md5=local_pkg.md5, release=local_pkg.release, + arch=local_pkg.arch, type=MirrorRepositoryType.LOCAL.value, + estimated_size=size)) + local_pkg_idx -= 1 + else: + if len(component_dict[remote_pkg.name]) > 0 and component_dict[remote_pkg.name][-1].md5 == remote_pkg.md5: + log.get_logger().debug("already found local package %s", remote_pkg) + else: + size = getattr(remote_pkg, 'size', const.PKG_ESTIMATED_SIZE[remote_pkg.name]) + size = const.PKG_ESTIMATED_SIZE[remote_pkg.name] if not size else size + component_dict[remote_pkg.name].append( + ComponentInfo(version=remote_pkg.version, md5=remote_pkg.md5, release=remote_pkg.release, + arch=remote_pkg.arch, type=MirrorRepositoryType.REMOTE.value, + estimated_size=size)) + remote_pkg_idx -= 1 + if local_pkg_idx >= 0: + for pkg in local_packages[local_pkg_idx::-1]: + size = getattr(pkg, 'size', const.PKG_ESTIMATED_SIZE[pkg.name]) + size = const.PKG_ESTIMATED_SIZE[pkg.name] if not size else size + component_dict[pkg.name].append( + ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.LOCAL.value, + estimated_size=size)) + if remote_pkg_idx >= 0: + for pkg in remote_packages[remote_pkg_idx::-1]: + size = getattr(pkg, 'size', const.PKG_ESTIMATED_SIZE[pkg.name]) + size = const.PKG_ESTIMATED_SIZE[pkg.name] if not size else size + component_dict[pkg.name].append( + ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.REMOTE.value, + estimated_size=size)) + for component, version in component_filter.items(): + if component in component_dict.keys(): + log.get_logger().debug("filter component: {0} above version: {1}".format(component, version)) + log.get_logger().debug("original components: {0}".format(component_dict[component])) + component_dict[component] = list(filter(lambda c: Version(c.version) >= Version(version), component_dict[component])) + log.get_logger().debug("filtered components: {0}".format(component_dict[component])) + return component_dict + + def get_deployments_name(self): + deploys = self.obd.deploy_manager.get_deploy_configs() + log.get_logger().info('deploys: %s' % deploys) + ret = [] + for deploy in deploys: + deploy_config = deploy.deploy_config + deploy_info = deploy.deploy_info + if deploy.deploy_info.status == DeployStatus.STATUS_RUNNING and deploy.deploy_info.config_status == DeployConfigStatus.UNCHNAGE: + create_data = deploy_info.create_date if deploy_info.create_date else '' + if const.OCEANBASE in deploy_config.components.keys(): + cluster_config = deploy_config.components[const.OCEANBASE] + deploy_name = DeployName(name=deploy.name, deploy_user=deploy_config.user.username, ob_servers=[server.ip for server in cluster_config.servers], ob_version=deploy_info.components[const.OCEANBASE]['version'], create_date=create_data) + self.context['ob_servers'][deploy.name] = cluster_config.servers + ret.append(deploy_name) + if const.OCEANBASE_CE in deploy_config.components.keys(): + cluster_config = deploy_config.components[const.OCEANBASE_CE] + deploy_name = DeployName(name=deploy.name, deploy_user=deploy_config.user.username, ob_servers=[server.ip for server in cluster_config.servers], ob_version=deploy_info.components[const.OCEANBASE_CE]['version'], create_date=create_data) + self.context['ob_servers'][deploy.name] = cluster_config.servers + ret.append(deploy_name) + return ret + + def get_deployment_info(self, name): + deploy = 
self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + + deploy_config = deploy.deploy_config + deploy_info = deploy.deploy_info + components = deploy_config.components.keys() + component_change_info = ComponentChangeInfo(component_list=[]) + for component in components: + version = deploy_info.components[component]['version'] + cluster_config = deploy_config.components[component] + if component == const.OCEANBASE or component == const.OCEANBASE_CE: + default_config = cluster_config.get_global_conf_with_default() + appname = default_config.get('appname', '') + self.context['ob_component'][name] = component + self.context['ob_version'][name] = version + self.context['appname'][name] = appname + continue + component_change_info.component_list.append(BestComponentInfo(component_name=component, version=version, deployed=1, node=', '.join([server.ip for server in cluster_config.servers]))) + + component_dict = self.get_components() + undeployed_components = set(list(const.CHANGED_COMPONENTS)) - set(components) + for component in undeployed_components: + component_change_info.component_list.append(BestComponentInfo(component_name=component, deployed=0, component_info=component_dict[component])) + self.context['component_change_info'][name] = component_change_info + return component_change_info + + def get_deployment_depends(self, name): + deploy = self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + deploy_config = deploy.deploy_config + components = deploy_config.components.keys() + component_depends = [] + for component in components: + cluster_config = deploy_config.components[component] + depends = list(cluster_config.depends) + component_depends.append(ComponentDepends(component_name=component, depends=depends)) + return component_depends + + def generate_component_config(self, config, component_name, ext_keys=[], depend_component=[], ob_servers=None): + comp_config = dict() + input_comp_config = getattr(config, component_name) + config_dict = input_comp_config.dict() + for key in config_dict: + if config_dict[key] and key in {'servers', 'version', 'package_hash', 'release'}: + if ob_servers and key == 'servers': + config_dict['servers'] = list() + for server in ob_servers: + if server._name: + config_dict['servers'].append({'name': server.name, 'ip': server.ip}) + else: + config_dict['servers'].append(server.ip) + comp_config[key] = config_dict[key] + + if 'global' not in comp_config.keys(): + comp_config['global'] = dict() + + comp_config['global']['home_path'] = config.home_path + '/' + component_name + for key in ext_keys: + if config_dict[key]: + if key == 'admin_passwd': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + comp_config['global'][key] = passwd + continue + if key == 'obproxy_sys_password': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + comp_config['global'][key] = passwd + continue + comp_config['global'][key] = config_dict[key] + + if depend_component: + comp_config['depends'] = list() + comp_config['depends'].extend(depend_component) + + if input_comp_config.parameters: + for parameter in input_comp_config.parameters: + if not parameter.adaptive: + if parameter.key == 'http_basic_auth_password': + passwd = RSAHandler().decrypt_private_key(parameter.value) + comp_config['global'][parameter.key] = passwd + continue + comp_config['global'][parameter.key] = parameter.value + return comp_config + + def create_component_change_path(self, name, config, mem_save=True): + cluster_config
= {} + + if config.obconfigserver: + cluster_config[config.obconfigserver.component] = self.generate_component_config(config, 'obconfigserver', ['listen_port']) + if config.obproxy: + if not config.obproxy.cluster_name and self.context['appname'][name]: + config.obproxy.cluster_name = self.context['appname'][name] + cluster_config[config.obproxy.component] = self.generate_component_config(config, 'obproxy', ['cluster_name', 'prometheus_listen_port', 'listen_port', 'rpc_listen_port', 'obproxy_sys_password'], [self.context['ob_component'][name]]) + if config.obagent: + cluster_config[config.obagent.component] = self.generate_component_config(config, config.obagent.component, ['monagent_http_port', 'mgragent_http_port'], [self.context['ob_component'][name]], self.context['ob_servers'][name]) + if config.ocpexpress: + depend_component = [self.context['ob_component'][name]] + if config.obproxy or const.OBPROXY_CE in self.obd.deploy.deploy_config.components: + depend_component.append(const.OBPROXY_CE) + if const.OBPROXY in self.obd.deploy.deploy_config.components: + depend_component.append(const.OBPROXY) + if config.obagent or const.OBAGENT in self.obd.deploy.deploy_config.components: + depend_component.append(const.OBAGENT) + cluster_config[config.ocpexpress.component] = self.generate_component_config(config, 'ocpexpress', ['port', 'admin_passwd'], depend_component) + + with tempfile.NamedTemporaryFile(delete=False, prefix="component_change", suffix=".yaml", mode="w", encoding="utf-8") as f: + f.write(yaml.dump(cluster_config, sort_keys=False)) + cluster_config_yaml_path = f.name + log.get_logger().info('dump config from path: %s' % cluster_config_yaml_path) + if mem_save: + self.context['component_change_deployment_info'][name] = config + self.context['component_change_path'][name] = cluster_config_yaml_path + return cluster_config_yaml_path + + def create_component_change_deployment(self, name, path, mode, mem_save=True): + deploy = self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + deploy_config = self.obd.deploy.deploy_config + deploy_info = self.obd.deploy.deploy_info + deploy_config.set_undumpable() + self.context['mode'][name] = mode + if mode == 'add_component': + current_repositories = self.obd.load_local_repositories(deploy_info) + self.obd.set_repositories(current_repositories) + self.obd.search_param_plugin_and_apply(current_repositories, deploy_config) + if not deploy_config.add_components(path): + raise Exception('add component failed') + self.obd.set_deploy(deploy) + if mem_save: + self.context['new_obd'][name] = self.obd.fork(deploy=self.obd.deploy_manager.create_deploy_config(name + 'component_change', path)) + return True + if mode == 'scale_out': + pass + + def get_config_path(self, name): + return ConfigPath(config_path=self.context['component_change_path'][name]) + + @serial("component_change_precheck") + def component_change_precheck(self, name, background_tasks): + task_manager = task.get_task_manager() + task_info = task_manager.get_task_info(name, task_type="component_change_precheck") + if task_info is not None and task_info.status != TaskStatus.FINISHED: + raise Exception(f"task {name} exists and not finished") + + deploy_config = self.obd.deploy.deploy_config + pkgs, repositories, errors = self.obd.search_components_from_mirrors(deploy_config, only_info=True, components=deploy_config.added_components) + if errors: + raise Exception("{}".format('\n'.join(errors))) + repositories.extend(pkgs) + repositories = 
self.obd.sort_repository_by_depend(repositories, deploy_config) + self.context['new_obd'][name].set_repositories(repositories) + self.context['origin_repository'][name] = self.obd.repositories + all_repositories = self.obd.repositories + repositories + self.obd.set_repositories(all_repositories) + self.obd._call_stdio('start_loading', 'Get added repositories and plugins') + self.obd.search_param_plugin_and_apply(repositories, deploy_config) + self.obd._call_stdio('stop_loading', 'succeed') + + start_check_plugins = self.obd.search_py_script_plugin(repositories, 'start_check', no_found_act='warn') + self._precheck(name, repositories, start_check_plugins, init_check_status=True) + info = task_manager.get_task_info(name, task_type="component_change_precheck") + if info is not None and info.exception is not None: + exception = copy.deepcopy(info.exception) + info.exception = None + raise exception + task_manager.del_task_info(name, task_type="component_change_precheck") + background_tasks.add_task(self._precheck, name, repositories, start_check_plugins, init_check_status=False) + self.obd.set_deploy(self.obd.deploy) + + def _init_check_status(self, check_key, servers, check_result={}): + check_status = defaultdict(lambda: defaultdict(lambda: None)) + for server in servers: + if server in check_result: + status = check_result[server] + else: + status = CheckStatus() + check_status[server] = {check_key: status} + return check_status + + @auto_register('component_change_precheck') + def _precheck(self, name, repositories, start_check_plugins, init_check_status=False): + if init_check_status: + self._init_precheck(repositories, start_check_plugins) + else: + self._do_precheck(repositories, start_check_plugins) + + def _init_precheck(self, repositories, start_check_plugins): + log.get_logger().info('init precheck') + param_check_status = {} + servers_set = set() + for repository in repositories: + if repository not in start_check_plugins: + continue + repository_status = {} + res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=True, work_dir_check=True, clients={}) + if not res and res.get_return("exception"): + raise res.get_return("exception") + servers = self.obd.deploy.deploy_config.components.get(repository.name).servers + for server in servers: + repository_status[server] = {'param': CheckStatus()} + servers_set.add(server) + param_check_status[repository.name] = repository_status + + self.context['component_change_deployment']['param_check_status'] = param_check_status + server_connect_status = {} + for server in servers_set: + server_connect_status[server] = {'ssh': CheckStatus()} + self.context['component_change_deployment']['connect_check_status'] = {'ssh': server_connect_status} + self.context['component_change_deployment']['servers_set'] = servers_set + + def _do_precheck(self, repositories, start_check_plugins): + self.context['component_change_deployment']['check_pass'] = True + log.get_logger().info('start precheck') + log.get_logger().info('ssh check') + ssh_clients, connect_status = self.obd.get_clients_with_connect_status(self.obd.deploy.deploy_config, repositories, fail_exit=False) + log.get_logger().info('connect_status: %s' % connect_status) + check_status = self._init_check_status('ssh', self.context['component_change_deployment']['servers_set'], connect_status) + self.context['component_change_deployment']['connect_check_status'] = {'ssh': check_status}
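+ # connect_status maps each server to a CheckStatus; any SSH failure below fails the whole precheck early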
+ for k, v in connect_status.items(): + if v.status == v.FAIL: + self.context['component_change_deployment_ssh']['ssh'] = False + log.get_logger().info('ssh check failed') + return + log.get_logger().info('ssh check succeed') + gen_config_plugins = self.obd.search_py_script_plugin(repositories, 'generate_config') + if len(repositories) != len(gen_config_plugins): + raise Exception("param_check: config error, check stopped!") + + param_check_status, check_pass = self.obd.deploy_param_check_return_check_status(repositories, self.obd.deploy.deploy_config, gen_config_plugins=gen_config_plugins) + param_check_status_result = {} + for comp_name in param_check_status: + status_res = param_check_status[comp_name] + param_check_status_result[comp_name] = self._init_check_status('param', status_res.keys(), status_res) + self.context['component_change_deployment']['param_check_status'] = param_check_status_result + + log.get_logger().debug('precheck param check status: %s' % param_check_status) + log.get_logger().debug('precheck param check status res: %s' % check_pass) + if not check_pass: + self.context['component_change_deployment']['check_pass'] = False + return + + components = list(self.obd.deploy.deploy_config.components.keys()) + for repository in repositories: + ret = self.obd.call_plugin(gen_config_plugins[repository], repository, generate_check=False, generate_consistent_config=True, auto_depend=True, components=components) + if ret is None: + raise Exception("generate config error") + elif not ret and ret.get_return("exception"): + raise ret.get_return("exception") + + log.get_logger().info('generate config succeed') + ssh_clients = self.obd.get_clients(self.obd.deploy.deploy_config, repositories) + for repository in repositories: + log.get_logger().info('begin start_check: %s' % repository.name) + java_check = True + if repository.name == const.OCP_EXPRESS: + jre_name = COMP_JRE + install_plugin = self.obd.search_plugin(repository, PluginType.INSTALL) + if install_plugin and jre_name in install_plugin.requirement_map(repository): + version = install_plugin.requirement_map(repository)[jre_name].version + min_version = install_plugin.requirement_map(repository)[jre_name].min_version + max_version = install_plugin.requirement_map(repository)[jre_name].max_version + if len(self.obd.search_images(jre_name, version=version, min_version=min_version, max_version=max_version)) > 0: + java_check = False + res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=False, work_dir_check=True, precheck=True, java_check=java_check, clients=ssh_clients) + if not res and res.get_return("exception"): + raise res.get_return("exception") + log.get_logger().info('end start_check: %s' % repository.name) + + def get_precheck_result(self, name): + precheck_result = PreCheckResult() + deploy = self.obd.deploy + if not deploy: + deploy = self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + components = deploy.deploy_config._added_components + info = [] + total = 0 + finished = 0 + all_passed = True + param_check_status = None + connect_check_status = None + if 'component_change_deployment' in self.context.keys(): + param_check_status = self.context['component_change_deployment']['param_check_status'] + connect_check_status = self.context['component_change_deployment']['connect_check_status'] + connect_check_status_flag = True
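+ # merge each added component's start_check results with the cached param and ssh check status into one view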
+ for component in components: + namespace_union = {} + namespace = self.obd.get_namespace(component) + if namespace: + variables = namespace.variables + if 'start_check_status' in variables.keys(): + namespace_union = util.recursive_update_dict(namespace_union, variables.get('start_check_status')) + if param_check_status is not None: + namespace_union = util.recursive_update_dict(namespace_union, param_check_status[component]) + if connect_check_status is not None and connect_check_status_flag and 'ssh' in connect_check_status.keys(): + namespace_union = util.recursive_update_dict(namespace_union, connect_check_status['ssh']) + connect_check_status_flag = False + + if namespace_union: + for server, result in namespace_union.items(): + if result is None: + log.get_logger().warn("precheck for server: {} is None".format(server.ip)) + continue + all_passed, finished, total = self.parse_precheck_result(all_passed, component, finished, info, server, total, result) + info.sort(key=lambda p: p.status) + + task_info = task.get_task_manager().get_task_info(name, task_type="precheck") + if task_info is not None: + if task_info.status == TaskStatus.FINISHED: + precheck_result.status = task_info.result + if task_info.result == TaskResult.FAILED: + precheck_result.message = '{}'.format(task_info.exception) + else: + precheck_result.status = TaskResult.RUNNING + precheck_result.info = info + precheck_result.total = total + if total == 0: + all_passed = False + precheck_result.all_passed = all_passed + precheck_result.finished = total if precheck_result.status == TaskResult.SUCCESSFUL else finished + if total == finished: + precheck_result.status = TaskResult.SUCCESSFUL + if not all_passed and (self.context['component_change_deployment']['check_pass'] is False or self.context['component_change_deployment_ssh']['ssh'] is False) and precheck_result.finished >= len(components): + precheck_result.status = TaskResult.SUCCESSFUL + return precheck_result + + def parse_precheck_result(self, all_passed, component, finished, info, server, total, result): + for k, v in result.items(): + total += 1 + check_info = PreCheckInfo(name='{}:{}'.format(component, k), server=server.ip) + if v.status == v.PASS: + check_info.result = PrecheckTaskResult.PASSED + check_info.status = TaskStatus.FINISHED + finished += 1 + elif v.status == v.FAIL: + check_info.result = PrecheckTaskResult.FAILED + check_info.status = TaskStatus.FINISHED + all_passed = False + + check_info.code = v.error.code + check_info.description = v.error.msg + check_info.recoverable = len(v.suggests) > 0 and v.suggests[0].auto_fix + msg = v.suggests[0].msg if len(v.suggests) > 0 and v.suggests[0].msg is not None else '' + advisement = RecoverAdvisement(description=msg) + check_info.advisement = advisement + + finished += 1 + elif v.status == v.WAIT: + check_info.status = TaskStatus.PENDING + all_passed = False + info.append(check_info) + return all_passed, finished, total + + def recover(self, name): + log.get_logger().info('recover config') + deploy = self.obd.deploy + if not deploy: + raise Exception('failed to get component change config') + + components = deploy.deploy_config.components + param_check_status = {} + if 'component_change_deployment' in self.context.keys(): + param_check_status = self.context['component_change_deployment']['param_check_status'] + recover_change_parameter_list = [] + for component in components: + namespace_union = {} + if component in self.obd.namespaces: + namespace = self.obd.get_namespace(component) + if namespace: + util.recursive_update_dict(namespace_union, namespace.variables.get('start_check_status', {}))
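+ # overlay the cached param check results for this component on top of its start_check status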
+ util.recursive_update_dict(namespace_union, param_check_status.get(component, {})) + + for server, precheck_result in namespace_union.items(): + if precheck_result is None: + log.get_logger().warn('component: {}, precheck_result is None'.format(component)) + continue + for k, v in precheck_result.items(): + if v.status == v.FAIL and v.suggests and v.suggests[0].auto_fix and v.suggests[0].fix_eval: + for fix_eval in v.suggests[0].fix_eval: + if fix_eval.operation == FixEval.SET: + config_json = None + old_value = None + if fix_eval.is_global: + deploy.deploy_config.update_component_global_conf(name, fix_eval.key, fix_eval.value, save=False) + else: + deploy.deploy_config.update_component_server_conf(name, server, fix_eval.key, fix_eval.value, save=False) + else: + config_json, old_value = self.modify_config(component, name, fix_eval) + + if config_json is None: + log.get_logger().warn('config json is None') + continue + recover_change_parameter = RecoverChangeParameter(name=fix_eval.key, old_value=old_value, new_value=fix_eval.value) + recover_change_parameter_list.append(recover_change_parameter) + self.context['component_change_deployment_info'][name] = ComponentChangeConfig(**json.loads(json.dumps(config_json))) + self.recreate_deployment(name) + + return recover_change_parameter_list + + def recreate_deployment(self, name): + log.get_logger().info('recreate component_change deployment') + config = self.context['component_change_deployment_info'][name] + log.get_logger().info('config: %s' % config) + if config is not None: + cluster_config_yaml_path = self.create_component_change_path(name, config) + self.create_component_change_deployment(name, cluster_config_yaml_path, self.context['mode'][name]) + + def modify_config(self, component, name, fix_eval): + log.get_logger().info('modify component_change config') + if fix_eval.key == "parameters": + raise Exception("try to change parameters") + config = self.context['component_change_deployment_info'][name] if self.context['component_change_deployment_info'] is not None else None + if config is None: + log.get_logger().warn("config is none, no need to modify") + raise Exception('config is none') + log.get_logger().info('%s component_change config: %s' % (name, config)) + config = config['config'] + config_dict = config.dict() + if config_dict['components'] is None: + log.get_logger().warn("component is none, no need to modify") + raise Exception('component is none') + old_value = None + for value in config_dict['components'].values(): + if value is not None and 'component' in value.keys() and value['component'] == component: + log.get_logger().info('old value: %s' % value) + if fix_eval.key in value.keys(): + log.get_logger().info('new value: %s' % fix_eval.value) + old_value = value[fix_eval.key] + value[fix_eval.key] = fix_eval.value + elif "parameters" in value.keys() and value["parameters"] is not None: + log.get_logger().info('new value: %s' % fix_eval.value) + for parameter_dict in value["parameters"]: + parameter = Parameter(**parameter_dict) + if parameter.key == fix_eval.key: + if fix_eval.operation == FixEval.DEL: + old_value = parameter.value + value["parameters"].remove(parameter_dict) + else: + parameter_dict['value'] = fix_eval.value + return config_dict, old_value + return None, None + + @serial("component_change") + def add_components(self, name, background_tasks): + task_manager = task.get_task_manager() + task_info = task_manager.get_task_info(name, task_type="component_change") + if task_info is not None and task_info.status != 
TaskStatus.FINISHED: + raise Exception("task {0} exists and not finished".format(name)) + task_manager.del_task_info(name, task_type="component_change") + background_tasks.add_task(self._add_components, name) + + @auto_register("component_change") + def _add_components(self, name): + log.get_logger().info("clean io buffer before start install") + self.buffer.clear() + log.get_logger().info("clean namespace for init") + for component in self.context['new_obd'][name].deploy.deploy_config.components: + for plugin in const.INIT_PLUGINS: + self.obd.namespaces[component].set_return(plugin, None) + log.get_logger().info("clean namespace for start") + for component in self.context['new_obd'][name].deploy.deploy_config.components: + for plugin in const.START_PLUGINS: + self.obd.namespaces[component].set_return(plugin, None) + + repositories, install_plugins = self.obd.search_components_from_mirrors_and_install(self.obd.deploy.deploy_config, components=self.obd.deploy.deploy_config.added_components) + if not repositories or not install_plugins: + return False + self.context['new_obd'][name].set_repositories(repositories) + repositories = self.context['origin_repository'][name] + repositories + self.obd.set_repositories(repositories) + scale_out_check_plugins = self.obd.search_py_script_plugin(repositories, 'scale_out_check', no_found_act='ignore') + + trace_id = str(uuid()) + self.context['component_trace']['deploy'] = trace_id + ret = self.obd.stdio.init_trace_logger(self.obd.stdio.log_path, trace_id=trace_id, recreate=True) + if ret is False: + log.get_logger().warn("component deploy log init error") + + check_pass = True + for repository in repositories: + if repository not in scale_out_check_plugins: + continue + ret = self.obd.call_plugin(scale_out_check_plugins[repository], repository) + if not ret: + self.obd._call_stdio('verbose', '%s scale out check failed.' 
% repository.name) + check_pass = False + if not check_pass: + log.get_logger().error('component scale out check failed') + return False + + succeed = True + # prepare for added components + for repository in repositories: + if repository in scale_out_check_plugins: + plugin_return = self.obd.get_namespace(repository.name).get_return(scale_out_check_plugins[repository].name) + plugins_list = plugin_return.get_return('plugins', []) + for plugin_name in plugins_list: + plugin = self.obd.search_py_script_plugin([repository], plugin_name) + if repository in plugin: + succeed = succeed and self.obd.call_plugin(plugin[repository], repository) + if not succeed: + log.get_logger().error('scale out check return plugin failed') + return False + + self.obd._call_stdio('verbose', 'Start to deploy additional servers') + if not self.obd._deploy_cluster(self.obd.deploy, self.context['new_obd'][name].repositories, dump=False): + log.get_logger().error('failed to deploy additional servers') + return False + + self.obd.deploy.deploy_config.enable_mem_mode() + self.obd._call_stdio('verbose', 'Start to start additional servers') + error_repositories = [] + succeed_repositories = [] + for repository in self.context['new_obd'][name].repositories: + opt = Values() + self.obd.set_options(opt) + trace_id = str(uuid()) + self.context['component_trace'][repository.name] = trace_id + ret = self.obd.stdio.init_trace_logger(self.obd.stdio.log_path, trace_id=trace_id, recreate=True) + if ret is False: + log.get_logger().error("component: {}, start log init error".format(repository.name)) + if not self.obd._start_cluster(self.obd.deploy, [repository]): + log.get_logger().error("failed to start component: %s", repository.name) + error_repositories.append(repository.name) + continue + succeed_repositories.append(repository.name) + dump_components = list(set(succeed_repositories) - set(error_repositories)) + log.get_logger().info('error components: %s' % ','.join(error_repositories)) + if error_repositories: + log.get_logger().info('start dump succeed component: %s' % ','.join(dump_components)) + for component in error_repositories: + if component in self.obd.deploy.deploy_config._src_data: + del self.obd.deploy.deploy_config._src_data[component] + del self.obd.deploy.deploy_config.components[component] + if component in self.obd.deploy.deploy_info.components: + del self.obd.deploy.deploy_info.components[component] + + self.obd.deploy.deploy_config.set_dumpable() + for repository in repositories: + if repository.name not in error_repositories: + self.obd.deploy.use_model(repository.name, repository, False) + + if not self.obd.deploy.deploy_config.dump(): + self.obd._call_stdio('error', 'Failed to dump new deploy config') + log.get_logger().error("failed to dump new deploy config") + return False + self.obd.deploy.dump_deploy_info() + self.obd.set_deploy(self.obd.deploy) + + def scale_out(self, name, background_tasks): + pass + + def get_component_change_task_info(self, name): + task_info = task.get_task_manager().get_task_info(name, task_type="component_change") + if task_info is None: + raise Exception("task {0} not found".format(name)) + components = self.context['new_obd'][name].deploy.deploy_config.components + total_count = (len(const.START_PLUGINS) + len(const.INIT_PLUGINS)) * len(components) + finished_count = 1 + current = "" + task_result = TaskResult.RUNNING + info_dict = dict() + + for component in components: + info_dict[component] = DeployComponentInfo(component=component, status=TaskStatus.PENDING, 
result=TaskResult.RUNNING) + if component in self.obd.namespaces: + for plugin in const.INIT_PLUGINS: + if self.obd.namespaces[component].get_return(plugin).value is not None: + info_dict[component].status = TaskStatus.RUNNING + finished_count += 1 + current = "{0}: {1} finished".format(component, plugin) + if not self.obd.namespaces[component].get_return(plugin): + info_dict[component].result = TaskResult.FAILED + + for component in components: + for plugin in const.START_PLUGINS: + if component not in self.obd.namespaces: + break + if self.obd.namespaces[component].get_return(plugin).value is not None: + info_dict[component].status = TaskStatus.RUNNING + finished_count += 1 + current = "{0}: {1} finished".format(component, plugin) + if not self.obd.namespaces[component].get_return(plugin): + info_dict[component].result = TaskResult.FAILED + else: + if plugin == const.START_PLUGINS[-1]: + info_dict[component].result = TaskResult.SUCCESSFUL + + if task_info.status == TaskStatus.FINISHED: + task_result = task_info.result + for v in info_dict.values(): + v.status = TaskStatus.FINISHED + if v.result != TaskResult.SUCCESSFUL: + v.result = TaskResult.FAILED + info_list = list() + for info in info_dict.values(): + info_list.append(info) + msg = "" if task_info.result == TaskResult.SUCCESSFUL else '{0}'.format(task_info.exception) + if all(info.result == TaskResult.SUCCESSFUL for info in info_list): + for info in info_list: + info.status = TaskStatus.FINISHED + status = TaskResult.SUCCESSFUL + elif any(info.result == TaskResult.RUNNING for info in info_list): + status = TaskResult.RUNNING + else: + for info in info_list: + info.status = TaskStatus.FINISHED + status = TaskResult.FAILED + return TaskInfo(total=total_count, finished=finished_count if task_result != TaskResult.SUCCESSFUL else total_count, current=current, status=status, info=info_list, msg=msg) + + def get_component_change_log_by_component(self, component_name, mode): + data = [] + stdout = '' + for component in component_name: + trace_id = self.context['component_trace'][component] + cmd = 'grep -h "\[{}\]" {}* | sed "s/\[{}\] //g" '.format(trace_id, self.obd.stdio.log_path, trace_id) + stdout = LocalClient.execute_command(cmd).stdout + if not stdout: + trace_id = self.context['component_trace']['deploy'] + cmd = 'grep -h "\[{}\]" {}* | sed "s/\[{}\] //g" '.format(trace_id, self.obd.stdio.log_path, trace_id) + stdout = LocalClient.execute_command(cmd).stdout + data.append(ComponentLog(component_name=component, log=stdout)) + if mode == 'add_component': + return stdout + if mode == 'del_component': + return data + + def get_component_change_detail(self, name): + config = self.context['component_change_deployment_info'][name] + if not config: + raise Exception(f'error get config for deploy:{name}') + data = ComponentsChangeInfoDisplay(components_change_info=[]) + deploy_config = self.obd.deploy.deploy_config + if config.obproxy: + if config.obproxy.component in deploy_config.components: + cluster_config = deploy_config.components[config.obproxy.component] + original_global_conf = cluster_config.get_original_global_conf() + component_change_display = ComponentChangeInfoDisplay(component_name=config.obproxy.component) + server = config.obproxy.servers[0] + port = str(config.obproxy.listen_port) + password = original_global_conf.get('obproxy_sys_password', '') + component_change_display.address = server + ':' + port + component_change_display.username = 'root@proxysys' + component_change_display.password = '' + 
component_change_display.access_string = f"obclient -h{server} -P{port} -uroot@proxysys -Doceanbase -A" + data.components_change_info.append(component_change_display) + if config.obagent: + component_change_display = ComponentChangeInfoDisplay(component_name=config.obagent.component) + component_change_display.address = '' + component_change_display.username = '' + component_change_display.password = '' + component_change_display.access_string = '' + data.components_change_info.append(component_change_display) + if config.obconfigserver: + component_change_display = ComponentChangeInfoDisplay(component_name=config.obconfigserver.component) + server = config.obconfigserver.servers[0] + port = str(config.obconfigserver.listen_port) + component_change_display.address = '' + component_change_display.username = '' + component_change_display.password = '' + component_change_display.access_string = f"curl -s 'http://{server}:{port}/services?Action=GetObProxyConfig'" + data.components_change_info.append(component_change_display) + if config.ocpexpress: + component_change_display = ComponentChangeInfoDisplay(component_name=config.ocpexpress.component) + if config.ocpexpress.component in deploy_config.components: + cluster_config = deploy_config.components[config.ocpexpress.component] + original_global_conf = cluster_config.get_original_global_conf() + component_change_display.address = 'http://' + config.ocpexpress.servers[0] + ':' + str(config.ocpexpress.port) + component_change_display.username = 'admin' + component_change_display.password = '' + component_change_display.access_string = 'http://' + config.ocpexpress.servers[0] + ':' + str(config.ocpexpress.port) + data.components_change_info.append(component_change_display) + return data + + def node_check(self, name, components): + deploy = self.obd.deploy_manager.get_deploy_config(name) + if not deploy: + raise Exception(f'failed to get deploy for name: {name}') + self.obd.set_deploy(deploy) + deploy_config = deploy.deploy_config + deploy_info = deploy.deploy_info + all_repositories = self.obd.load_local_repositories(deploy_info) + self.obd.set_repositories(all_repositories) + repositories = self.obd.get_component_repositories(deploy_info, components) + self.obd.search_param_plugin_and_apply(all_repositories, deploy_config) + ssh_clients, connect_status = self.obd.get_clients_with_connect_status(deploy_config, repositories, fail_exit=False) + failed_servers = [] + for k, v in connect_status.items(): + if v.status == v.FAIL: + failed_servers.append(k) + components_server = ComponentsServer(components_server=[]) + for component in components: + cluster_config = deploy_config.components[component] + component_server = ComponentServer(component_name=component, failed_servers=[server.ip for server in list(set(cluster_config.servers) & set(failed_servers))]) + components_server.components_server.append(component_server) + return components_server + + @serial("del_component") + def del_component(self, name, components, force, background_tasks): + task_manager = task.get_task_manager() + task_info = task_manager.get_task_info(name, task_type="del_component") + if task_info is not None and task_info.status != TaskStatus.FINISHED: + raise Exception("task {0} exists and not finished".format(name)) + task_manager.del_task_info(name, task_type="del_component") + background_tasks.add_task(self._del_component, name, components, force)
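+ # _del_component stops and destroys each target component, then drops it from the deploy config and dumps the result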
+ @auto_register("del_component") + def _del_component(self, name, components, force): + self.context['del_component'][name] = components + + deploy = self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + deploy_info = deploy.deploy_info + deploy_config = deploy.deploy_config + + trace_id = str(uuid()) + self.context['component_trace']['deploy'] = trace_id + ret = self.obd.stdio.init_trace_logger(self.obd.stdio.log_path, trace_id=trace_id, recreate=True) + if ret is False: + log.get_logger().warn("component deploy log init error") + + if not components: + self.obd._call_stdio('error', 'Components are required.') + log.get_logger().error('Components are required.') + return False + + deploy_config.set_undumpable() + self.obd._call_stdio('start_loading', 'Get local repositories and plugins') + all_repositories = self.obd.load_local_repositories(deploy_info) + self.obd.set_repositories(all_repositories) + repositories = self.obd.get_component_repositories(deploy_info, components) + self.obd.search_param_plugin_and_apply(all_repositories, deploy_config) + self.obd._call_stdio('stop_loading', 'succeed') + + scale_in_check_plugins = self.obd.search_py_script_plugin(all_repositories, 'scale_in_check', no_found_act='ignore') + check_pass = True + for repository in all_repositories: + if repository not in scale_in_check_plugins: + continue + ret = self.obd.call_plugin(scale_in_check_plugins[repository], repository) + if not ret: + self.obd._call_stdio('verbose', '%s scale in check failed.' % repository.name) + log.get_logger().error('%s scale in check failed.' % repository.name) + check_pass = False + if not check_pass: + return False + + if not deploy_config.del_components(components, dryrun=True): + self.obd._call_stdio('error', 'Failed to delete components for %s' % name) + log.get_logger().error('Failed to delete components for %s' % name) + return False + + error_component = [] + for repository in repositories: + opt = Values() + setattr(opt, 'force', force) + self.obd.set_options(opt) + trace_id = str(uuid()) + self.context['component_trace'][repository.name] = trace_id + ret = self.obd.stdio.init_trace_logger(self.obd.stdio.log_path, trace_id=trace_id, recreate=True) + if ret is False: + log.get_logger().warn("component: {}, start log init error".format(repository.name)) + + self.obd._call_stdio('verbose', 'Start to stop target components') + self.obd.set_repositories([repository]) + if not self.obd._stop_cluster(deploy, [repository], dump=False): + self.obd._call_stdio('warn', f'failed to stop component {repository.name}') + error_component.append(repository.name) + + self.obd._call_stdio('verbose', 'Start to destroy target components') + if not self.obd._destroy_cluster(deploy, [repository], dump=False): + error_component.append(repository.name) + if error_component: + self.obd._call_stdio('warn', 'failed to stop or destroy components: {}'.format(','.join(error_component))) + log.get_logger().error('failed to stop or destroy components: {}'.format(','.join(error_component))) + return False + + if not deploy_config.del_components(components): + self.obd._call_stdio('error', 'Failed to delete components for %s' % name) + log.get_logger().error('Failed to delete components for %s' % name) + return False + + deploy_config.set_dumpable() + for repository in repositories: + deploy.unuse_model(repository.name, False) + deploy.dump_deploy_info() + + if not deploy_config.dump(): + self.obd._call_stdio('error', 'Failed to dump new deploy config') + log.get_logger().error('Failed to dump new deploy config') + return False
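+ # reaching this point means every target component was stopped, destroyed and removed from the deploy config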
success") + self.obd.set_deploy(self.obd.deploy) + + def get_del_component_task_info(self, name): + task_info = task.get_task_manager().get_task_info(name, task_type="del_component") + if task_info is None: + raise Exception("task {0} not found".format(name)) + components = self.context['del_component'][name] + if not components: + return None + total_count = len(const.DEL_COMPONENT_PLUGINS) * len(components) + finished_count = 0 + current = "" + task_result = TaskResult.RUNNING + info_dict = dict() + for c in components: + info_dict[c] = DeployComponentInfo(component=c, status=TaskStatus.PENDING, result=TaskResult.RUNNING) + for plugin in const.DEL_COMPONENT_PLUGINS: + if c not in self.obd.namespaces: + break + + if self.obd.namespaces[c].get_return(plugin).value is not None: + info_dict[c].status = TaskStatus.RUNNING + finished_count += 1 + current = "{0}: {1} finished".format(c, plugin) + if not self.obd.namespaces[c].get_return(plugin): + info_dict[c].result = TaskResult.FAILED + else: + if plugin == const.DEL_COMPONENT_PLUGINS[-1]: + info_dict[c].result = TaskResult.SUCCESSFUL + if task_info.status == TaskStatus.FINISHED: + for v in info_dict.values(): + v.status = TaskStatus.FINISHED + if v.result != TaskResult.SUCCESSFUL: + v.result = TaskResult.FAILED + + info_list = list() + for info in info_dict.values(): + info_list.append(info) + msg = "" if task_info.result == TaskResult.SUCCESSFUL else '{0}'.format(task_info.exception) + if all(info.result == TaskResult.SUCCESSFUL for info in info_list): + status = TaskResult.SUCCESSFUL + for info in info_list: + info.status = TaskStatus.FINISHED + elif any(info.result == TaskResult.RUNNING for info in info_list): + status = TaskResult.RUNNING + else: + status = TaskResult.FAILED + for info in info_list: + info.status = TaskStatus.FINISHED + return TaskInfo(total=total_count, finished=finished_count, current=current, status=status, info=info_list, msg=msg) + + def remove_component(self, name, components): + deploy = self.obd.deploy_manager.get_deploy_config(name) + self.obd.set_deploy(deploy) + deploy_info = deploy.deploy_info + deploy_config = deploy.deploy_config + + for component in components: + if component in [const.OCEANBASE_CE, const.OCEANBASE]: + raise Exception('not support remove oceanbase') + + all_repositories = self.obd.load_local_repositories(deploy_info) + self.obd.set_repositories(all_repositories) + repositories = self.obd.get_component_repositories(deploy_info, components) + self.obd.search_param_plugin_and_apply(all_repositories, deploy_config) + + deploy_config.set_undumpable() + if not deploy_config.del_components(components): + log.get_logger().error('Failed to delete components for %s' % name) + return False + deploy_config.set_dumpable() + + for repository in repositories: + deploy.unuse_model(repository.name, False) + deploy.dump_deploy_info() + + if not deploy_config.dump(): + log.get_logger().error('Failed to dump new deploy config') + return False + log.get_logger().info(f"force del components({','.join(components)}) success") + return components \ No newline at end of file diff --git a/service/handler/deployment_handler.py b/service/handler/deployment_handler.py index cd28a6f..6362d70 100644 --- a/service/handler/deployment_handler.py +++ b/service/handler/deployment_handler.py @@ -19,6 +19,7 @@ import json import tempfile +import re from copy import deepcopy from collections import defaultdict from uuid import uuid1 as uuid @@ -28,13 +29,15 @@ from _deploy import DeployStatus, DeployConfigStatus from _errno 
import CheckStatus, FixEval from _plugin import PluginType +from _rpm import Version from const import COMP_JRE, COMP_OCP_EXPRESS from service.api.v1.deployments import DeploymentInfo from service.handler.base_handler import BaseHandler +from service.handler.rsa_handler import RSAHandler from service.model.deployments import DeploymentConfig, PreCheckResult, RecoverChangeParameter, TaskInfo, \ ComponentInfo, PrecheckTaskResult, \ DeployMode, ConnectionInfo, PreCheckInfo, RecoverAdvisement, DeploymentReport, Deployment, Auth, DeployConfig, \ - DeploymentStatus, Parameter + DeploymentStatus, Parameter, ScenarioType from service.common import log, task, util, const from service.common.task import TaskStatus, TaskResult @@ -73,11 +76,12 @@ def generate_deployment_config(self, name: str, config: DeploymentConfig): if config.components.oceanbase is not None: self.generate_oceanbase_config(cluster_config, config, name, config.components.oceanbase) if config.components.obproxy is not None: - cluster_config[config.components.obproxy.component] = self.generate_component_config(config, const.OBPROXY, ['cluster_name', 'prometheus_listen_port', 'listen_port']) + cluster_config[config.components.obproxy.component] = self.generate_component_config(config, const.OBPROXY, ['cluster_name', 'prometheus_listen_port', 'listen_port', 'rpc_listen_port']) if config.components.obagent is not None: cluster_config[config.components.obagent.component] = self.generate_component_config(config, const.OBAGENT, ['monagent_http_port', 'mgragent_http_port']) if config.components.ocpexpress is not None: - cluster_config[config.components.ocpexpress.component] = self.generate_component_config(config, const.OCP_EXPRESS, ['port']) + ocp_pwd = self.generate_component_config(config, const.OCP_EXPRESS, ['port', 'admin_passwd']) + cluster_config[config.components.ocpexpress.component] = ocp_pwd if config.components.obconfigserver is not None: cluster_config[config.components.obconfigserver.component] = self.generate_component_config(config, const.OB_CONFIGSERVER, ['listen_port']) cluster_config_yaml_path = '' @@ -102,6 +106,10 @@ def generate_component_config(self, config, component_name, ext_keys=[]): ext_keys.insert(0, 'home_path') for key in ext_keys: if config_dict[key]: + if key == 'admin_passwd': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + comp_config['global'][key] = passwd + continue comp_config['global'][key] = config_dict[key] if input_comp_config.home_path == '': @@ -109,6 +117,10 @@ def generate_component_config(self, config, component_name, ext_keys=[]): for parameter in input_comp_config.parameters: if not parameter.adaptive: + if parameter.key.endswith('_password'): + passwd = RSAHandler().decrypt_private_key(parameter.value) + comp_config['global'][parameter.key] = passwd + continue comp_config['global'][parameter.key] = parameter.value return comp_config @@ -146,6 +158,10 @@ def generate_oceanbase_config(self, cluster_config, config, name, oceanbase): for key in config_dict: if config_dict[key] and key in {'mysql_port', 'rpc_port', 'home_path', 'data_dir', 'redo_dir', 'appname', 'root_password'}: + if key == 'root_password': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + oceanbase_config['global'][key] = passwd + continue oceanbase_config['global'][key] = config_dict[key] if oceanbase.home_path == '': @@ -168,7 +184,8 @@ def generate_auth_config(self, cluster_config, auth): cluster_config['user'] = {} cluster_config['user']['username'] = auth.user if auth.password: - 
cluster_config['user']['password'] = auth.password + passwd = RSAHandler().decrypt_private_key(auth.password) + cluster_config['user']['password'] = passwd cluster_config['user']['port'] = auth.port def create_deployment(self, name: str, config_path: str): @@ -414,8 +431,12 @@ def __build_connection_info(self, component, info): connect_url=info['cmd'] if info['type'] == 'db' else info['url']) def list_connection_info(self, name): + pwd_regex = r"-p'[^']*'\s*" if self.context["connection_info"][name] is not None: log.get_logger().info("get deployment {0} connection info from context".format(name)) + for item in self.context["connection_info"][name]: + item.password = '' + item.connect_url = re.sub(pwd_regex, '', item.connect_url) return self.context["connection_info"][name] deploy = self.get_deploy(name) connection_info_list = list() @@ -435,6 +456,7 @@ def list_connection_info(self, name): if connection_info is not None: connection_info_copy = deepcopy(connection_info) connection_info_copy.password = '' + connection_info_copy.connect_url = re.sub(pwd_regex, '', connection_info_copy.connect_url) connection_info_list.append(connection_info_copy) else: log.get_logger().warn("can not get connection info for component: {0}".format(component)) @@ -765,3 +787,67 @@ def get_install_log_by_component(self, component_name): cmd = 'grep -h "\[{}\]" {}* | sed "s/\[{}\] //g" '.format(trace_id, self.obd.stdio.log_path, trace_id) stdout = LocalClient.execute_command(cmd).stdout return stdout + + def get_scenario_by_version(self, version, language='zh-CN'): + version = version.split('-')[0] + if language == 'zh-CN': + scenario_4_3_0_0 = [ + { + 'type': 'Express OLTP', + 'desc': '适用于贸易、支付核心系统、互联网高吞吐量应用程序等工作负载。没有外键等限制、没有存储过程、没有长交易、没有大交易、没有复杂的连接、没有复杂的子查询。', + 'value': 'express_oltp' + }, + { + 'type': 'Complex OLTP', + 'desc': '适用于银行、保险系统等工作负载。他们通常具有复杂的联接、复杂的相关子查询、用 PL 编写的批处理作业,以及长事务和大事务。有时对短时间运行的查询使用并行执行', + 'value': 'complex_oltp' + }, + { + 'type': 'HTAP', + 'desc': '适用于混合 OLAP 和 OLTP 工作负载。通常用于从活动运营数据、欺诈检测和个性化建议中获取即时见解', + 'value': 'htap' + }, + { + 'type': 'OLAP', + 'desc': '用于实时数据仓库分析场景', + 'value': 'olap' + }, + { + 'type': 'OBKV', + 'desc': '用于键值工作负载和类似 Hbase 的宽列工作负载,这些工作负载通常具有非常高的吞吐量并且对延迟敏感', + 'value': 'kv' + }, + ] + else: + scenario_4_3_0_0 = [ + { + 'type': 'Express OLTP', + 'desc': 'This is suitable for trading, core payment systems, high-throughput Internet applications, and other workloads. There are no limitations such as foreign keys, stored procedures, long transactions, large transactions, complex joins, or complex subqueries.', + 'value': 'express_oltp' + }, + { + 'type': 'Complex OLTP', + 'desc': 'This is suitable for workloads in industries like banking and insurance. They often have complex joins, complex correlated subqueries, batch jobs written in PL, and long, large transactions. 
Sometimes parallel execution is used for queries that run for a short time.', + 'value': 'complex_oltp' + }, + { + 'type': 'HTAP', + 'desc': 'This is suitable for mixed OLAP and OLTP workloads, typically used to obtain real-time insights from activity operational data, fraud detection, and personalized recommendations.', + 'value': 'htap' + }, + { + 'type': 'OLAP', + 'desc': 'This is suitable for real-time data warehouse analysis scenarios.', + 'value': 'olap' + }, + { + 'type': 'OBKV', + 'desc': 'This is suitable for key-value workloads and wide-column workloads similar to HBase, which often have very high throughput and are sensitive to latency.', + 'value': 'kv' + }, + ] + data = [] + if Version(version) >= Version('4.3.0.0'): + for scenario in scenario_4_3_0_0: + data.append(ScenarioType(type=scenario['type'], desc=scenario['desc'], value=scenario['value'])) + return data diff --git a/service/handler/handler_utils.py b/service/handler/handler_utils.py index d8f2cc1..c0a48ce 100644 --- a/service/handler/handler_utils.py +++ b/service/handler/handler_utils.py @@ -24,6 +24,8 @@ from service.handler.mirror_handler import MirrorHandler from service.handler.ocp_handler import OcpHandler from service.handler.metadb_handler import MetadbHandler +from service.handler.component_change_handler import ComponentChangeHandler +from service.handler.rsa_handler import RSAHandler def new_component_handler(): @@ -56,3 +58,9 @@ def new_metadb_handler(): def new_server_info_handler(): return ServiceInfoHandler() + + +def new_component_change_handler(): + return ComponentChangeHandler() +def new_rsa_handler(): + return RSAHandler() diff --git a/service/handler/metadb_handler.py b/service/handler/metadb_handler.py index a3e8f5d..fad3b72 100644 --- a/service/handler/metadb_handler.py +++ b/service/handler/metadb_handler.py @@ -24,6 +24,7 @@ from singleton_decorator import singleton from service.handler.base_handler import BaseHandler +from service.handler.rsa_handler import RSAHandler from service.common import log, task, util, const from service.common.task import Serial as serial from service.common.task import AutoRegister as auto_register @@ -886,6 +887,8 @@ def get_destroy_task_info(self, id, task_id): def create_connection_info(self, info, sys=False): self.context["connection_info"][info.cluster_name] = info + passwd = RSAHandler().decrypt_private_key(info.password) + info.password = passwd log.get_logger().info( f'connection host: {info.host}, port: {info.port}, user: {info.user}, password: {info.password}' ) diff --git a/service/handler/ocp_handler.py b/service/handler/ocp_handler.py index 7e2a962..e92e3d8 100644 --- a/service/handler/ocp_handler.py +++ b/service/handler/ocp_handler.py @@ -27,6 +27,7 @@ from collections import defaultdict from service.handler.base_handler import BaseHandler +from service.handler.rsa_handler import RSAHandler from service.common import log, task, util, const from service.common.task import Serial as serial from service.common.task import AutoRegister as auto_register @@ -91,7 +92,8 @@ def generate_auth_config(self, cluster_config, auth): if 'user' not in cluster_config.keys(): cluster_config['user'] = {} cluster_config['user']['username'] = auth.user - cluster_config['user']['password'] = auth.password + passwd = RSAHandler().decrypt_private_key(auth.password) if auth.password is not None else auth.password + cluster_config['user']['password'] = passwd cluster_config['user']['port'] = auth.port def generate_metadb_config(self, cluster_config, oceanbase, home_path): @@ 
-128,6 +130,10 @@ def generate_metadb_config(self, cluster_config, oceanbase, home_path): for key in config_dict: if config_dict[key] and key in ['mysql_port', 'rpc_port', 'home_path', 'data_dir', 'redo_dir', 'appname', 'root_password']: + if key == 'root_password': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + oceanbase_config['global'][key] = passwd + continue oceanbase_config['global'][key] = config_dict[key] if oceanbase.home_path == '': @@ -159,11 +165,17 @@ def generate_obproxy_config(self, cluster_config, obproxy_config, home_path, ob_ if config_dict[key] and key in ('cluster_name', 'prometheus_listen_port', 'listen_port', 'home_path'): comp_config['global'][key] = config_dict[key] + comp_config['global']['enable_obproxy_rpc_service'] = False + if obproxy_config.home_path == '': comp_config['global']['home_path'] = home_path + '/obproxy' for parameter in obproxy_config.parameters: if not parameter.adaptive: + if parameter.key == 'obproxy_sys_password': + passwd = RSAHandler().decrypt_private_key(parameter.value) + comp_config['global'][parameter.key] = passwd + continue comp_config['global'][parameter.key] = parameter.value if 'depends' not in comp_config.keys(): comp_config['depends'] = list() @@ -189,6 +201,10 @@ def generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob for key in config_dict: if config_dict[key] and key in ('port', 'admin_password', 'memory_size', 'manage_info', 'home_path', 'soft_dir', 'log_dir', 'ocp_site_url', 'launch_user'): + if key == 'admin_password': + passwd = RSAHandler().decrypt_private_key(config_dict[key]) + ocp_config['global'][key] = passwd + continue ocp_config['global'][key] = config_dict[key] if launch_user: @@ -197,7 +213,7 @@ def generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob if config.metadb: ocp_config['global']['jdbc_url'] = 'jdbc:oceanbase://' + config_dict['metadb']['host'] + ':' + str(config_dict['metadb']['port']) + config_dict['metadb']['database'] ocp_config['global']['jdbc_username'] = config_dict['metadb']['user'] - ocp_config['global']['jdbc_password'] = config_dict['metadb']['password'] + ocp_config['global']['jdbc_password'] = RSAHandler().decrypt_private_key(config_dict['metadb']['password']) if config.meta_tenant: tenant_config = cluster_config[ob_component] if ob_component is not None else ocp_config @@ -206,7 +222,7 @@ def generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob tenant_config['global']['ocp_meta_tenant']['max_cpu'] = config_dict['meta_tenant']['resource']['cpu'] tenant_config['global']['ocp_meta_tenant']['memory_size'] = str(config_dict['meta_tenant']['resource']['memory']) + 'G' tenant_config['global']['ocp_meta_username'] = config_dict['meta_tenant']['name']['user_name'] - tenant_config['global']['ocp_meta_password'] = config_dict['meta_tenant']['password'] + tenant_config['global']['ocp_meta_password'] = RSAHandler().decrypt_private_key(config_dict['meta_tenant']['password']) tenant_config['global']['ocp_meta_db'] = config_dict['meta_tenant']['name']['user_database'] if config_dict['meta_tenant']['name']['user_database'] != '' else 'meta_database' self.context['meta_tenant'] = config_dict['meta_tenant']['name']['tenant_name'] @@ -217,7 +233,7 @@ def generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob tenant_config['global']['ocp_monitor_tenant']['max_cpu'] = config_dict['monitor_tenant']['resource']['cpu'] tenant_config['global']['ocp_monitor_tenant']['memory_size'] = 
str(config_dict['monitor_tenant']['resource']['memory']) + 'G' tenant_config['global']['ocp_monitor_username'] = config_dict['monitor_tenant']['name']['user_name'] - tenant_config['global']['ocp_monitor_password'] = config_dict['monitor_tenant']['password'] + tenant_config['global']['ocp_monitor_password'] = RSAHandler().decrypt_private_key(config_dict['monitor_tenant']['password']) tenant_config['global']['ocp_monitor_db'] = config_dict['monitor_tenant']['name']['user_database'] if config_dict['monitor_tenant']['name']['user_database'] != '' else 'monitor_database' self.context['monitor_tenant'] = config_dict['monitor_tenant']['name']['tenant_name'] @@ -289,6 +305,7 @@ def create_ocp_deployment(self, name: str, config_path: str): def check_user(self, user): self.context['upgrade_servers'] = user.servers + user.password = RSAHandler().decrypt_private_key(user.password) if user.password else user.password for ip in user.servers: log.get_logger().info('ip: %s, port: %s, user: %s, password: %s' % (ip, user.port, user.user, user.password)) self.context['upgrade_user'] = user.user @@ -296,11 +313,11 @@ def check_user(self, user): self.context['upgrade_ssh_port'] = user.port if user.port else 22 config = SshConfig(host=ip, port=user.port, username=user.user, password=user.password) client = SshClient(config) - if not (client.execute_command('sudo -n true') or client.execute_command('[ `id -u` == "0" ]')): - raise Exception('Please execute `bash -c \'echo "{user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers`\' as root in {ip}.'.format(user=user.user, ip=ip)) res = client.connect(self.obd.stdio, exit=False) if res != True: - return False + raise Exception("{user}@{ip} connect failed: username or password error".format(user=user.user, ip=ip)) + if not (client.execute_command('[ `id -u` == "0" ]') or client.execute_command('sudo -n true')): + raise Exception('Please execute `bash -c \'echo "{user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers`\' as root in {ip}.'.format(user=user.user, ip=ip)) return True def generate_secure_ocp_deployment(self, ocp_deployment): @@ -500,7 +517,7 @@ def _do_precheck(self, repositories, start_check_plugins): if len(self.obd.search_images(jre_name, version=version, min_version=min_version, max_version=max_version)) > 0: java_check = False res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=False, - work_dir_check=True, precheck=True, java_check=java_check, clients=ssh_clients, + work_dir_check=True, precheck=True, java_check=java_check, clients=ssh_clients, source_option = 'start_check', sys_cursor=self.context['metadb_cursor'], components=list(self.obd.deploy.deploy_config.components.keys())) if not res and res.get_return("exception"): raise res.get_return("exception") @@ -576,9 +593,9 @@ def parse_precheck_result(self, component, check_result, task_info, server, resu elif v.status == v.FAIL: check_info.result = PrecheckEventResult.FAILED check_info.code = v.error.code - check_info.advisement = v.error.msg - print(k, vars(v)) + check_info.description = v.error.msg check_info.recoverable = len(v.suggests) > 0 and v.suggests[0].auto_fix + check_info.advisement = v.suggests[0].msg if len(v.suggests) > 0 and v.suggests[0].msg is not None else '' all_passed = False info.status = TaskStatus.FINISHED info.result = TaskResult.FAILED @@ -733,12 +750,12 @@ def _create_tenant(self): deploy = self.obd.deploy self.obd.set_deploy(None) log.get_logger().info("start create meta tenant") - create_meta_ret = 
self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(meta_tenant_config)], clients = ssh_clients) + create_meta_ret = self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(meta_tenant_config)], clients=ssh_clients) if not create_meta_ret: self.obd.set_deploy(deploy) raise Exception("Create meta tenant failed") log.get_logger().info("start create monitor tenant") - create_monitor_ret = self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(monitor_tenant_config)], clients = ssh_clients) + create_monitor_ret = self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(monitor_tenant_config)], clients=ssh_clients) if not create_monitor_ret: self.obd.set_deploy(deploy) raise Exception("Create monitor tenant failed") @@ -825,6 +842,7 @@ def _do_install(self, id, task_id): servers = config.components.ocpserver.servers port = config.components.ocpserver.port password = config.components.ocpserver.admin_password + password = RSAHandler().decrypt_private_key(password) address = ['http://' + str(server) + ':' + str(port) for server in servers] self.obd.options._update_loose({"address": address[0], "user": 'admin', "password": password}) self.obd.export_to_ocp(name) @@ -858,7 +876,7 @@ def get_install_task_info(self, id, task_id): for plugin in const.INIT_PLUGINS: task_info.current = f'{component}-{plugin}' step_info = TaskStepInfo(name=f'{component}-{plugin}', status=TaskStatus.RUNNING, result=TaskResult.RUNNING) - if self.obd.namespaces[component].get_return(plugin) is not None: + if self.obd.namespaces[component].get_return(plugin).value is not None: if not self.obd.namespaces[component].get_return(plugin): failed += 1 step_info.result = TaskResult.FAILED @@ -874,7 +892,7 @@ def get_install_task_info(self, id, task_id): task_info.current = f'{component}-{plugin}' if component not in self.obd.namespaces: break - if self.obd.namespaces[component].get_return(plugin) is not None: + if self.obd.namespaces[component].get_return(plugin).value is not None: if not self.obd.namespaces[component].get_return(plugin): step_info.result = TaskResult.FAILED failed += 1 @@ -1017,7 +1035,7 @@ def get_reinstall_task_info(self, id, task_id): step_info = TaskStepInfo(name=f'{c}-{const.DESTROY_PLUGIN}', status=TaskStatus.RUNNING, result=TaskResult.RUNNING) if c in self.obd.namespaces: - if self.obd.namespaces[c].get_return(const.DESTROY_PLUGIN) is not None: + if self.obd.namespaces[c].get_return(const.DESTROY_PLUGIN).value is not None: task_info.status = TaskStatus.RUNNING task_info.current = f'{c}-{const.DESTROY_PLUGIN}' step_info.status = TaskStatus.FINISHED @@ -1034,7 +1052,7 @@ def get_reinstall_task_info(self, id, task_id): for plugin in const.INIT_PLUGINS: task_info.current = f'{component}-{plugin}' step_info = TaskStepInfo(name=f'{component}-{plugin}', status=TaskStatus.RUNNING, result=TaskResult.RUNNING) - if self.obd.namespaces[component].get_return(plugin) is not None: + if self.obd.namespaces[component].get_return(plugin).value is not None: if not 
self.obd.namespaces[component].get_return(plugin): failed += 1 step_info.result = TaskResult.FAILED @@ -1050,7 +1068,7 @@ def get_reinstall_task_info(self, id, task_id): task_info.current = f'{component}-{plugin}' if component not in self.obd.namespaces: break - if self.obd.namespaces[component].get_return(plugin) is not None: + if self.obd.namespaces[component].get_return(plugin).value is not None: if not self.obd.namespaces[component].get_return(plugin): step_info.result = TaskResult.FAILED failed += 1 @@ -1127,7 +1145,7 @@ def get_destroy_task_info(self, id, task_id): for c in self.obd.deploy.deploy_config.components: step_info = TaskStepInfo(name=f'{c}-{const.DESTROY_PLUGIN}', status=TaskStatus.RUNNING, result=TaskResult.RUNNING) if c in self.obd.namespaces: - if self.obd.namespaces[c].get_return(const.DESTROY_PLUGIN) is not None: + if self.obd.namespaces[c].get_return(const.DESTROY_PLUGIN).value is not None: task_info.status = TaskStatus.RUNNING task_info.current = f'{c}-{const.DESTROY_PLUGIN}' step_info.status = TaskStatus.FINISHED @@ -1638,6 +1656,7 @@ def _ocp_upgrade_from_new_deployment(self, repositories, deploy, pkgs, name, met opt = Values() setattr(opt, "without_parameter", True) setattr(opt, "skip_password_check", True) + setattr(opt, "source_option", 'upgrade') self.obd.set_options(opt) log.get_logger().info('begin start ocp') ret = self.obd.start_cluster(name) @@ -1667,21 +1686,21 @@ def get_ocp_upgrade_task(self, cluster_name, task_id): task_info.current = f'{component}-{plugin}' if component not in self.obd.namespaces: break - if self.obd.namespaces[component].get_return('stop') is not None: + if self.obd.namespaces[component].get_return('stop').value is not None: if not self.obd.namespaces[component].get_return('stop'): step_info.result = TaskResult.FAILED else: step_info.result = TaskResult.SUCCESSFUL step_info.status = TaskStatus.FINISHED - if self.obd.namespaces[component].get_return('start') is not None: + if self.obd.namespaces[component].get_return('start').value is not None: if not self.obd.namespaces[component].get_return('start'): step_info.result = TaskResult.FAILED else: step_info.result = TaskResult.SUCCESSFUL step_info.status = TaskStatus.FINISHED - if self.obd.namespaces[component].get_return('display') is not None: + if self.obd.namespaces[component].get_return('display').value is not None: if not self.obd.namespaces[component].get_return('display'): step_info.result = TaskResult.FAILED else: @@ -1707,7 +1726,7 @@ def get_installed_ocp_info(self, id): config = self.context['ocp_deployment_info'][id]['config'] servers = config.components.ocpserver.servers port = config.components.ocpserver.port - password = config.components.ocpserver.admin_password + password = '' address = ['http://' + str(server) + ':' + str(port) for server in servers] return OcpInstalledInfo(url=address, password=password) diff --git a/service/handler/rsa_handler.py b/service/handler/rsa_handler.py new file mode 100644 index 0000000..abac9af --- /dev/null +++ b/service/handler/rsa_handler.py @@ -0,0 +1,49 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
+
+from singleton_decorator import singleton
+from service.handler.base_handler import BaseHandler
+from Crypto.Cipher import PKCS1_v1_5
+from Crypto.PublicKey import RSA
+import base64
+
+
+@singleton
+class RSAHandler(BaseHandler):
+    def __init__(self):
+        super().__init__()
+        self.private_key = RSA.generate(2048)
+        self.public_key = self.private_key.public_key()
+
+    def public_key_to_bytes(self):
+        try:
+            pem_public_key = self.public_key.export_key(format='PEM')
+            return pem_public_key, None
+        except ValueError as e:
+            return None, e
+
+    def decrypt_private_key(self, text):
+        try:
+            encrypt_data = base64.b64decode(text)
+            cipher = PKCS1_v1_5.new(self.private_key)
+            decrypt_data = cipher.decrypt(encrypt_data, None)
+            return decrypt_data.decode('utf-8')
+        except (ValueError, TypeError, AttributeError) as e:  # AttributeError: decrypt() returns its None sentinel on bad padding
+            self.obd.stdio.error("password decrypt failed, reason: %s" % e)
+            raise Exception('rsa decryption failed: %s' % e)
diff --git a/service/handler/service_info_handler.py b/service/handler/service_info_handler.py
index d8e4696..b6358d6 100644
--- a/service/handler/service_info_handler.py
+++ b/service/handler/service_info_handler.py
@@ -27,7 +27,7 @@ from ssh import LocalClient, SshConfig, SshClient
 from service.handler.base_handler import BaseHandler
 from service.common import log, const
-from service.model.service_info import ServiceInfo, DeployName
+from service.model.service_info import ServiceInfo, DeployNames
 from service.model.server import OcpServerInfo, InstallerMode, ComponentInfo, MsgInfo
 from service.model.metadb import DatabaseConnection
 from service.model.ocp import OcpDeploymentConfig
@@ -36,6 +36,7 @@ from service.model.ssh import SshAuth
 from service.model.tenant import TenantConfig, TenantUser, TenantResource
 from service.handler.ocp_handler import OcpHandler
+from service.handler.rsa_handler import RSAHandler
 
 
 @singleton
@@ -222,7 +223,7 @@ def create_ocp_info(self, cluster_name):
 
     def get_deployments_name(self):
         deploys = self.obd.deploy_manager.get_deploy_configs()
         log.get_logger().info('deploys: %s' % deploys)
-        ret = DeployName()
+        ret = DeployNames()
         for _ in deploys:
             if _.deploy_info.status == DeployStatus.STATUS_RUNNING and \
                     (const.OCP_SERVER in _.deploy_config.components or const.OCP_SERVER_CE in _.deploy_config.components):
@@ -298,7 +299,8 @@ def get_component_agent(self, metadb):
             user = ''
         self.context["connection_info"][metadb.cluster_name] = metadb
         self.context['meta_database'] = metadb.database
-        self.context['metadb_cursor'] = Cursor(ip=metadb.host, port=metadb.port, user=metadb.user, password=metadb.password,
+        meta_password = RSAHandler().decrypt_private_key(metadb.password) if metadb.password else metadb.password
+        self.context['metadb_cursor'] = Cursor(ip=metadb.host, port=metadb.port, user=metadb.user, password=meta_password,
                                                stdio=self.obd.stdio)
         log.get_logger().info('cursor: %s' % self.context['metadb_cursor'])
         monitor_tenant_sql = "select `value` from %s.config_properties where `key` = 'ocp.monitordb.username'" % metadb.database
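Taken together, rsa_handler.py and the get_component_agent change above define a simple contract: the web UI fetches the PEM public key, encrypts the password with RSA PKCS#1 v1.5 (jsencrypt in the web/ changes further down), base64-encodes the result, and every handler that receives a credential runs it through decrypt_private_key before use. A self-contained sketch of that round trip, using pycryptodome on both sides to stand in for the browser (the endpoint that serves the public key is not shown in this diff):

    import base64

    from Crypto.Cipher import PKCS1_v1_5
    from Crypto.PublicKey import RSA

    # Server side at startup: RSAHandler generates this pair once per process.
    private_key = RSA.generate(2048)
    pem = private_key.publickey().export_key(format='PEM')  # what public_key_to_bytes() returns; public_key() in the handler is the newer alias

    # Browser side (jsencrypt equivalent): encrypt with the public key, then base64.
    cipher = PKCS1_v1_5.new(RSA.import_key(pem))
    token = base64.b64encode(cipher.encrypt('MyP@ssw0rd'.encode('utf-8'))).decode('utf-8')

    # Server side (decrypt_private_key equivalent): base64-decode, then decrypt.
    plain = PKCS1_v1_5.new(private_key).decrypt(base64.b64decode(token), None)
    assert plain is not None and plain.decode('utf-8') == 'MyP@ssw0rd'

One consequence worth noting: the key pair lives only in process memory, so an encrypted payload can be decrypted only by the same service process that issued the public key.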
diff --git a/service/model/component_change.py b/service/model/component_change.py
new file mode 100644
index 0000000..d18b5a4
--- /dev/null
+++ b/service/model/component_change.py
@@ -0,0 +1,113 @@
+from pydantic import BaseModel, Field
+from typing import Optional, List, Dict
+from fastapi import Body
+from enum import Enum
+
+from service.model.deployments import Parameter
+from service.model.components import ComponentInfo
+
+
+class ComponentChangeMode(BaseModel):
+    mode: str = Field(..., description="component change mode, e.g. 'scale_out', 'component_add'")
+
+
+class BestComponentInfo(BaseModel):
+    component_name: str = Field(..., description="component name, e.g. obproxy, ocp-express...")
+    version: str = Field('', description="component version")
+    deployed: int = Field(..., description="0 - not deployed, 1 - deployed")
+    node: str = Field('', description="component node")
+    component_info: Optional[List[ComponentInfo]] = Field([], description="component info")
+
+
+class ComponentChangeInfo(BaseModel):
+    component_list: List[BestComponentInfo] = Field(..., description="component list")
+
+
+class Obproxy(BaseModel):
+    component: str = Body(..., description='obproxy component name, ex:obproxy-ce,obproxy')
+    version: str = Body(..., description='version')
+    package_hash: str = Body('', description='obproxy package md5')
+    release: str = Body(..., description='obproxy release no')
+    prometheus_listen_port: int = Body(..., description='prometheus port')
+    listen_port: int = Body(..., description='sql port')
+    rpc_listen_port: int = Body(None, description='rpc port')
+    obproxy_sys_password: str = Body('', description='obproxy_sys_password')
+    parameters: List[Parameter] = Body(None, description='config parameter')
+    servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+    cluster_name: str = Body('', description='cluster name')
+
+
+class Obagent(BaseModel):
+    component: str = Body(..., description='obagent component name, ex:obagent')
+    version: str = Body(..., description='version')
+    package_hash: str = Body('', description='obagent package md5')
+    release: str = Body(..., description='obagent release no')
+    monagent_http_port: int = Body(..., description='server port')
+    mgragent_http_port: int = Body(..., description='debug port')
+    http_basic_auth_password: str = Body('', description='http_basic_auth_password')
+    parameters: List[Parameter] = Body(None, description='config parameter')
+    servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+
+
+class Obconfigserver(BaseModel):
+    component: str = Body(..., description='component name')
+    version: str = Body(..., description='version')
+    package_hash: str = Body('', description='package md5')
+    release: str = Body(..., description='release no')
+    parameters: List[Parameter] = Body(None, description='config parameter')
+    servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+    listen_port: int = Body(..., description='server port')
+
+
+class OcpExpress(BaseModel):
+    component: str = Body(..., description='component name')
+    version: str = Body(..., description='version')
+    package_hash: str = Body('', description='package md5')
+    release: str = Body(..., description='release no')
+    port: int = Body(..., description='server port')
+    admin_passwd: str = Body('', description='admin password')
+    parameters: List[Parameter] = Body(None, description='config parameter')
+    servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+
+
+class ComponentChangeConfig(ComponentChangeMode):
+    obproxy: Optional[Obproxy]
+    obagent: Optional[Obagent]
+    obconfigserver: Optional[Obconfigserver]
+    ocpexpress: Optional[OcpExpress]
+    home_path: str = Field(..., description="component change config path")
+
+
+class ComponentChangeInfoDisplay(BaseModel):
+    component_name: str = Field(..., description="component name")
+    address: str = Field('', description="url address")
+    username: str = Field('', description="username")
+    password: str = Field('', description="password")
+    access_string: str = Field('', description="access string")
+
+
+class ComponentsChangeInfoDisplay(BaseModel):
+    components_change_info: List[ComponentChangeInfoDisplay] = Field(..., description="components change info")
+
+
+class ComponentServer(BaseModel):
+    component_name: str = Field(..., description="component name")
+    failed_servers: List[str] = Field(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+
+
+class ComponentLog(BaseModel):
+    component_name: str = Field(..., description="component name")
+    log: str = Field('', description="log path")
+
+
+class ComponentsServer(BaseModel):
+    components_server: List[ComponentServer] = Field(..., description="components server")
+
+
+class ComponentDepends(BaseModel):
+    component_name: str = Field(..., description="component name")
+    depends: List[str] = Field([], description="depends component name")
+
+
+class ConfigPath(BaseModel):
+    config_path: str = Field(..., description="config path")
\ No newline at end of file
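The models above are plain pydantic containers, so a component-change request can be validated without going through FastAPI. A minimal sketch, assuming pydantic v1 semantics (Optional fields without a default, as written here, only default to None under v1); all field values are illustrative, not taken from this diff:

    from service.model.component_change import ComponentChangeConfig

    payload = {
        'mode': 'component_add',               # or 'scale_out', per ComponentChangeMode
        'home_path': '/home/admin/oceanbase',  # hypothetical install path
        'obproxy': {
            'component': 'obproxy-ce',
            'version': '4.2.3.0',
            'release': '3.el7',
            'prometheus_listen_port': 2884,
            'listen_port': 2883,
            'servers': ['1.1.1.1'],
        },
    }

    config = ComponentChangeConfig(**payload)
    # Components absent from the payload stay None, so handlers can branch on them.
    assert config.obproxy.listen_port == 2883 and config.obagent is None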
diff --git a/service/model/database.py b/service/model/database.py
index 6be9a25..bd98641 100644
--- a/service/model/database.py
+++ b/service/model/database.py
@@ -24,7 +24,7 @@
 
 
 class DatabaseConnection(BaseModel):
-    cluster_name = Body('', description="cluster name of the connection in installer")
+    cluster_name: str = Body('', description="cluster name of the connection in installer")
     host: str = Body('', description="host")
     port: int = Body(2881, description="port")
     user: str = Body('meta_user@ocp_meta', description="user")
diff --git a/service/model/deployments.py b/service/model/deployments.py
index f30589f..d72e980 100644
--- a/service/model/deployments.py
+++ b/service/model/deployments.py
@@ -102,6 +102,7 @@ class ObProxy(BaseModel):
     cluster_name: str = Body(None, description='obproxy name')
     home_path: str = Body('', description='install obproxy home path')
     prometheus_listen_port: int = Body(..., description='prometheus port')
+    rpc_listen_port: int = Body(None, description='rpc service port')
     listen_port: int = Body(..., description='sql port')
     parameters: List[Parameter] = Body(None, description='config parameter')
     servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
@@ -116,6 +117,7 @@ class OcpExpress(BaseModel):
     port: int = Body(..., description='server port')
     parameters: List[Parameter] = Body(None, description='config parameter')
     servers: List[str] = Body(..., description="server ip, ex:[ '1.1.1.1','2.2.2.2']")
+    admin_passwd: str = Body(..., description="ocp-express admin password")
 
 
 class ObAgent(BaseModel):
@@ -292,5 +294,8 @@ class OCPDeploymnetConfig(BaseModel):
     launch_user: Optional[str] = Body(None, description='process user')
 
 
-
+class ScenarioType(BaseModel):
+    type: str = Body(..., description='scenario name')
+    desc: str = Body(..., description='scenario description')
+    value: str = Body(..., description='scenario value')
 
diff --git a/service/model/mirror.py b/service/model/mirror.py
index c9b711b..7566223 100644
--- a/service/model/mirror.py
+++ b/service/model/mirror.py
@@ -19,6 +19,7 @@
 
 from fastapi import Body
 from pydantic import BaseModel
+from typing import Union
 
 
 class Mirror(BaseModel):
@@ -29,7 +30,7 @@ class Mirror(BaseModel):
     repomd_age: int = Body(None, description='repomd age')
     repo_age: int = Body(None, description='repo age')
     priority: int = Body(None, description='priority')
-    gpgcheck: str = Body('', description='gpgcheck')
+    gpgcheck: Union[str, int] = Body('', description='gpgcheck')
     enabled: bool = Body('', description='remote mirror is enabled')
     available: bool = Body('', description='remote mirror is enabled')
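The service_info.py hunk that follows repurposes DeployName: the old model (a bare list of names) is renamed DeployNames, and DeployName becomes a per-deployment record that the new component-change screens can render directly. Rough shape of the two models, with made-up values for illustration:

    from service.model.service_info import DeployName, DeployNames

    # One record per running deployment (values are illustrative).
    demo = DeployName(name='demo', deploy_user='root',
                      ob_servers=['1.1.1.1'], ob_version='4.2.3')

    # The old shape survives under the new name: just the list of deploy names.
    names = DeployNames(name=['demo', 'test'])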
diff --git a/service/model/service_info.py b/service/model/service_info.py
index e1d8fb4..72420ab 100644
--- a/service/model/service_info.py
+++ b/service/model/service_info.py
@@ -27,4 +27,12 @@ class ServiceInfo(BaseModel):
 
 
 class DeployName(BaseModel):
+    name: str = Body('', description="deploy name")
+    deploy_user: str = Body('', description="deploy user")
+    ob_servers: List[str] = Body([], description="ob servers")
+    ob_version: str = Body('', description="ob version")
+    create_date: str = Body(None, description="ob create date")
+
+
+class DeployNames(BaseModel):
     name: List[str] = Body([], description="deploy name list")
diff --git a/service/model/task.py b/service/model/task.py
index 1e1c2ce..0b1dcfc 100644
--- a/service/model/task.py
+++ b/service/model/task.py
@@ -63,6 +63,7 @@ class PreCheckResult(BaseModel):
     result: PrecheckEventResult = Body('', description="precheck event result")
     recoverable: bool = Body(False, description="precheck event recoverable")
     code: str = Body('', description="error code")
+    description: str = Body('', description='error description')
     advisement: str = Body("", description="advisement of precheck event failure")
diff --git a/ssh.py b/ssh.py
index c3fccb7..3442ae3 100644
--- a/ssh.py
+++ b/ssh.py
@@ -26,6 +26,7 @@ import tempfile
 import warnings
 from glob import glob
+from pathlib import Path
 
 from subprocess32 import Popen, PIPE
 
@@ -352,7 +353,7 @@ def _update_env(self):
 
     def add_env(self, key, value, rewrite=False, stdio=None):
         if key not in self.env or not self.env[key] or rewrite:
-            stdio.verbose('%s@%s set env %s to \'%s\'' % (self.config.username, self.config.host, key, value))
+            stdio.verbose('%s@%s export %s=\'%s\'' % (self.config.username, self.config.host, key, value))
             if self._is_local:
                 self._add_env_for_local(key, value, rewrite)
             else:
@@ -476,7 +477,7 @@ def _execute_command(self, command, timeout=None, retry=3, stdio=None):
         except SSHException as e:
             if retry:
                 self.close()
-                return self._execute_command(command, retry-1, stdio)
+                return self._execute_command(command, retry=retry - 1, stdio=stdio)
             else:
                 stdio.exception('')
                 stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
@@ -556,7 +557,7 @@ def _put_file(self):
 
     def _client_put_file(self, local_path, remote_path, stdio=None):
         if self.execute_command('mkdir -p %s && rm -fr %s' % (os.path.dirname(remote_path), remote_path), stdio=stdio):
             stdio.verbose('send %s to %s' % (local_path, remote_path))
-            if self.sftp.put(local_path.replace('~', os.getenv('HOME')), remote_path.replace('~', os.getenv('HOME'))):
+            if self.sftp.put(str(Path(local_path).expanduser()), str(Path(remote_path).expanduser())):
                 return self.execute_command('chmod %s %s' % (oct(os.stat(local_path).st_mode)[-3:], remote_path))
         return False
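The _execute_command change in the ssh.py hunk above is a real bug fix, not a style tweak. The signature is _execute_command(self, command, timeout=None, retry=3, stdio=None), so the old recursive call bound retry - 1 to timeout and stdio to retry: the retry counter never decreased, and a second failure either crashed on stdio - 1 (when a stdio object was passed) or skipped the remaining retries (when stdio was None). A standalone repro of the pitfall:

    # Toy stand-in with the same parameter order as ssh.py's _execute_command.
    def execute(command, timeout=None, retry=3, stdio=None):
        return {'timeout': timeout, 'retry': retry, 'stdio': stdio}

    print(execute('ls -a', 2, 'stdio-obj'))
    # {'timeout': 2, 'retry': 'stdio-obj', 'stdio': None}  <- old call shape, arguments misbound

    print(execute('ls -a', retry=2, stdio='stdio-obj'))
    # {'timeout': None, 'retry': 2, 'stdio': 'stdio-obj'}  <- fixed call shape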
diff --git a/tool.py b/tool.py
index bd96942..6e4164f 100644
--- a/tool.py
+++ b/tool.py
@@ -43,6 +43,7 @@ from _errno import EC_SQL_EXECUTE_FAILED
 from _stdio import SafeStdio
 
+
 _open = open
 if sys.version_info.major == 2:
     import MySQLdb as mysql
@@ -836,3 +837,105 @@ def close(self):
         if self.db:
             self.db.close()
             self.db = None
+
+
+class Exector(SafeStdio):
+
+    def __init__(self, host, port, user, pwd, exector_path, stdio):
+        self._host = host
+        self._port = port
+        self._user = user
+        self._pwd = pwd
+        self._cmd = None
+        self.stdio = stdio
+        self._exector = os.path.join(exector_path, 'executer27/bin/executer')
+
+    @property
+    def host(self):
+        return self._host
+
+    @property
+    def port(self):
+        return self._port
+
+    @property
+    def user(self):
+        return self._user
+
+    @property
+    def pwd(self):
+        return self._pwd
+
+    @property
+    def exector(self):
+        return self._exector
+
+    @property
+    def cmd(self):
+        if self._cmd is None:
+            self._cmd = '%s %%s' % self._exector
+        return self._cmd
+
+    @host.setter
+    def host(self, value):
+        self._host = value
+        self._cmd = None
+
+    @port.setter
+    def port(self, value):
+        self._port = value
+        self._cmd = None
+
+    @user.setter
+    def user(self, value):
+        self._user = value
+        self._cmd = None
+
+    @pwd.setter
+    def pwd(self, value):
+        self._pwd = value
+        self._cmd = None
+
+    @exector.setter
+    def exector(self, exector_path):
+        self._exector = os.path.join(exector_path, 'bin/executer27')
+        self._cmd = None
+
+    def create_temp(self, repository, direct_upgrade=False):
+        tmp_path = os.path.join('/tmp', self.tmp_prefix, repository.md5)  # NOTE: tmp_prefix is not defined in this class; it is expected from the surrounding context
+        if not os.path.exists(tmp_path):
+            relative_dir = 'etc/direct_upgrade' if direct_upgrade else 'etc'
+            script_dir = os.path.join(repository.repository_dir, relative_dir)
+            from ssh import LocalClient
+            LocalClient.put_dir(script_dir, tmp_path)
+        return tmp_path
+
+    def clear_temp(self):
+        tmp_path = os.path.join('/tmp', self.tmp_prefix)
+        DirectoryUtil.rm(tmp_path)
+
+    def exec_script(self, name, repository, can_skip=True, param=''):
+        path = os.path.join(repository.repository_dir, 'bin', name)
+        self.stdio.verbose('exec %s %s' % (repository, name))
+        try:
+            if os.path.exists(path):
+                cmd = '{} {}'.format(self.cmd.replace('%s', path, 1), param)
+                self.stdio.start_loading('Exec %s %s' % (repository, name))
+                from ssh import LocalClient
+                if LocalClient.execute_command(cmd, stdio=self.stdio):
+                    self.stdio.stop_loading('succeed')
+                    return True
+                else:
+                    self.stdio.stop_loading('fail')
+                    return False
+            else:
+                if can_skip:
+                    self.stdio.print('skip %s %s' % (repository, name))
+                    return True
+                else:
+                    self.stdio.error('No such file: %s' % path)
+                    return False
+        except:
+            # A bare "pass" here would swallow the failure and return None; report and fail instead.
+            self.stdio.exception('')
+            return False
diff --git a/web/config/config.ts b/web/config/config.ts
index 5cd23dd..3537da3 100644
--- a/web/config/config.ts
+++ b/web/config/config.ts
@@ -1,20 +1,32 @@
 import AntdMomentWebpackPlugin from '@ant-design/moment-webpack-plugin';
 import { defineConfig } from 'umi';
+
 import routes from './routes';
 
 export default defineConfig({
-  // routes: [{ path: '/', component: 'index' }],
   routes,
   title: 'OceanBase Deployer',
   fastRefresh: true,
   favicons: ['/assets/logo.png'],
   history: { type: 'hash' },
+  extraBabelPlugins:
+    process.env.NODE_ENV === 'production'
+      ?
['babel-plugin-dynamic-import-node'] + : [], mfsu: {}, locale: { default: 'zh-CN', antd: false, title: false, }, + // openAPI: [ + // { + // requestLibPath: "import { request } from '@umijs/max'", + // schemaPath:`${host}/openapi.json`, + // projectName: 'component-change', + // mock: true, + // }, + // ], request: {}, model: {}, metas: [ @@ -35,7 +47,7 @@ export default defineConfig({ `!function(modules){function __webpack_require__(moduleId){if(installedModules[moduleId])return installedModules[moduleId].exports;var module=installedModules[moduleId]={exports:{},id:moduleId,loaded:!1};return modules[moduleId].call(module.exports,module,module.exports,__webpack_require__),module.loaded=!0,module.exports}var installedModules={};return __webpack_require__.m=modules,__webpack_require__.c=installedModules,__webpack_require__.p="",__webpack_require__(0)}([function(module,exports){"use strict";!function(){if(!window.Tracert){for(var Tracert={_isInit:!0,_readyToRun:[],_guid:function(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,function(c){var r=16*Math.random()|0,v="x"===c?r:3&r|8;return v.toString(16)})},get:function(key){if("pageId"===key){if(window._tracert_loader_cfg=window._tracert_loader_cfg||{},window._tracert_loader_cfg.pageId)return window._tracert_loader_cfg.pageId;var metaa=document.querySelectorAll("meta[name=data-aspm]"),spma=metaa&&metaa[0].getAttribute("content"),spmb=document.body&&document.body.getAttribute("data-aspm"),pageId=spma&&spmb?spma+"."+spmb+"_"+Tracert._guid()+"_"+Date.now():"-_"+Tracert._guid()+"_"+Date.now();return window._tracert_loader_cfg.pageId=pageId,pageId}return this[key]},call:function(){var argsList,args=arguments;try{argsList=[].slice.call(args,0)}catch(ex){var argsLen=args.length;argsList=[];for(var i=0;i { if (env === 'production') { config.optimization.delete('noEmitOnErrors'); @@ -55,5 +67,5 @@ export default defineConfig({ // 静态资源的文件限制调整为 1GB,避免视频等大文件资源阻塞项目启动 config.performance.maxAssetSize(1000000000); return config; - } + }, }); diff --git a/web/config/routes.ts b/web/config/routes.ts index 734e822..c2f8874 100644 --- a/web/config/routes.ts +++ b/web/config/routes.ts @@ -23,26 +23,31 @@ export default [ path: 'update', component: 'OcpInstaller/Update', name: 'ocp升级', - spmb:'b71440' + spmb: 'b71440', }, { path: 'ocpInstaller/install', component: 'OcpInstaller/Install', name: '安装无MetaDB', - spmb:'b71462', + spmb: 'b71462', exact: true, }, { path: 'ocpInstaller/configuration', component: 'OcpInstaller/Configuration', name: '安装有MetaDB', - spmb:'b71463', + spmb: 'b71463', exact: true, }, { - path: 'ocpInstaller', - component: 'OcpInstaller/Index/index', - name: 'oceanbase云平台安装', + path: 'componentDeploy', + component: 'ComponentDeploy/index', + name: '安装组件', + }, + { + path: 'componentUninstall', + component: 'ComponentUninstall/index', + name: '卸载组件', }, { path: 'quit', @@ -53,7 +58,7 @@ export default [ { path: '/guide', component: 'Guide', - spmb:'b57206', + spmb: 'b57206', name: '部署向导', }, ], diff --git a/web/mock/ComponentChange.mock.ts b/web/mock/ComponentChange.mock.ts new file mode 100644 index 0000000..3f3ef0c --- /dev/null +++ b/web/mock/ComponentChange.mock.ts @@ -0,0 +1,10 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'POST /api/v1/component_change/:name': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 97, data: null, msg: '声得意于去于理力化应便该西识效起些。', success: false }); + }, +}; diff --git a/web/mock/ComponentChangeConfig.mock.ts 
b/web/mock/ComponentChangeConfig.mock.ts new file mode 100644 index 0000000..e717056 --- /dev/null +++ b/web/mock/ComponentChangeConfig.mock.ts @@ -0,0 +1,10 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'POST /api/v1/component_change/:name/deployment': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 64, data: null, msg: '影酸点形比全口织解一消等选影八月。', success: false }); + }, +}; diff --git a/web/mock/ComponentChangeDelComponent.mock.ts b/web/mock/ComponentChangeDelComponent.mock.ts new file mode 100644 index 0000000..23fd0c6 --- /dev/null +++ b/web/mock/ComponentChangeDelComponent.mock.ts @@ -0,0 +1,10 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'DELETE /api/v1/component_change/:name': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 72, data: null, msg: '研点更压定置与计这民品至产。', success: true }); + }, +}; diff --git a/web/mock/ComponentChangeDelComponentTask.mock.ts b/web/mock/ComponentChangeDelComponentTask.mock.ts new file mode 100644 index 0000000..500f580 --- /dev/null +++ b/web/mock/ComponentChangeDelComponentTask.mock.ts @@ -0,0 +1,13 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/:name/del': (req: Request, res: Response) => { + res.status(200).send({ + code: 66, + data: { log: '来如年标感精元近数转已应于。', offset: 84 }, + msg: '术号产小定做热社支会需者。', + success: false, + }); + }, +}; diff --git a/web/mock/ComponentChangeDeploymentsInfo.mock.ts b/web/mock/ComponentChangeDeploymentsInfo.mock.ts new file mode 100644 index 0000000..d5d6c32 --- /dev/null +++ b/web/mock/ComponentChangeDeploymentsInfo.mock.ts @@ -0,0 +1,148 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/deployment/detail': ( + req: Request, + res: Response, + ) => { + res.status(200).send({ + code: 84, + data: { + component_list: [ + { + component_name: 'ob-configserver', + version: '', + deployed: 0, + node: '', + component_info: [ + { + estimated_size: 24259515, + version: '1.0.0', + type: 'remote', + release: '2.el7', + arch: 'x86_64', + md5: '18687f6085f7a2f6c8a409d09912959589e0bdf0d2086fba1f0e2cebaec525cd', + version_type: '', + }, + ], + }, + { + component_name: 'prometheus', + version: '', + deployed: 1, + node: '', + component_info: [ + { + estimated_size: 211224073, + version: '2.37.1', + type: 'remote', + release: '10000102022110211.el7', + arch: 'x86_64', + md5: '39e4b09f16d6e3cae76382b2b176102ca001c52d451381eb2e5a50941c5d86f1', + version_type: '', + }, + { + estimated_size: 211224073, + version: '2.37.1', + type: 'remote', + release: '10000102022110211.el7', + arch: 'x86_64', + md5: '62d20b25430f0e5be7783ed5661bb42428ad61915150b7028b74e0468bfb8c4f', + version_type: '', + }, + ], + }, + { + component_name: 'grafana', + version: '', + deployed: 1, + node: '', + component_info: [ + { + estimated_size: 177766248, + version: '7.5.17', + type: 'remote', + release: '1', + arch: 'x86_64', + md5: 'f0c86571a2987ee6338a42b79bc1a38aebe2b07500d0120ee003aa7dd30973a5', + version_type: '', + }, + { + estimated_size: 177766248, + version: '7.5.17', + type: 'remote', + release: '1', + arch: 'x86_64', + md5: '9f81466722c5971fbad649a134f994fc1470dd4f76c360f744e2b06af559f6e5', + version_type: '', + }, + ], + }, + { + component_name: 'ocp-express', + version: '', + deployed: 0, + node: '1.1.1.1', + component_info: [ + { + estimated_size: 78426196, + version: '4.2.2', + type: 'remote', + release: 
'100000022024011120.el7', + arch: 'x86_64', + md5: '74a00bfb44909e81990a32ae0087180de6586816a917dcb49fab2b9082ca6e33', + version_type: '', + }, + ], + }, + { + component_name: 'obagent', + version: '', + deployed: 0, + node: '', + component_info: [ + { + estimated_size: 72919140, + version: '4.2.2', + type: 'remote', + release: '100000042024011120.el7', + arch: 'x86_64', + md5: '988b403da97f57801e05857122317ae8ea913853bb7bee7538ca6dcfcadc088a', + version_type: '', + }, + ], + }, + { + component_name: 'obproxy-ce', + version: '', + deployed: 0, + node: '', + component_info: [ + { + estimated_size: 688373235, + version: '4.2.3.0', + type: 'remote', + release: '3.el7', + arch: 'x86_64', + md5: '7ca6c000887b90db111093180e6984bf4cf8f7380a948870f3eb2ac30be38f37', + version_type: '', + }, + { + estimated_size: 673773899, + version: '4.2.0.0', + type: 'remote', + release: '7.el7', + arch: 'x86_64', + md5: 'da8953ed07a09b41d08140e865f108353b05914f8f774cdd40802ae6b8d3c14c', + version_type: '', + }, + ], + }, + ], + }, + msg: '数准才必持引市才以只定革等走容。', + success: true, + }); + }, +}; diff --git a/web/mock/ComponentChangeDeploymentsName.mock.ts b/web/mock/ComponentChangeDeploymentsName.mock.ts new file mode 100644 index 0000000..f4f13ed --- /dev/null +++ b/web/mock/ComponentChangeDeploymentsName.mock.ts @@ -0,0 +1,23 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/deployment': (req: Request, res: Response) => { + res.status(200).send({ + code: 78, + data: { + total: 76, + items: [ + { + name: '吕洋', + ob_version: '4.2.3', + create_date: '2024年7月', + deploy_user: 'root' + }, + ], + }, + msg: '级表真养取六色前音将个制情近战度照。', + success: true, + }); + }, +}; diff --git a/web/mock/ComponentChangeLog.mock.ts b/web/mock/ComponentChangeLog.mock.ts new file mode 100644 index 0000000..f3530f3 --- /dev/null +++ b/web/mock/ComponentChangeLog.mock.ts @@ -0,0 +1,13 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/:name/component_change/log': (req: Request, res: Response) => { + res.status(200).send({ + code: 97, + data: { log: '式特存小理最听集装际切清转该。', offset: 63 }, + msg: '十石却江音三地叫只由张被说而题。', + success: false, + }); + }, +}; diff --git a/web/mock/ComponentChangeNodeCheck.mock.ts b/web/mock/ComponentChangeNodeCheck.mock.ts new file mode 100644 index 0000000..8841132 --- /dev/null +++ b/web/mock/ComponentChangeNodeCheck.mock.ts @@ -0,0 +1,413 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'POST /api/v1/component_change/:name/display': (req: Request, res: Response) => { + res.status(200).send({ + code: 93, + data: { + components_change_info: [ + { + component_name: '计选经式工拉成段看格次完界至复平。', + address: '湖北省 黄冈市 英山县', + username: 'Martinez', + password: 'TG71[EnQ#uDg0VHx', + access_string: '为论厂直价工北圆器件音飞打。', + }, + { + component_name: '达只不又是就养想整品根实京社第。', + address: '上海 上海市 长宁区', + username: 'White', + password: '3P@)Gw65rahbHRcT', + access_string: '京几只儿基级采然列了果万天。', + }, + { + component_name: '么教量应存八流它规切通路专。', + address: '湖北省 随州市 广水市', + username: 'Gonzalez', + password: 'wNW(3KCuwil4t(*C', + access_string: '并角公过织级委状美度条出。', + }, + { + component_name: '其众却提该十清需性九没此率石联。', + address: '云南省 楚雄彝族自治州 武定县', + username: 'Allen', + password: 'Jjn%zw^96B1GdQ)I', + access_string: '么图都革工建们造设到又成电候来风。', + }, + { + component_name: '般整利一算给运行按白育马报全素选它。', + address: '云南省 文山壮族苗族自治州 富宁县', + username: 'Hall', + password: '&IV@bE2a2Z!Z@mot', + access_string: '期具但应口正今温便清去此识强。', + }, + { + component_name: 
'非据群完写路快集年定构标正性七。', + address: '河南省 商丘市 虞城县', + username: 'Rodriguez', + password: '0ay[U(iB5RsXClA!', + access_string: '根边无今之电等近论表速指现。', + }, + { + component_name: '电性活去越物列基广影然式系种整。', + address: '福建省 厦门市 集美区', + username: 'Thomas', + password: '8O7JnRHqeN9)q[K4', + access_string: '水音志张文前现空造识属电任。', + }, + { + component_name: '世标儿心便四层先性对确多。', + address: '海南省 海口市 美兰区', + username: 'White', + password: 'n]ZhXS#rHM9DHr^F', + access_string: '设经始八三列点回六车代具件传。', + }, + ], + }, + msg: '时广到被林老国想面处界给江叫何清得例。', + success: false, + }); + }, + 'POST /api/v1/component_change/:name/node/check': (req: Request, res: Response) => { + res.status(200).send({ + code: 62, + data: { + components_server: [ + { + component_name: '断为多直看命日第置利生化时运。', + failed_servers: [ + '设国部线手别装该机取团确热。', + '过具提响情具支并青动好情层万期。', + '始道即认路成场温山其断制率引没前角质。', + '同包型红研物出外战属身家经十没布教加。', + '给开军证非满线得做研它重。', + '走及使常去更观手技定给节越。', + '成日压算人几建生式同对验少色院采文。', + '划周间七将千又百选写王研四三。', + '离林老格统其号即却将说适品再。', + '群儿江由事改流七部调省改道气带育化。', + '织过角与来政委外教单在样取类具府决。', + '事指无气克到西开先权可时圆。', + '分你期通将半图合战音难道重。', + '很知明物程质花低标领现半龙热重。', + '查程反传回单王难水容样书样改即。', + '低打党易集金中代出体情委节什为清取。', + '动通市原起金治只美住农向报件。', + ], + }, + { + component_name: '心采至成王样带人圆派展领电此。', + failed_servers: [ + '酸关基交油自同列层选加风参清果目。', + '收消场发思使情制由议规东置。', + '属九如类那京备白每十济造件书场权需。', + '程现战张油酸型明从维设正等增它员。', + '斯少积号断斗必养新战件采速放。', + '设金及自会容才强东说事组生林他民米海。', + '记阶传儿反代领始安更和圆联文达。', + '战那表系二特成强始照第通参干长。', + '比就年低步解斯导还厂华采子。', + '调对走儿价真风别连政数须信图写如。', + '为片际过四界信育己题约王规状办当。', + '影列再增出或什火习向查比深公结分第相。', + '加社当层林型单收工定院声国维示三温被。', + '对题速度规体干示林会算命持军构。', + '者成许华要万力设受白身非效年求少由委。', + '很义表应平正义众性做无用往治。', + '它到最学但目亲求管律第先起持完表线风。', + ], + }, + { + component_name: '党力角支民值拉马边存劳斗老从。', + failed_servers: [ + '支两科毛命克分市要京收近解办业。', + '与各广式用高实事具广历装还参。', + '上题报改内现便今气价平取应些。', + '斯性该前度实明约被重被或特。', + '住长子商出造门安受来向处十表与造委派。', + '府华可联华由容要马又次难在。', + '意用例山业包线验他界从及段次。', + '际老细据较七力农历满增口出。', + '那争眼办设设许走号证行明状增光标。', + '压展者选规正最解包划权期小。', + '决委的开压往长眼两程列习候她速图。', + '水美需分量统还和组住车断大素。', + '集大族信动地社海造听识表们处系办。', + '开各其角满相构物指能整空热工。', + '名离上克维民热情级查红济合任半及品织。', + '儿解属了消性指点快科从根华有调。', + '入之料基同况为元使白史建外。', + ], + }, + { + component_name: '先往走教称热易存需厂听改由引观江有。', + failed_servers: [ + '五群的之达亲作于见政就复严持装传造。', + '毛个他整造我状院相四自斗区。', + '算列来世解活许思事回酸家九数区少过。', + '层精口张几技史越住时程权连术五。', + '身红不马半地高金长际里级酸主土战史。', + '原月出则相九就通十据确金就平。', + '天起本两把实越新阶群花之规现。', + '团快信带支实东代新第具下我。', + '外四无该身江位做和广走许件调花律比火。', + '深增影结水或思产月来增本个地。', + '满党确基在影行文长车广感养。', + '达级不水同半话话特快发精入做前才应选。', + '难红面整都后常农影表精没给几毛教。', + '东上月相路厂革实细义该广业组。', + '农易历的什段层达机它志阶权认根持入新。', + '石业参采区长量形算们三眼学低为干青。', + '具海调济组解写米斯要技前使思。', + ], + }, + { + component_name: '克他火量一内整维例件处克列。', + failed_servers: [ + '儿两局目性龙价老然带下使至。', + '如实参题并议经石市九目学眼出化空。', + '事专据极人公交题五这们物土参品。', + '值方存造十务原调命学给办即圆料日本。', + '里分门老而到格更特但来变光还局。', + '根将族少般何国后比七军公知么数比。', + '战之照说并但压基些见济实图关带养实。', + '影口命才消离地音个家展话严他。', + '到约可半它片低示空压年比说。', + '史各起而委积节价参东即除毛求即度比。', + '了看十圆统专往压战技除列始。', + '五五日己空当段开利是局已学。', + '越了着团常老明便身方性色教。', + '先子形识收物给验题能整由观专。', + '并美流手回子即设思说快江南分文信定点。', + '为时府史器组她系件离下业己又式。', + '市数天火细种义京军式观将几。', + ], + }, + { + component_name: '低取进强商术取里毛界理话都阶律质何今。', + failed_servers: [ + '进她准不红矿备基求级记加也为。', + '石具九适物员主基温资严族率算者场。', + '西文全眼八作题按上何平温。', + '再了为交器形老写级连把到。', + '阶青查指满些边证期离他角油求着地路。', + '被龙被道青代例等认但生利长电严族。', + '织志可米干什区因老照进其林心行本。', + '了下半十务切定农火断万按决热步家最家。', + '但世场信具活没时民关政志复十难各总级。', + '体影带铁装革眼法众回动解。', + '门林给事义他有程五京影收计建家除机。', + '比众么这变回区长命派气造满。', + '经马解去相住格东革过等了即传八白率。', + '五正特效可强的例十清外式气。', + '导工必国主去别基于里般火是按领拉机。', + '需种极设队加响风离亲意容参。', + '音场节非北打值不头车军取办知开因半。', + ], + }, + { + component_name: '应件验农省克这素向几争并列自自音。', + failed_servers: [ + '通位府机感手实头越段备许确思。', + '单信龙较具长同系流名合身约报有行一。', + '系度长间强往照经们们要水间也。', + '细王速明打线象飞料日小区事接习正果际。', + 
'入指上快张加儿反接看图济备步形通。', + '基原直自六中养化族青持速。', + '信系格线即采就作精更老场极果政。', + '状叫状江信员由面南应关变八电比林。', + '切有当再影加查存即程类和美较点。', + '劳厂己许说真往将路非县我。', + '步外约起身难设片解容文因平。', + '历行从始习资然清比得复其料段强则取。', + '天圆走音件常经被委院拉院题油状号参。', + '联标际火五增值世队意受量前打政据统。', + '算记车安理团值记局备种京点话很称。', + '资规众次集今造设研土如其反细。', + '响细适局做克看要件即况应和毛江么。', + ], + }, + { + component_name: '影南写机员拉再就斯至算只易克话军划。', + failed_servers: [ + '引员美治满识年色加价例收。', + '见交少没间心通务保众社习人。', + '色点程育两展格风写调为存历界种导。', + '党写只对效学志照研人效需表布需。', + '见号千活列体专响才第第满京心你教然。', + '儿即领华始己造权说两片来家都花原把位。', + '打并研光正例角片这两活石亲使。', + '给型压领江分最近何起以共运前。', + '主然了现等文维展求引业实压。', + '住平物专低土学统间集中元斗从。', + '她领场调科金研心必各八者气。', + '发快件取劳以万市便上什导导。', + '管第真候史比习法书马今动体西。', + '效头路感之研何队风特金深次。', + '标切条回世飞东影流张将记回太片自下消。', + '派照达办酸二里每类叫并按而这按理资。', + '斯品出重把特直周况利影示活。', + ], + }, + { + component_name: '才论设斯此证半层非想南没省。', + failed_servers: [ + '治越导非展老受政这式们万加马说。', + '太界南西改设离装老加展自为必提第北于。', + '通想商示问到叫利人时大至向很要流。', + '道文律入战法总处四西集土机中开速百。', + '进说变解我程情党格式见立维队众形。', + '建理比于周思心主点无马体值。', + '期家并用算省二毛响平九什圆及统法率。', + '公并为须育适观究断例得半。', + '向已六证一约保难装切根百精度生。', + '主见时政住计系领类华再外达。', + '号见解总般养基拉建容定段步院还。', + '本满动义着连们节务确近问置报阶质习者。', + '放体几意说大先传些万目得者须情律被在。', + '查叫他细问路劳亲常才导共。', + '及电位命生习林精种儿查影。', + '处单有号教加已间元关容青目。', + '美类照制次再科公局心每再须量立低风。', + ], + }, + { + component_name: '热院西导平起状流口务且能极计然。', + failed_servers: [ + '许速例查特要候代此加建而斯叫口。', + '适整话持起整自空器育织育果。', + '器利革细五造太济备作府间次然采立容。', + '阶备工声选何般金集日场两大证真或大。', + '面工照给快她确类离况又交具再。', + '理教全与以并素走出规例解看容专究革热。', + '片厂据满号给小加织气向县资县加写转。', + '步律量料知公利难其式改开万。', + '知造克空受组称满程养听片命林走。', + '资日现百各形角称今然她下石。', + '石天人行应很分性少难置再老革成术米。', + '去子立示利可从律来明很美重。', + '离级年例老来图叫确属内也料需。', + '元张公着说取习品时面去应我高效。', + '有过什门派体率公层类局革组用文。', + '选该话无表办少法各住备方大月取劳九。', + '习府长院元干率再规工型列时。', + ], + }, + { + component_name: '北音商况了影位儿院状任着加。', + failed_servers: [ + '常界战要种始严龙理拉院二直三。', + '历增交亲被事众米想选立能有亲直被。', + '转信立他准展民在总道证点农。', + '千安果再院四支口带报油政常划使青。', + '应活现们许中身那好员面先常四中。', + '治备家称重器常见部东东之段毛反。', + '具律及不化保改律引空学算把铁九查。', + '把导取分计算实流体分什老收全结龙。', + '向没专就因革量把使因想它毛来长安。', + '化我在新查点委六候列而小造华应。', + '自共事计么法律叫非始按务。', + '西而即给经必主据格定社形油样小回。', + '空价外议心此才热院日军身二解。', + '量步件证走运真广第织从数进记主方。', + '细着口志见积月收利关角角。', + '段石水界型育住争月出感动放七目克成年。', + '什更调片立置毛青装走由来很决只。', + ], + }, + { + component_name: '算都历京持门状条类方各联后积。', + failed_servers: [ + '题走比老油题县解路比才条各共且置一。', + '社般要速着红华当治单拉天技次府油知。', + '才开历受性种回值下今心查能化际报。', + '解位阶先少必展龙象在切儿始样非劳斯。', + '构万上主马件空根说研局大市。', + '张收给常马文写再置区改式老较。', + '听两后造发照制当回道半石该该划四。', + '时实反反两高小府放增造农本育形。', + '建行况较号系证劳影约有林议清研术。', + '者下干石转见它易十几大反海做千被。', + '须中明使至口风色强身六方并京江。', + '红织子组图更林红往研边群验率。', + '后类受经厂传九真织且到设速。', + '车任率断同群最东完共她目放专问例亲。', + '场干或中流百打道平火派物人中。', + '果原书之教作口写千斯非指。', + '制过声他因素断治主位现放严己。', + ], + }, + { + component_name: '商型表应六我东空院平造相支满然。', + failed_servers: [ + '精矿使满什发万变长事此她。', + '历由情土方与感员电技土又根程学书。', + '然据土极人局在机到片所名采各比土之家。', + '说用江济多感步听划世要型书。', + '新处给被九斯战价命其好音阶。', + '音联每制改员长习色量情和外。', + '划往青处角克接程会里学平然他地。', + '义矿了飞次设时信开单开上加。', + '必象别过认不格验称式又信也及。', + '九铁状该对识放和育律世场场象与程王。', + '集证近金段的消公员品前门构体必过造利。', + '象年划技南处龙子备事处解群际进。', + '集传国精认和文志气才采书反。', + '划价例温者身领意教所影从。', + '道务命都生只始事美约好育主比面拉。', + '需义达人影约参器新习此铁队越马第。', + '求期省价军影结会因年象华必准。', + ], + }, + { + component_name: '历场特到京生解没原斯物统起长南把教状。', + failed_servers: [ + '到三和社单取团写山干况五。', + '除类切领料委计广政者才京并法单党。', + '转始近你际第青前社组无习回群。', + '热历起领强斯深京资值日接象记。', + '例位识院算据员话才因千从一质太。', + '边面除线该示传空省断由定向除众这斯。', + '式主及听展适林会万非而越。', + '建手省府青切她五变二天根约权为油。', + '查等压管现业看式马好不量。', + '经导员改维平气行点分存称加号严以。', + '步切民八元快团米严了统些原。', + '里示转科变低连劳法何称同由养。', + '油率地任又今日料南最把期快阶证。', + '己人中低各这入清空应观队。', + '有内长难军关类器百共量再海会。', + '志片究志五七文段志得阶行与从保响式压。', + '酸同切加下林织术件快决何酸度。', + ], + }, + { + component_name: '处安造到表区格从支亲消果第采。', + failed_servers: [ + '标局处人着别将四被代须东斯设流。', + '放结效程非物头资二养正取响经只。', + '行六务济增斗再基电交何计导第任拉国。', + '确海务市近八立包新象参风较位八式程。', + 
'用效习以己水则或传北立和场传。', + '回专当志口求两西制离应几实那照。', + '近理济支后写确种起速农划取现克低导大。', + '八深理候况断水委己律治识物上小素。', + '切得老斗山克数采照火须过部农。', + '如类经意米先红千身而日写连认深经。', + '商此便活第并派值界状实厂品资。', + '果别能属育业任集上分后传理住所极。', + '此着育联应林第消严群油社华名。', + '你目前属条广长反感强立必提热重状他。', + '斗带是王发总完矿府管作量决整共每话。', + '上矿直江打史处几转里次放温走期立。', + '查收眼要还党速选造子年火基华离市热改。', + ], + }, + ], + }, + msg: '集般离放产今流了九号状率她速中生细。', + success: false, + }); + }, +}; diff --git a/web/mock/ComponentChangeTask.mock.ts b/web/mock/ComponentChangeTask.mock.ts new file mode 100644 index 0000000..2c5cda3 --- /dev/null +++ b/web/mock/ComponentChangeTask.mock.ts @@ -0,0 +1,69 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/:name/component_change': (req: Request, res: Response) => { + res.status(200).send({ + code: 94, + data: { + total: 67, + finished: 99, + current: '起此求统根属十级三了进太厂华果。', + status: { + '0': 'S', + '1': 'U', + '2': 'C', + '3': 'C', + '4': 'E', + '5': 'S', + '6': 'S', + '7': 'F', + '8': 'U', + '9': 'L', + }, + msg: '认车准都却去题计是年一果热支选步清。', + info: [ + { + component: '素文记然百积青需无式原标整一。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { + '0': 'S', + '1': 'U', + '2': 'C', + '3': 'C', + '4': 'E', + '5': 'S', + '6': 'S', + '7': 'F', + '8': 'U', + '9': 'L', + }, + }, + { + component: '切起着八压在治王派连九生层。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { + '0': 'S', + '1': 'U', + '2': 'C', + '3': 'C', + '4': 'E', + '5': 'S', + '6': 'S', + '7': 'F', + '8': 'U', + '9': 'L', + }, + }, + ], + }, + msg: '也气果因住按车才层给八习较场南见查离。', + success: true, + }); + }, + 'GET /api/v1/component_change/:name/del_component': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 66, data: null, msg: '完住片所各领重江京家按般感增气。', success: false }); + }, +}; diff --git a/web/mock/PrecheckComponentChange.mock.ts b/web/mock/PrecheckComponentChange.mock.ts new file mode 100644 index 0000000..02a158a --- /dev/null +++ b/web/mock/PrecheckComponentChange.mock.ts @@ -0,0 +1,10 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'POST /api/v1/component_change/:name/precheck': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 90, data: null, msg: '书入他积数历次条把是院表名。', success: false }); + }, +}; diff --git a/web/mock/PrecheckComponentChangeRes.mock.ts b/web/mock/PrecheckComponentChangeRes.mock.ts new file mode 100644 index 0000000..d1e6bf2 --- /dev/null +++ b/web/mock/PrecheckComponentChangeRes.mock.ts @@ -0,0 +1,192 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/:name/precheck': (req: Request, res: Response) => { + res.status(200).send({ + code: 65, + data: { + total: 85, + finished: 76, + all_passed: true, + status: { + '0': 'S', + '1': 'U', + '2': 'C', + '3': 'C', + '4': 'E', + '5': 'S', + '6': 'S', + '7': 'F', + '8': 'U', + '9': 'L', + }, + message: '龙少先两资周群传法子又信行例气。', + info: [ + { + name: '段秀兰', + server: '强划开里就就角个身产置人商相题空。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, + code: '委口极解重代极心百油至花就。', + description: '应究如温出出下市类权下毛好究设。', + advisement: { description: '展常飞打细社接目县圆即在王。' }, + }, + { + name: '蔡平', + server: '口题分重活象直回部按利积如北热么。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, 
+ code: '阶了决果号角影利改当历间。', + description: '育始没市常权此那又立半东此增向标。', + advisement: { description: '入会名圆产实除术路好称约声。' }, + }, + { + name: '邓刚', + server: '对里技观龙积然多思空离十值。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '规斗当过一品标程验真位争九。', + description: '式亲小保于除者下公只习开例。', + advisement: { description: '社确八矿类资好能难商并性间表中。' }, + }, + { + name: '黎刚', + server: '技活次许他计便政毛完市建较如光内响。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '队非验节管图中保调可思期。', + description: '特技带现业打没美心任专断由。', + advisement: { description: '前动空造院求员发于历来义龙。' }, + }, + { + name: '吕秀英', + server: '位军便风变物六话土严分什声。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '支世周切间选单不五持各不林界已。', + description: '科如北状级清对音强半极动京证前。', + advisement: { description: '前期使干先系器代机及市已两所。' }, + }, + { + name: '卢超', + server: '被存世好音总那证给打局务断到。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '况查直列特县看样半花山毛那好完。', + description: '内铁或南维县月即属己段自七条别正干。', + advisement: { description: '前白然取至装器生温物装半周立放。' }, + }, + { + name: '白艳', + server: '我合矿称价维质书全社济照电织军研。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '目设志为用来拉物气变通共回也交形通。', + description: '等定议增物做热价住步度术百领。', + advisement: { description: '业结代她务因理果海做子水头。' }, + }, + { + name: '潘秀英', + server: '行儿导度如电世光图声多化上收须线织。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, + code: '场二正何放委年风该收员界共意所。', + description: '多增改变理和区设全容地直影常政。', + advisement: { description: '众知其严学达石后引从交明提合作维南。' }, + }, + { + name: '张芳', + server: '市期包维展照目军及完就整县一。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, + code: '极好两安始节九运意包空学高规。', + description: '起育非何族成县放之更转他程。', + advisement: { description: '准思传该层候算而离生使认近看完。' }, + }, + { + name: '孙芳', + server: '消参确用外群美总志连风化节存土才。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '影状可压对建步需具学写头生一。', + description: '列完论成大观常金至九者设直问家已。', + advisement: { description: '通然角书才真器化题各器候京便。' }, + }, + { + name: '方伟', + server: '军影器于包价面前好即头下。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '群会成置光构权调记又验单造所于。', + description: '标响华生铁先价日年应世农百主半当引。', + advisement: { description: '每织效此是整技大提单行识。' }, + }, + { + name: '孔芳', + server: '和深体期合代深中织率何率原。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, + code: '采步革用有龙常外劳好想难到龙。', + description: '难话列了群四下发你五布光照号张地重。', + advisement: { description: 
'用利设团始至段车其就即调你。' }, + }, + { + name: '潘艳', + server: '对生者区料成头世易周听格效认。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '次建者率关写家影前圆交识段说信任知。', + description: '会规都都文分就权格强土现层。', + advisement: { description: '各教般分清次党名元点位保严设。' }, + }, + { + name: '叶勇', + server: '还做界平支则了系象众由划适号增这。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '带少话界着率第题节合指种声很得。', + description: '布性第电史安高自省或来切化。', + advisement: { description: '持点米以研须去或毛立林领先。' }, + }, + { + name: '易霞', + server: '型行着经感易话化意复认如己证自感组。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: true, + code: '王代加技况多但条办织书头花后开党。', + description: '长实儿你须准感装感达期声半。', + advisement: { description: '住部些在么效为就量政参连阶。' }, + }, + { + name: '常刚', + server: '它把政命门民油厂等多定线元除商。', + status: { '0': 'P', '1': 'E', '2': 'N', '3': 'D', '4': 'I', '5': 'N', '6': 'G' }, + result: { '0': 'P', '1': 'A', '2': 'S', '3': 'S', '4': 'E', '5': 'D' }, + recoverable: false, + code: '实音阶他步活着可日相连现府。', + description: '步军制西华很素打平格确收其速阶号线。', + advisement: { description: '走引阶文称需料统说容全基同提共类真。' }, + }, + ], + }, + msg: '回美等论区据值历们正应感准。', + success: false, + }); + }, +}; diff --git a/web/mock/RecoverComponentChange.mock.ts b/web/mock/RecoverComponentChange.mock.ts new file mode 100644 index 0000000..9ca3aac --- /dev/null +++ b/web/mock/RecoverComponentChange.mock.ts @@ -0,0 +1,77 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'POST /api/v1/component_change/:name/recover': (req: Request, res: Response) => { + res.status(200).send({ + code: 72, + data: { + total: 62, + items: [ + { + name: '石娟', + old_value: '常打化管还利历过想大受期车织音严团。', + new_value: '政王命合年论容南家选期四除又。', + }, + { + name: '陈洋', + old_value: '存色着发身各平眼者习高应物算。', + new_value: '带际例主那动铁门万见向王书对。', + }, + { + name: '方强', + old_value: '了是色办经运质形美支身消几感了上万。', + new_value: '门象议你候员才变住集革因国极中种月。', + }, + { + name: '毛超', + old_value: '活工都市品易花划整性周知反身给光。', + new_value: '作还价在任整第属与张制局半。', + }, + { + name: '范超', + old_value: '定究需况且红术验眼和验离革消压四。', + new_value: '心马我适对月效定根务素县志海造。', + }, + { + name: '蔡勇', + old_value: '运把管到题始火声算元加火效书技节还。', + new_value: '力交集支根也收国系想林理路决前里才。', + }, + { + name: '贾娟', + old_value: '到油点权那少只候有员况备代。', + new_value: '出接素况四动格青入家做一就儿周石。', + }, + { + name: '夏静', + old_value: '主些必局数口务度流只基向连有。', + new_value: '外素许花听色具验如调工五亲化世。', + }, + { + name: '赵平', + old_value: '细品转易化正山示观离基治人响温备争。', + new_value: '务众联反层商京究查般管计农品统。', + }, + { + name: '贺平', + old_value: '消些思除以样片劳知传日见。', + new_value: '连作行日前最亲地例术华般共越时度。', + }, + { + name: '苏平', + old_value: '格政证想回美眼称济华都气支处明收。', + new_value: '组张照场三即基度专土现想。', + }, + { + name: '曹伟', + old_value: '头近两何都带信其成前二然地千标海。', + new_value: '于解指九龙用进华各来件几。', + }, + ], + }, + msg: '石节车面命需因但结快义适它百历放。', + success: true, + }); + }, +}; diff --git a/web/mock/RemoveComponent.mock.ts b/web/mock/RemoveComponent.mock.ts new file mode 100644 index 0000000..fe3ef34 --- /dev/null +++ b/web/mock/RemoveComponent.mock.ts @@ -0,0 +1,10 @@ +// @ts-ignore +import { Request, Response } from 'express'; + +export default { + 'GET /api/v1/component_change/:name/remove': (req: Request, res: Response) => { + res + .status(200) + .send({ code: 78, data: null, msg: '革较共因看七总思干水认和你手性议些。', success: false }); + }, +}; diff --git 
a/web/package.json b/web/package.json index 41ec745..d395bae 100644 --- a/web/package.json +++ b/web/package.json @@ -29,12 +29,14 @@ "@oceanbase/icons": "^0.2.0", "@oceanbase/ui": "0.2.17", "@oceanbase/util": "^0.2.1", - "@umijs/plugin-openapi": "1.3.3", + "@umijs/max-plugin-openapi": "^2.0.3", + "@umijs/plugin-openapi": "^1.3.3", "antd": "5.0.7", "classnames": "^2.3.2", "copy-to-clipboard": "3.3.3", "cross-env": "7.0.3", "i18next": "^23.8.1", + "jsencrypt": "^3.3.2", "lodash": "^4.17.21", "lottie-web": "5.10.2", "moment": "^2.29.4", @@ -55,6 +57,7 @@ "@types/validator": "^12.0.1", "@types/video.js": "7.3.50", "@umijs/max": "^4.0.88", + "babel-plugin-dynamic-import-node": "^2.3.3", "immer": "9.0.6", "lint-staged": "10.5.4", "prettier": "2.7.1", diff --git a/web/public/assets/data-empty.svg b/web/public/assets/data-empty.svg new file mode 100644 index 0000000..1241691 --- /dev/null +++ b/web/public/assets/data-empty.svg @@ -0,0 +1,14 @@ + + + Z#/2.缺省图/暂无数据-组件 + + + + + + + + + + + \ No newline at end of file diff --git a/web/public/assets/welcome/component-manage.svg b/web/public/assets/welcome/component-manage.svg new file mode 100644 index 0000000..2dc6069 --- /dev/null +++ b/web/public/assets/welcome/component-manage.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/web/public/assets/welcome/ob-guide.png b/web/public/assets/welcome/ob-guide.png new file mode 100644 index 0000000..b3c262d Binary files /dev/null and b/web/public/assets/welcome/ob-guide.png differ diff --git a/web/public/assets/welcome/ocp-guide.png b/web/public/assets/welcome/ocp-guide.png new file mode 100644 index 0000000..de1b4de Binary files /dev/null and b/web/public/assets/welcome/ocp-guide.png differ diff --git a/web/public/assets/welcome/oms-guide-disable.png b/web/public/assets/welcome/oms-guide-disable.png new file mode 100644 index 0000000..2707533 Binary files /dev/null and b/web/public/assets/welcome/oms-guide-disable.png differ diff --git a/web/public/assets/welcome/oms-guide.png b/web/public/assets/welcome/oms-guide.png new file mode 100644 index 0000000..8f8dd14 Binary files /dev/null and b/web/public/assets/welcome/oms-guide.png differ diff --git a/web/src/component/ComponentsPort/index.tsx b/web/src/component/ComponentsPort/index.tsx new file mode 100644 index 0000000..90c97fa --- /dev/null +++ b/web/src/component/ComponentsPort/index.tsx @@ -0,0 +1,124 @@ +import InputPort from '@/component/InputPort'; +import { + commonStyle, + configServerComponent, + obagentComponent, + obproxyComponent, + ocpexpressComponent, +} from '@/pages/constants'; +import { intl } from '@/utils/intl'; +import { QuestionCircleOutlined } from '@ant-design/icons'; +import { Row, Space, Tooltip } from 'antd'; + +interface ComponentsPortProps { + lowVersion?: boolean; + selectedConfig: string[]; +} + +export default function ComponentsPort({ + selectedConfig, + lowVersion = false, +}: ComponentsPortProps) { + return ( + <> + {(selectedConfig.includes(obproxyComponent) || + selectedConfig.includes('obproxy-ce')) && ( + + + + + + {intl.formatMessage({ + id: 'OBD.pages.components.ClusterConfig.PortObproxyExporter', + defaultMessage: 'OBProxy Exporter 端口', + })} + + + + + + } + fieldProps={{ style: commonStyle }} + /> + + + + )} + {selectedConfig.includes(obagentComponent) && ( + + + + + + + + )} + {(selectedConfig.includes(ocpexpressComponent) || + selectedConfig.includes(configServerComponent)) && + !lowVersion && ( + + + {selectedConfig.includes(ocpexpressComponent) && ( + + )} + + 
{selectedConfig.includes(configServerComponent) && ( + + )} + + + )} + + ); +} diff --git a/web/src/component/ConnectConfig/index.tsx b/web/src/component/ConnectConfig/index.tsx index 95301a5..87f005d 100644 --- a/web/src/component/ConnectConfig/index.tsx +++ b/web/src/component/ConnectConfig/index.tsx @@ -5,15 +5,16 @@ import { Space, Tooltip, Button, - InputNumber, message, Modal, } from 'antd'; import { QuestionCircleOutlined, CloseCircleOutlined } from '@ant-design/icons'; import { useRequest } from 'ahooks'; +import { getPublicKey } from '@/services/ob-deploy-web/Common'; import { useModel } from 'umi'; import * as Metadb from '@/services/ocp_installer_backend/Metadb'; +import { encrypt } from '@/utils/encrypt'; import CustomFooter from '../CustomFooter'; import InputPort from '../InputPort'; import ExitBtn from '../ExitBtn'; @@ -78,15 +79,16 @@ export default function ConnectConfig({ setCurrent, current }: API.StepProp) { const nextStep = () => { form .validateFields() - .then((values) => { + .then(async (values) => { const { host, port, user, password } = values.metadb; + const { data: publicKey } = await getPublicKey(); createMetadbConnection( { sys: true }, { host, port, user, - password, + password: encrypt(password, publicKey) || password, cluster_name, }, ).then(() => { diff --git a/web/src/component/CustomAlert/AlertMetadb.tsx b/web/src/component/CustomAlert/AlertMetadb.tsx new file mode 100644 index 0000000..39b96dd --- /dev/null +++ b/web/src/component/CustomAlert/AlertMetadb.tsx @@ -0,0 +1,19 @@ +import { intl } from '@/utils/intl'; +import CustomAlert from '.'; + +export default function AlertMetadb() { + return ( + + ); +} diff --git a/web/src/component/CustomAlert/index.less b/web/src/component/CustomAlert/index.less new file mode 100644 index 0000000..f3d422e --- /dev/null +++ b/web/src/component/CustomAlert/index.less @@ -0,0 +1,36 @@ +.alertContainer { + display: flex; + align-items: center; + justify-content: center; + padding-inline: 13px; + padding-top: 9px; + padding-bottom: 9px; + :global { + .ant-alert-icon { + font-size: 14px !important; + } + } +} +.alertContainer::before { + content: ''; + width: 8px; + transform: rotateY(60deg); + position: absolute; + height: calc(100% - 1px); + left: -3px; + top: 0.5px; + border-top-left-radius: 8px; + border-bottom-left-radius: 8px; +} +.alert-warning::before { + background-color: #ffa21a; +} +.alert-success::before { + background-color: #0ac185; +} +.alert-info::before { + background-color: #006aff; +} +.alert-info::error { + background-color: #f93939; +} diff --git a/web/src/component/CustomAlert/index.tsx b/web/src/component/CustomAlert/index.tsx new file mode 100644 index 0000000..a4631de --- /dev/null +++ b/web/src/component/CustomAlert/index.tsx @@ -0,0 +1,13 @@ +import type { AlertProps } from 'antd'; +import { Alert } from 'antd'; +import styles from './index.less'; +export default function CustomAlert(props: AlertProps) { + const alertTypeClass = props.type || 'info'; + return ( + + ); +} diff --git a/web/src/component/CustomPasswordInput/index.tsx b/web/src/component/CustomPasswordInput/index.tsx index 59e8b6b..8466c50 100644 --- a/web/src/component/CustomPasswordInput/index.tsx +++ b/web/src/component/CustomPasswordInput/index.tsx @@ -5,7 +5,7 @@ import { OCP_PASSWORD_ERROR_REASON, OCP_PASSWORD_ERROR_REASON_OLD, passwordCheck, - passwordCheckLowVersion + passwordCheckLowVersion, } from '@/utils/helper'; import { intl } from '@/utils/intl'; import { ProForm } from '@ant-design/pro-components'; @@ -23,6 +23,8 @@ 
interface CustomPasswordInputProps { msgInfo: MsgInfoType; useOldRuler?: boolean; useFor: 'ob' | 'ocp'; + style?: React.CSSProperties; + innerInputStyle?: React.CSSProperties; setMsgInfo: React.Dispatch>; } @@ -46,9 +48,10 @@ export default function CustomPasswordInput({ setMsgInfo, useOldRuler = false, useFor, + innerInputStyle = { width: 328 }, ...props }: CustomPasswordInputProps) { - const textStyle = { marginTop: '8px' }; + const textStyle = { marginTop: '8px', marginBottom: '24px' }; const oldValidateInput = (value: string): MsgInfoType => { if (!passwordCheckLowVersion(value)) { return { @@ -121,14 +124,13 @@ export default function CustomPasswordInput({ {intl.formatMessage({ id: 'OBD.component.CustomPasswordInput.PleaseRememberThePasswordOr', defaultMessage: '请牢记密码,也可', - })} - + })}{' '} passwordCopy()}> {intl.formatMessage({ id: 'OBD.component.CustomPasswordInput.CopyPassword', defaultMessage: '复制密码', })} - + {' '} {intl.formatMessage({ id: 'OBD.component.CustomPasswordInput.AndKeepItProperly', defaultMessage: '并妥善保存', @@ -156,15 +158,15 @@ export default function CustomPasswordInput({ }), }, { - validator:(_,value)=>{ - let validateRes = validateInput(value) - if( validateRes.validateStatus === 'success'){ - return Promise.resolve() - }else{ - return Promise.reject(new Error(validateRes.errorMsg!)) + validator: (_, value) => { + let validateRes = validateInput(value); + if (validateRes.validateStatus === 'success') { + return Promise.resolve(); + } else { + return Promise.reject(new Error(validateRes.errorMsg!)); } - } - } + }, + }, ]} name={name} {...props} @@ -173,17 +175,17 @@ export default function CustomPasswordInput({ handleChange(e.target.value)} value={value} - style={{ width: 328 }} + style={innerInputStyle} /> - {showCopyBtn && ( -