diff --git a/.gitignore b/.gitignore index 0dd8fdb..87a2dba 100644 --- a/.gitignore +++ b/.gitignore @@ -35,5 +35,8 @@ htmlcov .mr.developer.cfg .project .pydevproject +.idea/ +.vagrant/ +Vagrantfile pg_view.log diff --git a/pg_view/__init__.py b/pg_view/__init__.py index babef24..6f23cc3 100644 --- a/pg_view/__init__.py +++ b/pg_view/__init__.py @@ -14,24 +14,26 @@ from pg_view import flags from pg_view.collectors.host_collector import HostStatCollector from pg_view.collectors.memory_collector import MemoryStatCollector -from pg_view.collectors.partition_collector import PartitionStatCollector, DetachedDiskStatCollector -from pg_view.collectors.pg_collector import PgstatCollector +from pg_view.collectors.partition_collector import PartitionStatCollector, DetachedDiskStatCollector, \ + DiskCollectorConsumer +from pg_view.collectors.pg_collector import PgStatCollector from pg_view.collectors.system_collector import SystemStatCollector +from pg_view.exceptions import NoPidConnectionError, InvalidConnectionParamError, NotConnectedError, \ + DuplicatedConnectionError from pg_view.loggers import logger, enable_logging_to_stderr, disable_logging_to_stderr -from pg_view.models.consumers import DiskCollectorConsumer -from pg_view.models.db_client import build_connection, detect_db_connection_arguments, \ - establish_user_defined_connection, make_cluster_desc, get_postmasters_directories -from pg_view.models.outputs import CommonOutput, CursesOutput -from pg_view.utils import get_valid_output_methods, OUTPUT_METHOD, \ - output_method_is_valid, read_configuration, process_single_collector, process_groups +from pg_view.models.db_client import make_cluster_desc, DBClient +from pg_view.models.outputs import CommonOutput, CursesOutput, get_displayer_by_class +from pg_view.models.parsers import ProcWorker +from pg_view.utils import get_valid_output_methods, OUTPUT_METHOD, output_method_is_valid, \ + read_configuration, process_single_collector, process_groups, validate_autodetected_conn_param try: import psycopg2 import psycopg2.extras - - psycopg2_available = True except ImportError: - psycopg2_available = False + print('Unable to import psycopg2 module, please, install it (python-psycopg2). 
Can not continue') + sys.exit(254) + try: import curses @@ -40,6 +42,10 @@ print('Unable to import ncurses, curses output will be unavailable') curses_available = False +# setup system constants +output_method = OUTPUT_METHOD.curses +options = None + def parse_args(): """parse command-line options""" @@ -56,7 +62,7 @@ def parse_args(): parser.add_option('-o', '--output-method', help='send output to the following source', action='store', default=OUTPUT_METHOD.curses, dest='output_method') parser.add_option('-V', '--use-version', - help='version of the instance to monitor (in case it can\'t be autodetected)', + help="version of the instance to monitor (in case it can't be autodetected)", action='store', dest='version', type='float') parser.add_option('-l', '--log-file', help='direct log output to the file', action='store', dest='log_file') @@ -79,11 +85,6 @@ def parse_args(): return options, args -# setup system constants -output_method = OUTPUT_METHOD.curses -options = None - - # execution starts here def loop(collectors, consumer, groups, output_method): if output_method == OUTPUT_METHOD.curses: @@ -114,10 +115,8 @@ def poll_keys(screen, output): return True -def do_loop(screen, groups, output_method, collectors, consumer): - """ Display output (or pass it through to ncurses) """ - - if output_method == OUTPUT_METHOD.curses: +def get_output(method, screen): + if method == OUTPUT_METHOD.curses: if screen is None: logger.error('No parent screen is passed to the curses application') sys.exit(1) @@ -129,19 +128,26 @@ def do_loop(screen, groups, output_method, collectors, consumer): sys.exit(1) else: output = CommonOutput() + return output + + +def do_loop(screen, groups, output_method, collectors, consumer): + """ Display output (or pass it through to ncurses) """ + + output = get_output(output_method, screen) while 1: # process input: consumer.consume() - for st in collectors: + for collector in collectors: if output_method == OUTPUT_METHOD.curses and not poll_keys(screen, output): - # bail out immediately - return - st.set_units_display(flags.display_units) - st.set_ignore_autohide(not flags.autohide_fields) - st.set_notrim(flags.notrim) - process_single_collector(st) + if not poll_keys(screen, output): + # bail out immediately + return + + process_single_collector(collector, flags.filter_aux) if output_method == OUTPUT_METHOD.curses and not poll_keys(screen, output): - return + if not poll_keys(screen, output): + return if output_method == OUTPUT_METHOD.curses: process_groups(groups) @@ -149,8 +155,15 @@ def do_loop(screen, groups, output_method, collectors, consumer): # clears the screen, so we need to refresh before display to clear the old data. if options.clear_screen and output_method != OUTPUT_METHOD.curses: output.refresh() - for st in collectors: - output.display(st.output(output_method)) + for collector in collectors: + displayer = get_displayer_by_class( + output_method, collector, + show_units=flags.display_units, + ignore_autohide=not flags.autohide_fields, + notrim=flags.notrim + ) + formatted_data = collector.output(displayer) + output.display(formatted_data) # in the curses case, refresh shows the data queued by display if output_method == OUTPUT_METHOD.curses: output.refresh() @@ -166,13 +179,8 @@ def main(): print('Non Linux database hosts are not supported at the moment. Can not continue') sys.exit(243) - if not psycopg2_available: - print('Unable to import psycopg2 module, please, install it (python-psycopg2). 
Can not continue') - sys.exit(254) - options, args = parse_args() consts.TICK_LENGTH = options.tick - output_method = options.output_method if not output_method_is_valid(output_method): @@ -197,60 +205,75 @@ def main(): if options.instance and instance != options.instance: continue # pass already aquired connections to make sure we only list unique clusters. - host = config[instance].get('host') - port = config[instance].get('port') - conn = build_connection(host, port, - config[instance].get('user'), config[instance].get('dbname')) - - if not establish_user_defined_connection(instance, conn, clusters): - logger.error('failed to acquire details about ' + - 'the database cluster {0}, the server will be skipped'.format(instance)) + db_client = DBClient.from_config(config[instance]) + try: + cluster = db_client.establish_user_defined_connection(instance, clusters) + except (NotConnectedError, NoPidConnectionError): + logger.error('failed to acquire details about the database cluster {0}, the server ' + 'will be skipped'.format(instance)) + except DuplicatedConnectionError: + pass + else: + clusters.append(cluster) + elif options.host: # connect to the database using the connection string supplied from command-line - conn = build_connection(options.host, options.port, options.username, options.dbname) + db_client = DBClient.from_options(options) instance = options.instance or "default" - if not establish_user_defined_connection(instance, conn, clusters): + try: + cluster = db_client.establish_user_defined_connection(instance, clusters) + except (NotConnectedError, NoPidConnectionError): logger.error("unable to continue with cluster {0}".format(instance)) + except DuplicatedConnectionError: + pass + else: + clusters.append(cluster) elif options.use_service and options.instance: + db_client = DBClient({'service': options.instance}) # connect to the database using the service name - if not establish_user_defined_connection(options.instance, {'service': options.instance}, clusters): + if not db_client.establish_user_defined_connection(options.instance, clusters): logger.error("unable to continue with cluster {0}".format(options.instance)) else: # do autodetection - postmasters = get_postmasters_directories() - + postmasters = ProcWorker().get_postmasters_directories() # get all PostgreSQL instances - for result_work_dir, data in postmasters.items(): - (ppid, dbversion, dbname) = data - # if user requested a specific database name and version - don't try to connect to others + for result_work_dir, connection_params in postmasters.items(): + (ppid, dbversion, dbname) = connection_params + try: + validate_autodetected_conn_param(dbname, dbversion, result_work_dir, connection_params) + except InvalidConnectionParamError: + continue + if options.instance: if dbname != options.instance or not result_work_dir or not ppid: continue if options.version is not None and dbversion != options.version: continue + db_client = DBClient.from_postmasters(result_work_dir, ppid, dbversion, options) + if db_client is None: + continue try: - conndata = detect_db_connection_arguments( - result_work_dir, ppid, dbversion, options.username, options.dbname) - if conndata is None: - continue - host = conndata['host'] - port = conndata['port'] - conn = build_connection(host, port, options.username, options.dbname) - pgcon = psycopg2.connect(**conn) + pgcon = psycopg2.connect(**db_client.connection_params) except Exception as e: logger.error('PostgreSQL exception {0}'.format(e)) - pgcon = None - if pgcon: - desc = 
make_cluster_desc(name=dbname, version=dbversion, workdir=result_work_dir, - pid=ppid, pgcon=pgcon, conn=conn) + else: + desc = make_cluster_desc( + name=dbname, + version=dbversion, + workdir=result_work_dir, + pid=ppid, + pgcon=pgcon, + conn=db_client.connection_params + ) clusters.append(desc) + collectors = [] groups = {} try: - if len(clusters) == 0: + if not clusters: logger.error('No suitable PostgreSQL instances detected, exiting...') - logger.error('hint: use -v for details, ' + - 'or specify connection parameters manually in the configuration file (-c)') + logger.error('hint: use -v for details, or specify connection parameters ' + 'manually in the configuration file (-c)') sys.exit(1) # initialize the disks stat collector process and create an exchange queue @@ -265,13 +288,14 @@ def main(): collectors.append(HostStatCollector()) collectors.append(SystemStatCollector()) collectors.append(MemoryStatCollector()) - for cl in clusters: - part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'], consumer) - pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'], cl['name'], cl['ver'], options.pid) - groupname = cl['wd'] - groups[groupname] = {'pg': pg, 'partitions': part} - collectors.append(part) - collectors.append(pg) + + for cluster in clusters: + partition_collector = PartitionStatCollector.from_cluster(cluster, consumer) + pg_collector = PgStatCollector.from_cluster(cluster, options.pid) + + groups[cluster['wd']] = {'pg': pg_collector, 'partitions': partition_collector} + collectors.append(partition_collector) + collectors.append(pg_collector) # we don't want to mix diagnostics messages with useful output, so we log the former into a file. disable_logging_to_stderr() diff --git a/pg_view/collectors/base_collector.py b/pg_view/collectors/base_collector.py index d056ec6..6ea2ff7 100644 --- a/pg_view/collectors/base_collector.py +++ b/pg_view/collectors/base_collector.py @@ -1,39 +1,24 @@ -import json -import os -import subprocess import time -from datetime import timedelta, datetime -from numbers import Number +from abc import ABCMeta +from pg_view.consts import NCURSES_CUSTOM_OUTPUT_FIELDS from pg_view.loggers import logger -from pg_view.models.outputs import COLSTATUS, COLALIGN, COLTYPES, COLHEADER, ColumnType -from pg_view.utils import OUTPUT_METHOD +from pg_view.utils import UnitConverter -class StatCollector(object): +def warn_non_optional_column(colname): + logger.error('Column {0} is not optional, but input row has no value for it'.format(colname)) + +class BaseStatCollector(object): """ Generic class to store abstract function and data required to collect system statistics, produce diffs and emit output rows. 
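
[Aside, not part of the patch: the refactored main() above builds each entry of `clusters` with make_cluster_desc() and later feeds it to the new from_cluster() constructors. make_cluster_desc() itself lives in pg_view/models/db_client.py, outside this diff, so the sketch below only illustrates the descriptor shape implied by the keys the patch reads ('name', 'ver', 'wd', 'pid', 'pgcon', 'reconnect'); the concrete values are placeholders.]

    # illustrative cluster descriptor -- values are made up for the example
    example_cluster = {
        'name': 'default',                        # instance name
        'ver': 9.6,                               # server version as a float
        'wd': '/var/lib/postgresql/9.6/main',     # postmaster work directory
        'pid': 4242,                              # postmaster pid
        'pgcon': None,                            # live psycopg2 connection in real use
        'reconnect': None,                        # callable that re-establishes the connection
    }
    # each such descriptor then yields one pair of collectors:
    #   PartitionStatCollector.from_cluster(example_cluster, consumer)
    #   PgStatCollector.from_cluster(example_cluster, options.pid)
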
""" - - BYTE_MAP = [('TB', 1073741824), ('GB', 1048576), ('MB', 1024)] - USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK']) - RD = 1 - - NCURSES_DEFAULTS = { - 'pos': -1, - 'noautohide': False, - 'w': 0, - 'align': COLALIGN.ca_none, - 'column_header': COLHEADER.ch_default, - } - - NCURSES_CUSTOM_OUTPUT_FIELDS = ['header', 'prefix', 'prepend_column_headers'] + __metaclass__ = ABCMeta def __init__(self, ticks_per_refresh=1, produce_diffs=True): self.rows_prev = [] self.rows_cur = [] - self.time_diff = 0 self.rows_diff = [] self.ticks = 0 self.ticks_per_refresh = ticks_per_refresh @@ -41,9 +26,7 @@ def __init__(self, ticks_per_refresh=1, produce_diffs=True): self._previous_moment = None self._current_moment = None self.produce_diffs = produce_diffs - self.show_units = False - self.ignore_autohide = True - self.notrim = False + self.unit_converter = UnitConverter # transformation data self.transform_dict_data = {} # data to transform a dictionary input to the stat row @@ -53,10 +36,7 @@ def __init__(self, ticks_per_refresh=1, produce_diffs=True): self.diff_generator_data = {} # data to produce a diff row out of 2 input ones. self.output_transform_data = {} # data to transform diff output - self.output_function = {OUTPUT_METHOD.console: self.console_output, OUTPUT_METHOD.json: self.json_output, - OUTPUT_METHOD.curses: self.ncurses_output} - self.cook_function = {OUTPUT_METHOD.curses: self.curses_cook_value} - self.ncurses_custom_fields = dict.fromkeys(StatCollector.NCURSES_CUSTOM_OUTPUT_FIELDS, None) + self.ncurses_custom_fields = dict.fromkeys(NCURSES_CUSTOM_OUTPUT_FIELDS, None) def postinit(self): for l in [self.transform_list_data, self.transform_dict_data, self.diff_generator_data, @@ -64,32 +44,12 @@ def postinit(self): self.validate_list_out(l) self.output_column_positions = self._calculate_output_column_positions() - def set_ignore_autohide(self, new_status): - self.ignore_autohide = new_status - - def set_notrim(self, val): - self.notrim = val - def _calculate_output_column_positions(self): result = {} for idx, col in enumerate(self.output_transform_data): result[col['out']] = idx - return result - def enumerate_output_methods(self): - return self.output_function.keys() - - @staticmethod - def exec_command_with_output(cmdline): - """ Execute comand (including shell ones), return a tuple with error code (1 element) and output (rest) """ - - proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - ret = proc.wait() - if ret != 0: - logger.info('The command {cmd} returned a non-zero exit code'.format(cmd=cmdline)) - return ret, proc.stdout.read().strip() - @staticmethod def validate_list_out(l): """ If the list element doesn't supply an out column - remove it """ @@ -99,168 +59,6 @@ def validate_list_out(l): el = l.pop(l.index(col)) logger.error('Removed {0} column because it did not specify out value'.format(el)) - @staticmethod - def ticks_to_seconds(tick_value_str): - return float(tick_value_str) / StatCollector.USER_HZ if tick_value_str is not None else None - - @staticmethod - def bytes_to_mbytes(bytes_val): - return float(bytes_val) / 1048576 if bytes_val is not None else None - - @staticmethod - def sectors_to_mbytes(sectors): - return float(sectors) / 2048 if sectors is not None else None - - @staticmethod - def kb_to_mbytes(kb): - return float(kb) / 1024 if kb is not None else None - - @staticmethod - def time_diff_to_percent(timediff_val): - return float(timediff_val) * 100 if timediff_val is not None else None - - @staticmethod - def 
format_date_from_epoch(epoch_val): - lt = time.localtime(epoch_val) - today = time.localtime() - time_format_str = '%H:%M:%S' - if lt.tm_year != today.tm_year or lt.tm_mon != today.tm_mon or lt.tm_mday != today.tm_mday: - # only show minutes and seconds - time_format_str = '%m-%d %H:%M:%S' - # show full date - return time.strftime(time_format_str, time.localtime(epoch_val)) - - @staticmethod - def kb_pretty_print_long(b): - """ Show kb values in a human readable form. """ - - r = [] - for l, n in StatCollector.BYTE_MAP: - d = b / n - if d: - r.append(str(d) + l) - b %= n - return ' '.join(r) - - @staticmethod - def kb_pretty_print(b): - """ Show memory size as a float value in the biggest measurement units """ - - r = [] - for l, n in StatCollector.BYTE_MAP: - if b > n: - v = round(float(b) / n, 1) - r.append(str(v) + l) - break - if len(r) == 0: - return '{0}KB'.format(str(b)) - else: - return ' '.join(r) - - @staticmethod - def time_interval_pretty_print(start_time, is_delta): - """Returns a human readable string that shows a time between now and the timestamp passed as an argument. - The passed argument can be a timestamp (returned by time.time() call) a datetime object or a timedelta object. - In case it is a timedelta object, then it is formatted only - """ - - if isinstance(start_time, Number): - if is_delta: - delta = timedelta(seconds=int(time.time() - start_time)) - else: - delta = timedelta(seconds=start_time) - elif isinstance(start_time, datetime): - if is_delta: - delta = datetime.now() - start_time - else: - delta = start_time - elif isinstance(start_time, timedelta): - delta = start_time - else: - raise ValueError('passed value should be either a number of seconds ' + - 'from year 1970 or datetime instance of timedelta instance') - - delta = abs(delta) - - secs = delta.seconds - mins = int(secs / 60) - secs %= 60 - hrs = int(mins / 60) - mins %= 60 - hrs %= 24 - result = '' - if delta.days: - result += str(delta.days) + 'd' - if hrs: - if hrs < 10: - result += '0' - result += str(hrs) - result += ':' - if mins < 10: - result += '0' - result += str(mins) - result += ':' - if secs < 10: - result += '0' - result += str(secs) - if not result: - result = str(int(delta.microseconds / 1000)) + 'ms' - return result - - @staticmethod - def time_pretty_print(start_time): - return StatCollector.time_interval_pretty_print(start_time, False) - - @staticmethod - def delta_pretty_print(start_time): - return StatCollector.time_interval_pretty_print(start_time, True) - - @staticmethod - def sectors_pretty_print(b): - return StatCollector.kb_pretty_print(b * 2) - - @staticmethod - def int_lower_than_non_zero(row, col, val, bound): - return 0 < val < bound - - @staticmethod - def time_field_to_seconds(val): - result = 0 - num = 0 - accum_digits = [] - semicolons_no = val.count(':') - for c in val: - if c.isdigit(): - accum_digits.append(c) - else: - if len(accum_digits) > 0: - num = int(''.join(accum_digits)) - if c == 'd': - num *= 86400 - elif c == ':': - num *= 60 ** semicolons_no - semicolons_no -= 1 - result += num - num = 0 - accum_digits = [] - return result - - def time_field_status(self, row, col): - val = row[self.output_column_positions[col['out']]] - num = StatCollector.time_field_to_seconds(val) - if num <= col['critical']: - return {-1: COLSTATUS.cs_critical} - elif num <= col['warning']: - return {-1: COLSTATUS.cs_warning} - return {-1: COLSTATUS.cs_ok} - - @staticmethod - def warn_non_optional_column(colname): - logger.error('Column {0} is not optional, but input row has no 
value for it'.format(colname)) - - def set_units_display(self, status): - self.show_units = status - def needs_diffs(self): """ whether the collector needs diffs. It might not if it's not interested in them, or if it doesn't have data to produce them yet. @@ -283,60 +81,6 @@ def ident(self): def ncurses_set_prefix(self, new_prefix): self.ncurses_custom_fields['prefix'] = new_prefix - def cook_row(self, row, header, method): - cooked_vals = [] - if not self.cook_function.get(method): - return row - if len(row) != len(header): - logger.error( - 'Unable to cook row with non-matching number of header and value columns: ' + - 'row {0} header {1}'.format(row, header) - ) - cook_fn = self.cook_function[method] - for no, val in enumerate(row): - # if might be tempting to just get the column from output_transform_data using - # the header, but it's wrong: see _produce_output_name for details. This, of - # course, assumes the number of columns in the output_transform_data is the - # same as in row: thus, we need to avoid filtering rows in the collector. - newval = cook_fn(val, header[no], self.output_transform_data[no]) - cooked_vals.append(newval) - return cooked_vals - - def curses_cook_value(self, attname, raw_val, output_data): - """ return cooked version of the row, with values transformed. A transformation is - the same for all columns and depends on the values only. - """ - val = raw_val - header = str(attname) - # change the None output to '' - if raw_val is None: - return ColumnType(value='', header='', header_position=None) - if str(raw_val) == 'True': - val = 'T' - elif str(raw_val) == 'False': - val = 'F' - if output_data.get('maxw', 0) > 0 and not self.notrim and len(str(val)) > output_data['maxw']: - # if the value is larger than the maximum allowed width - trim it by removing chars from the middle - val = self._trim_text_middle(val, output_data['maxw']) - if self.ncurses_custom_fields.get('prepend_column_headers') or output_data.get( - 'column_header', COLHEADER.ch_default) == COLHEADER.ch_prepend: - header_position = COLHEADER.ch_prepend - elif output_data.get('column_header', COLHEADER.ch_default) == COLHEADER.ch_append: - header_position = COLHEADER.ch_append - else: - header = '' - header_position = None - return ColumnType(value=str(val), header=header, header_position=header_position) - - @staticmethod - def _trim_text_middle(val, maxw): - """ Trim data by removing middle characters, so hello world' for 8 will become hel..rld. - This kind of trimming seems to be better than tail trimming for user and database names. - """ - - half = int((maxw - 2) / 2) - return val[:half] + '..' + val[-half:] - def _do_refresh(self, new_rows): """ Make a place for new rows and calculate the time diff """ @@ -353,7 +97,6 @@ def _produce_diff_row(self, prev, cur): """ produce output columns out of 2 input ones (previous and current). If the value doesn't exist in either of the diffed rows - we set the result to None """ - # exit early if we don't need any diffs if not self.produce_diffs: return {} @@ -367,8 +110,8 @@ def _produce_diff_row(self, prev, cur): result[attname] = (cur[incol] if incol in cur else None) elif 'fn' in col: # if diff is True and fn is supplied - apply it to the current and previous row. 
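
[Aside, not part of the patch: _produce_diff_row() is kept here apart from reflowing the long line. For readers new to the code, a simplified stand-alone sketch of the per-second rate calculation it performs (the 'fn' branch is omitted; column names in the usage comment are hypothetical):]

    def produce_rate(prev, cur, diff_time, columns):
        """Turn two consecutive samples taken diff_time seconds apart into per-second rates."""
        result = {}
        for col in columns:
            name = col['out']
            incol = col.get('in', name)
            if not col.get('diff', True):
                # non-diffed columns are passed through from the current sample
                result[name] = cur.get(incol)
            elif cur.get(incol) is not None and prev.get(incol) is not None and diff_time > 0:
                result[name] = (cur[incol] - prev[incol]) / diff_time
            else:
                result[name] = None
        return result

    # e.g. 512 sectors read between two samples one second apart:
    # produce_rate({'sectors_read': 100}, {'sectors_read': 612}, 1.0,
    #              [{'out': 'read', 'in': 'sectors_read'}])  ->  {'read': 512.0}
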
- result[attname] = (col['fn'](incol, cur, prev) if cur.get(incol, None) is not None and prev.get(incol, - None) is not None else None) + result[attname] = (col['fn'](incol, cur, prev) if cur.get(incol, None) is not None and prev.get( + incol, None) is not None else None) else: # default case - calculate the diff between the current attribute's values of # old and new rows and divide it by the time interval passed between measurements. @@ -376,109 +119,13 @@ def _produce_diff_row(self, prev, cur): prev.get(incol, None) is not None and self.diff_time >= 0 else None) return result - def _produce_output_row(self, row): - """ produce the output row for the screen, json or the database - from the diff rows. It consists of renaming columns and rounding - the result when necessary - """ - - result = {} - # produce the output row column by column - for col in self.output_transform_data: - attname = self._produce_output_name(col) - val = self._produce_output_value(row, col) - result[attname] = val - return result - - @staticmethod - def _produce_output_value(row, col, method=OUTPUT_METHOD.console): - # get the input value - if 'in' in col: - val = row.get(col['in'], None) - else: - val = row.get(col['out'], None) - # if function is specified - apply it to the input value - if 'fn' in col and val is not None: - val = col['fn'](val) - # if rounding is necessary - round the input value up to specified - # decimal points - if 'round' in col and val is not None: - val = round(val, col['round']) - return val - - def _produce_output_name(self, col): - # get the output column name - attname = col['out'] - # add units to the column name if neccessary - if 'units' in col and self.show_units: - attname += ' ' + col['units'] - return attname - - @staticmethod - def _calculate_output_status(row, col, val, method): - """ Examine the current status indicators and produce the status - value for the specific column of the given row - """ - - st = {-1: COLSTATUS.cs_ok} - # if value is missing - don't bother calculating anything - if val is None: - return st - if 'status_fn' in col: - st = col['status_fn'](row, col) - if len(st) == 0: - st = {-1: COLSTATUS.cs_ok} - else: - words = str(val).split() - for i, word in enumerate(words): - for st_name, st_status in zip(('critical', 'warning'), (COLSTATUS.cs_critical, COLSTATUS.cs_warning)): - if st_name in col: - typ = type(col[st_name]) - if typ == int: - typ = float - if typ(word) >= col[st_name]: - st[i] = st_status - break - if i not in st: - st[i] = COLSTATUS.cs_ok - return st - - def _get_columns_to_hide(self, result_rows, status_rows): - """ scan the (cooked) rows, do not show columns that are empty """ - - to_skip = [] - for col in self.output_transform_data: - if col.get('pos') == -1: - continue - attname = self._produce_output_name(col) - empty = True - for r in result_rows: - if r[attname].value != '': - empty = False - break - if empty: - to_skip.append(attname) - elif col.get('hide_if_ok', False): - status_ok = True - for row in status_rows: - if attname in row and row[attname]: - for cl in row[attname]: - if row[attname][cl] != COLSTATUS.cs_ok: - status_ok = False - break - if not status_ok: - break - if status_ok: - to_skip.append(attname) - return to_skip - def _transform_input(self, x, custom_transformation_data=None): if isinstance(x, list) or isinstance(x, tuple): return self._transform_list(x, custom_transformation_data) elif isinstance(x, dict): return self._transform_dict(x, custom_transformation_data) elif isinstance(x, str): - return 
self._transform_string(x) + raise Exception('transformation of input type string is not implemented') else: raise Exception('transformation of data type {0} is not supported'.format(type(x))) @@ -514,7 +161,7 @@ def _transform_list(self, l, custom_transformation_data=None): # nothing at all from them - then the problem is elsewhere and there is no need # to bleat here for each missing column. if not col.get('optional', False) and len(l) > 0: - self.warn_non_optional_column(incol) + warn_non_optional_column(incol) else: result[attname] = l[incol] # if transformation function is supplied - apply it to the input data. @@ -548,7 +195,7 @@ def _transform_dict(self, l, custom_transformation_data=None): result[attname] = None # see the comment at _transform_list on why we do complain here. if not col.get('optional', False) and len(l) > 0: - self.warn_non_optional_column(incol) + warn_non_optional_column(incol) else: result[attname] = l[incol] if 'fn' in col and result[attname] is not None: @@ -556,203 +203,19 @@ def _transform_dict(self, l, custom_transformation_data=None): return result raise Exception('No data for the dict transformation supplied') - @staticmethod - def _transform_string(d): - raise Exception('transformation of input type string is not implemented') - - def _output_template_for_console(self): - return ' '.join(self._output_row_for_console(None, 't')) - - def _output_row_for_console(self, row, typ='v'): - return self._output_row_generic(row, typ, method=OUTPUT_METHOD.console) - - def _output_row_for_curses(self, row, typ='v'): - return self._output_row_generic(row, typ, method=OUTPUT_METHOD.curses) - - def _output_row_generic(self, row, typ='v', method=OUTPUT_METHOD.console): - """ produce a single output row of the type specified by the - last argument: - t - template row - h - header row (only names) - v - values rows - """ - - vals = [] - # produce the output row column by column - for i, col in enumerate(self.output_transform_data): - # get the final attribute name and value - if typ == 't': - if 'w' not in col: - val = '{{{0}}}'.format(i) - else: - val = '{{{0}:<{1}}}'.format(i, col['w']) - elif typ == 'h': - val = self._produce_output_name(col) - else: - val = self._produce_output_value(row, col, method) - # prepare the list for the output - vals.append(val) - if 'typ' != 'v': - return vals - else: - return vals - - def console_output(self, rows, before_string=None, after_string=None): - """ Main entry point for preparing textual console output """ - - result = [] - # start by filling-out width of the values - self._calculate_dynamic_width(rows) - - # now produce output template, headers and actual values - templ = self._output_template_for_console() - header = self._output_row_for_console(None, 'h') - - if before_string: - result.append(before_string) - - result.append(templ.format(*header)) - - for r in rows: - row = self._output_row_for_console(r, 'v') - result.append(templ.format(*row)) - - if after_string: - result.append(after_string) - - return '\n'.join(result) - - def _calculate_dynamic_width(self, rows, method=OUTPUT_METHOD.console): - """ Examine values in all rows and get the width dynamically """ - - for col in self.output_transform_data: - minw = col.get('minw', 0) - attname = self._produce_output_name(col) - # XXX: if append_column_header, min width should include the size of the attribut name - if method == OUTPUT_METHOD.curses and self.ncurses_custom_fields.get('prepend_column_headers'): - minw += len(attname) + 1 - col['w'] = len(attname) - # use cooked 
values - for row in rows: - if method == OUTPUT_METHOD.curses and self.ncurses_filter_row(row): - continue - val = self._produce_output_value(row, col, method) - if self.cook_function.get(method): - val = self.cook_function[method](attname, val, col) - if method == OUTPUT_METHOD.curses: - curw = val.length - else: - curw = len(str(val)) - if curw > col['w']: - col['w'] = curw - if minw > 0: - col['w'] = max(minw, col['w']) - - def _calculate_statuses_for_row(self, row, method): - statuses = [] - for num, col in enumerate(self.output_transform_data): - statuses.append(self._calculate_output_status(row, col, row[num], method)) - return statuses - - @staticmethod - def _calculate_column_types(rows): - result = {} - if len(rows) > 0: - colnames = rows[0].keys() - for colname in colnames: - for r in rows: - val = r[colname] - if val is None or val == '': - continue - else: - if isinstance(val, Number): - result[colname] = COLTYPES.ct_number - else: - result[colname] = COLTYPES.ct_string - break - else: - # if all values are None - we don't care, so use a generic string - result[colname] = COLTYPES.ct_string - return result - - def _get_highlights(self): - return [col.get('highlight', False) for col in self.output_transform_data] - @staticmethod def _get_input_column_name(col): - if 'in' in col: - return col['in'] - else: - return col['out'] - - def json_output(self, rows, before_string=None, after_string=None): - output = {} - data = [] - output['type'] = StatCollector.ident(self) - if self.__dict__.get('dbname') and self.__dict__.get('dbver'): - output['name'] = '{0}/{1}'.format(self.dbname, self.dbver) - for r in rows: - data.append(self._produce_output_row(r)) - output['data'] = data - return json.dumps(output, indent=4) + return col['in'] if 'in' in col else col['out'] def ncurses_filter_row(self, row): return False - def ncurses_output(self, rows, before_string=None, after_string=None): - """ for ncurses - we just return data structures. The output code - is quite complex and deserves a separate class. 
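
[Aside, not part of the patch: the per-method console/json/ncurses output functions removed in this hunk are superseded by displayer objects from pg_view/models/outputs.py. The calling side (do_loop in pg_view/__init__.py, earlier in this patch) now drives the formatting roughly as below; render() is a name made up for the illustration.]

    from pg_view.models.outputs import get_displayer_by_class

    def render(collectors, output, output_method, flags):
        for collector in collectors:
            displayer = get_displayer_by_class(
                output_method, collector,
                show_units=flags.display_units,
                ignore_autohide=not flags.autohide_fields,
                notrim=flags.notrim,
            )
            # BaseStatCollector.output() now just picks rows_diff/rows_cur and delegates
            output.display(collector.output(displayer))
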
- """ + def output(self, displayer, before_string=None, after_string=None): + rows = self._get_rows() + return displayer.display(rows, before_string, after_string) - self._calculate_dynamic_width(rows, method=OUTPUT_METHOD.curses) - - raw_result = {} - for k in StatCollector.NCURSES_DEFAULTS.keys(): - raw_result[k] = [] - - for col in self.output_transform_data: - for opt in StatCollector.NCURSES_DEFAULTS.keys(): - raw_result[opt].append((col[opt] if opt in col else StatCollector.NCURSES_DEFAULTS[opt])) - - result_header = self._output_row_for_curses(None, 'h') - result_rows = [] - status_rows = [] - values_rows = [] - - for r in rows: - values_row = self._output_row_for_curses(r, 'v') - if self.ncurses_filter_row(dict(zip(result_header, values_row))): - continue - cooked_row = self.cook_row(result_header, values_row, method=OUTPUT_METHOD.curses) - status_row = self._calculate_statuses_for_row(values_row, method=OUTPUT_METHOD.curses) - result_rows.append(dict(zip(result_header, cooked_row))) - status_rows.append(dict(zip(result_header, status_row))) - values_rows.append(dict(zip(result_header, values_row))) - - types_row = self._calculate_column_types(values_rows) - - result = {'rows': result_rows, - 'statuses': status_rows, - 'hide': self._get_columns_to_hide(result_rows, status_rows), - 'highlights': dict(zip(result_header, self._get_highlights())), - 'types': types_row} - for x in StatCollector.NCURSES_CUSTOM_OUTPUT_FIELDS: - result[x] = self.ncurses_custom_fields.get(x, None) - for k in StatCollector.NCURSES_DEFAULTS.keys(): - if k == 'noautohide' and self.ignore_autohide: - result[k] = dict.fromkeys(result_header, True) - else: - result[k] = dict(zip(result_header, raw_result[k])) - return {self.ident(): result} - - def output(self, method, before_string=None, after_string=None): - if method not in self.output_function: - raise Exception('Output method {0} is not supported'.format(method)) - if self.produce_diffs: - rows = self.rows_diff - else: - rows = self.rows_cur - return self.output_function[method](rows, before_string, after_string) + def _get_rows(self): + return self.rows_diff if self.produce_diffs else self.rows_cur def diff(self): self.clear_diffs() diff --git a/pg_view/collectors/host_collector.py b/pg_view/collectors/host_collector.py index 60f238f..9d21f1a 100644 --- a/pg_view/collectors/host_collector.py +++ b/pg_view/collectors/host_collector.py @@ -3,23 +3,30 @@ from datetime import timedelta from multiprocessing import cpu_count -from pg_view.collectors.base_collector import StatCollector +from pg_view.collectors.base_collector import BaseStatCollector from pg_view.loggers import logger -from pg_view.models.outputs import COLSTATUS, COLHEADER +from pg_view.models.formatters import StatusFormatter +from pg_view.models.outputs import COLHEADER -class HostStatCollector(StatCollector): - +class HostStatCollector(BaseStatCollector): """ General system-wide statistics """ - UPTIME_FILE = '/proc/uptime' def __init__(self): super(HostStatCollector, self).__init__(produce_diffs=False) + self.status_formatter = StatusFormatter(self) + + self.transform_list_data = [ + {'out': 'loadavg', 'infn': self._concat_load_avg} + ] + self.transform_uptime_data = [ + {'out': 'uptime', 'in': 0, 'fn': self._uptime_to_str} + ] - self.transform_list_data = [{'out': 'loadavg', 'infn': self._concat_load_avg}] - self.transform_uptime_data = [{'out': 'uptime', 'in': 0, 'fn': self._uptime_to_str}] - self.transform_uname_data = [{'out': 'sysname', 'infn': self._construct_sysname}] + 
self.transform_uname_data = [ + {'out': 'sysname', 'infn': self._construct_sysname} + ] self.output_transform_data = [ { @@ -30,7 +37,7 @@ def __init__(self): 'warning': 5, 'critical': 20, 'column_header': COLHEADER.ch_prepend, - 'status_fn': self._load_avg_state, + 'status_fn': self.status_formatter.load_avg_state, }, { 'out': 'up', @@ -61,7 +68,6 @@ def __init__(self): ] self.ncurses_custom_fields = {'header': False, 'prefix': None, 'prepend_column_headers': False} - self.postinit() def refresh(self): @@ -72,53 +78,22 @@ def refresh(self): raw_result.update(self._read_uname()) raw_result.update(self._read_cpus()) self._do_refresh([raw_result]) + return raw_result def _read_load_average(self): return self._transform_list(os.getloadavg()) - def _load_avg_state(self, row, col): - state = {} - load_avg_str = row[self.output_column_positions[col['out']]] - if not load_avg_str: - return {} - # load average consists of 3 values. - load_avg_vals = load_avg_str.split() - for no, val in enumerate(load_avg_vals): - if float(val) >= col['critical']: - state[no] = COLSTATUS.cs_critical - elif float(val) >= col['warning']: - state[no] = COLSTATUS.cs_warning - else: - state[no] = COLSTATUS.cs_ok - return state - @staticmethod def _concat_load_avg(colname, row, optional): """ concat all load averages into a single string """ - - if len(row) >= 3: - return ' '.join(str(x) for x in row[:3]) - else: - return '' - - @staticmethod - def _load_avg_status(row, col, val, bound): - if val is not None: - loads = str(val).split() - if len(loads) != 3: - logger.error('load average value is not 1min 5min 15 min') - for x in loads: - f = float(x) - if f > bound: - return True - return False + return ' '.join(str(x) for x in row[:3]) if len(row) >= 3 else '' @staticmethod def _read_cpus(): - cpus = 0 try: cpus = cpu_count() - except: + except NotImplementedError: + cpus = 0 logger.error('multiprocessing does not support cpu_count') return {'cores': cpus} @@ -149,8 +124,7 @@ def _read_hostname(): return {'hostname': socket.gethostname()} def _read_uname(self): - uname_row = os.uname() - return self._transform_input(uname_row, self.transform_uname_data) + return self._transform_input(os.uname(), self.transform_uname_data) - def output(self, method): - return super(self.__class__, self).output(method, before_string='Host statistics', after_string='\n') + def output(self, displayer, before_string=None, after_string=None): + return super(HostStatCollector, self).output(displayer, before_string='Host statistics', after_string='\n') diff --git a/pg_view/collectors/memory_collector.py b/pg_view/collectors/memory_collector.py index 582faf4..785e86e 100644 --- a/pg_view/collectors/memory_collector.py +++ b/pg_view/collectors/memory_collector.py @@ -1,57 +1,40 @@ -from pg_view.collectors.base_collector import StatCollector +from pg_view.collectors.base_collector import BaseStatCollector, warn_non_optional_column from pg_view.loggers import logger +from pg_view.models.formatters import FnFormatter, StatusFormatter -class MemoryStatCollector(StatCollector): +class MemoryStatCollector(BaseStatCollector): """ Collect memory-related statistics """ - MEMORY_STAT_FILE = '/proc/meminfo' def __init__(self): super(MemoryStatCollector, self).__init__(produce_diffs=False) + self.status_formatter = StatusFormatter(self) + self.fn_formatter = FnFormatter(self) + self.transform_dict_data = [ {'in': 'MemTotal', 'out': 'total', 'fn': int}, {'in': 'MemFree', 'out': 'free', 'fn': int}, - { - 'in': 'Buffers', - 'out': 'buffers', - 'fn': int, - 
'optional': True, - }, + {'in': 'Buffers', 'out': 'buffers', 'fn': int, 'optional': True}, {'in': 'Cached', 'out': 'cached', 'fn': int}, {'in': 'Dirty', 'out': 'dirty', 'fn': int}, - { - 'in': 'CommitLimit', - 'out': 'commit_limit', - 'fn': int, - 'optional': True, - }, - { - 'in': 'Committed_AS', - 'out': 'committed_as', - 'fn': int, - 'optional': True, - }, - { - 'infn': self.calculate_kb_left_until_limit, - 'out': 'commit_left', - 'fn': int, - 'optional': True, - }, + {'in': 'CommitLimit', 'out': 'commit_limit', 'fn': int, 'optional': True}, + {'in': 'Committed_AS', 'out': 'committed_as', 'fn': int, 'optional': True}, + {'infn': self.calculate_kb_left_until_limit, 'out': 'commit_left', 'fn': int, 'optional': True} ] self.output_transform_data = [ { 'out': 'total', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 0, 'minw': 6, }, { 'out': 'free', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 1, 'noautohide': True, 'minw': 6, @@ -59,21 +42,21 @@ def __init__(self): { 'out': 'buffers', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 2, 'minw': 6, }, { 'out': 'cached', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 3, 'minw': 6, }, { 'out': 'dirty', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 4, 'noautohide': True, 'minw': 6, @@ -82,7 +65,7 @@ def __init__(self): 'out': 'limit', 'in': 'commit_limit', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 5, 'noautohide': True, 'minw': 6, @@ -91,7 +74,7 @@ def __init__(self): 'out': 'as', 'in': 'committed_as', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 6, 'minw': 6, }, @@ -99,7 +82,7 @@ def __init__(self): 'out': 'left', 'in': 'commit_left', 'units': 'MB', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 7, 'noautohide': True, 'minw': 6, @@ -107,7 +90,6 @@ def __init__(self): ] self.ncurses_custom_fields = {'header': False, 'prefix': 'mem: ', 'prepend_column_headers': True} - self.postinit() def refresh(self): @@ -116,6 +98,7 @@ def refresh(self): memdata = self._read_memory_data() raw_result = self._transform_input(memdata) self._do_refresh([raw_result]) + return raw_result @staticmethod def _read_memory_data(): @@ -149,11 +132,13 @@ def _read_memory_data(): return result def calculate_kb_left_until_limit(self, colname, row, optional): - result = (int(row['CommitLimit']) - int(row['Committed_AS']) if row.get('CommitLimit', None) is not None and - row.get('Committed_AS', None) is not None else None) - if result is None and not optional: - self.warn_non_optional_column(colname) - return result + memory_left = (int(row['CommitLimit']) - int(row['Committed_AS']) if self._is_commit(row) else None) + if memory_left is None and not optional: + warn_non_optional_column(colname) + return memory_left + + def _is_commit(self, row): + return row.get('CommitLimit') is not None and row.get('Committed_AS') is not None - def output(self, method): - return super(self.__class__, self).output(method, before_string='Memory statistics:', after_string='\n') + def output(self, displayer, before_string=None, after_string=None): + return super(MemoryStatCollector, self).output(displayer, before_string='Memory statistics:', after_string='\n') diff --git a/pg_view/collectors/partition_collector.py 
b/pg_view/collectors/partition_collector.py index c0802be..a2104ef 100644 --- a/pg_view/collectors/partition_collector.py +++ b/pg_view/collectors/partition_collector.py @@ -4,17 +4,20 @@ import time from multiprocessing import Process -from pg_view.collectors.base_collector import StatCollector from pg_view import consts +from pg_view.collectors.base_collector import BaseStatCollector from pg_view.loggers import logger +from pg_view.models.formatters import FnFormatter, StatusFormatter from pg_view.models.outputs import COLALIGN -from pg_view.utils import BLOCK_SIZE if sys.hexversion >= 0x03000000: long = int + from queue import Empty +else: + from Queue import Empty -class PartitionStatCollector(StatCollector): +class PartitionStatCollector(BaseStatCollector): """Collect statistics about PostgreSQL partitions """ DISK_STAT_FILE = '/proc/diskstats' @@ -28,13 +31,25 @@ def __init__(self, dbname, dbversion, work_directory, consumer): self.dbver = dbversion self.queue_consumer = consumer self.work_directory = work_directory - self.df_list_transformation = [{'out': 'dev', 'in': 0, 'fn': self._dereference_dev_name}, - {'out': 'space_total', 'in': 1, 'fn': int}, - {'out': 'space_left', 'in': 2, 'fn': int}] - self.io_list_transformation = [{'out': 'sectors_read', 'in': 5, 'fn': int}, {'out': 'sectors_written', 'in': 9, - 'fn': int}, - {'out': 'await', 'in': 13, 'fn': int}] - self.du_list_transformation = [{'out': 'path_size', 'in': 0, 'fn': int}, {'out': 'path', 'in': 1}] + self.status_formatter = StatusFormatter(self) + self.fn_formatter = FnFormatter(self) + + self.df_list_transformation = [ + {'out': 'dev', 'in': 0, 'fn': self._dereference_dev_name}, + {'out': 'space_total', 'in': 1, 'fn': int}, + {'out': 'space_left', 'in': 2, 'fn': int} + ] + + self.io_list_transformation = [ + {'out': 'sectors_read', 'in': 5, 'fn': int}, + {'out': 'sectors_written', 'in': 9, 'fn': int}, + {'out': 'await', 'in': 13, 'fn': int} + ] + + self.du_list_transformation = [ + {'out': 'path_size', 'in': 0, 'fn': int}, + {'out': 'path', 'in': 1} + ] self.diff_generator_data = [ {'out': 'type', 'diff': False}, @@ -57,8 +72,8 @@ def __init__(self, dbname, dbversion, work_directory, consumer): 'out': 'fill', 'in': 'path_fill_rate', 'units': 'MB/s', - 'fn': self.kb_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.kb_to_mbytes, + 'round': consts.RD, 'pos': 2, 'minw': 6, }, @@ -67,8 +82,8 @@ def __init__(self, dbname, dbversion, work_directory, consumer): 'in': 'time_until_full', 'pos': 3, 'noautohide': True, - 'status_fn': self.time_field_status, - 'fn': StatCollector.time_pretty_print, + 'status_fn': self.status_formatter.time_field_status, + 'fn': self.fn_formatter.time_pretty_print, 'warning': 10800, 'critical': 3600, 'hide_if_ok': True, @@ -77,7 +92,7 @@ def __init__(self, dbname, dbversion, work_directory, consumer): { 'out': 'total', 'in': 'space_total', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 4, 'minw': 5, 'align': COLALIGN.ca_right, @@ -85,7 +100,7 @@ def __init__(self, dbname, dbversion, work_directory, consumer): { 'out': 'left', 'in': 'space_left', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 5, 'noautohide': False, 'minw': 5, @@ -94,8 +109,8 @@ def __init__(self, dbname, dbversion, work_directory, consumer): { 'out': 'read', 'units': 'MB/s', - 'fn': self.sectors_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.sectors_to_mbytes, + 'round': consts.RD, 'pos': 6, 'noautohide': True, 'minw': 6, @@ 
-103,8 +118,8 @@ def __init__(self, dbname, dbversion, work_directory, consumer): { 'out': 'write', 'units': 'MB/s', - 'fn': self.sectors_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.sectors_to_mbytes, + 'round': consts.RD, 'pos': 7, 'noautohide': True, 'minw': 6, @@ -112,23 +127,26 @@ def __init__(self, dbname, dbversion, work_directory, consumer): { 'out': 'await', 'units': 'ms', - 'round': StatCollector.RD, + 'round': consts.RD, 'pos': 8, 'minw': 8, }, { 'out': 'path_size', - 'fn': self.kb_pretty_print, + 'fn': self.fn_formatter.kb_pretty_print, 'pos': 9, 'noautohide': True, 'align': COLALIGN.ca_right, }, {'out': 'path', 'pos': 10}, ] - self.ncurses_custom_fields = {'header': True, - 'prefix': None} + self.ncurses_custom_fields = {'header': True, 'prefix': None} self.postinit() + @classmethod + def from_cluster(cls, cluster, consumer): + return cls(['name'], cluster['ver'], cluster['wd'], consumer) + def ident(self): return '{0} ({1}/{2})'.format(super(PartitionStatCollector, self).ident(), self.dbname, self.dbver) @@ -146,13 +164,12 @@ def refresh(self): if queue_data: (du_out, df_out) = queue_data - for pname in PartitionStatCollector.DATA_NAME, PartitionStatCollector.XLOG_NAME: + for pname in self.DATA_NAME, self.XLOG_NAME: result[pname] = self._transform_input(df_out[pname], self.df_list_transformation) - io_out = self.get_io_data([result[PartitionStatCollector.DATA_NAME]['dev'], - result[PartitionStatCollector.XLOG_NAME]['dev']]) + io_out = self.get_io_data([result[self.DATA_NAME]['dev'], result[self.XLOG_NAME]['dev']]) - for pname in PartitionStatCollector.DATA_NAME, PartitionStatCollector.XLOG_NAME: + for pname in self.DATA_NAME, self.XLOG_NAME: if result[pname]['dev'] in io_out: result[pname].update(self._transform_input(io_out[result[pname]['dev']], self.io_list_transformation)) if pname in du_out: @@ -160,7 +177,9 @@ def refresh(self): # set the type manually result[pname]['type'] = pname - self._do_refresh([result[PartitionStatCollector.DATA_NAME], result[PartitionStatCollector.XLOG_NAME]]) + new_rows = [result[self.DATA_NAME], result[self.XLOG_NAME]] + self._do_refresh(new_rows) + return new_rows @staticmethod def calculate_time_until_full(colname, prev, cur): @@ -198,16 +217,17 @@ def get_io_data(pnames): fp and fp.close() return result - def output(self, method): - return super(self.__class__, self).output(method, before_string='PostgreSQL partitions:', after_string='\n') + def output(self, displayer, before_string=None, after_string=None): + return super(PartitionStatCollector, self).output( + displayer, before_string='PostgreSQL partitions:', after_string='\n') class DetachedDiskStatCollector(Process): """ This class runs in a separate process and runs du and df """ + BLOCK_SIZE = 1024 OLD_WAL_SUBDIR = '/pg_xlog/' WAL_SUBDIR = '/pg_wal/' - NEW_WAL_SINCE = 10.0 def __init__(self, q, work_directories, db_version): @@ -234,32 +254,30 @@ def run(self): self.q.join() result = {} self.df_cache = {} - for wd in self.work_directories: - du_data = self.get_du_data(wd) - df_data = self.get_df_data(wd) - result[wd] = [du_data, df_data] + for work_directory in self.work_directories: + du_data = self.get_du_data(work_directory) + df_data = self.get_df_data(work_directory) + result[work_directory] = [du_data, df_data] self.q.put(result) time.sleep(consts.TICK_LENGTH) - def get_du_data(self, wd): - data_size = 0 - xlog_size = 0 - + def get_du_data(self, work_directory): result = {'data': [], 'xlog': []} try: - data_size = self.run_du(wd, BLOCK_SIZE) - xlog_size = 
self.run_du(wd + self.wal_directory, BLOCK_SIZE) + data_size = self.run_du(work_directory) + xlog_size = self.run_du(work_directory + self.wal_directory) except Exception as e: - logger.error('Unable to read free space information for the pg_xlog and data directories for the directory\ - {0}: {1}'.format(wd, e)) + logger.error('Unable to read free space information for the pg_xlog and data ' + 'directories for the directory {0}: {1}'.format(work_directory, e)) else: # XXX: why do we pass the block size there? - result['data'] = str(data_size), wd - result['xlog'] = str(xlog_size), wd + self.wal_directory + result['data'] = str(data_size), work_directory + result['xlog'] = str(xlog_size), work_directory + self.wal_directory return result - @staticmethod - def run_du(pathname, block_size=BLOCK_SIZE, exclude=['lost+found']): + def run_du(self, pathname, exclude=None): + if exclude is None: + exclude = ['lost+found'] size = 0 folders = [pathname] root_dev = os.lstat(pathname).st_dev @@ -283,40 +301,38 @@ def run_du(pathname, block_size=BLOCK_SIZE, exclude=['lost+found']): size += st.st_size if mode == 0x8000: # S_IFREG size += st.st_size - return long(size / block_size) + return long(size / self.BLOCK_SIZE) def get_df_data(self, work_directory): """ Retrive raw data from df (transformations are performed via df_list_transformation) """ - result = {'data': [], 'xlog': []} # obtain the device names data_dev = self.get_mounted_device(self.get_mount_point(work_directory)) xlog_dev = self.get_mounted_device(self.get_mount_point(work_directory + self.wal_directory)) - if data_dev not in self.df_cache: - data_vfs = os.statvfs(work_directory) - self.df_cache[data_dev] = data_vfs - else: - data_vfs = self.df_cache[data_dev] - if xlog_dev not in self.df_cache: - xlog_vfs = os.statvfs(work_directory + self.wal_directory) - self.df_cache[xlog_dev] = xlog_vfs - else: - xlog_vfs = self.df_cache[xlog_dev] + data_vfs = self._get_or_update_df_cache(work_directory, data_dev) + xlog_vfs = self._get_or_update_df_cache(work_directory + self.wal_directory, xlog_dev) - result['data'] = (data_dev, data_vfs.f_blocks * (data_vfs.f_bsize / BLOCK_SIZE), - data_vfs.f_bavail * (data_vfs.f_bsize / BLOCK_SIZE)) + data_vfs_blocks = data_vfs.f_bsize / self.BLOCK_SIZE + result['data'] = (data_dev, data_vfs.f_blocks * data_vfs_blocks, data_vfs.f_bavail * data_vfs_blocks) if data_dev != xlog_dev: - result['xlog'] = (xlog_dev, xlog_vfs.f_blocks * (xlog_vfs.f_bsize / BLOCK_SIZE), - xlog_vfs.f_bavail * (xlog_vfs.f_bsize / BLOCK_SIZE)) + xlog_vfs_blocks = (xlog_vfs.f_bsize / self.BLOCK_SIZE) + result['xlog'] = (xlog_dev, xlog_vfs.f_blocks * xlog_vfs_blocks, xlog_vfs.f_bavail * xlog_vfs_blocks) else: result['xlog'] = result['data'] return result + def _get_or_update_df_cache(self, work_directory, dev): + if dev not in self.df_cache: + vfs = os.statvfs(work_directory) + self.df_cache[dev] = vfs + else: + vfs = self.df_cache[dev] + return vfs + @staticmethod def get_mounted_device(pathname): """Get the device mounted at pathname""" - # uses "/proc/mounts" raw_dev_name = None dev_name = None @@ -353,7 +369,6 @@ def get_mounted_device(pathname): @staticmethod def get_mount_point(pathname): """Get the mounlst point of the filesystem containing pathname""" - pathname = os.path.normcase(os.path.realpath(pathname)) parent_device = path_device = os.stat(pathname).st_dev while parent_device == path_device: @@ -363,3 +378,35 @@ def get_mount_point(pathname): break parent_device = os.stat(pathname).st_dev return mount_point + + +class 
DiskCollectorConsumer(object): + """ consumes information from the disk collector and provides it for the local + collector classes running in the same subprocess. + """ + def __init__(self, q): + self.result = {} + self.cached_result = {} + self.q = q + + def consume(self): + # if we haven't consumed the previous value + if len(self.result) != 0: + return + try: + self.result = self.q.get_nowait() + self.cached_result = self.result.copy() + except Empty: + # we are too fast, just do nothing. + pass + else: + self.q.task_done() + + def fetch(self, work_directory): + data = None + if work_directory in self.result: + data = self.result[work_directory] + del self.result[work_directory] + elif work_directory in self.cached_result: + data = self.cached_result[work_directory] + return data diff --git a/pg_view/collectors/pg_collector.py b/pg_view/collectors/pg_collector.py index 1779dd0..53b9ae9 100644 --- a/pg_view/collectors/pg_collector.py +++ b/pg_view/collectors/pg_collector.py @@ -2,11 +2,16 @@ import sys import psycopg2 +import psycopg2.extras -from pg_view.collectors.base_collector import StatCollector +from pg_view import consts +from pg_view.collectors.base_collector import BaseStatCollector from pg_view.loggers import logger -from pg_view.models.outputs import COLSTATUS, COLALIGN -from pg_view.utils import MEM_PAGE_SIZE, dbversion_as_float +from pg_view.models.formatters import FnFormatter, StatusFormatter +from pg_view.models.outputs import COLALIGN +from pg_view.sqls import SELECT_PG_IS_IN_RECOVERY, SHOW_MAX_CONNECTIONS, SELECT_PGSTAT_VERSION_LESS_THAN_92, \ + SELECT_PGSTAT_VERSION_LESS_THAN_96, SELECT_PGSTAT_NEVER_VERSION +from pg_view.utils import MEM_PAGE_SIZE, exec_command_with_output, dbversion_as_float if sys.hexversion >= 0x03000000: long = int @@ -15,19 +20,23 @@ maxsize = sys.maxint -class PgstatCollector(StatCollector): - """ Collect PostgreSQL-related statistics """ +def process_sort_key(process): + return process.get('age', maxsize) or maxsize + +class PgStatCollector(BaseStatCollector): + """ Collect PostgreSQL-related statistics """ STATM_FILENAME = '/proc/{0}/statm' def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): - super(PgstatCollector, self).__init__() + super(PgStatCollector, self).__init__() self.postmaster_pid = pid self.pgcon = pgcon self.reconnect = reconnect - self.pids = [] self.rows_diff = [] - self.rows_diff_output = [] + self.status_formatter = StatusFormatter(self) + self.fn_formatter = FnFormatter(self) + # figure out our backend pid self.connection_pid = pgcon.get_backend_pid() self.max_connections = self._get_max_connections() @@ -43,28 +52,20 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): self.transform_list_data = [ {'out': 'pid', 'in': 0, 'fn': int}, {'out': 'state', 'in': 2}, - {'out': 'utime', 'in': 13, 'fn': StatCollector.ticks_to_seconds}, - {'out': 'stime', 'in': 14, 'fn': StatCollector.ticks_to_seconds}, + {'out': 'utime', 'in': 13, 'fn': self.unit_converter.ticks_to_seconds}, + {'out': 'stime', 'in': 14, 'fn': self.unit_converter.ticks_to_seconds}, {'out': 'priority', 'in': 17, 'fn': int}, {'out': 'starttime', 'in': 21, 'fn': long}, {'out': 'vsize', 'in': 22, 'fn': int}, {'out': 'rss', 'in': 23, 'fn': int}, - { - 'out': 'delayacct_blkio_ticks', - 'in': 41, - 'fn': long, - 'optional': True, - }, - { - 'out': 'guest_time', - 'in': 42, - 'fn': StatCollector.ticks_to_seconds, - 'optional': True, - }, + {'out': 'delayacct_blkio_ticks', 'in': 41, 'fn': long, 'optional': True}, + {'out': 
'guest_time', 'in': 42, 'fn': self.unit_converter.ticks_to_seconds, 'optional': True}, ] - self.transform_dict_data = [{'out': 'read_bytes', 'fn': int, 'optional': True}, {'out': 'write_bytes', - 'fn': int, 'optional': True}] + self.transform_dict_data = [ + {'out': 'read_bytes', 'fn': int, 'optional': True}, + {'out': 'write_bytes', 'fn': int, 'optional': True} + ] self.diff_generator_data = [ {'out': 'pid', 'diff': False}, @@ -105,14 +106,14 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'out': 's', 'in': 'state', 'pos': 2, - 'status_fn': self.check_ps_state, + 'status_fn': self.status_formatter.check_ps_state, 'warning': 'D', }, { 'out': 'utime', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'pos': 4, 'warning': 90, 'align': COLALIGN.ca_right, @@ -120,8 +121,8 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): { 'out': 'stime', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'pos': 5, 'warning': 5, 'critical': 30, @@ -130,22 +131,22 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'out': 'guest', 'in': 'guest_time', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'pos': 6, }, { 'out': 'delay_blkio', 'in': 'delayacct_blkio_ticks', 'units': '/s', - 'round': StatCollector.RD, + 'round': consts.RD, }, { 'out': 'read', 'in': 'read_bytes', 'units': 'MB/s', - 'fn': StatCollector.bytes_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.bytes_to_mbytes, + 'round': consts.RD, 'pos': 7, 'noautohide': True, }, @@ -153,8 +154,8 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'out': 'write', 'in': 'write_bytes', 'units': 'MB/s', - 'fn': StatCollector.bytes_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.bytes_to_mbytes, + 'round': consts.RD, 'pos': 8, 'noautohide': True, }, @@ -162,8 +163,8 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'out': 'uss', 'in': 'uss', 'units': 'MB', - 'fn': StatCollector.bytes_to_mbytes, - 'round': StatCollector.RD, + 'fn': self.unit_converter.bytes_to_mbytes, + 'round': consts.RD, 'pos': 9, 'noautohide': True }, @@ -172,8 +173,8 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'in': 'age', 'noautohide': True, 'pos': 9, - 'fn': StatCollector.time_pretty_print, - 'status_fn': self.age_status_fn, + 'fn': self.fn_formatter.time_pretty_print, + 'status_fn': self.status_formatter.age_status_fn, 'align': COLALIGN.ca_right, 'warning': 300, }, @@ -201,112 +202,58 @@ def __init__(self, pgcon, reconnect, pid, dbname, dbver, always_track_pids): 'out': 'query', 'pos': 12, 'noautohide': True, - 'fn': self.idle_format_fn, + 'fn': self.fn_formatter.idle_format_fn, 'warning': 'idle in transaction', 'critical': 'locked', - 'status_fn': self.query_status_fn, + 'status_fn': self.status_formatter.query_status_fn, }, ] - - self.ncurses_custom_fields = {'header': True, - 'prefix': None} - + self.ncurses_custom_fields = {'header': True, 'prefix': None} self.postinit() + @classmethod + def from_cluster(cls, cluster, pid): + return cls(cluster['pgcon'], cluster['reconnect'], cluster['pid'], cluster['name'], cluster['ver'], pid) + def 
get_subprocesses_pid(self): ppid = self.postmaster_pid - result = self.exec_command_with_output('ps -o pid --ppid {0} --noheaders'.format(ppid)) + result = exec_command_with_output('ps -o pid --ppid {0} --noheaders'.format(ppid)) if result[0] != 0: logger.info("Couldn't determine the pid of subprocesses for {0}".format(ppid)) - self.pids = [] - self.pids = [int(x) for x in result[1].split()] - - def check_ps_state(self, row, col): - if row[self.output_column_positions[col['out']]] == col.get('warning', ''): - return {0: COLSTATUS.cs_warning} - return {0: COLSTATUS.cs_ok} - - def age_status_fn(self, row, col): - age_string = row[self.output_column_positions[col['out']]] - age_seconds = self.time_field_to_seconds(age_string) - if 'critical' in col and col['critical'] < age_seconds: - return {-1: COLSTATUS.cs_critical} - if 'warning' in col and col['warning'] < age_seconds: - return {-1: COLSTATUS.cs_warning} - return {-1: COLSTATUS.cs_ok} - - def idle_format_fn(self, text): - r = re.match(r'idle in transaction (\d+)', text) - if not r: - return text - else: - if self.dbver >= 9.2: - return 'idle in transaction for ' + StatCollector.time_pretty_print(int(r.group(1))) - else: - return 'idle in transaction ' + StatCollector.time_pretty_print(int(r.group(1))) \ - + ' since the last query start' - - def query_status_fn(self, row, col): - if row[self.output_column_positions['w']] is True: - return {-1: COLSTATUS.cs_critical} - else: - val = row[self.output_column_positions[col['out']]] - if val and val.startswith(col.get('warning', '!')): - return {-1: COLSTATUS.cs_warning} - return {-1: COLSTATUS.cs_ok} + return [] + return [int(x) for x in result[1].split()] def ident(self): return '{0} ({1}/{2})'.format('postgres', self.dbname, self.dbver) @staticmethod def _get_psinfo(cmdline): - """ gets PostgreSQL process type from the command-line.""" - pstype = 'unknown' - action = None - if cmdline is not None and len(cmdline) > 0: - # postgres: stats collector process - m = re.match(r'postgres:\s+(.*)\s+process\s*(.*)$', cmdline) - if m: - pstype = m.group(1) - action = m.group(2) - else: - if re.match(r'postgres:.*', cmdline): - # assume it's a backend process - pstype = 'backend' - if pstype == 'autovacuum worker': - pstype = 'autovacuum' - return pstype, action + if not cmdline: + return 'unknown', None + m = re.match(r'postgres:\s+(.*)\s+process\s*(.*)$', cmdline) + if m: + pstype = m.group(1) + action = m.group(2) + return 'autovacuum' if pstype == 'autovacuum worker' else pstype, action + elif re.match(r'postgres:.*', cmdline): + # assume it's a backend process + return 'backend', None + return 'unknown', None @staticmethod def _is_auxiliary_process(pstype): - if pstype == 'backend' or pstype == 'autovacuum': - return False - return True + return pstype not in ('backend', 'autovacuum') def set_aux_processes_filter(self, newval): self.filter_aux_processes = newval def ncurses_filter_row(self, row): - if self.filter_aux_processes: - # type is the second column - return self._is_auxiliary_process(row['type']) - else: - return False + return self._is_auxiliary_process(row['type']) if self.filter_aux_processes else False def refresh(self): - """ Reads data from /proc and PostgreSQL stats """ - result = [] - # fetch up-to-date list of subprocess PIDs - self.get_subprocesses_pid() try: if not self.pgcon: - # if we've lost the connection, try to reconnect and - # re-initialize all connection invariants - self.pgcon, self.postmaster_pid = self.reconnect() - self.connection_pid = self.pgcon.get_backend_pid() 
- self.max_connections = self._get_max_connections() - self.dbver = dbversion_as_float(self.pgcon) - self.server_version = self.pgcon.get_parameter_status('server_version') + self._try_reconnect() stat_data = self._read_pg_stat_activity() except psycopg2.OperationalError as e: logger.info("failed to query the server: {}".format(e)) @@ -314,9 +261,14 @@ def refresh(self): self.pgcon.close() self.pgcon = None self._do_refresh([]) - return + return [] + + # fetch up-to-date list of subprocess PIDs + pids = self.get_subprocesses_pid() logger.info("new refresh round") - for pid in self.pids: + + result = [] + for pid in pids: if pid == self.connection_pid: continue is_backend = pid in stat_data @@ -334,6 +286,7 @@ def refresh(self): result.append(result_row) # and refresh the rows with this data self._do_refresh(result) + return result def _read_proc(self, pid, is_backend, is_active): """ see man 5 proc for details (/proc/[pid]/stat) """ @@ -382,6 +335,14 @@ def _read_proc(self, pid, is_backend, is_active): result['uss'] = self._get_memory_usage(pid) return result + def _try_reconnect(self): + # if we've lost the connection, try to reconnect and re-initialize all connection invariants + self.pgcon, self.postmaster_pid = self.reconnect() + self.connection_pid = self.pgcon.get_backend_pid() + self.max_connections = self._get_max_connections() + self.dbver = dbversion_as_float(self.pgcon) + self.server_version = self.pgcon.get_parameter_status('server_version') + def _get_memory_usage(self, pid): """ calculate usage of private memory per process """ # compute process's own non-shared memory. @@ -410,150 +371,53 @@ def _get_memory_usage(self, pid): return uss def _get_max_connections(self): - """ Read max connections from the database """ - - cur = self.pgcon.cursor(cursor_factory=psycopg2.extras.RealDictCursor) - cur.execute('show max_connections') - result = cur.fetchone() - cur.close() + result = self._execute_fetchone_query(SHOW_MAX_CONNECTIONS) return int(result.get('max_connections', 0)) def _get_recovery_status(self): - """ Determine whether the Postgres process is in recovery """ + result = self._execute_fetchone_query(SELECT_PG_IS_IN_RECOVERY) + return result.get('role', 'unknown') + def _execute_fetchone_query(self, query): cur = self.pgcon.cursor(cursor_factory=psycopg2.extras.RealDictCursor) - cur.execute("select case when pg_is_in_recovery() then 'standby' else 'master' end as role") + cur.execute(query) result = cur.fetchone() cur.close() - return result.get('role', 'unknown') + return result def _read_pg_stat_activity(self): """ Read data from pg_stat_activity """ - self.recovery_status = self._get_recovery_status() cur = self.pgcon.cursor(cursor_factory=psycopg2.extras.RealDictCursor) - - # the pg_stat_activity format has been changed to 9.2, avoiding ambigiuous meanings for some columns. 
- # since it makes more sense then the previous layout, we 'cast' the former versions to 9.2 - if self.dbver < 9.2: - cur.execute(""" - SELECT datname, - procpid as pid, - usename, - client_addr, - client_port, - round(extract(epoch from (now() - xact_start))) as age, - waiting, - NULLIF(array_to_string(array_agg(DISTINCT other.pid ORDER BY other.pid), ','), '') - as locked_by, - CASE WHEN current_query = ' in transaction' THEN - CASE WHEN xact_start != query_start THEN - 'idle in transaction ' || CAST( - abs(round(extract(epoch from (now() - query_start)))) AS text - ) - ELSE 'idle in transaction' - END - WHEN current_query = '' THEN 'idle' - ELSE current_query - END AS query - FROM pg_stat_activity a - LEFT JOIN pg_locks this ON (this.pid = procpid and this.granted = 'f') - LEFT JOIN pg_locks other ON this.locktype = other.locktype - AND this.database IS NOT DISTINCT FROM other.database - AND this.relation IS NOT DISTINCT FROM other.relation - AND this.page IS NOT DISTINCT FROM other.page - AND this.tuple IS NOT DISTINCT FROM other.tuple - AND this.virtualxid IS NOT DISTINCT FROM other.virtualxid - AND this.transactionid IS NOT DISTINCT FROM other.transactionid - AND this.classid IS NOT DISTINCT FROM other.classid - AND this.objid IS NOT DISTINCT FROM other.objid - AND this.objsubid IS NOT DISTINCT FROM other.objsubid - AND this.pid != other.pid - AND other.granted = 't' - WHERE procpid != pg_backend_pid() - GROUP BY 1,2,3,4,5,6,7,9 - """) - elif self.dbver < 9.6: - cur.execute(""" - SELECT datname, - a.pid as pid, - usename, - client_addr, - client_port, - round(extract(epoch from (now() - xact_start))) as age, - waiting, - NULLIF(array_to_string(array_agg(DISTINCT other.pid ORDER BY other.pid), ','), '') - as locked_by, - CASE WHEN state = 'idle in transaction' THEN - CASE WHEN xact_start != state_change THEN - 'idle in transaction ' || CAST( - abs(round(extract(epoch from (now() - state_change)))) AS text - ) - ELSE 'idle in transaction' - END - WHEN state = 'active' THEN query - ELSE state - END AS query - FROM pg_stat_activity a - LEFT JOIN pg_locks this ON (this.pid = a.pid and this.granted = 'f') - LEFT JOIN pg_locks other ON this.locktype = other.locktype - AND this.database IS NOT DISTINCT FROM other.database - AND this.relation IS NOT DISTINCT FROM other.relation - AND this.page IS NOT DISTINCT FROM other.page - AND this.tuple IS NOT DISTINCT FROM other.tuple - AND this.virtualxid IS NOT DISTINCT FROM other.virtualxid - AND this.transactionid IS NOT DISTINCT FROM other.transactionid - AND this.classid IS NOT DISTINCT FROM other.classid - AND this.objid IS NOT DISTINCT FROM other.objid - AND this.objsubid IS NOT DISTINCT FROM other.objsubid - AND this.pid != other.pid - AND other.granted = 't' - WHERE a.pid != pg_backend_pid() - GROUP BY 1,2,3,4,5,6,7,9 - """) - else: - cur.execute(""" - SELECT datname, - a.pid as pid, - usename, - client_addr, - client_port, - round(extract(epoch from (now() - xact_start))) as age, - wait_event_type IS NOT DISTINCT FROM 'Lock' AS waiting, - NULLIF(array_to_string(ARRAY(SELECT unnest(pg_blocking_pids(a.pid)) ORDER BY 1), ','), '') - as locked_by, - CASE WHEN state = 'idle in transaction' THEN - CASE WHEN xact_start != state_change THEN - 'idle in transaction ' || CAST( - abs(round(extract(epoch from (now() - state_change)))) AS text - ) - ELSE 'idle in transaction' - END - WHEN state = 'active' THEN query - ELSE state - END AS query - FROM pg_stat_activity a - WHERE a.pid != pg_backend_pid() AND a.datname IS NOT NULL - GROUP BY 1,2,3,4,5,6,7,9 - 
""") + cur.execute(self.get_sql_pgstat_by_version()) results = cur.fetchall() + # fill in the number of total connections, including ourselves self.total_connections = len(results) + 1 self.active_connections = 0 - ret = {} - for r in results: + formatted_results = {} + for result in results: # stick multiline queries together - if r.get('query', None): - if r['query'] != 'idle': - if r['pid'] != self.connection_pid: + if result.get('query'): + if result['query'] != 'idle': + if result['pid'] != self.connection_pid: self.active_connections += 1 - lines = r['query'].splitlines() + lines = result['query'].splitlines() newlines = [re.sub('\s+', ' ', l.strip()) for l in lines] - r['query'] = ' '.join(newlines) - ret[r['pid']] = r + result['query'] = ' '.join(newlines) + formatted_results[result['pid']] = result self.pgcon.commit() cur.close() - return ret + return formatted_results + + def get_sql_pgstat_by_version(self): + # the pg_stat_activity format has been changed to 9.2, avoiding ambigiuous meanings for some columns. + # since it makes more sense then the previous layout, we 'cast' the former versions to 9.2 + if self.dbver < 9.2: + return SELECT_PGSTAT_VERSION_LESS_THAN_92 + elif self.dbver < 9.6: + return SELECT_PGSTAT_VERSION_LESS_THAN_96 + return SELECT_PGSTAT_NEVER_VERSION def ncurses_produce_prefix(self): if self.pgcon: @@ -565,17 +429,10 @@ def ncurses_produce_prefix(self): max_conns=self.max_connections, active_conns=self.active_connections) else: - return "{dbname} {version} (offline)\n". \ - format(dbname=self.dbname, - version=self.server_version) - - @staticmethod - def process_sort_key(process): - return process['age'] if process['age'] is not None else maxsize + return "{dbname} {version} (offline)\n".format(dbname=self.dbname, version=self.server_version) def diff(self): """ we only diff backend processes if new one is not idle and use pid to identify processes """ - self.rows_diff = [] self.running_diffs = [] self.blocked_diffs = {} @@ -606,7 +463,7 @@ def diff(self): # order the result rows by the start time value if len(self.blocked_diffs) == 0: self.rows_diff = self.running_diffs - self.rows_diff.sort(key=self.process_sort_key, reverse=True) + self.rows_diff.sort(key=process_sort_key, reverse=True) else: blocked_temp = [] # we traverse the tree of blocked processes in a depth-first order, building a list @@ -615,10 +472,10 @@ def diff(self): # by the current one from the plain list of process information rows, that's why # we use a dictionary of lists of blocked processes with a blocker pid as a key # and effectively build a separate tree for each blocker. 
- self.running_diffs.sort(key=self.process_sort_key, reverse=True) + self.running_diffs.sort(key=process_sort_key, reverse=True) # sort elements in the blocked lists, so that they still appear in the latest to earliest order for key in self.blocked_diffs: - self.blocked_diffs[key].sort(key=self.process_sort_key) + self.blocked_diffs[key].sort(key=process_sort_key) for parent_row in self.running_diffs: self.rows_diff.append(parent_row) # if no processes blocked by this one - just skip to the next row @@ -633,5 +490,5 @@ def diff(self): blocked_temp.extend(self.blocked_diffs[child_row['pid']]) del self.blocked_diffs[child_row['pid']] - def output(self, method): - return super(self.__class__, self).output(method, before_string='PostgreSQL processes:', after_string='\n') + def output(self, displayer, before_string=None, after_string=None): + return super(PgStatCollector, self).output(displayer, before_string='PostgreSQL processes:', after_string='\n') diff --git a/pg_view/collectors/system_collector.py b/pg_view/collectors/system_collector.py index 410a87a..430342d 100644 --- a/pg_view/collectors/system_collector.py +++ b/pg_view/collectors/system_collector.py @@ -1,11 +1,10 @@ -from pg_view.collectors.base_collector import StatCollector +from pg_view import consts +from pg_view.collectors.base_collector import BaseStatCollector from pg_view.loggers import logger -class SystemStatCollector(StatCollector): - +class SystemStatCollector(BaseStatCollector): """ Collect global system statistics, i.e. CPU/IO usage, not including memory. """ - PROC_STAT_FILENAME = '/proc/stat' def __init__(self): @@ -17,29 +16,17 @@ def __init__(self): {'out': 'idle', 'in': 3, 'fn': float}, {'out': 'iowait', 'in': 4, 'fn': float}, {'out': 'irq', 'in': 5, 'fn': float}, - { - 'out': 'softirq', - 'in': 6, - 'fn': float, - 'optional': True, - }, - { - 'out': 'steal', - 'in': 7, - 'fn': float, - 'optional': True, - }, - { - 'out': 'guest', - 'in': 8, - 'fn': float, - 'optional': True, - }, + {'out': 'softirq', 'in': 6, 'fn': float, 'optional': True}, + {'out': 'steal', 'in': 7, 'fn': float, 'optional': True}, + {'out': 'guest', 'in': 8, 'fn': float, 'optional': True} ] - self.transform_dict_data = [{'out': 'ctxt', 'fn': float}, {'out': 'cpu'}, {'out': 'running', - 'in': 'procs_running', 'fn': int}, {'out': 'blocked', 'in': 'procs_blocked', - 'fn': int}] + self.transform_dict_data = [ + {'out': 'ctxt', 'fn': float}, + {'out': 'cpu'}, + {'out': 'running', 'in': 'procs_running', 'fn': int}, + {'out': 'blocked', 'in': 'procs_blocked', 'fn': int} + ] self.diff_generator_data = [ {'out': 'utime', 'fn': self._cpu_time_diff}, @@ -59,8 +46,8 @@ def __init__(self): { 'out': 'utime', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'minw': 5, 'pos': 0, 'warning': 50, @@ -69,8 +56,8 @@ def __init__(self): { 'out': 'stime', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'pos': 1, 'minw': 5, 'warning': 10, @@ -79,16 +66,16 @@ def __init__(self): { 'out': 'idle', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 'pos': 2, 'minw': 5, }, { 'out': 'iowait', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, 
'pos': 3, 'minw': 5, 'warning': 20, @@ -97,27 +84,27 @@ def __init__(self): { 'out': 'irq', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, }, { 'out': 'soft', 'in': 'softirq', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, }, { 'out': 'steal', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, }, { 'out': 'guest', 'units': '%', - 'fn': StatCollector.time_diff_to_percent, - 'round': StatCollector.RD, + 'fn': self.unit_converter.time_diff_to_percent, + 'round': consts.RD, }, { 'out': 'ctxt', @@ -145,12 +132,10 @@ def __init__(self): self.current_total_cpu_time = 0 self.cpu_time_diff = 0 self.ncurses_custom_fields = {'header': False, 'prefix': 'sys: ', 'prepend_column_headers': True} - self.postinit() def refresh(self): """ Read data from global /proc/stat """ - result = {} stat_data = self._read_proc_stat() cpu_data = self._read_cpu_data(stat_data.get('cpu', [])) @@ -158,6 +143,7 @@ def refresh(self): result.update(cpu_data) self._refresh_cpu_time_values(cpu_data) self._do_refresh([result]) + return result def _refresh_cpu_time_values(self, cpu_data): # calculate the sum of all CPU indicators and store it. @@ -169,7 +155,6 @@ def _refresh_cpu_time_values(self, cpu_data): def _read_proc_stat(self): """ see man 5 proc for details (/proc/stat). We don't parse cpu info here """ - raw_result = {} result = {} try: @@ -187,16 +172,14 @@ def _read_proc_stat(self): logger.error('Unable to read {0}, global data will be unavailable'.format(self.PROC_STAT_FILENAME)) return result - def _cpu_time_diff(self, colname, cur, prev): - if cur.get(colname, None) and prev.get(colname, None) and self.cpu_time_diff > 0: - return (cur[colname] - prev[colname]) / self.cpu_time_diff - else: - return None + def _cpu_time_diff(self, colname, current, previous): + if current.get(colname) and previous.get(colname) and self.cpu_time_diff > 0: + return (current[colname] - previous[colname]) / self.cpu_time_diff + return None def _read_cpu_data(self, cpu_row): """ Parse the cpu row from /proc/stat """ - return self._transform_input(cpu_row) - def output(self, method): - return super(SystemStatCollector, self).output(method, before_string='System statistics:', after_string='\n') + def output(self, displayer, before_string=None, after_string=None): + return super(SystemStatCollector, self).output(displayer, before_string='System statistics:', after_string='\n') diff --git a/pg_view/consts.py b/pg_view/consts.py index cfe87de..92d2fbb 100644 --- a/pg_view/consts.py +++ b/pg_view/consts.py @@ -1 +1,7 @@ +import os + +USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK']) +NCURSES_CUSTOM_OUTPUT_FIELDS = ['header', 'prefix', 'prepend_column_headers'] TICK_LENGTH = 1 +RD = 1 +SECTOR_SIZE = 512 diff --git a/pg_view/exceptions.py b/pg_view/exceptions.py new file mode 100644 index 0000000..43c6767 --- /dev/null +++ b/pg_view/exceptions.py @@ -0,0 +1,14 @@ +class InvalidConnectionParamError(Exception): + pass + + +class NotConnectedError(Exception): + pass + + +class NoPidConnectionError(Exception): + pass + + +class DuplicatedConnectionError(Exception): + pass diff --git a/pg_view/models/db_client.py b/pg_view/models/db_client.py index a779e16..e1c9694 100644 --- a/pg_view/models/db_client.py +++ 
b/pg_view/models/db_client.py @@ -1,25 +1,21 @@ -import glob -import os -import re - import psycopg2 +from pg_view.exceptions import NotConnectedError, NoPidConnectionError, DuplicatedConnectionError from pg_view.loggers import logger -from pg_view.models.parsers import ProcNetParser -from pg_view.utils import STAT_FIELD, dbversion_as_float +from pg_view.models.parsers import ProcNetParser, ProcWorker +from pg_view.utils import dbversion_as_float def read_postmaster_pid(work_directory, dbname): """ Parses the postgres directory tree and extracts the pid of the postmaster process """ - fp = None try: fp = open('{0}/postmaster.pid'.format(work_directory)) pid = fp.readline().strip() except: # XXX: do not bail out in case we are collecting data for multiple PostgreSQL clusters - logger.error('Unable to read postmaster.pid for {name} at {wd}\n HINT: \ - make sure Postgres is running'.format(name=dbname, wd=work_directory)) + logger.error('Unable to read postmaster.pid for {name} at {wd}\n HINT: ' + 'make sure Postgres is running'.format(name=dbname, wd=work_directory)) return None finally: if fp is not None: @@ -27,117 +23,6 @@ def read_postmaster_pid(work_directory, dbname): return pid -def build_connection(host, port, user, database): - result = {} - if host: - result['host'] = host - if port: - result['port'] = port - if user: - result['user'] = user - if database: - result['database'] = database - return result - - -def pick_connection_arguments(conn_args, username, dbname): - """ go through all decected connections, picking the first one that actually works """ - result = {} - for conn_type in 'unix', 'tcp', 'tcp6': - if len(result) > 0: - break - for arg in conn_args.get(conn_type, []): - if can_connect_with_connection_arguments(*arg, username=username, dbname=dbname): - (result['host'], result['port']) = arg - break - return result - - -def can_connect_with_connection_arguments(host, port, username, dbname): - """ check that we can connect given the specified arguments """ - conn = build_connection(host, port, username, dbname) - try: - test_conn = psycopg2.connect(**conn) - test_conn.close() - except psycopg2.OperationalError: - return False - return True - - -def detect_with_proc_net(pid): - inodes = fetch_socket_inodes_for_process(pid) - parser = ProcNetParser() - result = parser.match_socket_inodes(inodes) - if not result or len(result) == 0: - logger.error('could not detect connection string from /proc/net for postgres process {0}'.format(pid)) - return None - return result - - -def detect_db_connection_arguments(work_directory, pid, version, username, dbname): - """ - Try to detect database connection arguments from the postmaster.pid - We do this by first extracting useful information from postmaster.pid, - next reading the postgresql.conf if necessary and, at last, - """ - conn_args = detect_with_proc_net(pid) - if not conn_args: - # if we failed to detect the arguments via the /proc/net/ readings, - # perhaps we'll get better luck with just peeking into postmaster.pid. 
- conn_args = detect_with_postmaster_pid(work_directory, version) - if not conn_args: - logger.error('unable to detect connection parameters for the PostgreSQL cluster at {0}'.format( - work_directory)) - return None - # try all acquired connection arguments, starting from unix, then tcp, then tcp over ipv6 - result = pick_connection_arguments(conn_args, username, dbname) - if len(result) == 0: - logger.error('unable to connect to PostgreSQL cluster at {0} using any of ' - 'the detected connection options: {1}'.format(work_directory, conn_args)) - return None - return result - - -def establish_user_defined_connection(instance, conn, clusters): - """ connect the database and get all necessary options like pid and work_directory - we use port, host and socket_directory, prefering socket over TCP connections - """ - # establish a new connection - try: - pgcon = psycopg2.connect(**conn) - except Exception as e: - logger.error('failed to establish connection to {0} via {1}'.format(instance, conn)) - logger.error('PostgreSQL exception: {0}'.format(e)) - return None - # get the database version from the pgcon properties - dbver = dbversion_as_float(pgcon) - cur = pgcon.cursor() - cur.execute('show data_directory') - work_directory = cur.fetchone()[0] - cur.close() - pgcon.commit() - # now, when we have the work directory, acquire the pid of the postmaster. - pid = read_postmaster_pid(work_directory, instance) - if pid is None: - logger.error('failed to read pid of the postmaster on {0}'.format(conn)) - return None - # check that we don't have the same pid already in the accumulated results. - # for instance, a user may specify 2 different set of connection options for - # the same database (one for the unix_socket_directory and another for the host) - pids = [opt['pid'] for opt in clusters if 'pid' in opt] - if pid in pids: - duplicate_instance = [opt['name'] for opt in clusters if 'pid' in opt and opt.get('pid', 0) == pid][0] - logger.error('duplicate connection options detected for databases ' - '{0} and {1}, same pid {2}, skipping {0}'.format(instance, duplicate_instance, pid)) - pgcon.close() - return True - # now we have all components to create a cluster descriptor - desc = make_cluster_desc(name=instance, version=dbver, workdir=work_directory, - pid=pid, pgcon=pgcon, conn=conn) - clusters.append(desc) - return True - - def make_cluster_desc(name, version, workdir, pid, pgcon, conn): """Create cluster descriptor, complete with the reconnect function.""" @@ -156,166 +41,159 @@ def reconnect(): } -def get_postmasters_directories(): - """ detect all postmasters running and get their pids """ +def prepare_connection_params(host, port, user='', database=''): + result = {} + if host: + result['host'] = host + if port: + result['port'] = port + if user: + result['user'] = user + if database: + result['database'] = database + return result - pg_pids = [] - postmasters = {} - pg_proc_stat = {} - # get all 'number' directories from /proc/ and sort them - for f in glob.glob('/proc/[0-9]*/stat'): - # make sure the particular pid is accessible to us - if not os.access(f, os.R_OK): - continue - try: - with open(f, 'rU') as fp: - stat_fields = fp.read().strip().split() - except: - logger.error('failed to read {0}'.format(f)) - continue - # read PostgreSQL processes. 
Avoid zombies - if len(stat_fields) < STAT_FIELD.st_start_time + 1 or stat_fields[STAT_FIELD.st_process_name] not in \ - ('(postgres)', '(postmaster)') or stat_fields[STAT_FIELD.st_state] == 'Z': - if stat_fields[STAT_FIELD.st_state] == 'Z': - logger.warning('zombie process {0}'.format(f)) - if len(stat_fields) < STAT_FIELD.st_start_time + 1: - logger.error('{0} output is too short'.format(f)) - continue - # convert interesting fields to int - for no in STAT_FIELD.st_pid, STAT_FIELD.st_ppid, STAT_FIELD.st_start_time: - stat_fields[no] = int(stat_fields[no]) - pid = stat_fields[STAT_FIELD.st_pid] - pg_proc_stat[pid] = stat_fields - pg_pids.append(pid) - # we have a pid -> stat fields map, and an array of all pids. - # sort pids array by the start time of the process, so that we - # minimize the number of looks into /proc/../cmdline latter - # the idea is that processes starting earlier are likely to be - # parent ones. - pg_pids.sort(key=lambda pid: pg_proc_stat[pid][STAT_FIELD.st_start_time]) - for pid in pg_pids: - st = pg_proc_stat[pid] - ppid = st[STAT_FIELD.st_ppid] - # if parent is also a postgres process - no way this is a postmaster - if ppid in pg_pids: - continue - link_filename = '/proc/{0}/cwd'.format(pid) - # now get its data directory in the /proc/[pid]/cmdline - if not os.access(link_filename, os.R_OK): - logger.warning( - 'potential postmaster work directory file {0} is not accessible'.format(link_filename)) - continue - # now read the actual directory, check this is accessible to us and belongs to PostgreSQL - # additionally, we check that we haven't seen this directory before, in case the check - # for a parent pid still produce a postmaster child. Be extra careful to catch all exceptions - # at this phase, we don't want one bad postmaster to be the reason of tool's failure for the - # other good ones. 
- try: - pg_dir = os.readlink(link_filename) - except os.error as e: - logger.error('unable to readlink {0}: OS reported {1}'.format(link_filename, e)) - continue - if pg_dir in postmasters: - continue - if not os.access(pg_dir, os.R_OK): - logger.warning( - 'unable to access the PostgreSQL candidate directory {0}, have to skip it'.format(pg_dir)) - continue - # if PG_VERSION file is missing, this is not a postgres directory - PG_VERSION_FILENAME = '{0}/PG_VERSION'.format(link_filename) - if not os.access(PG_VERSION_FILENAME, os.R_OK): - logger.warning( - 'PostgreSQL candidate directory {0} is missing PG_VERSION file, have to skip it'.format(pg_dir)) - continue - try: - fp = open(PG_VERSION_FILENAME, 'rU') - val = fp.read().strip() - if val is not None and len(val) >= 2: - version = float(val) - except os.error as e: - logger.error( - 'unable to read version number from PG_VERSION directory {0}, have to skip it'.format(pg_dir)) - continue - except ValueError: - logger.error('PG_VERSION doesn\'t contain a valid version number: {0}'.format(val)) - continue - else: - dbname = get_dbname_from_path(pg_dir) - postmasters[pg_dir] = [pid, version, dbname] - return postmasters +class DBConnectionFinder(object): + CONN_TYPES = ('unix', 'tcp', 'tcp6') + def __init__(self, work_directory, ppid, dbver, username, dbname): + self.work_directory = work_directory + self.pid = ppid + self.version = dbver + self.username = username + self.dbname = dbname -def fetch_socket_inodes_for_process(pid): - """ read /proc/[pid]/fd and get those that correspond to sockets """ - inodes = [] - fd_dir = '/proc/{0}/fd'.format(pid) - if not os.access(fd_dir, os.R_OK): - logger.warning("unable to read {0}".format(fd_dir)) - else: - for link in glob.glob('{0}/*'.format(fd_dir)): - if not os.access(link, os.F_OK): - logger.warning("unable to access link {0}".format(link)) - continue - try: - target = os.readlink(link) - except: - logger.error('coulnd\'t read link {0}'.format(link)) - else: - # socket:[8430] - match = re.search(r'socket:\[(\d+)\]', target) - if match: - inodes.append(int(match.group(1))) - return inodes + def detect_db_connection_arguments(self): + """ Try to detect database connection arguments from the postmaster.pid + We do this by first extracting useful information from postmaster.pid, + next reading the postgresql.conf if necessary and, at last, + """ + conn_args = self.detect_with_proc_net() + if not conn_args: + # if we failed to detect the arguments via the /proc/net/ readings, + # perhaps we'll get better luck with just peeking into postmaster.pid. 
+ conn_args = ProcWorker().detect_with_postmaster_pid(self.work_directory, self.version) + if not conn_args: + msg = 'unable to detect connection parameters for the PostgreSQL cluster at {0}' + logger.error(msg.format(self.work_directory)) + return None + # try all acquired connection arguments, starting from unix, then tcp, then tcp over ipv6 + result = self.pick_connection_arguments(conn_args) + if not result: + logger.error('unable to connect to PostgreSQL cluster at {0} using any of the detected connection ' + 'options: {1}'.format(self.work_directory, conn_args)) + return None + return result + def pick_connection_arguments(self, conn_args): + """ go through all decected connections, picking the first one that actually works """ + result = {} + for conn_type in self.CONN_TYPES: + if result: + break + for arg in conn_args.get(conn_type, []): + connection_candidate = prepare_connection_params(*arg, user=self.username, database=self.dbname) + if self.can_connect_with_connection_arguments(connection_candidate): + (result['host'], result['port']) = arg + break + return result + + def can_connect_with_connection_arguments(self, connection_params): + """ check that we can connect given the specified arguments """ + try: + test_conn = psycopg2.connect(**connection_params) + test_conn.close() + except psycopg2.OperationalError as e: + logger.error(e) + return False + return True -def detect_with_postmaster_pid(work_directory, version): + def detect_with_proc_net(self): + parser = ProcNetParser(self.pid) + result = parser.match_socket_inodes() + if not result: + logger.error( + 'could not detect connection string from /proc/net for postgres process {0}'.format(self.pid)) + return None + return result - # PostgreSQL 9.0 doesn't have enough data - result = {} - if version is None or version == 9.0: - return None - PID_FILE = '{0}/postmaster.pid'.format(work_directory) - # try to access the socket directory - if not os.access(work_directory, os.R_OK | os.X_OK): - logger.warning( - 'cannot access PostgreSQL cluster directory {0}: permission denied'.format(work_directory)) - return None - try: - with open(PID_FILE, 'rU') as fp: - lines = fp.readlines() - except os.error as e: - logger.error('could not read {0}: {1}'.format(PID_FILE, e)) - return None - if len(lines) < 6: - logger.error('{0} seems to be truncated, unable to read connection information'.format(PID_FILE)) - return None - port = lines[3].strip() - unix_socket_path = lines[4].strip() - if unix_socket_path != '': - result['unix'] = [(unix_socket_path, port)] - tcp_address = lines[5].strip() - if tcp_address != '': - if tcp_address == '*': - tcp_address = '127.0.0.1' - result['tcp'] = [(tcp_address, port)] - if len(result) == 0: - logger.error('could not acquire a socket postmaster at {0} is listening on'.format(work_directory)) - return None - return result +class DBClient(object): + SHOW_COMMAND = 'SHOW DATA_DIRECTORY' + def __init__(self, connection_params): + self.connection_params = connection_params -def get_dbname_from_path(db_path): - """ - >>> get_dbname_from_path('foo') - 'foo' - >>> get_dbname_from_path('/pgsql_bar/9.4/data') - 'bar' - """ - m = re.search(r'/pgsql_(.*?)(/\d+.\d+)?/data/?', db_path) - if m: - dbname = m.group(1) - else: - dbname = db_path - return dbname + def establish_user_defined_connection(self, instance, clusters): + """ connect the database and get all necessary options like pid and work_directory + we use port, host and socket_directory, prefering socket over TCP connections + """ + try: + pgcon = 
psycopg2.connect(**self.connection_params) + except Exception as e: + logger.error('failed to establish connection to {0} via {1}'.format(instance, self.connection_params)) + logger.error('PostgreSQL exception: {0}'.format(e)) + raise NotConnectedError + + # get the database version from the pgcon properties + dbver = dbversion_as_float(pgcon.server_version) + work_directory = self.execute_query_and_fetchone(pgcon) + # now, when we have the work directory, acquire the pid of the postmaster. + pid = read_postmaster_pid(work_directory, instance) + + if pid is None: + logger.error('failed to read pid of the postmaster on {0}'.format(self.connection_params)) + raise NoPidConnectionError + + # check that we don't have the same pid already in the accumulated results. + # for instance, a user may specify 2 different set of connection options for + # the same database (one for the unix_socket_directory and another for the host) + pids = [opt['pid'] for opt in clusters if 'pid' in opt] + + if pid in pids: + duplicate_instance = self.get_duplicated_instance(clusters, pid) + logger.error('duplicate connection options detected for databases {0} and {1}, ' + 'same pid {2}, skipping {0}'.format(instance, duplicate_instance, pid)) + pgcon.close() + raise DuplicatedConnectionError + + # now we have all components to create a cluster descriptor + return make_cluster_desc( + name=instance, version=dbver, workdir=work_directory, pid=pid, pgcon=pgcon, conn=self.connection_params) + + def get_duplicated_instance(self, clusters, pid): + return [opt['name'] for opt in clusters if 'pid' in opt and opt.get('pid', 0) == pid][0] + + def execute_query_and_fetchone(self, pgcon): + cur = pgcon.cursor() + cur.execute(self.SHOW_COMMAND) + entry = cur.fetchone()[0] + cur.close() + pgcon.commit() + return entry + + @classmethod + def from_config(cls, config): + connection_params = prepare_connection_params( + host=config.get('host'), + port=config.get('port'), + user=config.get('user'), + database=config.get('database'), + ) + return cls(connection_params) + + @classmethod + def from_options(cls, options): + connection_params = prepare_connection_params(options.host, options.port, options.username, options.dbname) + return cls(connection_params) + + @classmethod + def from_postmasters(cls, work_directory, ppid, dbver, options): + db_finder = DBConnectionFinder(work_directory, ppid, dbver, options.username, options.dbname) + connection_data = db_finder.detect_db_connection_arguments() + if connection_data is None: + return None + connection_params = prepare_connection_params( + connection_data['host'], connection_data['port'], options.username, options.dbname) + return cls(connection_params) diff --git a/pg_view/models/displayers.py b/pg_view/models/displayers.py new file mode 100644 index 0000000..a4efceb --- /dev/null +++ b/pg_view/models/displayers.py @@ -0,0 +1,413 @@ +import json +from abc import ABCMeta +from collections import namedtuple +from numbers import Number + +from pg_view.loggers import logger +from pg_view.consts import NCURSES_CUSTOM_OUTPUT_FIELDS +from pg_view.utils import enum + +COLALIGN = enum(ca_none=0, ca_left=1, ca_center=2, ca_right=3) +COLSTATUS = enum(cs_ok=0, cs_warning=1, cs_critical=2) +COLTYPES = enum(ct_string=0, ct_number=1) +COLHEADER = enum(ch_default=0, ch_prepend=1, ch_append=2) + + +class ColumnType(namedtuple('ColumnType', 'value header header_position')): + __slots__ = () + + @property + def length(self): + return len(self.value) + (0 if not self.header_position else len(self.header) 
+ 1) + + +class BaseDisplayer(object): + __metaclass__ = ABCMeta + + def __init__(self, output_transform_data, ident, show_units, ignore_autohide, notrim): + self.output_transform_data = output_transform_data + self.ident = ident + + self.show_units = show_units + self.ignore_autohide = ignore_autohide + self.notrim = notrim + + def _produce_output_name(self, col): + # get the output column name + attname = col['out'] + # add units to the column name if neccessary + if 'units' in col and self.show_units: + attname += ' ' + col['units'] + return attname + + @staticmethod + def _produce_output_value(row, col): + # get the input value + if 'in' in col: + val = row.get(col['in']) + else: + val = row.get(col['out']) + # if function is specified - apply it to the input value + if 'fn' in col and val is not None: + val = col['fn'](val) + # if rounding is necessary - round the input value up to specified + # decimal points + if 'round' in col and val is not None: + val = round(val, col['round']) + return val + + @classmethod + def from_collector(cls, collector, show_units, ignore_autohide, notrim): + return cls( + collector.output_transform_data, + collector.ident(), + show_units=show_units, + ignore_autohide=ignore_autohide, + notrim=notrim + ) + + +class JsonDisplayer(BaseDisplayer): + def __init__(self, output_transform_data, ident, show_units, ignore_autohide, notrim): + super(JsonDisplayer, self).__init__(output_transform_data, ident, show_units, ignore_autohide, notrim) + + def display(self, rows, before_string=None, after_string=None): + output = {} + data = [] + output['type'] = self.ident + for row in rows: + data.append(self._produce_output_row(row)) + output['data'] = data + return json.dumps(output, indent=4) + + def _produce_output_row(self, row): + """ produce the output row for the screen, json or the database + from the diff rows. 
It consists of renaming columns and rounding + the result when necessary + """ + result = {} + # produce the output row column by column + for col in self.output_transform_data: + attname = self._produce_output_name(col) + val = self._produce_output_value(row, col) + result[attname] = val + return result + + +class BaseStreamDisplayer(BaseDisplayer): + __metaclass__ = ABCMeta + + def __init__(self, output_transform_data, ident, show_units, ignore_autohide, notrim): + super(BaseStreamDisplayer, self).__init__(output_transform_data, ident, show_units, ignore_autohide, notrim) + + def _output_row_generic(self, row, typ='v'): + """ produce a single output row of the type specified by the + last argument: + t - template row + h - header row (only names) + v - values rows + """ + + vals = [] + # produce the output row column by column + for i, col in enumerate(self.output_transform_data): + # get the final attribute name and value + if typ == 't': + if 'w' not in col: + val = '{{{0}}}'.format(i) + else: + val = '{{{0}:<{1}}}'.format(i, col['w']) + elif typ == 'h': + val = self._produce_output_name(col) + else: + val = self._produce_output_value(row, col) + # prepare the list for the output + vals.append(val) + return vals + + +class ConsoleDisplayer(BaseStreamDisplayer): + def _calculate_dynamic_width(self, rows): + """ Examine values in all rows and get the width dynamically """ + + for col in self.output_transform_data: + minw = col.get('minw', 0) + attname = self._produce_output_name(col) + # XXX: if append_column_header, min width should include the size of the attribut name + col['w'] = len(attname) + # use cooked values + for row in rows: + val = self._produce_output_value(row, col) + curw = len(str(val)) + if curw > col['w']: + col['w'] = curw + if minw > 0: + col['w'] = max(minw, col['w']) + + def display(self, rows, before_string=None, after_string=None): + """ Main entry point for preparing textual console output """ + + result = [] + # start by filling-out width of the values + self._calculate_dynamic_width(rows) + + # now produce output template, headers and actual values + templ = self._output_template_for_console() + header = self._output_row_for_console(None, 'h') + + if before_string: + result.append(before_string) + + result.append(templ.format(*header)) + + for r in rows: + row = self._output_row_for_console(r, 'v') + result.append(templ.format(*row)) + + if after_string: + result.append(after_string) + return '\n'.join(result) + + def _output_template_for_console(self): + return ' '.join(self._output_row_for_console(None, 't')) + + def _output_row_for_console(self, row, typ='v'): + return self._output_row_generic(row, typ) + + +class CursesDisplayer(BaseStreamDisplayer): + NCURSES_DEFAULTS = { + 'pos': -1, + 'noautohide': False, + 'w': 0, + 'align': COLALIGN.ca_none, + 'column_header': COLHEADER.ch_default, + } + + def __init__(self, output_transform_data, ident, ncurses_filter_row, ncurses_custom_fields, show_units, + ignore_autohide, notrim): + self.ncurses_filter_row = ncurses_filter_row + self.ncurses_custom_fields = ncurses_custom_fields + super(CursesDisplayer, self).__init__(output_transform_data, ident, show_units, ignore_autohide, notrim) + + def _calculate_dynamic_width(self, rows): + """ Examine values in all rows and get the width dynamically """ + + for col in self.output_transform_data: + minw = col.get('minw', 0) + attname = self._produce_output_name(col) + # XXX: if append_column_header, min width should include the size of the attribut name + if 
self.ncurses_custom_fields.get('prepend_column_headers'): + minw += len(attname) + 1 + col['w'] = len(attname) + # use cooked values + for row in rows: + if self.ncurses_filter_row(row): + continue + val = self._produce_output_value(row, col) + val = self.curses_cook_value(attname, val, col) + curw = val.length + if curw > col['w']: + col['w'] = curw + if minw > 0: + col['w'] = max(minw, col['w']) + + def display(self, rows, before_string=None, after_string=None): + """ for ncurses - we just return data structures. The output code + is quite complex and deserves a separate class. + """ + + self._calculate_dynamic_width(rows) + + raw_result = {} + for k in self.NCURSES_DEFAULTS.keys(): + raw_result[k] = [] + + for col in self.output_transform_data: + for opt in self.NCURSES_DEFAULTS.keys(): + raw_result[opt].append((col[opt] if opt in col else self.NCURSES_DEFAULTS[opt])) + + result_header = self._output_row_for_curses(None, 'h') + result_rows = [] + status_rows = [] + values_rows = [] + + for r in rows: + values_row = self._output_row_for_curses(r, 'v') + if self.ncurses_filter_row(dict(zip(result_header, values_row))): + continue + cooked_row = self.cook_row(result_header, values_row) + status_row = self._calculate_statuses_for_row(values_row) + result_rows.append(dict(zip(result_header, cooked_row))) + status_rows.append(dict(zip(result_header, status_row))) + values_rows.append(dict(zip(result_header, values_row))) + + types_row = self._calculate_column_types(values_rows) + + result = { + 'rows': result_rows, + 'statuses': status_rows, + 'hide': self._get_columns_to_hide(result_rows, status_rows), + 'highlights': dict(zip(result_header, self._get_highlights())), + 'types': types_row + } + for x in NCURSES_CUSTOM_OUTPUT_FIELDS: + result[x] = self.ncurses_custom_fields.get(x, None) + for k in self.NCURSES_DEFAULTS.keys(): + if k == 'noautohide' and self.ignore_autohide: + result[k] = dict.fromkeys(result_header, True) + else: + result[k] = dict(zip(result_header, raw_result[k])) + return {self.ident: result} + + def _get_columns_to_hide(self, result_rows, status_rows): + """ scan the (cooked) rows, do not show columns that are empty """ + + to_skip = [] + for col in self.output_transform_data: + if col.get('pos') == -1: + continue + attname = self._produce_output_name(col) + empty = True + for r in result_rows: + if r[attname].value != '': + empty = False + break + if empty: + to_skip.append(attname) + elif col.get('hide_if_ok', False): + status_ok = True + for row in status_rows: + if attname in row and row[attname]: + for cl in row[attname]: + if row[attname][cl] != COLSTATUS.cs_ok: + status_ok = False + break + if not status_ok: + break + if status_ok: + to_skip.append(attname) + return to_skip + + def _output_row_for_curses(self, row, typ='v'): + return self._output_row_generic(row, typ) + + @staticmethod + def _calculate_column_types(rows): + result = {} + if len(rows) > 0: + colnames = rows[0].keys() + for colname in colnames: + for r in rows: + val = r[colname] + if val is None or val == '': + continue + else: + if isinstance(val, Number): + result[colname] = COLTYPES.ct_number + else: + result[colname] = COLTYPES.ct_string + break + else: + # if all values are None - we don't care, so use a generic string + result[colname] = COLTYPES.ct_string + return result + + def _calculate_statuses_for_row(self, row): + statuses = [] + for num, col in enumerate(self.output_transform_data): + statuses.append(self._calculate_output_status(row, col, row[num])) + return statuses + + @staticmethod 
+ def _calculate_output_status(row, col, val): + """ Examine the current status indicators and produce the status + value for the specific column of the given row + """ + + st = {-1: COLSTATUS.cs_ok} + # if value is missing - don't bother calculating anything + if val is None: + return st + if 'status_fn' in col: + st = col['status_fn'](row, col) + if len(st) == 0: + st = {-1: COLSTATUS.cs_ok} + else: + words = str(val).split() + for i, word in enumerate(words): + for st_name, st_status in zip(('critical', 'warning'), (COLSTATUS.cs_critical, COLSTATUS.cs_warning)): + if st_name in col: + typ = type(col[st_name]) + if typ == int: + typ = float + if typ(word) >= col[st_name]: + st[i] = st_status + break + if i not in st: + st[i] = COLSTATUS.cs_ok + return st + + def _get_highlights(self): + return [col.get('highlight', False) for col in self.output_transform_data] + + def cook_row(self, row, header): + cooked_vals = [] + if len(row) != len(header): + logger.error('Unable to cook row with non-matching number of header and value columns: ' + + 'row {0} header {1}'.format(row, header)) + for no, val in enumerate(row): + # if might be tempting to just get the column from output_transform_data using + # the header, but it's wrong: see _produce_output_name for details. This, of + # course, assumes the number of columns in the output_transform_data is the + # same as in row: thus, we need to avoid filtering rows in the collector. + newval = self.curses_cook_value(val, header[no], self.output_transform_data[no]) + cooked_vals.append(newval) + return cooked_vals + + def curses_cook_value(self, attname, raw_val, output_data): + """ return cooked version of the row, with values transformed. A transformation is + the same for all columns and depends on the values only. + """ + val = raw_val + header = str(attname) + # change the None output to '' + if raw_val is None: + return ColumnType(value='', header='', header_position=None) + if str(raw_val) == 'True': + val = 'T' + elif str(raw_val) == 'False': + val = 'F' + if output_data.get('maxw', 0) > 0 and not self.notrim and len(str(val)) > output_data['maxw']: + # if the value is larger than the maximum allowed width - trim it by removing chars from the middle + val = self._trim_text_middle(val, output_data['maxw']) + if self.ncurses_custom_fields.get('prepend_column_headers') or output_data.get( + 'column_header', COLHEADER.ch_default) == COLHEADER.ch_prepend: + header_position = COLHEADER.ch_prepend + elif output_data.get('column_header', COLHEADER.ch_default) == COLHEADER.ch_append: + header_position = COLHEADER.ch_append + else: + header = '' + header_position = None + return ColumnType(value=str(val), header=header, header_position=header_position) + + @staticmethod + def _trim_text_middle(val, maxw): + """ Trim data by removing middle characters, so hello world' for 8 will become hel..rld. + This kind of trimming seems to be better than tail trimming for user and database names. + """ + + half = int((maxw - 2) / 2) + return val[:half] + '..' 
+ val[-half:] + + @classmethod + def from_collector(cls, collector, show_units, ignore_autohide, notrim): + return cls( + collector.output_transform_data, + collector.ident(), + collector.ncurses_filter_row, + collector.ncurses_custom_fields, + show_units=show_units, + ignore_autohide=ignore_autohide, + notrim=notrim + ) diff --git a/pg_view/models/formatters.py b/pg_view/models/formatters.py new file mode 100644 index 0000000..29f2954 --- /dev/null +++ b/pg_view/models/formatters.py @@ -0,0 +1,127 @@ +import re +from datetime import timedelta +from numbers import Number + +from pg_view.models.outputs import COLSTATUS +from pg_view.utils import time_field_to_seconds + + +class StatusFormatter(object): + def __init__(self, collector): + self.collector = collector + + def query_status_fn(self, row, col): + if row[self.collector.output_column_positions['w']] is True: + return {-1: COLSTATUS.cs_critical} + + val = row[self.collector.output_column_positions[col['out']]] + if val and val.startswith(col.get('warning', '!')): + return {-1: COLSTATUS.cs_warning} + return {-1: COLSTATUS.cs_ok} + + def age_status_fn(self, row, col): + age_string = row[self.collector.output_column_positions[col['out']]] + age_seconds = time_field_to_seconds(age_string) + if 'critical' in col and col['critical'] < age_seconds: + return {-1: COLSTATUS.cs_critical} + if 'warning' in col and col['warning'] < age_seconds: + return {-1: COLSTATUS.cs_warning} + return {-1: COLSTATUS.cs_ok} + + def check_ps_state(self, row, col): + if row[self.collector.output_column_positions[col['out']]] == col.get('warning', ''): + return {0: COLSTATUS.cs_warning} + return {0: COLSTATUS.cs_ok} + + def time_field_status(self, row, col): + val = row[self.collector.output_column_positions[col['out']]] + num = time_field_to_seconds(val) + if num <= col['critical']: + return {-1: COLSTATUS.cs_critical} + elif num <= col['warning']: + return {-1: COLSTATUS.cs_warning} + return {-1: COLSTATUS.cs_ok} + + def load_avg_state(self, row, col): + state = {} + load_avg_str = row[self.collector.output_column_positions[col['out']]] + if not load_avg_str: + return {} + + # load average consists of 3 values. + load_avg_vals = load_avg_str.split() + for no, val in enumerate(load_avg_vals): + if float(val) >= col['critical']: + state[no] = COLSTATUS.cs_critical + elif float(val) >= col['warning']: + state[no] = COLSTATUS.cs_warning + else: + state[no] = COLSTATUS.cs_ok + return state + + +class FnFormatter(object): + BYTE_MAP = [('TB', 1073741824), ('GB', 1048576), ('MB', 1024)] + + def __init__(self, collector): + self.collector = collector + + def kb_pretty_print(self, b): + """ Show memory size as a float value in the biggest measurement units """ + r = [] + for l, n in self.BYTE_MAP: + if b > n: + v = round(float(b) / n, 1) + r.append(str(v) + l) + break + return '{0}KB'.format(str(b)) if len(r) == 0 else ' '.join(r) + + def idle_format_fn(self, text): + r = re.match(r'idle in transaction (\d+)', text) + if not r: + return text + formatted_time = self.time_pretty_print(int(r.group(1))) + if self.collector.dbver >= 9.2: + return 'idle in transaction for {0}'.format(formatted_time) + return 'idle in transaction {0} since the last query start'.format(formatted_time) + + def time_pretty_print(self, start_time): + """Returns a human readable string that shows a time between now and the timestamp passed as an argument. + The passed argument can be a timestamp (returned by time.time() call) a datetime object or a timedelta object. 
+ In case it is a timedelta object, then it is formatted only + """ + + if isinstance(start_time, Number): + delta = timedelta(seconds=start_time) + elif isinstance(start_time, timedelta): + delta = start_time + else: + raise ValueError('passed value should be either a number of seconds ' + + 'from year 1970 or datetime instance of timedelta instance') + + delta = abs(delta) + + secs = delta.seconds + mins = int(secs / 60) + secs %= 60 + hrs = int(mins / 60) + mins %= 60 + hrs %= 24 + result = '' + if delta.days: + result += str(delta.days) + 'd' + if hrs: + if hrs < 10: + result += '0' + result += str(hrs) + result += ':' + if mins < 10: + result += '0' + result += str(mins) + result += ':' + if secs < 10: + result += '0' + result += str(secs) + if not result: + result = str(int(delta.microseconds / 1000)) + 'ms' + return result diff --git a/pg_view/models/outputs.py b/pg_view/models/outputs.py index dbcd08b..53d6de3 100644 --- a/pg_view/models/outputs.py +++ b/pg_view/models/outputs.py @@ -2,27 +2,26 @@ import os import re import time -from collections import namedtuple from operator import itemgetter from pg_view import flags from pg_view.meta import __appname__, __version__, __license__ +from pg_view.models.displayers import ConsoleDisplayer, JsonDisplayer, CursesDisplayer, \ + COLSTATUS, COLHEADER, COLALIGN, COLTYPES +from pg_view.utils import OUTPUT_METHOD -from pg_view.utils import enum +OUTPUT_METHODS_TO_DISPLAYER = { + OUTPUT_METHOD.console: ConsoleDisplayer, + OUTPUT_METHOD.json: JsonDisplayer, + OUTPUT_METHOD.curses: CursesDisplayer +} -COLSTATUS = enum(cs_ok=0, cs_warning=1, cs_critical=2) -COLALIGN = enum(ca_none=0, ca_left=1, ca_center=2, ca_right=3) -COLTYPES = enum(ct_string=0, ct_number=1) -COLHEADER = enum(ch_default=0, ch_prepend=1, ch_append=2) - - -class ColumnType(namedtuple('ColumnType', 'value header header_position')): - __slots__ = () - - @property - def length(self): - return len(self.value) + (0 if not self.header_position else len(self.header) + 1) +def get_displayer_by_class(method, collector, show_units, ignore_autohide, notrim): + if method not in OUTPUT_METHODS_TO_DISPLAYER: + raise Exception('Output method {0} is not supported'.format(method)) + klass = OUTPUT_METHODS_TO_DISPLAYER[method] + return klass.from_collector(collector, show_units, ignore_autohide, notrim) class CommonOutput(object): @@ -41,13 +40,8 @@ def refresh(): class CursesOutput(object): - """ Show ncurses output """ - CLOCK_FORMAT = '%H:%M:%S' - MIN_ELLIPSIS_FIELD_LENGTH = 10 - MIN_TRUNCATE_FIELD_LENGTH = 50 # do not try to truncate fields lower than this size - MIN_TRUNCATED_LEAVE = 10 # do not leave the truncated field if it's less than this size def __init__(self, screen): super(CursesOutput, self) @@ -101,7 +95,6 @@ def toggle_help(self): def refresh(self): """ actual data output goes here """ - self.next_y = 0 # ncurses doesn't erase the old contents when the screen is refreshed, @@ -123,10 +116,6 @@ def refresh(self): self.screen.refresh() self.output_order = [] - def screen_erase(self): - self.screen.erase() - self.screen.refresh() - def update_screen_metrics(self): self.screen_y, self.screen_x = self.screen.getmaxyx() @@ -269,9 +258,8 @@ def color_value(self, val, xcol, status_map, highlight, result): def help(self): y = 0 - self.print_text(y, 0, '{0} {1} - a monitor for PostgreSQL related system statistics'.format(__appname__, - __version__), - self.COLOR_NORMAL | curses.A_BOLD) + self.print_text(y, 0, '{0} {1} - a monitor for PostgreSQL related system statistics'.format( + 
__appname__, __version__), self.COLOR_NORMAL | curses.A_BOLD) y += 1 self.print_text(y, 0, 'Distributed under the terms of {0} license'.format(__license__)) y += 2 @@ -300,10 +288,13 @@ def help(self): y += 2 self.print_text(y, 0, "Press 'h' to exit this screen") - def show_collector_data(self, collector, clock=False): - if collector not in self.data or len(self.data[collector]) <= 0 or \ - len(self.data[collector].get('rows', ())) <= 0 and not self.data[collector]['prefix']: - return + def is_invalid_data_collector(self, collector): + return collector not in self.data or len(self.data[collector]) <= 0 or \ + len(self.data[collector].get('rows', ())) <= 0 and not self.data[collector]['prefix'] + + def show_collector_data(self, collector): + if self.is_invalid_data_collector(collector): + return None rows = self.data[collector]['rows'] statuses = self.data[collector]['statuses'] @@ -342,8 +333,8 @@ def show_collector_data(self, collector, clock=False): self.show_status_of_invisible_fields(layout, status, 0) for field in layout: # calculate colors and alignment for the data value - column_alignment = (align.get(field, - COLALIGN.ca_none) if not prepend_column_headers else COLALIGN.ca_left) + column_alignment = ( + align.get(field, COLALIGN.ca_none) if not prepend_column_headers else COLALIGN.ca_left) w = layout[field]['width'] # now check if we need to add ellipsis to indicate that the value has been truncated. # we don't do this if the value is less than a certain length or when the column is marked as @@ -358,8 +349,8 @@ def show_collector_data(self, collector, clock=False): color_fields = self.color_text(status[field], highlights[field], text, header, row[field].header_position) for f in color_fields: - self.screen.addnstr(self.next_y, layout[field]['start'] + f['start'], f['word'], f['width'], - f['color']) + self.screen.addnstr( + self.next_y, layout[field]['start'] + f['start'], f['word'], f['width'], f['color']) self.next_y += 1 @staticmethod @@ -414,10 +405,15 @@ def display_prefix(self, collector, header): def display_header(self, layout, align, types): for field in layout: - text = self._align_field(field, '', layout[field]['width'], align.get(field, COLALIGN.ca_none), - types.get(field, COLTYPES.ct_string)) - self.screen.addnstr(self.next_y, layout[field]['start'], text, layout[field]['width'], self.COLOR_NORMAL | - curses.A_BOLD) + text = self._align_field( + field, + '', + layout[field]['width'], + align.get(field, COLALIGN.ca_none), + types.get(field, COLTYPES.ct_string) + ) + self.screen.addnstr( + self.next_y, layout[field]['start'], text, layout[field]['width'], self.COLOR_NORMAL | curses.A_BOLD) def calculate_fields_position(self, collector, xstart): width = self.data[collector]['w'] @@ -477,7 +473,6 @@ def layout_x(self, xstart, colwidth, colnames, colhidden, colcandrop): can be hidden, if they are not important (determined at column defintion) and if we don't have enough space for them. 
""" - layout = {} # get only the columns that are not hidden col_remaining = [name for name in colnames if name not in colhidden] diff --git a/pg_view/models/parsers.py b/pg_view/models/parsers.py index 38abe62..c95c343 100644 --- a/pg_view/models/parsers.py +++ b/pg_view/models/parsers.py @@ -1,11 +1,163 @@ +import glob import os import re import socket +from collections import namedtuple from pg_view.loggers import logger +from pg_view.utils import readlines_file, read_file, STAT_FIELD +connection_params = namedtuple('connection_params', ['pid', 'version', 'dbname']) -class ProcNetParser: + +def get_dbname_from_path(db_path): + m = re.search(r'/pgsql_(.*?)(/\d+.\d+)?/data/?', db_path) + return m.group(1) if m else db_path + + +class ProcWorker(object): + def get_postmasters_directories(self): + """ detect all postmasters running and get their pids """ + postmasters = {} + pg_pids, pg_proc_stat = self._get_postgres_processes() + + # we have a pid -> stat fields map, and an array of all pids. + # sort pids array by the start time of the process, so that we + # minimize the number of looks into /proc/../cmdline latter + # the idea is that processes starting earlier are likely to be + # parent ones. + pg_pids.sort(key=lambda pid: pg_proc_stat[pid][STAT_FIELD.st_start_time]) + for pid in pg_pids: + st = pg_proc_stat[pid] + ppid = st[STAT_FIELD.st_ppid] + # if parent is also a postgres process - no way this is a postmaster + if ppid in pg_pids: + continue + link_filename = '/proc/{0}/cwd'.format(pid) + # now get its data directory in the /proc/[pid]/cmdline + if not os.access(link_filename, os.R_OK): + logger.warning( + 'potential postmaster work directory file {0} is not accessible'.format(link_filename)) + continue + # now read the actual directory, check this is accessible to us and belongs to PostgreSQL + # additionally, we check that we haven't seen this directory before, in case the check + # for a parent pid still produce a postmaster child. Be extra careful to catch all exceptions + # at this phase, we don't want one bad postmaster to be the reason of tool's failure for the + # other good ones. + try: + pg_dir = os.readlink(link_filename) + except os.error as e: + logger.error('unable to readlink {0}: OS reported {1}'.format(link_filename, e)) + continue + if pg_dir in postmasters: + continue + if not os.access(pg_dir, os.R_OK): + logger.warning( + 'unable to access the PostgreSQL candidate directory {0}, have to skip it'.format(pg_dir)) + continue + + params = self.get_pg_version_from_file(pid, pg_dir) + if params: + postmasters[pg_dir] = params + return postmasters + + def _get_postgres_processes(self): + pg_pids = [] + pg_proc_stat = {} + # get all 'number' directories from /proc/ and sort them + for f in glob.glob('/proc/[0-9]*/stat'): + # make sure the particular pid is accessible to us + if not os.access(f, os.R_OK): + continue + try: + with open(f, 'rU') as fp: + stat_fields = fp.read().strip().split() + except: + logger.error('failed to read {0}'.format(f)) + continue + # read PostgreSQL processes. 
Avoid zombies + if len(stat_fields) < STAT_FIELD.st_start_time + 1 or stat_fields[STAT_FIELD.st_process_name] not in \ + ('(postgres)', '(postmaster)') or stat_fields[STAT_FIELD.st_state] == 'Z': + if stat_fields[STAT_FIELD.st_state] == 'Z': + logger.warning('zombie process {0}'.format(f)) + if len(stat_fields) < STAT_FIELD.st_start_time + 1: + logger.error('{0} output is too short'.format(f)) + continue + # convert interesting fields to int + for no in STAT_FIELD.st_pid, STAT_FIELD.st_ppid, STAT_FIELD.st_start_time: + stat_fields[no] = int(stat_fields[no]) + pid = stat_fields[STAT_FIELD.st_pid] + pg_proc_stat[pid] = stat_fields + pg_pids.append(pid) + return pg_pids, pg_proc_stat + + def get_pg_version_from_file(self, pid, pg_dir): + link_filename = '/proc/{0}/cwd'.format(pid) + # if PG_VERSION file is missing, this is not a postgres directory + PG_VERSION_FILENAME = '{0}/PG_VERSION'.format(link_filename) + if not os.access(PG_VERSION_FILENAME, os.R_OK): + logger.warning('PostgreSQL candidate directory {0} is missing PG_VERSION file, ' + 'have to skip it'.format(pg_dir)) + return None + try: + data = read_file(PG_VERSION_FILENAME).strip() + version = self._version_or_value_error(data) + except os.error: + logger.error('unable to read version number from PG_VERSION directory {0}, ' + 'have to skip it'.format(pg_dir)) + except ValueError: + logger.error("PG_VERSION doesn't contain a valid version number: {0}".format(data)) + else: + dbname = get_dbname_from_path(pg_dir) + return connection_params(pid=pid, version=version, dbname=dbname) + return None + + def _version_or_value_error(self, data): + if data is not None and len(data) >= 2: + version = float(data) + else: + raise ValueError + return version + + def detect_with_postmaster_pid(self, work_directory, version): + # PostgreSQL 9.0 doesn't have enough data + result = {} + if version is None or version == 9.0: + return None + PID_FILE = '{0}/postmaster.pid'.format(work_directory) + + # try to access the socket directory + if not os.access(work_directory, os.R_OK | os.X_OK): + logger.warning( + 'cannot access PostgreSQL cluster directory {0}: permission denied'.format(work_directory)) + return None + try: + lines = readlines_file(PID_FILE) + except os.error as e: + logger.error('could not read {0}: {1}'.format(PID_FILE, e)) + return None + + if len(lines) < 6: + logger.error('{0} seems to be truncated, unable to read connection information'.format(PID_FILE)) + return None + + port = lines[3].strip() + unix_socket_path = lines[4].strip() + if unix_socket_path != '': + result['unix'] = [(unix_socket_path, port)] + + tcp_address = lines[5].strip() + if tcp_address != '': + if tcp_address == '*': + tcp_address = '127.0.0.1' + result['tcp'] = [(tcp_address, port)] + if not result: + logger.error('could not acquire a socket postmaster at {0} is listening on'.format(work_directory)) + return None + return result + + +class ProcNetParser(object): """ Parse /proc/net/{tcp,tcp6,unix} and return the list of address:port pairs given the set of socket descriptors belonging to the object. The result is grouped by the socket type in a dictionary. @@ -14,17 +166,15 @@ class ProcNetParser: NET_TCP_FILENAME = '/proc/net/tcp' NET_TCP6_FILENAME = '/proc/net/tcp6' - def __init__(self): - self.reinit() - - def reinit(self): + def __init__(self, pid): + self.pid = pid self.sockets = {} self.unix_socket_header_len = 0 # initialize the sockets hash with the contents of unix # and tcp sockets. 
tcp IPv6 is also read if it's present - for fname in ProcNetParser.NET_UNIX_FILENAME, ProcNetParser.NET_TCP_FILENAME: + for fname in self.NET_UNIX_FILENAME, self.NET_TCP_FILENAME: self.read_socket_file(fname) - if os.access(ProcNetParser.NET_TCP6_FILENAME, os.R_OK): + if os.access(self.NET_TCP6_FILENAME, os.R_OK): self.read_socket_file(ProcNetParser.NET_TCP6_FILENAME) @staticmethod @@ -41,11 +191,34 @@ def _hex_to_ipv6(val): newval_list = [format(socket.ntohl(int(val[x: x + 8], 16)), '08X') for x in range(0, 32, 8)] return ':'.join([':'.join((x[:4], x[4:])) for x in newval_list]) - def match_socket_inodes(self, inodes): + def fetch_socket_inodes_for_process(self): + """ read /proc/[pid]/fd and get those that correspond to sockets """ + inodes = [] + fd_dir = '/proc/{0}/fd'.format(self.pid) + if not os.access(fd_dir, os.R_OK): + logger.warning("unable to read {0}".format(fd_dir)) + else: + for link in glob.glob('{0}/*'.format(fd_dir)): + if not os.access(link, os.F_OK): + logger.warning("unable to access link {0}".format(link)) + continue + try: + target = os.readlink(link) + except: + logger.error('coulnd\'t read link {0}'.format(link)) + else: + # socket:[8430] + match = re.search(r'socket:\[(\d+)\]', target) + if match: + inodes.append(int(match.group(1))) + return inodes + + def match_socket_inodes(self): """ return the dictionary with socket types as strings, containing addresses (or unix path names) and port """ result = {} + inodes = self.fetch_socket_inodes_for_process() for inode in inodes: if inode in self.sockets: addr_tuple = self.parse_single_line(inode) diff --git a/pg_view/sqls.py b/pg_view/sqls.py new file mode 100644 index 0000000..035cafc --- /dev/null +++ b/pg_view/sqls.py @@ -0,0 +1,103 @@ +SELECT_PGSTAT_VERSION_LESS_THAN_92 = """ + SELECT datname, + procpid as pid, + usename, + client_addr, + client_port, + round(extract(epoch from (now() - xact_start))) as age, + waiting, + NULLIF(array_to_string(array_agg(DISTINCT other.pid ORDER BY other.pid), ','), '') + as locked_by, + CASE WHEN current_query = ' in transaction' THEN + CASE WHEN xact_start != query_start THEN + 'idle in transaction ' || CAST( + abs(round(extract(epoch from (now() - query_start)))) AS text + ) + ELSE 'idle in transaction' + END + WHEN current_query = '' THEN 'idle' + ELSE current_query + END AS query + FROM pg_stat_activity a + LEFT JOIN pg_locks this ON (this.pid = procpid and this.granted = 'f') + LEFT JOIN pg_locks other ON this.locktype = other.locktype + AND this.database IS NOT DISTINCT FROM other.database + AND this.relation IS NOT DISTINCT FROM other.relation + AND this.page IS NOT DISTINCT FROM other.page + AND this.tuple IS NOT DISTINCT FROM other.tuple + AND this.virtualxid IS NOT DISTINCT FROM other.virtualxid + AND this.transactionid IS NOT DISTINCT FROM other.transactionid + AND this.classid IS NOT DISTINCT FROM other.classid + AND this.objid IS NOT DISTINCT FROM other.objid + AND this.objsubid IS NOT DISTINCT FROM other.objsubid + AND this.pid != other.pid + AND other.granted = 't' + WHERE procpid != pg_backend_pid() + GROUP BY 1,2,3,4,5,6,7,9 +""" + +SELECT_PGSTAT_VERSION_LESS_THAN_96 = """ + SELECT datname, + a.pid as pid, + usename, + client_addr, + client_port, + round(extract(epoch from (now() - xact_start))) as age, + waiting, + NULLIF(array_to_string(array_agg(DISTINCT other.pid ORDER BY other.pid), ','), '') + as locked_by, + CASE WHEN state = 'idle in transaction' THEN + CASE WHEN xact_start != state_change THEN + 'idle in transaction ' || CAST( + abs(round(extract(epoch from 
(now() - state_change)))) AS text + ) + ELSE 'idle in transaction' + END + WHEN state = 'active' THEN query + ELSE state + END AS query + FROM pg_stat_activity a + LEFT JOIN pg_locks this ON (this.pid = a.pid and this.granted = 'f') + LEFT JOIN pg_locks other ON this.locktype = other.locktype + AND this.database IS NOT DISTINCT FROM other.database + AND this.relation IS NOT DISTINCT FROM other.relation + AND this.page IS NOT DISTINCT FROM other.page + AND this.tuple IS NOT DISTINCT FROM other.tuple + AND this.virtualxid IS NOT DISTINCT FROM other.virtualxid + AND this.transactionid IS NOT DISTINCT FROM other.transactionid + AND this.classid IS NOT DISTINCT FROM other.classid + AND this.objid IS NOT DISTINCT FROM other.objid + AND this.objsubid IS NOT DISTINCT FROM other.objsubid + AND this.pid != other.pid + AND other.granted = 't' + WHERE a.pid != pg_backend_pid() + GROUP BY 1,2,3,4,5,6,7,9 +""" + +SELECT_PGSTAT_NEVER_VERSION = """ + SELECT datname, + a.pid as pid, + usename, + client_addr, + client_port, + round(extract(epoch from (now() - xact_start))) as age, + wait_event_type IS NOT DISTINCT FROM 'Lock' AS waiting, + NULLIF(array_to_string(ARRAY(SELECT unnest(pg_blocking_pids(a.pid)) ORDER BY 1), ','), '') + as locked_by, + CASE WHEN state = 'idle in transaction' THEN + CASE WHEN xact_start != state_change THEN + 'idle in transaction ' || CAST( + abs(round(extract(epoch from (now() - state_change)))) AS text + ) + ELSE 'idle in transaction' + END + WHEN state = 'active' THEN query + ELSE state + END AS query + FROM pg_stat_activity a + WHERE a.pid != pg_backend_pid() AND a.datname IS NOT NULL + GROUP BY 1,2,3,4,5,6,7,9 +""" + +SELECT_PG_IS_IN_RECOVERY = "SELECT case WHEN pg_is_in_recovery() THEN 'standby' ELSE 'master' END AS role" +SHOW_MAX_CONNECTIONS = 'SHOW max_connections' diff --git a/pg_view/utils.py b/pg_view/utils.py index 6d7f3d5..6cf82d4 100644 --- a/pg_view/utils.py +++ b/pg_view/utils.py @@ -1,8 +1,12 @@ +import os import re import resource +import subprocess import sys +from pg_view import consts from pg_view import flags +from pg_view.exceptions import InvalidConnectionParamError from pg_view.loggers import logger if sys.hexversion >= 0x03000000: @@ -15,12 +19,57 @@ def enum(**enums): return type('Enum', (), enums) +BYTES_IN_MB = 1048576 +SECTORS_IN_MB = 2048 +KB_IN_MB = 1024 + STAT_FIELD = enum(st_pid=0, st_process_name=1, st_state=2, st_ppid=3, st_start_time=21) BLOCK_SIZE = 1024 MEM_PAGE_SIZE = resource.getpagesize() +PAGESIZE = os.sysconf("SC_PAGE_SIZE") OUTPUT_METHOD = enum(console='console', json='json', curses='curses') +def open_universal(file_path): + return open(file_path, 'rU') + + +def read_file(file_path): + with open_universal(file_path) as f: + return f.read() + + +def readlines_file(file_path): + with open_universal(file_path) as f: + return f.readlines() + + +class UnitConverter(object): + @staticmethod + def kb_to_mbytes(kb): + return float(kb) / KB_IN_MB if kb is not None else None + + @staticmethod + def sectors_to_mbytes(sectors): + return float(sectors) / SECTORS_IN_MB if sectors is not None else None + + @staticmethod + def bytes_to_mbytes(bytes_val): + return float(bytes_val) / BYTES_IN_MB if bytes_val is not None else None + + @staticmethod + def bytes_to_kb(bytes_val): + return float(bytes_val) / KB_IN_MB if bytes_val is not None else None + + @staticmethod + def ticks_to_seconds(tick_value_str): + return float(tick_value_str) / consts.USER_HZ if tick_value_str is not None else None + + @staticmethod + def time_diff_to_percent(timediff_val): + 
        return float(timediff_val) * 100 if timediff_val is not None else None
+
+
 def get_valid_output_methods():
     result = []
     for key in OUTPUT_METHOD.__dict__.keys():
@@ -31,18 +80,11 @@ def get_valid_output_methods():
 
 
 def output_method_is_valid(method):
-    """
-    >>> output_method_is_valid('foo')
-    False
-    >>> output_method_is_valid('curses')
-    True
-    """
     return method in get_valid_output_methods()
 
 
 def read_configuration(config_file_name):
     # read PostgreSQL connection options
-    config_data = {}
     if not config_file_name:
         return None
     config = ConfigParser.ConfigParser()
@@ -50,15 +92,12 @@ def read_configuration(config_file_name):
     if not f:
         logger.error('Configuration file {0} is empty or not found'.format(config_file_name))
         return None
+
+    config_data = {}
     # get through all defined databases
     for section in config.sections():
         config_data[section] = {}
-        for argname in (
-            'port',
-            'host',
-            'user',
-            'dbname',
-        ):
+        for argname in ('port', 'host', 'user', 'dbname'):
             try:
                 val = config.get(section, argname)
             except ConfigParser.NoOptionError:
@@ -69,24 +108,24 @@ def read_configuration(config_file_name):
     return config_data
 
 
-def process_single_collector(st):
+def process_single_collector(collector, filter_aux):
     """ perform all heavy-lifting for a single collector, i.e. data collection,
         diff calculation, etc. This is meant to be run in a separate thread.
     """
-    from pg_view.collectors.pg_collector import PgstatCollector
-    if isinstance(st, PgstatCollector):
-        st.set_aux_processes_filter(flags.filter_aux)
-    st.tick()
+    from pg_view.collectors.pg_collector import PgStatCollector
+    if isinstance(collector, PgStatCollector):
+        collector.set_aux_processes_filter(filter_aux)
+    collector.tick()
     if not flags.freeze:
-        if st.needs_refresh():
-            st.refresh()
-        if st.needs_diffs():
-            st.diff()
+        if collector.needs_refresh():
+            collector.refresh()
+        if collector.needs_diffs():
+            collector.diff()
         else:
             # if the server goes offline, we need to clear diffs here,
             # otherwise rows from the last successful reading will be
             # displayed forever
-            st.clear_diffs()
+            collector.clear_diffs()
 
 
 def process_groups(groups):
@@ -96,7 +135,46 @@ def process_groups(groups):
             part.ncurses_set_prefix(pg.ncurses_produce_prefix())
 
 
-def dbversion_as_float(pgcon):
-    version_num = pgcon.server_version
-    version_num /= 100
-    return float('{0}.{1}'.format(version_num / 100, version_num % 100))
+def validate_autodetected_conn_param(user_dbname, user_dbver, result_work_dir, connection_params):
+    if user_dbname:
+        if connection_params.dbname != user_dbname or not result_work_dir or not connection_params.pid:
+            raise InvalidConnectionParamError
+        if user_dbver is not None and user_dbver != connection_params.version:
+            raise InvalidConnectionParamError
+
+
+def exec_command_with_output(cmdline):
+    """ Execute a command (including shell ones); return a tuple of the exit code (first element) and output (rest) """
+    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    ret = proc.wait()
+    if ret != 0:
+        logger.info('The command {cmd} returned a non-zero exit code'.format(cmd=cmdline))
+    return ret, proc.stdout.read().strip()
+
+
+def time_field_to_seconds(val):
+    result = 0
+    num = 0
+    accum_digits = []
+    semicolons_no = val.count(':')
+    for c in val:
+        if c.isdigit():
+            accum_digits.append(c)
+        else:
+            if len(accum_digits) > 0:
+                num = int(''.join(accum_digits))
+                if c == 'd':
+                    num *= 86400
+                elif c == ':':
+                    num *= 60 ** semicolons_no
+                    semicolons_no -= 1
+            result += num
+            num = 0
+            accum_digits = []
+    return result
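For orientation, a hand-traced illustration of the parser above (the input strings are made-up examples, not values from the patch): a number followed by 'd' counts as days, each group closed by ':' is weighted by 60 raised to the number of colons still remaining, and digits after the last delimiter are not added.

# illustrative only, traced by hand from time_field_to_seconds() above
time_field_to_seconds('10:00')    # -> 600    (ten minutes)
time_field_to_seconds('1d10:00')  # -> 87000  (one day plus ten minutes)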
+ +def dbversion_as_float(server_version): + version_num = server_version + version_num //= 100 + return float('{0}.{1}'.format(version_num // 100, version_num % 100)) diff --git a/setup.py b/setup.py index 9d05d3d..72695ec 100644 --- a/setup.py +++ b/setup.py @@ -51,9 +51,12 @@ def read_module(path): class PyTest(TestCommand): - - user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=', - None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')] + user_options = [ + ('cov=', None, 'Run coverage'), + ('cov-xml=', None, 'Generate junit xml report'), + ('cov-html=', None, 'Generate junit html report'), + ('junitxml=', None, 'Generate xml of test results') + ] def initialize_options(self): TestCommand.initialize_options(self) @@ -121,7 +124,7 @@ def setup_package(): install_requires=install_reqs, setup_requires=['flake8'], cmdclass=cmdclass, - tests_require=['pytest-cov', 'pytest'], + tests_require=['pytest-cov', 'pytest', 'mock', 'freezegun'], command_options=command_options, entry_points={'console_scripts': CONSOLE_SCRIPTS}, ) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..0679d95 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,7 @@ +freezegun==0.3.8 +funcsigs==1.0.2 +mock==2.0.0 +pbr==1.10.0 +psycopg2 +python-dateutil==2.6.0 +six==1.10.0 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/common.py b/tests/common.py new file mode 100644 index 0000000..9710f76 --- /dev/null +++ b/tests/common.py @@ -0,0 +1,33 @@ +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +import os + +TEST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__))) + + +class ContextualStringIO(StringIO): + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + return False + + +class ErrorAfter(object): + def __init__(self, limit): + self.limit = limit + self.calls = 0 + + def __call__(self, *args): + self.calls += 1 + if self.calls > self.limit: + raise CallableExhaustedError + return args + + +class CallableExhaustedError(Exception): + pass diff --git a/tests/configs/default_ok.cfg b/tests/configs/default_ok.cfg new file mode 100644 index 0000000..69e464a --- /dev/null +++ b/tests/configs/default_ok.cfg @@ -0,0 +1,9 @@ +[DEFAULT] +port=5435 + +[testdb] +port=5432 +host=/var/run/postgresql +user=username +dbname=postgres + diff --git a/tests/configs/default_with_none_user.cfg b/tests/configs/default_with_none_user.cfg new file mode 100644 index 0000000..36d6f21 --- /dev/null +++ b/tests/configs/default_with_none_user.cfg @@ -0,0 +1,8 @@ +[DEFAULT] +port=5435 + +[testdb] +port=5432 +host=/var/run/postgresql +dbname=postgres + diff --git a/tests/postmaster_pg_files/postmaster_info_missing_data b/tests/postmaster_pg_files/postmaster_info_missing_data new file mode 100644 index 0000000..cc0331f --- /dev/null +++ b/tests/postmaster_pg_files/postmaster_info_missing_data @@ -0,0 +1,7 @@ +1048 +/var/lib/postgresql/9.3/main +1479595131 +5432 + + + 5432001 0 diff --git a/tests/postmaster_pg_files/postmaster_info_ok b/tests/postmaster_pg_files/postmaster_info_ok new file mode 100644 index 0000000..89d0947 --- /dev/null +++ b/tests/postmaster_pg_files/postmaster_info_ok @@ -0,0 +1,7 @@ +1048 +/var/lib/postgresql/9.3/main +1479595131 +5432 +/var/run/postgresql +localhost + 5432001 0 diff --git a/tests/postmaster_pg_files/postmaster_info_tcp 
b/tests/postmaster_pg_files/postmaster_info_tcp new file mode 100644 index 0000000..f482064 --- /dev/null +++ b/tests/postmaster_pg_files/postmaster_info_tcp @@ -0,0 +1,7 @@ +1048 +/var/lib/postgresql/9.3/main +1479595131 +5432 +/var/run/postgresql +* + 5432001 0 diff --git a/tests/postmaster_pg_files/postmaster_info_truncated b/tests/postmaster_pg_files/postmaster_info_truncated new file mode 100644 index 0000000..3e0111c --- /dev/null +++ b/tests/postmaster_pg_files/postmaster_info_truncated @@ -0,0 +1,4 @@ +1048 +/var/lib/postgresql/9.3/main +1479595131 +5432 diff --git a/tests/test_collectors_host.py b/tests/test_collectors_host.py new file mode 100644 index 0000000..7e8fb98 --- /dev/null +++ b/tests/test_collectors_host.py @@ -0,0 +1,167 @@ +import json +import unittest +from datetime import datetime +from unittest import TestCase + +import mock +from freezegun import freeze_time + +from pg_view.collectors.host_collector import HostStatCollector +from pg_view.models.displayers import ColumnType +from pg_view.models.outputs import get_displayer_by_class +from pg_view.utils import OUTPUT_METHOD + + +class HostStatCollectorTest(TestCase): + def setUp(self): + self.collector = HostStatCollector() + super(HostStatCollectorTest, self).setUp() + + def test_refresh_should_contain_proper_keys(self): + refreshed_data = self.collector.refresh() + self.assertIn('cores', refreshed_data) + self.assertIn('hostname', refreshed_data) + self.assertIn('loadavg', refreshed_data) + self.assertIn('uptime', refreshed_data) + self.assertIn('sysname', refreshed_data) + + @mock.patch('pg_view.collectors.host_collector.os.getloadavg', return_value=(3.47, 3.16, 2.89)) + def test__read_load_average_should_call_load_average(self, mocked_os_getloadavg): + refreshed_data = self.collector._read_load_average() + self.assertEqual({'loadavg': '3.47 3.16 2.89'}, refreshed_data) + + @unittest.skip('psutil') + @freeze_time('2016-10-31 00:25:00') + @mock.patch('pg_view.collectors.host_collector.psutil.boot_time', return_value=1477834496.0) + def test_refresh_should_call_uptime(self, mocked_boot_time): + refreshed_data = self.collector._read_uptime() + expected_uptime = datetime(2016, 10, 31, 0, 25) - datetime.fromtimestamp(1477834496.0) + self.assertEqual({'uptime': str(expected_uptime)}, refreshed_data) + + @mock.patch('pg_view.collectors.host_collector.socket.gethostname', return_value='Macbook-Pro') + def test__read_hostname_should_call_get_hostname(self, mocked_socket_gethostname): + refreshed_data = self.collector._read_hostname() + self.assertEqual({'hostname': 'Macbook-Pro'}, refreshed_data) + + @mock.patch('pg_view.collectors.host_collector.os.uname', return_value=( + 'Darwin', 'MacBook-Pro', '15.6.0', 'KV 15.6.0: Thu Sep 1 PDT 2016; root:xnu-3248', 'x86_64')) + def test__read_uname_should_call_os_uname(self, mocked_os_uname): + refreshed_data = self.collector._read_uname() + self.assertEqual({'sysname': 'Darwin 15.6.0'}, refreshed_data) + + @mock.patch('pg_view.collectors.host_collector.cpu_count', return_value=1) + def test__read_cpus_should_call_cpu_count_when_ok(self, mocked_cpu_count): + refreshed_data = self.collector._read_cpus() + self.assertEqual({'cores': 1}, refreshed_data) + + @mock.patch('pg_view.collectors.host_collector.cpu_count') + @mock.patch('pg_view.collectors.host_collector.logger') + def test__read_cpus_should_log_error_when_cpu_count_not_implemented_error(self, mocked_logging, mocked_cpu_count): + mocked_cpu_count.side_effect = NotImplementedError + refreshed_data = self.collector._read_cpus() 
+ self.assertEqual({'cores': 0}, refreshed_data) + mocked_logging.error.assert_called_with('multiprocessing does not support cpu_count') + + def test_output_should_raise_not_support_when_unknown_method(self): + with self.assertRaises(Exception): + self.collector.output('unknown') + + def test_output_should_return_json_when_output_json(self): + faked_refresh_data = { + 'sysname': 'Linux 3.13.0-100-generic', + 'uptime': '2 days, 22:04:58', + 'loadavg': '0.06 0.04 0.05', + 'hostname': 'vagrant-ubuntu-trusty-64', + 'cores': 1 + } + + self.collector._do_refresh([faked_refresh_data]) + displayer = get_displayer_by_class(OUTPUT_METHOD.json, self.collector, False, True, False) + json_data = self.collector.output(displayer) + expected_resp = { + 'data': [{ + 'cores': 1, + 'host': 'vagrant-ubuntu-trusty-64', + 'load average': '0.06 0.04 0.05', + 'name': 'Linux 3.13.0-100-generic', + 'up': '2 days, 22:04:58' + }], + 'type': 'host' + } + self.assertEqual(expected_resp, json.loads(json_data)) + + def test_output_should_return_console_output_when_console(self): + faked_refresh_data = { + 'sysname': 'Linux 3.13.0-100-generic', + 'uptime': '2 days, 22:04:58', + 'loadavg': '0.06 0.04 0.05', + 'hostname': 'vagrant-ubuntu-trusty-64', + 'cores': 1 + } + displayer = get_displayer_by_class(OUTPUT_METHOD.console, self.collector, False, True, False) + self.collector._do_refresh([faked_refresh_data]) + console_data = self.collector.output(displayer) + expected_resp = [ + 'Host statistics', + 'load average up host cores name ', + '0.06 0.04 0.05 2 days, 22:04:58 vagrant-ubuntu-trusty-64 1 Linux 3.13.0-100-generic', '\n' + ] + self.assertEqual('\n'.join(expected_resp), console_data) + + def test_output_should_return_ncurses_output_when_ncurses(self): + faked_refresh_data = { + 'sysname': 'Linux 3.13.0-100-generic', + 'uptime': '2 days, 22:04:58', + 'loadavg': '0.06 0.04 0.05', + 'hostname': 'vagrant-ubuntu-trusty-64', + 'cores': 1 + } + displayer = get_displayer_by_class(OUTPUT_METHOD.curses, self.collector, False, True, False) + self.collector._do_refresh([faked_refresh_data]) + console_data = self.collector.output(displayer) + expected_resp = { + 'host': { + 'rows': [{ + 'cores': ColumnType(value='1', header='cores', header_position=2), + 'host': ColumnType(value='vagrant-ubuntu-trusty-64', header='', header_position=None), + 'load average': ColumnType(value='0.06 0.04 0.05', header='load average', header_position=1), + 'name': ColumnType(value='Linux 3.13.0-100-generic', header='', header_position=None), + 'up': ColumnType(value='2 days, 22:04:58', header='up', header_position=1) + }], + 'hide': [], + 'noautohide': {'cores': True, 'host': True, 'load average': True, 'name': True, 'up': True}, + 'prepend_column_headers': False, + 'highlights': {'cores': False, 'host': True, 'load average': False, 'name': False, 'up': False}, + 'align': {'cores': 0, 'host': 0, 'load average': 0, 'name': 0, 'up': 0}, + 'pos': {'cores': 2, 'host': 0, 'load average': 4, 'name': 3, 'up': 1}, + 'column_header': {'cores': 2, 'host': 0, 'load average': 1, 'name': 0, 'up': 1}, + 'header': False, + 'prefix': None, 'statuses': [{ + 'cores': {0: 0, -1: 0}, 'host': {0: 0, -1: 0}, 'load average': {0: 0, 1: 0, 2: 0}, + 'name': {0: 0, 1: 0, -1: 0}, 'up': {0: 0, 1: 0, 2: 0, -1: 0} + }], + 'w': {'cores': 7, 'host': 24, 'load average': 27, 'name': 24, 'up': 19}, + 'types': {'up': 0, 'cores': 1, 'host': 0, 'load average': 0, 'name': 0} + } + } + self.assertEqual(expected_resp, console_data) + + def 
test__concat_load_avg_should_return_empty_when_less_than_three_rows(self): + concatenated_data = self.collector._concat_load_avg('loadavg', (0.16, 0.05), False) + self.assertEqual('', concatenated_data) + + def test__concat_load_avg_should_return_load_avg_when_input_ok(self): + concatenated_data = self.collector._concat_load_avg('loadavg', (0.16, 0.05, 0.06), False) + self.assertEqual('0.16 0.05 0.06', concatenated_data) + + def test__construct_sysname_should_return_none_when_less_than_three_rows(self): + sysname = self.collector._construct_sysname('', ('Linux', 'vagrant-ubuntu-trusty-64'), 'optional') + self.assertIsNone(sysname) + + def test__construct_sysname_should_return_sysname_when_input_ok(self): + row = ( + 'Linux', 'vagrant-ubuntu-trusty-64', '3.13.0-100-generic', '#147-Ubuntu SMP Tue Oct 18 16:48:51 UTC 2016', + 'x86_64' + ) + sysname = self.collector._construct_sysname('', row, 'optional') + self.assertEqual('Linux 3.13.0-100-generic', sysname) diff --git a/tests/test_collectors_memory.py b/tests/test_collectors_memory.py new file mode 100644 index 0000000..a3a7ee4 --- /dev/null +++ b/tests/test_collectors_memory.py @@ -0,0 +1,106 @@ +import os +import unittest +from collections import namedtuple +from unittest import TestCase + +import mock + +from pg_view.collectors.memory_collector import MemoryStatCollector +from pg_view.utils import open_universal, KB_IN_MB +from tests.common import TEST_DIR + + +class MemoryStatCollectorTest(TestCase): + def setUp(self): + self.collector = MemoryStatCollector() + super(MemoryStatCollectorTest, self).setUp() + + @unittest.skip('psutil') + def test_refresh_should_contain_proper_data_keys(self): + refreshed_data = self.collector.refresh() + self.assertIn('cached', refreshed_data) + self.assertIn('commit_limit', refreshed_data) + self.assertIn('free', refreshed_data) + self.assertIn('dirty', refreshed_data) + self.assertIn('commit_left', refreshed_data) + self.assertIn('total', refreshed_data) + self.assertIn('buffers', refreshed_data) + self.assertIn('committed_as', refreshed_data) + + @unittest.skip('psutil') + @mock.patch('pg_view.models.collector_system.psutil.virtual_memory') + @mock.patch('pg_view.models.collector_memory.psutil.LINUX', False) + def test_read_memory_data_should_return_data_when_cpu_virtual_memory_for_macos(self, mocked_virtual_memory): + linux_svmem = namedtuple('linux_svmem', 'total free buffers cached') + mocked_virtual_memory.return_value = linux_svmem( + total=2048 * 1024, free=1024 * 1024, buffers=3072 * 1024, cached=4096 * 1024 + ) + refreshed_cpu = self.collector.read_memory_data() + expected_data = { + 'MemTotal': 2048, + 'MemFree': 1024, + 'Buffers': 3072, + 'Cached': 4096, + 'Dirty': 0, + 'CommitLimit': 0, + 'Committed_AS': 0, + } + self.assertEqual(expected_data, refreshed_cpu) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.models.collector_memory.psutil._pslinux.open_binary') + @mock.patch('pg_view.models.collector_memory.psutil.virtual_memory') + def test__read_memory_data_should_parse_data_from_proc_meminfo_when_linux(self, mocked_virtual_memory, + mocked_open_binary): + meminfo_ok_path = os.path.join(TEST_DIR, 'proc_files', 'meminfo_ok') + linux_svmem = namedtuple('linux_svmem', 'total free buffers cached') + mocked_open_binary.return_value = open_universal(meminfo_ok_path) + mocked_virtual_memory.return_value = linux_svmem( + total=2048 * 1024, free=1024 * 1024, buffers=3072 * 1024, cached=4096 * 1024 + ) + expected_data = { + 'MemTotal': 2048, + 
'Cached': 4096, + 'MemFree': 1024, + 'Buffers': 3072, + 'CommitLimit': 250852, + 'Dirty': 36, + 'Committed_AS': 329264 + } + refreshed_data = self.collector.read_memory_data() + self.assertEqual(expected_data, refreshed_data) + + def test__is_commit_should_return_false_when_both_none(self): + self.assertFalse(self.collector._is_commit({})) + + def test__is_commit_should_return_false_when_commit_limit_none(self): + self.assertFalse(self.collector._is_commit({'Commited_AS': 10})) + + def test__is_commit_should_return_false_when_commited_as_none(self): + self.assertFalse(self.collector._is_commit({'CommitLimit': 10})) + + def test__is_commit_should_return_true_when_both_exist(self): + self.assertFalse(self.collector._is_commit({'CommitLimit': 10, 'Commited_AS': 20})) + + def test__calculate_kb_left_until_limit_should_return_result(self): + data = self.collector.calculate_kb_left_until_limit( + 'commit_left', {'CommitLimit': 30, 'Committed_AS': 20}, True) + self.assertEqual(10, data) + + @mock.patch('pg_view.collectors.base_collector.logger') + def test__calculate_kb_left_until_limit_should_log_warn_when_non_optional_and_not_commit(self, mocked_logger): + data = self.collector.calculate_kb_left_until_limit('commit_left', {}, False) + self.assertIsNone(data) + mocked_logger.error.assert_called_with('Column commit_left is not optional, but input row has no value for it') + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.models.collector_system.psutil._pslinux.open_binary') + def test_get_missing_memory_stat_from_file_should_parse_data_from_proc_stat(self, mocked_open): + cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'meminfo_ok') + mocked_open.return_value = open_universal(cpu_info_ok) + refreshed_data = self.collector.get_missing_memory_stat_from_file() + expected_data = { + 'CommitLimit:': 250852 * KB_IN_MB, 'Committed_AS:': 329264 * KB_IN_MB, 'Dirty:': 36 * KB_IN_MB} + self.assertEqual(expected_data, refreshed_data) diff --git a/tests/test_collectors_partition.py b/tests/test_collectors_partition.py new file mode 100644 index 0000000..d550470 --- /dev/null +++ b/tests/test_collectors_partition.py @@ -0,0 +1,333 @@ +import os +import posix +import unittest +from collections import namedtuple +from multiprocessing import JoinableQueue +from unittest import TestCase + +import mock +# import psutil + +from tests.common import TEST_DIR, ErrorAfter, CallableExhaustedError +from pg_view.collectors.partition_collector import PartitionStatCollector, DetachedDiskStatCollector, \ + DiskCollectorConsumer + +sdiskio = namedtuple( + 'sdiskio', ['read_count', 'write_count', 'read_bytes', 'write_bytes', 'read_time', 'write_time', 'busy_time'] +) + + +class PartitionStatCollectorTest(TestCase): + def setUp(self): + self.collector = PartitionStatCollector( + dbname='/var/lib/postgresql/9.3/main', + dbversion=9.3, + work_directory='/var/lib/postgresql/9.3/main', + consumer=DiskCollectorConsumer(JoinableQueue(1)) + ) + super(PartitionStatCollectorTest, self).setUp() + + def _assert_data_has_proper_structure(self, data): + self.assertIn('type', data) + self.assertIn('path_size', data) + self.assertIn('dev', data) + self.assertIn('space_total', data) + self.assertIn('path', data) + self.assertIn('space_left', data) + + def test_refresh_should_contain_proper_data_keys(self): + refreshed_data = self.collector.refresh() + self.assertIsInstance(refreshed_data, list) + self.assertEqual(2, len(refreshed_data)) + data_type = refreshed_data[0] + 
self._assert_data_has_proper_structure(data_type) + xlog_type = refreshed_data[1] + self._assert_data_has_proper_structure(xlog_type) + + def test__dereference_dev_name_should_return_input_when_not_dev(self): + self.assertEqual('/abc', self.collector._dereference_dev_name('/abc')) + + def test__dereference_dev_name_should_return_none_when_devname_false(self): + self.assertIsNone(self.collector._dereference_dev_name('')) + + def test__dereference_dev_name_should_replace_dev_when_dev(self): + dev_name = self.collector._dereference_dev_name('/dev/sda1') + self.assertEqual('sda1', dev_name) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.partition_collector.PartitionStatCollector.get_missing_io_stat_from_file') + @mock.patch('pg_view.collectors.partition_collector.psutil.disk_io_counters') + def test_get_io_data_should_return_data_when_disk_in_pnames(self, mocked_disk_io_counters, + mocked_get_missing_io_stat_from_file): + mocked_get_missing_io_stat_from_file.return_value = {} + mocked_disk_io_counters.return_value = { + 'sda1': sdiskio(read_count=10712, write_count=4011, read_bytes=157438976, + write_bytes=99520512, read_time=6560, write_time=3768, busy_time=5924 + ) + } + io_data = self.collector.get_io_data('sda1') + expected_data = { + 'sda1': { + 'await': 0, + 'sectors_read': 307498, + 'sectors_written': 194376 + } + } + + self.assertEqual(expected_data, io_data) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.partition_collector.psutil.disk_io_counters') + def test_get_io_data_should_return_empty_when_disk_not_in_pnames(self, mocked_disk_io_counters): + mocked_disk_io_counters.return_value = { + 'sda1': sdiskio(read_count=10712, write_count=4011, read_bytes=157438976, + write_bytes=99520512, read_time=6560, write_time=3768, busy_time=5924 + ) + } + io_data = self.collector.get_io_data('sda2') + self.assertEqual({}, io_data) + + @unittest.skip('psutil') + def test_get_name_from_fields_should_return_ok_when_linux_24(self): + fields = [ + '8', '1', 'sda1', '11523', '7', '383474', '7304', '24134', '24124', '528624', '6276', '0', '5916', '13452'] + name = self.collector.get_name_from_fields(fields) + self.assertEqual('sda1', name) + + @unittest.skip('psutil') + def test_get_name_from_fields_should_return_ok_when_linux_26(self): + fields = [ + '0', '8', '1', 'sda2', '11523', '7', '383474', '7304', '24134', '24124', '528624', '6276', '0', '5916', + '13452' + ] + name = self.collector.get_name_from_fields(fields) + self.assertEqual('sda2', name) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.collectors.partition_collector.psutil.disk_io_counters') + @mock.patch('pg_view.collectors.partition_collector.PartitionStatCollector.get_missing_io_stat_from_file') + def test_get_io_data_should_parse_data_from_proc_meminfo_when_linux(self, mocked_get_missing_io_stat_from_file, + mocked_io_counters): + mocked_get_missing_io_stat_from_file.return_value = {'sda1': {'await': 100022}} + io_counters = { + 'sda1': sdiskio(read_count=11523, write_count=24279, read_bytes=196338688, write_bytes=271822848, + read_time=7304, write_time=6284, busy_time=5924) + } + mocked_io_counters.return_value = io_counters + expected_data = { + 'sda1': {'await': 100022, 'sectors_read': 383474, 'sectors_written': 530904} + } + refreshed_data = self.collector.get_io_data(['sda1']) + self.assertEqual(expected_data, refreshed_data) + mocked_get_missing_io_stat_from_file.assert_called_with(['sda1']) + + @unittest.skip('psutil') + # 
@unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.collectors.system_collector.psutil._pslinux.open_text') + def test_get_missing_io_stat_from_file_should_return_empty_when_no_data_for_name(self, mocked_open_text): + cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'diskstat_24') + mocked_open_text.return_value = open(cpu_info_ok, "rt") + refreshed_data = self.collector.get_missing_io_stat_from_file(['sda2']) + expected_data = {} + self.assertEqual(expected_data, refreshed_data) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.collectors.system_collector.psutil._pslinux.open_text') + def test_get_missing_io_stat_from_file_should_return_stats_when_data_for_names_24(self, mocked_open_text): + cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'diskstat_24') + mocked_open_text.return_value = open(cpu_info_ok, "rt") + refreshed_data = self.collector.get_missing_io_stat_from_file(['sda', 'sda1']) + expected_data = {'sda': {'await': 13524}, 'sda1': {'await': 13476}} + self.assertEqual(expected_data, refreshed_data) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.collectors.system_collector.psutil._pslinux.open_text') + def test_get_missing_io_stat_from_file_should_return_stats_when_data_for_names_26(self, mocked_open_text): + cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'diskstat_26') + mocked_open_text.return_value = open(cpu_info_ok, "rt") + refreshed_data = self.collector.get_missing_io_stat_from_file(['sda', 'sda1']) + expected_data = {'sda': {'await': 135241}, 'sda1': {'await': 135241}} + self.assertEqual(expected_data, refreshed_data) + + +class DetachedDiskStatCollectorTest(TestCase): + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.run_du') + def test_get_du_data_should_run_du_when_work_directory_and_db_version_less_than_10(self, mocked_run_du): + mocked_run_du.side_effect = [35628, 35620] + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + result = detached_disk.get_du_data('/var/lib/postgresql/9.3/main') + expected_result = { + 'xlog': ('35620', '/var/lib/postgresql/9.3/main/pg_xlog/'), + 'data': ('35628', '/var/lib/postgresql/9.3/main') + } + self.assertEqual(expected_result, result) + mocked_run_du.assert_has_calls([ + mock.call('/var/lib/postgresql/9.3/main'), + mock.call('/var/lib/postgresql/9.3/main/pg_xlog/') + ]) + + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.run_du') + def test_get_du_data_should_run_du_when_work_directory_and_db_version_bigger_than_10(self, mocked_run_du): + mocked_run_du.side_effect = [35628, 35620] + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 10.3) + result = detached_disk.get_du_data('/var/lib/postgresql/10.3/main') + expected_result = { + 'xlog': ('35620', '/var/lib/postgresql/10.3/main/pg_wal/'), + 'data': ('35628', '/var/lib/postgresql/10.3/main') + } + self.assertEqual(expected_result, result) + mocked_run_du.assert_has_calls([ + mock.call('/var/lib/postgresql/10.3/main'), + mock.call('/var/lib/postgresql/10.3/main/pg_wal/') + ]) + + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.run_du') + @mock.patch('pg_view.collectors.partition_collector.logger') + def test_get_du_data_should_log_error_when_run_du_raises_exception(self, mocked_logger, mocked_run_du): + mocked_run_du.side_effect = Exception + detached_disk = 
DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + detached_disk.get_du_data('/var/lib/postgresql/9.3/main') + expected_msg = 'Unable to read free space information for the pg_xlog and data directories for the directory ' \ + '/var/lib/postgresql/9.3/main: ' + mocked_logger.error.assert_called_with(expected_msg) + + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_du_data') + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_df_data') + def test_run_should_loop_forever_processing_both_collectors(self, mocked_get_df_data, mocked_get_du_data): + mocked_get_du_data.side_effect = ErrorAfter(1) + mocked_get_df_data.side_effect = ErrorAfter(1) + queue = mock.Mock() + detached_disk = DetachedDiskStatCollector(queue, ['/var/lib/postgresql/9.3/main'], 9.3) + with self.assertRaises(CallableExhaustedError): + detached_disk.run() + + mocked_get_du_data.assert_called_with('/var/lib/postgresql/9.3/main') + mocked_get_df_data.assert_called_with('/var/lib/postgresql/9.3/main') + queue.put.assert_called_once_with( + {'/var/lib/postgresql/9.3/main': [('/var/lib/postgresql/9.3/main',), ('/var/lib/postgresql/9.3/main',)]}) + + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_mounted_device', + return_value='/dev/sda1') + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_mount_point', return_value='/') + @mock.patch('pg_view.collectors.partition_collector.os.statvfs') + def test_get_df_data_should_return_proper_data_when_data_dev_and_xlog_dev_are_equal(self, mocked_os_statvfs, + mocked_get_mounted_device, + mocked_get_mount_point): + seq = (4096, 4096, 10312784, 9823692, 9389714, 2621440, 2537942, 2537942, 4096, 255) + mocked_os_statvfs.return_value = posix.statvfs_result(sequence=seq) + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + df_data = detached_disk.get_df_data('/var/lib/postgresql/9.3/main') + expected_df_data = {'data': ('/dev/sda1', 41251136, 37558856), 'xlog': ('/dev/sda1', 41251136, 37558856)} + self.assertEqual(expected_df_data, df_data) + + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_mounted_device', + side_effect=['/dev/sda1', '/dev/sda2']) + @mock.patch('pg_view.collectors.partition_collector.DetachedDiskStatCollector.get_mount_point', return_value='/') + @mock.patch('pg_view.collectors.partition_collector.os.statvfs') + def test_get_df_data_should_return_proper_data_when_data_dev_and_xlog_dev_are_different(self, mocked_os_statvfs, + mocked_get_mounted_device, + mocked_get_mount_point): + mocked_os_statvfs.side_effect = [ + posix.statvfs_result( + sequence=(4096, 4096, 10312784, 9823692, 9389714, 2621440, 2537942, 2537942, 4096, 255)), + posix.statvfs_result(sequence=(1024, 1024, 103127, 9823, 9389, 2621, 2537, 2537, 1024, 255)) + ] + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + df_data = detached_disk.get_df_data('/var/lib/postgresql/9.3/main') + expected_df_data = {'data': ('/dev/sda1', 41251136, 37558856), 'xlog': ('/dev/sda2', 103127, 9389)} + self.assertEqual(expected_df_data, df_data) + + @mock.patch('pg_view.collectors.partition_collector.os.statvfs', return_value=(4096, 4096)) + def test__get_or_update_df_cache_should_call_os_statvfs_when_empty_cache(self, mocked_os_statvfs): + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + df_data = 
detached_disk._get_or_update_df_cache('/var/lib/postgresql/9.3/main', '/sda/dev1') + self.assertEqual((4096, 4096,), df_data) + self.assertEqual((4096, 4096,), detached_disk.df_cache['/sda/dev1']) + mocked_os_statvfs.assert_called_once_with('/var/lib/postgresql/9.3/main') + + @mock.patch('pg_view.collectors.partition_collector.os.statvfs') + def test__get_or_update_df_cache_should_get_from_cache_when_entry_exists(self, mocked_os_statvfs): + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + detached_disk.df_cache = {'/sda/dev1': (4096, 4096,)} + df_data = detached_disk._get_or_update_df_cache('/var/lib/postgresql/9.3/main', '/sda/dev1') + self.assertEqual((4096, 4096,), df_data) + mocked_os_statvfs.assert_not_called() + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.partition_collector.psutil.disk_partitions', return_value=[]) + def test_get_mounted_device_should_return_none_when_no_device_on_pathname(self, mocked_disk_partitions): + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + mounted_device = detached_disk.get_mounted_device('/test') + self.assertIsNone(mounted_device) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.partition_collector.psutil.disk_partitions') + def test_get_mounted_device_should_return_dev_when_device_on_pathname(self, mocked_disk_partitions): + device = mock.Mock(mountpoint='/', device='sda1') + mocked_disk_partitions.return_value = [device] + detached_disk = DetachedDiskStatCollector(mock.Mock(), ['/var/lib/postgresql/9.3/main'], 9.3) + mounted_device = detached_disk.get_mounted_device('/') + self.assertEqual('sda1', mounted_device) + + +class DiskCollectorConsumerTest(TestCase): + def test_consume_should_not_get_new_data_from_queue_when_old_not_consumed(self): + queue = mock.Mock() + first_data = {'/var/lib/postgresql/9.3/main': [{ + 'xlog': ('16388', '/var/lib/postgresql/9.3/main/pg_xlog'), + 'data': ('35620', '/var/lib/postgresql/9.3/main') + }, { + 'xlog': ('/dev/sda1', 41251136, 37716376), + 'data': ('/dev/sda1', 41251136, 37716376)} + ]} + + queue.get_nowait.return_value = first_data + + consumer = DiskCollectorConsumer(queue) + consumer.consume() + self.assertEqual(first_data, consumer.result) + self.assertEqual(first_data, consumer.cached_result) + + self.assertIsNone(consumer.consume()) + self.assertEqual(first_data, consumer.result) + self.assertEqual(first_data, consumer.cached_result) + + def test_consume_should_consume_new_data_when_old_fetched(self): + queue = mock.Mock() + first_data = {'/var/lib/postgresql/9.3/main': [{ + 'xlog': ('16388', '/var/lib/postgresql/9.3/main/pg_xlog'), + 'data': ('35620', '/var/lib/postgresql/9.3/main') + }, { + 'xlog': ('/dev/sda1', 41251136, 37716376), + 'data': ('/dev/sda1', 41251136, 37716376)} + ]} + + second_data = {'/var/lib/postgresql/9.3/main': [{ + 'xlog': ('16389', '/var/lib/postgresql/9.3/main/pg_xlog'), + 'data': ('35621', '/var/lib/postgresql/9.3/main') + }, { + 'xlog': ('/dev/sda1', 41251137, 37716377), + 'data': ('/dev/sda1', 41251137, 37716377)} + ]} + + queue.get_nowait.side_effect = [first_data.copy(), second_data.copy()] + + consumer = DiskCollectorConsumer(queue) + consumer.consume() + self.assertEqual(first_data, consumer.result) + self.assertEqual(first_data, consumer.cached_result) + + self.assertEqual(first_data['/var/lib/postgresql/9.3/main'], consumer.fetch('/var/lib/postgresql/9.3/main')) + self.assertEqual({}, consumer.result) + self.assertEqual(first_data, 
consumer.cached_result) + + consumer.consume() + self.assertEqual(second_data, consumer.result) + self.assertEqual(second_data, consumer.cached_result) + + self.assertEqual(second_data['/var/lib/postgresql/9.3/main'], consumer.fetch('/var/lib/postgresql/9.3/main')) + self.assertEqual({}, consumer.result) + self.assertEqual(second_data, consumer.cached_result) diff --git a/tests/test_collectors_pg.py b/tests/test_collectors_pg.py new file mode 100644 index 0000000..ce8efd0 --- /dev/null +++ b/tests/test_collectors_pg.py @@ -0,0 +1,380 @@ +import datetime +import unittest +from collections import namedtuple +from unittest import TestCase + +import mock +# import psutil +import psycopg2 + +from pg_view.collectors.pg_collector import PgStatCollector +from pg_view.sqls import SHOW_MAX_CONNECTIONS, SELECT_PG_IS_IN_RECOVERY, SELECT_PGSTAT_VERSION_LESS_THAN_92, \ + SELECT_PGSTAT_VERSION_LESS_THAN_96, SELECT_PGSTAT_NEVER_VERSION +from pg_view.utils import dbversion_as_float + +pmem = namedtuple('pmem', ['rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty']) +pio = namedtuple('pio', ['read_count', 'write_count', 'read_bytes', 'write_bytes']) +pcputimes = namedtuple('pcputimes', ['user', 'system', 'children_user', 'children_system']) + + +class PgStatCollectorTest(TestCase): + def setUp(self): + super(PgStatCollectorTest, self).setUp() + self.cluster = { + 'ver': 9.3, + 'name': '/var/lib/postgresql/9.3/main', + 'pid': 1049, + 'reconnect': mock.Mock(), + 'pgcon': mock.MagicMock(), + } + + def test_dbversion_as_float_should_return_formatted_version_from_pgcon_version(self): + self.assertEqual(9.3, dbversion_as_float(90314)) + + def test__get_psinfo_should_return_empty_when_no_cmdline(self): + pstype, action = PgStatCollector.from_cluster(self.cluster, 1049)._get_psinfo('') + self.assertEqual('unknown', pstype) + self.assertIsNone(action) + + def test__get_psinfo_should_return_pstype_action_when_cmdline_matches_postgres(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + pstype, action = collector._get_psinfo('postgres: back') + self.assertEqual('backend', pstype) + self.assertIsNone(action) + + def test__get_psinfo_should_return_pstype_action_when_cmdline_matches_postgres_process(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + pstype, action = collector._get_psinfo('postgres: checkpointer process') + self.assertEqual('checkpointer', pstype) + self.assertEqual('', action) + + def test__get_psinfo_should_return_pstype_action_when_cmdline_matches_autovacuum_worker(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + pstype, action = collector._get_psinfo('postgres: autovacuum worker process') + self.assertEqual('autovacuum', pstype) + self.assertEqual('', action) + + def test__get_psinfo_should_return_unknown_when_cmdline_not_match(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + pstype, action = collector._get_psinfo('postgres1: worker process') + self.assertEqual('unknown', pstype) + self.assertIsNone(action) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.psutil.Process') + @mock.patch('pg_view.collectors.pg_collector.logger') + def test_get_subprocesses_pid_should_return_empty_when_no_cmd_output(self, mocked_logger, mocked_process): + mocked_process.return_value.children.return_value = [] + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual([], collector.get_subprocesses_pid()) + mocked_logger.info.assert_called_with("Couldn't determine the pid of 
subprocesses for 1049") + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.psutil.Process') + def test_get_subprocesses_pid_should_return_subprocesses_when_children_processes(self, mocked_process): + subprocesses = [mock.Mock(pid=1051), mock.Mock(pid=1052), mock.Mock(pid=1206)] + mocked_process.return_value.children.return_value = subprocesses + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual([1051, 1052, 1206], collector.get_subprocesses_pid()) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.psutil.Process') + def test__get_memory_usage_should_return_uss_when_memory_info_ok(self, mocked_psutil_process): + mocked_psutil_process.return_value.memory_info.return_value = pmem( + rss=1769472, vms=252428288, shared=344064, text=5492736, lib=0, data=1355776, dirty=0) + collector = PgStatCollector.from_cluster(self.cluster, 1049) + memory_usage = collector._get_memory_usage(1049) + self.assertEqual(1425408, memory_usage) + + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._execute_fetchone_query', return_value={}) + def test__get_max_connections_should_return_zero_when_no_output(self, mocked_execute_fetchone_query): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual(0, collector._get_max_connections()) + mocked_execute_fetchone_query.assert_called_with(SHOW_MAX_CONNECTIONS) + + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._execute_fetchone_query', + return_value={'max_connections': '1'}) + def test__get_max_connections_should_return_zero_when_output_ok(self, mocked_execute_fetchone_query): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual(1, collector._get_max_connections()) + mocked_execute_fetchone_query.assert_called_with(SHOW_MAX_CONNECTIONS) + + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._execute_fetchone_query', return_value={}) + def test__get_recovery_status_should_return_unknown_when_no_output(self, mocked_execute_fetchone_query): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual('unknown', collector._get_recovery_status()) + mocked_execute_fetchone_query.assert_called_with(SELECT_PG_IS_IN_RECOVERY) + + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._execute_fetchone_query', return_value={'role': 'role'}) + def test__get_recovery_status_should_return_zero_when_output_ok(self, mocked_execute_fetchone_query): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + self.assertEqual('role', collector._get_recovery_status()) + mocked_execute_fetchone_query.assert_called_with(SELECT_PG_IS_IN_RECOVERY) + + def test_get_sql_by_pg_version_should_return_92_when_dbver_less_than_92(self): + cluster = self.cluster.copy() + cluster['ver'] = 9.1 + collector = PgStatCollector.from_cluster(cluster, 1049) + self.assertEqual(SELECT_PGSTAT_VERSION_LESS_THAN_92, collector.get_sql_pgstat_by_version()) + + def test_get_sql_by_pg_version_should_return_less_than_96_when_dbver_95(self): + cluster = self.cluster.copy() + cluster['ver'] = 9.5 + collector = PgStatCollector.from_cluster(cluster, 1049) + self.assertEqual(SELECT_PGSTAT_VERSION_LESS_THAN_96, collector.get_sql_pgstat_by_version()) + + def test_get_sql_by_pg_version_should_return_newer_when_bigger_than_96(self): + cluster = self.cluster.copy() + cluster['ver'] = 9.7 + collector = PgStatCollector.from_cluster(cluster, 1049) + self.assertEqual(SELECT_PGSTAT_NEVER_VERSION, collector.get_sql_pgstat_by_version()) + 
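The three version tests above pin down how the collector picks its activity query. A minimal sketch of that dispatch, assuming the cutoffs sit at 9.2 and 9.6 (the real PgStatCollector.get_sql_pgstat_by_version() may differ in detail):

# minimal sketch, not the actual implementation
from pg_view.sqls import SELECT_PGSTAT_VERSION_LESS_THAN_92, \
    SELECT_PGSTAT_VERSION_LESS_THAN_96, SELECT_PGSTAT_NEVER_VERSION

def get_sql_pgstat_by_version(dbver):
    if dbver < 9.2:
        return SELECT_PGSTAT_VERSION_LESS_THAN_92
    if dbver < 9.6:
        return SELECT_PGSTAT_VERSION_LESS_THAN_96
    return SELECT_PGSTAT_NEVER_VERSION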
+ @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.os') + @mock.patch('pg_view.collectors.pg_collector.psutil.Process') + def test__read_proc_should_return_data_when_process_ok(self, mocked_psutil_process, mocked_os): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + mocked_os.sysconf.return_value.SC_PAGE_SIZE = 4096 + mocked_process = mocked_psutil_process.return_value + mocked_process.pid = 1049 + mocked_process.status.return_value = 'status' + mocked_process.io_counters.return_value = pio( + read_count=12, write_count=13, read_bytes=655, write_bytes=1) + cpu_times = pcputimes(user=0.02, system=0.01, children_user=0.0, children_system=0.0) + memory_info = pmem( + rss=1769472, vms=252428288, shared=344064, text=5492736, lib=0, data=1355776, dirty=0) + mocked_process.cpu_times.return_value = cpu_times + mocked_process.memory_info.return_value = memory_info + mocked_process.nice.return_value = '10' + mocked_process.cmdline.return_value = ['backend \n'] + mocked_process.create_time.return_value = 1480777289.0 + proc_stats = collector.get_proc_data(1048) + expected_proc_stats = { + 'read_bytes': 655, + 'write_bytes': 1, + + 'pid': 1049, + 'state': 'status', + 'utime': 0.0002, + 'stime': 0.0001, + 'rss': 432, + 'priority': 10, + 'vsize': 252428288, + + 'guest_time': 0.0, + 'starttime': datetime.datetime.fromtimestamp(1480777289.0), + 'delayacct_blkio_ticks': 0, + 'cmdline': 'backend' + } + self.assertEqual(expected_proc_stats, proc_stats) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, 'Linux only') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_psinfo') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage') + def test_get_additional_info_should_update_when_not_backend_and_action(self, mocked__get_memory_usage, + mocked__get_psinfo): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + mocked__get_psinfo.return_value = ('vacuum', 'query') + mocked__get_memory_usage.return_value = 10 + info = collector.get_additional_proc_info(1049, {'cmdline': ''}, [10]) + self.assertEqual({'type': 'vacuum', 'query': 'query', 'cmdline': '', 'uss': 10}, info) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, 'Linux only') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_psinfo') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage') + def test_get_additional_info_should_update_when_not_backend_and_not_action(self, mocked__get_memory_usage, + mocked__get_psinfo): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + mocked__get_psinfo.return_value = ('vacuum', None) + mocked__get_memory_usage.return_value = 10 + info = collector.get_additional_proc_info(1049, {'cmdline': ''}, [10]) + self.assertEqual({'type': 'vacuum', 'cmdline': '', 'uss': 10}, info) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_psinfo') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage') + def test_get_additional_info_should_update_when_backend_and_not_active(self, mocked__get_memory_usage, + mocked__get_psinfo): + collector = PgStatCollector.from_cluster(self.cluster, [1011]) + mocked__get_psinfo.return_value = ('vacuum', None) + mocked__get_memory_usage.return_value = 10 + info = collector.get_additional_proc_info(1049, {'cmdline': ''}, {1049: {'query': 'idle'}}) + self.assertEqual({'type': 'backend', 'cmdline': ''}, info) + + @unittest.skip('psutil') + # 
@unittest.skipUnless(psutil.LINUX, 'Linux only') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_psinfo') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage') + def test_get_additional_info_should_update_when_backend_and_active_query_not_idle(self, mocked__get_memory_usage, + mocked__get_psinfo): + collector = PgStatCollector.from_cluster(self.cluster, [1011]) + mocked__get_psinfo.return_value = ('vacuum', None) + mocked__get_memory_usage.return_value = 10 + info = collector.get_additional_proc_info(1049, {'cmdline': ''}, {1049: {'query': 'not idle'}}) + self.assertEqual({'type': 'backend', 'cmdline': '', 'uss': 10}, info) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, 'Linux only') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_psinfo') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage') + def test_get_additional_info_should_update_when_backend_and_active_pid_in_track_pids(self, mocked__get_memory_usage, + mocked__get_psinfo): + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + mocked__get_psinfo.return_value = ('vacuum', None) + mocked__get_memory_usage.return_value = 10 + info = collector.get_additional_proc_info(1049, {'cmdline': ''}, {1049: {'query': 'idle'}}) + self.assertEqual({'type': 'backend', 'cmdline': '', 'uss': 10}, info) + + def test__read_pg_stat_activity_should_parse_pg_stats_when_ok(self): + results = [{ + 'datname': 'postgres', + 'client_addr': None, + 'locked_by': None, + 'pid': 11139, + 'waiting': False, + 'client_port': -1, + 'query': 'idle', + 'age': None, + 'usename': 'postgres' + }] + + self.cluster['pgcon'].cursor.return_value.fetchall.return_value = results + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + activity_stats = collector._read_pg_stat_activity() + expected_stats = { + 11139: { + 'datname': 'postgres', + 'client_addr': None, + 'locked_by': None, + 'pid': 11139, + 'waiting': False, + 'client_port': -1, + 'query': 'idle', + 'age': None, + 'usename': 'postgres' + } + } + + self.assertEqual(expected_stats, activity_stats) + + def test_ncurses_produce_prefix_should_return_offline_when_no_pgcon(self): + self.cluster['pgcon'].get_parameter_status.return_value = '9.3' + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + collector.pgcon = None + self.assertEqual('/var/lib/postgresql/9.3/main 9.3 (offline)\n', collector.ncurses_produce_prefix()) + + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_max_connections', return_value=10) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_recovery_status', return_value='role') + def test_ncurses_produce_prefix_should_return_online_when_pgcon(self, mocked__status, mocked__max_conn): + self.cluster['pgcon'].get_parameter_status.return_value = '9.3' + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + self.assertEqual( + '/var/lib/postgresql/9.3/main 9.3 role connections: 0 of 10 allocated, 0 active\n', + collector.ncurses_produce_prefix() + ) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, 'Linux only') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage', return_value=10) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._read_pg_stat_activity') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_subprocesses_pid', return_value=[1010]) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_proc_data') + 
@mock.patch('pg_view.collectors.pg_collector.PgStatCollector._do_refresh') + def test_refresh_should_return_results_when_ok(self, mocked__do_refresh, mocked_get_proc_data, + mocked_get_subprocesses_pid, mocked__read_pg_stat_activity, + mocked___get_memory_usage): + mocked_get_proc_data.return_value = { + 'read_bytes': 655, + 'write_bytes': 1, + 'pid': 1049, + 'status': 'status', + 'utime': 0.0002, + 'stime': 0.0001, + 'rss': 432, + 'priority': 10, + 'vsize': 252428288, + 'guest_time': 0.0, + 'starttime': 911, + 'delayacct_blkio_ticks': 1, + 'cmdline': 'backend' + } + + mocked__read_pg_stat_activity.return_value = { + 11139: { + 'datname': 'postgres', + 'client_addr': None, + 'locked_by': None, + 'pid': 11139, + 'waiting': False, + 'client_port': -1, + 'query': 'idle', + 'age': None, + 'usename': 'postgres' + } + } + + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + result = collector.refresh() + expected_results = [{ + 'status': 'status', + 'write_bytes': 1, + 'vsize': 252428288, + 'delayacct_blkio_ticks': 1, + 'pid': 1049, + 'priority': 10, + 'cmdline': 'backend', + 'read_bytes': 655, + 'uss': 10, + 'stime': 0.0001, + 'starttime': 911, + 'utime': 0.0002, + 'type': 'unknown', + 'guest_time': 0.0, + 'rss': 432 + }] + self.assertEqual(expected_results, result) + mocked__do_refresh.assert_called_with(result) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._try_reconnect') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage', return_value=10) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._read_pg_stat_activity') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_subprocesses_pid', return_value=[1010]) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_proc_data') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._do_refresh') + def test_refresh_should_try_reconnect_when_no_pgcon(self, mocked__do_refresh, mocked_get_proc_data, + mocked_get_subprocesses_pid, + mocked__read_pg_stat_activity, mocked___get_memory_usage, + mocked_try_reconnect): + mocked_get_proc_data.return_value = {} + mocked__read_pg_stat_activity.return_value = {} + + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + collector.pgcon = None + result = collector.refresh() + mocked_try_reconnect.assert_called_with() + mocked__do_refresh.assert_called_with(result) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._try_reconnect') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._get_memory_usage', return_value=10) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._read_pg_stat_activity') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_subprocesses_pid', return_value=[1010]) + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector.get_proc_data') + @mock.patch('pg_view.collectors.pg_collector.PgStatCollector._do_refresh') + def test_refresh_should_return_none_when_try_reconnect_raises_error(self, mocked__do_refresh, mocked_get_proc_data, + mocked_get_subprocesses_pid, + mocked__read_pg_stat_activity, + mocked___get_memory_usage, + mocked_try_reconnect): + mocked_get_proc_data.return_value = {} + mocked__read_pg_stat_activity.return_value = {} + + collector = PgStatCollector.from_cluster(self.cluster, [1049]) + collector.pgcon = None + mocked_try_reconnect.side_effect = psycopg2.OperationalError + result = collector.refresh() + self.assertIsNone(result) + 
mocked__do_refresh.assert_called_with([]) diff --git a/tests/test_collectors_system.py b/tests/test_collectors_system.py new file mode 100644 index 0000000..d79ab14 --- /dev/null +++ b/tests/test_collectors_system.py @@ -0,0 +1,176 @@ +import unittest +from collections import namedtuple +from unittest import TestCase + +import mock +import os +# import psutil + +from pg_view.collectors.system_collector import SystemStatCollector +from tests.common import TEST_DIR + + +class SystemStatCollectorTest(TestCase): + def setUp(self): + self.collector = SystemStatCollector() + super(SystemStatCollectorTest, self).setUp() + + @unittest.skip('psutil') + def test_refresh_should_contain_proper_data_keys(self): + refreshed_data = self.collector.refresh() + self.assertIn('stime', refreshed_data) + self.assertIn('softirq', refreshed_data) + self.assertIn('iowait', refreshed_data) + self.assertIn('idle', refreshed_data) + self.assertIn('ctxt', refreshed_data) + self.assertIn('running', refreshed_data) + self.assertIn('blocked', refreshed_data) + self.assertIn('guest', refreshed_data) + self.assertIn('irq', refreshed_data) + self.assertIn('utime', refreshed_data) + self.assertIn('steal', refreshed_data) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.system_collector.SystemStatCollector._refresh_cpu_time_values') + @mock.patch('pg_view.collectors.system_collector.SystemStatCollector._do_refresh') + @mock.patch('pg_view.collectors.system_collector.SystemStatCollector.read_cpu_stats') + @mock.patch('pg_view.collectors.system_collector.SystemStatCollector.read_cpu_times') + def test_refresh_should_call_helpers_with_proper_data(self, mocked_read_cpu_times, mocked_read_proc_stat, + mocked__do_refresh, mocked__refresh_cpu_time_values): + cpu_stats = { + 'cpu': ['46535', '0', '40348', '8412642', '188', '1', '2020', '0', '0', '0'], 'blocked': 0, + 'ctxt': 11530476.0, 'guest': 0.0, 'idle': 8412642.0, 'iowait': 188.0, 'irq': 1.0, 'running': 1, + 'softirq': 2020.0, 'steal': 0.0, 'stime': 40348.0, 'utime': 46535.0 + } + + cpu_times = { + 'guest': 0.0, 'idle': 8412642.0, 'iowait': 188.0, 'irq': 1.0, + 'softirq': 2020.0, 'steal': 0.0, 'stime': 40348.0, 'utime': 46535.0 + } + + mocked_read_proc_stat.return_value = cpu_stats + mocked_read_cpu_times.return_value = cpu_times + merged_data = dict(cpu_times, **cpu_stats) + + self.collector.refresh() + mocked__refresh_cpu_time_values.assert_called_once_with(cpu_times) + mocked__do_refresh.assert_called_once_with([merged_data]) + + @unittest.skip('psutil') + # @unittest.skipUnless(psutil.LINUX, "Linux only") + @mock.patch('pg_view.collectors.system_collector.psutil._pslinux.open_binary') + def test_get_missing_cpu_stat_from_file_should_parse_data_from_proc_stat(self, mocked_open): + cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'cpu_info_ok') + mocked_open.return_value = open(cpu_info_ok, "rb") + refreshed_data = self.collector.get_missing_cpu_stat_from_file() + self.assertEqual({b'procs_blocked': 0, b'procs_running': 1}, refreshed_data) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.system_collector.psutil.cpu_times') + def test_read_cpu_data_should_transform_input_when_cpu_times_for_linux(self, mocked_cpu_times): + linux_scputimes = namedtuple('scputimes', 'user nice system idle iowait irq softirq steal guest') + mocked_cpu_times.return_value = linux_scputimes( + user=848.31, nice=0.0, system=775.15, idle=105690.03, iowait=2.05, irq=0.01, + softirq=54.83, steal=0.0, guest=0.0 + ) + refreshed_cpu = self.collector.read_cpu_times() + 
expected_data = { + 'guest': 0.0, 'idle': 105690.03, 'iowait': 2.05, 'irq': 0.01, 'softirq': 54.83, + 'steal': 0.0, 'stime': 775.15, 'utime': 848.31 + } + self.assertEqual(expected_data, refreshed_cpu) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.system_collector.psutil.cpu_times') + def test_read_cpu_data_should_transform_input_when_cpu_times_for_macos(self, mocked_cpu_times): + macos_scputimes = namedtuple('scputimes', 'user system idle') + mocked_cpu_times.return_value = macos_scputimes( + user=49618.61, system=28178.55, idle=341331.57) + refreshed_cpu = self.collector.read_cpu_times() + expected_data = { + 'guest': 0.0, 'idle': 341331.57, 'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0, + 'steal': 0.0, 'stime': 28178.55, 'utime': 49618.61 + } + self.assertEqual(expected_data, refreshed_cpu) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.system_collector.psutil.cpu_stats') + @mock.patch('pg_view.collectors.system_collector.psutil.LINUX', False) + def test_read_cpu_data_should_transform_input_when_cpu_stats_for_macos(self, mocked_cpu_times): + macos_scpustats = namedtuple('scpustats', 'ctx_switches interrupts soft_interrupts syscalls') + mocked_cpu_times.return_value = macos_scpustats( + ctx_switches=12100, interrupts=888823, soft_interrupts=211467872, syscalls=326368) + + refreshed_cpu = self.collector.read_cpu_stats() + expected_data = {'running': 0.0, 'ctxt': 12100, 'blocked': 0.0} + self.assertEqual(expected_data, refreshed_cpu) + + @unittest.skip('psutil') + @mock.patch('pg_view.collectors.system_collector.psutil.cpu_stats') + @mock.patch('pg_view.collectors.system_collector.psutil.LINUX', True) + @mock.patch('pg_view.collectors.system_collector.SystemStatCollector.get_missing_cpu_stat_from_file') + def test_read_cpu_data_should_transform_input_when_cpu_stats_for_linux(self, mocked_get_missing_cpu_stat_from_file, + mocked_cpu_times): + linux_scpu_stats = namedtuple('scpustats', 'ctx_switches interrupts soft_interrupts syscalls') + mocked_get_missing_cpu_stat_from_file.return_value = { + 'procs_running': 10, + 'procs_blocked': 20, + } + mocked_cpu_times.return_value = linux_scpu_stats( + ctx_switches=12100, interrupts=888823, soft_interrupts=211467872, syscalls=326368) + + refreshed_cpu = self.collector.read_cpu_stats() + expected_data = {'running': 10.0, 'ctxt': 12100, 'blocked': 20.0} + self.assertEqual(expected_data, refreshed_cpu) + mocked_get_missing_cpu_stat_from_file.assert_called_with() + + def test__refresh_cpu_time_values_should_update_cpu_when_ok(self): + cpu_data = { + 'guest': 0.0, 'idle': 8412642.0, 'iowait': 188.0, 'irq': 1.0, + 'softirq': 2020.0, 'steal': 0.0, 'stime': 40348.0, 'utime': 46535.0 + } + self.collector.current_total_cpu_time = 1.0 + self.collector._refresh_cpu_time_values(cpu_data) + + self.assertEqual(1.0, self.collector.previos_total_cpu_time) + self.assertEqual(8501734.0, self.collector.current_total_cpu_time) + self.assertEqual(8501733.0, self.collector.cpu_time_diff) + + def test__cpu_time_diff_should_return_none_when_cpu_time_diff_zero(self): + current = { + 'guest': 0.0, 'irq': 0.0, 'running': 1, 'idle': 75211.11, 'stime': 209.64, 'iowait': 1.71, 'blocked': 0, + 'utime': 292.11, 'steal': 0.0, 'ctxt': 6595374, 'softirq': 9.0 + } + previous = { + 'guest': 0.0, 'irq': 0.0, 'running': 2, 'idle': 75210.22, 'stime': 209.6, 'iowait': 1.71, 'blocked': 0, + 'utime': 291.99, 'steal': 0.0, 'ctxt': 6594493, 'softirq': 8.99 + } + + self.collector.cpu_time_diff = 0 + self.assertIsNone(self.collector._cpu_time_diff('utime', current, 
previous)) + + def test__cpu_time_diff_should_return_none_when_no_colname_in_data(self): + current = { + 'guest': 0.0, 'irq': 0.0, 'running': 1, 'idle': 75211.11, 'stime': 209.64, 'iowait': 1.71, 'blocked': 0, + 'steal': 0.0, 'ctxt': 6595374, 'softirq': 9.0 + } + previous = { + 'guest': 0.0, 'irq': 0.0, 'running': 2, 'idle': 75210.22, 'stime': 209.6, 'iowait': 1.71, 'blocked': 0, + 'steal': 0.0, 'ctxt': 6594493, 'softirq': 8.99 + } + + self.collector.cpu_time_diff = 1 + self.assertIsNone(self.collector._cpu_time_diff('utime', current, previous)) + + def test__cpu_time_diff_should_return_diff_when_ok(self): + current = { + 'guest': 0.0, 'irq': 0.0, 'running': 1, 'idle': 75211.11, 'stime': 209.64, 'iowait': 1.71, 'blocked': 0, + 'utime': 293, 'steal': 0.0, 'ctxt': 6595374, 'softirq': 9.0 + } + previous = { + 'guest': 0.0, 'irq': 0.0, 'running': 2, 'idle': 75210.22, 'stime': 209.6, 'iowait': 1.71, 'blocked': 0, + 'utime': 292, 'steal': 0.0, 'ctxt': 6594493, 'softirq': 8.99 + } + + self.collector.cpu_time_diff = 1 + self.assertEqual(1, self.collector._cpu_time_diff('utime', current, previous)) diff --git a/tests/test_model_outputs.py b/tests/test_model_outputs.py new file mode 100644 index 0000000..fceccd0 --- /dev/null +++ b/tests/test_model_outputs.py @@ -0,0 +1,30 @@ +from unittest import TestCase + +import mock + +from pg_view.models.outputs import get_displayer_by_class +from pg_view.utils import OUTPUT_METHOD + + +class GetDisplayerByClassTest(TestCase): + def test_get_displayer_by_class_should_raise_exception_when_unknown_method(self): + with self.assertRaises(Exception): + get_displayer_by_class('unknown', {}, True, True, True) + + @mock.patch('pg_view.models.outputs.JsonDisplayer.from_collector') + def test_get_displayer_by_class_should_return_json_displayer_when_json(self, mocked_from_collector): + collector = mock.Mock() + get_displayer_by_class(OUTPUT_METHOD.json, collector, True, True, True) + mocked_from_collector.assert_called_with(collector, True, True, True) + + @mock.patch('pg_view.models.outputs.ConsoleDisplayer.from_collector') + def test_get_displayer_by_class_should_return_console_displayer_when_console(self, mocked_from_collector): + collector = mock.Mock() + get_displayer_by_class(OUTPUT_METHOD.console, collector, True, True, True) + mocked_from_collector.assert_called_with(collector, True, True, True) + + @mock.patch('pg_view.models.outputs.CursesDisplayer.from_collector') + def test_get_displayer_by_class_should_return_curses_displayer_when_curses(self, mocked_from_collector): + collector = mock.Mock() + get_displayer_by_class(OUTPUT_METHOD.curses, collector, True, True, True) + mocked_from_collector.assert_called_with(collector, True, True, True) diff --git a/tests/test_model_parsers.py b/tests/test_model_parsers.py new file mode 100644 index 0000000..919e256 --- /dev/null +++ b/tests/test_model_parsers.py @@ -0,0 +1,350 @@ +import os +import unittest +from collections import namedtuple +from unittest import TestCase + +import mock + +from tests.common import TEST_DIR +from pg_view.models.parsers import ProcNetParser, get_dbname_from_path, ProcWorker, connection_params + +sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr', 'status', 'pid']) + + +@unittest.skip('psutil') +class ProcNetParserTest(TestCase): + @mock.patch('pg_view.models.parsers.logger') + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test__get_connection_by_type_should_return_none_when_unix_type_wrong_format(self, mocked_net_connections, + mocked_logger): + 
parser = ProcNetParser(1048) + unix_conn = sconn( + fd=6, family=1, type=1, laddr='/var/run/.s.PGSQQL.5432', raddr=None, status='NONE', pid=1048) + conn_params = parser._get_connection_by_type('unix', unix_conn) + self.assertIsNone(conn_params) + expected_msg = 'unix socket name is not recognized as belonging to PostgreSQL: {0}'.format(unix_conn) + mocked_logger.warning.assert_called_with(expected_msg) + + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test__get_connection_by_type_should_return_conn_params_when_unix_type_ok(self, mocked_net_connections): + parser = ProcNetParser(1048) + unix_conn = sconn( + fd=6, family=1, type=1, laddr='/var/run/postgres/.s.PGSQL.5432', raddr=None, status='NONE', pid=1048) + conn_params = parser._get_connection_by_type('unix', unix_conn) + self.assertEqual(('/var/run/postgres', '5432'), conn_params) + + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test__get_connection_by_type_should_return_conn_params_when_tcp_type_ok(self, mocked_net_connections): + parser = ProcNetParser(1048) + unix_conn = sconn(fd=3, family=2, type=1, laddr=('127.0.0.1', 5432), raddr=(), status='LISTEN', pid=1048) + conn_params = parser._get_connection_by_type('tcp', unix_conn) + self.assertEqual(('127.0.0.1', 5432), conn_params) + + conn_params = parser._get_connection_by_type('tcp6', unix_conn) + self.assertEqual(('127.0.0.1', 5432), conn_params) + + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test_get_socket_connections_call_net_connections_with_allowed_conn_types(self, mocked_net_connections): + ProcNetParser(1048) + calls = [mock.call('unix'), mock.call('tcp'), mock.call('tcp6')] + mocked_net_connections.assert_has_calls(calls, any_order=True) + + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test_get_socket_connections_exclude_by_pid(self, mocked_net_connections): + unix_conns = [ + sconn(fd=6, family=1, type=1, laddr='/var/run/postgres/.s.PGSQL.5432', raddr=None, status='NONE', pid=1048), + sconn(fd=6, family=1, type=1, laddr='/var/run/postgres/.s.PGSQL.5432', raddr=None, status='NONE', pid=1049) + ] + tcp_conns = [ + sconn(fd=6, family=1, type=1, laddr=('127.0.0.1', 5432), raddr=None, status='NONE', pid=1048), + sconn(fd=6, family=1, type=1, laddr=('127.0.0.1', 5432), raddr=None, status='NONE', pid=1049) + ] + + mocked_net_connections.side_effect = [unix_conns, tcp_conns, []] + parser = ProcNetParser(1048) + + self.assertEqual(1, len(parser.sockets['unix'])) + self.assertIn(unix_conns[0], parser.sockets['unix']) + + self.assertEqual(1, len(parser.sockets['tcp'])) + self.assertIn(tcp_conns[0], parser.sockets['tcp']) + + @mock.patch('pg_view.models.parsers.psutil.net_connections') + def test_get_connections_from_sockets_should_return_connections_by_type_when_ok(self, mocked_net_connections): + unix_conns = [ + sconn(fd=6, family=1, type=1, laddr='/var/run/postgres/.s.PGSQL.5432', raddr=None, status='NONE', pid=1048), + ] + + tcp_conns = [ + sconn(fd=6, family=1, type=1, laddr=('127.0.0.1', 5432), raddr=None, status='NONE', pid=1048), + sconn(fd=6, family=1, type=1, laddr=('127.0.0.1', 5432), raddr=None, status='NONE', pid=1049) + ] + tcp6_conns = [ + sconn(fd=6, family=1, type=1, laddr=('127.0.0.1', 5432), raddr=None, status='NONE', pid=1048), + ] + + mocked_net_connections.side_effect = [unix_conns, tcp_conns, tcp6_conns] + parser = ProcNetParser(1048) + expected_connections = { + 'unix': [('/var/run/postgres', '5432')], + 'tcp6': [('127.0.0.1', 5432)], + 'tcp': [('127.0.0.1', 5432)] + } + 
self.assertEqual(expected_connections, parser.get_connections_from_sockets()) + + +class UtilsTest(TestCase): + def test_get_dbname_from_path_should_return_last_when_name(self): + self.assertEqual('foo', get_dbname_from_path('foo')) + + def test_get_dbname_from_path_should_return_last_when_path(self): + self.assertEqual('bar', get_dbname_from_path('/pgsql_bar/9.4/data')) + + +@unittest.skip('psutil') +class ProcWorkerTest(TestCase): + def setUp(self): + super(ProcWorkerTest, self).setUp() + self.worker = ProcWorker() + + def test_detect_with_postmaster_pid_should_return_none_when_version_none(self): + result = self.worker.detect_with_postmaster_pid('', None) + self.assertIsNone(result) + + def test_detect_with_postmaster_pid_should_return_none_when_version_90(self): + result = self.worker.detect_with_postmaster_pid('', 9.0) + self.assertIsNone(result) + + @mock.patch('pg_view.models.parsers.os.access', return_value=False) + @mock.patch('pg_view.models.parsers.logger') + def test_detect_with_postmaster_pid_should_return_none_when_no_access_to_postmaster(self, mocked_logger, + mocked_os_access): + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main/', 9.3) + self.assertIsNone(result) + expected_msg = 'cannot access PostgreSQL cluster directory /var/lib/postgresql/9.3/main/: permission denied' + mocked_logger.warning.assert_called_with(expected_msg) + + @mock.patch('pg_view.models.parsers.readlines_file') + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.logger') + def test_detect_with_postmaster_pid_should_return_none_when_readline_files_error(self, mocked_logger, + mocked_os_access, + mocked_readlines_file): + mocked_readlines_file.side_effect = os.error('Msg error') + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main', 9.3) + self.assertIsNone(result) + expected_msg = 'could not read /var/lib/postgresql/9.3/main/postmaster.pid: Msg error'.format() + mocked_logger.error.assert_called_with(expected_msg) + + @mock.patch('pg_view.utils.open_universal') + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.logger') + def test_detect_with_postmaster_pid_should_return_none_when_postmaster_truncated(self, mocked_logger, + mocked_os_access, + mocked_open_universal): + postmaster_info_broken = os.path.join(TEST_DIR, 'postmaster_pg_files', 'postmaster_info_truncated') + mocked_open_universal.return_value = open(postmaster_info_broken, "rU") + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main', 9.3) + expected_msg = '/var/lib/postgresql/9.3/main/postmaster.pid seems to be truncated, ' \ + 'unable to read connection information' + mocked_logger.error.assert_called_with(expected_msg) + self.assertIsNone(result) + + @mock.patch('pg_view.utils.open_universal') + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.logger') + def test_detect_with_postmaster_pid_should_return_none_when_postmaster_info_missing_data(self, mocked_logger, + mocked_os_access, + mocked_open_universal): + postmaster_info_broken = os.path.join(TEST_DIR, 'postmaster_pg_files', 'postmaster_info_missing_data') + mocked_open_universal.return_value = open(postmaster_info_broken, "rU") + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main', 9.3) + expected_msg = 'could not acquire a socket postmaster at /var/lib/postgresql/9.3/main is listening on' + 
mocked_logger.error.assert_called_with(expected_msg) + self.assertIsNone(result) + + @mock.patch('pg_view.utils.open_universal') + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + def test_detect_with_postmaster_pid_should_fill_tcp_localhost_when_address_star(self, mocked_os_access, + mocked_open_universal): + postmaster_info_ok = os.path.join(TEST_DIR, 'postmaster_pg_files', 'postmaster_info_tcp') + mocked_open_universal.return_value = open(postmaster_info_ok, "rU") + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main', 9.3) + expected_result = { + 'unix': [('/var/run/postgresql', '5432')], + 'tcp': [('127.0.0.1', '5432')] + } + self.assertEqual(expected_result, result) + + @mock.patch('pg_view.utils.open_universal') + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + def test_detect_with_postmaster_pid_should_return_conn_params_when_ok(self, mocked_os_access, + mocked_open_universal): + postmaster_info_ok = os.path.join(TEST_DIR, 'postmaster_pg_files', 'postmaster_info_ok') + mocked_open_universal.return_value = open(postmaster_info_ok, "rU") + result = self.worker.detect_with_postmaster_pid('/var/lib/postgresql/9.3/main', 9.3) + expected_result = { + 'unix': [('/var/run/postgresql', '5432')], + 'tcp': [('localhost', '5432')] + } + self.assertEqual(expected_result, result) + + @mock.patch('pg_view.models.parsers.ProcWorker._get_postgres_processes') + @mock.patch('pg_view.models.parsers.ProcWorker.get_pg_version_from_file') + def test_get_postmasters_directories_should_return_postmaster_when_ppid_not_in_candidates(self, + mocked_get_pg_version_from_file, + mocked_get_postgres_processes): + mocked_get_pg_version_from_file.return_value = connection_params(1056, '9.3', 'db') + process = mock.Mock(pid=1056, name='postgres') + process.ppid.return_value = 1 + process.cwd.return_value = '/var/lib/postgresql/9.3/main' + mocked_get_postgres_processes.return_value = [process] + + result = self.worker.get_postmasters_directories() + expected_result = {'/var/lib/postgresql/9.3/main': connection_params(pid=1056, version='9.3', dbname='db')} + self.assertEqual(expected_result, result) + mocked_get_postgres_processes.assert_called_with() + + @mock.patch('pg_view.models.parsers.ProcWorker._get_postgres_processes') + @mock.patch('pg_view.models.parsers.ProcWorker.get_pg_version_from_file', return_value=None) + def test_get_postmasters_directories_should_ignore_process_when_no_pg_version_file(self, + mocked_get_pg_version_from_file, + mocked_get_postgres_processes): + process = mock.Mock(pid=1056, name='postgres') + process.ppid.return_value = 1 + process.cwd.return_value = '/var/lib/postgresql/9.3/main' + mocked_get_postgres_processes.return_value = [process] + + result = self.worker.get_postmasters_directories() + self.assertEqual({}, result) + mocked_get_postgres_processes.assert_called_with() + + @mock.patch('pg_view.models.parsers.ProcWorker._get_postgres_processes') + @mock.patch('pg_view.models.parsers.ProcWorker.get_pg_version_from_file') + def test_get_postmasters_directories_should_return_two_postmaster_when_ppid_not_in_candidates_and_separate_pd_dir( + self, + mocked_get_pg_version_from_file, + mocked_get_postgres_processes): + mocked_get_pg_version_from_file.side_effect = [ + connection_params(1056, '9.3', 'db'), connection_params(1057, '9.4', 'test_db') + ] + first_process = mock.Mock(pid=1056, name='postgres') + first_process.ppid.return_value = 1 + first_process.cwd.return_value = '/var/lib/postgresql/9.3/main' + + 
second_process = mock.Mock(pid=1057, name='postgres') + second_process.ppid.return_value = 1 + second_process.cwd.return_value = '/var/lib/postgresql/9.4/main' + + mocked_get_postgres_processes.return_value = [first_process, second_process] + + result = self.worker.get_postmasters_directories() + expected_result = { + '/var/lib/postgresql/9.3/main': connection_params(pid=1056, version='9.3', dbname='db'), + '/var/lib/postgresql/9.4/main': connection_params(pid=1057, version='9.4', dbname='test_db') + } + self.assertEqual(expected_result, result) + mocked_get_postgres_processes.assert_called_with() + + @mock.patch('pg_view.models.parsers.ProcWorker._get_postgres_processes') + @mock.patch('pg_view.models.parsers.ProcWorker.get_pg_version_from_file') + def test_get_postmasters_directories_should_exclude_second_process_when_same_pd_dir(self, + mocked_get_pg_version_from_file, + mocked_get_postgres_processes): + mocked_get_pg_version_from_file.side_effect = [ + connection_params(1056, '9.3', 'db'), connection_params(1057, '9.3', 'test_db') + ] + first_process = mock.Mock(pid=1056, name='postgres') + first_process.ppid.return_value = 1 + first_process.cwd.return_value = '/var/lib/postgresql/9.3/main' + + second_process = mock.Mock(pid=1057, name='postgres') + second_process.ppid.return_value = 1 + second_process.cwd.return_value = '/var/lib/postgresql/9.3/main' + + mocked_get_postgres_processes.return_value = [first_process, second_process] + + result = self.worker.get_postmasters_directories() + expected_result = { + '/var/lib/postgresql/9.3/main': connection_params(pid=1056, version='9.3', dbname='db'), + } + self.assertEqual(expected_result, result) + mocked_get_postgres_processes.assert_called_with() + + @mock.patch('pg_view.models.parsers.ProcWorker._get_postgres_processes') + @mock.patch('pg_view.models.parsers.ProcWorker.get_pg_version_from_file') + def test_get_postmasters_directories_should_exclude_process_when_ppid_in_process_candidates(self, + mocked_get_pg_version_from_file, + mocked_get_postgres_processes): + mocked_get_pg_version_from_file.side_effect = [ + connection_params(1056, '9.3', 'db'), connection_params(1057, '9.4', 'test_db') + ] + first_process = mock.Mock(pid=1056, name='postgres') + first_process.ppid.return_value = 1 + first_process.cwd.return_value = '/var/lib/postgresql/9.3/main' + + second_process = mock.Mock(pid=1057, name='postgres') + second_process.ppid.return_value = 1056 + second_process.cwd.return_value = '/var/lib/postgresql/9.4/main' + + mocked_get_postgres_processes.return_value = [first_process, second_process] + + result = self.worker.get_postmasters_directories() + expected_result = { + '/var/lib/postgresql/9.3/main': connection_params(pid=1056, version='9.3', dbname='db'), + } + self.assertEqual(expected_result, result) + mocked_get_postgres_processes.assert_called_with() + + @mock.patch('pg_view.models.parsers.psutil.process_iter') + def test_get_postgres_processes_should_filter_by_name(self, mocked_process_iter): + mocked_process_iter.return_value = [ + mock.Mock(**{'name.return_value': 'postgres', 'pid': 1}), + mock.Mock(**{'name.return_value': 'postmaster', 'pid': 2}), + mock.Mock(**{'name.return_value': 'test', 'pid': 3}), + mock.Mock(**{'name.return_value': 'process', 'pid': 4}) + ] + + postgres_processes = self.worker._get_postgres_processes() + self.assertEqual(2, len(postgres_processes)) + self.assertEqual([1, 2], [p.pid for p in postgres_processes]) + + @mock.patch('pg_view.models.parsers.os.access', return_value=False) + 
@mock.patch('pg_view.models.parsers.logger') + def test_get_pg_version_from_file_should_return_none_when_no_access_to_file(self, mocked_logger, mocked_os_access): + pg_version = self.worker.get_pg_version_from_file(10, '/var/lib/postgresql/9.3/main') + self.assertIsNone(pg_version) + + expected_msg = 'PostgreSQL candidate directory /var/lib/postgresql/9.3/main is missing PG_VERSION file, ' \ + 'have to skip it' + mocked_logger.warning.assert_called_once_with(expected_msg) + + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.read_file', return_value='9.3\n') + @mock.patch('pg_view.models.parsers.logger') + def test_get_pg_version_from_file_should_return_params_when_file_ok(self, mocked_logger, mocked_read_file, + mocked_os_access): + pg_version = self.worker.get_pg_version_from_file(10, '/var/lib/postgresql/9.3/main') + expected_result = connection_params(pid=10, version=9.3, dbname='/var/lib/postgresql/9.3/main') + self.assertEqual(expected_result, pg_version) + + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.read_file', return_value='9') + @mock.patch('pg_view.models.parsers.logger') + def test_get_pg_version_from_file_should_return_none_when_wrong_db_version_in_file(self, mocked_logger, + mocked_read_file, + mocked_os_access): + pg_version = self.worker.get_pg_version_from_file(10, '/var/lib/postgresql/9.3/main') + expected_msg = "PG_VERSION doesn't contain a valid version number: 9" + self.assertIsNone(pg_version) + mocked_logger.error.assert_called_once_with(expected_msg) + + @mock.patch('pg_view.models.parsers.os.access', return_value=True) + @mock.patch('pg_view.models.parsers.read_file', side_effect=os.error) + @mock.patch('pg_view.models.parsers.logger') + def test_get_pg_version_from_file_should_return_none_when_error_on_read_file(self, mocked_logger, + mocked_read_file, mocked_os_access): + pg_version = self.worker.get_pg_version_from_file(10, '/var/lib/postgresql/9.3/main') + expected_msg = 'unable to read version number from PG_VERSION directory /var/lib/postgresql/9.3/main, have to skip it' + self.assertIsNone(pg_version) + mocked_logger.error.assert_called_once_with(expected_msg) diff --git a/tests/test_models_db_client.py b/tests/test_models_db_client.py new file mode 100644 index 0000000..0950a8f --- /dev/null +++ b/tests/test_models_db_client.py @@ -0,0 +1,283 @@ +import unittest +from unittest import TestCase + +import mock +import psycopg2 + +from pg_view.exceptions import NotConnectedError, NoPidConnectionError, DuplicatedConnectionError +from pg_view.models.db_client import read_postmaster_pid, make_cluster_desc, DBConnectionFinder, DBClient, \ + prepare_connection_params + + +class DbClientUtilsTest(TestCase): + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.open', create=True) + def test_read_postmaster_pid_should_return_none_when_error(self, mocked_open, mocked_logger): + mocked_open.side_effect = Exception + data = read_postmaster_pid('/var/lib/postgresql/9.3/main', 'default') + self.assertIsNone(data) + expected_msg = 'Unable to read postmaster.pid for {name} at {wd}\n HINT: make sure Postgres is running' + mocked_logger.error.assert_called_with( + expected_msg.format(name='default', wd='/var/lib/postgresql/9.3/main')) + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.open', create=True) + def test_read_postmaster_pid_should_return_none_when_error_strip(self, mocked_open, 
mocked_logger): + mocked_open.return_value.readline.return_value = [] + data = read_postmaster_pid('/var/lib/postgresql/9.3/main', 'default') + self.assertIsNone(data) + expected_msg = 'Unable to read postmaster.pid for {name} at {wd}\n HINT: make sure Postgres is running' + mocked_logger.error.assert_called_with( + expected_msg.format(name='default', wd='/var/lib/postgresql/9.3/main')) + + @mock.patch('pg_view.models.db_client.open', create=True) + def test_read_postmaster_pid_should_return_pid_when_read_file(self, mocked_open): + mocked_open.return_value.readline.return_value = '123 ' + data = read_postmaster_pid('/var/lib/postgresql/9.3/main', 'default') + self.assertEqual('123', data) + + def test_make_cluster_desc_should_return_dict_when_ok(self): + cluster_desc = make_cluster_desc('name', 'version', 'workdir', 'pid', 'pgcon', 'con') + + self.assertEqual('name', cluster_desc['name']) + self.assertEqual('version', cluster_desc['ver']) + self.assertEqual('workdir', cluster_desc['wd']) + self.assertEqual('pid', cluster_desc['pid']) + self.assertEqual('pgcon', cluster_desc['pgcon']) + self.assertIn('reconnect', cluster_desc) + + def test_build_connection_should_create_full_connection(self): + connection = prepare_connection_params('host', '5432', 'user', 'database') + self.assertEqual( + {'host': 'host', 'port': '5432', 'user': 'user', 'database': 'database'}, connection) + + def test_build_connection_should_return_only_existing_parameters(self): + connection = prepare_connection_params('host', '5432') + self.assertEqual({'host': 'host', 'port': '5432'}, connection) + + +class DBConnectionFinderTest(TestCase): + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.DBConnectionFinder.detect_with_proc_net', return_value=None) + @mock.patch('pg_view.models.db_client.ProcWorker') + def test_detect_db_connection_arguments_should_return_none_when_no_conn_args(self, mocked_proc_worker, + mocked_detect_with_proc_net, + mocked_logger): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + mocked_proc_worker.return_value.detect_with_postmaster_pid.return_value = None + conn_args = finder.detect_db_connection_arguments() + self.assertIsNone(conn_args) + mocked_logger.error.assert_called_with( + 'unable to detect connection parameters for the PostgreSQL cluster at workdir') + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.DBConnectionFinder.detect_with_proc_net', return_value=None) + @mock.patch('pg_view.models.db_client.ProcWorker') + def test_detect_db_connection_arguments_should_return_none_when_not_pickable_conn_arguments(self, + mocked_proc_worker, + mocked_detect_with_proc_net, + mocked_logger): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + conn_params = {'unix_wrong': [('/var/run/postgresql', '5432')], 'tcp_wrong': [('localhost', '5432')]} + mocked_proc_worker.return_value.detect_with_postmaster_pid.return_value = conn_params + conn_args = finder.detect_db_connection_arguments() + self.assertIsNone(conn_args) + expected_msg = "unable to connect to PostgreSQL cluster at workdir using any of the detected connection " \ + "options: {0}".format(conn_params) + mocked_logger.error.assert_called_with(expected_msg) + + @mock.patch('pg_view.models.db_client.DBConnectionFinder.detect_with_proc_net', return_value=None) + @mock.patch('pg_view.models.db_client.DBConnectionFinder.can_connect_with_connection_arguments', return_value=True) + 
@mock.patch('pg_view.models.db_client.ProcWorker') + def test_detect_db_connection_arguments_should_return_params_when_detect_with_postmaster_pid(self, + mocked_proc_worker, + mocked_can_connect, + mocked_detect_with_proc_net): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + mocked_proc_worker.return_value.detect_with_postmaster_pid.return_value = { + 'unix': [('/var/run/postgresql', '5432')], + 'tcp': [('localhost', '5432')] + } + conn_args = finder.detect_db_connection_arguments() + self.assertEqual({'host': '/var/run/postgresql', 'port': '5432'}, conn_args) + + @mock.patch('pg_view.models.db_client.DBConnectionFinder.detect_with_proc_net') + @mock.patch('pg_view.models.db_client.DBConnectionFinder.can_connect_with_connection_arguments', return_value=True) + def test_detect_db_connection_arguments_should_return_params_when_detect_with_proc_net(self, mocked_can_connect, + mocked_detect_with_proc_net): + mocked_detect_with_proc_net.return_value = { + 'unix': [('/var/run/postgresql', '5432')], 'tcp': [('127.0.0.1', 5432)] + } + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + conn_args = finder.detect_db_connection_arguments() + self.assertEqual({'host': '/var/run/postgresql', 'port': '5432'}, conn_args) + + def test_pick_connection_arguments_should_return_empty_when_unknown_conn_types(self): + conn_args = {'unix1': [('/var/run/postgresql', '5432')], 'tcp1': [('127.0.0.1', 5432)]} + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + available_connection = finder.pick_connection_arguments(conn_args) + self.assertEqual({}, available_connection) + + @mock.patch('pg_view.models.db_client.DBConnectionFinder.can_connect_with_connection_arguments') + def test_pick_connection_arguments_should_return_first_available_conn_when_multiple(self, mocked_can_connect): + mocked_can_connect.side_effect = [False, True] + conn_args = { + 'unix': [('/var/run/postgresql', '5432')], 'tcp': [('127.0.0.1', 5431)] + } + + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + available_connection = finder.pick_connection_arguments(conn_args) + self.assertEqual({'host': '127.0.0.1', 'port': 5431}, available_connection) + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.psycopg2.connect', side_effect=psycopg2.OperationalError) + def test_can_connect_with_connection_arguments_should_return_false_when_no_connection(self, mocked_psycopg2_connect, + mocked_logger): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + connection_builder = prepare_connection_params(host='127.0.0.1', port=5431) + can_connect = finder.can_connect_with_connection_arguments(connection_builder) + self.assertFalse(can_connect) + mocked_psycopg2_connect.assert_called_once_with(host='127.0.0.1', port=5431) + + @mock.patch('pg_view.models.db_client.psycopg2.connect') + def test_can_connect_with_connection_arguments_should_return_true_when_connection_ok(self, mocked_psycopg2_connect): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + connection_builder = prepare_connection_params(host='127.0.0.1', port=5431) + can_connect = finder.can_connect_with_connection_arguments(connection_builder) + self.assertTrue(can_connect) + mocked_psycopg2_connect.assert_called_once_with(host='127.0.0.1', port=5431) + + @unittest.skip('psutil') + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.ProcNetParser') + def 
test_detect_with_proc_net_should_return_none_when_no_connections_from_sockets(self, mocked_proc_net_parser, + mocked_logger): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + mocked_proc_net_parser.return_value.get_connections_from_sockets.return_value = {} + conn_param = finder.detect_with_proc_net() + + self.assertIsNone(conn_param) + expected_msg = 'could not detect connection string from /proc/net for postgres process 1049' + mocked_logger.error.assert_called_once_with(expected_msg) + + @unittest.skip('psutil') + @mock.patch('pg_view.models.db_client.ProcNetParser') + def test_detect_with_proc_net_should_return_result_when_connections_from_socket(self, mocked_proc_net_parser): + finder = DBConnectionFinder('workdir', 1049, '9.3', 'username', 'atlas') + mocked_proc_net_parser.return_value.get_connections_from_sockets.return_value = { + 'unix': [('/var/run/postgresql', '5432')]} + conn_param = finder.detect_with_proc_net() + + self.assertEqual({'unix': [('/var/run/postgresql', '5432')]}, conn_param) + + +class DBClientTest(TestCase): + def test_from_config_should_init_class_properly(self): + config = {'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'} + client = DBClient.from_config(config) + self.assertIsInstance(client, DBClient) + self.assertEqual(config, client.connection_params) + + def test_from_options_should_init_class_properly(self): + options = mock.Mock(host='localhost', port='5432', username='user', dbname='db') + client = DBClient.from_options(options) + self.assertIsInstance(client, DBClient) + self.assertEqual( + {'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}, + client.connection_params + ) + + @mock.patch('pg_view.models.db_client.DBConnectionFinder') + def test_from_postmasters_should_return_none_when_no_detected_connection(self, mocked_db_finder): + mocked_db_finder.return_value.detect_db_connection_arguments.return_value = None + options = mock.Mock(username='username', dbname='db') + client = DBClient.from_postmasters('/var/lib/postgresql/9.3/main', 1056, 9.3, options) + self.assertIsNone(client) + mocked_db_finder.assert_called_once_with('/var/lib/postgresql/9.3/main', 1056, 9.3, 'username', 'db') + + @mock.patch('pg_view.models.db_client.DBConnectionFinder') + def test_from_postmasters_should_return_instance_when_detected_connection(self, mocked_db_finder): + mocked_db_finder.return_value.detect_db_connection_arguments.return_value = { + 'host': 'localhost', 'port': '5432', 'user': 'user1', 'database': 'db1'} + options = mock.Mock(username='username', dbname='db') + client = DBClient.from_postmasters('/var/lib/postgresql/9.3/main', 1056, 9.3, options) + self.assertIsInstance(client, DBClient) + self.assertEqual( + {'host': 'localhost', 'port': '5432', 'user': 'username', 'database': 'db'}, + client.connection_params + ) + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.psycopg2') + def test_establish_user_defined_connection_should_raise_error_when_cant_connect(self, mocked_psycopg2, + mocked_logger): + mocked_psycopg2.connect.side_effect = Exception + client = DBClient.from_config({'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}) + with self.assertRaises(NotConnectedError): + client.establish_user_defined_connection('instance', []) + + expected_msg = "failed to establish connection to instance via {0}".format(client.connection_params) + mocked_logger.error.assert_has_calls([mock.call(expected_msg), mock.call('PostgreSQL exception: 
')]) + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.psycopg2') + @mock.patch('pg_view.models.db_client.read_postmaster_pid') + def test_establish_user_defined_connection_should_raise_error_when_not_pid_postmaster(self, + mocked_read_postmaster_pid, + mocked_psycopg2, + mocked_logger): + mocked_psycopg2.connect.return_value = mock.Mock( + **{'cursor.return_value': mock.MagicMock(), 'server_version': 93}) + mocked_read_postmaster_pid.return_value = None + + client = DBClient.from_config({'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}) + with self.assertRaises(NoPidConnectionError): + client.establish_user_defined_connection('default', []) + + expected_msg = "failed to read pid of the postmaster on {0}".format(client.connection_params) + mocked_logger.error.assert_called_once_with(expected_msg) + + @mock.patch('pg_view.models.db_client.logger') + @mock.patch('pg_view.models.db_client.psycopg2') + @mock.patch('pg_view.models.db_client.read_postmaster_pid') + def test_establish_user_defined_connection_should_raise_error_when_duplicated_connections(self, + mocked_read_postmaster_pid, + mocked_psycopg2, + mocked_logger): + mocked_psycopg2.connect.return_value = mock.Mock( + **{'cursor.return_value': mock.MagicMock(), 'server_version': 93}) + mocked_read_postmaster_pid.return_value = 10 + + client = DBClient.from_config({'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}) + with self.assertRaises(DuplicatedConnectionError): + client.establish_user_defined_connection('default', [{'pid': 10, 'name': 'cluster1'}]) + + expected_msg = "duplicate connection options detected for databases default and cluster1, same pid 10, skipping default" + mocked_logger.error.assert_called_once_with(expected_msg) + + @mock.patch('pg_view.models.db_client.psycopg2') + @mock.patch('pg_view.models.db_client.read_postmaster_pid') + def test_establish_user_defined_connection_should_create_cluster_desc_when_ok(self, mocked_read_postmaster_pid, + mocked_psycopg2): + cursor = mock.MagicMock(**{'fetchone.return_value': ['/var/lib/postgresql/9.3/main']}) + pg_con = mock.Mock(**{'cursor.return_value': cursor, 'server_version': 90314}) + mocked_psycopg2.connect.return_value = pg_con + mocked_read_postmaster_pid.return_value = 10 + + client = DBClient.from_config({'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}) + expected_cluster_desc = { + 'name': 'default', 'ver': 9.3, 'wd': '/var/lib/postgresql/9.3/main', 'pid': 10, 'pgcon': pg_con} + cluster_desc = client.establish_user_defined_connection('default', [{'pid': 11, 'name': 'cluster1'}]) + cluster_desc.pop('reconnect') + self.assertDictEqual( + expected_cluster_desc, cluster_desc) + + def test_execute_query_and_fetchone_should_call_show_command(self): + client = DBClient.from_config({'host': 'localhost', 'port': '5432', 'user': 'user', 'database': 'db'}) + cursor = mock.Mock(**{'fetchone.return_value': ['/var/lib/postgresql/9.3/main']}) + pg_conn = mock.Mock(**{'cursor.return_value': cursor}) + work_directory = client.execute_query_and_fetchone(pg_conn) + cursor.execute.assert_called_once_with('SHOW DATA_DIRECTORY') + cursor.close.assert_called_once_with() + pg_conn.commit.assert_called_once_with() + self.assertEqual('/var/lib/postgresql/9.3/main', work_directory) diff --git a/tests/test_models_formatters.py b/tests/test_models_formatters.py new file mode 100644 index 0000000..7c0f2e8 --- /dev/null +++ b/tests/test_models_formatters.py @@ -0,0 +1,165 @@ +from datetime 
import timedelta +from unittest import TestCase + +import mock + +from pg_view.collectors.host_collector import HostStatCollector +from pg_view.collectors.pg_collector import PgStatCollector +from pg_view.models.formatters import StatusFormatter, FnFormatter + + +class StatusFormatterTest(TestCase): + def setUp(self): + super(StatusFormatterTest, self).setUp() + self.cluster = { + 'ver': 9.3, + 'name': '/var/lib/postgresql/9.3/main', + 'pid': 1049, + 'reconnect': mock.Mock(), + 'pgcon': mock.MagicMock(), + } + + def test_load_avg_state_should_return_empty_when_no_load_avg(self): + collector = HostStatCollector() + formatter = StatusFormatter(collector) + row = ['', '2 days, 15:33:30', 'ubuntu-trusty-64', 1, 'Linux 3.13.0-100-generic'] + col = {'warning': 5, 'critical': 20, 'out': 'load average'} + self.assertEqual({}, formatter.load_avg_state(row, col)) + + def test_load_avg_state_should_return_every_state_when_warning_critical_ok(self): + collector = HostStatCollector() + formatter = StatusFormatter(collector) + row = ['0.0 5.01 20.05', '2 days, 15:33:30', 'ubuntu-trusty-64', 1, 'Linux 3.13.0-100-generic'] + col = {'warning': 5, 'critical': 20, 'out': 'load average'} + self.assertEqual({0: 0, 1: 1, 2: 2}, formatter.load_avg_state(row, col)) + + def test_age_status_fn_should_return_critical_when_age_bigger_than_critical(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '20:52:05', + 'postgres', 'postgres', False, 'idle in transaction for 20:51:17' + ] + col = {'warning': 300, 'critical': 500, 'out': 'age'} + self.assertEqual({-1: 2}, formatter.age_status_fn(row, col)) + + def test_age_status_fn_should_return_warning_when_age_bigger_than_warning(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '0:06:05', + 'postgres', 'postgres', False, 'idle in transaction for 20:51:17' + ] + col = {'warning': 300, 'critical': 500, 'out': 'age'} + self.assertEqual({-1: 1}, formatter.age_status_fn(row, col)) + + def test_age_status_fn_should_return_ok_when_age_less_than_warning(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '0:04:05', + 'postgres', 'postgres', False, 'idle in transaction for 20:51:17' + ] + col = {'warning': 300, 'critical': 500, 'out': 'age'} + self.assertEqual({-1: 0}, formatter.age_status_fn(row, col)) + + def test_query_status_fn_should_return_critical_when_waiting(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '21:05:45', 'postgres', + 'postgres', True, 'idle in transaction for 21:04:57' + ] + col = {'warning': 'idle in transaction', 'critical': 'locked', 'out': 'query'} + self.assertEqual({-1: 2}, formatter.query_status_fn(row, col)) + + def test_query_status_fn_should_return_warning_when_idle_in_transaction(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '21:05:45', 'postgres', + 'postgres', False, 'idle in transaction ' + ] + col = {'warning': 'idle in transaction', 'critical': 'locked', 'out': 
'query'} + self.assertEqual({-1: 1}, formatter.query_status_fn(row, col)) + + def test_query_status_fn_should_return_warning_when_default_warning(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '21:05:45', 'postgres', + 'postgres', False, '! ' + ] + col = {'critical': 'locked', 'out': 'query'} + self.assertEqual({-1: 1}, formatter.query_status_fn(row, col)) + + def test_query_status_fn_should_return_ok_when_no_warning(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = StatusFormatter(collector) + row = [ + 11139, None, 'backend', None, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.9, '21:05:45', 'postgres', + 'postgres', False, 'ok' + ] + col = {'warning': 'idle in transaction', 'critical': 'locked', 'out': 'query'} + self.assertEqual({-1: 0}, formatter.query_status_fn(row, col)) + + +class FnFormatterTest(TestCase): + def setUp(self): + super(FnFormatterTest, self).setUp() + self.cluster = { + 'ver': 9.3, + 'name': '/var/lib/postgresql/9.3/main', + 'pid': 1049, + 'reconnect': mock.Mock(), + 'pgcon': mock.MagicMock(), + } + + def test_idle_format_fn_should_return_text_when_no_matches(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_idle = formatter.idle_format_fn('return text') + self.assertEqual('return text', formatted_idle) + + def test_idle_format_fn_should_return_formatted_for_version_bigger_than_92(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_idle = formatter.idle_format_fn('idle in transaction 1') + self.assertEqual('idle in transaction for 00:01', formatted_idle) + + def test_idle_format_fn_should_return_formatted_for_version_less_than_92(self): + self.cluster['ver'] = 9.1 + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_idle = formatter.idle_format_fn('idle in transaction 1') + self.assertEqual('idle in transaction 00:01 since the last query start', formatted_idle) + + def test_kb_pretty_print_should_return_formatted_when_mb(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_kb = formatter.kb_pretty_print(501708) + self.assertEqual('489.9MB', formatted_kb) + + def test_kb_pretty_print_should_return_formatted_when_kb(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_kb = formatter.kb_pretty_print(1024) + self.assertEqual('1024KB', formatted_kb) + + def test_time_interval_pretty_print_should_return_formatted_when_start_time_number(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_time = formatter.time_pretty_print(68852.0) + self.assertEqual('19:07:32', formatted_time) + + def test_time_interval_pretty_print_should_return_formatted_when_start_time_timedelta(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + formatted_time = formatter.time_pretty_print(timedelta(seconds=30)) + self.assertEqual('00:30', formatted_time) + + def test_time_interval_pretty_print_should_raise_error_when_non_valid_type(self): + collector = PgStatCollector.from_cluster(self.cluster, 1049) + formatter = FnFormatter(collector) + with self.assertRaises(ValueError): + 
formatter.time_pretty_print('None') diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..6adab4f --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,148 @@ +import os +import subprocess +from unittest import TestCase + +import mock + +from tests.common import TEST_DIR +from pg_view.exceptions import InvalidConnectionParamError +from pg_view.models.parsers import connection_params +from pg_view.utils import UnitConverter, read_configuration, validate_autodetected_conn_param, \ + exec_command_with_output, output_method_is_valid + + +class UnitConverterTest(TestCase): + def test_kb_to_mbytes_should_convert_when_ok(self): + self.assertEqual(3, UnitConverter.kb_to_mbytes(3072)) + + def test_kb_to_mbytes_should_return_none_when_none(self): + self.assertIsNone(UnitConverter.kb_to_mbytes(None)) + + def test_sectors_to_mbytes_should_convert_when_ok(self): + self.assertEqual(10, UnitConverter.sectors_to_mbytes(20480)) + + def test_sectors_to_mbytes_should_return_none_when_none(self): + self.assertIsNone(UnitConverter.sectors_to_mbytes(None)) + + def test_bytes_to_mbytes_should_convert_when_ok(self): + self.assertEqual(2, UnitConverter.bytes_to_mbytes(2097152)) + + def test_bytes_to_mbytes_should_return_none_when_none(self): + self.assertIsNone(UnitConverter.bytes_to_mbytes(None)) + + @mock.patch('pg_view.consts.USER_HZ', 100) + def test_ticks_to_seconds_should_convert_when_ok(self): + self.assertEqual(5, UnitConverter.ticks_to_seconds(500)) + + @mock.patch('pg_view.consts.USER_HZ', 100) + def test_ticks_to_seconds_should_return_none_when_none(self): + self.assertIsNone(UnitConverter.ticks_to_seconds(None)) + + def test_time_diff_to_percent_should_convert_when_ok(self): + self.assertEqual(1000.0, UnitConverter.time_diff_to_percent(10)) + + def test_time_diff_to_percent_should_return_none_when_none(self): + self.assertIsNone(UnitConverter.time_diff_to_percent(None)) + + +class ReadConfigurationTest(TestCase): + def test_read_configuration_should_return_none_when_not_config_file_name(self): + self.assertIsNone(read_configuration(None)) + + @mock.patch('pg_view.utils.logger') + def test_read_configuration_should_return_none_when_cannot_read_file(self, mocked_logger): + config_file_path = os.path.join(TEST_DIR, 'not-existing') + self.assertIsNone(read_configuration(config_file_path)) + expected_msg = 'Configuration file {0} is empty or not found'.format(config_file_path) + mocked_logger.error.assert_called_with(expected_msg) + + def test_read_configuration_should_return_config_data_when_config_file_ok(self): + config_file_path = os.path.join(TEST_DIR, 'configs', 'default_ok.cfg') + expected_conf = {'testdb': { + 'host': '/var/run/postgresql', 'port': '5432', 'dbname': 'postgres', 'user': 'username'} + } + config = read_configuration(config_file_path) + self.assertDictEqual(expected_conf, config) + + def test_read_configuration_should_skip_empty_options_when_not_exist(self): + config_file_path = os.path.join(TEST_DIR, 'configs', 'default_with_none_user.cfg') + expected_conf = {'testdb': { + 'host': '/var/run/postgresql', 'port': '5432', 'dbname': 'postgres'} + } + config = read_configuration(config_file_path) + self.assertDictEqual(expected_conf, config) + + +class ValidateConnParamTest(TestCase): + def test_validate_autodetected_conn_param_should_return_none_when_no_user_dbname(self): + self.assertIsNone(validate_autodetected_conn_param(None, '9.3', '/var/run/postgresql', {})) + + def 
test_validate_autodetected_conn_param_should_raise_invalid_param_when_different_dbnames(self): + conn_parameters = connection_params(pid=1049, version=9.3, dbname='/var/lib/postgresql/9.3/main') + with self.assertRaises(InvalidConnectionParamError): + validate_autodetected_conn_param('/var/lib/postgresql/9.5/main', 9.3, '/var/run/postgresql', + conn_parameters) + + def test_validate_autodetected_conn_param_should_raise_invalid_param_when_no_result_work_dir(self): + conn_parameters = connection_params(pid=1049, version=9.3, dbname='/var/lib/postgresql/9.3/main') + with self.assertRaises(InvalidConnectionParamError): + validate_autodetected_conn_param('/var/lib/postgresql/9.3/main', 9.3, '', conn_parameters) + + def test_validate_autodetected_conn_param_should_raise_invalid_param_when_no_connection_params_pid(self): + conn_parameters = connection_params(pid=None, version=9.3, dbname='/var/lib/postgresql/9.3/main') + with self.assertRaises(InvalidConnectionParamError): + validate_autodetected_conn_param( + '/var/lib/postgresql/9.3/main', 9.3, '/var/run/postgresql', conn_parameters) + + def test_validate_autodetected_conn_param_should_raise_invalid_param_when_different_versions(self): + conn_parameters = connection_params(pid=2, version=9.3, dbname='/var/lib/postgresql/9.3/main') + with self.assertRaises(InvalidConnectionParamError): + validate_autodetected_conn_param( + '/var/lib/postgresql/9.3/main', 9.5, '/var/run/postgresql', conn_parameters) + + +class CommandExecutorTest(TestCase): + @mock.patch('pg_view.utils.logger') + @mock.patch('pg_view.utils.subprocess.Popen') + def test_exec_command_with_output_should_log_info_when_cmd_return_not_zero_exit_code(self, mocked_popen, + mocked_logger): + cmdline = 'ps -o pid --ppid 1049 --noheaders' + proc = mock.MagicMock() + proc.wait.return_value = 1 + proc.stdout.read.return_value = ' 1051\n 1052\n 1053\n 1054\n 1055\n 11139\n 26585\n' + mocked_popen.return_value = proc + ret, stdout = exec_command_with_output(cmdline) + mocked_popen.assert_called_with(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + mocked_logger.info.assert_called_with( + 'The command ps -o pid --ppid 1049 --noheaders returned a non-zero exit code') + + self.assertEqual(1, ret) + self.assertEqual('1051\n 1052\n 1053\n 1054\n 1055\n 11139\n 26585', stdout) + + @mock.patch('pg_view.utils.logger') + @mock.patch('pg_view.utils.subprocess.Popen') + def test_exec_command_with_output_should_return_ret_stdout_when_cmd_return_zero_exit_code(self, mocked_popen, + mocked_logger): + cmdline = 'ps -o pid --ppid 1049 --noheaders' + proc = mock.MagicMock() + proc.wait.return_value = 0 + proc.stdout.read.return_value = ' 1051\n 1052\n 1053\n 1054\n 1055\n 11139\n 26585\n' + mocked_popen.return_value = proc + ret, stdout = exec_command_with_output(cmdline) + mocked_popen.assert_called_with(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + self.assertFalse(mocked_logger.info.called) + + self.assertEqual(0, ret) + self.assertEqual('1051\n 1052\n 1053\n 1054\n 1055\n 11139\n 26585', stdout) + + +class ValidatorTest(TestCase): + def test_output_method_is_valid_should_return_true_when_valid(self): + ALLOWED_OUTPUTS = ['console', 'json', 'curses'] + for output in ALLOWED_OUTPUTS: + self.assertTrue(output_method_is_valid(output)) + + def test_output_method_is_valid_should_return_false_when_invalid(self): + INVALID_OUTPUTS = ['test', 'foo', 1] + for output in INVALID_OUTPUTS: + self.assertFalse(output_method_is_valid(output))