From efbbe6310b96c28c2ee16ddc2bc84be862070aee Mon Sep 17 00:00:00 2001 From: Ilia Kurenkov Date: Tue, 17 Sep 2024 17:14:51 +0200 Subject: [PATCH 1/2] Remove six library from some integrations --- datadog_checks_dev/datadog_checks/dev/_env.py | 8 +- disk/datadog_checks/disk/disk.py | 9 +- .../datadog_checks/ecs_fargate/ecs_fargate.py | 13 +- .../kube_apiserver_metrics.py | 4 +- .../datadog_checks/mapreduce/mapreduce.py | 16 +- mcache/datadog_checks/mcache/mcache.py | 7 +- .../mesos_master/mesos_master.py | 10 +- mongo/datadog_checks/mongo/collectors/base.py | 13 +- .../mongo/collectors/coll_stats.py | 3 +- mongo/datadog_checks/mongo/collectors/top.py | 5 +- .../datadog_checks/mysql/collection_utils.py | 6 +- mysql/datadog_checks/mysql/innodb_metrics.py | 178 +++++++++--------- mysql/datadog_checks/mysql/mysql.py | 27 ++- nginx/datadog_checks/nginx/nginx.py | 20 +- redisdb/datadog_checks/redisdb/redisdb.py | 16 +- riakcs/datadog_checks/riakcs/riakcs.py | 7 +- sap_hana/datadog_checks/sap_hana/sap_hana.py | 4 +- spark/datadog_checks/spark/constants.py | 8 +- squid/datadog_checks/squid/squid.py | 4 +- .../datadog_checks/system_core/system_core.py | 5 +- .../datadog_checks/twemproxy/twemproxy.py | 5 +- .../datadog_checks/twistlock/twistlock.py | 3 +- varnish/datadog_checks/varnish/varnish.py | 22 +-- vsphere/datadog_checks/vsphere/api_rest.py | 3 +- vsphere/datadog_checks/vsphere/config.py | 9 +- .../vsphere/legacy/mor_cache.py | 8 +- vsphere/datadog_checks/vsphere/utils.py | 3 +- vsphere/datadog_checks/vsphere/vsphere.py | 9 +- vsphere/tests/legacy/utils.py | 3 +- yarn/datadog_checks/yarn/yarn.py | 12 +- zk/datadog_checks/zk/zk.py | 21 +-- 31 files changed, 198 insertions(+), 263 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/_env.py b/datadog_checks_dev/datadog_checks/dev/_env.py index 35b761a785d05..84f96bcab491a 100644 --- a/datadog_checks_dev/datadog_checks/dev/_env.py +++ b/datadog_checks_dev/datadog_checks/dev/_env.py @@ -5,8 +5,6 @@ import os from base64 import urlsafe_b64decode, urlsafe_b64encode -from six import iteritems - DDTRACE_OPTIONS_LIST = [ 'DD_TAGS', 'DD_TRACE*', @@ -56,7 +54,7 @@ def e2e_testing(): def set_env_vars(env_vars): - for key, value in iteritems(env_vars): + for key, value in env_vars.items(): key = '{}{}'.format(E2E_ENV_VAR_PREFIX, key) os.environ[key] = value @@ -68,11 +66,11 @@ def remove_env_vars(env_vars): def get_env_vars(raw=False): if raw: - return {key: value for key, value in iteritems(os.environ) if key.startswith(E2E_ENV_VAR_PREFIX)} + return {key: value for key, value in os.environ.items() if key.startswith(E2E_ENV_VAR_PREFIX)} else: env_vars = {} - for key, value in iteritems(os.environ): + for key, value in os.environ.items(): _, found, ev = key.partition(E2E_ENV_VAR_PREFIX) if found: # Normalize casing for Windows diff --git a/disk/datadog_checks/disk/disk.py b/disk/datadog_checks/disk/disk.py index 5999e32c9dc33..e4574f89b248e 100644 --- a/disk/datadog_checks/disk/disk.py +++ b/disk/datadog_checks/disk/disk.py @@ -9,7 +9,6 @@ from xml.etree import ElementTree as ET import psutil -from six import iteritems, string_types from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative from datadog_checks.base.utils.platform import Platform @@ -151,7 +150,7 @@ def check(self, _): self.log.debug('Passed: %s', part.device) tags = self._get_tags(part) - for metric_name, metric_value in iteritems(self._collect_part_metrics(part, disk_usage)): + for metric_name, metric_value in self._collect_part_metrics(part, 
disk_usage).items(): self.gauge(metric_name, metric_value, tags=tags) # Add in a disk read write or read only check @@ -324,7 +323,7 @@ def _collect_inodes_metrics(self, mountpoint): return metrics def collect_latency_metrics(self): - for disk_name, disk in iteritems(psutil.disk_io_counters(perdisk=True)): + for disk_name, disk in psutil.disk_io_counters(perdisk=True).items(): self.log.debug('IO Counters: %s -> %s', disk_name, disk) try: metric_tags = [] if self._custom_tags is None else self._custom_tags[:] @@ -389,7 +388,7 @@ def _compile_pattern_filters(self, instance): def _compile_valid_patterns(self, patterns, casing=IGNORE_CASE, extra_patterns=None): valid_patterns = [] - if isinstance(patterns, string_types): + if isinstance(patterns, str): patterns = [patterns] else: patterns = list(patterns) @@ -419,7 +418,7 @@ def _compile_tag_re(self): Compile regex strings from device_tag_re option and return list of compiled regex/tag pairs """ device_tag_list = [] - for regex_str, tags in iteritems(self._device_tag_re): + for regex_str, tags in self._device_tag_re.items(): try: device_tag_list.append([re.compile(regex_str, IGNORE_CASE), [t.strip() for t in tags.split(',')]]) except TypeError: diff --git a/ecs_fargate/datadog_checks/ecs_fargate/ecs_fargate.py b/ecs_fargate/datadog_checks/ecs_fargate/ecs_fargate.py index 9d55916a926cc..4ae362cc879b8 100644 --- a/ecs_fargate/datadog_checks/ecs_fargate/ecs_fargate.py +++ b/ecs_fargate/datadog_checks/ecs_fargate/ecs_fargate.py @@ -7,7 +7,6 @@ import requests from dateutil import parser -from six import iteritems from datadog_checks.base import AgentCheck from datadog_checks.base.utils.common import round_value @@ -192,7 +191,7 @@ def check(self, _): ## Ephemeral Storage Metrics if 'EphemeralStorageMetrics' in metadata: es_metrics = metadata['EphemeralStorageMetrics'] - for field_name, metric_value in iteritems(es_metrics): + for field_name, metric_value in es_metrics.items(): metric_name = EPHEMERAL_STORAGE_GAUGE_METRICS.get(field_name) self.gauge(metric_name, metric_value, task_tags) @@ -229,7 +228,7 @@ def check(self, _): self.service_check('fargate_check', AgentCheck.WARNING, message=msg, tags=custom_tags) self.log.warning(msg, exc_info=True) - for container_id, container_stats in iteritems(stats): + for container_id, container_stats in stats.items(): if container_id not in exlcuded_cid: self.submit_perf_metrics(container_tags, container_id, container_stats) @@ -337,7 +336,7 @@ def submit_perf_metrics(self, container_tags, container_id, container_stats): self.gauge('ecs.fargate.mem.limit', value, tags) # I/O metrics - for blkio_cat, metric_name in iteritems(IO_METRICS): + for blkio_cat, metric_name in IO_METRICS.items(): read_counter = write_counter = 0 blkio_stats = container_stats.get("blkio_stats", {}).get(blkio_cat) @@ -363,13 +362,13 @@ def submit_perf_metrics(self, container_tags, container_id, container_stats): # Network metrics networks = container_stats.get('networks', {}) - for network_interface, network_stats in iteritems(networks): + for network_interface, network_stats in networks.items(): network_tags = tags + ["interface:{}".format(network_interface)] - for field_name, metric_name in iteritems(NETWORK_GAUGE_METRICS): + for field_name, metric_name in NETWORK_GAUGE_METRICS.items(): metric_value = network_stats.get(field_name) if metric_value is not None: self.gauge(metric_name, metric_value, network_tags) - for field_name, metric_name in iteritems(NETWORK_RATE_METRICS): + for field_name, metric_name in 
NETWORK_RATE_METRICS.items(): metric_value = network_stats.get(field_name) if metric_value is not None: self.rate(metric_name, metric_value, network_tags) diff --git a/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/kube_apiserver_metrics.py b/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/kube_apiserver_metrics.py index e8287b258a261..d07d8b168d411 100644 --- a/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/kube_apiserver_metrics.py +++ b/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/kube_apiserver_metrics.py @@ -4,8 +4,6 @@ from copy import deepcopy from re import match, search, sub -from six import iteritems - from datadog_checks.base.checks.openmetrics import OpenMetricsBaseCheck from datadog_checks.base.errors import CheckException @@ -200,7 +198,7 @@ def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monot # Explicit shallow copy of the instance tags _tags = list(scraper_config['custom_tags']) - for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]): + for label_name, label_value in sample[self.SAMPLE_LABELS].items(): _tags.append('{}:{}'.format(label_name, label_value)) if gauge: # submit raw metric diff --git a/mapreduce/datadog_checks/mapreduce/mapreduce.py b/mapreduce/datadog_checks/mapreduce/mapreduce.py index 893a6f3c63d48..87b54c1845e4a 100644 --- a/mapreduce/datadog_checks/mapreduce/mapreduce.py +++ b/mapreduce/datadog_checks/mapreduce/mapreduce.py @@ -1,12 +1,10 @@ # (C) Datadog, Inc. 2010-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) - +from urllib.parse import urljoin, urlsplit, urlunsplit from requests.exceptions import ConnectionError, HTTPError, InvalidURL, Timeout from simplejson import JSONDecodeError -from six import iteritems, itervalues -from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative from datadog_checks.mapreduce.metrics import ( @@ -93,7 +91,7 @@ def check(self, instance): # Report success after gathering all metrics from Application Master if running_jobs: - job_id, metrics = next(iteritems(running_jobs)) + job_id, metrics = next(iter(running_jobs.items())) am_address = self._get_url_base(metrics['tracking_url']) self.service_check( @@ -249,7 +247,7 @@ def _mapreduce_job_metrics(self, running_apps, addl_tags): """ running_jobs = {} - for app_name, tracking_url in itervalues(running_apps): + for app_name, tracking_url in running_apps.values(): metrics_json = self._rest_request_to_json( tracking_url, self.MAPREDUCE_JOBS_PATH, self.MAPREDUCE_SERVICE_CHECK @@ -289,7 +287,7 @@ def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags): """ Get custom metrics specified for each counter """ - for job_metrics in itervalues(running_jobs): + for job_metrics in running_jobs.values(): job_name = job_metrics['job_name'] # Check if the job_name exist in the custom metrics @@ -344,7 +342,7 @@ def _mapreduce_task_metrics(self, running_jobs, addl_tags): Get metrics for each MapReduce task Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task """ - for job_stats in itervalues(running_jobs): + for job_stats in running_jobs.values(): metrics_json = self._rest_request_to_json( job_stats['tracking_url'], 'tasks', self.MAPREDUCE_SERVICE_CHECK, tags=addl_tags @@ -376,7 +374,7 @@ def _set_metrics_from_json(self, metrics_json, metrics, tags): """ Parse the JSON response and set the metrics """ - for status, (metric_name, metric_type) in
iteritems(metrics): + for status, (metric_name, metric_type) in metrics.items(): metric_status = metrics_json.get(status) if metric_status is not None: @@ -415,7 +413,7 @@ def _rest_request_to_json(self, address, object_path, service_name=None, tags=No # Add kwargs as arguments if kwargs: - query = '&'.join(['{}={}'.format(key, value) for key, value in iteritems(kwargs)]) + query = '&'.join(['{}={}'.format(key, value) for key, value in kwargs.items()]) url = urljoin(url, '?' + query) try: diff --git a/mcache/datadog_checks/mcache/mcache.py b/mcache/datadog_checks/mcache/mcache.py index 92d42af29c083..07e945c7ea161 100644 --- a/mcache/datadog_checks/mcache/mcache.py +++ b/mcache/datadog_checks/mcache/mcache.py @@ -4,7 +4,6 @@ from __future__ import division import bmemcached -from six import iteritems, itervalues from datadog_checks.base import AgentCheck, ConfigurationError @@ -111,7 +110,7 @@ def _process_response(self, response): if len(response) != 1: raise BadResponseError("Malformed response: {}".format(response)) - stats = list(itervalues(response))[0] + stats = list(response.values())[0] if not len(stats): raise BadResponseError("Malformed response for host: {}".format(stats)) @@ -177,7 +176,7 @@ def _get_metrics(self, client, tags, service_check_tags=None): raise def _get_optional_metrics(self, client, tags, options=None): - for arg, metrics_args in iteritems(self.OPTIONAL_STATS): + for arg, metrics_args in self.OPTIONAL_STATS.items(): if not options or options.get(arg, False): try: optional_rates = metrics_args[0] @@ -187,7 +186,7 @@ def _get_optional_metrics(self, client, tags, options=None): stats = self._process_response(client.stats(arg)) prefix = "memcache.{}".format(arg) - for metric, val in iteritems(stats): + for metric, val in stats.items(): # Check if metric is a gauge or rate metric_tags = [] if optional_fn: diff --git a/mesos_master/datadog_checks/mesos_master/mesos_master.py b/mesos_master/datadog_checks/mesos_master/mesos_master.py index e82cc2fb33c95..aaa2adc6fa489 100644 --- a/mesos_master/datadog_checks/mesos_master/mesos_master.py +++ b/mesos_master/datadog_checks/mesos_master/mesos_master.py @@ -6,9 +6,9 @@ Collects metrics from mesos master node, only the leader is sending metrics. 
""" +from urllib.parse import urlparse + import requests -from six import iteritems -from six.moves.urllib.parse import urlparse from datadog_checks.base import AgentCheck from datadog_checks.base.errors import CheckException @@ -305,7 +305,7 @@ def check(self, instance): framework_tags = ['framework_name:' + framework['name']] + tags self.GAUGE('mesos.framework.total_tasks', len(framework['tasks']), tags=framework_tags) resources = framework['used_resources'] - for key_name, (metric_name, metric_func) in iteritems(self.FRAMEWORK_METRICS): + for key_name, (metric_name, metric_func) in self.FRAMEWORK_METRICS.items(): metric_func(self, metric_name, resources[key_name], tags=framework_tags) role_metrics = self._get_master_roles(url, instance_tags) @@ -314,7 +314,7 @@ def check(self, instance): role_tags = ['mesos_role:' + role['name']] + tags self.GAUGE('mesos.role.frameworks.count', len(role['frameworks']), tags=role_tags) self.GAUGE('mesos.role.weight', role['weight'], tags=role_tags) - for key_name, (metric_name, metric_func) in iteritems(self.ROLE_RESOURCES_METRICS): + for key_name, (metric_name, metric_func) in self.ROLE_RESOURCES_METRICS.items(): try: metric_func(self, metric_name, role['resources'][key_name], tags=role_tags) except KeyError: @@ -335,7 +335,7 @@ def check(self, instance): self.STATS_METRICS, ] for m in metrics: - for key_name, (metric_name, metric_func) in iteritems(m): + for key_name, (metric_name, metric_func) in m.items(): if key_name in stats_metrics: metric_func(self, metric_name, stats_metrics[key_name], tags=tags) diff --git a/mongo/datadog_checks/mongo/collectors/base.py b/mongo/datadog_checks/mongo/collectors/base.py index 68d884ea1a81d..be88dfcf179af 100644 --- a/mongo/datadog_checks/mongo/collectors/base.py +++ b/mongo/datadog_checks/mongo/collectors/base.py @@ -4,14 +4,9 @@ import re -from six import PY3, iteritems - from datadog_checks.base import AgentCheck from datadog_checks.mongo.metrics import CASE_SENSITIVE_METRIC_NAME_SUFFIXES -if PY3: - long = int - class MongoCollector(object): """The base collector object, can be considered abstract. 
@@ -46,7 +41,7 @@ def _normalize(self, metric_name, submit_method, prefix=None): metric_suffix = "ps" if submit_method == AgentCheck.rate else "" # Replace case-sensitive metric name characters - for pattern, repl in iteritems(CASE_SENSITIVE_METRIC_NAME_SUFFIXES): + for pattern, repl in CASE_SENSITIVE_METRIC_NAME_SUFFIXES.items(): metric_name = re.compile(pattern).sub(repl, metric_name) # Normalize, and wrap @@ -93,11 +88,9 @@ def _submit_payload(self, payload, additional_tags=None, metrics_to_collect=None continue # value is now status[x][y][z] - if not isinstance(value, (int, long, float)): + if not isinstance(value, (int, float)): raise TypeError( - u"{0} value is a {1}, it should be an int, a float or a long instead.".format( - metric_name, type(value) - ) + u"{0} value is a {1}, it should be an int, or a float instead.".format(metric_name, type(value)) ) # Submit the metric diff --git a/mongo/datadog_checks/mongo/collectors/coll_stats.py b/mongo/datadog_checks/mongo/collectors/coll_stats.py index 68778a5546e16..02c8809641a24 100644 --- a/mongo/datadog_checks/mongo/collectors/coll_stats.py +++ b/mongo/datadog_checks/mongo/collectors/coll_stats.py @@ -3,7 +3,6 @@ # Licensed under a 3-clause BSD style license (see LICENSE) from pymongo.errors import OperationFailure -from six import iteritems from datadog_checks.base import AgentCheck from datadog_checks.mongo.collectors.base import MongoCollector @@ -96,7 +95,7 @@ def collect(self, api): # Submit the indexSizes metrics manually if index_sizes: metric_name_alias = self._normalize("collection.indexSizes", AgentCheck.gauge) - for idx, val in iteritems(index_sizes): + for idx, val in index_sizes.items(): # we tag the index idx_tags = self.base_tags + additional_tags + ["index:%s" % idx] self.gauge(metric_name_alias, val, tags=idx_tags) diff --git a/mongo/datadog_checks/mongo/collectors/top.py b/mongo/datadog_checks/mongo/collectors/top.py index fe51fd92ae8ad..9c7b186661c7d 100644 --- a/mongo/datadog_checks/mongo/collectors/top.py +++ b/mongo/datadog_checks/mongo/collectors/top.py @@ -1,9 +1,6 @@ # (C) Datadog, Inc. 2020-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) - -from six import iteritems - from datadog_checks.mongo.collectors.base import MongoCollector from datadog_checks.mongo.common import MongosDeployment, ReplicaSetDeployment from datadog_checks.mongo.metrics import TOP_METRICS @@ -26,7 +23,7 @@ def compatible_with(self, deployment): def collect(self, api): dbtop = api["admin"].command('top') - for ns, ns_metrics in iteritems(dbtop['totals']): + for ns, ns_metrics in dbtop['totals'].items(): if "." 
not in ns: continue diff --git a/mysql/datadog_checks/mysql/collection_utils.py b/mysql/datadog_checks/mysql/collection_utils.py index f87c952227385..0846401c70038 100644 --- a/mysql/datadog_checks/mysql/collection_utils.py +++ b/mysql/datadog_checks/mysql/collection_utils.py @@ -3,8 +3,6 @@ # Licensed under Simplified BSD License (see LICENSE) import logging -from six import iteritems, text_type - log = logging.getLogger(__name__) @@ -12,7 +10,7 @@ def collect_all_scalars(key, dictionary): if key not in dictionary or dictionary[key] is None: yield None, None elif isinstance(dictionary[key], dict): - for tag, _ in iteritems(dictionary[key]): + for tag, _ in dictionary[key].items(): yield tag, collect_type(tag, dictionary[key], float) else: yield None, collect_type(key, dictionary, float) @@ -23,7 +21,7 @@ def collect_scalar(key, mapping): def collect_string(key, mapping): - return collect_type(key, mapping, text_type) + return collect_type(key, mapping, str) def collect_type(key, mapping, the_type): diff --git a/mysql/datadog_checks/mysql/innodb_metrics.py b/mysql/datadog_checks/mysql/innodb_metrics.py index cd4debe4d8448..b33bcc3868ae1 100644 --- a/mysql/datadog_checks/mysql/innodb_metrics.py +++ b/mysql/datadog_checks/mysql/innodb_metrics.py @@ -6,7 +6,6 @@ from contextlib import closing import pymysql -from six import PY3, iteritems from datadog_checks.base import is_affirmative from datadog_checks.base.log import get_check_logger @@ -15,9 +14,6 @@ from .collection_utils import collect_scalar from .const import OPTIONAL_INNODB_VARS -if PY3: - long = int - def _are_values_numeric(array): return all(v.isdigit() for v in array) @@ -79,38 +75,38 @@ def get_stats_from_innodb_status(self, db): row = [item.strip(']') for item in row] if line.startswith('---BUFFER POOL'): - buffer_id = long(row[2]) + buffer_id = int(row[2]) # SEMAPHORES if line.find('Mutex spin waits') == 0: # Mutex spin waits 79626940, rounds 157459864, OS waits 698719 # Mutex spin waits 0, rounds 247280272495, OS waits 316513438 - results['Innodb_mutex_spin_waits'] = long(row[3]) - results['Innodb_mutex_spin_rounds'] = long(row[5]) - results['Innodb_mutex_os_waits'] = long(row[8]) + results['Innodb_mutex_spin_waits'] = int(row[3]) + results['Innodb_mutex_spin_rounds'] = int(row[5]) + results['Innodb_mutex_os_waits'] = int(row[8]) elif line.find('RW-shared spins') == 0 and line.find(';') > 0: # RW-shared spins 3859028, OS waits 2100750; RW-excl spins # 4641946, OS waits 1530310 - results['Innodb_s_lock_spin_waits'] = long(row[2]) - results['Innodb_x_lock_spin_waits'] = long(row[8]) - results['Innodb_s_lock_os_waits'] = long(row[5]) - results['Innodb_x_lock_os_waits'] = long(row[11]) + results['Innodb_s_lock_spin_waits'] = int(row[2]) + results['Innodb_x_lock_spin_waits'] = int(row[8]) + results['Innodb_s_lock_os_waits'] = int(row[5]) + results['Innodb_x_lock_os_waits'] = int(row[11]) elif line.find('RW-shared spins') == 0 and line.find('; RW-excl spins') == -1: # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax # RW-shared spins 604733, rounds 8107431, OS waits 241268 - results['Innodb_s_lock_spin_waits'] = long(row[2]) - results['Innodb_s_lock_spin_rounds'] = long(row[4]) - results['Innodb_s_lock_os_waits'] = long(row[7]) + results['Innodb_s_lock_spin_waits'] = int(row[2]) + results['Innodb_s_lock_spin_rounds'] = int(row[4]) + results['Innodb_s_lock_os_waits'] = int(row[7]) elif line.find('RW-excl spins') == 0: # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax # RW-excl spins 604733, rounds 8107431, OS waits 241268 - 
results['Innodb_x_lock_spin_waits'] = long(row[2]) - results['Innodb_x_lock_spin_rounds'] = long(row[4]) - results['Innodb_x_lock_os_waits'] = long(row[7]) + results['Innodb_x_lock_spin_waits'] = int(row[2]) + results['Innodb_x_lock_spin_rounds'] = int(row[4]) + results['Innodb_x_lock_os_waits'] = int(row[7]) elif line.find('seconds the semaphore:') > 0: # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: results['Innodb_semaphore_waits'] += 1 - results['Innodb_semaphore_wait_time'] += long(float(row[9])) * 1000 + results['Innodb_semaphore_wait_time'] += int(float(row[9])) * 1000 # TRANSACTIONS elif line.find('Trx id counter') == 0: @@ -121,7 +117,7 @@ def get_stats_from_innodb_status(self, db): txn_seen = True elif line.find('History list length') == 0: # History list length 132 - results['Innodb_history_list_length'] = long(row[3]) + results['Innodb_history_list_length'] = int(row[3]) elif txn_seen and line.find('---TRANSACTION') == 0: # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 results['Innodb_current_transactions'] += 1 @@ -129,76 +125,76 @@ def get_stats_from_innodb_status(self, db): results['Innodb_active_transactions'] += 1 elif line.find('read views open inside InnoDB') > 0: # 1 read views open inside InnoDB - results['Innodb_read_views'] = long(row[0]) + results['Innodb_read_views'] = int(row[0]) elif line.find('mysql tables in use') == 0: # mysql tables in use 2, locked 2 - results['Innodb_tables_in_use'] += long(row[4]) - results['Innodb_locked_tables'] += long(row[6]) + results['Innodb_tables_in_use'] += int(row[4]) + results['Innodb_locked_tables'] += int(row[6]) elif txn_seen and line.find('lock struct(s)') > 0: # 23 lock struct(s), heap size 3024, undo log entries 27 # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 # LOCK WAIT 2 lock struct(s), heap size 368 if line.find('LOCK WAIT') == 0: - results['Innodb_lock_structs'] += long(row[2]) + results['Innodb_lock_structs'] += int(row[2]) results['Innodb_locked_transactions'] += 1 elif line.find('ROLLING BACK') == 0: # ROLLING BACK 127539 lock struct(s), heap size 15201832, # 4411492 row lock(s), undo log entries 1042488 - results['Innodb_lock_structs'] += long(row[2]) + results['Innodb_lock_structs'] += int(row[2]) else: - results['Innodb_lock_structs'] += long(row[0]) + results['Innodb_lock_structs'] += int(row[0]) # FILE I/O elif line.find(' OS file reads, ') > 0: # 8782182 OS file reads, 15635445 OS file writes, 947800 OS # fsyncs - results['Innodb_os_file_reads'] = long(row[0]) - results['Innodb_os_file_writes'] = long(row[4]) - results['Innodb_os_file_fsyncs'] = long(row[8]) + results['Innodb_os_file_reads'] = int(row[0]) + results['Innodb_os_file_writes'] = int(row[4]) + results['Innodb_os_file_fsyncs'] = int(row[8]) elif line.find('Pending normal aio reads:') == 0: try: if len(row) == 8: # (len(row) == 8) Pending normal aio reads: 0, aio writes: 0, - results['Innodb_pending_normal_aio_reads'] = long(row[4]) - results['Innodb_pending_normal_aio_writes'] = long(row[7]) + results['Innodb_pending_normal_aio_reads'] = int(row[4]) + results['Innodb_pending_normal_aio_writes'] = int(row[7]) elif len(row) == 14: # (len(row) == 14) Pending normal aio reads: 0 [0, 0] , aio writes: 0 [0, 0] , - results['Innodb_pending_normal_aio_reads'] = long(row[4]) - results['Innodb_pending_normal_aio_writes'] = long(row[10]) + results['Innodb_pending_normal_aio_reads'] = int(row[4]) + results['Innodb_pending_normal_aio_writes'] = int(row[10]) elif 
len(row) == 16: # (len(row) == 16) Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] , if _are_values_numeric(row[4:8]) and _are_values_numeric(row[11:15]): results['Innodb_pending_normal_aio_reads'] = ( - long(row[4]) + long(row[5]) + long(row[6]) + long(row[7]) + int(row[4]) + int(row[5]) + int(row[6]) + int(row[7]) ) results['Innodb_pending_normal_aio_writes'] = ( - long(row[11]) + long(row[12]) + long(row[13]) + long(row[14]) + int(row[11]) + int(row[12]) + int(row[13]) + int(row[14]) ) # (len(row) == 16) Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0] , elif _are_values_numeric(row[4:9]) and _are_values_numeric(row[12:15]): - results['Innodb_pending_normal_aio_reads'] = long(row[4]) - results['Innodb_pending_normal_aio_writes'] = long(row[12]) + results['Innodb_pending_normal_aio_reads'] = int(row[4]) + results['Innodb_pending_normal_aio_writes'] = int(row[12]) else: self.log.warning("Can't parse result line %s", line) elif len(row) == 18: # (len(row) == 18) Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] , - results['Innodb_pending_normal_aio_reads'] = long(row[4]) - results['Innodb_pending_normal_aio_writes'] = long(row[12]) + results['Innodb_pending_normal_aio_reads'] = int(row[4]) + results['Innodb_pending_normal_aio_writes'] = int(row[12]) elif len(row) == 22: # (len(row) == 22) # Pending normal aio reads: 0 [0, 0, 0, 0, 0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] , - results['Innodb_pending_normal_aio_reads'] = long(row[4]) - results['Innodb_pending_normal_aio_writes'] = long(row[16]) + results['Innodb_pending_normal_aio_reads'] = int(row[4]) + results['Innodb_pending_normal_aio_writes'] = int(row[16]) except ValueError as e: self.log.warning("Can't parse result line %s: %s", line, e) elif line.find('ibuf aio reads') == 0: # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0 # or ibuf aio reads:, log i/o's:, sync i/o's: if len(row) == 10: - results['Innodb_pending_ibuf_aio_reads'] = long(row[3]) - results['Innodb_pending_aio_log_ios'] = long(row[6]) - results['Innodb_pending_aio_sync_ios'] = long(row[9]) + results['Innodb_pending_ibuf_aio_reads'] = int(row[3]) + results['Innodb_pending_aio_log_ios'] = int(row[6]) + results['Innodb_pending_aio_sync_ios'] = int(row[9]) elif len(row) == 7: results['Innodb_pending_ibuf_aio_reads'] = 0 results['Innodb_pending_aio_log_ios'] = 0 @@ -206,11 +202,11 @@ def get_stats_from_innodb_status(self, db): elif line.find('Pending flushes (fsync)') == 0: if len(row) == 4: # Pending flushes (fsync): 0 - results['Innodb_pending_buffer_pool_flushes'] = long(row[3]) + results['Innodb_pending_buffer_pool_flushes'] = int(row[3]) else: # Pending flushes (fsync) log: 0; buffer pool: 0 - results['Innodb_pending_log_flushes'] = long(row[4]) - results['Innodb_pending_buffer_pool_flushes'] = long(row[7]) + results['Innodb_pending_log_flushes'] = int(row[4]) + results['Innodb_pending_buffer_pool_flushes'] = int(row[7]) # INSERT BUFFER AND ADAPTIVE HASH INDEX elif line.find('Ibuf for space 0: size ') == 0: @@ -218,24 +214,24 @@ def get_stats_from_innodb_status(self, db): # had two lines in the output. Newer has just one line, see below. 
# Ibuf for space 0: size 1, free list len 887, seg size 889, is not empty # Ibuf for space 0: size 1, free list len 887, seg size 889, - results['Innodb_ibuf_size'] = long(row[5]) - results['Innodb_ibuf_free_list'] = long(row[9]) - results['Innodb_ibuf_segment_size'] = long(row[12]) + results['Innodb_ibuf_size'] = int(row[5]) + results['Innodb_ibuf_free_list'] = int(row[9]) + results['Innodb_ibuf_segment_size'] = int(row[12]) elif line.find('Ibuf: size ') == 0: # Ibuf: size 1, free list len 4634, seg size 4636, - results['Innodb_ibuf_size'] = long(row[2]) - results['Innodb_ibuf_free_list'] = long(row[6]) - results['Innodb_ibuf_segment_size'] = long(row[9]) + results['Innodb_ibuf_size'] = int(row[2]) + results['Innodb_ibuf_free_list'] = int(row[6]) + results['Innodb_ibuf_segment_size'] = int(row[9]) if line.find('merges') > -1: - results['Innodb_ibuf_merges'] = long(row[10]) + results['Innodb_ibuf_merges'] = int(row[10]) elif line.find(', delete mark ') > 0 and prev_line.find('merged operations:') == 0: # Output of show engine innodb status has changed in 5.5 # merged operations: # insert 593983, delete mark 387006, delete 73092 - results['Innodb_ibuf_merged_inserts'] = long(row[1]) - results['Innodb_ibuf_merged_delete_marks'] = long(row[4]) - results['Innodb_ibuf_merged_deletes'] = long(row[6]) + results['Innodb_ibuf_merged_inserts'] = int(row[1]) + results['Innodb_ibuf_merged_delete_marks'] = int(row[4]) + results['Innodb_ibuf_merged_deletes'] = int(row[6]) results['Innodb_ibuf_merged'] = ( results['Innodb_ibuf_merged_inserts'] + results['Innodb_ibuf_merged_delete_marks'] @@ -243,85 +239,85 @@ def get_stats_from_innodb_status(self, db): ) elif line.find(' merged recs, ') > 0: # 19817685 inserts, 19817684 merged recs, 3552620 merges - results['Innodb_ibuf_merged_inserts'] = long(row[0]) - results['Innodb_ibuf_merged'] = long(row[2]) - results['Innodb_ibuf_merges'] = long(row[5]) + results['Innodb_ibuf_merged_inserts'] = int(row[0]) + results['Innodb_ibuf_merged'] = int(row[2]) + results['Innodb_ibuf_merges'] = int(row[5]) elif line.find('Hash table size ') == 0: # In some versions of InnoDB, the used cells is omitted. # Hash table size 4425293, used cells 4229064, .... # Hash table size 57374437, node heap has 72964 buffer(s) <-- # no used cells - results['Innodb_hash_index_cells_total'] = long(row[3]) - results['Innodb_hash_index_cells_used'] = long(row[6]) if line.find('used cells') > 0 else 0 + results['Innodb_hash_index_cells_total'] = int(row[3]) + results['Innodb_hash_index_cells_used'] = int(row[6]) if line.find('used cells') > 0 else 0 # LOG elif line.find(" log i/o's done, ") > 0: # 3430041 log i/o's done, 17.44 log i/o's/second # 520835887 log i/o's done, 17.28 log i/o's/second, 518724686 # syncs, 2980893 checkpoints - results['Innodb_log_writes'] = long(row[0]) + results['Innodb_log_writes'] = int(row[0]) elif line.find(" pending log writes, ") > 0: # 0 pending log writes, 0 pending chkp writes - results['Innodb_pending_log_writes'] = long(row[0]) - results['Innodb_pending_checkpoint_writes'] = long(row[4]) + results['Innodb_pending_log_writes'] = int(row[0]) + results['Innodb_pending_checkpoint_writes'] = int(row[4]) elif line.find("Log sequence number") == 0: # This number is NOT printed in hex in InnoDB plugin. # Log sequence number 272588624 - results['Innodb_lsn_current'] = long(row[3]) + results['Innodb_lsn_current'] = int(row[3]) elif line.find("Log flushed up to") == 0: # This number is NOT printed in hex in InnoDB plugin. 
# Log flushed up to 272588624 - results['Innodb_lsn_flushed'] = long(row[4]) + results['Innodb_lsn_flushed'] = int(row[4]) elif line.find("Last checkpoint at") == 0: # Last checkpoint at 272588624 - results['Innodb_lsn_last_checkpoint'] = long(row[3]) + results['Innodb_lsn_last_checkpoint'] = int(row[3]) # BUFFER POOL AND MEMORY elif line.find("Total memory allocated") == 0 and line.find("in additional pool allocated") > 0: # Total memory allocated 29642194944; in additional pool allocated 0 # Total memory allocated by read views 96 - results['Innodb_mem_total'] = long(row[3]) - results['Innodb_mem_additional_pool'] = long(row[8]) + results['Innodb_mem_total'] = int(row[3]) + results['Innodb_mem_additional_pool'] = int(row[8]) elif line.find('Adaptive hash index ') == 0: # Adaptive hash index 1538240664 (186998824 + 1351241840) - results['Innodb_mem_adaptive_hash'] = long(row[3]) + results['Innodb_mem_adaptive_hash'] = int(row[3]) elif line.find('Page hash ') == 0: # Page hash 11688584 - results['Innodb_mem_page_hash'] = long(row[2]) + results['Innodb_mem_page_hash'] = int(row[2]) elif line.find('Dictionary cache ') == 0: # Dictionary cache 145525560 (140250984 + 5274576) - results['Innodb_mem_dictionary'] = long(row[2]) + results['Innodb_mem_dictionary'] = int(row[2]) elif line.find('File system ') == 0: # File system 313848 (82672 + 231176) - results['Innodb_mem_file_system'] = long(row[2]) + results['Innodb_mem_file_system'] = int(row[2]) elif line.find('Lock system ') == 0: # Lock system 29232616 (29219368 + 13248) - results['Innodb_mem_lock_system'] = long(row[2]) + results['Innodb_mem_lock_system'] = int(row[2]) elif line.find('Recovery system ') == 0: # Recovery system 0 (0 + 0) - results['Innodb_mem_recovery_system'] = long(row[2]) + results['Innodb_mem_recovery_system'] = int(row[2]) elif line.find('Threads ') == 0: # Threads 409336 (406936 + 2400) - results['Innodb_mem_thread_hash'] = long(row[1]) + results['Innodb_mem_thread_hash'] = int(row[1]) elif line.find("Buffer pool size ") == 0: # The " " after size is necessary to avoid matching the wrong line: # Buffer pool size 1769471 # Buffer pool size, bytes 28991012864 if buffer_id == -1: - results['Innodb_buffer_pool_pages_total'] = long(row[3]) + results['Innodb_buffer_pool_pages_total'] = int(row[3]) elif line.find("Free buffers") == 0: # Free buffers 0 if buffer_id == -1: - results['Innodb_buffer_pool_pages_free'] = long(row[2]) + results['Innodb_buffer_pool_pages_free'] = int(row[2]) elif line.find("Database pages") == 0: # Database pages 1696503 if buffer_id == -1: - results['Innodb_buffer_pool_pages_data'] = long(row[2]) + results['Innodb_buffer_pool_pages_data'] = int(row[2]) elif line.find("Modified db pages") == 0: # Modified db pages 160602 if buffer_id == -1: - results['Innodb_buffer_pool_pages_dirty'] = long(row[3]) + results['Innodb_buffer_pool_pages_dirty'] = int(row[3]) elif line.find("Pages read ahead") == 0: # Must do this BEFORE the next test, otherwise it'll get fooled by this # line from the new plugin: @@ -330,22 +326,22 @@ def get_stats_from_innodb_status(self, db): elif line.find("Pages read") == 0: # Pages read 15240822, created 1770238, written 21705836 if buffer_id == -1: - results['Innodb_pages_read'] = long(row[2]) - results['Innodb_pages_created'] = long(row[4]) - results['Innodb_pages_written'] = long(row[6]) + results['Innodb_pages_read'] = int(row[2]) + results['Innodb_pages_created'] = int(row[4]) + results['Innodb_pages_written'] = int(row[6]) # ROW OPERATIONS elif line.find('Number of rows 
inserted') == 0: # Number of rows inserted 50678311, updated 66425915, deleted # 20605903, read 454561562 - results['Innodb_rows_inserted'] = long(row[4]) - results['Innodb_rows_updated'] = long(row[6]) - results['Innodb_rows_deleted'] = long(row[8]) - results['Innodb_rows_read'] = long(row[10]) + results['Innodb_rows_inserted'] = int(row[4]) + results['Innodb_rows_updated'] = int(row[6]) + results['Innodb_rows_deleted'] = int(row[8]) + results['Innodb_rows_read'] = int(row[10]) elif line.find(" queries inside InnoDB, ") > 0: # 0 queries inside InnoDB, 0 queries in queue - results['Innodb_queries_inside'] = long(row[0]) - results['Innodb_queries_queued'] = long(row[4]) + results['Innodb_queries_inside'] = int(row[0]) + results['Innodb_queries_queued'] = int(row[4]) prev_line = line @@ -357,7 +353,7 @@ def get_stats_from_innodb_status(self, db): # Finally we change back the metrics values to string to make the values # consistent with how they are reported by SHOW GLOBAL STATUS - for metric, value in list(iteritems(results)): + for metric, value in list(results.items()): results[metric] = str(value) return results diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py index a4c2eeb6a4ec2..de45c43013417 100644 --- a/mysql/datadog_checks/mysql/mysql.py +++ b/mysql/datadog_checks/mysql/mysql.py @@ -13,7 +13,6 @@ import pymysql from cachetools import TTLCache -from six import PY3, iteritems, itervalues from datadog_checks.base import AgentCheck, is_affirmative from datadog_checks.base.utils.db import QueryExecutor, QueryManager @@ -96,10 +95,6 @@ from ..stubs import datadog_agent -if PY3: - long = int - - class MySql(AgentCheck): SERVICE_CHECK_NAME = 'mysql.can_connect' SLAVE_SERVICE_CHECK_NAME = 'mysql.replication.slave_running' @@ -726,9 +721,9 @@ def _check_replication_status(self, results): if replica_sql_running is None: replica_sql_running = collect_type('Replica_SQL_Running', results, dict) if replica_io_running: - replica_io_running = any(v.lower().strip() == 'yes' for v in itervalues(replica_io_running)) + replica_io_running = any(v.lower().strip() == 'yes' for v in replica_io_running.values()) if replica_sql_running: - replica_sql_running = any(v.lower().strip() == 'yes' for v in itervalues(replica_sql_running)) + replica_sql_running = any(v.lower().strip() == 'yes' for v in replica_sql_running.values()) binlog_running = results.get('Binlog_enabled', False) # replicas will only be collected if user has PROCESS privileges. @@ -813,7 +808,7 @@ def _is_group_replication_active(self, db): return False def _submit_metrics(self, variables, db_results, tags): - for variable, metric in iteritems(variables): + for variable, metric in variables.items(): if isinstance(metric, list): for m in metric: metric_name, metric_type = m @@ -856,7 +851,7 @@ def _collect_dict(self, metric_type, field_metric_map, query, db, tags): cursor.execute(query) result = cursor.fetchone() if result is not None: - for field, metric in list(iteritems(field_metric_map)): + for field, metric in field_metric_map.items(): # Find the column name in the cursor description to identify the column index # http://www.python.org/dev/peps/pep-0249/ # cursor.description is a tuple of (column_name, ..., ...) 
@@ -903,7 +898,7 @@ def _get_runtime_aurora_tags(self, db): def _collect_system_metrics(self, host, db, tags): pid = None # The server needs to run locally, accessed by TCP or socket - if host in ["localhost", "127.0.0.1", "0.0.0.0"] or db.port == long(0): + if host in ["localhost", "127.0.0.1", "0.0.0.0"] or db.port == int(0): pid = self._get_server_pid(db) if pid: @@ -1021,7 +1016,7 @@ def _get_binary_log_stats(self, db): master_logs = {result[0]: result[1] for result in cursor_results} binary_log_space = 0 - for value in itervalues(master_logs): + for value in master_logs.values(): binary_log_space += value return binary_log_space @@ -1059,7 +1054,7 @@ def _get_replica_stats(self, db, is_mariadb, replication_channel): # MySQL <5.7 does not have Channel_Name. # For MySQL >=5.7 'Channel_Name' is set to an empty string by default channel = replication_channel or replica_result.get('Channel_Name') or 'default' - for key, value in iteritems(replica_result): + for key, value in replica_result.items(): if value is not None: replica_results[key]['channel:{0}'.format(channel)] = value except (pymysql.err.InternalError, pymysql.err.OperationalError) as e: @@ -1161,7 +1156,7 @@ def _query_exec_time_per_schema(self, db): schema_query_avg_run_time = {} for row in cursor.fetchall(): schema_name = str(row[0]) - avg_us = long(row[1]) + avg_us = int(row[1]) # set the tag as the dictionary key schema_query_avg_run_time["schema:{0}".format(schema_name)] = avg_us @@ -1216,7 +1211,7 @@ def _query_size_per_schema(self, db): schema_size = {} for row in cursor.fetchall(): schema_name = str(row[0]) - size = long(row[1]) + size = int(row[1]) # set the tag as the dictionary key schema_size["schema:{0}".format(schema_name)] = size @@ -1241,8 +1236,8 @@ def _query_rows_stats_per_table(self, db): for row in cursor.fetchall(): table_schema = str(row[0]) table_name = str(row[1]) - rows_read_total = long(row[2]) - rows_changed_total = long(row[3]) + rows_read_total = int(row[2]) + rows_changed_total = int(row[3]) # set the tag as the dictionary key table_rows_read_total["schema:{},table:{}".format(table_schema, table_name)] = rows_read_total diff --git a/nginx/datadog_checks/nginx/nginx.py b/nginx/datadog_checks/nginx/nginx.py index 0f7006abfd52f..3dc525a9eea7c 100644 --- a/nginx/datadog_checks/nginx/nginx.py +++ b/nginx/datadog_checks/nginx/nginx.py @@ -4,10 +4,9 @@ import re from datetime import datetime from itertools import chain +from urllib.parse import urljoin, urlparse import simplejson as json -from six import PY3, iteritems, text_type -from six.moves.urllib.parse import urljoin, urlparse from datadog_checks.base import AgentCheck, ConfigurationError, to_native_string from datadog_checks.base.utils.time import get_timestamp @@ -15,9 +14,6 @@ from .const import PLUS_API_ENDPOINTS, PLUS_API_STREAM_ENDPOINTS, TAGGED_KEYS from .metrics import COUNT_METRICS, METRICS_SEND_AS_COUNT, METRICS_SEND_AS_HISTOGRAM, VTS_METRIC_MAP -if PY3: - long = int - if hasattr(datetime, 'fromisoformat'): fromisoformat = datetime.fromisoformat else: @@ -133,7 +129,7 @@ def _get_enabled_endpoints(self): supported_endpoints = self._supported_endpoints(available_endpoints) self.log.debug("Supported endpoints are %s", supported_endpoints) - return chain(iteritems(supported_endpoints)) + return chain(supported_endpoints.items()) except Exception as e: self.log.warning( "Could not determine available endpoints from the API, " @@ -250,13 +246,13 @@ def _get_plus_api_endpoints(self, use_stream=False): Returns all of either stream or default 
endpoints that the integration supports collecting metrics from based on the Plus API version """ - endpoints = iteritems({}) + endpoints = iter([]) available_plus_endpoints = PLUS_API_STREAM_ENDPOINTS if use_stream else PLUS_API_ENDPOINTS for earliest_version, new_endpoints in available_plus_endpoints.items(): if int(self.plus_api_version) >= int(earliest_version): - endpoints = chain(endpoints, iteritems(new_endpoints)) + endpoints = chain(endpoints, new_endpoints.items()) return endpoints def _get_all_plus_api_endpoints(self): @@ -370,10 +366,10 @@ def _flatten_json(cls, metric_base, val, tags): if tags is None: tags = [] tags = tags + [server] - for key, val2 in iteritems(val): + for key, val2 in val.items(): if key in TAGGED_KEYS: metric_name = '%s.%s' % (metric_base, TAGGED_KEYS[key]) - for tag_val, data in iteritems(val2): + for tag_val, data in val2.items(): tag = '%s:%s' % (TAGGED_KEYS[key], tag_val) output.extend(cls._flatten_json(metric_name, data, tags + [tag])) else: @@ -387,10 +383,10 @@ def _flatten_json(cls, metric_base, val, tags): elif isinstance(val, bool): output.append((metric_base, int(val), tags, 'gauge')) - elif isinstance(val, (int, float, long)): + elif isinstance(val, (int, float)): output.append((metric_base, val, tags, 'gauge')) - elif isinstance(val, (text_type, str)) and val[-1] == "Z": + elif isinstance(val, str) and val[-1] == "Z": try: # In the new Plus API, timestamps are now formatted # strings, some include microseconds, some don't... diff --git a/redisdb/datadog_checks/redisdb/redisdb.py b/redisdb/datadog_checks/redisdb/redisdb.py index 4155bcdb03e81..86298169a0b26 100644 --- a/redisdb/datadog_checks/redisdb/redisdb.py +++ b/redisdb/datadog_checks/redisdb/redisdb.py @@ -9,7 +9,6 @@ from copy import deepcopy import redis -from six import PY2, iteritems from datadog_checks.base import AgentCheck, ConfigurationError, ensure_unicode, is_affirmative from datadog_checks.base.utils.common import round_value @@ -266,7 +265,7 @@ def _check_db(self): elif info_name in self.RATE_KEYS: self.rate(self.RATE_KEYS[info_name], info[info_name], tags=tags) - for config_key, value in iteritems(config): + for config_key, value in config.items(): metric_name = self.CONFIG_GAUGE_KEYS.get(config_key) if metric_name is not None: self.gauge(metric_name, value, tags=tags) @@ -404,7 +403,7 @@ def _check_key_lengths(self, conn, tags): lengths[text_key]["key_type"] = key_type # Send the metrics for each db in the redis instance. - for key, total in iteritems(lengths): + for key, total in lengths.items(): # Only send non-zeros if tagged per db. if total["length"] > 0: self.gauge( @@ -416,7 +415,7 @@ def _check_key_lengths(self, conn, tags): # Warn if a key is missing from the entire redis instance. # Send 0 if the key is missing/empty from the entire redis instance. - for key, total in iteritems(lengths_overall): + for key, total in lengths_overall.items(): if total == 0: key_tags = ['key:{}'.format(key)] if instance_db: @@ -552,7 +551,7 @@ def _check_command_stats(self, conn, tags): self.warning('Could not retrieve command stats from Redis. 
INFO COMMANDSTATS only works with Redis >= 2.6.') return - for key, stats in iteritems(command_stats): + for key, stats in command_stats.items(): command = key.split('_', 1)[1] command_tags = tags + ['command:{}'.format(command)] @@ -572,11 +571,8 @@ def _collect_metadata(self, info): self.set_metadata('version', info['redis_version']) -_timer = time.time if PY2 else time.perf_counter - - def _call_and_time(func): - start_time = _timer() + start_time = time.perf_counter() rv = func() - end_time = _timer() + end_time = time.perf_counter() return rv, round_value((end_time - start_time) * 1000, 2) diff --git a/riakcs/datadog_checks/riakcs/riakcs.py b/riakcs/datadog_checks/riakcs/riakcs.py index 5f5f12ec8dffe..8185985da4140 100644 --- a/riakcs/datadog_checks/riakcs/riakcs.py +++ b/riakcs/datadog_checks/riakcs/riakcs.py @@ -8,7 +8,6 @@ import boto3 import simplejson as json from botocore.config import Config -from six import iteritems from datadog_checks.base import AgentCheck from datadog_checks.base.config import _is_affirmative @@ -22,7 +21,7 @@ def multidict(ordered_pairs): d[k].append(v) # unpack lists that have only 1 item dict_copy = deepcopy(d) - for k, v in iteritems(dict_copy): + for k, v in dict_copy.items(): if len(v) == 1: d[k] = v[0] return dict(d) @@ -62,7 +61,7 @@ def process_stats(self, stats, tags, metrics): metrics.update(V21_DEFAULT_METRICS) else: metrics = V21_DEFAULT_METRICS - for key, value in iteritems(stats): + for key, value in stats.items(): if key not in metrics: continue suffix = key.rsplit("_", 1)[-1] @@ -72,7 +71,7 @@ def process_stats(self, stats, tags, metrics): # pre 2.1 stats format legends = {len(k): k for k in stats["legend"]} del stats["legend"] - for key, values in iteritems(stats): + for key, values in stats.items(): legend = legends[len(values)] for i, value in enumerate(values): metric_name = "riakcs.{0}.{1}".format(key, legend[i]) diff --git a/sap_hana/datadog_checks/sap_hana/sap_hana.py b/sap_hana/datadog_checks/sap_hana/sap_hana.py index d0f173b6c316e..227b93a411a39 100644 --- a/sap_hana/datadog_checks/sap_hana/sap_hana.py +++ b/sap_hana/datadog_checks/sap_hana/sap_hana.py @@ -18,8 +18,6 @@ from hdbcli.dbapi import Connection as HanaConnection except ImportError: HanaConnection = None -from six import iteritems -from six.moves import zip from datadog_checks.base import AgentCheck, is_affirmative from datadog_checks.base.utils.common import total_time_to_temporal_percent @@ -265,7 +263,7 @@ def query_connection_overview(self): for conn in self.iter_rows(queries.GlobalSystemConnectionsStatus(schema=self._schema)): db_counts[(conn['db_name'], conn['host'], conn['port'])][conn['status'].lower()] += conn['total'] - for (db, host, port), counts in iteritems(db_counts): + for (db, host, port), counts in db_counts.items(): tags = ['db:{}'.format(db), 'hana_port:{}'.format(port)] tags.extend(self._tags) tags.append('hana_host:{}'.format(host)) diff --git a/spark/datadog_checks/spark/constants.py b/spark/datadog_checks/spark/constants.py index d66b3b410977d..4097fce403f4f 100644 --- a/spark/datadog_checks/spark/constants.py +++ b/spark/datadog_checks/spark/constants.py @@ -3,8 +3,6 @@ # Licensed under a 3-clause BSD style license (see LICENSE) import re -from six import iteritems - # Identifier for cluster master address in `spark.yaml` MASTER_ADDRESS = 'spark_url' DEPRECATED_MASTER_ADDRESS = 'resourcemanager_uri' @@ -137,15 +135,15 @@ } SPARK_DRIVER_METRICS = { - key: (value[0].format('driver'), value[1]) for key, value in 
iteritems(SPARK_EXECUTOR_TEMPLATE_METRICS) + key: (value[0].format('driver'), value[1]) for key, value in SPARK_EXECUTOR_TEMPLATE_METRICS.items() } SPARK_EXECUTOR_METRICS = { - key: (value[0].format('executor'), value[1]) for key, value in iteritems(SPARK_EXECUTOR_TEMPLATE_METRICS) + key: (value[0].format('executor'), value[1]) for key, value in SPARK_EXECUTOR_TEMPLATE_METRICS.items() } SPARK_EXECUTOR_LEVEL_METRICS = { - key: (value[0].format('executor.id'), value[1]) for key, value in iteritems(SPARK_EXECUTOR_TEMPLATE_METRICS) + key: (value[0].format('executor.id'), value[1]) for key, value in SPARK_EXECUTOR_TEMPLATE_METRICS.items() } SPARK_RDD_METRICS = { diff --git a/squid/datadog_checks/squid/squid.py b/squid/datadog_checks/squid/squid.py index 42b2ead2f8406..a11875e6a74bb 100644 --- a/squid/datadog_checks/squid/squid.py +++ b/squid/datadog_checks/squid/squid.py @@ -5,9 +5,7 @@ import re import requests -from six import iteritems -# project from datadog_checks.base import AgentCheck EVENT_TYPE = SOURCE_TYPE_NAME = 'squid' @@ -100,7 +98,7 @@ def check(self, instance): counters = self.get_counters(host, port, tags + custom_tags) # Send these values as rate - for counter, value in iteritems(counters): + for counter, value in counters.items(): self.rate(counter, value, tags=tags + custom_tags) def get_counters(self, host, port, tags): diff --git a/system_core/datadog_checks/system_core/system_core.py b/system_core/datadog_checks/system_core/system_core.py index 8ab0d0a452b9a..979836880a5aa 100644 --- a/system_core/datadog_checks/system_core/system_core.py +++ b/system_core/datadog_checks/system_core/system_core.py @@ -2,7 +2,6 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import psutil -from six import iteritems from datadog_checks.base import AgentCheck from datadog_checks.base.utils.platform import Platform @@ -22,11 +21,11 @@ def check(self, instance): for i, cpu in enumerate(cpu_times): tags = instance_tags + ['core:{0}'.format(i)] - for key, value in iteritems(cpu._asdict()): + for key, value in cpu._asdict().items(): self.rate('system.core.{0}'.format(key), 100.0 * value, tags=tags) total_cpu_times = psutil.cpu_times() - for key, value in iteritems(total_cpu_times._asdict()): + for key, value in total_cpu_times._asdict().items(): self.rate('system.core.{0}.total'.format(key), 100.0 * value / n_cpus, tags=instance_tags) # https://psutil.readthedocs.io/en/latest/#psutil.cpu_freq diff --git a/twemproxy/datadog_checks/twemproxy/twemproxy.py b/twemproxy/datadog_checks/twemproxy/twemproxy.py index 303357af28eb6..b6a2534a5eadc 100644 --- a/twemproxy/datadog_checks/twemproxy/twemproxy.py +++ b/twemproxy/datadog_checks/twemproxy/twemproxy.py @@ -4,7 +4,6 @@ import socket import simplejson as json -from six import iteritems from datadog_checks.base import AgentCheck, ensure_unicode @@ -158,11 +157,11 @@ def parse_json(cls, raw, tags=None): version = parsed.get('version', None) - for key, val in iteritems(parsed): + for key, val in parsed.items(): if isinstance(val, dict): # server pool pool_tags = tags + ['pool:%s' % key] - for server_key, server_val in iteritems(val): + for server_key, server_val in val.items(): if isinstance(server_val, dict): # server server_tags = pool_tags + ['server:%s' % server_key] diff --git a/twistlock/datadog_checks/twistlock/twistlock.py b/twistlock/datadog_checks/twistlock/twistlock.py index e8dd42f60fdb7..f543ee88d733f 100644 --- a/twistlock/datadog_checks/twistlock/twistlock.py +++ 
b/twistlock/datadog_checks/twistlock/twistlock.py @@ -7,7 +7,6 @@ from datetime import datetime, timedelta from dateutil import parser, tz -from six import iteritems from datadog_checks.base import AgentCheck from datadog_checks.base.utils.serialization import json @@ -284,7 +283,7 @@ def _report_vuln_info(self, namespace, data, tags): cve_tags += ["package:{}".format(cve['packageName'])] self.gauge('{}.cve.details'.format(namespace), float(1), cve_tags) # Send counts to avoid no-data on zeroes - for severity, count in iteritems(summary): + for severity, count in summary.items(): cve_tags = SEVERITY_TAGS.get(severity, []) + tags self.gauge('{}.cve.count'.format(namespace), float(count), cve_tags) diff --git a/varnish/datadog_checks/varnish/varnish.py b/varnish/datadog_checks/varnish/varnish.py index 57bd381452d11..f617cbf27650e 100644 --- a/varnish/datadog_checks/varnish/varnish.py +++ b/varnish/datadog_checks/varnish/varnish.py @@ -8,16 +8,12 @@ from os import geteuid from packaging.version import Version -from six import PY3, iteritems from six.moves import filter from datadog_checks.base import ConfigurationError from datadog_checks.base.checks import AgentCheck from datadog_checks.base.utils.subprocess_output import get_subprocess_output -if PY3: - long = int - class BackendStatus(object): HEALTHY = 'healthy' @@ -100,11 +96,11 @@ def _end_element(self, name): if name == "stat": m_name = self.normalize(self._current_metric) if self._current_type in ("a", "c"): - self.rate(m_name, long(self._current_value), tags=self.tags) + self.rate(m_name, int(self._current_value), tags=self.tags) elif self._current_type in ("i", "g"): - self.gauge(m_name, long(self._current_value), tags=self.tags) + self.gauge(m_name, int(self._current_value), tags=self.tags) if 'n_purges' in m_name: - self.rate('varnish.n_purgesps', long(self._current_value), tags=self.tags) + self.rate('varnish.n_purgesps', int(self._current_value), tags=self.tags) else: # Unsupported data type, ignore self._reset() @@ -120,7 +116,7 @@ def _char_data(self, data): data = data.strip() if len(data) > 0 and self._current_element != "": if self._current_element == "value": - self._current_value = long(data) + self._current_value = int(data) elif self._current_element == "flag": self._current_type = data else: @@ -243,7 +239,7 @@ def _parse_varnishstat(self, output, varnishstat_format): json_output = json.loads(output) if "counters" in json_output: json_output = json_output["counters"] - for name, metric in iteritems(json_output): + for name, metric in json_output.items(): if not isinstance(metric, dict): # skip 'timestamp' field continue @@ -254,11 +250,11 @@ def _parse_varnishstat(self, output, varnishstat_format): value = metric.get("value", 0) if metric.get("flag") in ("a", "c"): - self.rate(metric_name, long(value), tags=self.tags) + self.rate(metric_name, int(value), tags=self.tags) elif metric.get("flag") in ("g", "i"): - self.gauge(metric_name, long(value), tags=self.tags) + self.gauge(metric_name, int(value), tags=self.tags) if 'n_purges' in self.normalize(name, prefix="varnish"): - self.rate('varnish.n_purgesps', long(value), tags=self.tags) + self.rate('varnish.n_purgesps', int(value), tags=self.tags) elif 'flag' not in metric: self.log.warning("Could not determine the type of metric %s, skipping submission", metric_name) self.log.debug("Raw metric %s is missing the `flag` field", str(metric)) @@ -367,7 +363,7 @@ def _submit_backend_service_checks(self, backends_by_status): if backends_by_status is None: return - for status, 
backends in iteritems(backends_by_status): + for status, backends in backends_by_status.items(): check_status = BackendStatus.to_check_status(status) for backend, message in backends: service_checks_tags = ['backend:%s' % backend] + self.custom_tags diff --git a/vsphere/datadog_checks/vsphere/api_rest.py b/vsphere/datadog_checks/vsphere/api_rest.py index 5de5d17b78f8d..cdf6df833c0c4 100644 --- a/vsphere/datadog_checks/vsphere/api_rest.py +++ b/vsphere/datadog_checks/vsphere/api_rest.py @@ -6,7 +6,6 @@ from typing import Any, Dict, Iterator, List, Set # noqa: F401 from pyVmomi import vim -from six import iteritems from datadog_checks.base.log import CheckLoggingAdapter # noqa: F401 from datadog_checks.base.utils.http import RequestsWrapper @@ -24,7 +23,7 @@ 'ClusterComputeResource': vim.ClusterComputeResource, } -MOR_TYPE_MAPPING_TO_STRING = {v: k for k, v in iteritems(MOR_TYPE_MAPPING_FROM_STRING)} +MOR_TYPE_MAPPING_TO_STRING = {v: k for k, v in MOR_TYPE_MAPPING_FROM_STRING.items()} class VSphereRestAPI(object): diff --git a/vsphere/datadog_checks/vsphere/config.py b/vsphere/datadog_checks/vsphere/config.py index 3cda46f780e52..366b091f3117d 100644 --- a/vsphere/datadog_checks/vsphere/config.py +++ b/vsphere/datadog_checks/vsphere/config.py @@ -6,7 +6,6 @@ from typing import Any, Dict, List # noqa: F401 from pyVmomi import vim -from six import iteritems, string_types from datadog_checks.base import ConfigurationError, is_affirmative from datadog_checks.base.log import CheckLoggingAdapter # noqa: F401 @@ -208,9 +207,7 @@ def _parse_resource_filters(self, all_resource_filters): ) # Check required fields and their types - for field, field_type in iteritems( - {'resource': string_types, 'property': string_types, 'type': string_types, 'patterns': list} - ): + for field, field_type in {'resource': str, 'property': str, 'type': str, 'patterns': list}.items(): if field not in resource_filter: self.log.warning( "Ignoring filter %r because it doesn't contain a %s field.", resource_filter, field @@ -282,7 +279,7 @@ def _parse_metric_regex_filters(self, all_metric_filters): # type: (MetricFilterConfig) -> MetricFilters allowed_resource_types = [MOR_TYPE_AS_STRING[k] for k in self.collected_resource_types] metric_filters = {} - for resource_type, filters in iteritems(all_metric_filters): + for resource_type, filters in all_metric_filters.items(): if resource_type not in allowed_resource_types: self.log.warning( "Ignoring metric_filter for resource '%s'. When collection_type is '%s', it should be one of '%s'", @@ -293,7 +290,7 @@ def _parse_metric_regex_filters(self, all_metric_filters): continue metric_filters[resource_type] = filters - return {k: [re.compile(r) for r in v] for k, v in iteritems(metric_filters)} + return {k: [re.compile(r) for r in v] for k, v in metric_filters.items()} def _normalize_event_resource_filters(self, filters): return [filter.lower() for filter in filters] diff --git a/vsphere/datadog_checks/vsphere/legacy/mor_cache.py b/vsphere/datadog_checks/vsphere/legacy/mor_cache.py index 276fa653a609f..2edb5081cf18b 100644 --- a/vsphere/datadog_checks/vsphere/legacy/mor_cache.py +++ b/vsphere/datadog_checks/vsphere/legacy/mor_cache.py @@ -4,8 +4,6 @@ import threading import time -from six import iteritems - from datadog_checks.vsphere.legacy.common import REALTIME_RESOURCES @@ -88,7 +86,7 @@ def mors(self, key): Generator returning all the mors in the cache for the given instance key. 
""" with self._mor_lock: - for k, v in iteritems(self._mor.get(key, {})): + for k, v in self._mor.get(key, {}).items(): yield k, v def mors_batch(self, key, batch_size, max_historical_metrics=None): @@ -111,7 +109,7 @@ def mors_batch(self, key, batch_size, max_historical_metrics=None): batch = {} nb_hist_metrics = 0 - for mor_name, mor in iteritems(mors_dict): + for mor_name, mor in mors_dict.items(): if mor['mor_type'] not in REALTIME_RESOURCES and mor.get('metrics'): # Those metrics are historical, let's make sure we don't have too # many of them in the same batch. @@ -182,7 +180,7 @@ def purge(self, key, ttl): with self._mor_lock: # Don't change the dict during iteration! # First collect the names of the Mors to remove... - for name, mor in iteritems(self._mor[key]): + for name, mor in self._mor[key].items(): age = now - mor['creation_time'] if age > ttl: mors_to_purge.append(name) diff --git a/vsphere/datadog_checks/vsphere/utils.py b/vsphere/datadog_checks/vsphere/utils.py index 136b041ed304b..99298b575cced 100644 --- a/vsphere/datadog_checks/vsphere/utils.py +++ b/vsphere/datadog_checks/vsphere/utils.py @@ -4,7 +4,6 @@ from typing import Any, Dict, List, Optional, Type # noqa: F401 from pyVmomi import vim -from six import iteritems from datadog_checks.base import to_string from datadog_checks.vsphere.constants import ( @@ -238,7 +237,7 @@ def get_mapped_instance_tag(metric_name): tag cannot be guessed by looking at the api results and has to be inferred using documentation or experience. This method acts as a utility to map a metric_name to the meaning of its instance tag. """ - for prefix, tag_key in iteritems(METRIC_TO_INSTANCE_TAG_MAPPING): + for prefix, tag_key in METRIC_TO_INSTANCE_TAG_MAPPING.items(): if metric_name.startswith(prefix): return tag_key return 'instance' diff --git a/vsphere/datadog_checks/vsphere/vsphere.py b/vsphere/datadog_checks/vsphere/vsphere.py index 80acdcb990ad9..f50cfdf73743c 100644 --- a/vsphere/datadog_checks/vsphere/vsphere.py +++ b/vsphere/datadog_checks/vsphere/vsphere.py @@ -11,7 +11,6 @@ from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Type, cast # noqa: F401 from pyVmomi import vim, vmodl -from six import iteritems from datadog_checks.base import AgentCheck, is_affirmative, to_string from datadog_checks.base.checks.libs.timer import Timer @@ -182,7 +181,7 @@ def collect_tags(self, infrastructure_data): resource_filters_without_tags = [f for f in self._config.resource_filters if not isinstance(f, TagFilter)] filtered_infra_data = { mor: props - for mor, props in iteritems(infrastructure_data) + for mor, props in infrastructure_data.items() if isinstance(mor, tuple(self._config.collected_resource_types)) and is_resource_collected_by_filters(mor, infrastructure_data, resource_filters_without_tags) } @@ -234,7 +233,7 @@ def refresh_infrastructure_cache(self): all_tags = self.collect_tags(infrastructure_data) self.infrastructure_cache.set_all_tags(all_tags) - for mor, properties in iteritems(infrastructure_data): + for mor, properties in infrastructure_data.items(): if not isinstance(mor, tuple(self._config.collected_resource_types)): # Do nothing for the resource types we do not collect continue @@ -487,7 +486,7 @@ def make_query_specs(self): counters = self.metrics_metadata_cache.get_metadata(resource_type) metric_ids = [] # type: List[vim.PerformanceManager.MetricId] is_historical_batch = metric_type == HISTORICAL - for counter_key, metric_name in iteritems(counters): + for counter_key, metric_name in 
counters.items(): # PerformanceManager.MetricId `instance` kwarg: # - An asterisk (*) to specify all instances of the metric for the specified counterId # - Double-quotes ("") to specify aggregated statistics @@ -504,7 +503,7 @@ def make_query_specs(self): for batch in self.make_batch(mors, metric_ids, resource_type, is_historical_batch=is_historical_batch): query_specs = [] - for mor, metrics in iteritems(batch): + for mor, metrics in batch.items(): query_spec = vim.PerformanceManager.QuerySpec() # type: vim.PerformanceManager.QuerySpec query_spec.entity = mor query_spec.metricId = metrics diff --git a/vsphere/tests/legacy/utils.py b/vsphere/tests/legacy/utils.py index bee31bf3dc758..5842f9817fded 100644 --- a/vsphere/tests/legacy/utils.py +++ b/vsphere/tests/legacy/utils.py @@ -7,7 +7,6 @@ from mock import MagicMock, Mock from pyVmomi import vim -from six import iteritems HERE = os.path.abspath(os.path.dirname(__file__)) @@ -90,7 +89,7 @@ def assertMOR(check, instance, name=None, spec=None, tags=None, count=None, subs instance_name = instance['name'] candidates = [] - mor_list = [mor for _, mors in iteritems(check.mor_objects_queue._objects_queue[instance_name]) for mor in mors] + mor_list = [mor for mors in check.mor_objects_queue._objects_queue[instance_name].values() for mor in mors] for mor in mor_list: if name is not None and name != mor['hostname']: diff --git a/yarn/datadog_checks/yarn/yarn.py b/yarn/datadog_checks/yarn/yarn.py index 461a02dd0f620..c6d94672f5469 100644 --- a/yarn/datadog_checks/yarn/yarn.py +++ b/yarn/datadog_checks/yarn/yarn.py @@ -1,9 +1,9 @@ # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +from urllib.parse import urljoin, urlsplit, urlunsplit + from requests.exceptions import ConnectionError, HTTPError, InvalidURL, SSLError, Timeout -from six import iteritems -from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit from datadog_checks.base import AgentCheck, is_affirmative from datadog_checks.base.errors import ConfigurationError @@ -198,7 +198,7 @@ def check(self, instance): app_tags = {} filtered_app_tags = {} - for dd_prefix, yarn_key in iteritems(app_tags): + for dd_prefix, yarn_key in app_tags.items(): if yarn_key in self._ALLOWED_APPLICATION_TAGS: filtered_app_tags[dd_prefix] = yarn_key app_tags = filtered_app_tags @@ -292,7 +292,7 @@ def _yarn_app_metrics(self, rm_address, app_tags, addl_tags): def _get_app_tags(self, app_json, app_tags): split_app_tags = self.instance.get('split_yarn_application_tags', DEFAULT_SPLIT_YARN_APPLICATION_TAGS) tags = [] - for dd_tag, yarn_key in iteritems(app_tags): + for dd_tag, yarn_key in app_tags.items(): try: val = app_json[yarn_key] if val: @@ -416,7 +416,7 @@ def _set_yarn_metrics_from_json(self, tags, metrics_json, yarn_metrics): """ Parse the JSON response and set the metrics """ - for dict_path, metric in iteritems(yarn_metrics): + for dict_path, metric in yarn_metrics.items(): metric_name, metric_type = metric metric_value = self._get_value_from_json(dict_path, metrics_json) @@ -465,7 +465,7 @@ def _rest_request_to_json(self, url, object_path, tags, *args, **kwargs): # Add kwargs as arguments if kwargs: - query = '&'.join(['{}={}'.format(key, value) for key, value in iteritems(kwargs)]) + query = '&'.join(['{}={}'.format(key, value) for key, value in kwargs.items()]) url = urljoin(url, '?' 
+ query) try: diff --git a/zk/datadog_checks/zk/zk.py b/zk/datadog_checks/zk/zk.py index 05d9493e1889b..dfa63e9415cb8 100644 --- a/zk/datadog_checks/zk/zk.py +++ b/zk/datadog_checks/zk/zk.py @@ -61,15 +61,12 @@ import struct from collections import defaultdict from contextlib import closing +from io import StringIO from packaging.version import Version -from six import PY3, StringIO, iteritems from datadog_checks.base import AgentCheck, ensure_bytes, ensure_unicode, is_affirmative -if PY3: - long = int - class ZKConnectionFailure(Exception): """Raised when we are unable to connect or get the output of a command.""" @@ -226,7 +223,7 @@ def report_instance_mode(self, mode): tags = self.base_tags + ['mode:%s' % mode] self.gauge('zookeeper.instances', 1, tags=tags) gauges[mode] = 1 - for k, v in iteritems(gauges): + for k, v in gauges.items(): gauge_name = 'zookeeper.instances.%s' % k self.gauge(gauge_name, v, tags=self.base_tags) @@ -309,15 +306,15 @@ def parse_stat(self, buf): _, value = buf.readline().split(':') # Fixme: This metric name is wrong. It should be removed in a major version of the agent # See https://github.com/DataDog/integrations-core/issues/816 - metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip()))) - metrics.append(ZKMetric('zookeeper.packets.received', long(value.strip()), "rate")) + metrics.append(ZKMetric('zookeeper.bytes_received', int(value.strip()))) + metrics.append(ZKMetric('zookeeper.packets.received', int(value.strip()), "rate")) # Sent: 1324 _, value = buf.readline().split(':') # Fixme: This metric name is wrong. It should be removed in a major version of the agent # See https://github.com/DataDog/integrations-core/issues/816 - metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip()))) - metrics.append(ZKMetric('zookeeper.packets.sent', long(value.strip()), "rate")) + metrics.append(ZKMetric('zookeeper.bytes_sent', int(value.strip()))) + metrics.append(ZKMetric('zookeeper.packets.sent', int(value.strip()), "rate")) if has_connections_val: # Connections: 1 @@ -330,12 +327,12 @@ def parse_stat(self, buf): # Outstanding: 0 _, value = buf.readline().split(':') - metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip()))) + metrics.append(ZKMetric('zookeeper.outstanding_requests', int(value.strip()))) # Zxid: 0x1034799c7 _, value = buf.readline().split(':') # Parse as a 64 bit hex int - zxid = long(value.strip(), 16) + zxid = int(value.strip(), 16) # convert to bytes zxid_bytes = struct.pack('>q', zxid) # the higher order 4 bytes is the epoch @@ -353,7 +350,7 @@ def parse_stat(self, buf): # Node count: 487 _, value = buf.readline().split(':') - metrics.append(ZKMetric('zookeeper.nodes', long(value.strip()))) + metrics.append(ZKMetric('zookeeper.nodes', int(value.strip()))) return metrics, tags, mode, version From 5f8df39c33bc4faec0c3209a4182772ba19cc16b Mon Sep 17 00:00:00 2001 From: Ilia Kurenkov Date: Wed, 18 Sep 2024 10:07:38 +0200 Subject: [PATCH 2/2] fix mapreduce --- mapreduce/datadog_checks/mapreduce/mapreduce.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mapreduce/datadog_checks/mapreduce/mapreduce.py b/mapreduce/datadog_checks/mapreduce/mapreduce.py index 87b54c1845e4a..31b850abaad47 100644 --- a/mapreduce/datadog_checks/mapreduce/mapreduce.py +++ b/mapreduce/datadog_checks/mapreduce/mapreduce.py @@ -91,7 +91,7 @@ def check(self, instance): # Report success after gathering all metrics from Application Master if running_jobs: - job_id, metrics = next(running_jobs.items()) + job_id, 
metrics = next(iter(running_jobs.items()))
             am_address = self._get_url_base(metrics['tracking_url'])
 
             self.service_check(
@@ -247,7 +247,7 @@ def _mapreduce_job_metrics(self, running_apps, addl_tags):
         """
         running_jobs = {}
 
-        for app_name, tracking_url in running_apps.items():
+        for app_name, tracking_url in running_apps.values():
             metrics_json = self._rest_request_to_json(
                 tracking_url, self.MAPREDUCE_JOBS_PATH, self.MAPREDUCE_SERVICE_CHECK
             )
@@ -287,7 +287,7 @@ def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags):
         """
         Get custom metrics specified for each counter
         """
-        for job_metrics in running_jobs.items():
+        for job_metrics in running_jobs.values():
             job_name = job_metrics['job_name']
 
             # Check if the job_name exist in the custom metrics
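
Note on the pattern: every hunk in this series applies the same handful of six-to-stdlib substitutions, plus one Python 3 subtlety fixed in mapreduce (dict views are not iterators). A minimal, hypothetical sketch of those rules follows; the sample dictionary, URL, and buffer contents are invented for illustration and are not taken from any of the checks:

    # Standalone Python 3 sketch of the substitutions used throughout this patch series.
    from io import StringIO               # replaces six.StringIO
    from urllib.parse import urljoin      # replaces six.moves.urllib.parse

    counters = {'cache_hit': 10, 'cache_miss': 2}   # hypothetical sample data

    # iteritems(d) / itervalues(d) become the plain dict view methods
    for name, value in counters.items():
        print(name, value)

    # string_types -> str, and long -> int, so the `if PY3: long = int` shim goes away
    assert isinstance('varnish', str)
    assert int('42') == 42 and int('0x1034799c7', 16) > 0

    # dict views are not iterators: next() needs an explicit iter(),
    # which is why the mapreduce fix wraps running_jobs.items() in iter()
    first_name, first_value = next(iter(counters.items()))

    # six.moves.urllib.parse and six.StringIO map directly onto the stdlib modules
    url = urljoin('http://localhost:8088/ws/v1/cluster', '?states=RUNNING')
    buf = StringIO('Zookeeper version: 3.4.10\n')
    print(url, buf.readline().strip())

The iter() wrapper is the only behavioral change; the remaining substitutions are drop-in renames.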