Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use comprehensions -- ruff check --select=C4 #83

Merged
Merged 1 commit on Mar 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ jobs:

- name: Analysing the code with pylint
run: |
python -m pylint -E partitionmanager
python -m pylint --errors-only partitionmanager
jcjones marked this conversation as resolved.
Show resolved Hide resolved

- name: Lint Python code with Ruff
run: |
Expand Down
12 changes: 6 additions & 6 deletions partitionmanager/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,8 +182,8 @@ def _extract_single_column(row):
def list_tables(conf):
"""List all tables for the current database."""
rows = conf.dbcmd.run("SHOW TABLES;")
table_names = map(lambda row: _extract_single_column(row), rows)
table_objects = map(lambda name: partitionmanager.types.Table(name), table_names)
table_names = (_extract_single_column(row) for row in rows)
table_objects = (partitionmanager.types.Table(name) for name in table_names)
return list(table_objects)


Expand Down Expand Up @@ -287,7 +287,7 @@ def do_partition(conf):
log.info("Database is read-only, only emitting statistics")
if conf.prometheus_stats_path:
do_stats(conf)
return dict()
return {}

if conf.noop:
log.info("Running in noop mode, no changes will be made")
Expand All @@ -304,7 +304,7 @@ def do_partition(conf):
type_name="counter",
)

all_results = dict()
all_results = {}
for table in conf.tables:
time_start = None
try:
Expand Down Expand Up @@ -384,7 +384,7 @@ def do_stats(conf, metrics=partitionmanager.stats.PrometheusMetrics()):

log = logging.getLogger("do_stats")

all_results = dict()
all_results = {}
for table in list_tables(conf):
table_problems = pm_tap.get_table_compatibility_problems(conf.dbcmd, table)
if table_problems:
Expand Down Expand Up @@ -476,7 +476,7 @@ def drop_cmd(args):


def do_find_drops_for_tables(conf):
all_results = dict()
all_results = {}
for table in conf.tables:
log = logging.getLogger(f"do_find_drops_for_tables:{table.name}")

Expand Down
52 changes: 22 additions & 30 deletions partitionmanager/cli_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def test_partition_cmd_several_tables(self):
output = partition_cmd(args)

self.assertEqual(len(output), 2)
self.assertSetEqual(set(output), set(["testtable", "another_table"]))
self.assertSetEqual(set(output), {"testtable", "another_table"})

def test_partition_unpartitioned_table(self):
o = run_partition_cmd_yaml(
Expand Down Expand Up @@ -186,7 +186,7 @@ def test_partition_cmd_two_tables(self):
mariadb: {str(fake_exec)}
"""
)
self.assertSetEqual(set(o), set(["test", "test_with_retention"]))
self.assertSetEqual(set(o), {"test", "test_with_retention"})

def test_partition_period_daily(self):
o = run_partition_cmd_yaml(
Expand All @@ -201,7 +201,7 @@ def test_partition_period_daily(self):
"""
)
self.assertSequenceEqual(
set(o), set(["partitioned_last_week", "partitioned_yesterday"])
set(o), {"partitioned_last_week", "partitioned_yesterday"}
)

def test_partition_period_seven_days(self):
Expand All @@ -221,16 +221,14 @@ def test_partition_period_seven_days(self):

self.assertEqual(
set(logctx.output),
set(
[
"INFO:partition:Evaluating Table partitioned_last_week "
"(duration=7 days, 0:00:00)",
"DEBUG:partition:Table partitioned_last_week has no pending SQL updates.", # noqa: E501
"INFO:partition:Evaluating Table partitioned_yesterday "
"(duration=7 days, 0:00:00)",
"DEBUG:partition:Table partitioned_yesterday has no pending SQL updates.", # noqa: E501
]
),
{
"INFO:partition:Evaluating Table partitioned_last_week "
"(duration=7 days, 0:00:00)",
"DEBUG:partition:Table partitioned_last_week has no pending SQL updates.", # noqa: E501
"INFO:partition:Evaluating Table partitioned_yesterday "
"(duration=7 days, 0:00:00)",
"DEBUG:partition:Table partitioned_yesterday has no pending SQL updates.", # noqa: E501
},
)
self.assertSequenceEqual(list(o), [])

Expand All @@ -249,7 +247,7 @@ def test_partition_period_different_per_table(self):
"""
)
self.assertSequenceEqual(
set(o), set(["partitioned_yesterday", "partitioned_last_week"])
set(o), {"partitioned_yesterday", "partitioned_last_week"}
)

def test_partition_with_db_url(self):
Expand Down Expand Up @@ -283,7 +281,7 @@ def assert_stats_results(self, results):

def assert_stats_prometheus_outfile(self, prom_file):
lines = prom_file.split("\n")
metrics = dict()
metrics = {}
for line in lines:
if not line.startswith("#") and len(line) > 0:
key, value = line.split(" ")
Expand Down Expand Up @@ -350,9 +348,7 @@ def test_cli_tables_override_yaml(self):
""",
datetime.now(tz=timezone.utc),
)
self.assertEqual(
{str(x.name) for x in conf.tables}, set(["table_one", "table_two"])
)
self.assertEqual({str(x.name) for x in conf.tables}, {"table_one", "table_two"})

def test_cli_mariadb_override_yaml(self):
args = PARSER.parse_args(["--mariadb", "/usr/bin/true", "stats"])
Expand Down Expand Up @@ -651,12 +647,10 @@ def test_drop_invalid_config(self):
)
self.assertEqual(
set(logctx.output),
set(
[
"WARNING:do_find_drops_for_tables:unused:"
"Cannot process Table unused: no retention specified"
]
),
{
"WARNING:do_find_drops_for_tables:unused:"
"Cannot process Table unused: no retention specified"
},
)

def test_drop_no_sql(self):
Expand All @@ -675,10 +669,8 @@ def test_drop_no_sql(self):
)
self.assertEqual(
set(logctx.output),
set(
[
"WARNING:do_find_drops_for_tables:unused:"
"Cannot process Table unused: no date query specified"
]
),
{
"WARNING:do_find_drops_for_tables:unused:"
"Cannot process Table unused: no date query specified"
},
)
2 changes: 1 addition & 1 deletion partitionmanager/database_helpers_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

class MockDatabase(DatabaseCommand):
def __init__(self):
self._responses = list()
self._responses = []
self.num_queries = 0

def add_response(self, expected, response):
Expand Down
2 changes: 1 addition & 1 deletion partitionmanager/dropper.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def _drop_statement(table, partition_list):
if not partition_list:
raise ValueError("Partition list may not be empty")

partitions = ",".join(map(lambda x: f"`{x.name}`", partition_list))
partitions = ",".join(f"`{x.name}`" for x in partition_list)

alter_cmd = f"ALTER TABLE `{table.name}` DROP PARTITION IF EXISTS {partitions};"

Expand Down
2 changes: 1 addition & 1 deletion partitionmanager/dropper_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def _timestamp_rsp(year, mo, day):

class MockDatabase(DatabaseCommand):
def __init__(self):
self._responses = list()
self._responses = []
self.num_queries = 0

def add_response(self, expected, response):
Expand Down
8 changes: 4 additions & 4 deletions partitionmanager/migrate.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def write_state_info(conf, out_fp):
log = logging.getLogger("write_state_info")

log.info("Writing current state information")
state_info = {"time": conf.curtime, "tables": dict()}
state_info = {"time": conf.curtime, "tables": {}}
for table in conf.tables:
map_data = _get_map_data_from_config(conf, table)

Expand Down Expand Up @@ -90,7 +90,7 @@ def _plan_partitions_for_time_offsets(

rate_of_change: an ordered list of positions per RATE_UNIT.
"""
changes = list()
changes = []
for (i, offset), is_final in partitionmanager.tools.iter_show_end(
enumerate(time_offsets)
):
Expand Down Expand Up @@ -243,7 +243,7 @@ def calculate_sql_alters_from_state_info(conf, in_fp):
f"{prior_data['time']} = {time_delta}"
)

commands = dict()
commands = {}

for table_name, prior_pos in prior_data["tables"].items():
table = None
Expand All @@ -270,7 +270,7 @@ def calculate_sql_alters_from_state_info(conf, in_fp):
delta_positions = list(
map(operator.sub, ordered_current_pos, ordered_prior_pos)
)
rate_of_change = list(map(lambda pos: pos / time_delta, delta_positions))
rate_of_change = [pos / time_delta for pos in delta_positions]

max_val_part = map_data["partitions"][-1]
if not isinstance(max_val_part, partitionmanager.types.MaxValuePartition):
Expand Down
2 changes: 1 addition & 1 deletion partitionmanager/migrate_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@

class MockDatabase(DatabaseCommand):
def __init__(self):
self._response = list()
self._response = []
self._select_response = [[{"id": 150}]]
self.num_queries = 0

Expand Down
6 changes: 3 additions & 3 deletions partitionmanager/sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,15 +53,15 @@ def __init__(self):
self.rows = None
self.current_row = None
self.current_field = None
self.current_elements = list()
self.current_elements = []
self.statement = None

def parse(self, data):
"""Return rows from an XML Result object."""
if self.rows is not None:
raise ValueError("XmlResult objects can only be used once")

self.rows = list()
self.rows = []
self.xmlparser.Parse(data)

if self.current_elements:
Expand Down Expand Up @@ -186,4 +186,4 @@ def run(self, sql_cmd):
logging.debug(f"IntegratedDatabaseCommand executing {sql_cmd}")
with self.connection.cursor() as cursor:
cursor.execute(sql_cmd)
return [row for row in cursor]
return list(cursor)
10 changes: 5 additions & 5 deletions partitionmanager/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,14 @@ class PrometheusMetrics:
"""A set of metrics that can be rendered for Prometheus."""

def __init__(self):
self.metrics = dict()
self.help = dict()
self.types = dict()
self.metrics = {}
self.help = {}
self.types = {}

def add(self, name, table, data):
"""Record metric data representing the name and table."""
if name not in self.metrics:
self.metrics[name] = list()
self.metrics[name] = []
self.metrics[name].append(PrometheusMetric(name, table, data))

def describe(self, name, help_text=None, type_name=None):
Expand All @@ -50,7 +50,7 @@ def render(self, fp):
if n in self.types:
print(f"# TYPE {name} {self.types[n]}", file=fp)
for m in metrics:
labels = list()
labels = []
if m.table:
labels = [f'table="{m.table}"']
print(f"{name}{{{','.join(labels)}}} {m.data}", file=fp)
Expand Down
4 changes: 2 additions & 2 deletions partitionmanager/stats_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

class TestStatistics(unittest.TestCase):
def test_statistics_no_partitions(self):
s = get_statistics(list(), ts, Table("no_parts"))
s = get_statistics([], ts, Table("no_parts"))
self.assertEqual(s, {"partitions": 0})

def test_statistics_single_unnamed_partition(self):
Expand Down Expand Up @@ -44,7 +44,7 @@ def test_statistics_two_partitions(self):
)

def test_statistics_weekly_partitions_year(self):
parts = list()
parts = []
base = datetime(2020, 5, 20, tzinfo=timezone.utc)
for w in range(52):
partName = f"p_{base + timedelta(weeks=w):%Y%m%d}"
Expand Down
Loading