From 5b3b6f3aed0cc367c7498867cfd068aa98042aed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Chapoton?= Date: Wed, 11 Oct 2023 20:29:04 +0200 Subject: [PATCH] ruff fixes for list, dict, set comprehension --- lmfdb/abvar/fq/stats.py | 4 ++-- lmfdb/api/api.py | 10 +++++----- lmfdb/api2/utils.py | 2 +- lmfdb/app.py | 4 ++-- lmfdb/backend/searchtable.py | 2 +- lmfdb/backend/statstable.py | 4 ++-- lmfdb/characters/main.py | 8 ++++---- lmfdb/characters/web_character.py | 8 ++++---- lmfdb/classical_modular_forms/web_newform.py | 4 ++-- lmfdb/classical_modular_forms/web_space.py | 2 +- lmfdb/ecnf/isog_class.py | 2 +- lmfdb/elliptic_curves/elliptic_curve.py | 4 ++-- lmfdb/elliptic_curves/web_ec.py | 2 +- lmfdb/galois_groups/transitive_group.py | 6 +++--- lmfdb/genus2_curves/web_g2c.py | 2 +- lmfdb/groups/abstract/main.py | 6 ++---- lmfdb/groups/abstract/web_groups.py | 6 +++--- lmfdb/hilbert_modular_forms/hmf_stats.py | 2 +- lmfdb/homepage/sidebar.py | 4 ++-- lmfdb/hypergm/main.py | 2 +- lmfdb/knowledge/knowl.py | 19 +++++++++---------- lmfdb/knowledge/main.py | 2 +- lmfdb/lfunctions/LfunctionDatabase.py | 4 ++-- lmfdb/local_fields/main.py | 2 +- lmfdb/modular_curves/main.py | 2 +- lmfdb/number_fields/number_field.py | 2 +- lmfdb/siegel_modular_forms/sample.py | 8 ++++---- lmfdb/typed_data/type_generation.py | 4 ++-- lmfdb/users/pwdmanager.py | 4 ++-- lmfdb/utils/search_parsing.py | 4 ++-- lmfdb/utils/trace_hash.py | 2 +- lmfdb/verify/mf_newforms.py | 2 +- lmfdb/verify/verification.py | 2 +- 33 files changed, 69 insertions(+), 72 deletions(-) diff --git a/lmfdb/abvar/fq/stats.py b/lmfdb/abvar/fq/stats.py index 7673a46583..19647afa4b 100644 --- a/lmfdb/abvar/fq/stats.py +++ b/lmfdb/abvar/fq/stats.py @@ -178,11 +178,11 @@ def _counts(self): @lazy_attribute def qs(self): - return sorted(set(q for g, q in self._counts)) + return sorted({q for g, q in self._counts}) @lazy_attribute def gs(self): - return sorted(set(g for g, q in self._counts)) + return sorted({g for g, q in self._counts}) @lazy_attribute def isogeny_knowl(self): diff --git a/lmfdb/api/api.py b/lmfdb/api/api.py index fbe82062c1..e013f18410 100644 --- a/lmfdb/api/api.py +++ b/lmfdb/api/api.py @@ -130,15 +130,15 @@ def split_db(tablename): info['dataSize'] = mb(dataSize) info['indexSize'] = mb(indexSize) if info['sortby'] == 'name': - sortedkeys = sorted(list(stats)) + sortedkeys = sorted(stats) elif info['sortby'] == 'objects' and info['groupby'] == 'db': - sortedkeys = sorted(list(stats),key=lambda x: (-stats[x]['dbObjects'],stats[x]['db'],-stats[x]['nrows'],stats[x]['table'])) + sortedkeys = sorted(stats, key=lambda x: (-stats[x]['dbObjects'],stats[x]['db'],-stats[x]['nrows'],stats[x]['table'])) elif info['sortby'] == 'objects': - sortedkeys = sorted(list(stats),key=lambda x: (-stats[x]['nrows'],stats[x]['db'],stats[x]['table'])) + sortedkeys = sorted(stats, key=lambda x: (-stats[x]['nrows'],stats[x]['db'],stats[x]['table'])) elif info['sortby'] == 'size' and info['groupby'] == 'db': - sortedkeys = sorted(list(stats),key=lambda x: (-stats[x]['dbSize'],stats[x]['db'],-stats[x]['size'],stats[x]['table'])) + sortedkeys = sorted(stats, key=lambda x: (-stats[x]['dbSize'],stats[x]['db'],-stats[x]['size'],stats[x]['table'])) else: - sortedkeys = sorted(list(stats),key=lambda x: (-stats[x]['size'],stats[x]['db'],stats[x]['table'])) + sortedkeys = sorted(stats, key=lambda x: (-stats[x]['size'],stats[x]['db'],stats[x]['table'])) info['stats'] = [stats[key] for key in sortedkeys] return render_template('api-stats.html', info=info) 
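Note on the two hunks above (stats.py, api.py): sorted() accepts any iterable, including a dict (which it iterates by key) and a generator, so the list(...) wrappers can be dropped and set(genexp) can become a set comprehension without changing behavior. A minimal sketch with throwaway data (the names below are illustrative, not LMFDB objects):

# sorted() takes any iterable; the intermediate list() copy adds nothing.
stats = {"t1": {"nrows": 10}, "t2": {"nrows": 3}}
assert sorted(list(stats)) == sorted(stats) == ["t1", "t2"]

# The key= callable sees the same elements (the dict's keys) either way.
assert sorted(stats, key=lambda name: -stats[name]["nrows"]) == ["t1", "t2"]

# set(<generator>) and a set comprehension build the same set;
# the comprehension just avoids the intermediate generator object.
pairs = [(1, 4), (1, 9), (2, 9)]
assert sorted(set(q for g, q in pairs)) == sorted({q for g, q in pairs}) == [4, 9]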
diff --git a/lmfdb/api2/utils.py b/lmfdb/api2/utils.py index e4e45bf56c..414ceeffee 100644 --- a/lmfdb/api2/utils.py +++ b/lmfdb/api2/utils.py @@ -444,7 +444,7 @@ def simple_search_postgres(search_dict, projection=None): metadata['record_count'] = info['number'] metadata['correct_count'] = info['exact_count'] if data: - data_out = list(list(data)) + data_out = list(data) else: data_out = [] metadata['view_count'] = len(data_out) diff --git a/lmfdb/app.py b/lmfdb/app.py index 0a02d9b261..c631615b75 100644 --- a/lmfdb/app.py +++ b/lmfdb/app.py @@ -163,7 +163,7 @@ def modify_url(**replace): @app.context_processor def inject_sidebar(): from .homepage import get_sidebar - return dict(sidebar=get_sidebar()) + return {"sidebar": get_sidebar()} ############################## # Bottom link to google code # @@ -657,7 +657,7 @@ def add_colors(): if color is None: from .utils.config import Configuration color = Configuration().get_color() - return dict(color=all_color_schemes[color].dict()) + return {"color": all_color_schemes[color].dict()} @app.route("/style.css") diff --git a/lmfdb/backend/searchtable.py b/lmfdb/backend/searchtable.py index 713cd38f50..4fe1a8fc40 100644 --- a/lmfdb/backend/searchtable.py +++ b/lmfdb/backend/searchtable.py @@ -81,7 +81,7 @@ def _parse_projection(self, projection): elif projection == 3: return tuple(["id"] + self.search_cols), tuple(self.extra_cols) elif isinstance(projection, dict): - projvals = set(bool(val) for val in projection.values()) + projvals = {bool(val) for val in projection.values()} if len(projvals) > 1: raise ValueError("You cannot both include and exclude.") including = projvals.pop() diff --git a/lmfdb/backend/statstable.py b/lmfdb/backend/statstable.py index 1fca522599..fa0c4c7f87 100644 --- a/lmfdb/backend/statstable.py +++ b/lmfdb/backend/statstable.py @@ -545,7 +545,7 @@ def column_counts(self, cols, constraint=None, threshold=1, split_list=False): ccols, cvals, allcols = Json([]), Json([]), cols else: ccols, cvals = self._split_dict(constraint) - allcols = sorted(list(set(cols + list(constraint)))) + allcols = sorted(set(cols + list(constraint))) # Ideally we would include the constraint in the query, but it's not easy to do that # So we check the results in Python jcols = Json(cols) @@ -1155,7 +1155,7 @@ def _process_constraint(self, cols, constraint): else: ccols, cvals = self._split_dict(constraint) # We need to include the constraints in the count table if we're not grouping by that column - allcols = sorted(list(set(cols + list(constraint)))) + allcols = sorted(set(cols + list(constraint))) if any(key.startswith("$") for key in constraint): raise ValueError("Top level special keys not allowed") qstr, values = self.table._parse_dict(constraint) diff --git a/lmfdb/characters/main.py b/lmfdb/characters/main.py index e3668741cc..14e7276fd4 100644 --- a/lmfdb/characters/main.py +++ b/lmfdb/characters/main.py @@ -274,7 +274,7 @@ def render_DirichletNavigation(): headers, entries, rows, cols = get_character_modulus(modulus_start, modulus_end, limit=8) info['entries'] = entries info['rows'] = list(range(modulus_start, modulus_end + 1)) - info['cols'] = sorted(list({r[1] for r in entries})) + info['cols'] = sorted({r[1] for r in entries}) return render_template("ModulusList.html", **info) except ValueError as err: flash_error("Error raised in parsing: %s", err) @@ -428,7 +428,7 @@ def render_Dirichletwebpage(modulus=None, orbit_label=None, number=None): info['title'] = 'Group of Dirichlet characters of modulus ' + str(modulus) info['bread'] = 
bread([('%s' % modulus, url_for(".render_Dirichletwebpage", modulus=modulus))]) info['learnmore'] = learn() - info['code'] = dict([(k[4:], info[k]) for k in info if k[0:4] == "code"]) + info['code'] = {k[4:]: info[k] for k in info if k[0:4] == "code"} info['code']['show'] = {lang: '' for lang in info['codelangs']} # use default show names if 'gens' in info: info['generators'] = ', '.join(r'$\chi_{%s}(%s,\cdot)$' % (url_for(".render_Dirichletwebpage", modulus=modulus, number=g), modulus, g) for g in info['gens']) @@ -446,7 +446,7 @@ def render_Dirichletwebpage(modulus=None, orbit_label=None, number=None): info['show_orbit_label'] = True info['downloads'] = [('Underlying data', url_for('.dirchar_data', label=f"{modulus}.{orbit_label}"))] info['learnmore'] = learn() - info['code'] = dict([(k[4:], info[k]) for k in info if k[0:4] == "code"]) + info['code'] = {k[4:]: info[k] for k in info if k[0:4] == "code"} info['code']['show'] = {lang: '' for lang in info['codelangs']} # use default show names info['bread'] = bread( [('%s' % modulus, url_for(".render_Dirichletwebpage", modulus=modulus)), @@ -509,7 +509,7 @@ def render_Dirichletwebpage(modulus=None, orbit_label=None, number=None): info['bread'] = bread_crumbs info['learnmore'] = learn() info['downloads'] = downloads - info['code'] = dict([(k[4:], info[k]) for k in info if k[0:4] == "code"]) + info['code'] = {k[4:]: info[k] for k in info if k[0:4] == "code"} info['code']['show'] = {lang: '' for lang in info['codelangs']} # use default show names info['KNOWL_ID'] = 'character.dirichlet.%s.%s' % (modulus, number) return render_template('Character.html', **info) diff --git a/lmfdb/characters/web_character.py b/lmfdb/characters/web_character.py index b3ba459094..5ba51283d2 100644 --- a/lmfdb/characters/web_character.py +++ b/lmfdb/characters/web_character.py @@ -681,9 +681,9 @@ def _set_galoisorbit(self, gal_orbit): return upper_limit = min(31, self.order + 1) gal_orbit = gal_orbit[:upper_limit] - self.galoisorbit = list( + self.galoisorbit = [ self._char_desc(num, prim=self.isprimitive) for num in gal_orbit - ) + ] def _set_kernel_field_poly(self): if self.order <= 100: @@ -1165,9 +1165,9 @@ def _populate_from_db(self): self.rowtruncate = True self.galorbnums = self.first_chi.galois_orbit(upper_limit) logger.info(f"[WebDBDirichletOrbit.populate] found galois orbit {self.galorbnums}") - self.galoisorbit = list( + self.galoisorbit = [ self._char_desc(num, prim=orbit_data['is_primitive']) for num in self.galorbnums - ) + ] def _set_kernel_field_poly(self, orbit_data): an_orbit_rep = int(orbit_data['first_label'].split(".")[1]) diff --git a/lmfdb/classical_modular_forms/web_newform.py b/lmfdb/classical_modular_forms/web_newform.py index 03e8fedc91..48c2cfd10d 100644 --- a/lmfdb/classical_modular_forms/web_newform.py +++ b/lmfdb/classical_modular_forms/web_newform.py @@ -286,8 +286,8 @@ def __init__(self, data, space=None, all_m=False, all_n=False, embedding_label=N # Breadcrumbs @property def bread(self): - kwds = dict(level=self.level, weight=self.weight, char_orbit_label=self.char_orbit_label, - hecke_orbit=cremona_letter_code(self.hecke_orbit - 1)) + kwds = {"level": self.level, "weight": self.weight, "char_orbit_label": self.char_orbit_label, + "hecke_orbit": cremona_letter_code(self.hecke_orbit - 1)} if self.embedding_label is not None: kwds['embedding_label'] = self.embedding_label return get_bread(**kwds) diff --git a/lmfdb/classical_modular_forms/web_space.py b/lmfdb/classical_modular_forms/web_space.py index 95b329cb9a..12e329ddda 100644 --- 
a/lmfdb/classical_modular_forms/web_space.py +++ b/lmfdb/classical_modular_forms/web_space.py @@ -98,7 +98,7 @@ def url_sign_char(x): return "-" if x else "%2B" continue b = list(reversed(ZZ(i).bits())) b = [0 for j in range(num_primes-len(b))] + b - row = list(map(lambda x:r'\(%s\)'%sign_char(x),b)) + row = [r'\(%s\)'%sign_char(x) for x in b] sign = sum(b) % 2 if num_primes > 1: row.append(r"$%s$"%sign_char(sign)) diff --git a/lmfdb/ecnf/isog_class.py b/lmfdb/ecnf/isog_class.py index 427a80ba42..bbcef18dff 100644 --- a/lmfdb/ecnf/isog_class.py +++ b/lmfdb/ecnf/isog_class.py @@ -256,7 +256,7 @@ def make_graph(M): def make_iso_matrix(clist): # clist is a list of ECNFs Elist = [E.E for E in clist] cl = Elist[0].isogeny_class() - perm = dict([(i, cl.index(E)) for i, E in enumerate(Elist)]) + perm = {i: cl.index(E) for i, E in enumerate(Elist)} return permute_mat(cl.matrix(), perm, True) diff --git a/lmfdb/elliptic_curves/elliptic_curve.py b/lmfdb/elliptic_curves/elliptic_curve.py index 4f0c420bc0..ee13e59972 100644 --- a/lmfdb/elliptic_curves/elliptic_curve.py +++ b/lmfdb/elliptic_curves/elliptic_curve.py @@ -99,8 +99,8 @@ def rational_elliptic_curves(err_args=None): counts = get_stats() conductor_list_endpoints = [1, 100, 1000, 10000, 100000, int(counts.max_N_Cremona) + 1] - conductor_list = dict([(r,r) for r in ["%s-%s" % (start, end - 1) for start, end in zip(conductor_list_endpoints[:-1], - conductor_list_endpoints[1:])]]) + conductor_list = {r: r for r in ["%s-%s" % (start, end - 1) for start, end in zip(conductor_list_endpoints[:-1], + conductor_list_endpoints[1:])]} conductor_list[">{}".format(counts.max_N_Cremona)] = "{}-".format(counts.max_N_Cremona) rank_list = list(range(counts.max_rank + 1)) diff --git a/lmfdb/elliptic_curves/web_ec.py b/lmfdb/elliptic_curves/web_ec.py index d12a68b9b1..342cb2f8c1 100644 --- a/lmfdb/elliptic_curves/web_ec.py +++ b/lmfdb/elliptic_curves/web_ec.py @@ -646,7 +646,7 @@ def make_mwbsd(self): mwbsd['heights'] = [RR(h) for h in mwbsd['heights']] # Mordell-Weil group - invs = [0 for a in range(self.rank)] + [n for n in self.torsion_structure] + invs = [0 for a in range(self.rank)] + list(self.torsion_structure) mwbsd['mw_struct'] = "trivial" if len(invs) == 0 else r'\(' + r' \oplus '.join((r'\Z' if n == 0 else r'\Z/{%s}\Z' % n) for n in invs) + r'\)' # Torsion structure and generators: diff --git a/lmfdb/galois_groups/transitive_group.py b/lmfdb/galois_groups/transitive_group.py index 80a5cf685f..e1c6015492 100644 --- a/lmfdb/galois_groups/transitive_group.py +++ b/lmfdb/galois_groups/transitive_group.py @@ -858,12 +858,12 @@ def complete_group_codes(codes): # Load all sibling representations from the database labels = ["%sT%s" % elt[0] for elt in aliases.values()] -siblings = dict( - (elt["label"], [tuple(z[0]) for z in elt["siblings"]]) +siblings = { + elt["label"]: [tuple(z[0]) for z in elt["siblings"]] for elt in db.gps_transitive.search( {"label": {"$in": labels}}, ["label", "siblings"] ) -) +} for ky in aliases.keys(): nt = aliases[ky][0] label = "%sT%s"% nt diff --git a/lmfdb/genus2_curves/web_g2c.py b/lmfdb/genus2_curves/web_g2c.py index d84496ac06..3fcecaf9f1 100644 --- a/lmfdb/genus2_curves/web_g2c.py +++ b/lmfdb/genus2_curves/web_g2c.py @@ -508,7 +508,7 @@ def split_statement(coeffs, labels, condnorms): # Otherwise give defining equation: else: statement += r"
<br>&nbsp;&nbsp;\(y^2 = x^3 - g_4 / 48 x - g_6 / 864\) with" - statement += r"<br>&nbsp;&nbsp;\(g_4 = %s\)<br>&nbsp;&nbsp;\(g_6 = %s\)" % tuple(map (lambda x: strlist_to_nfelt(x, 'b'),coeffs[n])) + statement += r"<br>&nbsp;&nbsp;\(g_4 = %s\)<br>&nbsp;&nbsp;\(g_6 = %s\)" % tuple((strlist_to_nfelt(x, 'b') for x in coeffs[n])) statement += "<br>
   Conductor norm: %s" % condnorms[n] return statement diff --git a/lmfdb/groups/abstract/main.py b/lmfdb/groups/abstract/main.py index f32c2d44cc..6094790a69 100644 --- a/lmfdb/groups/abstract/main.py +++ b/lmfdb/groups/abstract/main.py @@ -520,8 +520,7 @@ def create_boolean_string(gp, type="normal"): "quasisimple", "almost_simple", ] - short_show = set( - [ + short_show = { "cyclic", "abelian", "nonabelian", @@ -530,8 +529,7 @@ def create_boolean_string(gp, type="normal"): "nab_simple", "nonsolvable", "nab_perfect", - ] - ) + } short_string = type == "knowl" # Implications should give edges of a DAG, and should be listed in the group.properties_interdependencies knowl diff --git a/lmfdb/groups/abstract/web_groups.py b/lmfdb/groups/abstract/web_groups.py index 67daaa8538..ea4ae519af 100644 --- a/lmfdb/groups/abstract/web_groups.py +++ b/lmfdb/groups/abstract/web_groups.py @@ -1109,7 +1109,7 @@ def impose_limit(n): if any(H.aut_label is None or H.diagramx is None for H in subs): # We don't know subgroups up to automorphism or can't lay out the subgroups return 0 - return impose_limit(len(set(H.aut_label for H in subs))) + return impose_limit(len({H.aut_label for H in subs})) else: if self.outer_equivalence or any(H.diagramx is None for H in subs): # We don't know subgroups up to conjugacy or can't lay out subgroups @@ -1490,7 +1490,7 @@ def aut_class_counts(self): @lazy_attribute def tex_images(self): - all_tex = list(set(H.subgroup_tex for H in self.subgroups.values())) + ["?"] + all_tex = list({H.subgroup_tex for H in self.subgroups.values()}) + ["?"] return { rec["label"]: rec["image"] for rec in db.gps_images.search({"label": {"$in": all_tex}}, ["label", "image"]) @@ -2159,7 +2159,7 @@ def FrattiniSubgroup(self): def Phiquotient(self): # Make all exponents by 1 - snf1 = [prod([z for z in ZZ(n).prime_factors()]) for n in self.snf] + snf1 = [prod(list(ZZ(n).prime_factors())) for n in self.snf] return LiveAbelianGroup(snf1) def FittingSubgroup(self): diff --git a/lmfdb/hilbert_modular_forms/hmf_stats.py b/lmfdb/hilbert_modular_forms/hmf_stats.py index df0d53213c..09253ea10d 100644 --- a/lmfdb/hilbert_modular_forms/hmf_stats.py +++ b/lmfdb/hilbert_modular_forms/hmf_stats.py @@ -68,7 +68,7 @@ def counts(self): attrs = ["degree", "discriminant", "label"] fields = list(db.hmf_fields.search({}, attrs, sort=attrs)) - degrees = sorted(set(F["degree"] for F in fields)) + degrees = sorted({F["degree"] for F in fields}) by_deg = {d: [F for F in fields if F["degree"] == d] for d in degrees} counts["degrees"] = degrees counts["nfields"] = len(fields) diff --git a/lmfdb/homepage/sidebar.py b/lmfdb/homepage/sidebar.py index ea05b4de07..878c704dab 100755 --- a/lmfdb/homepage/sidebar.py +++ b/lmfdb/homepage/sidebar.py @@ -10,14 +10,14 @@ def linked_name(item, level=""): """ if level == "heading": if 'url_for' in item: - url = url_for(item['url_for'],**item.get('url_args',dict())) + url = url_for(item['url_for'],**item.get('url_args',{})) return ''.join(['
<h2 class="link"><a href="',url,'">',item['title'],'</a></h2>\n']) else: return ''.join(['<h2>',item['title'],'</h2>
\n']) else: if 'url_for' in item and not ('status' in item and item['status'] == 'future'): - url = url_for(item['url_for'],**item.get('url_args',dict())) + url = url_for(item['url_for'],**item.get('url_args',{})) this_entry = ''.join(['',item['title'],'']) else: this_entry = item['title'] diff --git a/lmfdb/hypergm/main.py b/lmfdb/hypergm/main.py index ca93ec3e1f..58f33b5ab9 100644 --- a/lmfdb/hypergm/main.py +++ b/lmfdb/hypergm/main.py @@ -603,7 +603,7 @@ def parse_pandt(info, family): try: if info.get('t'): - info['ts'] = sorted(list(set(map(QQ, info.get('t').split(","))))) + info['ts'] = sorted(set(map(QQ, info.get('t').split(",")))) info['t'] = ",".join(map(str, info['ts'])) else: info['ts'] = None diff --git a/lmfdb/knowledge/knowl.py b/lmfdb/knowledge/knowl.py index 50a78f8f90..a753856a08 100644 --- a/lmfdb/knowledge/knowl.py +++ b/lmfdb/knowledge/knowl.py @@ -50,8 +50,7 @@ # this one is different from the hashtag regex in main.py, # because of the match-group ( ... ) hashtag_keywords = re.compile(r'#[a-zA-Z][a-zA-Z0-9-_]{1,}\b') -common_words = set( - ['and', 'an', 'or', 'some', 'many', 'has', 'have', 'not', 'too', 'mathbb', 'title', 'for']) +common_words = {'and', 'an', 'or', 'some', 'many', 'has', 'have', 'not', 'too', 'mathbb', 'title', 'for'} # categories, level 0, never change this id #CAT_ID = 'categories' @@ -110,7 +109,7 @@ def extract_typ(kid): def extract_links(content): - return sorted(set(x[2] for x in link_finder_re.findall(content) if x[2])) + return sorted({x[2] for x in link_finder_re.findall(content) if x[2]}) def normalize_define(term): @@ -122,7 +121,7 @@ def normalize_define(term): def extract_defines(content): - return sorted(set(x.strip() for x in defines_finder_re.findall(content))) + return sorted({x.strip() for x in defines_finder_re.findall(content)}) # We don't use the PostgresTable from lmfdb.backend.database # since it's aimed at constructing queries for mathematical objects @@ -145,7 +144,7 @@ def titles(self): now = time.time() if now - self.cached_titles_timestamp > self.caching_time: self.cached_titles_timestamp = now - self.cached_titles = dict([(elt['id'], elt['title']) for elt in self.get_all_knowls(['id','title'])]) + self.cached_titles = {elt['id']: elt['title'] for elt in self.get_all_knowls(['id','title'])} return self.cached_titles @property @@ -180,7 +179,7 @@ def get_knowl(self, ID, if not beta: cur = self._execute(selecter, [ID, 1]) if cur.rowcount > 0: - return {k:v for k,v in zip(fields, cur.fetchone())} + return dict(zip(fields, cur.fetchone())) cur = self._execute(selecter, [ID, -2 if allow_deleted else 0]) if cur.rowcount > 0: return dict(zip(fields, cur.fetchone())) @@ -462,7 +461,7 @@ def orphans(self, old=False, beta=None): """ Returns lists of knowl ids (grouped by category) that are not referenced by any code or other knowl. """ - kids = set(k['id'] for k in self.get_all_knowls(['id'], types=[0]) if not any(k['id'].startswith(x) for x in ["users.", "test."])) + kids = {k['id'] for k in self.get_all_knowls(['id'], types=[0]) if not any(k['id'].startswith(x) for x in ["users.", "test."])} def filter_from_matches(pattern): matches = subprocess.check_output(['git', 'grep', '-E', '--full-name', '--line-number', '--context', '2', pattern],encoding='utf-8').split('\n--\n') @@ -666,7 +665,7 @@ def broken_links_code(self): as in ``code_references``, and ``links`` is a list of purported knowl ids that show up in an expression of the form ``KNOWL('BAD_ID')``. 
""" - all_kids = set(k['id'] for k in self.get_all_knowls(['id'])) + all_kids = {k['id'] for k in self.get_all_knowls(['id'])} if sys.version_info[0] == 3: matches = subprocess.check_output(['git', 'grep', '-E', '--full-name', '--line-number', '--context', '2', link_finder_re.pattern],encoding='utf-8').split('\n--\n') else: @@ -854,9 +853,9 @@ def __init__(self, ID, template_kwargs=None, data=None, editing=False, showing=F # "status":0}] uids = [ elt['last_author'] for elt in self.edit_history] if uids: - full_names = dict([ (elt['username'], elt['full_name']) for elt in userdb.full_names(uids)]) + full_names = {elt['username']: elt['full_name'] for elt in userdb.full_names(uids)} else: - full_names = dict({}) + full_names = {} self.previous_review_spot = None for i, elt in enumerate(self.edit_history): elt['ms_timestamp'] = datetime_to_timestamp_in_ms(elt['timestamp']) diff --git a/lmfdb/knowledge/main.py b/lmfdb/knowledge/main.py index 642e58216f..c6e029b7a0 100644 --- a/lmfdb/knowledge/main.py +++ b/lmfdb/knowledge/main.py @@ -748,7 +748,7 @@ def render_knowl(ID, footer=None, kwargs=None, include *just* the string and not the response object. """ # logger.debug("kwargs: %s", request.args) - kwargs = kwargs or dict(((k, v) for k, v in request.args.items())) + kwargs = kwargs or dict(request.args.items()) # logger.debug("kwargs: %s" , kwargs) if timestamp is None: # fetch and convert the ms timestamp to datetime diff --git a/lmfdb/lfunctions/LfunctionDatabase.py b/lmfdb/lfunctions/LfunctionDatabase.py index 2693e00bdf..816fc52ab6 100644 --- a/lmfdb/lfunctions/LfunctionDatabase.py +++ b/lmfdb/lfunctions/LfunctionDatabase.py @@ -70,8 +70,8 @@ def get_multiples_by_Lhash_and_trace_hash(Lhash, degree, trace_hash): # hence, self.degree = 2, self.type = CMF if degree == 2: # our only hope is to find the missing genus 2 curve with a CMF - for Lhash in set(elt['Lhash'] for elt in instances - if elt['type'] == 'CMF'): + for Lhash in {elt['Lhash'] for elt in instances + if elt['type'] == 'CMF'}: other_trace_hash = db.lfunc_lfunctions.lucky( {'Lhash': Lhash, 'degree': 4}, 'trace_hash') if other_trace_hash is not None: diff --git a/lmfdb/local_fields/main.py b/lmfdb/local_fields/main.py index 44338071fb..f04f00f0fc 100644 --- a/lmfdb/local_fields/main.py +++ b/lmfdb/local_fields/main.py @@ -310,7 +310,7 @@ class LF_download(Downloader): db_cols=["c", "coeffs", "e", "f", "gal", "label", "n", "p", "slopes", "t", "u", "visible", "ind_of_insep", "associated_inertia"]) def lf_postprocess(res, info, query): - cache = knowl_cache(list(set(f"{rec['n']}T{rec['gal']}" for rec in res))) + cache = knowl_cache(list({f"{rec['n']}T{rec['gal']}" for rec in res})) for rec in res: rec["cache"] = cache return res diff --git a/lmfdb/modular_curves/main.py b/lmfdb/modular_curves/main.py index 7494e3aaf5..487ea10831 100644 --- a/lmfdb/modular_curves/main.py +++ b/lmfdb/modular_curves/main.py @@ -957,7 +957,7 @@ def low_degree_points(): FloatCol("j_height", "nf.weil_height", "$j$-height", default=True)]) def ratpoint_postprocess(res, info, query): - labels = list(set(rec["curve_label"] for rec in res)) + labels = list({rec["curve_label"] for rec in res}) RSZBlabels = {rec["label"]: rec["RSZBlabel"] for rec in db.gps_gl2zhat_fine.search({"label":{"$in":labels}}, ["label", "RSZBlabel"])} for rec in res: rec["curve_RSZBlabel"] = RSZBlabels.get(rec["curve_label"], "") diff --git a/lmfdb/number_fields/number_field.py b/lmfdb/number_fields/number_field.py index 0d81c2dafa..321ae3b85f 100644 --- 
a/lmfdb/number_fields/number_field.py +++ b/lmfdb/number_fields/number_field.py @@ -515,7 +515,7 @@ def render_field_webpage(args): table = "" reflex_fields = db.nf_fields_reflex.search({"nf_label" : label}) reflex_fields_list = [] - field_labels_dict = dict() + field_labels_dict = {} for reflex_field in reflex_fields: if len(reflex_field['rf_coeffs']) > 1: reflex_fields_list.append(['', reflex_field['rf_coeffs'], reflex_field['multiplicity']]) diff --git a/lmfdb/siegel_modular_forms/sample.py b/lmfdb/siegel_modular_forms/sample.py index 1fc5e131bb..7fc07a63ef 100644 --- a/lmfdb/siegel_modular_forms/sample.py +++ b/lmfdb/siegel_modular_forms/sample.py @@ -85,7 +85,7 @@ def available_eigenvalues(self, index_list=None): def eigenvalues(self, index_list): query = {'owner_id': self.__id, 'index': {'$in': index_list}} evs = db.smf_ev.search(query, ['index', 'data']) - return dict((ev['index'], self.__field(str(ev['data']))) for ev in evs) + return {ev['index']: self.__field(str(ev['data'])) for ev in evs} def available_Fourier_coefficients(self, det_list=None): query = {'owner_id': self.__id} @@ -97,7 +97,7 @@ def Fourier_coefficients(self, det_list): query = {'owner_id': self.__id, 'det': {'$in': det_list}} fcs = db.smf_fc.search(query, ['det', 'data']) P = PolynomialRing(self.__field, names='x,y') - return dict((fcd['det'], dict((tuple(literal_eval(f)), P(str(poly))) for f, poly in fcd['data'].items() )) for fcd in fcs) + return {fcd['det']: {tuple(literal_eval(f)): P(str(poly)) for f, poly in fcd['data'].items() } for fcd in fcs} def Sample(collection, name): @@ -129,10 +129,10 @@ def export(collection, name): # Fourier coefficients and eigenvalues fcs = db.smf_fc.search({'owner_id': id_link}, ['det', 'data']) - doc['Fourier_coefficients'] = dict((fc['det'], fc['data']) for fc in fcs) + doc['Fourier_coefficients'] = {fc['det']: fc['data'] for fc in fcs} evs = db.smf_ev.search({'owner_id': id_link}, ['index', 'data']) - doc['eigenvalues'] = dict((ev['index'], ev['data']) for ev in evs) + doc['eigenvalues'] = {ev['index']: ev['data'] for ev in evs} label = doc['collection'][0] + '.' 
+ doc['name'] doc['label']= label diff --git a/lmfdb/typed_data/type_generation.py b/lmfdb/typed_data/type_generation.py index 19d52f38ea..11ecb38bd8 100644 --- a/lmfdb/typed_data/type_generation.py +++ b/lmfdb/typed_data/type_generation.py @@ -113,13 +113,13 @@ class SmartDict(dict): pass def initConstantValueTypes(self, x): - tmp = dict([((f[0])(k), (f[1])(v)) for (k, v) in x.items()]) + tmp = {(f[0])(k): (f[1])(v) for (k, v) in x.items()} # tmp = dict([(wrapper(f[0])(k), wrapper(f[1])(v)) for (k,v) in x.items()]) dict.__init__(self, tmp) def initVariableValueTypes(self, x): # tmp = dict([(k,wrapper(f[0][k])(v)) for (k,v) in x.items()]) - tmp = dict([(k, (f[0][k])(v)) for (k, v) in x.items()]) + tmp = {k: (f[0][k])(v) for (k, v) in x.items()} dict.__init__(self, tmp) if len(f) == 2: diff --git a/lmfdb/users/pwdmanager.py b/lmfdb/users/pwdmanager.py index 5b09e2acb1..f0c7156206 100755 --- a/lmfdb/users/pwdmanager.py +++ b/lmfdb/users/pwdmanager.py @@ -191,7 +191,7 @@ def full_names(self, uids): #TODO: use identifiers selecter = SQL("SELECT username, full_name FROM userdb.users WHERE username = ANY(%s)") cur = self._execute(selecter, [Array(uids)]) - return [{k:v for k,v in zip(["username","full_name"], rec)} for rec in cur] + return [dict(zip(["username","full_name"], rec)) for rec in cur] def create_tokens(self, tokens): if not self._rw_userdb: @@ -247,7 +247,7 @@ def __init__(self, uid): self._uid = uid self._authenticated = False self._dirty = False # flag if we have to save - self._data = dict([(_, None) for _ in LmfdbUser.properties]) + self._data = {_: None for _ in LmfdbUser.properties} self.exists = userdb.user_exists(uid) if self.exists: diff --git a/lmfdb/utils/search_parsing.py b/lmfdb/utils/search_parsing.py index ed11fdcf78..fcac7198f8 100644 --- a/lmfdb/utils/search_parsing.py +++ b/lmfdb/utils/search_parsing.py @@ -443,7 +443,7 @@ def integer_options(arg, max_opts=None, contained_in=None): ans.add(int(interval)) if max_opts is not None and len(ans) >= max_opts: raise ValueError("Too many options") - return sorted(list(ans)) + return sorted(ans) def collapse_ors(parsed, query): # work around syntax for $or @@ -1064,7 +1064,7 @@ def parse_galgrp(inp, query, qfield, err_msg=None, list_ok=True): galfield, nfield = qfield if nfield and nfield not in query: - nvals = list(set(s[0] for s in gcs)) + nvals = list({s[0] for s in gcs}) if len(nvals) == 1: query[nfield] = nvals[0] else: diff --git a/lmfdb/utils/trace_hash.py b/lmfdb/utils/trace_hash.py index 4c990b0c9c..ad3dea76a1 100644 --- a/lmfdb/utils/trace_hash.py +++ b/lmfdb/utils/trace_hash.py @@ -118,7 +118,7 @@ def TraceHash(E): return TraceHash_from_ap([E_pari.ellap(p) for p in TH_P]) if K not in TH_P_cache: - TH_P_cache[K] = dict([(p,[P for P in K.primes_above(p) if P.norm()==p]) for p in TH_P]) + TH_P_cache[K] = {p: [P for P in K.primes_above(p) if P.norm()==p] for p in TH_P} def ap(p): return sum([E.reduction(P).trace_of_frobenius() for P in TH_P_cache[K][p]], 0) diff --git a/lmfdb/verify/mf_newforms.py b/lmfdb/verify/mf_newforms.py index fe7006ae8e..85fb008c33 100644 --- a/lmfdb/verify/mf_newforms.py +++ b/lmfdb/verify/mf_newforms.py @@ -392,7 +392,7 @@ def check_self_twist_disc(self, rec, verbose=False): """ check that self_twist_discs = is compatible with the last entries of inner_twists. 
""" - return self._test_equality(set(rec['self_twist_discs']), set(elt[6] for elt in rec['inner_twists'] if elt[6] not in [None, 0, 1]), verbose) + return self._test_equality(set(rec['self_twist_discs']), {elt[6] for elt in rec['inner_twists'] if elt[6] not in [None, 0, 1]}, verbose) #### slow #### diff --git a/lmfdb/verify/verification.py b/lmfdb/verify/verification.py index a6b0b21220..225aefba36 100644 --- a/lmfdb/verify/verification.py +++ b/lmfdb/verify/verification.py @@ -686,5 +686,5 @@ def check_uniqueness_constraints(self): """ check that the uniqueness constraints are satisfied """ - constraints = set(tuple(sorted(D['columns'])) for D in self.table.list_constraints().values() if D['type'] == 'UNIQUE') + constraints = {tuple(sorted(D['columns'])) for D in self.table.list_constraints().values() if D['type'] == 'UNIQUE'} return [constraint for constraint in self.uniqueness_constraints if tuple(sorted(constraint)) not in constraints]