diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 79d13215..b1e76aae 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -23,7 +23,7 @@ jobs: pip install -e .[test] pip install mock coverage pytest - name: Test - run: SKIP_UT_WITH_DFLOW=0 DFLOW_DEBUG=1 coverage run --source=./dpgen2 -m unittest -v -f && coverage report + run: SKIP_UT_WITH_DFLOW=0 DFLOW_DEBUG=1 coverage run --source=./dpgen2 -m unittest -v && coverage report - uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a96c7d1d..afc333ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,11 +17,13 @@ repos: # Python - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.1.3 + rev: v0.6.3 hooks: + - id: ruff + args: ["--fix"] - id: ruff-format - repo: https://github.com/PyCQA/isort - rev: 5.12.0 + rev: 5.13.2 hooks: - id: isort files: \.py$ diff --git a/docs/conf.py b/docs/conf.py index 92a536b7..a61d5cd7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,20 +6,21 @@ # -- Path setup -------------------------------------------------------------- +import datetime + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os import sys -from datetime import ( - date, -) # -- Project information ----------------------------------------------------- project = "DPGEN2" -copyright = "2022-%d, DeepModeling" % date.today().year +copyright = ( + "2022-%d, DeepModeling" % datetime.datetime.now(tz=datetime.timezone.utc).year +) author = "DeepModeling" diff --git a/docs/input.md b/docs/input.md index 6fc03bd1..2a1740e4 100644 --- a/docs/input.md +++ b/docs/input.md @@ -70,11 +70,11 @@ This section defines how the configuration space is explored. "command": "lmp -var restart 0" }, "convergence": { - "type" : "fixed-levels", - "conv_accuracy" : 0.9, - "level_f_lo": 0.05, - "level_f_hi": 0.50, - "_comment" : "all" + "type" : "fixed-levels", + "conv_accuracy" : 0.9, + "level_f_lo": 0.05, + "level_f_hi": 0.50, + "_comment" : "all" }, "max_numb_iter" : 5, "fatal_at_max" : false, @@ -94,29 +94,29 @@ This section defines how the configuration space is explored. } ], "stages": [ - [ + [ { - "_comment" : "stage 0, task group 0", - "type" : "lmp-md", - "ensemble": "nvt", "nsteps": 50, "temps": [50, 100], "trj_freq": 10, - "conf_idx": [0], "n_sample" : 3 + "_comment" : "stage 0, task group 0", + "type" : "lmp-md", + "ensemble": "nvt", "nsteps": 50, "temps": [50, 100], "trj_freq": 10, + "conf_idx": [0], "n_sample" : 3 }, { - "_comment" : "stage 0, task group 1", - "type" : "lmp-template", - "lmp" : "template.lammps", "plm" : "template.plumed", - "trj_freq" : 10, "revisions" : {"V_NSTEPS" : [40], "V_TEMP" : [150, 200]}, - "conf_idx": [0], "n_sample" : 3 + "_comment" : "stage 0, task group 1", + "type" : "lmp-template", + "lmp" : "template.lammps", "plm" : "template.plumed", + "trj_freq" : 10, "revisions" : {"V_NSTEPS" : [40], "V_TEMP" : [150, 200]}, + "conf_idx": [0], "n_sample" : 3 } - ], - [ + ], + [ { - "_comment" : "stage 1, task group 0", - "type" : "lmp-md", - "ensemble": "npt", "nsteps": 50, "press": [1e0], "temps": [50, 100, 200], "trj_freq": 10, - "conf_idx": [1], "n_sample" : 3 + 
"_comment" : "stage 1, task group 0", + "type" : "lmp-md", + "ensemble": "npt", "nsteps": 50, "press": [1e0], "temps": [50, 100, 200], "trj_freq": 10, + "conf_idx": [1], "n_sample" : 3 } - ] + ] ] } ``` @@ -197,7 +197,7 @@ Any of the config in the {dargs:argument}`"step_configs"` can be o ```json "default_step_config" : { "template_config" : { - "image" : "dpgen2:x.x.x" + "image" : "dpgen2:x.x.x" } }, ``` diff --git a/dpgen2/__init__.py b/dpgen2/__init__.py index 4217e612..f15091ba 100644 --- a/dpgen2/__init__.py +++ b/dpgen2/__init__.py @@ -4,3 +4,6 @@ from .__about__ import ( __version__, ) + + +__all__ = ["__version__"] diff --git a/dpgen2/conf/__init__.py b/dpgen2/conf/__init__.py index 059469c7..b81191ae 100644 --- a/dpgen2/conf/__init__.py +++ b/dpgen2/conf/__init__.py @@ -12,3 +12,11 @@ "alloy": AlloyConfGenerator, "file": FileConfGenerator, } + + +__all__ = [ + "AlloyConfGenerator", + "ConfGenerator", + "FileConfGenerator", + "conf_styles", +] diff --git a/dpgen2/conf/alloy_conf.py b/dpgen2/conf/alloy_conf.py index a885e623..7c67bdae 100644 --- a/dpgen2/conf/alloy_conf.py +++ b/dpgen2/conf/alloy_conf.py @@ -5,7 +5,6 @@ ) from typing import ( List, - Optional, Tuple, Union, ) @@ -14,7 +13,6 @@ import numpy as np from dargs import ( Argument, - Variant, ) from .conf_generator import ( @@ -184,7 +182,7 @@ def __init__( sys.data["atom_numbs"] = [0] * self.ntypes sys.data["atom_numbs"][0] = self.natoms sys.data["atom_types"] = np.array([0] * self.natoms, dtype=int) - self.type_population = [ii for ii in range(self.ntypes)] + self.type_population = list(range(self.ntypes)) # record sys self.sys = sys diff --git a/dpgen2/conf/conf_generator.py b/dpgen2/conf/conf_generator.py index e5e3b7b0..4768405d 100644 --- a/dpgen2/conf/conf_generator.py +++ b/dpgen2/conf/conf_generator.py @@ -41,7 +41,7 @@ def get_file_content( type_map, fmt="lammps/lmp", ) -> List[str]: - r"""Get the file content of configurations + r"""Get the file content of configurations. 
Parameters ---------- diff --git a/dpgen2/conf/file_conf.py b/dpgen2/conf/file_conf.py index e8ae2443..270f3d67 100644 --- a/dpgen2/conf/file_conf.py +++ b/dpgen2/conf/file_conf.py @@ -1,19 +1,16 @@ import glob -import os from pathlib import ( Path, ) from typing import ( List, Optional, - Tuple, Union, ) import dpdata from dargs import ( Argument, - Variant, ) from .conf_generator import ( diff --git a/dpgen2/conf/unit_cells.py b/dpgen2/conf/unit_cells.py index ad392023..e297cc52 100644 --- a/dpgen2/conf/unit_cells.py +++ b/dpgen2/conf/unit_cells.py @@ -40,16 +40,16 @@ def gen_box(self): def poscar_unit(self, latt): box = self.gen_box() ret = "" - ret += "BCC : a = %f \n" % latt - ret += "%.16f\n" % (latt) - ret += "%.16f %.16f %.16f\n" % (box[0][0], box[0][1], box[0][2]) - ret += "%.16f %.16f %.16f\n" % (box[1][0], box[1][1], box[1][2]) - ret += "%.16f %.16f %.16f\n" % (box[2][0], box[2][1], box[2][2]) + ret += f"BCC : a = {latt:f} \n" + ret += f"{latt:.16f}\n" + ret += f"{box[0][0]:.16f} {box[0][1]:.16f} {box[0][2]:.16f}\n" + ret += f"{box[1][0]:.16f} {box[1][1]:.16f} {box[1][2]:.16f}\n" + ret += f"{box[2][0]:.16f} {box[2][1]:.16f} {box[2][2]:.16f}\n" ret += "Type\n" ret += "%d\n" % self.numb_atoms() ret += "Direct\n" - ret += "%.16f %.16f %.16f\n" % (0.0, 0.0, 0.0) - ret += "%.16f %.16f %.16f\n" % (0.5, 0.5, 0.5) + ret += f"{0.0:.16f} {0.0:.16f} {0.0:.16f}\n" + ret += f"{0.5:.16f} {0.5:.16f} {0.5:.16f}\n" return ret @@ -63,18 +63,18 @@ def gen_box(self): def poscar_unit(self, latt): box = self.gen_box() ret = "" - ret += "FCC : a = %f \n" % latt - ret += "%.16f\n" % (latt) - ret += "%.16f %.16f %.16f\n" % (box[0][0], box[0][1], box[0][2]) - ret += "%.16f %.16f %.16f\n" % (box[1][0], box[1][1], box[1][2]) - ret += "%.16f %.16f %.16f\n" % (box[2][0], box[2][1], box[2][2]) + ret += f"FCC : a = {latt:f} \n" + ret += f"{latt:.16f}\n" + ret += f"{box[0][0]:.16f} {box[0][1]:.16f} {box[0][2]:.16f}\n" + ret += f"{box[1][0]:.16f} {box[1][1]:.16f} {box[1][2]:.16f}\n" + 
ret += f"{box[2][0]:.16f} {box[2][1]:.16f} {box[2][2]:.16f}\n" ret += "Type\n" ret += "%d\n" % self.numb_atoms() ret += "Direct\n" - ret += "%.16f %.16f %.16f\n" % (0.0, 0.0, 0.0) - ret += "%.16f %.16f %.16f\n" % (0.5, 0.5, 0.0) - ret += "%.16f %.16f %.16f\n" % (0.5, 0.0, 0.5) - ret += "%.16f %.16f %.16f\n" % (0.0, 0.5, 0.5) + ret += f"{0.0:.16f} {0.0:.16f} {0.0:.16f}\n" + ret += f"{0.5:.16f} {0.5:.16f} {0.0:.16f}\n" + ret += f"{0.5:.16f} {0.0:.16f} {0.5:.16f}\n" + ret += f"{0.0:.16f} {0.5:.16f} {0.5:.16f}\n" return ret @@ -91,16 +91,16 @@ def gen_box(self): def poscar_unit(self, latt): box = self.gen_box() ret = "" - ret += "HCP : a = %f / sqrt(2)\n" % latt + ret += f"HCP : a = {latt:f} / sqrt(2)\n" ret += "%.16f\n" % (latt / np.sqrt(2)) - ret += "%.16f %.16f %.16f\n" % (box[0][0], box[0][1], box[0][2]) - ret += "%.16f %.16f %.16f\n" % (box[1][0], box[1][1], box[1][2]) - ret += "%.16f %.16f %.16f\n" % (box[2][0], box[2][1], box[2][2]) + ret += f"{box[0][0]:.16f} {box[0][1]:.16f} {box[0][2]:.16f}\n" + ret += f"{box[1][0]:.16f} {box[1][1]:.16f} {box[1][2]:.16f}\n" + ret += f"{box[2][0]:.16f} {box[2][1]:.16f} {box[2][2]:.16f}\n" ret += "Type\n" ret += "%d\n" % self.numb_atoms() ret += "Direct\n" - ret += "%.16f %.16f %.16f\n" % (0, 0, 0) - ret += "%.16f %.16f %.16f\n" % (1.0 / 3, 1.0 / 3, 1.0 / 2) + ret += f"{0:.16f} {0:.16f} {0:.16f}\n" + ret += f"{1.0 / 3:.16f} {1.0 / 3:.16f} {1.0 / 2:.16f}\n" return ret @@ -114,15 +114,15 @@ def gen_box(self): def poscar_unit(self, latt): box = self.gen_box() ret = "" - ret += "SC : a = %f \n" % latt - ret += "%.16f\n" % (latt) - ret += "%.16f %.16f %.16f\n" % (box[0][0], box[0][1], box[0][2]) - ret += "%.16f %.16f %.16f\n" % (box[1][0], box[1][1], box[1][2]) - ret += "%.16f %.16f %.16f\n" % (box[2][0], box[2][1], box[2][2]) + ret += f"SC : a = {latt:f} \n" + ret += f"{latt:.16f}\n" + ret += f"{box[0][0]:.16f} {box[0][1]:.16f} {box[0][2]:.16f}\n" + ret += f"{box[1][0]:.16f} {box[1][1]:.16f} {box[1][2]:.16f}\n" + ret += 
f"{box[2][0]:.16f} {box[2][1]:.16f} {box[2][2]:.16f}\n" ret += "Type\n" ret += "%d\n" % self.numb_atoms() ret += "Direct\n" - ret += "%.16f %.16f %.16f\n" % (0.0, 0.0, 0.0) + ret += f"{0.0:.16f} {0.0:.16f} {0.0:.16f}\n" return ret @@ -142,21 +142,17 @@ def poscar_unit(self, latt): box = self.gen_box() ret = "" ret += "DIAMOND\n" - ret += "%.16f\n" % (latt) - ret += "%.16f %.16f %.16f\n" % (box[0][0], box[0][1], box[0][2]) - ret += "%.16f %.16f %.16f\n" % (box[1][0], box[1][1], box[1][2]) - ret += "%.16f %.16f %.16f\n" % (box[2][0], box[2][1], box[2][2]) + ret += f"{latt:.16f}\n" + ret += f"{box[0][0]:.16f} {box[0][1]:.16f} {box[0][2]:.16f}\n" + ret += f"{box[1][0]:.16f} {box[1][1]:.16f} {box[1][2]:.16f}\n" + ret += f"{box[2][0]:.16f} {box[2][1]:.16f} {box[2][2]:.16f}\n" ret += "Type\n" ret += "%d\n" % self.numb_atoms() ret += "Direct\n" - ret += "%.16f %.16f %.16f\n" % ( - 0.12500000000000, - 0.12500000000000, - 0.12500000000000, + ret += ( + f"{0.12500000000000:.16f} {0.12500000000000:.16f} {0.12500000000000:.16f}\n" ) - ret += "%.16f %.16f %.16f\n" % ( - 0.87500000000000, - 0.87500000000000, - 0.87500000000000, + ret += ( + f"{0.87500000000000:.16f} {0.87500000000000:.16f} {0.87500000000000:.16f}\n" ) return ret diff --git a/dpgen2/entrypoint/args.py b/dpgen2/entrypoint/args.py index df11ff7f..97ec5ee9 100644 --- a/dpgen2/entrypoint/args.py +++ b/dpgen2/entrypoint/args.py @@ -9,13 +9,9 @@ Variant, ) -import dpgen2 from dpgen2.conf import ( conf_styles, ) -from dpgen2.constants import ( - default_image, -) from dpgen2.exploration.report import ( conv_styles, ) @@ -186,7 +182,7 @@ def variant_filter(): kk, dict, conf_filter_styles[kk].args(), - doc="Configuration filter of type %s" % kk, + doc=f"Configuration filter of type {kk}", ) ) return Variant( diff --git a/dpgen2/entrypoint/common.py b/dpgen2/entrypoint/common.py index 0d0af9e8..36d8b94c 100644 --- a/dpgen2/entrypoint/common.py +++ b/dpgen2/entrypoint/common.py @@ -3,9 +3,7 @@ Path, ) from typing import ( - 
Dict, List, - Optional, Union, ) @@ -13,14 +11,8 @@ from dpgen2.utils import ( bohrium_config_from_dict, - dump_object_to_file, - load_object_from_file, - matched_step_key, - print_keys_in_nice_format, - sort_slice_ops, workflow_config_from_dict, ) -from dpgen2.utils.step_config import normalize as normalize_step_dict def global_config_workflow( @@ -64,5 +56,5 @@ def expand_idx(in_list) -> List[int]: ret += [int(range_str[0])] else: raise RuntimeError("not expected range string", step_str[0]) - ret = sorted(list(set(ret))) + ret = sorted(set(ret)) return ret diff --git a/dpgen2/entrypoint/download.py b/dpgen2/entrypoint/download.py index 7f095039..b67ad37e 100644 --- a/dpgen2/entrypoint/download.py +++ b/dpgen2/entrypoint/download.py @@ -3,7 +3,6 @@ Dict, List, Optional, - Union, ) from dflow import ( @@ -14,9 +13,6 @@ from dpgen2.entrypoint.common import ( global_config_workflow, ) -from dpgen2.utils.dflow_query import ( - matched_step_key, -) from dpgen2.utils.download_dpgen2_artifacts import ( download_dpgen2_artifacts, download_dpgen2_artifacts_by_def, diff --git a/dpgen2/entrypoint/main.py b/dpgen2/entrypoint/main.py index 08493917..62e2eff8 100644 --- a/dpgen2/entrypoint/main.py +++ b/dpgen2/entrypoint/main.py @@ -1,22 +1,12 @@ import argparse import json import logging -import os import textwrap from typing import ( List, Optional, ) -import dflow -from dflow import ( - Step, - Steps, - Workflow, - download_artifact, - upload_artifact, -) - from dpgen2 import ( __version__, ) @@ -41,11 +31,8 @@ status, ) from .submit import ( - make_concurrent_learning_op, - make_naive_exploration_scheduler, resubmit_concurrent_learning, submit_concurrent_learning, - workflow_concurrent_learning, ) from .watch import ( default_watching_keys, @@ -297,7 +284,7 @@ def main_parser() -> argparse.ArgumentParser: "-v", "--version", action="version", - version="DPGEN v%s" % __version__, + version=f"DPGEN v{__version__}", ) return parser @@ -368,7 +355,7 @@ def main(): config = 
json.load(fp) wfid = args.ID if args.list_supported is not None and args.list_supported: - print(print_op_download_setting()) + print(print_op_download_setting()) # noqa: T201 elif args.keys is not None: download( wfid, diff --git a/dpgen2/entrypoint/showkey.py b/dpgen2/entrypoint/showkey.py index 36055426..2aaecedf 100644 --- a/dpgen2/entrypoint/showkey.py +++ b/dpgen2/entrypoint/showkey.py @@ -1,11 +1,6 @@ -import glob -import os -import pickle -from pathlib import ( - Path, -) +import functools +import operator -import dpdata from dflow import ( Workflow, ) @@ -32,9 +27,9 @@ def showkey( wf = Workflow(id=wf_id) folded_keys = get_resubmit_keys(wf) - all_step_keys = sum(folded_keys.values(), []) + all_step_keys = functools.reduce(operator.iadd, folded_keys.values(), []) prt_str = print_keys_in_nice_format( all_step_keys, ["run-train", "run-lmp", "run-fp", "diffcsp-gen", "run-relax"], ) - print(prt_str) + print(prt_str) # noqa: T201 diff --git a/dpgen2/entrypoint/status.py b/dpgen2/entrypoint/status.py index d6164647..7806c01e 100644 --- a/dpgen2/entrypoint/status.py +++ b/dpgen2/entrypoint/status.py @@ -1,9 +1,7 @@ import logging from typing import ( Dict, - List, Optional, - Union, ) from dflow import ( @@ -15,7 +13,6 @@ global_config_workflow, ) from dpgen2.utils.dflow_query import ( - get_all_schedulers, get_last_scheduler, ) @@ -36,6 +33,6 @@ def status( if scheduler is not None: ptr_str = scheduler.print_convergence() - print(ptr_str) + print(ptr_str) # noqa: T201 else: logging.warn("no scheduler is finished") diff --git a/dpgen2/entrypoint/submit.py b/dpgen2/entrypoint/submit.py index 76f944c6..dd1dd137 100644 --- a/dpgen2/entrypoint/submit.py +++ b/dpgen2/entrypoint/submit.py @@ -1,9 +1,9 @@ import copy -import glob +import functools import json import logging +import operator import os -import pickle import re from copy import ( deepcopy, @@ -15,44 +15,20 @@ Dict, List, Optional, - Tuple, - Type, - Union, ) -import dpdata from dflow import ( ArgoStep, - 
InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, S3Artifact, Step, - Steps, Workflow, - argo_range, - download_artifact, upload_artifact, ) -from dflow.python import ( - OP, - OPIO, - Artifact, - FatalError, - OPIOSign, - PythonOPTemplate, - TransientError, - upload_packages, -) from dpgen2.conf import ( conf_styles, ) from dpgen2.constants import ( - default_host, default_image, ) from dpgen2.entrypoint.args import normalize as normalize_args @@ -65,7 +41,6 @@ TrajRenderLammps, ) from dpgen2.exploration.report import ( - ExplorationReportTrustLevelsRandom, conv_styles, ) from dpgen2.exploration.scheduler import ( @@ -78,11 +53,7 @@ conf_filter_styles, ) from dpgen2.exploration.task import ( - CustomizedLmpTemplateTaskGroup, ExplorationStage, - ExplorationTask, - LmpTemplateTaskGroup, - NPTTaskGroup, caly_normalize, diffcsp_normalize, make_calypso_task_group_from_config, @@ -130,16 +101,12 @@ ) from dpgen2.utils import ( BinaryFileInput, - bohrium_config_from_dict, - dump_object_to_file, get_artifact_from_uri, get_subkey, - load_object_from_file, matched_step_key, print_keys_in_nice_format, sort_slice_ops, upload_artifact_and_print_uri, - workflow_config_from_dict, ) from dpgen2.utils.step_config import normalize as normalize_step_dict @@ -464,7 +431,7 @@ def get_systems_from_data(data, data_prefix=None): assert isinstance(data, list) if data_prefix is not None: data = [os.path.join(data_prefix, ii) for ii in data] - data = sum([expand_sys_str(ii) for ii in data], []) + data = functools.reduce(operator.iadd, [expand_sys_str(ii) for ii in data], []) return data @@ -587,7 +554,7 @@ def workflow_concurrent_learning( if fp_style == "deepmd": assert ( "teacher_model_path" in fp_config["run"] - ), f"Cannot find 'teacher_model_path' in config['fp']['run_config'] when fp_style == 'deepmd'" + ), "Cannot find 'teacher_model_path' in config['fp']['run_config'] when fp_style == 'deepmd'" assert os.path.exists( 
fp_config["run"]["teacher_model_path"] ), f"No such file: {fp_config['run']['teacher_model_path']}" @@ -811,7 +778,7 @@ def successful_step_keys(wf): all_step_keys = [] steps = wf.query_step() # For reused steps whose startedAt are identical, sort them by key - steps.sort(key=lambda x: "%s-%s" % (x.startedAt, x.key)) + steps.sort(key=lambda x: f"{x.startedAt}-{x.key}") for step in steps: if step.key is not None and step.phase == "Succeeded": all_step_keys.append(step.key) @@ -954,14 +921,14 @@ def resubmit_concurrent_learning( old_wf = Workflow(id=wfid) folded_keys = get_resubmit_keys(old_wf) - all_step_keys = sum(folded_keys.values(), []) + all_step_keys = functools.reduce(operator.iadd, folded_keys.values(), []) if list_steps: prt_str = print_keys_in_nice_format( all_step_keys, ["run-train", "run-lmp", "run-fp", "diffcsp-gen", "run-relax"], ) - print(prt_str) + print(prt_str) # noqa: T201 if reuse is None: return None @@ -981,10 +948,10 @@ def resubmit_concurrent_learning( # reuse the super OP iif all steps within it are reused if v != [k] and k in folded_keys and set(v) == set(folded_keys[k]): reused_folded_keys[k] = [k] - reused_keys = sum(reused_folded_keys.values(), []) + reused_keys = functools.reduce(operator.iadd, reused_folded_keys.values(), []) reuse_step = old_wf.query_step(key=reused_keys) # For reused steps whose startedAt are identical, sort them by key - reuse_step.sort(key=lambda x: "%s-%s" % (x.startedAt, x.key)) + reuse_step.sort(key=lambda x: f"{x.startedAt}-{x.key}") wf = submit_concurrent_learning( wf_config, diff --git a/dpgen2/entrypoint/watch.py b/dpgen2/entrypoint/watch.py index 891be817..57712551 100644 --- a/dpgen2/entrypoint/watch.py +++ b/dpgen2/entrypoint/watch.py @@ -4,7 +4,6 @@ Dict, List, Optional, - Union, ) from dflow import ( @@ -43,7 +42,7 @@ def update_finished_steps( if finished_keys is not None: diff_keys = [] for kk in wf_keys: - if not (kk in finished_keys): + if kk not in finished_keys: diff_keys.append(kk) else: 
diff_keys = wf_keys diff --git a/dpgen2/entrypoint/workflow.py b/dpgen2/entrypoint/workflow.py index 8cc4cc04..4b3e2012 100644 --- a/dpgen2/entrypoint/workflow.py +++ b/dpgen2/entrypoint/workflow.py @@ -1,7 +1,4 @@ import argparse -import json -import logging -import os from typing import ( Optional, ) diff --git a/dpgen2/exploration/deviation/__init__.py b/dpgen2/exploration/deviation/__init__.py index 537c2f94..2319d069 100644 --- a/dpgen2/exploration/deviation/__init__.py +++ b/dpgen2/exploration/deviation/__init__.py @@ -4,3 +4,5 @@ from .deviation_std import ( DeviManagerStd, ) + +__all__ = ["DeviManager", "DeviManagerStd"] diff --git a/dpgen2/exploration/deviation/deviation_manager.py b/dpgen2/exploration/deviation/deviation_manager.py index cbc227c5..e1964ff1 100644 --- a/dpgen2/exploration/deviation/deviation_manager.py +++ b/dpgen2/exploration/deviation/deviation_manager.py @@ -81,5 +81,5 @@ def clear(self) -> None: @abstractmethod def _check_data(self) -> None: - r"""Check if data is valid""" + r"""Check if data is valid.""" pass diff --git a/dpgen2/exploration/deviation/deviation_std.py b/dpgen2/exploration/deviation/deviation_std.py index f927b75b..a4eafa05 100644 --- a/dpgen2/exploration/deviation/deviation_std.py +++ b/dpgen2/exploration/deviation/deviation_std.py @@ -2,7 +2,6 @@ defaultdict, ) from typing import ( - Dict, List, Optional, ) @@ -38,7 +37,7 @@ def _add(self, name: str, deviation: np.ndarray) -> None: ), f"Error: deviation(type: {type(deviation)}) is not a np.ndarray" assert len(deviation.shape) == 1, ( f"Error: deviation(shape: {deviation.shape}) is not a " - + f"one-dimensional array" + + "one-dimensional array" ) self._data[name].append(deviation) @@ -57,7 +56,7 @@ def clear(self) -> None: return None def _check_data(self) -> None: - r"""Check if data is valid""" + r"""Check if data is valid.""" model_devi_names = ( DeviManager.MAX_DEVI_V, DeviManager.MIN_DEVI_V, @@ -73,7 +72,7 @@ def _check_data(self) -> None: assert 
len(self._data[name]) == self.ntraj, ( f"Error: the number of model deviation {name} " + f"({len(self._data[name])}) and trajectory files ({self.ntraj}) " - + f"are not equal." + + "are not equal." ) for idx, ndarray in enumerate(self._data[name]): assert isinstance(ndarray, np.ndarray), ( diff --git a/dpgen2/exploration/render/__init__.py b/dpgen2/exploration/render/__init__.py index e926c484..36e6ddce 100644 --- a/dpgen2/exploration/render/__init__.py +++ b/dpgen2/exploration/render/__init__.py @@ -4,3 +4,8 @@ from .traj_render_lammps import ( TrajRenderLammps, ) + +__all__ = [ + "TrajRender", + "TrajRenderLammps", +] diff --git a/dpgen2/exploration/render/traj_render.py b/dpgen2/exploration/render/traj_render.py index 5c9f0c41..236b6efd 100644 --- a/dpgen2/exploration/render/traj_render.py +++ b/dpgen2/exploration/render/traj_render.py @@ -9,12 +9,10 @@ TYPE_CHECKING, List, Optional, - Tuple, Union, ) import dpdata -import numpy as np from dflow.python.opio import ( HDF5Dataset, ) diff --git a/dpgen2/exploration/render/traj_render_lammps.py b/dpgen2/exploration/render/traj_render_lammps.py index 28eb07f6..4a669586 100644 --- a/dpgen2/exploration/render/traj_render_lammps.py +++ b/dpgen2/exploration/render/traj_render_lammps.py @@ -9,7 +9,6 @@ TYPE_CHECKING, List, Optional, - Tuple, Union, ) @@ -76,7 +75,7 @@ def _load_one_model_devi(self, fname, model_devi): def get_ele_temp(self, optional_outputs): ele_temp = [] for ii in range(len(optional_outputs)): - with open(optional_outputs[ii], "r") as f: + with open(optional_outputs[ii]) as f: data = json.load(f) if self.use_ele_temp: ele_temp.append(data["ele_temp"]) diff --git a/dpgen2/exploration/report/__init__.py b/dpgen2/exploration/report/__init__.py index 282068c2..f2c2d996 100644 --- a/dpgen2/exploration/report/__init__.py +++ b/dpgen2/exploration/report/__init__.py @@ -16,3 +16,10 @@ "fixed-levels-max-select": ExplorationReportTrustLevelsMax, "adaptive-lower": ExplorationReportAdaptiveLower, } +__all__ = [ + 
"ExplorationReport", + "ExplorationReportAdaptiveLower", + "ExplorationReportTrustLevelsMax", + "ExplorationReportTrustLevelsRandom", + "conv_styles", +] diff --git a/dpgen2/exploration/report/report.py b/dpgen2/exploration/report/report.py index a86e629f..efc7cfee 100644 --- a/dpgen2/exploration/report/report.py +++ b/dpgen2/exploration/report/report.py @@ -5,11 +5,8 @@ from typing import ( List, Optional, - Tuple, ) -import numpy as np - from ..deviation import ( DeviManager, ) @@ -18,7 +15,7 @@ class ExplorationReport(ABC): @abstractmethod def clear(self): - r"""Clear the report""" + r"""Clear the report.""" pass @abstractmethod @@ -26,7 +23,7 @@ def record( self, model_devi: DeviManager, ): - r"""Record the model deviations of the trajectories + r"""Record the model deviations of the trajectories. Parameters ---------- @@ -61,15 +58,15 @@ def converged( pass def no_candidate(self) -> bool: - r"""If no candidate configuration is found""" - return all([len(ii) == 0 for ii in self.get_candidate_ids()]) + r"""If no candidate configuration is found.""" + return all(len(ii) == 0 for ii in self.get_candidate_ids()) @abstractmethod def get_candidate_ids( self, max_nframes: Optional[int] = None, ) -> List[List[int]]: - r"""Get indexes of candidate configurations + r"""Get indexes of candidate configurations. 
Parameters ---------- @@ -87,7 +84,7 @@ def get_candidate_ids( @abstractmethod def print_header(self) -> str: - r"""Print the header of report""" + r"""Print the header of report.""" pass @abstractmethod @@ -97,5 +94,5 @@ def print( idx_in_stage: int, iter_idx: int, ) -> str: - r"""Print the report""" + r"""Print the report.""" pass diff --git a/dpgen2/exploration/report/report_adaptive_lower.py b/dpgen2/exploration/report/report_adaptive_lower.py index 49dd9dd2..4d8be302 100644 --- a/dpgen2/exploration/report/report_adaptive_lower.py +++ b/dpgen2/exploration/report/report_adaptive_lower.py @@ -475,9 +475,7 @@ def _histo_idx( self, devi_f: float, ) -> int: - """ - return the index in histogram given a force model deviation. - """ + """Return the index in histogram given a force model deviation.""" dh = (self.level_f_hi - self.level_f_lo) / self.nhist hist_idx = int((devi_f - self.level_f_lo) / dh) if hist_idx < 0: @@ -487,7 +485,7 @@ def _histo_idx( return hist_idx def print_header(self) -> str: - r"""Print the header of report""" + r"""Print the header of report.""" return self.header_str def print( @@ -496,7 +494,7 @@ def print( idx_in_stage: int, iter_idx: int, ) -> str: - r"""Print the report""" + r"""Print the report.""" fmt_str = self.fmt_str fmt_flt = self.fmt_flt print_tuple = ( diff --git a/dpgen2/exploration/report/report_trust_levels_base.py b/dpgen2/exploration/report/report_trust_levels_base.py index 2598cba1..df8c0175 100644 --- a/dpgen2/exploration/report/report_trust_levels_base.py +++ b/dpgen2/exploration/report/report_trust_levels_base.py @@ -1,11 +1,9 @@ -import random from abc import ( abstractmethod, ) from typing import ( List, Optional, - Tuple, ) import numpy as np @@ -157,10 +155,7 @@ def _record_one_traj( id_v_cand, id_v_fail, ): - """ - Record one trajctory. inputs are the indexes of candidate, accurate and failed frames. - - """ + """Record one trajctory. 
inputs are the indexes of candidate, accurate and failed frames.""" # check consistency novirial = id_v_cand is None if novirial: @@ -176,9 +171,9 @@ def _record_one_traj( set_f_accu = set(id_f_accu) set_f_cand = set(id_f_cand) set_f_fail = set(id_f_fail) - set_v_accu = set([ii for ii in range(nframes)]) if novirial else set(id_v_accu) - set_v_cand = set([]) if novirial else set(id_v_cand) - set_v_fail = set([]) if novirial else set(id_v_fail) + set_v_accu = set(range(nframes)) if novirial else set(id_v_accu) + set_v_cand = set() if novirial else set(id_v_cand) + set_v_fail = set() if novirial else set(id_v_fail) # accu, cand, fail set_accu = set_f_accu & set_v_accu set_cand = ( @@ -230,7 +225,7 @@ def get_candidate_ids( pass def print_header(self) -> str: - r"""Print the header of report""" + r"""Print the header of report.""" return self.header_str def print( @@ -239,7 +234,7 @@ def print( idx_in_stage: int, iter_idx: int, ) -> str: - r"""Print the report""" + r"""Print the report.""" fmt_str = self.fmt_str fmt_flt = self.fmt_flt print_tuple = ( diff --git a/dpgen2/exploration/report/report_trust_levels_max.py b/dpgen2/exploration/report/report_trust_levels_max.py index 636572e2..b325834c 100644 --- a/dpgen2/exploration/report/report_trust_levels_max.py +++ b/dpgen2/exploration/report/report_trust_levels_max.py @@ -1,18 +1,9 @@ -import random from typing import ( List, Optional, Tuple, ) -import numpy as np -from dargs import ( - Argument, -) -from dflow.python import ( - FatalError, -) - from ..deviation import ( DeviManager, ) diff --git a/dpgen2/exploration/report/report_trust_levels_random.py b/dpgen2/exploration/report/report_trust_levels_random.py index fb69c46c..e17101b0 100644 --- a/dpgen2/exploration/report/report_trust_levels_random.py +++ b/dpgen2/exploration/report/report_trust_levels_random.py @@ -5,17 +5,6 @@ Tuple, ) -import numpy as np -from dargs import ( - Argument, -) -from dflow.python import ( - FatalError, -) - -from ..deviation import ( - 
DeviManager, -) from . import ( ExplorationReport, ) diff --git a/dpgen2/exploration/scheduler/__init__.py b/dpgen2/exploration/scheduler/__init__.py index 5c05e006..29fc1f86 100644 --- a/dpgen2/exploration/scheduler/__init__.py +++ b/dpgen2/exploration/scheduler/__init__.py @@ -7,3 +7,9 @@ from .stage_scheduler import ( StageScheduler, ) + +__all__ = [ + "ConvergenceCheckStageScheduler", + "ExplorationScheduler", + "StageScheduler", +] diff --git a/dpgen2/exploration/scheduler/convergence_check_stage_scheduler.py b/dpgen2/exploration/scheduler/convergence_check_stage_scheduler.py index 8ab8662f..04c5c9ff 100644 --- a/dpgen2/exploration/scheduler/convergence_check_stage_scheduler.py +++ b/dpgen2/exploration/scheduler/convergence_check_stage_scheduler.py @@ -24,7 +24,6 @@ from dpgen2.exploration.task import ( BaseExplorationTaskGroup, ExplorationStage, - ExplorationTaskGroup, ) from .stage_scheduler import ( diff --git a/dpgen2/exploration/scheduler/scheduler.py b/dpgen2/exploration/scheduler/scheduler.py index ff55fa23..581e213e 100644 --- a/dpgen2/exploration/scheduler/scheduler.py +++ b/dpgen2/exploration/scheduler/scheduler.py @@ -23,7 +23,6 @@ ConfSelector, ) from dpgen2.exploration.task import ( - ExplorationStage, ExplorationTaskGroup, ) @@ -31,12 +30,13 @@ StageScheduler, ) +__all__ = [ + "ExplorationScheduler", +] -class ExplorationScheduler: - """ - The exploration scheduler. - """ +class ExplorationScheduler: + """The exploration scheduler.""" def __init__( self, @@ -91,17 +91,11 @@ def get_iteration(self): return tot_iter def complete(self): - """ - Tell if all stages are converged. 
- - """ + """Tell if all stages are converged.""" return self.complete_ def force_stage_complete(self): - """ - Force complete the current stage - - """ + """Force complete the current stage.""" self.stage_schedulers[self.cur_stage].force_complete() self.cur_stage += 1 if self.cur_stage < len(self.stage_schedulers): @@ -136,7 +130,6 @@ def plan_next_iteration( The configuration selector for the next iteration. Should be `None` if converged. """ - try: stg_complete, expl_task_grp, conf_selector = self.stage_schedulers[ self.cur_stage @@ -145,7 +138,7 @@ def plan_next_iteration( trajs, ) except FatalError as e: - raise FatalError(f"stage {self.cur_stage}: " + str(e)) + raise FatalError(f"stage {self.cur_stage}: " + str(e)) from e if stg_complete: self.cur_stage += 1 @@ -164,10 +157,7 @@ def plan_next_iteration( return stg_complete, expl_task_grp, conf_selector def get_stage_of_iterations(self): - """ - Get the stage index and the index in the stage of iterations. - - """ + """Get the stage index and the index in the stage of iterations.""" stages = self.stage_schedulers n_stage_iters = [] for ii in range(self.get_stage() + 1): @@ -196,7 +186,7 @@ def get_stage_of_iterations(self): def get_convergence_ratio(self): """ - Get the accurate, candidate and failed ratios of the iterations + Get the accurate, candidate and failed ratios of the iterations. 
Returns ------- @@ -252,8 +242,8 @@ def print_last_iteration(self, print_header=False): ) if self.complete(): - ret.append(f"# All stages converged") - return "\n".join(ret + [""]) + ret.append("# All stages converged") + return "\n".join([*ret, ""]) def print_convergence(self): ret = [] @@ -288,5 +278,5 @@ def print_convergence(self): _summary = self._print_prev_summary(prev_stg_idx) assert _summary is not None ret.append(_summary) - ret.append(f"# All stages converged") - return "\n".join(ret + [""]) + ret.append("# All stages converged") + return "\n".join([*ret, ""]) diff --git a/dpgen2/exploration/scheduler/stage_scheduler.py b/dpgen2/exploration/scheduler/stage_scheduler.py index 18fe5593..1e9f2492 100644 --- a/dpgen2/exploration/scheduler/stage_scheduler.py +++ b/dpgen2/exploration/scheduler/stage_scheduler.py @@ -22,20 +22,17 @@ ConfSelector, ) from dpgen2.exploration.task import ( - ExplorationStage, ExplorationTaskGroup, ) class StageScheduler(ABC): - """ - The scheduler for an exploration stage. - """ + """The scheduler for an exploration stage.""" @abstractmethod def converged(self) -> bool: """ - Tell if the stage is converged + Tell if the stage is converged. Returns ------- @@ -47,7 +44,7 @@ def converged(self) -> bool: @abstractmethod def complete(self) -> bool: """ - Tell if the stage is complete + Tell if the stage is complete. Returns ------- @@ -58,16 +55,13 @@ def complete(self) -> bool: @abstractmethod def force_complete(self): - """ - For complete the stage - - """ + """For complete the stage.""" pass @abstractmethod def next_iteration(self) -> int: """ - Return the index of the next iteration + Return the index of the next iteration. Returns ------- @@ -79,7 +73,7 @@ def next_iteration(self) -> int: @abstractmethod def get_reports(self) -> List[ExplorationReport]: """ - Return all exploration reports + Return all exploration reports. 
Returns ------- diff --git a/dpgen2/exploration/selector/__init__.py b/dpgen2/exploration/selector/__init__.py index cfed094f..eda3a596 100644 --- a/dpgen2/exploration/selector/__init__.py +++ b/dpgen2/exploration/selector/__init__.py @@ -19,3 +19,10 @@ "box_skewness": BoxSkewnessConfFilter, "box_length": BoxLengthFilter, } +__all__ = [ + "ConfFilter", + "ConfFilters", + "ConfSelector", + "ConfSelectorFrames", + "conf_filter_styles", +] diff --git a/dpgen2/exploration/selector/conf_selector.py b/dpgen2/exploration/selector/conf_selector.py index f24a7d31..b8d69941 100644 --- a/dpgen2/exploration/selector/conf_selector.py +++ b/dpgen2/exploration/selector/conf_selector.py @@ -8,12 +8,10 @@ from typing import ( List, Optional, - Set, Tuple, Union, ) -import dpdata from dflow.python.opio import ( HDF5Dataset, ) @@ -22,10 +20,6 @@ ExplorationReport, ) -from . import ( - ConfFilters, -) - class ConfSelector(ABC): """Select configurations from trajectory and model deviation files.""" diff --git a/dpgen2/exploration/selector/conf_selector_frame.py b/dpgen2/exploration/selector/conf_selector_frame.py index fc116f88..da90f9bd 100644 --- a/dpgen2/exploration/selector/conf_selector_frame.py +++ b/dpgen2/exploration/selector/conf_selector_frame.py @@ -1,7 +1,4 @@ import copy -from collections import ( - Counter, -) from pathlib import ( Path, ) @@ -12,8 +9,6 @@ Union, ) -import dpdata -import numpy as np from dflow.python.opio import ( HDF5Dataset, ) @@ -34,7 +29,8 @@ class ConfSelectorFrames(ConfSelector): """Select frames from trajectories as confs. - Parameters: + Parameters + ---------- trust_level: TrustLevel The trust level conf_filter: ConfFilters @@ -61,7 +57,7 @@ def select( type_map: Optional[List[str]] = None, optional_outputs: Optional[List[Path]] = None, ) -> Tuple[List[Path], ExplorationReport]: - """Select configurations + """Select configurations. 
Parameters ---------- diff --git a/dpgen2/exploration/selector/distance_conf_filter.py b/dpgen2/exploration/selector/distance_conf_filter.py index 4d5a8c33..ff6a3db5 100644 --- a/dpgen2/exploration/selector/distance_conf_filter.py +++ b/dpgen2/exploration/selector/distance_conf_filter.py @@ -196,7 +196,6 @@ def args() -> List[dargs.Argument]: arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of the `ConfFilter`. """ - doc_custom_safe_dist = "Custom safe distance (in unit of bohr) for each element" doc_safe_dist_ratio = "The ratio multiplied to the safe distance" return [ @@ -260,7 +259,6 @@ def args() -> List[dargs.Argument]: arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of the `ConfFilter`. """ - doc_theta = "The threshold for angles between the edges of the cell. If all angles are larger than this value the check is passed" return [ Argument( @@ -303,7 +301,7 @@ def check( c = cell[2][2] # type: ignore if check_multiples(a, b, c, self.length_ratio): - logging.warning("One side is %s larger than another" % self.length_ratio) + logging.warning(f"One side is {self.length_ratio} larger than another") return False return True @@ -316,7 +314,6 @@ def args() -> List[dargs.Argument]: arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of the `ConfFilter`. """ - doc_length_ratio = "The threshold for the length ratio between the edges of the cell. 
If all length ratios are smaller than this value the check is passed" return [ Argument( diff --git a/dpgen2/exploration/task/__init__.py b/dpgen2/exploration/task/__init__.py index 534a8828..0e5d1ecd 100644 --- a/dpgen2/exploration/task/__init__.py +++ b/dpgen2/exploration/task/__init__.py @@ -41,3 +41,25 @@ BaseExplorationTaskGroup, ExplorationTaskGroup, ) + +__all__ = [ + "BaseExplorationTaskGroup", + "ExplorationTask", + "ExplorationTaskGroup", + "ExplorationStage", + "CalyTaskGroup", + "ConfSamplingTaskGroup", + "CustomizedLmpTemplateTaskGroup", + "LmpTemplateTaskGroup", + "caly_normalize", + "caly_task_group_args", + "normalize_lmp_task_group_config", + "lmp_task_group_args", + "make_calypso_task_group_from_config", + "make_lmp_task_group_from_config", + "variant_task_group", + "NPTTaskGroup", + "DiffCSPTaskGroup", + "diffcsp_normalize", + "make_diffcsp_task_group_from_config", +] diff --git a/dpgen2/exploration/task/caly_task_group.py b/dpgen2/exploration/task/caly_task_group.py index 14594f38..731b60ed 100644 --- a/dpgen2/exploration/task/caly_task_group.py +++ b/dpgen2/exploration/task/caly_task_group.py @@ -109,22 +109,20 @@ def set_params( max_numb_atoms: int = 100, opt_step: int = 1000, ): - """ - Set calypso parameters - """ + """Set calypso parameters.""" self.numb_of_species = numb_of_species self.numb_of_atoms = numb_of_atoms if isinstance(name_of_atoms, list) and all( - [isinstance(i, list) for i in name_of_atoms] + isinstance(i, list) for i in name_of_atoms ): overlap = set(name_of_atoms[0]) for temp in name_of_atoms[1:]: overlap = overlap & set(temp) - if any(map(lambda s: (set(s) - overlap) == 0, name_of_atoms)): + if any((set(s) - overlap) == 0 for s in name_of_atoms): raise ValueError( - f"Any sub-list should not equal with intersection, e.g. [[A,B,C], [B,C], [C]] is not allowed." + "Any sub-list should not equal with intersection, e.g. [[A,B,C], [B,C], [C]] is not allowed." 
) while True: diff --git a/dpgen2/exploration/task/calypso/__init__.py b/dpgen2/exploration/task/calypso/__init__.py index 61f3cdcd..33d15515 100644 --- a/dpgen2/exploration/task/calypso/__init__.py +++ b/dpgen2/exploration/task/calypso/__init__.py @@ -4,3 +4,10 @@ calypso_run_opt_str_end, make_calypso_input, ) + +__all__ = [ + "calypso_check_opt_str", + "calypso_run_opt_str", + "calypso_run_opt_str_end", + "make_calypso_input", +] diff --git a/dpgen2/exploration/task/calypso/caly_input.py b/dpgen2/exploration/task/calypso/caly_input.py index 66f2b511..7b31530a 100644 --- a/dpgen2/exploration/task/calypso/caly_input.py +++ b/dpgen2/exploration/task/calypso/caly_input.py @@ -1,19 +1,8 @@ -import random from typing import ( List, - Optional, ) -import dpdata import numpy as np -import scipy.constants as pc -from packaging.version import ( - Version, -) - -from dpgen2.constants import ( - lmp_traj_name, -) calypso_run_opt_str = """#!/usr/bin/env python3 @@ -322,9 +311,9 @@ def make_calypso_input( file_str = "" for key, value in necessary_keys.items(): - file_str += f"{key} = {str(value)}\n" + file_str += f"{key} = {value!s}\n" for key, value in default_key_value.items(): - file_str += f"{key} = {str(value)}\n" + file_str += f"{key} = {value!s}\n" file_str += "@DistanceOfIon\n" file_str += distance_of_ions_str + "\n" file_str += "@End\n" diff --git a/dpgen2/exploration/task/conf_sampling_task_group.py b/dpgen2/exploration/task/conf_sampling_task_group.py index 4c5ee0c3..98965e6b 100644 --- a/dpgen2/exploration/task/conf_sampling_task_group.py +++ b/dpgen2/exploration/task/conf_sampling_task_group.py @@ -1,19 +1,9 @@ -import itertools import random from typing import ( List, Optional, ) -from dpgen2.constants import ( - lmp_conf_name, - lmp_input_name, - model_name_pattern, -) - -from .task import ( - ExplorationTask, -) from .task_group import ( ExplorationTaskGroup, ) @@ -33,7 +23,7 @@ def set_conf( random_sample: bool = False, ): """ - Set the configurations of 
exploration + Set the configurations of exploration. Parameters ---------- diff --git a/dpgen2/exploration/task/customized_lmp_template_task_group.py b/dpgen2/exploration/task/customized_lmp_template_task_group.py index d7022516..39e895da 100644 --- a/dpgen2/exploration/task/customized_lmp_template_task_group.py +++ b/dpgen2/exploration/task/customized_lmp_template_task_group.py @@ -18,8 +18,6 @@ from dpgen2.constants import ( lmp_conf_name, lmp_input_name, - model_name_pattern, - plm_input_name, ) from dpgen2.utils import ( run_command, @@ -29,15 +27,9 @@ from .conf_sampling_task_group import ( ConfSamplingTaskGroup, ) -from .lmp import ( - make_lmp_input, -) from .lmp_template_task_group import ( LmpTemplateTaskGroup, ) -from .task import ( - ExplorationTask, -) class CustomizedLmpTemplateTaskGroup(ConfSamplingTaskGroup): diff --git a/dpgen2/exploration/task/lmp/__init__.py b/dpgen2/exploration/task/lmp/__init__.py index ceaa824a..bd66130e 100644 --- a/dpgen2/exploration/task/lmp/__init__.py +++ b/dpgen2/exploration/task/lmp/__init__.py @@ -1,3 +1,7 @@ from .lmp_input import ( make_lmp_input, ) + +__all__ = [ + "make_lmp_input", +] diff --git a/dpgen2/exploration/task/lmp/lmp_input.py b/dpgen2/exploration/task/lmp/lmp_input.py index 5898000f..4e6d366a 100644 --- a/dpgen2/exploration/task/lmp/lmp_input.py +++ b/dpgen2/exploration/task/lmp/lmp_input.py @@ -17,8 +17,9 @@ def _sample_sphere(): + rng = np.random.default_rng() while True: - vv = np.array([np.random.normal(), np.random.normal(), np.random.normal()]) + vv = np.array([rng.normal(), rng.normal(), rng.normal()]) vn = np.linalg.norm(vv) if vn < 0.2: continue @@ -64,16 +65,16 @@ def make_lmp_input( ret = "variable NSTEPS equal %d\n" % nsteps ret += "variable THERMO_FREQ equal %d\n" % trj_freq ret += "variable DUMP_FREQ equal %d\n" % trj_freq - ret += "variable TEMP equal %f\n" % temp + ret += f"variable TEMP equal {temp:f}\n" if ele_temp_f is not None: - ret += "variable ELE_TEMP equal %f\n" % ele_temp_f + 
ret += f"variable ELE_TEMP equal {ele_temp_f:f}\n" if ele_temp_a is not None: - ret += "variable ELE_TEMP equal %f\n" % ele_temp_a + ret += f"variable ELE_TEMP equal {ele_temp_a:f}\n" if pres is not None: - ret += "variable PRES equal %f\n" % pres - ret += "variable TAU_T equal %f\n" % tau_t + ret += f"variable PRES equal {pres:f}\n" + ret += f"variable TAU_T equal {tau_t:f}\n" if pres is not None: - ret += "variable TAU_P equal %f\n" % tau_p + ret += f"variable TAU_P equal {tau_p:f}\n" ret += "\n" ret += "units metal\n" if nopbc: @@ -87,10 +88,7 @@ def make_lmp_input( ret += "neigh_modify delay %d\n" % neidelay ret += "\n" ret += "box tilt large\n" - ret += ( - 'if "${restart} > 0" then "read_restart dpgen.restart.*" else "read_data %s"\n' - % conf_file - ) + ret += f'if "${{restart}} > 0" then "read_restart dpgen.restart.*" else "read_data {conf_file}"\n' ret += "change_box all triclinic\n" for jj in range(len(mass_map)): ret += "mass %d %f\n" % (jj + 1, mass_map[jj]) @@ -99,24 +97,21 @@ def make_lmp_input( graph_list += ii + " " if Version(deepmd_version) < Version("1"): # 0.x - ret += "pair_style deepmd %s ${THERMO_FREQ} model_devi.out\n" % graph_list + ret += f"pair_style deepmd {graph_list} ${{THERMO_FREQ}} model_devi.out\n" else: # 1.x keywords = "" if use_clusters: keywords += "atomic " if relative_f_epsilon is not None: - keywords += "relative %s " % relative_f_epsilon + keywords += f"relative {relative_f_epsilon} " if relative_v_epsilon is not None: - keywords += "relative_v %s " % relative_v_epsilon + keywords += f"relative_v {relative_v_epsilon} " if ele_temp_f is not None: keywords += "fparam ${ELE_TEMP}" if ele_temp_a is not None: keywords += "aparam ${ELE_TEMP}" - ret += ( - "pair_style deepmd %s out_freq ${THERMO_FREQ} out_file model_devi.out %s\n" - % (graph_list, keywords) - ) + ret += f"pair_style deepmd {graph_list} out_freq ${{THERMO_FREQ}} out_file model_devi.out {keywords}\n" ret += "pair_coeff * *\n" ret += "\n" ret += "thermo_style custom 
step temp pe ke etotal press vol lx ly lz xy xz yz\n" @@ -124,10 +119,7 @@ def make_lmp_input( if trj_seperate_files: ret += "dump 1 all custom ${DUMP_FREQ} traj/*.lammpstrj id type x y z fx fy fz\n" else: - ret += ( - "dump 1 all custom ${DUMP_FREQ} %s id type x y z fx fy fz\n" - % lmp_traj_name - ) + ret += f"dump 1 all custom ${{DUMP_FREQ}} {lmp_traj_name} id type x y z fx fy fz\n" ret += "restart 10000 dpgen.restart\n" ret += "\n" if pka_e is None: @@ -144,21 +136,16 @@ def make_lmp_input( / (0.5 * pka_mass * 1e-3 / pc.Avogadro * (pc.angstrom / pc.pico) ** 2) ) # type: ignore pka_vn = np.sqrt(pka_vn) - print(pka_vn) pka_vec = _sample_sphere() pka_vec *= pka_vn ret += "group first id 1\n" - ret += 'if "${restart} == 0" then "velocity first set %f %f %f"\n' % ( - pka_vec[0], - pka_vec[1], - pka_vec[2], - ) - ret += "fix 2 all momentum 1 linear 1 1 1\n" + ret += f'if "${{restart}} == 0" then "velocity first set {pka_vec[0]:f} {pka_vec[1]:f} {pka_vec[2]:f}"\n' + ret += "fix 2 all momentum 1 linear 1 1 1\n" ret += "\n" if ensemble.split("-")[0] == "npt": assert pres is not None if nopbc: - raise RuntimeError("ensemble %s is conflicting with nopbc" % ensemble) + raise RuntimeError(f"ensemble {ensemble} is conflicting with nopbc") if ensemble == "npt" or ensemble == "npt-i" or ensemble == "npt-iso": ret += "fix 1 all npt temp ${TEMP} ${TEMP} ${TAU_T} iso ${PRES} ${PRES} ${TAU_P}\n" elif ensemble == "npt-a" or ensemble == "npt-aniso": @@ -175,6 +162,6 @@ def make_lmp_input( ret += "velocity all zero linear\n" ret += "fix fm all momentum 1 linear 1 1 1\n" ret += "\n" - ret += "timestep %f\n" % dt + ret += f"timestep {dt:f}\n" ret += "run ${NSTEPS} upto\n" return ret diff --git a/dpgen2/exploration/task/lmp_template_task_group.py b/dpgen2/exploration/task/lmp_template_task_group.py index b82e1695..9a85ae98 100644 --- a/dpgen2/exploration/task/lmp_template_task_group.py +++ b/dpgen2/exploration/task/lmp_template_task_group.py @@ -1,10 +1,8 @@ import itertools -import 
random from pathlib import ( Path, ) from typing import ( - List, Optional, ) @@ -20,9 +18,6 @@ from .conf_sampling_task_group import ( ConfSamplingTaskGroup, ) -from .lmp import ( - make_lmp_input, -) from .task import ( ExplorationTask, ) @@ -139,7 +134,7 @@ def find_only_one_key(lmp_lines, key): if len(found) > 1: raise RuntimeError("found %d keywords %s" % (len(found), key)) if len(found) == 0: - raise RuntimeError("failed to find keyword %s" % (key)) + raise RuntimeError(f"failed to find keyword {key}") return found[0] @@ -172,9 +167,8 @@ def revise_lmp_input_dump(lmp_lines, trj_freq): def revise_lmp_input_plm(lmp_lines, in_plm, out_plm="output.plumed"): idx = find_only_one_key(lmp_lines, ["fix", "dpgen_plm"]) - lmp_lines[idx] = "fix dpgen_plm all plumed plumedfile %s outfile %s" % ( - in_plm, - out_plm, + lmp_lines[idx] = ( + f"fix dpgen_plm all plumed plumedfile {in_plm} outfile {out_plm}" ) return lmp_lines diff --git a/dpgen2/exploration/task/make_task_group_from_config.py b/dpgen2/exploration/task/make_task_group_from_config.py index c467fd8e..c3873709 100644 --- a/dpgen2/exploration/task/make_task_group_from_config.py +++ b/dpgen2/exploration/task/make_task_group_from_config.py @@ -1,4 +1,3 @@ -import dargs from dargs import ( Argument, Variant, @@ -7,7 +6,6 @@ from dpgen2.constants import ( lmp_conf_name, lmp_input_name, - model_name_pattern, plm_input_name, ) from dpgen2.exploration.task import ( @@ -634,4 +632,4 @@ def make_lmp_task_group_from_config( if __name__ == "__main__": - print(lmp_normalize({"type": "lmp-md"})) + pass diff --git a/dpgen2/exploration/task/npt_task_group.py b/dpgen2/exploration/task/npt_task_group.py index 4c999638..395251bd 100644 --- a/dpgen2/exploration/task/npt_task_group.py +++ b/dpgen2/exploration/task/npt_task_group.py @@ -1,5 +1,4 @@ import itertools -import random from typing import ( List, Optional, @@ -50,9 +49,7 @@ def set_md( ele_temp_f: Optional[float] = None, ele_temp_a: Optional[float] = None, ): - """ - Set MD 
parameters - """ + """Set MD parameters.""" self.graphs = [model_name_pattern % ii for ii in range(numb_models)] self.mass_map = mass_map self.temps = temps diff --git a/dpgen2/exploration/task/stage.py b/dpgen2/exploration/task/stage.py index 17ab150e..bd85c5e0 100644 --- a/dpgen2/exploration/task/stage.py +++ b/dpgen2/exploration/task/stage.py @@ -1,20 +1,3 @@ -from abc import ( - ABC, - abstractmethod, -) -from typing import ( - List, -) - -from dpgen2.constants import ( - lmp_conf_name, - lmp_input_name, - model_name_pattern, -) - -from .task import ( - ExplorationTask, -) from .task_group import ( BaseExplorationTaskGroup, ExplorationTaskGroup, @@ -22,19 +5,13 @@ class ExplorationStage: - """ - The exploration stage. - - """ + """The exploration stage.""" def __init__(self): self.clear() def clear(self): - """ - Clear all exploration group. - - """ + """Clear all exploration group.""" self.explor_groups = [] def add_task_group( @@ -42,7 +19,7 @@ def add_task_group( grp: ExplorationTaskGroup, ): """ - Add an exploration group + Add an exploration group. Parameters ---------- @@ -67,7 +44,6 @@ def make_task( added to the stage. """ - lmp_task_grp = BaseExplorationTaskGroup() for ii in self.explor_groups: # lmp_task_grp.add_group(ii.make_task()) diff --git a/dpgen2/exploration/task/task.py b/dpgen2/exploration/task/task.py index 24f7d02f..b68a0abe 100644 --- a/dpgen2/exploration/task/task.py +++ b/dpgen2/exploration/task/task.py @@ -1,11 +1,5 @@ -import os -from collections.abc import ( - Sequence, -) from typing import ( Dict, - List, - Tuple, ) @@ -17,7 +11,7 @@ class ExplorationTask: >>> # this example dumps all files needed by the task. >>> files = exploration_task.files() ... for file_name, file_content in files.items(): - ... with open(file_name, 'w') as fp: + ... with open(file_name, "w") as fp: ... fp.write(file_content) """ @@ -32,7 +26,7 @@ def add_file( fname: str, fcont: str, ): - """Add file to the task + """Add file to the task. 
Parameters ---------- diff --git a/dpgen2/exploration/task/task_group.py b/dpgen2/exploration/task/task_group.py index c603f80b..0438b790 100644 --- a/dpgen2/exploration/task/task_group.py +++ b/dpgen2/exploration/task/task_group.py @@ -6,9 +6,7 @@ Sequence, ) from typing import ( - Dict, List, - Tuple, ) from .task import ( @@ -24,11 +22,11 @@ def __init__(self): self.clear() def __getitem__(self, ii: int) -> ExplorationTask: - """Get the `ii`th task""" + """Get the `ii`th task.""" return self.task_list[ii] def __len__(self) -> int: - """Get the number of tasks in the group""" + """Get the number of tasks in the group.""" return len(self.task_list) def clear(self) -> None: @@ -36,7 +34,7 @@ def clear(self) -> None: @property def task_list(self) -> List[ExplorationTask]: - """Get the `list` of `ExplorationTask`""" + """Get the `list` of `ExplorationTask`.""" return self._task_list def add_task(self, task: ExplorationTask): @@ -110,4 +108,3 @@ def task_list(self): grp = FooTaskGroup(3) for ii in grp: fcs = ii.files() - print(fcs) diff --git a/dpgen2/flow/__init__.py b/dpgen2/flow/__init__.py index 98c490c2..2a77f3ab 100644 --- a/dpgen2/flow/__init__.py +++ b/dpgen2/flow/__init__.py @@ -1,3 +1,7 @@ from .dpgen_loop import ( ConcurrentLearning, ) + +__all__ = [ + "ConcurrentLearning", +] diff --git a/dpgen2/flow/dpgen_loop.py b/dpgen2/flow/dpgen_loop.py index 190a1090..18bd8ad4 100644 --- a/dpgen2/flow/dpgen_loop.py +++ b/dpgen2/flow/dpgen_loop.py @@ -1,5 +1,4 @@ import os -import pickle from copy import ( deepcopy, ) @@ -12,7 +11,6 @@ Union, ) -import jsonpickle from dflow import ( InputArtifact, InputParameter, @@ -23,13 +21,7 @@ Outputs, Step, Steps, - Workflow, - argo_len, - argo_range, - argo_sequence, - download_artifact, if_expression, - upload_artifact, ) from dflow.python import ( OP, @@ -39,7 +31,6 @@ HDF5Datasets, OPIOSign, PythonOPTemplate, - Slices, ) from dpgen2.exploration.report import ( @@ -57,10 +48,6 @@ from dpgen2.superop.block import ( 
    ConcurrentLearningBlock,
 )
-from dpgen2.utils import (
-    dump_object_to_file,
-    load_object_from_file,
-)
 from dpgen2.utils.step_config import (
     init_executor,
 )
@@ -214,7 +201,7 @@ def __init__(
         self.step_keys = {}
         for ii in self._my_keys:
             self.step_keys[ii] = "--".join(
-                ["%s" % self.inputs.parameters["block_id"], ii]
+                ["{}".format(self.inputs.parameters["block_id"]), ii]
             )
 
         self = _loop(
@@ -338,7 +325,7 @@ def init_keys(self):
 
     @property
     def loop_keys(self):
-        return [self.loop_key] + self.loop.keys
+        return [self.loop_key, *self.loop.keys]
 
 
 def _loop(
@@ -449,24 +436,24 @@ def _loop(
             "init_data": steps.inputs.artifacts["init_data"],
             "iter_data": block_step.outputs.artifacts["iter_data"],
         },
-        when="%s == false" % (scheduler_step.outputs.parameters["converged"]),
+        when="{} == false".format(scheduler_step.outputs.parameters["converged"]),
     )
     steps.add(next_step)
 
     steps.outputs.parameters[
         "exploration_scheduler"
     ].value_from_expression = if_expression(
-        _if=(scheduler_step.outputs.parameters["converged"] == True),
+        _if=(scheduler_step.outputs.parameters["converged"] == True),  # noqa: E712
         _then=scheduler_step.outputs.parameters["exploration_scheduler"],
         _else=next_step.outputs.parameters["exploration_scheduler"],
     )
 
     steps.outputs.artifacts["models"].from_expression = if_expression(
-        _if=(scheduler_step.outputs.parameters["converged"] == True),
+        _if=(scheduler_step.outputs.parameters["converged"] == True),  # noqa: E712
         _then=block_step.outputs.artifacts["models"],
         _else=next_step.outputs.artifacts["models"],
     )
 
     steps.outputs.artifacts["iter_data"].from_expression = if_expression(
-        _if=(scheduler_step.outputs.parameters["converged"] == True),
+        _if=(scheduler_step.outputs.parameters["converged"] == True),  # noqa: E712
         _then=block_step.outputs.artifacts["iter_data"],
         _else=next_step.outputs.artifacts["iter_data"],
     )
@@ -553,7 +540,7 @@ def _dpgen(
             "init_data": steps.inputs.artifacts["init_data"],
             "iter_data": steps.inputs.artifacts["iter_data"],
         },
-        key="--".join(["%s" % 
id_step.outputs.parameters["block_id"], loop_key]), + key="--".join(["{}".format(id_step.outputs.parameters["block_id"]), loop_key]), ) steps.add(loop_step) diff --git a/dpgen2/fp/abacus.py b/dpgen2/fp/abacus.py index 28769b01..9558aaea 100644 --- a/dpgen2/fp/abacus.py +++ b/dpgen2/fp/abacus.py @@ -115,7 +115,7 @@ def execute( s["atom_types"][i] = atom_names.index(s["atom_names"][t]) # type: ignore https://github.com/microsoft/pyright/issues/5620 s.data["atom_numbs"] = atom_numbs s.data["atom_names"] = atom_names - target = "output/%s" % system + target = f"output/{system}" s.to("deepmd/npy", target) confs.append(Path(target)) else: @@ -189,7 +189,7 @@ def execute( workdir = op_out["backward_dir"].parent # convert the output to deepmd/npy format - with open("%s/INPUT" % workdir, "r") as f: + with open(f"{workdir}/INPUT") as f: INPUT = f.readlines() _, calculation = get_suffix_calculation(INPUT) if calculation == "scf": @@ -199,7 +199,7 @@ def execute( elif calculation in ["relax", "cell-relax"]: sys = dpdata.LabeledSystem(str(workdir), fmt="abacus/relax") else: - raise ValueError("Type of calculation %s not supported" % calculation) + raise ValueError(f"Type of calculation {calculation} not supported") out_name = fp_default_out_data_name sys.to("deepmd/npy", workdir / out_name) diff --git a/dpgen2/fp/cp2k.py b/dpgen2/fp/cp2k.py index 67ed5d55..fe69a384 100644 --- a/dpgen2/fp/cp2k.py +++ b/dpgen2/fp/cp2k.py @@ -85,7 +85,7 @@ def execute( s["atom_types"][i] = atom_names.index(s["atom_names"][t]) # type: ignore https://github.com/microsoft/pyright/issues/5620 s.data["atom_numbs"] = atom_numbs s.data["atom_names"] = atom_names - target = "output/%s" % system + target = f"output/{system}" s.to("deepmd/npy", target) confs.append(Path(target)) else: @@ -152,7 +152,7 @@ def execute( file_path = os.path.join(str(workdir), "output.log") # convert the output to deepmd/npy format - with open(workdir / "input.inp", "r") as f: + with open(workdir / "input.inp") as f: lines = 
f.readlines() # 获取 RUN_TYPE diff --git a/dpgen2/fp/deepmd.py b/dpgen2/fp/deepmd.py index 43fb200a..3094396f 100644 --- a/dpgen2/fp/deepmd.py +++ b/dpgen2/fp/deepmd.py @@ -1,4 +1,5 @@ """Prep and Run Gaussian tasks.""" + import os from pathlib import ( Path, @@ -6,7 +7,6 @@ from typing import ( Any, List, - Optional, Tuple, ) @@ -18,7 +18,6 @@ ) from dflow.python import ( FatalError, - TransientError, ) from dpgen2.constants import ( @@ -102,7 +101,7 @@ def run_task( out: str, log: str, ) -> Tuple[str, str]: - r"""Defines how one FP task runs + r"""Defines how one FP task runs. Parameters ---------- @@ -205,7 +204,6 @@ def args() -> List[dargs.Argument]: arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of `run_task` method. """ - doc_deepmd_teacher_model = ( "The path of teacher model, which can be loaded by deepmd.infer.DeepPot" ) diff --git a/dpgen2/fp/gaussian.py b/dpgen2/fp/gaussian.py index b6aba200..d3af8d8b 100644 --- a/dpgen2/fp/gaussian.py +++ b/dpgen2/fp/gaussian.py @@ -1,4 +1,5 @@ """Prep and Run Gaussian tasks.""" + import logging from typing import ( Any, @@ -104,7 +105,6 @@ def prep_task( inputs : GaussianInputs The GaussianInputs object handels all other input files of the task. """ - conf_frame.to("gaussian/gjf", gaussian_input_name, **inputs.data) @@ -137,7 +137,7 @@ def run_task( out: str, post_command: Optional[str] = None, ) -> Tuple[str, str]: - r"""Defines how one FP task runs + r"""Defines how one FP task runs. Parameters ---------- @@ -203,7 +203,6 @@ def args() -> List[dargs.Argument]: arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of `run_task` method. """ - doc_gaussian_cmd = "The command of Gaussian" doc_gaussian_out = "The output dir name of labeled data. In `deepmd/npy` format provided by `dpdata`." 
doc_post_command = "The command after Gaussian" diff --git a/dpgen2/fp/prep_fp.py b/dpgen2/fp/prep_fp.py index a43f582a..44dde09a 100644 --- a/dpgen2/fp/prep_fp.py +++ b/dpgen2/fp/prep_fp.py @@ -1,5 +1,3 @@ -import json -import os from abc import ( ABC, abstractmethod, @@ -9,11 +7,8 @@ ) from typing import ( Any, - Dict, List, - Set, Tuple, - Union, ) import dpdata @@ -105,7 +100,6 @@ def execute( - `task_names`: (`List[str]`) The name of tasks. Will be used as the identities of the tasks. The names of different tasks are different. - `task_paths`: (`Artifact(List[Path])`) The parepared working paths of the tasks. Contains all input files needed to start the FP. The order fo the Paths should be consistent with `op["task_names"]` """ - inputs = ip["config"]["inputs"] confs = ip["confs"] type_map = ip["type_map"] diff --git a/dpgen2/fp/run_fp.py b/dpgen2/fp/run_fp.py index 64e0c3a8..5dbe0976 100644 --- a/dpgen2/fp/run_fp.py +++ b/dpgen2/fp/run_fp.py @@ -1,4 +1,3 @@ -import json import os from abc import ( ABC, @@ -10,12 +9,10 @@ from typing import ( Dict, List, - Set, Tuple, ) import dargs -import dpdata from dflow.python import ( OP, OPIO, @@ -23,7 +20,6 @@ BigParameter, FatalError, OPIOSign, - TransientError, ) from dpgen2.utils.chdir import ( @@ -91,7 +87,7 @@ def run_task( self, **kwargs, ) -> Tuple[str, str]: - r"""Defines how one FP task runs + r"""Defines how one FP task runs. 
Parameters ---------- diff --git a/dpgen2/fp/vasp.py b/dpgen2/fp/vasp.py index a8ab6b88..4a9c1b5e 100644 --- a/dpgen2/fp/vasp.py +++ b/dpgen2/fp/vasp.py @@ -6,29 +6,16 @@ Path, ) from typing import ( - Dict, List, - Optional, - Set, Tuple, - Union, ) import dpdata import numpy as np from dargs import ( Argument, - ArgumentEncoder, - Variant, - dargs, ) from dflow.python import ( - OP, - OPIO, - Artifact, - BigParameter, - FatalError, - OPIOSign, TransientError, ) @@ -49,7 +36,6 @@ ) from .vasp_input import ( VaspInputs, - make_kspacing_kpoints, ) # global static variables @@ -68,8 +54,9 @@ def clean_lines(string_list, remove_empty_lines=True): remove_empty_lines: Set to True to skip lines which are empty after stripping. - Returns: - List of clean strings with no whitespaces. + Returns + ------- + List of clean strings with no whitespaces. """ for s in string_list: clean_s = s @@ -138,7 +125,6 @@ def prep_task( vasp_inputs : VaspInputs The VaspInputs object handels all other input files of the task. """ - conf_frame.to("vasp/poscar", vasp_conf_name) incar = vasp_inputs.incar_template self.set_ele_temp(conf_frame, incar) @@ -175,7 +161,7 @@ def optional_input_files(self) -> List[str]: def set_ele_temp(self, system): if os.path.exists("job.json"): - with open("job.json", "r") as f: + with open("job.json") as f: data = json.load(f) if "use_ele_temp" in data and "ele_temp" in data: if data["use_ele_temp"] == 1: @@ -193,7 +179,7 @@ def run_task( out: str, log: str, ) -> Tuple[str, str]: - r"""Defines how one FP task runs + r"""Defines how one FP task runs. Parameters ---------- @@ -211,7 +197,6 @@ def run_task( log_name: str The file name of the log. """ - log_name = log out_name = out # run vasp @@ -239,7 +224,6 @@ def args(): arguments: List[dargs.Argument] List of dargs.Argument defines the arguments of `run_task` method. """ - doc_vasp_cmd = "The command of VASP" doc_vasp_log = "The log file name of VASP" doc_vasp_out = "The output dir name of labeled data. 
In `deepmd/npy` format provided by `dpdata`." diff --git a/dpgen2/fp/vasp_input.py b/dpgen2/fp/vasp_input.py index 8bbad820..81b6cdd5 100644 --- a/dpgen2/fp/vasp_input.py +++ b/dpgen2/fp/vasp_input.py @@ -4,19 +4,12 @@ from typing import ( Dict, List, - Optional, - Set, - Tuple, Union, ) -import dpdata import numpy as np from dargs import ( Argument, - ArgumentEncoder, - Variant, - dargs, ) diff --git a/dpgen2/op/__init__.py b/dpgen2/op/__init__.py index c79c3946..4fd89a1b 100644 --- a/dpgen2/op/__init__.py +++ b/dpgen2/op/__init__.py @@ -44,3 +44,22 @@ from .select_confs import ( SelectConfs, ) + +__all__ = [ + "CollectData", + "CollRunCaly", + "PrepCalyDPOptim", + "PrepCalyInput", + "PrepCalyModelDevi", + "PrepDPTrain", + "PrepLmp", + "RunCalyDPOptim", + "RunCalyModelDevi", + "RunDPTrain", + "RunLmp", + "SelectConfs", + "PrepRelax", + "RunRelax", + "RunRelaxHDF5", + "DiffCSPGen", +] diff --git a/dpgen2/op/caly_evo_step_merge.py b/dpgen2/op/caly_evo_step_merge.py index 8abda6dd..12c328e3 100644 --- a/dpgen2/op/caly_evo_step_merge.py +++ b/dpgen2/op/caly_evo_step_merge.py @@ -1,13 +1,8 @@ -import json -import logging -import pickle -import shutil from pathlib import ( Path, ) from typing import ( List, - Tuple, ) from dflow import ( @@ -24,29 +19,14 @@ OPIOSign, Parameter, Slices, - TransientError, ) from dflow.utils import ( flatten, ) -from dpgen2.constants import ( - calypso_check_opt_file, - calypso_run_opt_file, -) -from dpgen2.exploration.task import ( - ExplorationTaskGroup, -) from dpgen2.superop.caly_evo_step import ( CalyEvoStep, ) -from dpgen2.utils import ( - BinaryFileInput, - set_directory, -) -from dpgen2.utils.run_command import ( - run_command, -) class CalyEvoStepMerge(OP): @@ -119,7 +99,7 @@ def execute( path_list = download_artifact(step.outputs.artifacts[k]) if output_sign[k].type == List[Path]: if not isinstance(path_list, list) or any( - [p is not None and not isinstance(p, str) for p in path_list] + p is not None and not isinstance(p, str) 
for p in path_list ): path_list = list(flatten(path_list).values()) out[k] = [Path(p) for p in path_list] diff --git a/dpgen2/op/collect_data.py b/dpgen2/op/collect_data.py index b1e057e5..29323d8e 100644 --- a/dpgen2/op/collect_data.py +++ b/dpgen2/op/collect_data.py @@ -1,12 +1,11 @@ -import json -import os from pathlib import ( Path, ) from typing import ( + Any, + ClassVar, + Dict, List, - Set, - Tuple, ) import dpdata @@ -14,7 +13,6 @@ OP, OPIO, Artifact, - BigParameter, OPIOSign, Parameter, ) @@ -35,7 +33,7 @@ class CollectData(OP): """ - default_optional_parameter = { + default_optional_parameter: ClassVar[Dict[str, Any]] = { "mixed_type": False, } diff --git a/dpgen2/op/collect_run_caly.py b/dpgen2/op/collect_run_caly.py index 4b6148f6..1b52252e 100644 --- a/dpgen2/op/collect_run_caly.py +++ b/dpgen2/op/collect_run_caly.py @@ -1,31 +1,20 @@ -import json import logging -import os -import random -import re import shutil from pathlib import ( Path, ) from typing import ( List, - Optional, - Set, - Tuple, ) from dargs import ( Argument, - ArgumentEncoder, - Variant, - dargs, ) from dflow.python import ( OP, OPIO, Artifact, BigParameter, - FatalError, OPIOSign, Parameter, TransientError, @@ -35,7 +24,6 @@ calypso_log_name, ) from dpgen2.utils import ( - BinaryFileInput, set_directory, ) from dpgen2.utils.run_command import ( @@ -246,7 +234,7 @@ def prep_last_calypso_file(step, results, opt_results_dir, qhull_input, vsc): def get_value_from_inputdat(filename): max_step = 0 vsc = False - with open(filename, "r") as f: + with open(filename) as f: lines = f.readlines() for line in lines: if "MaxStep" in line: diff --git a/dpgen2/op/diffcsp_gen.py b/dpgen2/op/diffcsp_gen.py index a7ed5ef1..f5dc4a68 100644 --- a/dpgen2/op/diffcsp_gen.py +++ b/dpgen2/op/diffcsp_gen.py @@ -55,7 +55,7 @@ def convert_pt_to_cif(input_file, output_dir): lattice, atom_type, frac_coord, coords_are_cartesian=False ) - filename = "%s.cif" % i + filename = f"{i}.cif" file_path = 
os.path.join(output_dir, filename) structure.to(filename=file_path) now_atom += atom_num @@ -88,12 +88,12 @@ def execute( args = cmd.split() try: i = args.index("--model_path") - except ValueError: - raise RuntimeError("Path of DiffCSP model not provided.") + except ValueError as e: + raise RuntimeError("Path of DiffCSP model not provided.") from e model_path = args[i + 1] subprocess.run(cmd, shell=True, check=True) result_file = os.path.join(model_path, "eval_gen.pt") - task_dir = "diffcsp.%s" % ip["task_id"] + task_dir = "diffcsp.{}".format(ip["task_id"]) convert_pt_to_cif(result_file, task_dir) return OPIO( { diff --git a/dpgen2/op/prep_caly_dp_optim.py b/dpgen2/op/prep_caly_dp_optim.py index d2e4d8b0..aaf48acc 100644 --- a/dpgen2/op/prep_caly_dp_optim.py +++ b/dpgen2/op/prep_caly_dp_optim.py @@ -1,13 +1,8 @@ -import json -import logging -import pickle -import shutil from pathlib import ( Path, ) from typing import ( List, - Tuple, ) from dflow.python import ( @@ -17,25 +12,11 @@ BigParameter, OPIOSign, Parameter, - TransientError, ) -from dpgen2.constants import ( - calypso_check_opt_file, - calypso_opt_dir_name, - calypso_run_opt_file, - model_name_pattern, -) -from dpgen2.exploration.task import ( - ExplorationTaskGroup, -) from dpgen2.utils import ( - BinaryFileInput, set_directory, ) -from dpgen2.utils.run_command import ( - run_command, -) class PrepCalyDPOptim(OP): diff --git a/dpgen2/op/prep_caly_input.py b/dpgen2/op/prep_caly_input.py index e3da359a..2ec9349d 100644 --- a/dpgen2/op/prep_caly_input.py +++ b/dpgen2/op/prep_caly_input.py @@ -1,11 +1,8 @@ -import json -import pickle from pathlib import ( Path, ) from typing import ( List, - Tuple, ) from dflow.python import ( @@ -22,14 +19,9 @@ calypso_input_file, calypso_run_opt_file, calypso_task_pattern, - model_name_pattern, ) from dpgen2.exploration.task import ( BaseExplorationTaskGroup, - ExplorationTaskGroup, -) -from dpgen2.utils import ( - set_directory, ) vsc_keys = { @@ -349,7 +341,6 @@ def 
execute( - `caly_run_opt_files`: (`Artifact(List[Path])`) - `caly_check_opt_files`: (`Artifact(List[Path])`) """ - cc = 0 task_paths = [] input_dat_files = [] diff --git a/dpgen2/op/prep_caly_model_devi.py b/dpgen2/op/prep_caly_model_devi.py index 3b070959..2a4b46e5 100644 --- a/dpgen2/op/prep_caly_model_devi.py +++ b/dpgen2/op/prep_caly_model_devi.py @@ -1,13 +1,8 @@ -import json -import logging -import pickle -import shutil from pathlib import ( Path, ) from typing import ( List, - Tuple, ) from dflow.python import ( @@ -17,25 +12,11 @@ BigParameter, OPIOSign, Parameter, - TransientError, ) -from dpgen2.constants import ( - calypso_check_opt_file, - calypso_opt_dir_name, - calypso_run_opt_file, - model_name_pattern, -) -from dpgen2.exploration.task import ( - ExplorationTaskGroup, -) from dpgen2.utils import ( - BinaryFileInput, set_directory, ) -from dpgen2.utils.run_command import ( - run_command, -) class PrepCalyModelDevi(OP): diff --git a/dpgen2/op/prep_dp_train.py b/dpgen2/op/prep_dp_train.py index 20fe58c2..4f497b59 100644 --- a/dpgen2/op/prep_dp_train.py +++ b/dpgen2/op/prep_dp_train.py @@ -6,7 +6,6 @@ ) from typing import ( List, - Tuple, Union, ) @@ -79,7 +78,7 @@ def execute( template = ip["template_script"] numb_models = ip["numb_models"] osubdirs = [] - if type(template) != list: + if not isinstance(template, list): template = [template for ii in range(numb_models)] else: if not (len(template) == numb_models): diff --git a/dpgen2/op/prep_lmp.py b/dpgen2/op/prep_lmp.py index e1b5c026..9b9d86af 100644 --- a/dpgen2/op/prep_lmp.py +++ b/dpgen2/op/prep_lmp.py @@ -1,11 +1,8 @@ -import json -import pickle from pathlib import ( Path, ) from typing import ( List, - Tuple, ) from dflow.python import ( @@ -21,7 +18,6 @@ ) from dpgen2.exploration.task import ( BaseExplorationTaskGroup, - ExplorationTaskGroup, ) @@ -74,7 +70,6 @@ def execute( - `task_names`: (`List[str]`) The name of tasks. Will be used as the identities of the tasks. 
The names of different tasks are different. - `task_paths`: (`Artifact(List[Path])`) The parepared working paths of the tasks. Contains all input files needed to start the LAMMPS simulation. The order fo the Paths should be consistent with `op["task_names"]` """ - lmp_task_grp = ip["lmp_task_grp"] cc = 0 task_paths = [] diff --git a/dpgen2/op/prep_relax.py b/dpgen2/op/prep_relax.py index 1ee2869a..9906779b 100644 --- a/dpgen2/op/prep_relax.py +++ b/dpgen2/op/prep_relax.py @@ -47,7 +47,7 @@ def execute( task_dir = Path("task.%06d" % i) task_dir.mkdir(exist_ok=True) for j in range(group_size * i, min(group_size * (i + 1), ncifs)): - os.symlink(ip["cifs"][j], task_dir / ("%s.cif" % j)) + os.symlink(ip["cifs"][j], task_dir / (f"{j}.cif")) task_paths.append(task_dir) return OPIO( { diff --git a/dpgen2/op/run_caly_dp_optim.py b/dpgen2/op/run_caly_dp_optim.py index 639c97d5..6d2de21e 100644 --- a/dpgen2/op/run_caly_dp_optim.py +++ b/dpgen2/op/run_caly_dp_optim.py @@ -1,14 +1,8 @@ -import json import logging -import pickle import shutil from pathlib import ( Path, ) -from typing import ( - List, - Tuple, -) from dflow.python import ( OP, @@ -20,15 +14,7 @@ TransientError, ) -from dpgen2.constants import ( - calypso_check_opt_file, - calypso_run_opt_file, -) -from dpgen2.exploration.task import ( - ExplorationTaskGroup, -) from dpgen2.utils import ( - BinaryFileInput, set_directory, ) from dpgen2.utils.run_command import ( diff --git a/dpgen2/op/run_caly_model_devi.py b/dpgen2/op/run_caly_model_devi.py index 9e191cf2..cdf06577 100644 --- a/dpgen2/op/run_caly_model_devi.py +++ b/dpgen2/op/run_caly_model_devi.py @@ -14,7 +14,6 @@ OP, OPIO, Artifact, - BigParameter, OPIOSign, Parameter, ) @@ -158,7 +157,7 @@ def execute( def atoms2lmpdump(atoms, struc_idx, type_map, ignore=False): - """down triangle cell can be obtained from + """Down triangle cell can be obtained from cell params: a, b, c, alpha, beta, gamma. 
cell = cellpar_to_cell([a, b, c, alpha, beta, gamma]) lx, ly, lz = cell[0][0], cell[1][1], cell[2][2] @@ -169,7 +168,7 @@ def atoms2lmpdump(atoms, struc_idx, type_map, ignore=False): ylo_bound = ylo + MIN(0.0,yz) yhi_bound = yhi + MAX(0.0,yz) zlo_bound = zlo - zhi_bound = zhi + zhi_bound = zhi. ref: https://docs.lammps.org/Howto_triclinic.html """ @@ -208,19 +207,15 @@ def atoms2lmpdump(atoms, struc_idx, type_map, ignore=False): zhi_bound = zhi dump_str += "ITEM: BOX BOUNDS xy xz yz pp pp pp\n" - dump_str += "%20.10f %20.10f %20.10f\n" % (xlo_bound, xhi_bound, xy) - dump_str += "%20.10f %20.10f %20.10f\n" % (ylo_bound, yhi_bound, xz) - dump_str += "%20.10f %20.10f %20.10f\n" % (zlo_bound, zhi_bound, yz) + dump_str += f"{xlo_bound:20.10f} {xhi_bound:20.10f} {xy:20.10f}\n" + dump_str += f"{ylo_bound:20.10f} {yhi_bound:20.10f} {xz:20.10f}\n" + dump_str += f"{zlo_bound:20.10f} {zhi_bound:20.10f} {yz:20.10f}\n" dump_str += "ITEM: ATOMS id type x y z fx fy fz\n" for idx, atom in enumerate(new_atoms): type_id = type_map.index(atom.symbol) + 1 # type: ignore dump_str += "%5d %5d" % (idx + 1, type_id) - dump_str += "%20.10f %20.10f %20.10f" % ( - atom.position[0], # type: ignore - atom.position[1], # type: ignore - atom.position[2], # type: ignore - ) - dump_str += "%20.10f %20.10f %20.10f\n" % (0, 0, 0) + dump_str += f"{atom.position[0]:20.10f} {atom.position[1]:20.10f} {atom.position[2]:20.10f}" # type: ignore + dump_str += f"{0:20.10f} {0:20.10f} {0:20.10f}\n" # dump_str = dump_str.strip("\n") return dump_str @@ -284,9 +279,8 @@ def parse_traj(traj_file): if len(trajs) >= 20: selected_traj = [trajs[iii] for iii in [4, 9, -10, -5, -1]] elif 5 <= len(trajs) < 20: - selected_traj = [ - trajs[np.random.randint(3, len(trajs) - 1)] for _ in range(4) - ] + rng = np.random.default_rng() + selected_traj = [trajs[rng.integers(3, len(trajs) - 1)] for _ in range(4)] selected_traj.append(trajs[-1]) elif 3 <= len(trajs) < 5: selected_traj = [trajs[round((len(trajs) - 1) / 2)]] diff 
--git a/dpgen2/op/run_dp_train.py b/dpgen2/op/run_dp_train.py index dccbc518..6a22c1ee 100644 --- a/dpgen2/op/run_dp_train.py +++ b/dpgen2/op/run_dp_train.py @@ -1,25 +1,25 @@ +import functools import glob import json import logging +import operator import os import shutil from pathlib import ( Path, ) from typing import ( + Any, + ClassVar, Dict, List, Optional, - Tuple, Union, ) import dpdata from dargs import ( Argument, - ArgumentEncoder, - Variant, - dargs, ) from dflow.python import ( OP, @@ -30,12 +30,10 @@ NestedDict, OPIOSign, Parameter, - TransientError, ) from dpgen2.constants import ( train_script_name, - train_task_pattern, ) from dpgen2.utils.chdir import ( set_directory, @@ -60,14 +58,14 @@ def _make_train_command( if impl == "tensorflow" and os.path.isfile("checkpoint"): checkpoint = "model.ckpt" elif impl == "pytorch" and len(glob.glob("model.ckpt-[0-9]*.pt")) > 0: - checkpoint = "model.ckpt-%s.pt" % max( - [int(f[11:-3]) for f in glob.glob("model.ckpt-[0-9]*.pt")] + checkpoint = "model.ckpt-{}.pt".format( + max([int(f[11:-3]) for f in glob.glob("model.ckpt-[0-9]*.pt")]) ) else: checkpoint = None # case of restart if checkpoint is not None: - command = dp_command + ["train", "--restart", checkpoint, train_script_name] + command = [*dp_command, "train", "--restart", checkpoint, train_script_name] return command # case of init model and finetune assert checkpoint is None @@ -77,25 +75,18 @@ def _make_train_command( ) if case_init_model: init_flag = "--init-frz-model" if impl == "tensorflow" else "--init-model" - command = dp_command + [ + command = [*dp_command, "train", init_flag, str(init_model), train_script_name] + elif case_finetune: + command = [ + *dp_command, "train", - init_flag, - str(init_model), train_script_name, + "--finetune", + str(init_model), + *finetune_args.split(), ] - elif case_finetune: - command = ( - dp_command - + [ - "train", - train_script_name, - "--finetune", - str(init_model), - ] - + finetune_args.split() - ) else: - 
command = dp_command + ["train", train_script_name] + command = [*dp_command, "train", train_script_name] command += train_args.split() return command @@ -110,7 +101,7 @@ class RunDPTrain(OP): """ - default_optional_parameter = { + default_optional_parameter: ClassVar[Dict[str, Any]] = { "mixed_type": False, "finetune_mode": "no", } @@ -480,7 +471,7 @@ def decide_init_model( old_data_size_level = int(config["init_model_policy"].split(":")[-1]) if isinstance(init_data, dict): init_data_size = _get_data_size_of_all_systems( - sum(init_data.values(), []) + functools.reduce(operator.iadd, init_data.values(), []) ) else: init_data_size = _get_data_size_of_all_systems(init_data) diff --git a/dpgen2/op/run_lmp.py b/dpgen2/op/run_lmp.py index 2822a325..8b710571 100644 --- a/dpgen2/op/run_lmp.py +++ b/dpgen2/op/run_lmp.py @@ -9,28 +9,21 @@ from typing import ( List, Optional, - Set, - Tuple, ) from dargs import ( Argument, - ArgumentEncoder, - Variant, - dargs, ) from dflow.python import ( OP, OPIO, Artifact, BigParameter, - FatalError, OPIOSign, TransientError, ) from dpgen2.constants import ( - lmp_conf_name, lmp_input_name, lmp_log_name, lmp_model_devi_name, @@ -134,7 +127,7 @@ def execute( ext = os.path.splitext(teacher_model.file_name)[-1] teacher_model_file = "teacher_model" + ext teacher_model.save_as_file(teacher_model_file) - model_files = [Path(teacher_model_file).resolve()] + model_files + model_files = [Path(teacher_model_file).resolve(), *model_files] with set_directory(work_dir): # link input files @@ -151,10 +144,12 @@ def execute( elif ext == ".pt": # freeze model mname = pytorch_model_name_pattern % (idx) + freeze_model(mm, mname, config.get("model_frozen_head")) + else: raise RuntimeError( - "Model file with extension '%s' is not supported" % ext + f"Model file with extension '{ext}' is not supported" ) model_names.append(mname) @@ -280,7 +275,7 @@ def set_models(lmp_input_name: str, model_names: List[str]): f"cannot file model pattern {pattern} in line " 
f" {lmp_input_lines[idx]}" ) if match_last == -1: - raise RuntimeError(f"last matching index should not be -1, terribly wrong ") + raise RuntimeError("last matching index should not be -1, terribly wrong ") for ii in range(match_last, len(new_line_split)): if re.fullmatch(pattern, new_line_split[ii]) is not None: raise RuntimeError( @@ -305,7 +300,7 @@ def find_only_one_key(lmp_lines, key, raise_not_found=True): raise RuntimeError("found %d keywords %s" % (len(found), key)) if len(found) == 0: if raise_not_found: - raise RuntimeError("failed to find keyword %s" % (key)) + raise RuntimeError(f"failed to find keyword {key}") else: return None return found[0] @@ -334,10 +329,10 @@ def get_ele_temp(lmp_log_name): def freeze_model(input_model, frozen_model, head=None): - freeze_args = "-o %s" % frozen_model + freeze_args = f"-o {frozen_model}" if head is not None: - freeze_args += " --head %s" % head - freeze_cmd = "dp --pt freeze -c %s %s" % (input_model, freeze_args) + freeze_args += f" --head {head}" + freeze_cmd = f"dp --pt freeze -c {input_model} {freeze_args}" ret, out, err = run_command(freeze_cmd, shell=True) if ret != 0: logging.error( diff --git a/dpgen2/op/run_relax.py b/dpgen2/op/run_relax.py index 672275d8..4509d5b1 100644 --- a/dpgen2/op/run_relax.py +++ b/dpgen2/op/run_relax.py @@ -1,4 +1,3 @@ -import logging import os from pathlib import ( Path, @@ -193,14 +192,14 @@ def execute( ) forces_list[j] = forces virial_list[j] = virial / len(atype) - traj_file = ip["task_path"] / ("traj.%s.dump" % fname) + traj_file = ip["task_path"] / (f"traj.{fname}.dump") traj_file = self.write_traj(dump_str, traj_file) trajs.append(traj_file) devi = [np.array(step_list)] devi += list(calc_model_devi_v(np.array(virial_list))) devi += list(calc_model_devi_f(np.array(forces_list))) devi = np.vstack(devi).T - model_devi_file = ip["task_path"] / ("model_devi.%s.out" % fname) + model_devi_file = ip["task_path"] / (f"model_devi.{fname}.out") model_devi_file = 
self.write_model_devi(devi, model_devi_file) model_devis.append(model_devi_file) return OPIO( diff --git a/dpgen2/op/select_confs.py b/dpgen2/op/select_confs.py index cddcca55..3e64fdc6 100644 --- a/dpgen2/op/select_confs.py +++ b/dpgen2/op/select_confs.py @@ -1,12 +1,8 @@ -import json -import os from pathlib import ( Path, ) from typing import ( List, - Set, - Tuple, Union, ) @@ -77,7 +73,6 @@ def execute( - `conf`: (`Artifact(List[Path])`) The selected configurations. """ - conf_selector = ip["conf_selector"] type_map = ip["type_map"] diff --git a/dpgen2/superop/__init__.py b/dpgen2/superop/__init__.py index 0223605f..516400a1 100644 --- a/dpgen2/superop/__init__.py +++ b/dpgen2/superop/__init__.py @@ -16,3 +16,12 @@ from .prep_run_lmp import ( PrepRunLmp, ) + +__all__ = [ + "ConcurrentLearningBlock", + "PrepRunCaly", + "PrepRunDPTrain", + "PrepRunFp", + "PrepRunLmp", + "PrepRunDiffCSP", +] diff --git a/dpgen2/superop/block.py b/dpgen2/superop/block.py index 0e39ab38..1acfc544 100644 --- a/dpgen2/superop/block.py +++ b/dpgen2/superop/block.py @@ -2,15 +2,11 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( Any, Dict, List, Optional, - Set, Type, Union, ) @@ -25,25 +21,12 @@ Outputs, Step, Steps, - Workflow, - argo_len, - argo_range, - argo_sequence, - download_artifact, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - OPIOSign, PythonOPTemplate, - Slices, ) -from dpgen2.op import ( - CollectData, -) from dpgen2.utils.step_config import ( init_executor, ) @@ -148,7 +131,7 @@ def __init__( self.step_keys = {} for ii in self._my_keys: self.step_keys[ii] = "--".join( - ["%s" % self.inputs.parameters["block_id"], ii] + ["{}".format(self.inputs.parameters["block_id"]), ii] ) self = _block_cl( @@ -228,7 +211,7 @@ def _block_cl( "iter_data": block_steps.inputs.artifacts["iter_data"], }, key="--".join( - ["%s" % block_steps.inputs.parameters["block_id"], "prep-run-train"] + 
["{}".format(block_steps.inputs.parameters["block_id"]), "prep-run-train"] ), ) block_steps.add(prep_run_dp_train) @@ -246,7 +229,7 @@ def _block_cl( "models": prep_run_dp_train.outputs.artifacts["models"], }, key="--".join( - ["%s" % block_steps.inputs.parameters["block_id"], "prep-run-explore"] + ["{}".format(block_steps.inputs.parameters["block_id"]), "prep-run-explore"] ), ) block_steps.add(prep_run_explore) @@ -288,7 +271,7 @@ def _block_cl( "confs": select_confs.outputs.artifacts["confs"], }, key="--".join( - ["%s" % block_steps.inputs.parameters["block_id"], "prep-run-fp"] + ["{}".format(block_steps.inputs.parameters["block_id"]), "prep-run-fp"] ), ) block_steps.add(prep_run_fp) diff --git a/dpgen2/superop/caly_evo_step.py b/dpgen2/superop/caly_evo_step.py index cd2be501..8d64c20e 100644 --- a/dpgen2/superop/caly_evo_step.py +++ b/dpgen2/superop/caly_evo_step.py @@ -2,13 +2,9 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( List, Optional, - Set, Type, ) @@ -17,30 +13,17 @@ InputParameter, Inputs, OutputArtifact, - OutputParameter, Outputs, Step, Steps, - Workflow, - argo_len, - argo_range, - argo_sequence, - download_artifact, if_expression, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - OPIOSign, PythonOPTemplate, Slices, ) -from dpgen2.constants import ( - calypso_index_pattern, -) from dpgen2.utils.step_config import ( init_executor, ) @@ -181,8 +164,7 @@ def wise_executor(expl_mode, origin_executor_config): "opt_results_dir": caly_evo_step_steps.inputs.artifacts["opt_results_dir"], "qhull_input": caly_evo_step_steps.inputs.artifacts["qhull_input"], }, - key="%s--collect-run-calypso-%s-%s" - % ( + key="{}--collect-run-calypso-{}-{}".format( caly_evo_step_steps.inputs.parameters["block_id"], caly_evo_step_steps.inputs.parameters["iter_num"], caly_evo_step_steps.inputs.parameters["cnt_num"], @@ -215,8 +197,7 @@ def wise_executor(expl_mode, origin_executor_config): "caly_check_opt_file" ], 
}, - key="%s--prep-dp-optim-%s-%s" - % ( + key="{}--prep-dp-optim-{}-{}".format( caly_evo_step_steps.inputs.parameters["block_id"], caly_evo_step_steps.inputs.parameters["iter_num"], caly_evo_step_steps.inputs.parameters["cnt_num"], @@ -248,8 +229,7 @@ def wise_executor(expl_mode, origin_executor_config): artifacts={ "task_dir": prep_dp_optim.outputs.artifacts["task_dirs"], }, - key="%s--run-dp-optim-%s-%s-{{item}}" - % ( + key="{}--run-dp-optim-{}-{}-{{{{item}}}}".format( caly_evo_step_steps.inputs.parameters["block_id"], caly_evo_step_steps.inputs.parameters["iter_num"], caly_evo_step_steps.inputs.parameters["cnt_num"], @@ -284,7 +264,7 @@ def wise_executor(expl_mode, origin_executor_config): "caly_check_opt_file" ], }, - when="%s == false" % (collect_run_calypso.outputs.parameters["finished"]), + when="{} == false".format(collect_run_calypso.outputs.parameters["finished"]), ) caly_evo_step_steps.add(next_step) diff --git a/dpgen2/superop/prep_run_calypso.py b/dpgen2/superop/prep_run_calypso.py index daa48143..ec39c0fb 100644 --- a/dpgen2/superop/prep_run_calypso.py +++ b/dpgen2/superop/prep_run_calypso.py @@ -2,9 +2,6 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( Any, Dict, @@ -20,29 +17,17 @@ Inputs, OPTemplate, OutputArtifact, - OutputParameter, Outputs, Step, Steps, - Workflow, - argo_len, argo_range, - argo_sequence, - download_artifact, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - OPIOSign, PythonOPTemplate, Slices, ) -from dpgen2.constants import ( - calypso_index_pattern, -) from dpgen2.utils.step_config import ( init_executor, ) @@ -102,13 +87,21 @@ def __init__( ] self.step_keys = {} ii = "prep-caly-input" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) ii = "caly-evo-step-{{item}}" - self.step_keys[ii] = "--".join(["%s" % 
self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) ii = "prep-caly-model-devi" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) ii = "run-caly-model-devi" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) self = _prep_run_caly( self, @@ -260,8 +253,9 @@ def _prep_run_caly( artifacts={ "traj_results": caly_evo_step.outputs.artifacts["traj_results"], }, - key="%s--prep-caly-model-devi" - % (prep_run_caly_steps.inputs.parameters["block_id"],), + key="{}--prep-caly-model-devi".format( + prep_run_caly_steps.inputs.parameters["block_id"] + ), executor=prep_executor, ) prep_run_caly_steps.add(prep_caly_model_devi) @@ -287,8 +281,9 @@ def _prep_run_caly( "traj_dirs": prep_caly_model_devi.outputs.artifacts["grouped_traj_list"], "models": prep_run_caly_steps.inputs.artifacts["models"], }, - key="%s--run-caly-model-devi-{{item}}" - % (prep_run_caly_steps.inputs.parameters["block_id"],), + key="{}--run-caly-model-devi-{{{{item}}}}".format( + prep_run_caly_steps.inputs.parameters["block_id"] + ), executor=run_executor, **prep_config, ) diff --git a/dpgen2/superop/prep_run_diffcsp.py b/dpgen2/superop/prep_run_diffcsp.py index c44851fe..832532bc 100644 --- a/dpgen2/superop/prep_run_diffcsp.py +++ b/dpgen2/superop/prep_run_diffcsp.py @@ -2,40 +2,24 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( - Any, - Dict, List, Optional, Type, - Union, ) from dflow import ( InputArtifact, InputParameter, Inputs, - OPTemplate, OutputArtifact, - OutputParameter, Outputs, Step, Steps, - Workflow, - argo_len, - argo_range, argo_sequence, - download_artifact, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - 
Artifact, - OPIOSign, PythonOPTemplate, Slices, ) @@ -157,7 +141,7 @@ def _prep_run_diffcsp( "task_id": "{{item}}", "config": expl_config, }, - key="%s--diffcsp-gen-{{item}}" % block_id, + key=f"{block_id}--diffcsp-gen-{{{{item}}}}", executor=prep_executor, with_sequence=argo_sequence(expl_config["gen_tasks"], format="%06d"), # type: ignore ) @@ -176,7 +160,7 @@ def _prep_run_diffcsp( artifacts={ "cifs": diffcsp_gen.outputs.artifacts["cifs"], }, - key="%s--prep-relax" % block_id, + key=f"{block_id}--prep-relax", executor=prep_executor, ) prep_run_diffcsp_steps.add(prep_relax) @@ -202,7 +186,7 @@ def _prep_run_diffcsp( "models": models, "task_path": prep_relax.outputs.artifacts["task_paths"], }, - key="%s--run-relax-{{item}}" % block_id, + key=f"{block_id}--run-relax-{{{{item}}}}", executor=run_executor, with_sequence=argo_sequence( prep_relax.outputs.parameters["ntasks"], format="%06d" diff --git a/dpgen2/superop/prep_run_dp_train.py b/dpgen2/superop/prep_run_dp_train.py index 0fd988e4..41c5d4f1 100644 --- a/dpgen2/superop/prep_run_dp_train.py +++ b/dpgen2/superop/prep_run_dp_train.py @@ -1,15 +1,10 @@ -import json import os from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( List, Optional, - Set, Type, ) @@ -23,27 +18,18 @@ S3Artifact, Step, Steps, - Workflow, argo_len, - argo_range, argo_sequence, - download_artifact, upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - BigParameter, - OPIOSign, PythonOPTemplate, Slices, ) from dpgen2.constants import ( train_index_pattern, - train_script_name, - train_task_pattern, ) from dpgen2.op import ( RunDPTrain, @@ -107,10 +93,12 @@ def __init__( self._keys = ["prep-train", "run-train"] self.step_keys = {} ii = "prep-train" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) ii = "run-train" self.step_keys[ii] = "--".join( - ["%s" % 
self.inputs.parameters["block_id"], ii + "-{{item}}"] + ["{}".format(self.inputs.parameters["block_id"]), ii + "-{{item}}"] ) self = _prep_run_dp_train( diff --git a/dpgen2/superop/prep_run_fp.py b/dpgen2/superop/prep_run_fp.py index ba659c6d..52d9e207 100644 --- a/dpgen2/superop/prep_run_fp.py +++ b/dpgen2/superop/prep_run_fp.py @@ -2,13 +2,9 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( List, Optional, - Set, Type, ) @@ -21,18 +17,11 @@ Outputs, Step, Steps, - Workflow, argo_len, - argo_range, argo_sequence, - download_artifact, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - OPIOSign, PythonOPTemplate, Slices, ) @@ -88,10 +77,12 @@ def __init__( self._keys = ["prep-fp", "run-fp"] self.step_keys = {} ii = "prep-fp" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + ) ii = "run-fp" self.step_keys[ii] = "--".join( - ["%s" % self.inputs.parameters["block_id"], ii + "-{{item}}"] + ["{}".format(self.inputs.parameters["block_id"]), ii + "-{{item}}"] ) self = _prep_run_fp( diff --git a/dpgen2/superop/prep_run_lmp.py b/dpgen2/superop/prep_run_lmp.py index 48d4028a..b223e36c 100644 --- a/dpgen2/superop/prep_run_lmp.py +++ b/dpgen2/superop/prep_run_lmp.py @@ -2,13 +2,9 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) from typing import ( List, Optional, - Set, Type, ) @@ -21,18 +17,11 @@ Outputs, Step, Steps, - Workflow, argo_len, - argo_range, argo_sequence, - download_artifact, - upload_artifact, ) from dflow.python import ( OP, - OPIO, - Artifact, - OPIOSign, PythonOPTemplate, Slices, ) @@ -93,10 +82,12 @@ def __init__( self._keys = ["prep-lmp", "run-lmp"] self.step_keys = {} ii = "prep-lmp" - self.step_keys[ii] = "--".join(["%s" % self.inputs.parameters["block_id"], ii]) + self.step_keys[ii] = "--".join( + ["{}".format(self.inputs.parameters["block_id"]), ii] + 
) ii = "run-lmp" self.step_keys[ii] = "--".join( - ["%s" % self.inputs.parameters["block_id"], ii + "-{{item}}"] + ["{}".format(self.inputs.parameters["block_id"]), ii + "-{{item}}"] ) self = _prep_run_lmp( diff --git a/dpgen2/utils/__init__.py b/dpgen2/utils/__init__.py index b7824f57..b2003f24 100644 --- a/dpgen2/utils/__init__.py +++ b/dpgen2/utils/__init__.py @@ -45,3 +45,31 @@ from .step_config import ( step_conf_args, ) + +__all__ = [ + "get_artifact_from_uri", + "upload_artifact_and_print_uri", + "BinaryFileInput", + "bohrium_config_from_dict", + "chdir", + "set_directory", + "dflow_config", + "dflow_s3_config", + "workflow_config_from_dict", + "find_slice_ranges", + "get_iteration", + "get_last_iteration", + "get_last_scheduler", + "get_subkey", + "matched_step_key", + "print_keys_in_nice_format", + "sort_slice_ops", + "dump_object_to_file", + "load_object_from_file", + "run_command", + "gen_doc_step_dict", + "init_executor", + "normalize_step_dict", + "step_conf_args", + "setup_ele_temp", +] diff --git a/dpgen2/utils/artifact_uri.py b/dpgen2/utils/artifact_uri.py index 4f7ec035..ba4d8b03 100644 --- a/dpgen2/utils/artifact_uri.py +++ b/dpgen2/utils/artifact_uri.py @@ -11,13 +11,13 @@ def get_artifact_from_uri(uri): elif uri.startswith("oss://"): return S3Artifact(uri[6:]) else: - raise ValueError("Unrecognized scheme of URI: %s" % uri) + raise ValueError(f"Unrecognized scheme of URI: {uri}") def upload_artifact_and_print_uri(files, name): art = upload_artifact(files) if s3_config["repo_type"] == "s3" and hasattr(art, "key"): - print("%s has been uploaded to s3://%s" % (name, art.key)) + print(f"{name} has been uploaded to s3://{art.key}") # noqa: T201 elif s3_config["repo_type"] == "oss" and hasattr(art, "key"): - print("%s has been uploaded to oss://%s" % (name, art.key)) + print(f"{name} has been uploaded to oss://{art.key}") # noqa: T201 return art diff --git a/dpgen2/utils/binary_file_input.py b/dpgen2/utils/binary_file_input.py index db099a00..b6912477 
100644 --- a/dpgen2/utils/binary_file_input.py +++ b/dpgen2/utils/binary_file_input.py @@ -1,30 +1,20 @@ -"""Binary file inputs""" +"""Binary file inputs.""" + import os import warnings from pathlib import ( Path, ) from typing import ( - Any, - List, Optional, - Tuple, Union, ) -from dargs import ( - Argument, - dargs, -) -from dflow.python import ( - TransientError, -) - class BinaryFileInput: def __init__(self, path: Union[str, Path], ext: Optional[str] = None) -> None: path = str(path) - assert os.path.exists(path), f"No such file: {str(path)}" + assert os.path.exists(path), f"No such file: {path!s}" if ext and not ext.startswith("."): ext = "." + ext self.ext = ext @@ -32,7 +22,7 @@ def __init__(self, path: Union[str, Path], ext: Optional[str] = None) -> None: if self.ext: assert ( os.path.splitext(path)[-1] == self.ext - ), f'File extension mismatch, require "{ext}", current "{os.path.splitext(path)[-1]}", file path: {str(path)}' + ), f'File extension mismatch, require "{ext}", current "{os.path.splitext(path)[-1]}", file path: {path!s}' self.file_name = os.path.basename(path) with open(path, "rb") as f: diff --git a/dpgen2/utils/bohrium_config.py b/dpgen2/utils/bohrium_config.py index e94dc28a..c0574c09 100644 --- a/dpgen2/utils/bohrium_config.py +++ b/dpgen2/utils/bohrium_config.py @@ -1,7 +1,5 @@ import importlib -import os -import dflow from dflow.config import ( config, s3_config, diff --git a/dpgen2/utils/chdir.py b/dpgen2/utils/chdir.py index 3bc0b5c0..5be879e5 100644 --- a/dpgen2/utils/chdir.py +++ b/dpgen2/utils/chdir.py @@ -33,7 +33,7 @@ def set_directory(path: Path): Examples -------- >>> with set_directory("some_path"): - ... do_something() + ... 
do_something() """ cwd = Path().absolute() path.mkdir(exist_ok=True, parents=True) diff --git a/dpgen2/utils/dflow_config.py b/dpgen2/utils/dflow_config.py index e6a7b9ba..928e580e 100644 --- a/dpgen2/utils/dflow_config.py +++ b/dpgen2/utils/dflow_config.py @@ -1,5 +1,3 @@ -import copy - from dflow.config import ( config, s3_config, @@ -19,7 +17,7 @@ def dflow_config_lower( dflow_config, ): dflow_s3_config = {} - keys = [kk for kk in dflow_config.keys()] + keys = list(dflow_config.keys()) for kk in keys: if kk[:3] == "s3_": dflow_s3_config[kk[3:]] = dflow_config.pop(kk) @@ -40,7 +38,7 @@ def dflow_config( config_data, ): """ - set the dflow config by `config_data` + set the dflow config by `config_data`. the keys starting with "s3_" will be treated as s3_config keys, other keys are treated as config keys. @@ -53,9 +51,6 @@ def dflow_config( def dflow_s3_config( config_data, ): - """ - set the s3 config by `config_data` - - """ + """Set the s3 config by `config_data`.""" if config_data is not None: dflow_s3_config_lower(config_data) diff --git a/dpgen2/utils/dflow_query.py b/dpgen2/utils/dflow_query.py index 933c8cc6..f3ee1d72 100644 --- a/dpgen2/utils/dflow_query.py +++ b/dpgen2/utils/dflow_query.py @@ -6,8 +6,6 @@ Optional, ) -import numpy as np - def get_subkey( key: str, @@ -26,9 +24,7 @@ def matched_step_key( all_keys: List[str], step_keys: Optional[List[str]] = None, ): - """ - returns the keys in `all_keys` that matches any of the `step_keys` - """ + """Returns the keys in `all_keys` that matches any of the `step_keys`.""" if step_keys is None: return all_keys ret = [] @@ -48,9 +44,7 @@ def get_last_scheduler( wf: Any, keys: List[str], ): - """ - get the output Scheduler of the last successful iteration - """ + """Get the output Scheduler of the last successful iteration.""" outputs = wf.query_global_outputs() if ( outputs is not None @@ -74,7 +68,7 @@ def get_last_scheduler( return None else: skey = sorted(scheduler_keys)[-1] - step = [step for step in 
scheduler_steps if step.key == skey][0] + step = next(step for step in scheduler_steps if step.key == skey) return step.outputs.parameters["exploration_scheduler"].value @@ -82,9 +76,7 @@ def get_all_schedulers( wf: Any, keys: List[str], ): - """ - get the output Scheduler of the all the iterations - """ + """Get the output Scheduler of the all the iterations.""" scheduler_keys = sorted(matched_step_key(keys, ["scheduler"])) if len(scheduler_keys) == 0: return None @@ -99,9 +91,7 @@ def get_all_schedulers( def get_last_iteration( keys: List[str], ): - """ - get the index of the last iteraction from a list of step keys. - """ + """Get the index of the last iteraction from a list of step keys.""" return int(sorted([get_subkey(ii, 0) for ii in keys])[-1].split("-")[1]) @@ -109,9 +99,7 @@ def find_slice_ranges( keys: List[str], sliced_subkey: str, ): - """ - find range of sliced OPs that matches the pattern 'iter-[0-9]*--{sliced_subkey}-[0-9]*' - """ + """Find range of sliced OPs that matches the pattern 'iter-[0-9]*--{sliced_subkey}-[0-9]*'.""" found_range = [] tmp_range = [] status = "not-found" @@ -142,9 +130,7 @@ def sort_slice_ops( keys: List[str], sliced_subkey: List[str], ): - """ - sort the keys of the sliced ops. the keys of the sliced ops contains sliced_subkey - """ + """Sort the keys of the sliced ops. 
the keys of the sliced ops contains sliced_subkey.""" if isinstance(sliced_subkey, str): sliced_subkey = [sliced_subkey] for ii in sliced_subkey: @@ -166,7 +152,7 @@ def print_keys_in_nice_format( slice_1 = [ii[1] for ii in slice_range] normal_fmt = f"%{idx_fmt_len*2+4}d" - range_fmt = f"%d -> %d" + range_fmt = "%d -> %d" range_s_fmt = f"%{idx_fmt_len*2+4}s" idx = 0 @@ -187,4 +173,4 @@ def print_keys_in_nice_format( except ValueError: ret.append((normal_fmt + " : " + "%s") % (idx, keys[idx])) idx += 1 - return "\n".join(ret + [""]) + return "\n".join([*ret, ""]) diff --git a/dpgen2/utils/download_dpgen2_artifacts.py b/dpgen2/utils/download_dpgen2_artifacts.py index b5f69153..d759525f 100644 --- a/dpgen2/utils/download_dpgen2_artifacts.py +++ b/dpgen2/utils/download_dpgen2_artifacts.py @@ -8,7 +8,6 @@ Optional, ) -import numpy as np from dflow import ( Workflow, download_artifact, @@ -105,13 +104,12 @@ def download_dpgen2_artifacts( the key should be of format 'iter-xxxxxx--subkey-of-step-xxxxxx' the input and output artifacts will be downloaded to prefix/iter-xxxxxx/key-of-step/inputs/ and - prefix/iter-xxxxxx/key-of-step/outputs/ + prefix/iter-xxxxxx/key-of-step/outputs/. 
the downloaded input and output artifacts of steps are defined by `op_download_setting` """ - iteration = get_iteration(key) subkey = get_subkey(key) mypath = Path(iteration) @@ -272,7 +270,7 @@ def _get_all_iterations(step_keys): if ii.startswith("iter-"): ii = int(ii.split("-")[1]) ret.append(ii) - ret = sorted(list(set(ret))) + ret = sorted(set(ret)) return ret @@ -282,7 +280,7 @@ def _get_all_queried_steps(wf_step_keys, dld_items): ret.append(ii.split(global_step_def_split)[0]) ret = set(ret) ret = ret.intersection(set(wf_step_keys)) - return sorted(list(ret)) + return sorted(ret) def _get_dld_items( diff --git a/dpgen2/utils/obj_artifact.py b/dpgen2/utils/obj_artifact.py index 05ab85eb..9d1e712c 100644 --- a/dpgen2/utils/obj_artifact.py +++ b/dpgen2/utils/obj_artifact.py @@ -8,10 +8,7 @@ def dump_object_to_file( obj, fname, ): - """ - pickle dump object to a file - - """ + """Pickle dump object to a file.""" with open(fname, "wb") as fp: pickle.dump(obj, fp) return Path(fname) @@ -20,10 +17,7 @@ def dump_object_to_file( def load_object_from_file( fname, ): - """ - pickle load object from a file - - """ + """Pickle load object from a file.""" with open(fname, "rb") as fp: obj = pickle.load(fp) return obj diff --git a/dpgen2/utils/run_command.py b/dpgen2/utils/run_command.py index 2d5c5764..69670184 100644 --- a/dpgen2/utils/run_command.py +++ b/dpgen2/utils/run_command.py @@ -1,4 +1,3 @@ -import os from typing import ( List, Tuple, diff --git a/dpgen2/utils/step_config.py b/dpgen2/utils/step_config.py index 591f474e..86104b4a 100644 --- a/dpgen2/utils/step_config.py +++ b/dpgen2/utils/step_config.py @@ -1,13 +1,7 @@ -import os - -import dargs from dargs import ( Argument, Variant, ) -from dflow.config import ( - config, -) from dflow.plugins.dispatcher import ( DispatcherExecutor, ) @@ -18,12 +12,12 @@ def dispatcher_args(): - """free style dispatcher args""" + """Free style dispatcher args.""" return [] def variant_executor(): - doc = f"The type of the 
executor." + doc = "The type of the executor." return Variant( "type", [ diff --git a/examples/almg/dp_template.json b/examples/almg/dp_template.json index 79cabe0a..eee8009c 100644 --- a/examples/almg/dp_template.json +++ b/examples/almg/dp_template.json @@ -2,19 +2,19 @@ "model" : { "type_map": ["Al", "Mg"], "descriptor": { - "type": "se_a", - "sel": [90, 90], - "rcut_smth": 1.80, - "rcut": 6.00, - "neuron": [25, 50, 100], - "resnet_dt": false, - "axis_neuron": 4, - "seed": 1 + "type": "se_a", + "sel": [90, 90], + "rcut_smth": 1.80, + "rcut": 6.00, + "neuron": [25, 50, 100], + "resnet_dt": false, + "axis_neuron": 4, + "seed": 1 }, "fitting_net" : { - "neuron": [128, 128, 128], - "resnet_dt": true, - "seed": 1 + "neuron": [128, 128, 128], + "resnet_dt": true, + "seed": 1 } }, "loss" : { @@ -32,8 +32,8 @@ }, "training" : { "training_data": { - "systems": [], - "batch_size":"auto" + "systems": [], + "batch_size":"auto" }, "numb_steps":1000, "seed":10, diff --git a/examples/almg/input-v005.json b/examples/almg/input-v005.json index 09f2f12c..55fef5a7 100644 --- a/examples/almg/input-v005.json +++ b/examples/almg/input-v005.json @@ -5,63 +5,63 @@ "default_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, "run_train_config" : { "template_config" : { - "image" : "deepmd-kit:wanghan", - "_comment" : "all" + "image" : "deepmd-kit:wanghan", + "_comment" : "all" }, "executor" : { - "type" : "lebesgue_v2", - "extra" : { + "type" : "lebesgue_v2", + "extra" : { "scass_type": "c6_m64_1 * NVIDIA 3090", "machine_type": "c6_m64_1 * NVIDIA 3090", "platform": "paratera", "program_id": "xxxx", "job_type": "container", "region" : "default" - } + } }, "_comment" : "all" }, "run_explore_config" : { "template_config" : { - "image" : "deepmd-kit:wanghan", - "_comment" : "all" + "image" : "deepmd-kit:wanghan", + "_comment" : "all" }, "executor" : { - "type" : "lebesgue_v2", - "extra" : 
{ + "type" : "lebesgue_v2", + "extra" : { "scass_type": "c8_m32_cpu", "machine_type": "c8_m32_cpu", "platform": "paratera", "program_id": "xxxx", "job_type": "container", "region" : "default" - } + } }, "_comment" : "all" }, "run_fp_config" : { "template_config" : { - "image" : "vasp:wanghan", - "_comment" : "all" + "image" : "vasp:wanghan", + "_comment" : "all" }, "executor" : { - "type" : "lebesgue_v2", - "extra" : { + "type" : "lebesgue_v2", + "extra" : { "scass_type": "c16_m64_cpu", "machine_type": "c16_m64_cpu", "platform": "paratera", "program_id": "xxxx", "job_type": "container", "region" : "default" - } + } }, "_comment" : "all" }, @@ -82,12 +82,12 @@ "lebesgue_context_config": { "executor" : "lebesgue_v2", "extra" : { - "scass_type": "c8_m32_cpu", - "machine_type": "c8_m32_cpu", + "scass_type": "c8_m32_cpu", + "machine_type": "c8_m32_cpu", "platform": "xxxx", "program_id": "xxxx", "job_type": "container", - "region" : "default" + "region" : "default" }, "app_name" : "Default", "org_id" : "xxx", @@ -113,10 +113,10 @@ "sys_configs_prefix": "", "sys_configs": [ { - "lattice" : ["fcc", 4.57], - "replicate" : [2, 2, 2], - "numb_confs" : 30, - "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] + "lattice" : ["fcc", 4.57], + "replicate" : [2, 2, 2], + "numb_confs" : 30, + "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] } ], @@ -124,8 +124,8 @@ "numb_models": 4, "default_training_param" : { "model" : { - "type_map": ["Al", "Mg"], - "descriptor": { + "type_map": ["Al", "Mg"], + "descriptor": { "type": "se_a", "sel": [90, 90], "rcut_smth": 1.80, @@ -134,39 +134,39 @@ "resnet_dt": false, "axis_neuron": 4, "seed": 1 - }, - "fitting_net" : { + }, + "fitting_net" : { "neuron": [128, 128, 128], "resnet_dt": true, "seed": 1 - } + } }, "loss" : { - "start_pref_e": 0.02, - "limit_pref_e": 1, - "start_pref_f": 1000, - "limit_pref_f": 1, - "start_pref_v": 0, - "limit_pref_v": 0 + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 
1, + "start_pref_v": 0, + "limit_pref_v": 0 }, "learning_rate" : { - "start_lr": 0.001, - "stop_lr": 1e-8, + "start_lr": 0.001, + "stop_lr": 1e-8, "decay_steps": 100 }, "training" : { - "training_data": { + "training_data": { "systems": [], "batch_size":"auto" - }, - "numb_steps":1000, - "seed":10, - "disp_file":"lcurve.out", - "disp_freq":100, - "save_freq":1000 + }, + "numb_steps":1000, + "seed":10, + "disp_file":"lcurve.out", + "disp_freq":100, + "save_freq":1000 } }, diff --git a/examples/almg/input.json b/examples/almg/input.json index 4702993a..5af04f0f 100644 --- a/examples/almg/input.json +++ b/examples/almg/input.json @@ -9,13 +9,13 @@ "default_step_config": { "template_config": { - "image" : "dpgen2:master", - "_comment" : "all" + "image" : "dpgen2:master", + "_comment" : "all" }, "executor" : { - "type" : "dispatcher", - "image_pull_policy" : "IfNotPresent", - "machine_dict": { + "type" : "dispatcher", + "image_pull_policy" : "IfNotPresent", + "machine_dict": { "batch_type": "Bohrium", "context_type": "Bohrium", "remote_profile": { @@ -25,89 +25,89 @@ "scass_type": "c2_m8_cpu" } } - } + } }, "_comment" : "all" }, "step_configs":{ "run_train_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type" : "dispatcher", "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + "remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c2_m8_cpu" + "job_type": "container", + "platform": "ali", + "scass_type": "c2_m8_cpu" } - } + } } - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_explore_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, + }, "continue_on_success_ratio" : 0.80, - "executor" : { + "executor" : { 
"type" : "dispatcher", "retry_on_submission_error": 10, "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + "remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c8_m32_1 * NVIDIA V100" + "job_type": "container", + "platform": "ali", + "scass_type": "c8_m32_1 * NVIDIA V100" } - } + } } - }, - "template_slice_config":{ + }, + "template_slice_config":{ "group_size": 20, "pool_size": 5 - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_fp_config" : { - "template_config" : { + "template_config" : { "image" : "vasp:wanghan", "_comment" : "all" - }, + }, "continue_on_success_ratio" : 0.80, - "executor" : { + "executor" : { "type" : "dispatcher", "retry_on_submission_error": 10, "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + "remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c16_m64_cpu" + "job_type": "container", + "platform": "ali", + "scass_type": "c16_m64_cpu" } - } + } } - }, + }, "template_slice_config":{ - "group_size": 20, + "group_size": 20, "pool_size": 1 - }, - "_comment" : "all" + }, + "_comment" : "all" }, "_comment" : "all" }, @@ -120,8 +120,8 @@ "mixed_type": false, "init_data_prefix": null, "init_data_sys": [ - "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", - "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" + "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", + "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" ], "_comment" : "all" }, @@ -129,7 +129,7 @@ "type" : "dp", "numb_models" : 4, "config" : { - "init_model_policy" : "no" + "init_model_policy" : "no" }, "template_script" : "dp_template.json", "_comment" : "all" @@ -138,47 +138,47 @@ "explore" : { "type" : "lmp", "config" : { - 
"command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "convergence": { - "type" : "fixed-levels", - "conv_accuracy" : 0.9, - "level_f_lo": 0.05, - "level_f_hi": 0.50, - "_comment" : "all" + "type" : "fixed-levels", + "conv_accuracy" : 0.9, + "level_f_lo": 0.05, + "level_f_hi": 0.50, + "_comment" : "all" }, "max_numb_iter" : 5, "fatal_at_max" : false, "output_nopbc": false, "configurations": [ - { + { "type": "alloy", "lattice" : ["fcc", 4.57], "replicate" : [2, 2, 2], "numb_confs" : 30, "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] - }, - { + }, + { "type" : "file", "files" : ["/path/to/confs/*"], "fmt" : "deepmd/npy" - } + } ], "_comment" : "Stage is of type List[List[dict]]. ", "_comment" : "The outer list gives stages, the inner list gives the task groups of the stage, and dict describes the task group.", "stages": [ - [ + [ { - "type" : "lmp-md", - "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, - "conf_idx": [0], "n_sample" : 3 + "type" : "lmp-md", + "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, + "conf_idx": [0], "n_sample" : 3 }, { - "type" : "lmp-template", - "lmp" : "template.lammps", "trj_freq" : 10, "revisions" : {"V_NSTEPS" : [40], "V_TEMP" : [100, 200]}, - "conf_idx": [0], "n_sample" : 3 + "type" : "lmp-template", + "lmp" : "template.lammps", "trj_freq" : 10, "revisions" : {"V_NSTEPS" : [40], "V_TEMP" : [100, 200]}, + "conf_idx": [0], "n_sample" : 3 } - ] + ] ], "_comment" : "all" }, @@ -186,13 +186,13 @@ "type" : "vasp", "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, - "incar": "vasp/INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, + "incar": "vasp/INCAR", + "kspacing": 0.32, + "kgamma": true }, "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 
16 vasp_std" }, "_comment" : "all" } diff --git a/examples/calypso/dpa2_train.json b/examples/calypso/dpa2_train.json index e6019597..3206b31c 100644 --- a/examples/calypso/dpa2_train.json +++ b/examples/calypso/dpa2_train.json @@ -72,14 +72,14 @@ "stat_file": "./dpa2", "training_data": { "systems": [ - "/personal/workplace/DP/dpgen2/Mg12Al8/deepmd" + "/personal/workplace/DP/dpgen2/Mg12Al8/deepmd" ], "batch_size": 1, "_comment": "that's all" }, "_validation_data": { "systems": [ - "/personal/workplace/DP/dpgen2/Mg12Al8/deepmd" + "/personal/workplace/DP/dpgen2/Mg12Al8/deepmd" ], "batch_size": 1, "_comment": "that's all" diff --git a/examples/calypso/input.test.json b/examples/calypso/input.test.json index c93e658c..cf538b86 100644 --- a/examples/calypso/input.test.json +++ b/examples/calypso/input.test.json @@ -448,14 +448,14 @@ 3, 2, 1 ], "name_of_atoms": [ - ["Mg", "Si", "O"], ["La", "Li"], ["H"] + ["Mg", "Si", "O"], ["La", "Li"], ["H"] ], "pop_size": 5, "max_step": 2, "distance_of_ions": {"Mg": 0.7, "Si": 0.7, "O": 0.7, "La": 0.7, "Li": 0.7, "H": 0.7}, - "opt_step": 1, - "fmax": 1, - "pressure": 10 + "opt_step": 1, + "fmax": 1, + "pressure": 10 }, { "numb_of_species": 2, diff --git a/examples/chno/input.json b/examples/chno/input.json index 5ea9024b..3732e5a1 100644 --- a/examples/chno/input.json +++ b/examples/chno/input.json @@ -9,13 +9,13 @@ "default_step_config": { "template_config": { - "image" : "registry.dp.tech/dptech/prod-11881/dpgen2-utils:1.2", - "_comment" : "all" + "image" : "registry.dp.tech/dptech/prod-11881/dpgen2-utils:1.2", + "_comment" : "all" }, "executor" : { - "type" : "dispatcher", - "image_pull_policy" : "IfNotPresent", - "machine_dict": { + "type" : "dispatcher", + "image_pull_policy" : "IfNotPresent", + "machine_dict": { "batch_type": "Bohrium", "context_type": "Bohrium", "remote_profile": { @@ -25,89 +25,89 @@ "scass_type": "c2_m8_cpu" } } - } + } }, "_comment" : "all" }, "step_configs":{ "run_train_config" : { - "template_config" : { + 
"template_config" : { "image" : "registry.dp.tech/dptech/deepmd-kit:2.1.5-cuda11.6", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type" : "dispatcher", "retry_on_submission_error": 10, "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + "remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c8_m32_1 * NVIDIA V100" + "job_type": "container", + "platform": "ali", + "scass_type": "c8_m32_1 * NVIDIA V100" } - } + } } - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_explore_config" : { - "template_config" : { + "template_config" : { "image" : "registry.dp.tech/dptech/deepmd-kit:2.1.5-cuda11.6", "_comment" : "all" - }, + }, "continue_on_success_ratio" : 0.80, - "executor" : { + "executor" : { "type" : "dispatcher", "retry_on_submission_error": 10, "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + "remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c8_m32_1 * NVIDIA V100" + "job_type": "container", + "platform": "ali", + "scass_type": "c8_m32_1 * NVIDIA V100" } - } + } } - }, + }, "template_slice_config":{ "group_size": 20, "pool_size": 5 }, - "_comment" : "all" + "_comment" : "all" }, "run_fp_config" : { - "template_config" : { + "template_config" : { "image" : "registry.dp.tech/dptech/prod-11881/manyi-dalibao:1.3", "_comment" : "all" - }, + }, "continue_on_success_ratio" : 0.80, - "executor" : { + "executor" : { "type" : "dispatcher", "retry_on_submission_error": 10, "image_pull_policy" : "IfNotPresent", "machine_dict": { - "batch_type": "Bohrium", - "context_type": "Bohrium", - "remote_profile": { + "batch_type": "Bohrium", + "context_type": "Bohrium", + 
"remote_profile": { "input_data": { - "job_type": "container", - "platform": "ali", - "scass_type": "c16_m64_cpu" + "job_type": "container", + "platform": "ali", + "scass_type": "c16_m64_cpu" } - } + } } - }, + }, "template_slice_config":{ - "group_size": 20, + "group_size": 20, "pool_size": 1 - }, + }, "_comment" : "all" }, "_comment" : "all" @@ -132,19 +132,19 @@ "type" : "dp", "numb_models" : 4, "init_models_paths":[ - "/data/druglike.dpgen2.mixed.8-b/init.models/task.0000/frozen_model.pb", - "/data/druglike.dpgen2.mixed.8-b/init.models/task.0001/frozen_model.pb", - "/data/druglike.dpgen2.mixed.8-b/init.models/task.0002/frozen_model.pb", - "/data/druglike.dpgen2.mixed.8-b/init.models/task.0003/frozen_model.pb" + "/data/druglike.dpgen2.mixed.8-b/init.models/task.0000/frozen_model.pb", + "/data/druglike.dpgen2.mixed.8-b/init.models/task.0001/frozen_model.pb", + "/data/druglike.dpgen2.mixed.8-b/init.models/task.0002/frozen_model.pb", + "/data/druglike.dpgen2.mixed.8-b/init.models/task.0003/frozen_model.pb" ], "config" : { - "init_model_policy" : "yes", - "init_model_old_ratio" : 0.98, - "init_model_numb_steps" : 600000, - "init_model_start_lr" : 1e-4, - "init_model_start_pref_e" : 0.25, - "init_model_start_pref_f" : 100, - "_comment" : "all" + "init_model_policy" : "yes", + "init_model_old_ratio" : 0.98, + "init_model_numb_steps" : 600000, + "init_model_start_lr" : 1e-4, + "init_model_start_pref_e" : 0.25, + "init_model_start_pref_f" : 100, + "_comment" : "all" }, "template_script" : "dpa_manyi.json", "_comment" : "all" @@ -153,7 +153,7 @@ "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "convergence": { "type": "adaptive-lower", @@ -168,21 +168,21 @@ "fatal_at_max" : false, "output_nopbc": true, "configurations": [ - { + { "type" : "file", "files" : ["md.data/*N1[56].equ/*"], "fmt" : "deepmd/npy", "remove_pbc" : true - } + } ], "stages": [ - [ + [ { - "type" : "lmp-template", - "sys_idx": [0], 
"n_sample" : 4000, - "lmp" : "template.lammps", "trj_freq" : 500, "revisions" : {"V_NSTEPS" : [20000], "V_TEMP" : [750], "V_DUMPFREQ": [2000]} + "type" : "lmp-template", + "sys_idx": [0], "n_sample" : 4000, + "lmp" : "template.lammps", "trj_freq" : 500, "revisions" : {"V_NSTEPS" : [20000], "V_TEMP" : [750], "V_DUMPFREQ": [2000]} } - ] + ] ], "_comment" : "all" }, @@ -190,14 +190,14 @@ "type" : "gaussian", "task_max": 5000, "run_config" : { - "command": "ulimit -s unlimited && export g16root='/root/Gaussian_16' && source $g16root/g16/bsd/g16.profile && g16 " + "command": "ulimit -s unlimited && export g16root='/root/Gaussian_16' && source $g16root/g16/bsd/g16.profile && g16 " }, "inputs_config" : { "keywords" : "wB97XD/6-31G** nosymm scf(conver=8) force", - "multiplicity" : "auto", - "charge" : 0, - "nproc" : 16, - "_comment" : "all" + "multiplicity" : "auto", + "charge" : 0, + "nproc" : 16, + "_comment" : "all" }, "_comment" : "all" } diff --git a/pyproject.toml b/pyproject.toml index bd8c92f2..ff387ae6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,3 +80,46 @@ exclude = [ [tool.isort] profile = "black" force_grid_wrap = 1 + +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] +select = [ + "E", # errors + "W", # warning + "F", # pyflakes + "D", # pydocstyle + "UP", # pyupgrade + "C4", # flake8-comprehensions + "RUF", # ruff + "NPY", # numpy + "TID251", # banned-api + "TID253", # banned-module-level-imports + "T20", # ban print + "B904", # raise-without-from-inside-except + "N804", # invalid-first-argument-name-for-class-method + "N805", # invalid-first-argument-name-for-method + "DTZ", # datetime +] + +ignore = [ + "E501", # line too long + "F841", # local variable is assigned to but never used + "E741", # ambiguous variable name + "E402", # module level import not at top of file + "D100", # TODO: missing docstring in public module + "D101", # TODO: missing docstring in public class + "D102", # TODO: missing docstring in public method + 
"D103", # TODO: missing docstring in public function + "D104", # TODO: missing docstring in public package + "D105", # TODO: missing docstring in magic method + "D205", # 1 blank line required between summary line and description + "D401", # TODO: first line should be in imperative mood + "D404", # TODO: first word of the docstring should not be This +] +exclude = [ +] + +[tool.ruff.lint.pydocstyle] +convention = "numpy" diff --git a/tests/conf/context.py b/tests/conf/context.py index d6638725..f2118984 100644 --- a/tests/conf/context.py +++ b/tests/conf/context.py @@ -2,4 +2,4 @@ import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff --git a/tests/conf/test_alloy_conf.py b/tests/conf/test_alloy_conf.py index 0dd4cc5a..cdf484bd 100644 --- a/tests/conf/test_alloy_conf.py +++ b/tests/conf/test_alloy_conf.py @@ -1,7 +1,4 @@ -import json -import os import random -import shutil import tempfile import unittest from pathlib import ( @@ -12,7 +9,6 @@ import numpy as np # isort: off -from .context import dpgen2 from dpgen2.conf.alloy_conf import ( AlloyConf, AlloyConfGenerator, diff --git a/tests/conf/test_file_conf.py b/tests/conf/test_file_conf.py index 381f0d88..a4d57807 100644 --- a/tests/conf/test_file_conf.py +++ b/tests/conf/test_file_conf.py @@ -1,8 +1,4 @@ -import json -import os -import random import shutil -import tempfile import textwrap import unittest from pathlib import ( @@ -13,9 +9,6 @@ import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.conf.file_conf import ( FileConfGenerator, ) @@ -25,60 +18,60 @@ pos0 = textwrap.dedent( """POSCAR file written by OVITO 1 -1 0 0 -0 1 0 +1 0 0 +0 1 0 0 0 1 -Al +Al 1 Cartesian -0 0 0 +0 0 0 """ ) pos1 = textwrap.dedent( """POSCAR file written by OVITO 1 -2 0 0 +2 0 0 0 2 0 0 0 2 Mg 1 Cartesian -0 0 0 +0 0 0 """ ) -ifc0 = """Al1 +ifc0 = """Al1 1.0 2.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 2.0 -Al -1 +Al +1 
cartesian 0.0000000000 0.0000000000 0.0000000000 """ ofc0 = "\n1 atoms\n2 atom types\n 0.0000000000 2.0000000000 xlo xhi\n 0.0000000000 2.0000000000 ylo yhi\n 0.0000000000 2.0000000000 zlo zhi\n 0.0000000000 0.0000000000 0.0000000000 xy xz yz\n\nAtoms # atomic\n\n 1 1 0.0000000000 0.0000000000 0.0000000000\n" -ifc1 = """Mg1 +ifc1 = """Mg1 1.0 3.0 0.0 0.0 0.0 3.0 0.0 0.0 0.0 3.0 -Mg -1 +Mg +1 cartesian 0.0000000000 0.0000000000 0.0000000000 """ ofc1 = "\n1 atoms\n2 atom types\n 0.0000000000 3.0000000000 xlo xhi\n 0.0000000000 3.0000000000 ylo yhi\n 0.0000000000 3.0000000000 zlo zhi\n 0.0000000000 0.0000000000 0.0000000000 xy xz yz\n\nAtoms # atomic\n\n 1 2 0.0000000000 0.0000000000 0.0000000000\n" -ifc2 = """Mg1 +ifc2 = """Mg1 1.0 4.0 0.0 0.0 0.0 4.0 0.0 0.0 0.0 4.0 -Mg -1 +Mg +1 cartesian 0.0000000000 0.0000000000 0.0000000000 """ diff --git a/tests/conf/test_unit_cell.py b/tests/conf/test_unit_cell.py index 19544f0e..72603835 100644 --- a/tests/conf/test_unit_cell.py +++ b/tests/conf/test_unit_cell.py @@ -1,17 +1,8 @@ -import json -import os -import shutil import unittest -from pathlib import ( - Path, -) import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.conf.unit_cells import ( generate_unit_cell, ) diff --git a/tests/context.py b/tests/context.py index 0fce7a85..ef78c2aa 100644 --- a/tests/context.py +++ b/tests/context.py @@ -3,7 +3,7 @@ dpgen_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) sys.path.insert(0, dpgen_path) -import dpgen2 +import dpgen2 # noqa: F401 from dpgen2.utils import ( dflow_config, ) diff --git a/tests/entrypoint/context.py b/tests/entrypoint/context.py index ee903610..a01d3fba 100644 --- a/tests/entrypoint/context.py +++ b/tests/entrypoint/context.py @@ -3,4 +3,4 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff 
--git a/tests/entrypoint/test_argparse.py b/tests/entrypoint/test_argparse.py index 6f494afb..3cab08b3 100644 --- a/tests/entrypoint/test_argparse.py +++ b/tests/entrypoint/test_argparse.py @@ -1,15 +1,8 @@ -import json -import os -import shutil import unittest # isort: off -from .context import ( - dpgen2, -) from dpgen2.entrypoint.main import ( main_parser, - parse_args, workflow_subcommands, ) diff --git a/tests/entrypoint/test_submit.py b/tests/entrypoint/test_submit.py index 6352df90..68b1375d 100644 --- a/tests/entrypoint/test_submit.py +++ b/tests/entrypoint/test_submit.py @@ -1,21 +1,13 @@ import json import os -import random -import shutil -import tempfile import textwrap import unittest from pathlib import ( Path, ) -import dpdata -import numpy as np - # isort: off -from .context import ( - dpgen2, -) +from .context import dpgen2 # noqa: F401 from dpgen2.entrypoint.submit import ( copy_scheduler_plans, expand_idx, @@ -28,7 +20,6 @@ TrajRenderLammps, ) from dpgen2.exploration.report import ( - ExplorationReport, ExplorationReportTrustLevelsRandom, ) from dpgen2.exploration.scheduler import ( @@ -38,10 +29,6 @@ from dpgen2.exploration.selector import ( ConfSelectorFrames, ) -from dpgen2.exploration.task import ( - ExplorationStage, - ExplorationTaskGroup, -) from mocked_ops import ( MockedExplorationReport, MockedExplorationTaskGroup, @@ -53,37 +40,37 @@ # isort: on -ifc0 = """Al1 +ifc0 = """Al1 1.0 2.0 0.0 0.0 0.0 2.0 0.0 0.0 0.0 2.0 -Al -1 +Al +1 cartesian 0.0000000000 0.0000000000 0.0000000000 """ ofc0 = "\n1 atoms\n2 atom types\n 0.0000000000 2.0000000000 xlo xhi\n 0.0000000000 2.0000000000 ylo yhi\n 0.0000000000 2.0000000000 zlo zhi\n 0.0000000000 0.0000000000 0.0000000000 xy xz yz\n\nAtoms # atomic\n\n 1 1 0.0000000000 0.0000000000 0.0000000000\n" -ifc1 = """Mg1 +ifc1 = """Mg1 1.0 3.0 0.0 0.0 0.0 3.0 0.0 0.0 0.0 3.0 -Mg -1 +Mg +1 cartesian 0.0000000000 0.0000000000 0.0000000000 """ ofc1 = "\n1 atoms\n2 atom types\n 0.0000000000 3.0000000000 xlo xhi\n 
0.0000000000 3.0000000000 ylo yhi\n 0.0000000000 3.0000000000 zlo zhi\n 0.0000000000 0.0000000000 0.0000000000 xy xz yz\n\nAtoms # atomic\n\n 1 2 0.0000000000 0.0000000000 0.0000000000\n" -ifc2 = """Mg1 +ifc2 = """Mg1 1.0 4.0 0.0 0.0 0.0 4.0 0.0 0.0 0.0 4.0 -Mg -1 +Mg +1 cartesian 0.0000000000 0.0000000000 0.0000000000 """ @@ -431,7 +418,7 @@ def tearDown(self): ) config["mode"] = None - for ii in self.touched_files + ["POSCAR"]: + for ii in [*self.touched_files, "POSCAR"]: os.remove(ii) def test(self): @@ -482,45 +469,45 @@ def test(self): { "default_step_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, "step_configs":{ "run_train_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_explore_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_fp_config" : { - "template_config" : { + "template_config" : { "image" : "vasp:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "_comment" : "all" }, @@ -530,7 +517,7 @@ def test(self): "mass_map": [27, 24], "init_data_prefix": null, "init_data_sys": [ - "init" + "init" ], "_comment" : "all" }, @@ -546,48 +533,48 @@ def test(self): "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "convergence": { - "type" : "fixed-levels", - "conv_accuracy" : 0.9, - "level_f_lo": 0.05, - "level_f_hi": 0.50, - "_comment" : "all" + "type" : "fixed-levels", + 
"conv_accuracy" : 0.9, + "level_f_lo": 0.05, + "level_f_hi": 0.50, + "_comment" : "all" }, "max_numb_iter" : 5, "fatal_at_max" : false, "output_nopbc": false, "configuration_prefix": null, "configurations": [ - { + { "type": "alloy", "lattice" : ["fcc", 4.57], "replicate" : [2, 2, 2], "numb_confs" : 30, "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] - } + } ], "_comment" : "Stage is of type List[List[dict]]. ", "_comment" : "The outer list gives stages, the inner list gives the task groups of the stage, and dict describes the task group.", "stages": [ - [ + [ { - "type" : "lmp-md", - "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, - "conf_idx": [0], "n_sample" : 3 + "type" : "lmp-md", + "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, + "conf_idx": [0], "n_sample" : 3 }, { - "type" : "customized-lmp-template", + "type" : "customized-lmp-template", "custom_shell_commands": ["mkdir aaa && cp conf.lmp lmp.template plm.template aaa"], - "input_lmp_tmpl_name": "lmp.template", + "input_lmp_tmpl_name": "lmp.template", "input_plm_tmpl_name": "plm.template", "output_dir_pattern": ["aaa"], "output_lmp_tmpl_name": "lmp.template", "output_plm_tmpl_name": "plm.template", - "conf_idx": [0], "n_sample" : 3 + "conf_idx": [0], "n_sample" : 3 } - ] + ] ], "_comment" : "all" }, @@ -595,13 +582,13 @@ def test(self): "type" : "vasp", "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "POTCAR.Al", "Mg" : "POTCAR.Mg"}, - "incar": "INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "POTCAR.Al", "Mg" : "POTCAR.Mg"}, + "incar": "INCAR", + "kspacing": 0.32, + "kgamma": true }, "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" }, "_comment" : "all" } @@ -792,45 +779,45 @@ def test(self): { "default_step_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" 
+ "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, "step_configs":{ "run_train_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_explore_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_fp_config" : { - "template_config" : { + "template_config" : { "image" : "vasp:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "_comment" : "all" }, @@ -839,7 +826,7 @@ def test(self): "mass_map": [27, 24], "init_data_prefix": null, "init_data_sys": [ - "init" + "init" ], "_comment" : "all" }, @@ -856,38 +843,38 @@ def test(self): "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "convergence": { - "type" : "fixed-levels", - "conv_accuracy" : 0.9, - "level_f_lo": 0.05, - "level_f_hi": 0.50, - "_comment" : "all" + "type" : "fixed-levels", + "conv_accuracy" : 0.9, + "level_f_lo": 0.05, + "level_f_hi": 0.50, + "_comment" : "all" }, "max_numb_iter" : 5, "fatal_at_max" : false, "output_nopbc": false, "configuration_prefix": null, "configurations": [ - { + { "type": "alloy", "lattice" : ["fcc", 4.57], "replicate" : [2, 2, 2], "numb_confs" : 30, "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] - } + } ], "_comment" : "Stage is of type List[List[dict]]. 
", "_comment" : "The outer list gives stages, the inner list gives the task groups of the stage, and dict describes the task group.", "stages": [ - [ + [ { - "type" : "lmp-md", - "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, - "conf_idx": [0], "n_sample" : 3 + "type" : "lmp-md", + "ensemble": "nvt", "nsteps": 50, "press": [1e0], "temps": [50], "trj_freq": 10, + "conf_idx": [0], "n_sample" : 3 } - ] + ] ], "_comment" : "all" }, @@ -895,13 +882,13 @@ def test(self): "type" : "vasp", "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "POTCAR.Al", "Mg" : "POTCAR.Mg"}, - "incar": "INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "POTCAR.Al", "Mg" : "POTCAR.Mg"}, + "incar": "INCAR", + "kspacing": 0.32, + "kgamma": true }, "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" }, "_comment" : "all" } @@ -931,7 +918,7 @@ def test(self): mass 1 27.000000 mass 2 24.000000 -pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out +pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out pair_coeff * * fix dpgen_plm diff --git a/tests/entrypoint/test_submit_args.py b/tests/entrypoint/test_submit_args.py index ec7e4582..a2b6a6cd 100644 --- a/tests/entrypoint/test_submit_args.py +++ b/tests/entrypoint/test_submit_args.py @@ -1,21 +1,8 @@ import json -import os -import random -import shutil -import tempfile import textwrap import unittest -from pathlib import ( - Path, -) - -import dpdata -import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.entrypoint.args import ( normalize, ) @@ -165,16 +152,16 @@ def test_bohrium(self): "default_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", 
+ "_comment" : "all" }, "_comment" : "all" }, "run_train_config" : { "template_config" : { - "image" : "deepmd-kit:wanghan", - "_comment" : "all" + "image" : "deepmd-kit:wanghan", + "_comment" : "all" }, "executor" : { "type": "dispatcher", @@ -184,8 +171,8 @@ def test_bohrium(self): }, "run_explore_config" : { "template_config" : { - "image" : "deepmd-kit:wanghan", - "_comment" : "all" + "image" : "deepmd-kit:wanghan", + "_comment" : "all" }, "executor" : { "type": "dispatcher", @@ -195,8 +182,8 @@ def test_bohrium(self): }, "run_fp_config" : { "template_config" : { - "image" : "vasp:wanghan", - "_comment" : "all" + "image" : "vasp:wanghan", + "_comment" : "all" }, "executor" : { "type": "dispatcher", @@ -235,15 +222,15 @@ def test_bohrium(self): "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" ], - "sys_configs_prefix": "", + "sys_configs_prefix": "", "sys_configs": [ { - "lattice" : ["fcc", 4.57], - "replicate" : [2, 2, 2], - "numb_confs" : 30, + "lattice" : ["fcc", 4.57], + "replicate" : [2, 2, 2], + "numb_confs" : 30, "atom_pert_dist" : 0.0, "cell_pert_frac" : 0.0, - "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] + "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] } ], @@ -251,8 +238,8 @@ def test_bohrium(self): "numb_models": 4, "default_training_param" : { "model" : { - "type_map": ["Al", "Mg"], - "descriptor": { + "type_map": ["Al", "Mg"], + "descriptor": { "type": "se_a", "sel": [90, 90], "rcut_smth": 1.80, @@ -261,39 +248,39 @@ def test_bohrium(self): "resnet_dt": false, "axis_neuron": 4, "seed": 1 - }, - "fitting_net" : { + }, + "fitting_net" : { "neuron": [128, 128, 128], "resnet_dt": true, "seed": 1 - } + } }, "loss" : { - "start_pref_e": 0.02, - "limit_pref_e": 1, - "start_pref_f": 1000, - "limit_pref_f": 1, - "start_pref_v": 0, - "limit_pref_v": 0 + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 }, "learning_rate" : 
{ - "start_lr": 0.001, - "stop_lr": 1e-8, + "start_lr": 0.001, + "stop_lr": 1e-8, "decay_steps": 100 }, "training" : { - "training_data": { + "training_data": { "systems": [], "batch_size":"auto" - }, - "numb_steps":1000, - "seed":10, - "disp_file":"lcurve.out", - "disp_freq":100, - "save_freq":1000 + }, + "numb_steps":1000, + "seed":10, + "disp_file":"lcurve.out", + "disp_freq":100, + "save_freq":1000 } }, @@ -305,7 +292,7 @@ def test_bohrium(self): { "_idx": 0, "ensemble": "nvt", "nsteps": 20, "press": [1.0,2.0], "sys_idx": [0], "temps": [50,100], "trj_freq": 10, "n_sample" : 3 } ], - "_comment": " 02.fp ", + "_comment": " 02.fp ", "fp_style": "vasp", "fp_task_max": 2, "fp_pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, @@ -329,45 +316,45 @@ def test_bohrium(self): "default_step_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, "step_configs":{ "run_train_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_explore_config" : { - "template_config" : { + "template_config" : { "image" : "deepmd-kit:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "run_fp_config" : { - "template_config" : { + "template_config" : { "image" : "vasp:wanghan", "_comment" : "all" - }, - "executor" : { + }, + "executor" : { "type": "dispatcher", "username": "foo" - }, - "_comment" : "all" + }, + "_comment" : "all" }, "_comment" : "all" }, @@ -379,8 +366,8 @@ def test_bohrium(self): "mass_map": [27, 24], "init_data_prefix": "", "init_data_sys": [ - "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", - "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" + 
"init/al.fcc.01x01x01/02.md/sys-0004/deepmd", + "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" ], "_comment" : "all" }, @@ -393,7 +380,7 @@ def test_bohrium(self): "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "max_numb_iter" : 5, "fatal_at_max" : false, @@ -403,33 +390,33 @@ def test_bohrium(self): "level_f_hi": 0.50, "conv_accuracy" : 0.9 }, - "configuration_prefix": null, + "configuration_prefix": null, "configuration": [ - { + { "type" : "alloy", "lattice" : ["fcc", 4.57], "replicate" : [2, 2, 2], "numb_confs" : 30, "concentration" : [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]] - } + } ], "stages": [[ - { "_idx": 0, "ensemble": "nvt", "nsteps": 20, "press": [1.0,2.0], "sys_idx": [0], "temps": [50,100], "trj_freq": 10, "n_sample" : 3 - } + { "_idx": 0, "ensemble": "nvt", "nsteps": 20, "press": [1.0,2.0], "sys_idx": [0], "temps": [50,100], "trj_freq": 10, "n_sample" : 3 + } ]], "_comment" : "all" }, "fp" : { "type" : "vasp", "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" }, "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, - "incar": "vasp/INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, + "incar": "vasp/INCAR", + "kspacing": 0.32, + "kgamma": true }, "_comment" : "all" } @@ -449,8 +436,8 @@ def test_bohrium(self): "default_step_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, @@ -466,8 +453,8 @@ def test_bohrium(self): "mass_map": [27, 24], "init_data_prefix": "", "init_data_sys": [ - "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", - "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" + "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", + "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" 
], "_comment" : "all" }, @@ -482,7 +469,7 @@ def test_bohrium(self): "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "max_numb_iter" : 5, "fatal_at_max" : false, @@ -492,7 +479,7 @@ def test_bohrium(self): "level_f_hi": 0.50, "conv_accuracy" : 0.9 }, - "configuration_prefix": null, + "configuration_prefix": null, "configuration": [ ], "stages": [ @@ -502,14 +489,14 @@ def test_bohrium(self): "fp" : { "type" : "vasp", "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" }, "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, - "incar": "vasp/INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, + "incar": "vasp/INCAR", + "kspacing": 0.32, + "kgamma": true }, "_comment" : "all" } diff --git a/tests/entrypoint/test_workflow.py b/tests/entrypoint/test_workflow.py index 5b48bcca..a8cc0855 100644 --- a/tests/entrypoint/test_workflow.py +++ b/tests/entrypoint/test_workflow.py @@ -1,19 +1,11 @@ import json -import os -import shutil import textwrap import unittest - -import dflow -import mock -from dflow import ( - Workflow, +from unittest import ( + mock, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.entrypoint.workflow import ( execute_workflow_subcommand, ) @@ -62,8 +54,8 @@ def test_resume(self, mocked_f): { "default_step_config" : { "template_config" : { - "image" : "dflow:1.1.4", - "_comment" : "all" + "image" : "dflow:1.1.4", + "_comment" : "all" }, "_comment" : "all" }, @@ -79,8 +71,8 @@ def test_resume(self, mocked_f): "mass_map": [27, 24], "init_data_prefix": "", "init_data_sys": [ - "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", - "init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" + "init/al.fcc.01x01x01/02.md/sys-0004/deepmd", + 
"init/mg.fcc.01x01x01/02.md/sys-0004/deepmd" ], "_comment" : "all" }, @@ -95,7 +87,7 @@ def test_resume(self, mocked_f): "explore" : { "type" : "lmp", "config" : { - "command": "lmp -var restart 0" + "command": "lmp -var restart 0" }, "max_numb_iter" : 5, "fatal_at_max" : false, @@ -105,7 +97,7 @@ def test_resume(self, mocked_f): "level_f_hi": 0.50, "conv_accuracy" : 0.9 }, - "configuration_prefix": null, + "configuration_prefix": null, "configuration": [ ], "stages": [ @@ -115,14 +107,14 @@ def test_resume(self, mocked_f): "fp" : { "type" : "vasp", "run_config" : { - "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" + "command": "source /opt/intel/oneapi/setvars.sh && mpirun -n 16 vasp_std" }, "task_max": 2, "inputs_config" : { - "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, - "incar": "vasp/INCAR", - "kspacing": 0.32, - "kgamma": true + "pp_files": {"Al" : "vasp/POTCAR.Al", "Mg" : "vasp/POTCAR.Mg"}, + "incar": "vasp/INCAR", + "kspacing": 0.32, + "kgamma": true }, "_comment" : "all" } diff --git a/tests/exploration/context.py b/tests/exploration/context.py index ee903610..a01d3fba 100644 --- a/tests/exploration/context.py +++ b/tests/exploration/context.py @@ -3,4 +3,4 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff --git a/tests/exploration/test_conf_filter.py b/tests/exploration/test_conf_filter.py index 0022e63b..de5fdeb4 100644 --- a/tests/exploration/test_conf_filter.py +++ b/tests/exploration/test_conf_filter.py @@ -1,19 +1,18 @@ -import os import unittest +from typing import ( + ClassVar, + List, +) +from unittest.mock import ( + patch, +) import dpdata -import numpy as np from fake_data_set import ( fake_system, ) -from mock import ( - patch, -) # isort: off -from .context import ( - dpgen2, -) from dpgen2.exploration.selector import ( 
ConfFilter, ConfFilters, @@ -32,7 +31,7 @@ def check( class faked_filter: myiter = -1 - myret = [True] + myret: ClassVar[List[bool]] = [True] @classmethod def faked_check(cls, frame): diff --git a/tests/exploration/test_conf_selector_frame.py b/tests/exploration/test_conf_selector_frame.py index 52b30248..00ef0dfc 100644 --- a/tests/exploration/test_conf_selector_frame.py +++ b/tests/exploration/test_conf_selector_frame.py @@ -7,12 +7,8 @@ ) import dpdata -import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.exploration.render import ( TrajRenderLammps, ) @@ -37,10 +33,10 @@ def setUp(self): 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 - ITEM: ATOMS id type x y z fx fy fz - 1 2 11.09 2.87 2.74 0.183043 -0.287677 -0.0974527 - 2 1 11.83 2.56 2.18 -0.224674 0.5841 0.074659 - 3 2 12.25 3.32 1.68 0.0416311 -0.296424 0.0227936 + ITEM: ATOMS id type x y z fx fy fz + 1 2 11.09 2.87 2.74 0.183043 -0.287677 -0.0974527 + 2 1 11.83 2.56 2.18 -0.224674 0.5841 0.074659 + 3 2 12.25 3.32 1.68 0.0416311 -0.296424 0.0227936 ITEM: TIMESTEP 1 ITEM: NUMBER OF ATOMS @@ -49,10 +45,10 @@ def setUp(self): 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 - ITEM: ATOMS id type x y z fx fy fz - 1 2 11.09 3.87 2.74 0.183043 -0.287677 -0.0974527 - 2 1 11.83 3.56 2.18 -0.224674 0.5841 0.074659 - 3 2 12.25 4.32 1.68 0.0416311 -0.296424 0.0227936 + ITEM: ATOMS id type x y z fx fy fz + 1 2 11.09 3.87 2.74 0.183043 -0.287677 -0.0974527 + 2 1 11.83 3.56 2.18 -0.224674 0.5841 0.074659 + 3 2 12.25 4.32 1.68 0.0416311 -0.296424 0.0227936 ITEM: TIMESTEP 1 ITEM: NUMBER OF ATOMS @@ -61,10 +57,10 @@ def setUp(self): 0.0000000000000000e+00 1.2444699999999999e+01 
0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 0.0000000000000000e+00 1.2444699999999999e+01 0.0000000000000000e+00 - ITEM: ATOMS id type x y z fx fy fz - 1 2 11.09 4.87 2.74 0.183043 -0.287677 -0.0974527 - 2 1 11.83 4.56 2.18 -0.224674 0.5841 0.074659 - 3 2 12.25 5.32 1.68 0.0416311 -0.296424 0.0227936 + ITEM: ATOMS id type x y z fx fy fz + 1 2 11.09 4.87 2.74 0.183043 -0.287677 -0.0974527 + 2 1 11.83 4.56 2.18 -0.224674 0.5841 0.074659 + 3 2 12.25 5.32 1.68 0.0416311 -0.296424 0.0227936 """ ) self.model_devi_file = textwrap.dedent( diff --git a/tests/exploration/test_customized_lmp_templ_task_group.py b/tests/exploration/test_customized_lmp_templ_task_group.py index 57462759..c33148a9 100644 --- a/tests/exploration/test_customized_lmp_templ_task_group.py +++ b/tests/exploration/test_customized_lmp_templ_task_group.py @@ -1,29 +1,16 @@ import itertools import os -import shutil import textwrap import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) - -import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass -from unittest.mock import ( - Mock, - patch, -) from dpgen2.constants import ( lmp_conf_name, @@ -32,7 +19,6 @@ ) from dpgen2.exploration.task import ( CustomizedLmpTemplateTaskGroup, - ExplorationStage, ) from .test_lmp_templ_task_group import ( diff --git a/tests/exploration/test_devi_manager.py b/tests/exploration/test_devi_manager.py index a0a29e68..a2296360 100644 --- a/tests/exploration/test_devi_manager.py +++ b/tests/exploration/test_devi_manager.py @@ -1,15 +1,8 @@ -import os import unittest -from pathlib import ( - Path, -) import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.exploration.deviation import ( DeviManager, DeviManagerStd, @@ -53,7 +46,7 @@ def test_add_invalid_deviation(self): 
self.assertRaisesRegex( AssertionError, - "Error: deviation\(shape: ", + r"Error: deviation\(shape: ", model_devi.add, DeviManager.MAX_DEVI_F, np.array([[1], [2], [3]]), @@ -61,7 +54,7 @@ def test_add_invalid_deviation(self): self.assertRaisesRegex( AssertionError, - "Error: deviation\(type: ", + r"Error: deviation\(type: ", model_devi.add, DeviManager.MAX_DEVI_F, "foo", @@ -99,13 +92,13 @@ def test_devi_manager_std_check_data(self): model_devi.add(DeviManager.MAX_DEVI_V, np.array([4, 5])) self.assertRaisesRegex( AssertionError, - f"Error: the number of frames in", + "Error: the number of frames in", model_devi.get, DeviManager.MAX_DEVI_F, ) self.assertRaisesRegex( AssertionError, - f"Error: the number of frames in", + "Error: the number of frames in", model_devi.get, DeviManager.MAX_DEVI_V, ) diff --git a/tests/exploration/test_distance_conf_filter.py b/tests/exploration/test_distance_conf_filter.py index 98b7ba4e..3ce49085 100644 --- a/tests/exploration/test_distance_conf_filter.py +++ b/tests/exploration/test_distance_conf_filter.py @@ -2,7 +2,6 @@ import unittest import dpdata -import numpy as np from dpgen2.exploration.selector import ( BoxLengthFilter, @@ -10,10 +9,6 @@ DistanceConfFilter, ) -from .context import ( - dpgen2, -) - POSCAR_valid = """ Er 1.0 7.00390434172054 0.000000000000000E+000 0.000000000000000E+000 diff --git a/tests/exploration/test_exploration_group.py b/tests/exploration/test_exploration_group.py index f71ce8f4..e0be2b49 100644 --- a/tests/exploration/test_exploration_group.py +++ b/tests/exploration/test_exploration_group.py @@ -1,31 +1,17 @@ -import os import textwrap import unittest -from pathlib import ( - Path, -) -from typing import ( - List, - Set, -) - -import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass from unittest.mock import ( - Mock, patch, ) from 
dpgen2.constants import ( calypso_input_file, - calypso_run_opt_file, lmp_conf_name, lmp_input_name, ) @@ -56,7 +42,7 @@ change_box all triclinic mass 1 10.000000 mass 2 20.000000 -pair_style deepmd model.000.pb model.001.pb model.002.pb out_freq ${THERMO_FREQ} out_file model_devi.out +pair_style deepmd model.000.pb model.001.pb model.002.pb out_freq ${THERMO_FREQ} out_file model_devi.out pair_coeff * * thermo_style custom step temp pe ke etotal press vol lx ly lz xy xz yz @@ -127,7 +113,7 @@ change_box all triclinic mass 1 10.000000 mass 2 20.000000 -pair_style deepmd model.000.pb model.001.pb model.002.pb out_freq ${THERMO_FREQ} out_file model_devi.out +pair_style deepmd model.000.pb model.001.pb model.002.pb out_freq ${THERMO_FREQ} out_file model_devi.out pair_coeff * * thermo_style custom step temp pe ke etotal press vol lx ly lz xy xz yz diff --git a/tests/exploration/test_exploration_scheduler.py b/tests/exploration/test_exploration_scheduler.py index 324904b0..4029ef0b 100644 --- a/tests/exploration/test_exploration_scheduler.py +++ b/tests/exploration/test_exploration_scheduler.py @@ -1,20 +1,9 @@ -import os -import textwrap import unittest -from pathlib import ( - Path, -) -from typing import ( - List, - Set, -) import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -39,13 +28,8 @@ from dpgen2.exploration.selector import ( ConfSelectorFrames, ) -from dpgen2.exploration.task import ( - ExplorationStage, - ExplorationTaskGroup, -) # isort: off -import exploration.context from mocked_ops import ( MockedExplorationReport, MockedExplorationTaskGroup, diff --git a/tests/exploration/test_lmp_templ_task_group.py b/tests/exploration/test_lmp_templ_task_group.py index 5ef67f72..80fce94d 100644 --- a/tests/exploration/test_lmp_templ_task_group.py +++ b/tests/exploration/test_lmp_templ_task_group.py @@ 
-5,24 +5,12 @@ from pathlib import ( Path, ) -from typing import ( - List, - Set, -) - -import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass -from unittest.mock import ( - Mock, - patch, -) from dpgen2.constants import ( lmp_conf_name, @@ -30,7 +18,6 @@ plm_input_name, ) from dpgen2.exploration.task import ( - ExplorationStage, LmpTemplateTaskGroup, ) @@ -55,7 +42,7 @@ mass 1 27.000000 mass 2 24.000000 -pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out +pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out pair_coeff * * thermo_style custom step temp pe ke etotal press vol lx ly lz xy xz yz @@ -130,7 +117,7 @@ mass 1 27.000000 mass 2 24.000000 -pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out +pair_style deepmd ../graph.003.pb ../graph.001.pb ../graph.002.pb ../graph.000.pb out_freq ${THERMO_FREQ} out_file model_devi.out pair_coeff * * fix dpgen_plm diff --git a/tests/exploration/test_make_task_group_from_config.py b/tests/exploration/test_make_task_group_from_config.py index f9fe93be..4c929b9c 100644 --- a/tests/exploration/test_make_task_group_from_config.py +++ b/tests/exploration/test_make_task_group_from_config.py @@ -1,21 +1,11 @@ -import itertools import os -import textwrap import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) - -import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass diff --git a/tests/exploration/test_report_adaptive_lower.py 
b/tests/exploration/test_report_adaptive_lower.py index 41ea5f8b..921e2da1 100644 --- a/tests/exploration/test_report_adaptive_lower.py +++ b/tests/exploration/test_report_adaptive_lower.py @@ -1,20 +1,14 @@ -import os -import textwrap import unittest -from collections import ( - Counter, +from unittest import ( + mock, ) -import mock import numpy as np from dargs import ( Argument, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.exploration.deviation import ( DeviManager, DeviManagerStd, @@ -52,8 +46,8 @@ def test_fv(self): for idx, ii in enumerate(expected_fail_): for jj in ii: expected_fail.add((idx, jj)) - expected_cand = set([(0, 5), (0, 6), (1, 8), (1, 0), (1, 5)]) - expected_accu = set([(0, 1), (1, 6), (1, 7)]) + expected_cand = {(0, 5), (0, 6), (1, 8), (1, 0), (1, 5)} + expected_accu = {(0, 1), (1, 6), (1, 7)} ter = ExplorationReportAdaptiveLower( level_f_hi=0.7, @@ -135,10 +129,18 @@ def test_f(self): for idx, ii in enumerate(expected_fail_): for jj in ii: expected_fail.add((idx, jj)) - expected_cand = set([(0, 6), (0, 7), (0, 5)]) - expected_accu = set( - [(0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (1, 5), (1, 6), (1, 7), (1, 8)] - ) + expected_cand = {(0, 6), (0, 7), (0, 5)} + expected_accu = { + (0, 1), + (0, 3), + (0, 4), + (1, 0), + (1, 1), + (1, 5), + (1, 6), + (1, 7), + (1, 8), + } ter = ExplorationReportAdaptiveLower( level_f_hi=0.7, @@ -182,11 +184,21 @@ def test_f_inv_pop(self): for idx, ii in enumerate(expected_fail_): for jj in ii: expected_fail.add((idx, jj)) - expected_cand = set( - [(0, 6), (0, 7), (0, 5)] - + [(0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (1, 5), (1, 6), (1, 7), (1, 8)] - ) - expected_accu = set([]) + expected_cand = { + (0, 6), + (0, 7), + (0, 5), + (0, 1), + (0, 3), + (0, 4), + (1, 0), + (1, 1), + (1, 5), + (1, 6), + (1, 7), + (1, 8), + } + expected_accu = set() ter = ExplorationReportAdaptiveLower( level_f_hi=0.7, @@ -253,10 +265,18 @@ def test_v(self): for idx, ii in enumerate(expected_fail_): for jj in ii: 
expected_fail.add((idx, jj)) - expected_cand = set([(0, 6), (0, 7), (0, 5)]) - expected_accu = set( - [(0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (1, 5), (1, 6), (1, 7), (1, 8)] - ) + expected_cand = {(0, 6), (0, 7), (0, 5)} + expected_accu = { + (0, 1), + (0, 3), + (0, 4), + (1, 0), + (1, 1), + (1, 5), + (1, 6), + (1, 7), + (1, 8), + } ter = ExplorationReportAdaptiveLower( level_f_hi=1.0, diff --git a/tests/exploration/test_report_trust_levels.py b/tests/exploration/test_report_trust_levels.py index 7f6c79a9..e67cc5f2 100644 --- a/tests/exploration/test_report_trust_levels.py +++ b/tests/exploration/test_report_trust_levels.py @@ -1,9 +1,4 @@ -import os -import textwrap import unittest -from collections import ( - Counter, -) import numpy as np from dargs import ( @@ -11,9 +6,6 @@ ) # isort: off -from context import ( - dpgen2, -) from dpgen2.exploration.deviation import ( DeviManager, DeviManagerStd, diff --git a/tests/exploration/test_traj_render_lammps.py b/tests/exploration/test_traj_render_lammps.py index 3659ffd8..b968802f 100644 --- a/tests/exploration/test_traj_render_lammps.py +++ b/tests/exploration/test_traj_render_lammps.py @@ -6,9 +6,6 @@ import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.exploration.render import TrajRenderLammps # isort: on diff --git a/tests/fp/context.py b/tests/fp/context.py index d6638725..f2118984 100644 --- a/tests/fp/context.py +++ b/tests/fp/context.py @@ -2,4 +2,4 @@ import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff --git a/tests/fp/data.vasp.kp.gf/make_kp_test.py b/tests/fp/data.vasp.kp.gf/make_kp_test.py index dddf4584..b543e295 100644 --- a/tests/fp/data.vasp.kp.gf/make_kp_test.py +++ b/tests/fp/data.vasp.kp.gf/make_kp_test.py @@ -8,10 +8,11 @@ def make_one(out_dir): + rng = np.random.default_rng() # [0.5, 1) - [aa, bb, cc] = np.random.random(3) * 0.5 + 0.5 + [aa, bb, cc] = rng.random(3) * 0.5 + 
0.5 # [1, 179) - [alpha, beta, gamma] = np.random.random(3) * (178 / 180) + 1 + [alpha, beta, gamma] = rng.random(3) * (178 / 180) + 1 # make cell cell = ase.geometry.cellpar_to_cell([aa, bb, cc, alpha, beta, gamma]) sys = dpdata.System("POSCAR") diff --git a/tests/fp/test_abacus.py b/tests/fp/test_abacus.py index 32d6c979..d4008626 100644 --- a/tests/fp/test_abacus.py +++ b/tests/fp/test_abacus.py @@ -40,8 +40,9 @@ def test_abacus(self): data_path / "INPUT", {"Na": data_path / "Na_ONCV_PBE-1.0.upf"} ), "run": { - "command": "cp -r %s OUT.ABACUS && cat %s" - % (data_path / "OUT.ABACUS", data_path / "log"), + "command": "cp -r {} OUT.ABACUS && cat {}".format( + data_path / "OUT.ABACUS", data_path / "log" + ), }, "extra_output_files": [], } diff --git a/tests/fp/test_cp2k.py b/tests/fp/test_cp2k.py index 22c87b28..8f12400c 100644 --- a/tests/fp/test_cp2k.py +++ b/tests/fp/test_cp2k.py @@ -45,8 +45,9 @@ def test_cp2k(self): fp_config = { "inputs": FpOpCp2kInputs(data_path / "input.inp"), "run": { - "command": "cp -r %s output.log && cat %s" - % (data_path / "output.log", data_path / "output.log"), + "command": "cp -r {} output.log && cat {}".format( + data_path / "output.log", data_path / "output.log" + ), }, "extra_output_files": [], } diff --git a/tests/fp/test_prep_vasp.py b/tests/fp/test_prep_vasp.py index 08957f79..cd3352e8 100644 --- a/tests/fp/test_prep_vasp.py +++ b/tests/fp/test_prep_vasp.py @@ -1,8 +1,5 @@ -import glob -import json import os import shutil -import sys import textwrap import unittest from pathlib import ( @@ -10,24 +7,14 @@ ) import dpdata -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - FatalError, - OPIOSign, - TransientError, ) from fake_data_set import ( fake_multi_sys, - fake_system, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( fp_task_pattern, ) @@ -39,9 +26,6 @@ vasp_kp_name, vasp_pot_name, ) -from dpgen2.utils import ( - dump_object_to_file, -) # isort: on diff --git 
a/tests/fp/test_run_vasp.py b/tests/fp/test_run_vasp.py index 7202d6ce..65f989a2 100644 --- a/tests/fp/test_run_vasp.py +++ b/tests/fp/test_run_vasp.py @@ -1,23 +1,20 @@ -import json import shutil import unittest from pathlib import ( Path, ) +from unittest import ( + mock, +) +from unittest.mock import ( + call, + patch, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, TransientError, ) -from mock import ( - call, - mock, - patch, -) # isort: off from .context import ( diff --git a/tests/fp/test_vasp.py b/tests/fp/test_vasp.py index 8d9bee25..a381c55a 100644 --- a/tests/fp/test_vasp.py +++ b/tests/fp/test_vasp.py @@ -1,8 +1,6 @@ import glob import json import os -import shutil -import sys import textwrap import unittest from pathlib import ( @@ -13,9 +11,6 @@ import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.fp.vasp import ( PrepVasp, RunVasp, @@ -107,8 +102,8 @@ def test_vasp_input_kp(self): 0.00 6.00 6.00 8.00 0.00 8.00 9.00 9.00 0.00 -O -1 +O +1 Selective dynamics Cartesian 0.00 0.00 0.00 T T F @@ -122,7 +117,7 @@ def test_vasp_input_kp(self): kps = vi.make_kpoints(ss["cells"][0]) self.assertEqual(ref, kps) - def test_vasp_input_kp(self): + def test_vasp_input_kp2(self): ref = textwrap.dedent( """K-Points 0 @@ -137,8 +132,8 @@ def test_vasp_input_kp(self): 0.00 6.00 6.00 8.00 0.00 8.00 9.00 9.00 0.00 -O -1 +O +1 Selective dynamics Cartesian 0.00 0.00 0.00 T T F diff --git a/tests/mocked_ops.py b/tests/mocked_ops.py index 9cd13c00..6c76e395 100644 --- a/tests/mocked_ops.py +++ b/tests/mocked_ops.py @@ -1,9 +1,7 @@ from dflow.python import ( OP, OPIO, - Artifact, FatalError, - OPIOSign, upload_packages, ) @@ -11,7 +9,6 @@ import json import os -import pickle import re import shutil from pathlib import ( @@ -24,25 +21,18 @@ ) try: - from flow.context import ( - dpgen2, - ) + from flow.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context 
needed pass from dpgen2.constants import ( - calypso_check_opt_file, - calypso_run_opt_file, fp_task_pattern, lmp_conf_name, lmp_input_name, lmp_log_name, lmp_model_devi_name, - lmp_task_pattern, lmp_traj_name, model_name_pattern, - train_log_name, - train_script_name, train_task_pattern, ) from dpgen2.exploration.report import ( @@ -64,7 +54,6 @@ RunVasp, ) from dpgen2.fp.vasp import ( - VaspInputs, vasp_conf_name, vasp_input_name, ) @@ -80,9 +69,6 @@ from dpgen2.op.prep_dp_train import ( PrepDPTrain, ) -from dpgen2.op.prep_lmp import ( - PrepExplorationTaskGroup, -) from dpgen2.op.run_caly_dp_optim import ( RunCalyDPOptim, ) @@ -221,15 +207,15 @@ def execute( assert init_model.exists() with log.open("w") as f: - f.write(f"init_model {str(init_model)} OK\n") + f.write(f"init_model {init_model!s} OK\n") for ii in jtmp["data"]: assert Path(ii).exists() assert (ii in init_data_str) or (ii in iter_data_str) with log.open("a") as f: - f.write(f"data {str(ii)} OK\n") + f.write(f"data {ii!s} OK\n") assert script.exists() with log.open("a") as f: - f.write(f"script {str(script)} OK\n") + f.write(f"script {script!s} OK\n") with model.open("w") as f: f.write("read from init model: \n") @@ -328,10 +314,10 @@ def execute( assert Path(ii).exists() assert (ii in init_data_str) or (ii in iter_data_str) with log.open("a") as f: - f.write(f"data {str(ii)} OK\n") + f.write(f"data {ii!s} OK\n") assert script.exists() with log.open("a") as f: - f.write(f"script {str(script)} OK\n") + f.write(f"script {script!s} OK\n") with model.open("w") as f: f.write("read from init model: \n") @@ -864,7 +850,7 @@ def select( self, trajs: List[Path], model_devis: List[Path], - type_map: List[str] = None, + type_map: Optional[List[str]] = None, optional_outputs: Optional[List[Path]] = None, ) -> Tuple[List[Path], ExplorationReport]: confs = [] @@ -921,7 +907,7 @@ def __init__( self, stage: ExplorationStage, conv_accuracy: float = 0.9, - max_numb_iter: int = None, + max_numb_iter: Optional[int] = 
None, ): self.selector = MockedConfSelector( conv_accuracy=conv_accuracy, @@ -981,7 +967,7 @@ def execute( finished = "true" if int(cnt_num) == int(max_step) else "false" if finished == "false": for i in range(5): - Path(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + Path(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") if step is None: Path("step").write_text("2") diff --git a/tests/op/context.py b/tests/op/context.py index d6638725..f2118984 100644 --- a/tests/op/context.py +++ b/tests/op/context.py @@ -2,4 +2,4 @@ import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff --git a/tests/op/test_collect_data.py b/tests/op/test_collect_data.py index 2e558193..4f2ae88d 100644 --- a/tests/op/test_collect_data.py +++ b/tests/op/test_collect_data.py @@ -1,4 +1,3 @@ -import json import shutil import unittest from pathlib import ( @@ -6,35 +5,15 @@ ) import dpdata -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - TransientError, ) from fake_data_set import ( fake_multi_sys, fake_system, ) -from mock import ( - call, - mock, - patch, -) # isort: off -from .context import ( - dpgen2, -) -from dpgen2.constants import ( - lmp_conf_name, - lmp_input_name, - lmp_log_name, - lmp_model_devi_name, - lmp_traj_name, -) from dpgen2.op.collect_data import ( CollectData, ) diff --git a/tests/op/test_collect_run_caly.py b/tests/op/test_collect_run_caly.py index 22ec37db..fb27f973 100644 --- a/tests/op/test_collect_run_caly.py +++ b/tests/op/test_collect_run_caly.py @@ -1,37 +1,23 @@ -import os import shutil import unittest from pathlib import ( Path, ) +from unittest.mock import ( + patch, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, TransientError, ) -from mock import ( - call, - mock, - patch, -) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( calypso_task_pattern, calypso_input_file, - 
calypso_log_name, ) from dpgen2.op.collect_run_caly import CollRunCaly, get_value_from_inputdat -from dpgen2.utils import ( - BinaryFileInput, -) # isort: on @@ -71,13 +57,13 @@ def tearDown(self): def test_get_max_step(self): max_step, vsc = get_value_from_inputdat(self.input_file) self.assertTrue(max_step == 3) - self.assertTrue(vsc == True) + self.assertTrue(vsc is True) temp_input_file = self.input_file_path.joinpath("temp_input_dat") temp_input_file.write_text("input.dat\n") max_step, vsc = get_value_from_inputdat(temp_input_file) self.assertTrue(max_step == 0) - self.assertTrue(vsc == False) + self.assertTrue(vsc is False) @patch("dpgen2.op.collect_run_caly.run_command") def test_step_st_maxstep_01(self, mocked_run): @@ -86,7 +72,7 @@ def test_step_st_maxstep_01(self, mocked_run): def side_effect(*args, **kwargs): for i in range(5): - Path().joinpath(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + Path().joinpath(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") Path("step").write_text("3") Path("results").mkdir(parents=True, exist_ok=True) return (0, "foo\n", "") @@ -122,7 +108,7 @@ def test_step_no_eq_maxstep_02(self, mocked_run): def side_effect(*args, **kwargs): for i in range(5): - Path().joinpath(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + Path().joinpath(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") Path("step").write_text("2") Path("results").mkdir(parents=True, exist_ok=True) return (0, "foo\n", "") @@ -153,7 +139,7 @@ def test_step_eq_maxstep_03(self, mocked_run): def side_effect(*args, **kwargs): for i in range(5): - Path().joinpath(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + Path().joinpath(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") Path("step").write_text("4") Path("results").mkdir(parents=True, exist_ok=True) return (0, "foo\n", "") @@ -185,7 +171,7 @@ def test_error_04(self, mocked_run): def side_effect(*args, **kwargs): for i in range(5): - Path().joinpath(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + 
Path().joinpath(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") Path("step").write_text("3") Path("results").mkdir(parents=True, exist_ok=True) return (1, "foo\n", "") diff --git a/tests/op/test_prep_caly_dp_optim.py b/tests/op/test_prep_caly_dp_optim.py index d1ffd724..0fe680f2 100644 --- a/tests/op/test_prep_caly_dp_optim.py +++ b/tests/op/test_prep_caly_dp_optim.py @@ -1,38 +1,20 @@ -import os import shutil import unittest from pathlib import ( Path, ) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - TransientError, -) -from mock import ( - call, - mock, - patch, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( calypso_task_pattern, - model_name_pattern, calypso_run_opt_file, calypso_check_opt_file, ) from dpgen2.op import PrepCalyDPOptim -from dpgen2.utils import ( - BinaryFileInput, -) # isort: on @@ -46,7 +28,7 @@ def setUp(self): self.poscar_dir.mkdir(parents=True, exist_ok=True) nposcar = 10 for i in range(1, nposcar + 1): - self.poscar_dir.joinpath(f"POSCAR_{str(i)}").write_text(f"POSCAR_{str(i)}") + self.poscar_dir.joinpath(f"POSCAR_{i!s}").write_text(f"POSCAR_{i!s}") self.models_dir = Path("models_dir") self.models_dir.mkdir(parents=True, exist_ok=True) @@ -67,7 +49,7 @@ def setUp(self): self.template_slice_config = {"group_size": 3} self.group_size = self.template_slice_config["group_size"] - grouped_poscar_list = [i for i in range(0, nposcar, self.group_size)] + grouped_poscar_list = list(range(0, nposcar, self.group_size)) self.ngrouped = len(grouped_poscar_list) self.ref_task_dirs = [] for i in range(0, self.ngrouped): diff --git a/tests/op/test_prep_caly_input.py b/tests/op/test_prep_caly_input.py index 0fac49c4..f96a8289 100644 --- a/tests/op/test_prep_caly_input.py +++ b/tests/op/test_prep_caly_input.py @@ -1,22 +1,11 @@ -import os import shutil import unittest from pathlib import ( Path, ) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - 
TransientError, -) -from mock import ( - call, - mock, - patch, ) # isort: off @@ -27,9 +16,6 @@ calypso_check_opt_file, ) from dpgen2.op.prep_caly_input import PrepCalyInput -from dpgen2.utils import ( - BinaryFileInput, -) from dpgen2.exploration.task import ( BaseExplorationTaskGroup, diff --git a/tests/op/test_prep_caly_model_devi.py b/tests/op/test_prep_caly_model_devi.py index 6cc6337f..875c801c 100644 --- a/tests/op/test_prep_caly_model_devi.py +++ b/tests/op/test_prep_caly_model_devi.py @@ -1,38 +1,15 @@ -import os import shutil import unittest from pathlib import ( Path, ) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - TransientError, -) -from mock import ( - call, - mock, - patch, ) # isort: off -from .context import ( - dpgen2, -) -from dpgen2.constants import ( - calypso_task_pattern, - model_name_pattern, - calypso_run_opt_file, - calypso_check_opt_file, -) from dpgen2.op import PrepCalyModelDevi -from dpgen2.utils import ( - BinaryFileInput, -) # isort: on diff --git a/tests/op/test_prep_dp_train.py b/tests/op/test_prep_dp_train.py index a380e221..9772f23c 100644 --- a/tests/op/test_prep_dp_train.py +++ b/tests/op/test_prep_dp_train.py @@ -4,22 +4,15 @@ from pathlib import ( Path, ) +from unittest import ( + mock, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, -) -from mock import ( - mock, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( train_script_name, train_task_pattern, diff --git a/tests/op/test_prep_relax.py b/tests/op/test_prep_relax.py index 83dcc2b7..9f4cbd8a 100644 --- a/tests/op/test_prep_relax.py +++ b/tests/op/test_prep_relax.py @@ -42,5 +42,5 @@ def tearDown(self): if os.path.isdir("task.%06d" % i): shutil.rmtree("task.%06d" % i) for i in range(4): - if os.path.isfile("%s.cif" % i): - os.remove("%s.cif" % i) + if os.path.isfile(f"{i}.cif"): + os.remove(f"{i}.cif") diff --git a/tests/op/test_run_caly_dp_optim.py 
b/tests/op/test_run_caly_dp_optim.py index ee6afd43..2b994e31 100644 --- a/tests/op/test_run_caly_dp_optim.py +++ b/tests/op/test_run_caly_dp_optim.py @@ -1,38 +1,19 @@ -import os import shutil import unittest from pathlib import ( Path, ) +from unittest.mock import ( + patch, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, TransientError, ) -from mock import ( - call, - mock, - patch, -) # isort: off -from .context import ( - dpgen2, -) -from dpgen2.constants import ( - calypso_task_pattern, - model_name_pattern, - calypso_run_opt_file, - calypso_check_opt_file, -) from dpgen2.op import RunCalyDPOptim -from dpgen2.utils import ( - BinaryFileInput, -) # isort: on @@ -57,9 +38,9 @@ def tearDown(self): def test_00_success(self, mocked_run): def side_effect(*args, **kwargs): for i in range(1, 11): - Path().joinpath(f"CONTCAR_{str(i)}").write_text(f"CONTCAR_{str(i)}") - Path().joinpath(f"OUTCAR_{str(i)}").write_text(f"OUTCAR_{str(i)}") - Path().joinpath(f"{str(i)}.traj").write_text(f"{str(i)}.traj") + Path().joinpath(f"CONTCAR_{i!s}").write_text(f"CONTCAR_{i!s}") + Path().joinpath(f"OUTCAR_{i!s}").write_text(f"OUTCAR_{i!s}") + Path().joinpath(f"{i!s}.traj").write_text(f"{i!s}.traj") return (0, "foo\n", "") mocked_run.side_effect = side_effect @@ -106,9 +87,9 @@ def side_effect(*args, **kwargs): def test_01_error(self, mocked_run): def side_effect(*args, **kwargs): for i in range(1, 6): - Path().joinpath(f"CONTCAR_{str(i)}").write_text(f"CONTCAR_{str(i)}") - Path().joinpath(f"OUTCAR_{str(i)}").write_text(f"OUTCAR_{str(i)}") - Path().joinpath(f"{str(i)}.traj").write_text(f"{str(i)}.traj") + Path().joinpath(f"CONTCAR_{i!s}").write_text(f"CONTCAR_{i!s}") + Path().joinpath(f"OUTCAR_{i!s}").write_text(f"OUTCAR_{i!s}") + Path().joinpath(f"{i!s}.traj").write_text(f"{i!s}.traj") return (1, "foo\n", "") mocked_run.side_effect = side_effect diff --git a/tests/op/test_run_caly_model_devi.py b/tests/op/test_run_caly_model_devi.py index 
647604a8..8e39c495 100644 --- a/tests/op/test_run_caly_model_devi.py +++ b/tests/op/test_run_caly_model_devi.py @@ -1,16 +1,12 @@ -import os import shutil import unittest from pathlib import ( Path, ) from unittest.mock import ( - Mock, - call, patch, ) -import numpy as np from ase import ( Atoms, ) @@ -18,16 +14,10 @@ write, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - TransientError, ) from dpgen2.constants import ( - calypso_input_file, - calypso_log_name, calypso_task_pattern, ) from dpgen2.op.run_caly_model_devi import ( @@ -35,9 +25,6 @@ atoms2lmpdump, parse_traj, ) -from dpgen2.utils import ( - BinaryFileInput, -) # from .context import ( # dpgen2, @@ -45,11 +32,12 @@ # isort: on try: - import deepmd + import deepmd # noqa: F401 - x = 0 -except: +except ModuleNotFoundError: x = 1 +else: + x = 0 class TestRunCalyModelDevi(unittest.TestCase): diff --git a/tests/op/test_run_dp_train.py b/tests/op/test_run_dp_train.py index 384e7a3b..9c7d1901 100644 --- a/tests/op/test_run_dp_train.py +++ b/tests/op/test_run_dp_train.py @@ -1,4 +1,3 @@ -import itertools import json import os import shutil @@ -6,37 +5,27 @@ from pathlib import ( Path, ) +from unittest.mock import ( + call, + patch, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, FatalError, - OPIOSign, - TransientError, ) from fake_data_set import ( fake_multi_sys, fake_system, ) -from mock import ( - call, - patch, -) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( train_script_name, - train_task_pattern, ) from dpgen2.op.run_dp_train import ( RunDPTrain, _get_data_size_of_all_mult_sys, - _make_train_command, ) # isort: on @@ -74,7 +63,7 @@ def setUp(self): ss_0.to_deepmd_npy("init/data-0") ss_1.to_deepmd_npy("init/data-1") self.init_data = [Path("init/data-0"), Path("init/data-1")] - self.init_data = sorted(list(self.init_data)) + self.init_data = sorted(self.init_data) self.init_model = Path("bar.pb") @@ -764,7 +753,7 @@ def 
setUp(self): ss_0.to_deepmd_npy("init/data-0") ss_1.to_deepmd_npy("init/data-1") self.init_data = [Path("init/data-0"), Path("init/data-1")] - self.init_data = sorted(list(self.init_data)) + self.init_data = sorted(self.init_data) self.init_model = Path("bar.pb") diff --git a/tests/op/test_run_lmp.py b/tests/op/test_run_lmp.py index b727fb76..66439f87 100644 --- a/tests/op/test_run_lmp.py +++ b/tests/op/test_run_lmp.py @@ -1,29 +1,20 @@ -import json import os import shutil import unittest from pathlib import ( Path, ) +from unittest.mock import ( + call, + patch, +) -import numpy as np from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, TransientError, ) -from mock import ( - call, - mock, - patch, -) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( lmp_conf_name, lmp_input_name, @@ -143,7 +134,7 @@ class TestRunLmpDist(unittest.TestCase): change_box all triclinic mass 6 26.980000 pair_style deepmd model.000.pb out_freq 10 out_file model_devi.out -pair_coeff * * +pair_coeff * * thermo_style custom step temp pe ke etotal press vol lx ly lz xy xz yz thermo ${THERMO_FREQ} @@ -215,7 +206,7 @@ def test_success(self, mocked_run): # check if the teacher model is linked to model.000.pb ii = 0 self.assertEqual( - (work_dir / (model_name_pattern % ii)).read_text(), f"teacher model" + (work_dir / (model_name_pattern % ii)).read_text(), "teacher model" ) ii = 1 @@ -224,7 +215,7 @@ def test_success(self, mocked_run): ) # The number of models have to be 2 in knowledge distillation - self.assertEqual(len(list((work_dir.glob("*.pb")))), 2) + self.assertEqual(len(list(work_dir.glob("*.pb"))), 2) def swap_element(arg): diff --git a/tests/test_block_cl.py b/tests/test_block_cl.py index fa0571a7..63817c43 100644 --- a/tests/test_block_cl.py +++ b/tests/test_block_cl.py @@ -1,47 +1,20 @@ -import json import os -import pickle import shutil import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import 
jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) -from dflow.python import ( - OP, - OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, - upload_packages, -) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -52,14 +25,10 @@ skip_ut_with_dflow_reason, upload_python_packages, ) -from mock import ( - patch, -) from mocked_ops import ( MockedCollectData, MockedCollectDataCheckOptParam, MockedConfSelector, - MockedExplorationReport, MockedExplorationTaskGroup, MockedPrepDPTrain, MockedPrepVasp, @@ -78,19 +47,9 @@ from dpgen2.constants import ( fp_task_pattern, - lmp_conf_name, - lmp_input_name, - lmp_log_name, - lmp_traj_name, model_name_pattern, - train_log_name, - train_script_name, train_task_pattern, ) -from dpgen2.exploration.task import ( - ExplorationTask, - ExplorationTaskGroup, -) from dpgen2.fp.vasp import ( VaspInputs, ) @@ -109,10 +68,6 @@ from dpgen2.superop.prep_run_lmp import ( PrepRunLmp, ) -from dpgen2.utils import ( - dump_object_to_file, - load_object_from_file, -) from dpgen2.utils.step_config import normalize as normalize_step_dict default_config = normalize_step_dict( diff --git a/tests/test_caly_evo_step.py b/tests/test_caly_evo_step.py index 2aff3614..d9ce1f88 100644 --- a/tests/test_caly_evo_step.py +++ b/tests/test_caly_evo_step.py @@ -1,40 +1,19 @@ -import json import os -import pickle import shutil import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) 
from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, Slices, ) @@ -45,9 +24,7 @@ ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -60,36 +37,12 @@ ) from mocked_ops import ( MockedCollRunCaly, - MockedPrepCalyDPOptim, MockedRunCalyDPOptim, mocked_numb_models, ) -from dpgen2.constants import ( - lmp_conf_name, - lmp_input_name, - lmp_log_name, - lmp_model_devi_name, - lmp_task_pattern, - lmp_traj_name, - model_name_pattern, - train_log_name, - train_script_name, - train_task_pattern, -) -from dpgen2.exploration.task import ( - ExplorationTask, - ExplorationTaskGroup, -) from dpgen2.op import ( PrepCalyDPOptim, - RunCalyDPOptim, -) -from dpgen2.op.collect_run_caly import ( - CollRunCaly, -) -from dpgen2.op.prep_caly_input import ( - PrepCalyInput, ) from dpgen2.superop.caly_evo_step import ( CalyEvoStep, @@ -167,7 +120,7 @@ def setUp(self) -> None: self.file_storage.mkdir(parents=True, exist_ok=True) for i in range(5): self.file_storage.joinpath(f"POSCAR_{i}").write_text(f"POSCAR_{i}") - self.file_storage.joinpath(f"frozen_model.pb").write_text(f"model.{i}.pb") + self.file_storage.joinpath("frozen_model.pb").write_text(f"model.{i}.pb") self.caly_run_opt_file = self.file_storage.joinpath(calypso_run_opt_file) self.caly_run_opt_file.write_text("caly_run_opt_script") self.caly_check_opt_file = self.file_storage.joinpath(calypso_check_opt_file) @@ -233,7 +186,7 @@ def setUp(self): for ii in range(self.nmodels): model_path = self.work_dir.joinpath(f"task.{ii}") model_path.mkdir(exist_ok=True, parents=True) - model = model_path.joinpath(f"model.ckpt.pt") + model = model_path.joinpath("model.ckpt.pt") model.write_text(f"model {ii}") self.model_list.append(model) self.models = upload_artifact(self.model_list) diff --git a/tests/test_collect_data.py b/tests/test_collect_data.py index 95b7e4c9..66277686 100644 --- 
a/tests/test_collect_data.py +++ b/tests/test_collect_data.py @@ -1,46 +1,23 @@ -import json -import os import shutil import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, PythonOPTemplate, - upload_packages, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -65,10 +42,10 @@ def setUp(self): self.labeled_data = [Path(ii) for ii in self.labeled_data] for ii in self.iter_data: ii.mkdir(exist_ok=True, parents=True) - (ii / "data").write_text(f"data of {str(ii)}") + (ii / "data").write_text(f"data of {ii!s}") for ii in self.labeled_data: (ii).mkdir(exist_ok=True, parents=True) - (ii / "data").write_text(f"data of {str(ii)}") + (ii / "data").write_text(f"data of {ii!s}") self.type_map = [] def tearDown(self): @@ -108,17 +85,17 @@ def test(self): @unittest.skipIf(skip_ut_with_dflow, skip_ut_with_dflow_reason) class TestMockedCollectDataArgo(unittest.TestCase): def setUp(self): - self.iter_data = set(("foo/iter0", "bar/iter1")) - self.iter_data = set([Path(ii) for ii in self.iter_data]) + self.iter_data = {"foo/iter0", "bar/iter1"} + self.iter_data = {Path(ii) for ii in self.iter_data} self.name = "outdata" self.labeled_data = ["d0", "d1"] self.labeled_data = [Path(ii) for ii in self.labeled_data] for ii in self.iter_data: ii.mkdir(exist_ok=True, parents=True) - (ii / "data").write_text(f"data of {str(ii)}") + (ii / "data").write_text(f"data of {ii!s}") for ii in self.labeled_data: (ii).mkdir(exist_ok=True, parents=True) - (ii / "data").write_text(f"data of {str(ii)}") + (ii / "data").write_text(f"data 
of {ii!s}") self.iter_data = upload_artifact(list(self.iter_data)) self.labeled_data = upload_artifact(self.labeled_data) self.type_map = [] diff --git a/tests/test_dpgen_loop.py b/tests/test_dpgen_loop.py index 49b4c873..b7159c51 100644 --- a/tests/test_dpgen_loop.py +++ b/tests/test_dpgen_loop.py @@ -1,72 +1,35 @@ -import json import os -import pickle import shutil -import textwrap import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) -from dflow.python import ( - OP, - OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, - upload_packages, -) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass from context import ( - default_host, default_image, skip_ut_with_dflow, skip_ut_with_dflow_reason, upload_python_packages, ) -from dflow.python import ( - FatalError, -) from mocked_ops import ( MockedCollectData, MockedCollectDataCheckOptParam, MockedCollectDataFailed, MockedCollectDataRestart, - MockedConfSelector, MockedConstTrustLevelStageScheduler, - MockedExplorationReport, - MockedExplorationTaskGroup, - MockedExplorationTaskGroup1, - MockedExplorationTaskGroup2, MockedPrepDPTrain, MockedPrepVasp, MockedRunDPTrain, @@ -79,8 +42,6 @@ MockedStage, MockedStage1, MockedStage2, - make_mocked_init_data, - make_mocked_init_models, mocked_incar_template, mocked_numb_models, mocked_numb_select, @@ -89,34 +50,17 @@ from dpgen2.constants import ( fp_task_pattern, - lmp_conf_name, - lmp_input_name, - lmp_log_name, - lmp_traj_name, model_name_pattern, - train_log_name, - train_script_name, train_task_pattern, ) -from dpgen2.exploration.report import ( - ExplorationReport, -) from 
dpgen2.exploration.scheduler import ( ExplorationScheduler, ) -from dpgen2.exploration.task import ( - ExplorationStage, - ExplorationTask, - ExplorationTaskGroup, -) from dpgen2.flow.dpgen_loop import ( ConcurrentLearning, ) from dpgen2.fp.vasp import ( VaspInputs, - vasp_conf_name, - vasp_input_name, - vasp_pot_name, ) from dpgen2.op.prep_lmp import ( PrepLmp, diff --git a/tests/test_merge_caly_evo_step.py b/tests/test_merge_caly_evo_step.py index 3f22ba56..17f1212b 100644 --- a/tests/test_merge_caly_evo_step.py +++ b/tests/test_merge_caly_evo_step.py @@ -1,39 +1,19 @@ -import json import os -import pickle import shutil import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, PythonOPTemplate, Slices, ) @@ -45,9 +25,7 @@ ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -60,43 +38,16 @@ ) from mocked_ops import ( MockedCollRunCaly, - MockedPrepCalyDPOptim, MockedRunCalyDPOptim, mocked_numb_models, ) -from dpgen2.constants import ( - lmp_conf_name, - lmp_input_name, - lmp_log_name, - lmp_model_devi_name, - lmp_task_pattern, - lmp_traj_name, - model_name_pattern, - train_log_name, - train_script_name, - train_task_pattern, -) -from dpgen2.exploration.task import ( - ExplorationTask, - ExplorationTaskGroup, -) from dpgen2.op import ( PrepCalyDPOptim, - RunCalyDPOptim, ) from dpgen2.op.caly_evo_step_merge import ( CalyEvoStepMerge, ) -from dpgen2.op.collect_run_caly import ( - CollRunCaly, -) -from dpgen2.op.prep_caly_input import ( - PrepCalyInput, -) -from dpgen2.superop.caly_evo_step import ( - 
CalyEvoStep, -) from dpgen2.utils.step_config import normalize as normalize_step_dict default_config = normalize_step_dict( @@ -170,7 +121,7 @@ def setUp(self) -> None: self.file_storage.mkdir(parents=True, exist_ok=True) for i in range(5): self.file_storage.joinpath(f"POSCAR_{i}").write_text(f"POSCAR_{i}") - self.file_storage.joinpath(f"frozen_model.pb").write_text(f"model.{i}.pb") + self.file_storage.joinpath("frozen_model.pb").write_text(f"model.{i}.pb") self.caly_run_opt_file = self.file_storage.joinpath(calypso_run_opt_file) self.caly_run_opt_file.write_text("caly_run_opt_script") self.caly_check_opt_file = self.file_storage.joinpath(calypso_check_opt_file) @@ -236,7 +187,7 @@ def setUp(self): for ii in range(self.nmodels): model_path = self.work_dir.joinpath(f"task.{ii}") model_path.mkdir(exist_ok=True, parents=True) - model = model_path.joinpath(f"model.ckpt.pt") + model = model_path.joinpath("model.ckpt.pt") model.write_text(f"model {ii}") self.model_list.append(model) self.models = upload_artifact(self.model_list) diff --git a/tests/test_prep_run_caly.py b/tests/test_prep_run_caly.py index e949410d..f1964df4 100644 --- a/tests/test_prep_run_caly.py +++ b/tests/test_prep_run_caly.py @@ -1,53 +1,24 @@ -import json -import os -import pickle import shutil import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, - download_artifact, upload_artifact, ) -from dflow.python import ( - OP, - OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, -) from dpgen2.constants import ( calypso_check_opt_file, - calypso_index_pattern, calypso_input_file, calypso_run_opt_file, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context 
needed pass @@ -81,9 +52,6 @@ from dpgen2.op.prep_caly_model_devi import ( PrepCalyModelDevi, ) -from dpgen2.op.run_caly_model_devi import ( - RunCalyModelDevi, -) from dpgen2.superop.caly_evo_step import ( CalyEvoStep, ) @@ -125,7 +93,7 @@ def setUp(self): for ii in range(self.nmodels): model_path = self.work_dir.joinpath(f"task.{ii}") model_path.mkdir(parents=True, exist_ok=True) - model = model_path.joinpath(f"frozen_model.pb") + model = model_path.joinpath("frozen_model.pb") model.write_text(f"model {ii}") self.model_list.append(model) self.models = upload_artifact(self.model_list) diff --git a/tests/test_prep_run_diffcsp.py b/tests/test_prep_run_diffcsp.py index d5569d17..71be1430 100644 --- a/tests/test_prep_run_diffcsp.py +++ b/tests/test_prep_run_diffcsp.py @@ -36,10 +36,10 @@ def execute( self, ip: OPIO, ) -> OPIO: - task_dir = Path("diffcsp.%s" % ip["task_id"]) + task_dir = Path("diffcsp.{}".format(ip["task_id"])) task_dir.mkdir(exist_ok=True) for i in range(2): - fpath = task_dir / ("%s.cif" % i) + fpath = task_dir / (f"{i}.cif") fpath.write_text("Mocked cif.") return OPIO( { @@ -60,10 +60,10 @@ def execute( model_devis = [] for cif in cifs: name = cif[:-4] - traj = ip["task_path"] / ("traj.%s.dump" % name) + traj = ip["task_path"] / (f"traj.{name}.dump") traj.write_text("Mocked traj.") trajs.append(traj) - model_devi = ip["task_path"] / ("model_devi.%s.out" % name) + model_devi = ip["task_path"] / (f"model_devi.{name}.out") model_devi.write_text("Mocked model_devi.") model_devis.append(model_devi) return OPIO( @@ -119,6 +119,6 @@ def testPrepRunDiffCSP(self): self.assertEqual(len(model_devis), 4) def tearDown(self): - for d in glob.glob("test-prep-run-diffcsp-*") + ["task.000000", "task.000001"]: + for d in [*glob.glob("test-prep-run-diffcsp-*"), "task.000000", "task.000001"]: if os.path.isdir(d): shutil.rmtree(d) diff --git a/tests/test_prep_run_dp_labeling.py b/tests/test_prep_run_dp_labeling.py index 67256f86..dc64dad3 100644 --- 
a/tests/test_prep_run_dp_labeling.py +++ b/tests/test_prep_run_dp_labeling.py @@ -1,23 +1,18 @@ -import os import shutil -import sys import unittest from pathlib import ( Path, ) +from unittest.mock import ( + Mock, + patch, +) import dpdata import numpy as np -from dargs import ( - Argument, -) from dflow.python import ( FatalError, ) -from mock import ( - Mock, - patch, -) from dpgen2.fp.deepmd import ( PrepDeepmd, diff --git a/tests/test_prep_run_dp_train.py b/tests/test_prep_run_dp_train.py index 536ca4b9..f2e3650f 100644 --- a/tests/test_prep_run_dp_train.py +++ b/tests/test_prep_run_dp_train.py @@ -1,4 +1,3 @@ -import json import os import shutil import time @@ -6,39 +5,19 @@ from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -89,7 +68,10 @@ def _check_log( lines.append(" ".join(ww)) else: lines = lines_ - revised_fname = lambda ff: Path(ff).name if only_check_name else Path(ff) + + def revised_fname(ff): + return Path(ff).name if only_check_name else Path(ff) + tcase.assertEqual( lines[0].split(" "), ["init_model", str(revised_fname(Path(path) / init_model)), "OK"], @@ -99,7 +81,7 @@ def _check_log( lines[1 + ii].split(" "), [ "data", - str(revised_fname(Path(path) / sorted(list(init_data))[ii])), + str(revised_fname(Path(path) / sorted(init_data)[ii])), "OK", ], ) @@ -108,7 +90,7 @@ def _check_log( lines[3 + ii].split(" "), [ "data", - str(revised_fname(Path(path) / sorted(list(iter_data))[ii])), + str(revised_fname(Path(path) / sorted(iter_data)[ii])), "OK", ], 
) @@ -234,7 +216,7 @@ def setUp(self): Path(self.train_scripts[ii]).write_text("{}") def tearDown(self): - for ii in ["init_data", "iter_data"] + self.task_names: + for ii in ["init_data", "iter_data", *self.task_names]: if Path(ii).exists(): shutil.rmtree(str(ii)) for ii in self.init_models: @@ -302,7 +284,7 @@ def setUp(self): ] def tearDown(self): - for ii in ["init_data", "iter_data"] + self.task_names: + for ii in ["init_data", "iter_data", *self.task_names]: if Path(ii).exists(): shutil.rmtree(str(ii)) for ii in self.str_init_models: diff --git a/tests/test_prep_run_lmp.py b/tests/test_prep_run_lmp.py index 3b350240..8007615a 100644 --- a/tests/test_prep_run_lmp.py +++ b/tests/test_prep_run_lmp.py @@ -1,6 +1,4 @@ -import json import os -import pickle import shutil import time import unittest @@ -9,38 +7,20 @@ ) from typing import ( List, - Set, ) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -63,10 +43,6 @@ lmp_model_devi_name, lmp_task_pattern, lmp_traj_name, - model_name_pattern, - train_log_name, - train_script_name, - train_task_pattern, ) from dpgen2.exploration.task import ( BaseExplorationTaskGroup, diff --git a/tests/test_prep_run_vasp.py b/tests/test_prep_run_vasp.py index 77030bff..c77e5617 100644 --- a/tests/test_prep_run_vasp.py +++ b/tests/test_prep_run_vasp.py @@ -1,4 +1,3 @@ -import json import os import shutil import time @@ -6,40 +5,19 @@ from pathlib import ( Path, ) -from typing import ( - List, - Set, -) -import jsonpickle -import numpy as np from dflow import ( - InputArtifact, - 
InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, Step, - Steps, Workflow, - argo_range, download_artifact, upload_artifact, ) from dflow.python import ( - OP, OPIO, - Artifact, - OPIOSign, - PythonOPTemplate, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass @@ -63,7 +41,6 @@ VaspInputs, vasp_conf_name, vasp_input_name, - vasp_pot_name, ) from dpgen2.superop.prep_run_fp import ( PrepRunFp, @@ -153,7 +130,7 @@ def setUp(self): work_path = Path(fp_task_pattern % ii) work_path.mkdir(exist_ok=True, parents=True) (work_path / vasp_conf_name).write_text(f"conf {ii}") - (work_path / vasp_input_name).write_text(f"incar template") + (work_path / vasp_input_name).write_text("incar template") self.task_list.append(work_path) def check_run_lmp_output( @@ -238,7 +215,7 @@ def check_run_vasp_output( fc = [] ii = int(task_name.split(".")[1]) fc.append(f"conf {ii}") - fc.append(f"incar template") + fc.append("incar template") self.assertEqual(fc, Path("log").read_text().strip().split("\n")) self.assertEqual( f"labeled_data of {task_name}\nconf {ii}", diff --git a/tests/test_select_confs.py b/tests/test_select_confs.py index 491d42f7..7597c30e 100644 --- a/tests/test_select_confs.py +++ b/tests/test_select_confs.py @@ -1,53 +1,21 @@ -import json import os -import shutil -import time import unittest from pathlib import ( Path, ) -from typing import ( - List, - Set, - Tuple, -) -import jsonpickle -import numpy as np -from dflow import ( - InputArtifact, - InputParameter, - Inputs, - OutputArtifact, - OutputParameter, - Outputs, - S3Artifact, - Step, - Steps, - Workflow, - argo_range, - download_artifact, - upload_artifact, -) from dflow.python import ( - OP, OPIO, - Artifact, FatalError, - OPIOSign, - PythonOPTemplate, ) try: - from context import ( - dpgen2, - ) + from context import dpgen2 # noqa: F401 except 
ModuleNotFoundError: # case of upload everything to argo, no context needed pass from mocked_ops import ( MockedConfSelector, - MockedExplorationReport, MockedSelectConfs, ) diff --git a/tests/utils/context.py b/tests/utils/context.py index d6638725..f2118984 100644 --- a/tests/utils/context.py +++ b/tests/utils/context.py @@ -2,4 +2,4 @@ import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) -import dpgen2 +import dpgen2 # noqa: F401 diff --git a/tests/utils/test_binary_file_input.py b/tests/utils/test_binary_file_input.py index 3e627802..189f22a9 100644 --- a/tests/utils/test_binary_file_input.py +++ b/tests/utils/test_binary_file_input.py @@ -8,9 +8,6 @@ import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils import ( BinaryFileInput, ) @@ -147,7 +144,8 @@ def serialization(obj): ) # check binary file - tensor = np.random.random((3, 2)) + rng = np.random.default_rng() + tensor = rng.random((3, 2)) np.save(self.task_output_path / "tensor.npy", tensor) t = BinaryFileInput(self.task_output_path / "tensor.npy", "npy") t = serialization(t) diff --git a/tests/utils/test_bohrium_config.py b/tests/utils/test_bohrium_config.py index bd084631..311d1482 100644 --- a/tests/utils/test_bohrium_config.py +++ b/tests/utils/test_bohrium_config.py @@ -1,16 +1,4 @@ -import json -import os -import random -import shutil -import tempfile -import unittest -from pathlib import ( - Path, -) - import dflow -import dpdata -import numpy as np import pytest from dflow.config import ( config, @@ -21,9 +9,6 @@ ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils import ( bohrium_config_from_dict, ) diff --git a/tests/utils/test_dflow_config.py b/tests/utils/test_dflow_config.py index e5b2d420..f96e1e2f 100644 --- a/tests/utils/test_dflow_config.py +++ b/tests/utils/test_dflow_config.py @@ -1,24 +1,11 @@ -import json -import os -import random -import shutil -import tempfile import unittest -from 
pathlib import ( - Path, -) -import dpdata -import numpy as np from dflow.config import ( config, s3_config, ) # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils import ( dflow_config, dflow_s3_config, @@ -74,7 +61,7 @@ def test_s3_config(self): dflow_s3_config(config_data) self.assertEqual(s3_config["endpoint"], "bar") - def test_none(self): + def test_none2(self): config_data = { "endpoint": None, } diff --git a/tests/utils/test_dflow_query.py b/tests/utils/test_dflow_query.py index dad71643..0a320f5d 100644 --- a/tests/utils/test_dflow_query.py +++ b/tests/utils/test_dflow_query.py @@ -1,41 +1,11 @@ -import os -import textwrap import unittest -from pathlib import ( - Path, -) -from typing import ( - List, - Set, -) - -import numpy as np try: - from exploration.context import ( - dpgen2, - ) + from exploration.context import dpgen2 # noqa: F401 except ModuleNotFoundError: # case of upload everything to argo, no context needed pass -from dflow.python import ( - FatalError, -) -from dpgen2.exploration.report import ( - ExplorationReport, -) -from dpgen2.exploration.scheduler import ( - ConvergenceCheckStageScheduler, - ExplorationScheduler, -) -from dpgen2.exploration.selector import ( - ConfSelectorFrames, -) -from dpgen2.exploration.task import ( - ExplorationStage, - ExplorationTaskGroup, -) from dpgen2.utils.dflow_query import ( find_slice_ranges, get_all_schedulers, @@ -49,14 +19,6 @@ ) # isort: off -import utils.context -from mocked_ops import ( - MockedExplorationReport, - MockedExplorationTaskGroup, - MockedExplorationTaskGroup1, - MockedStage, - MockedStage1, -) # isort: on @@ -198,7 +160,7 @@ def test_get_last_scheduler(self): ) self.assertEqual(value, 10) - def test_get_last_scheduler(self): + def test_get_last_scheduler2(self): value = get_last_scheduler( MockedWF(none_global=False), ["iter-1--scheduler", "foo", "bar", "iter-0--scheduler", "init--scheduler"], @@ -220,7 +182,7 @@ def test_sort_slice_ops(self): idxes = 
find_slice_ranges(dpgen_keys, "run-lmp") self.assertEqual(idxes, [[8, 14], [30, 36]]) - def test_sort_slice_ops(self): + def test_sort_slice_ops2(self): expected_output = [ "init--scheduler", "init--id", @@ -266,7 +228,7 @@ def test_print_keys(self): " 3 -> 5 : iter-000000--run-train-0000 -> iter-000000--run-train-0002", " 6 : iter-000000--prep-run-train", ] - expected_output = "\n".join(expected_output + [""]) + expected_output = "\n".join([*expected_output, ""]) ret = print_keys_in_nice_format( dpgen_keys[:7], diff --git a/tests/utils/test_dl_dpgen2_arti.py b/tests/utils/test_dl_dpgen2_arti.py index c1166678..f0e08b92 100644 --- a/tests/utils/test_dl_dpgen2_arti.py +++ b/tests/utils/test_dl_dpgen2_arti.py @@ -1,22 +1,17 @@ -import json -import os -import random import shutil -import tempfile import unittest from pathlib import ( Path, ) - -import dflow -import dpdata -import mock -import numpy as np +from typing import ( + ClassVar, + List, +) +from unittest import ( + mock, +) # isort: off -from .context import ( - dpgen2, -) from dpgen2.entrypoint.watch import ( update_finished_steps, ) @@ -45,7 +40,7 @@ class MockedStep: class Mockedwf: - keys = [ + keys: ClassVar[List[str]] = [ "iter-0--prep-run-train", ] diff --git a/tests/utils/test_dl_dpgen2_arti_by_def.py b/tests/utils/test_dl_dpgen2_arti_by_def.py index 91d35e93..67136e2b 100644 --- a/tests/utils/test_dl_dpgen2_arti_by_def.py +++ b/tests/utils/test_dl_dpgen2_arti_by_def.py @@ -1,23 +1,18 @@ -import json -import os -import random import shutil -import tempfile import textwrap import unittest from pathlib import ( Path, ) - -import dflow -import dpdata -import mock -import numpy as np +from typing import ( + ClassVar, + List, +) +from unittest import ( + mock, +) # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils.download_dpgen2_artifacts import ( DownloadDefinition, _get_all_iterations, @@ -56,7 +51,7 @@ def __getitem__( class Mockedwf: - keys = [ + keys: ClassVar[List[str]] = [ 
"iter-000000--prep-run-train", "iter-000001--prep-run-train", "iter-000000--prep-run-explore", diff --git a/tests/utils/test_ele_temp.py b/tests/utils/test_ele_temp.py index 6a7de791..221a3353 100644 --- a/tests/utils/test_ele_temp.py +++ b/tests/utils/test_ele_temp.py @@ -7,9 +7,6 @@ import numpy as np # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils import ( setup_ele_temp, ) diff --git a/tests/utils/test_run_command.py b/tests/utils/test_run_command.py index 4b5b93bd..b65fcd6c 100644 --- a/tests/utils/test_run_command.py +++ b/tests/utils/test_run_command.py @@ -1,4 +1,3 @@ -import json import os import shutil import unittest @@ -6,12 +5,7 @@ Path, ) -import numpy as np - # isort: off -from .context import ( - dpgen2, -) from dpgen2.utils.run_command import ( run_command, ) diff --git a/tests/utils/test_step_config.py b/tests/utils/test_step_config.py index 002902a8..43683fdb 100644 --- a/tests/utils/test_step_config.py +++ b/tests/utils/test_step_config.py @@ -1,6 +1,3 @@ -import json -import os -import shutil import unittest from contextlib import ( contextmanager, @@ -8,26 +5,15 @@ from copy import ( deepcopy, ) -from pathlib import ( - Path, -) import dflow -import numpy as np -from dflow.python import ( - OPIO, -) # isort: off -from .context import ( - dpgen2, -) from dpgen2.constants import ( default_image, ) # from dpgen2.utils.step_config import normalize, gen_doc, init_executor -from dpgen2.utils import gen_doc_step_dict as gen_doc from dpgen2.utils import ( init_executor, )