From 21db464245d950f6cacb83d46111e287833bfa32 Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Fri, 15 Sep 2023 01:10:37 -0400
Subject: [PATCH] improve configurations of Python lint tools (#2823)

1. use `black-pre-commit-mirror`, which is faster, instead of `black`;
2. run ruff first and then black;
3. remove `tool.ruff.target-version`, which ruff can detect automatically;
4. add the `RUF` and `NPY` rules to `tool.ruff.select`;
5. set `tool.ruff.pydocstyle.convention` to `numpy`, which automatically
   adds several rules to `ignore`.

---------

Signed-off-by: Jinzhe Zeng
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml                 |  8 ++++----
 data/raw/shuffle_raw.py                 |  3 ++-
 deepmd/calculator.py                    | 11 +++++++++--
 deepmd/descriptor/se_a.py               |  6 +++---
 deepmd/descriptor/se_a_ebd.py           |  2 +-
 deepmd/descriptor/se_a_mask.py          |  2 +-
 deepmd/descriptor/se_atten.py           |  4 ++--
 deepmd/descriptor/se_r.py               |  2 +-
 deepmd/descriptor/se_t.py               |  2 +-
 deepmd/entrypoints/ipi.py               |  2 +-
 deepmd/fit/dos.py                       |  8 ++++----
 deepmd/infer/deep_tensor.py             |  4 +++-
 deepmd/loss/ener.py                     |  4 ++--
 deepmd/model/model_stat.py              |  2 +-
 deepmd/nvnmd/entrypoints/wrap.py        |  2 +-
 deepmd/train/run_options.py             |  4 ++--
 deepmd/train/trainer.py                 |  8 ++++----
 deepmd/utils/argcheck.py                | 15 +++++++++------
 deepmd/utils/data_system.py             |  4 +---
 deepmd/utils/finetune.py                |  6 ++----
 deepmd/utils/multi_init.py              |  4 ++--
 deepmd/utils/network.py                 |  4 ++--
 deepmd/utils/path.py                    |  2 +-
 deepmd/utils/spin.py                    |  7 ++++---
 deepmd_cli/main.py                      |  2 +-
 pyproject.toml                          | 11 +++++------
 source/install/build_tf.py              |  2 +-
 source/tests/common.py                  |  4 +---
 source/tests/test_argument_parser.py    |  2 +-
 source/tests/test_descrpt_sea_ef_rot.py |  8 ++++----
 source/tests/test_fitting_stat.py       |  4 ++--
 31 files changed, 78 insertions(+), 71 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d39f5ec127..19c29c0322 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -22,10 +22,6 @@ repos:
   - id: check-symlinks
   - id: check-toml
 # Python
-- repo: https://github.com/psf/black
-  rev: 23.9.1
-  hooks:
-  - id: black-jupyter
 - repo: https://github.com/PyCQA/isort
   rev: 5.12.0
   hooks:
@@ -37,6 +33,10 @@ repos:
   hooks:
   - id: ruff
     args: ["--fix"]
+- repo: https://github.com/psf/black-pre-commit-mirror
+  rev: 23.9.1
+  hooks:
+  - id: black-jupyter
 # numpydoc
 - repo: https://github.com/Carreau/velin
   rev: 0.0.12
diff --git a/data/raw/shuffle_raw.py b/data/raw/shuffle_raw.py
index f8c689e3f2..51bb7466c9 100755
--- a/data/raw/shuffle_raw.py
+++ b/data/raw/shuffle_raw.py
@@ -69,7 +69,8 @@ def _main():
     tmp = np.reshape(tmp, [nframe, -1])
     nframe = tmp.shape[0]
     idx = np.arange(nframe)
-    np.random.shuffle(idx)
+    rng = np.random.default_rng()
+    rng.shuffle(idx)

     for ii in raws:
         data = np.loadtxt(inpath + "/" + ii)
diff --git a/deepmd/calculator.py b/deepmd/calculator.py
index acef657e2c..8636ff30d2 100644
--- a/deepmd/calculator.py
+++ b/deepmd/calculator.py
@@ -6,6 +6,7 @@
 )
 from typing import (
     TYPE_CHECKING,
+    ClassVar,
     Dict,
     List,
     Optional,
@@ -69,13 +70,19 @@ class DP(Calculator):
     """

     name = "DP"
-    implemented_properties = ["energy", "free_energy", "forces", "virial", "stress"]
+    implemented_properties: ClassVar[List[str]] = [
+        "energy",
+        "free_energy",
+        "forces",
+        "virial",
+        "stress",
+    ]

     def __init__(
         self,
         model: Union[str, "Path"],
         label: str = "DP",
-        type_dict: Dict[str, int] = None,
+        type_dict: Optional[Dict[str, int]] = None,
         **kwargs,
     ) -> None:
         Calculator.__init__(self, label=label, **kwargs)
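The shuffle_raw.py hunk above is the migration that the new `NPY` ruff rules (NPY002) enforce: module-level `np.random.*` calls give way to an explicit Generator. A minimal standalone sketch, not part of the patch; the seed and array are illustrative:

    import numpy as np

    # Legacy API flagged by NPY002: draws from hidden global state.
    # np.random.shuffle(idx)

    # Preferred: an explicit, optionally seeded Generator object.
    rng = np.random.default_rng(42)
    idx = np.arange(10)
    rng.shuffle(idx)  # shuffles in place, like the legacy call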
diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py
index 82df8cc1a3..cceb72d4fb 100644
--- a/deepmd/descriptor/se_a.py
+++ b/deepmd/descriptor/se_a.py
@@ -144,7 +144,7 @@ class DescrptSeA(DescrptSe):
     .. [1] Linfeng Zhang, Jiequn Han, Han Wang, Wissam A. Saidi, Roberto Car, and E.
        Weinan. 2018. End-to-end symmetry preserving inter-atomic potential energy model
        for finite and extended systems. In Proceedings of the 32nd International Conference on Neural Information Processing
-       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441–4451.
+       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441-4451.
     """

     def __init__(
@@ -890,7 +890,7 @@ def _filter_lower(
         suffix="",
     ):
         """Input env matrix, returns R.G."""
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         # cut-out inputs
         # with natom x (nei_type_i x 4)
         inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4])
@@ -1006,7 +1006,7 @@ def _filter(
         nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0]
         # natom x (nei x 4)
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         outputs_size_2 = self.n_axis_neuron
         all_excluded = all(
             (type_input, type_i) in self.exclude_types for type_i in range(self.ntypes)
diff --git a/deepmd/descriptor/se_a_ebd.py b/deepmd/descriptor/se_a_ebd.py
index f46444169e..4816ec1569 100644
--- a/deepmd/descriptor/se_a_ebd.py
+++ b/deepmd/descriptor/se_a_ebd.py
@@ -230,7 +230,7 @@ def _embedding_net(
         # natom x (nei x 4)
         inputs = tf.reshape(inputs, [-1, self.ndescrpt])
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + filter_neuron
+        outputs_size = [1, *filter_neuron]
         with tf.variable_scope(name, reuse=reuse):
             xyz_scatter_total = []
             # with natom x (nei x 4)
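The repeated `[1] + self.filter_neuron` to `[1, *self.filter_neuron]` rewrites above and below follow ruff's RUF005 (collection-literal concatenation). A small sketch with made-up layer sizes, separate from the patch:

    filter_neuron = [25, 50, 100]

    # Flagged by RUF005: concatenation allocates an extra temporary list.
    outputs_size = [1] + filter_neuron

    # Preferred: unpack into a single list literal; the result is identical.
    outputs_size = [1, *filter_neuron]
    assert outputs_size == [1, 25, 50, 100]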
""" def __init__( diff --git a/deepmd/descriptor/se_atten.py b/deepmd/descriptor/se_atten.py index 12558c45c4..c962952ec0 100644 --- a/deepmd/descriptor/se_atten.py +++ b/deepmd/descriptor/se_atten.py @@ -1057,7 +1057,7 @@ def _filter_lower( reuse=None, ): """Input env matrix, returns R.G.""" - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] # cut-out inputs # with natom x (nei_type_i x 4) inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4]) @@ -1260,7 +1260,7 @@ def _filter( nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0] # natom x (nei x 4) shape = inputs.get_shape().as_list() - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] outputs_size_2 = self.n_axis_neuron start_index = 0 diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index ad9fda2238..fbc54a651f 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -638,7 +638,7 @@ def _filter_r( trainable=True, ): # natom x nei - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] with tf.variable_scope(name, reuse=reuse): start_index = 0 xyz_scatter_total = [] diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index 34af8a90a2..671dbd4e15 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -633,7 +633,7 @@ def _filter( ): # natom x (nei x 4) shape = inputs.get_shape().as_list() - outputs_size = [1] + self.filter_neuron + outputs_size = [1, *self.filter_neuron] with tf.variable_scope(name, reuse=reuse): start_index_i = 0 result = None diff --git a/deepmd/entrypoints/ipi.py b/deepmd/entrypoints/ipi.py index b14b369e40..da287ff3de 100644 --- a/deepmd/entrypoints/ipi.py +++ b/deepmd/entrypoints/ipi.py @@ -24,7 +24,7 @@ def _program(name: str, args: List[str]): args : list of str list of arguments """ - return subprocess.call([os.path.join(ROOT_DIR, name)] + args, close_fds=False) + return subprocess.call([os.path.join(ROOT_DIR, name), *args], close_fds=False) def dp_ipi(): diff --git a/deepmd/fit/dos.py b/deepmd/fit/dos.py index 82018ea520..9a7cb734e5 100644 --- a/deepmd/fit/dos.py +++ b/deepmd/fit/dos.py @@ -98,8 +98,8 @@ def __init__( numb_aparam: int = 0, numb_dos: int = 300, rcond: Optional[float] = None, - trainable: List[bool] = None, - seed: int = None, + trainable: Optional[List[bool]] = None, + seed: Optional[int] = None, activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, @@ -380,8 +380,8 @@ def build( self, inputs: tf.Tensor, natoms: tf.Tensor, - input_dict: dict = None, - reuse: bool = None, + input_dict: Optional[dict] = None, + reuse: Optional[bool] = None, suffix: str = "", ) -> tf.Tensor: """Build the computational graph for fitting net. diff --git a/deepmd/infer/deep_tensor.py b/deepmd/infer/deep_tensor.py index 367a8ab5e7..268523e959 100644 --- a/deepmd/infer/deep_tensor.py +++ b/deepmd/infer/deep_tensor.py @@ -1,6 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( TYPE_CHECKING, + ClassVar, + Dict, List, Optional, Tuple, @@ -39,7 +41,7 @@ class DeepTensor(DeepEval): The input map for tf.import_graph_def. 
diff --git a/deepmd/infer/deep_tensor.py b/deepmd/infer/deep_tensor.py
index 367a8ab5e7..268523e959 100644
--- a/deepmd/infer/deep_tensor.py
+++ b/deepmd/infer/deep_tensor.py
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 from typing import (
     TYPE_CHECKING,
+    ClassVar,
+    Dict,
     List,
     Optional,
     Tuple,
@@ -39,7 +41,7 @@ class DeepTensor(DeepEval):
         The input map for tf.import_graph_def. Only work with default tf graph
     """

-    tensors = {
+    tensors: ClassVar[Dict[str, str]] = {
         # descriptor attrs
         "t_ntypes": "descrpt_attr/ntypes:0",
         "t_rcut": "descrpt_attr/rcut:0",
diff --git a/deepmd/loss/ener.py b/deepmd/loss/ener.py
index 7895fadbf3..95997bad10 100644
--- a/deepmd/loss/ener.py
+++ b/deepmd/loss/ener.py
@@ -388,9 +388,9 @@ def __init__(
         limit_pref_ae: float = 0.0,
         start_pref_pf: float = 0.0,
         limit_pref_pf: float = 0.0,
-        relative_f: float = None,
+        relative_f: Optional[float] = None,
         enable_atom_ener_coeff: bool = False,
-        use_spin: list = None,
+        use_spin: Optional[list] = None,
     ) -> None:
         self.starter_learning_rate = starter_learning_rate
         self.start_pref_e = start_pref_e
diff --git a/deepmd/model/model_stat.py b/deepmd/model/model_stat.py
index 08bc162632..d2cc918b64 100644
--- a/deepmd/model/model_stat.py
+++ b/deepmd/model/model_stat.py
@@ -58,7 +58,7 @@ def make_stat_input(data, nbatches, merge_sys=True):


 def merge_sys_stat(all_stat):
-    first_key = list(all_stat.keys())[0]
+    first_key = next(iter(all_stat.keys()))
     nsys = len(all_stat[first_key])
     ret = defaultdict(list)
     for ii in range(nsys):
diff --git a/deepmd/nvnmd/entrypoints/wrap.py b/deepmd/nvnmd/entrypoints/wrap.py
index 896e1e0342..455dd999df 100644
--- a/deepmd/nvnmd/entrypoints/wrap.py
+++ b/deepmd/nvnmd/entrypoints/wrap.py
@@ -145,7 +145,7 @@ def wrap(self):
         nvnmd_cfg.save(nvnmd_cfg.config_file)
         head = self.wrap_head(nhs, nws)
         # output model
-        hs = [] + head
+        hs = [*head]
         for d in datas:
             hs.extend(d)

diff --git a/deepmd/train/run_options.py b/deepmd/train/run_options.py
index ad1774908b..451632949e 100644
--- a/deepmd/train/run_options.py
+++ b/deepmd/train/run_options.py
@@ -45,7 +45,7 @@


 # http://patorjk.com/software/taag. Font:Big"
-WELCOME = (  # noqa
+WELCOME = (
     r" _____               _____   __  __  _____        _     _  _   ",
     r"|  __ \             |  __ \ |  \/  ||  __ \      | |   (_)| |  ",
     r"| |  | |  ___   ___ | |__) || \  / || |  | | ______ | | __ _ | |_ ",
@@ -71,7 +71,7 @@
     f"build float prec:   {global_float_prec}",
     f"build variant:      {GLOBAL_CONFIG['dp_variant']}",
     f"build with tf inc:  {GLOBAL_CONFIG['tf_include_dir']}",
-    f"build with tf lib:  {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}",  # noqa
+    f"build with tf lib:  {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}",
 )

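model_stat.py above (and trainer.py below) pick up ruff's RUF015 (unnecessary iterable allocation for first element): taking the first dict key no longer materializes the whole key list. A tiny sketch with made-up data:

    all_stat = {"energy": [1.0], "force": [0.5]}

    # Flagged by RUF015: builds the full key list just to take element 0.
    first_key = list(all_stat.keys())[0]

    # Preferred: O(1), stops after the first key.
    first_key = next(iter(all_stat.keys()))
    assert first_key == "energy"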
diff --git a/deepmd/train/trainer.py b/deepmd/train/trainer.py
index b322336b39..1f7b78045b 100644
--- a/deepmd/train/trainer.py
+++ b/deepmd/train/trainer.py
@@ -250,7 +250,7 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix=""):
             if not self.multi_task_mode:
                 single_data = data
             else:
-                single_data = data[list(data.keys())[0]]
+                single_data = data[next(iter(data.keys()))]
             if self.ntypes < single_data.get_ntypes():
                 raise ValueError(
                     "The number of types of the training data is %d, but that of the "
@@ -373,7 +373,7 @@ def _build_network(self, data, suffix=""):
         if not self.multi_task_mode:
             self._get_place_horders(data.get_data_dict())
         else:
-            self._get_place_horders(data[list(data.keys())[0]].get_data_dict())
+            self._get_place_horders(data[next(iter(data.keys()))].get_data_dict())

         self.place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type")
         self.place_holders["natoms_vec"] = tf.placeholder(
@@ -467,7 +467,7 @@ def _build_training(self):
                 var_list=trainable_variables,
                 name="train_step",
             )
-            train_ops = [apply_op] + self._extra_train_ops
+            train_ops = [apply_op, *self._extra_train_ops]
             self.train_op = tf.group(*train_ops)
         else:
             self.train_op = {}
@@ -479,7 +479,7 @@ def _build_training(self):
                     var_list=trainable_variables,
                     name=f"train_step_{fitting_key}",
                 )
-                train_ops = [apply_op] + self._extra_train_ops
+                train_ops = [apply_op, *self._extra_train_ops]
                 self.train_op[fitting_key] = tf.group(*train_ops)
         log.info("built training")
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index 153824cb0d..f670feb578 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -177,7 +177,7 @@ def descrpt_se_a_args():
     doc_axis_neuron = "Size of the submatrix of G (embedding matrix)."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net is trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -263,7 +263,8 @@ def descrpt_se_a_tpe_args():
     doc_type_nlayer = "number of hidden layers of type embedding net"
     doc_numb_aparam = "dimension of atomic parameter. if set to a value > 0, the atomic parameters are embedded."

-    return descrpt_se_a_args() + [
+    return [
+        *descrpt_se_a_args(),
         Argument("type_nchanl", int, optional=True, default=4, doc=doc_type_nchanl),
         Argument("type_nlayer", int, optional=True, default=2, doc=doc_type_nlayer),
         Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam),
@@ -280,7 +281,7 @@ def descrpt_se_r_args():
     doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net are trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -344,7 +345,7 @@ def descrpt_se_atten_common_args():
     doc_axis_neuron = "Size of the submatrix of G (embedding matrix)."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net is trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -397,7 +398,8 @@ def descrpt_se_atten_args():
     doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True."
     doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"

-    return descrpt_se_atten_common_args() + [
+    return [
+        *descrpt_se_atten_common_args(),
         Argument(
             "stripped_type_embedding",
             bool,
@@ -422,7 +424,8 @@
 def descrpt_se_atten_v2_args():
     doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"

-    return descrpt_se_atten_common_args() + [
+    return [
+        *descrpt_se_atten_common_args(),
         Argument(
             "set_davg_zero", bool, optional=True, default=False, doc=doc_set_davg_zero
         ),
diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py
index 0bfe6b7c70..0071da755c 100644
--- a/deepmd/utils/data_system.py
+++ b/deepmd/utils/data_system.py
@@ -618,9 +618,7 @@ def _check_type_map_consistency(self, type_map_list):
             min_len = min([len(ii), len(ret)])
             for idx in range(min_len):
                 if ii[idx] != ret[idx]:
-                    raise RuntimeError(
-                        f"inconsistent type map: {str(ret)} {str(ii)}"
-                    )
+                    raise RuntimeError(f"inconsistent type map: {ret!s} {ii!s}")
             if len(ii) > len(ret):
                 ret = ii
         return ret
diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py
index b641a6beca..4e597b1e05 100644
--- a/deepmd/utils/finetune.py
+++ b/deepmd/utils/finetune.py
@@ -56,7 +56,7 @@ def replace_model_params_with_pretrained_model(
         if i not in pretrained_type_map:
             out_line_type.append(i)
     assert not out_line_type, (
-        f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+        f"{out_line_type!s} type(s) not contained in the pretrained model! "
         "Please choose another suitable one."
     )
     if cur_type_map != pretrained_type_map:
@@ -103,9 +103,7 @@ def replace_model_params_with_pretrained_model(
         # keep some params that are irrelevant to model structures (need to discuss) TODO
         if "trainable" in cur_para.keys():
             target_para["trainable"] = cur_para["trainable"]
-        log.info(
-            f"Change the '{config_key}' from {str(cur_para)} to {str(target_para)}."
-        )
+        log.info(f"Change the '{config_key}' from {cur_para!s} to {target_para!s}.")
         jdata["model"][config_key] = target_para

     return jdata, cur_type_map
diff --git a/deepmd/utils/multi_init.py b/deepmd/utils/multi_init.py
index fd56f715c5..6c070dc67e 100644
--- a/deepmd/utils/multi_init.py
+++ b/deepmd/utils/multi_init.py
@@ -54,7 +54,7 @@ def replace_model_params_with_frz_multi_model(
         if i not in pretrained_type_map:
             out_line_type.append(i)
     assert not out_line_type, (
-        f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+        f"{out_line_type!s} type(s) not contained in the pretrained model! "
         "Please choose another suitable one."
     )
     if cur_type_map != pretrained_type_map:
@@ -169,5 +169,5 @@ def _change_sub_config(jdata: Dict[str, Any], src_jdata: Dict[str, Any], sub_key
     # keep some params that are irrelevant to model structures (need to discuss) TODO
     if "trainable" in cur_para.keys():
         target_para["trainable"] = cur_para["trainable"]
-    log.info(f"Change the '{sub_key}' from {str(cur_para)} to {str(target_para)}.")
+    log.info(f"Change the '{sub_key}' from {cur_para!s} to {target_para!s}.")
     jdata[sub_key] = target_para
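The f-string changes in data_system.py, finetune.py, and multi_init.py above come from ruff's RUF010 (explicit f-string type conversion): `{str(x)}` becomes `{x!s}`. A minimal sketch:

    type_map = ["O", "H"]

    # Flagged by RUF010: an explicit str() call inside the replacement field.
    old = f"inconsistent type map: {str(type_map)}"

    # Preferred: the !s conversion flag is equivalent and shorter.
    new = f"inconsistent type map: {type_map!s}"
    assert old == new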
""" input_shape = xx.get_shape().as_list() - outputs_size = [input_shape[1]] + network_size + outputs_size = [input_shape[1], *network_size] for ii in range(1, len(outputs_size)): w_initializer = tf.random_normal_initializer( diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index 5206f44089..a8e4bc329f 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -114,7 +114,7 @@ def __str__(self) -> str: """Represent string.""" def __repr__(self) -> str: - return f"{type(self)} ({str(self)})" + return f"{type(self)} ({self!s})" def __eq__(self, other) -> bool: return str(self) == str(other) diff --git a/deepmd/utils/spin.py b/deepmd/utils/spin.py index c969a8062a..7820627649 100644 --- a/deepmd/utils/spin.py +++ b/deepmd/utils/spin.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( List, + Optional, ) from deepmd.env import ( @@ -24,9 +25,9 @@ class Spin: def __init__( self, - use_spin: List[bool] = None, - spin_norm: List[float] = None, - virtual_len: List[float] = None, + use_spin: Optional[List[bool]] = None, + spin_norm: Optional[List[float]] = None, + virtual_len: Optional[List[float]] = None, ) -> None: """Constructor.""" self.use_spin = use_spin diff --git a/deepmd_cli/main.py b/deepmd_cli/main.py index 94ceb9888d..fceca239ea 100644 --- a/deepmd_cli/main.py +++ b/deepmd_cli/main.py @@ -312,7 +312,7 @@ def main_parser() -> argparse.ArgumentParser: # The table is composed of fifth-order polynomial coefficients and is assembled # from two sub-tables. The first table takes the step(parameter) as it's uniform # step, while the second table takes 10 * step as it\s uniform step - #  The range of the first table is automatically detected by deepmd-kit, while the + # The range of the first table is automatically detected by deepmd-kit, while the # second table ranges from the first table's upper boundary(upper) to the # extrapolate(parameter) * upper. parser_compress = subparsers.add_parser( diff --git a/pyproject.toml b/pyproject.toml index b169a3b0eb..0ab9390efb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -166,24 +166,20 @@ profile = "black" force_grid_wrap = 1 [tool.ruff] -target-version = "py37" select = [ "E", # errors "F", # pyflakes "D", # pydocstyle "UP", # pyupgrade "C4", # flake8-comprehensions + "RUF", # ruff + "NPY", # numpy ] ignore = [ "E501", # line too long "F841", # local variable is assigned to but never used "E741", # ambiguous variable name "E402", # module level import not at top of file - "D413", # missing blank line after last section - "D416", # section name should end with a colon - "D203", # 1 blank line required before class docstring - "D107", # missing docstring in __init__ - "D213", # multi-line docstring summary should start at the second line "D100", # TODO: missing docstring in public module "D101", # TODO: missing docstring in public class "D102", # TODO: missing docstring in public method @@ -195,3 +191,6 @@ ignore = [ "D404", # TODO: first word of the docstring should not be This ] ignore-init-module-imports = true + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/source/install/build_tf.py b/source/install/build_tf.py index 043c4c6c81..15847d2c21 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -151,7 +151,7 @@ def __call__(self): if not self.exists: raise RuntimeError( f"Download {self.filename} from {self.url} failed! " - f"You can manually download it to {str(self.path)} and " + f"You can manually download it to {self.path!s} and " "retry the script." 
diff --git a/source/install/build_tf.py b/source/install/build_tf.py
index 043c4c6c81..15847d2c21 100755
--- a/source/install/build_tf.py
+++ b/source/install/build_tf.py
@@ -151,7 +151,7 @@ def __call__(self):
         if not self.exists:
             raise RuntimeError(
                 f"Download {self.filename} from {self.url} failed! "
-                f"You can manually download it to {str(self.path)} and "
+                f"You can manually download it to {self.path!s} and "
                 "retry the script."
             )
         self.post_process()
diff --git a/source/tests/common.py b/source/tests/common.py
index e5dd1281f3..f8ed23df03 100644
--- a/source/tests/common.py
+++ b/source/tests/common.py
@@ -919,9 +919,7 @@ def check_type_map_consistency(self, type_map_list):
             min_len = min([len(ii), len(ret)])
             for idx in range(min_len):
                 if ii[idx] != ret[idx]:
-                    raise RuntimeError(
-                        f"inconsistent type map: {str(ret)} {str(ii)}"
-                    )
+                    raise RuntimeError(f"inconsistent type map: {ret!s} {ii!s}")
             if len(ii) > len(ret):
                 ret = ii
         return ret
diff --git a/source/tests/test_argument_parser.py b/source/tests/test_argument_parser.py
index 524499935c..bb8dd9ed62 100644
--- a/source/tests/test_argument_parser.py
+++ b/source/tests/test_argument_parser.py
@@ -184,7 +184,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"):
         )

         # test default values
-        cmd_args = [command] + required
+        cmd_args = [command, *required]
         buffer = StringIO()
         try:
             with redirect_stderr(buffer):
diff --git a/source/tests/test_descrpt_sea_ef_rot.py b/source/tests/test_descrpt_sea_ef_rot.py
index d94565af96..56cdb357b0 100644
--- a/source/tests/test_descrpt_sea_ef_rot.py
+++ b/source/tests/test_descrpt_sea_ef_rot.py
@@ -108,7 +108,7 @@ def make_test_data(self, nframes):
         one_type = []
         for ii in range(2, 2 + self.ntypes):
             one_type = one_type + [ii - 2 for jj in range(self.natoms[ii])]
-        np.random.shuffle(one_type)
+        np.random.shuffle(one_type)  # noqa: NPY002
         one_type = np.array(one_type, dtype=int).reshape([1, -1])
         dtype = np.tile(one_type, [nframes, 1])
         defield = np.random.random(dcoord.shape)
@@ -162,7 +162,7 @@ def test_rot_axis(self, suffix=""):
         )
         self.sess.run(tf.global_variables_initializer())

-        np.random.seed(0)
+        np.random.seed(0)  # noqa: NPY002
         # make test data
         nframes = 2
         dcoord, dbox, dtype, defield = self.make_test_data(nframes)
@@ -308,7 +308,7 @@ def test_rot_diff_axis(self, suffix=""):
         )
         self.sess.run(tf.global_variables_initializer())

-        np.random.seed(0)
+        np.random.seed(0)  # noqa: NPY002
         # make test data
         nframes = 2
         dcoord, dbox, dtype, defield = self.make_test_data(nframes)
@@ -423,7 +423,7 @@ def test_rot_field_corot(self, suffix=""):
         )
         self.sess.run(tf.global_variables_initializer())

-        np.random.seed(0)
+        np.random.seed(0)  # noqa: NPY002
         # make test data
         nframes = 2
         dcoord, dbox, dtype, defield = self.make_test_data(nframes)
diff --git a/source/tests/test_fitting_stat.py b/source/tests/test_fitting_stat.py
index 045348440e..ad62c89f2a 100644
--- a/source/tests/test_fitting_stat.py
+++ b/source/tests/test_fitting_stat.py
@@ -28,12 +28,12 @@ def _make_fake_data(sys_natoms, sys_nframes, avgs, stds):
         tmp_data_a = []
         for jj in range(ndof):
             tmp_data_f.append(
-                np.random.normal(
+                np.random.normal(  # noqa: NPY002
                     loc=avgs[jj], scale=stds[jj], size=(sys_nframes[ii], 1)
                 )
             )
             tmp_data_a.append(
-                np.random.normal(
+                np.random.normal(  # noqa: NPY002
                     loc=avgs[jj], scale=stds[jj], size=(sys_nframes[ii], sys_natoms[ii])
                 )
             )
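Where the tests above intentionally keep the legacy global-state API (their reproducibility depends on `np.random.seed` reaching every helper that draws from the global RNG), NPY002 is silenced per line with `# noqa` rather than rewritten. The pattern, as a standalone sketch with illustrative values:

    import numpy as np

    # Intentional legacy usage: helpers elsewhere in the same test read from
    # the global RNG, so a local Generator would not reach them.
    np.random.seed(0)  # noqa: NPY002
    data = np.random.normal(loc=0.0, scale=1.0, size=(2, 3))  # noqa: NPY002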