improve configurations of Python lint tools (#2823)
1. use `black-pre-commit-mirror` instead of `black`, which installs much
faster because it ships pre-built wheels;
2. run `ruff` before `black`, so that `black` reformats whatever
`ruff --fix` rewrites;
3. remove `tool.ruff.target-version`, which ruff can detect
automatically from `requires-python`;
4. add the `RUF` and `NPY` rule sets to `tool.ruff.select`;
5. set `tool.ruff.pydocstyle.convention` to `numpy`, which
automatically adds the conflicting pydocstyle rules to `ignore` (a
sketch of the resulting configuration follows the change summary below).

---------

Signed-off-by: Jinzhe Zeng <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
njzjz and pre-commit-ci[bot] authored Sep 15, 2023
1 parent 5591ed1 commit 21db464
Showing 31 changed files with 78 additions and 71 deletions.
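
The pyproject.toml hunk that items 3-5 of the message describe is among the diffs that did not load below. Here is a minimal sketch of the resulting configuration, assuming ruff's 2023-era schema in which `select` and `pydocstyle` live directly under `[tool.ruff]`; the concrete rule list in the real file may differ:

    [tool.ruff]
    # `target-version` is omitted; ruff infers it from `requires-python`.
    select = [
        "E",    # pycodestyle errors
        "F",    # pyflakes
        "D",    # pydocstyle
        "UP",   # pyupgrade
        "RUF",  # ruff-specific rules (newly enabled)
        "NPY",  # numpy-specific rules (newly enabled)
    ]

    [tool.ruff.pydocstyle]
    # Declaring the numpydoc convention makes ruff ignore the pydocstyle
    # rules that conflict with it, instead of listing them by hand.
    convention = "numpy"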
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -22,10 +22,6 @@ repos:
       - id: check-symlinks
       - id: check-toml
   # Python
-  - repo: https://github.com/psf/black
-    rev: 23.9.1
-    hooks:
-      - id: black-jupyter
   - repo: https://github.com/PyCQA/isort
     rev: 5.12.0
     hooks:
@@ -37,6 +33,10 @@ repos:
     hooks:
       - id: ruff
         args: ["--fix"]
+  - repo: https://github.com/psf/black-pre-commit-mirror
+    rev: 23.9.1
+    hooks:
+      - id: black-jupyter
   # numpydoc
   - repo: https://github.com/Carreau/velin
     rev: 0.0.12
3 changes: 2 additions & 1 deletion data/raw/shuffle_raw.py
@@ -69,7 +69,8 @@ def _main():
     tmp = np.reshape(tmp, [nframe, -1])
     nframe = tmp.shape[0]
     idx = np.arange(nframe)
-    np.random.shuffle(idx)
+    rng = np.random.default_rng()
+    rng.shuffle(idx)

     for ii in raws:
         data = np.loadtxt(inpath + "/" + ii)
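
The shuffle_raw.py change above is the fix pattern for ruff's NPY002 rule (numpy-legacy-random), one of the newly enabled NPY checks: the module-level np.random functions draw from hidden global state, while the Generator API makes the state explicit. A minimal standalone sketch of the same rewrite:

    import numpy as np

    idx = np.arange(10)

    # NPY002 flags the legacy global-state call:
    #     np.random.shuffle(idx)
    # The replacement creates an explicit Generator; pass a seed such as
    # np.random.default_rng(12345) when reproducibility matters.
    rng = np.random.default_rng()
    rng.shuffle(idx)  # in-place shuffle, same behavior as the legacy call
    print(idx)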
11 changes: 9 additions & 2 deletions deepmd/calculator.py
@@ -6,6 +6,7 @@
 )
 from typing import (
     TYPE_CHECKING,
+    ClassVar,
     Dict,
     List,
     Optional,
@@ -69,13 +70,19 @@ class DP(Calculator):
     """

     name = "DP"
-    implemented_properties = ["energy", "free_energy", "forces", "virial", "stress"]
+    implemented_properties: ClassVar[List[str]] = [
+        "energy",
+        "free_energy",
+        "forces",
+        "virial",
+        "stress",
+    ]

     def __init__(
         self,
         model: Union[str, "Path"],
         label: str = "DP",
-        type_dict: Dict[str, int] = None,
+        type_dict: Optional[Dict[str, int]] = None,
         **kwargs,
     ) -> None:
         Calculator.__init__(self, label=label, **kwargs)
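
Annotating implemented_properties with ClassVar matches ruff's RUF012 rule, which asks that mutable class attributes be marked as class-level rather than left looking like instance-attribute annotations. A standalone sketch of the pattern (the class body here is illustrative, not the real calculator):

    from typing import ClassVar, List


    class DP:
        # A bare list would be flagged by RUF012: it is mutable and shared
        # by every instance. ClassVar records that this is intentional.
        implemented_properties: ClassVar[List[str]] = [
            "energy",
            "forces",
        ]


    print(DP.implemented_properties)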
6 changes: 3 additions & 3 deletions deepmd/descriptor/se_a.py
@@ -144,7 +144,7 @@ class DescrptSeA(DescrptSe):
     .. [1] Linfeng Zhang, Jiequn Han, Han Wang, Wissam A. Saidi, Roberto Car, and E. Weinan. 2018.
        End-to-end symmetry preserving inter-atomic potential energy model for finite and extended
        systems. In Proceedings of the 32nd International Conference on Neural Information Processing
-       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441–4451.
+       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441-4451.
     """

     def __init__(
@@ -890,7 +890,7 @@ def _filter_lower(
         suffix="",
     ):
         """Input env matrix, returns R.G."""
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         # cut-out inputs
         # with natom x (nei_type_i x 4)
         inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4])
@@ -1006,7 +1006,7 @@ def _filter(
         nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0]
         # natom x (nei x 4)
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         outputs_size_2 = self.n_axis_neuron
         all_excluded = all(
             (type_input, type_i) in self.exclude_types for type_i in range(self.ntypes)
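
The `[1] + self.filter_neuron` → `[1, *self.filter_neuron]` rewrites in this file, and the matching ones in se_a_ebd.py, se_atten.py, se_r.py, se_t.py, ipi.py, wrap.py, trainer.py, argcheck.py, and network.py below, are ruff's RUF005 autofix, which prefers unpacking inside a single literal over list concatenation. A minimal sketch:

    filter_neuron = [25, 50, 100]

    # RUF005 flags concatenating a literal with an existing list:
    outputs_size_old = [1] + filter_neuron
    # and autofixes it to one literal with unpacking:
    outputs_size_new = [1, *filter_neuron]

    assert outputs_size_old == outputs_size_new == [1, 25, 50, 100]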
2 changes: 1 addition & 1 deletion deepmd/descriptor/se_a_ebd.py
@@ -230,7 +230,7 @@ def _embedding_net(
         # natom x (nei x 4)
         inputs = tf.reshape(inputs, [-1, self.ndescrpt])
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + filter_neuron
+        outputs_size = [1, *filter_neuron]
         with tf.variable_scope(name, reuse=reuse):
             xyz_scatter_total = []
             # with natom x (nei x 4)
2 changes: 1 addition & 1 deletion deepmd/descriptor/se_a_mask.py
@@ -112,7 +112,7 @@ class DescrptSeAMask(DescrptSeA):
     .. [1] Linfeng Zhang, Jiequn Han, Han Wang, Wissam A. Saidi, Roberto Car, and E. Weinan. 2018.
        End-to-end symmetry preserving inter-atomic potential energy model for finite and extended
        systems. In Proceedings of the 32nd International Conference on Neural Information Processing
-       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441–4451.
+       Systems (NIPS'18). Curran Associates Inc., Red Hook, NY, USA, 4441-4451.
     """

     def __init__(
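
The docstring edits in se_a.py and se_a_mask.py swap the en dash in the page range "4441–4451" for an ASCII hyphen, consistent with ruff's RUF002 check for ambiguous Unicode characters in docstrings. A sketch, assuming RUF002 semantics:

    def cited():
        """NIPS'18, pages 4441-4451 (ASCII hyphen-minus).

        The pre-fix text used an en dash, "4441–4451"; RUF002 reports
        such look-alikes because they are easy to mistake for ASCII
        characters when reading or grepping the source.
        """


    print(cited.__doc__)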
4 changes: 2 additions & 2 deletions deepmd/descriptor/se_atten.py
@@ -1057,7 +1057,7 @@ def _filter_lower(
         reuse=None,
     ):
         """Input env matrix, returns R.G."""
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         # cut-out inputs
         # with natom x (nei_type_i x 4)
         inputs_i = tf.slice(inputs, [0, start_index * 4], [-1, incrs_index * 4])
@@ -1260,7 +1260,7 @@ def _filter(
         nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0]
         # natom x (nei x 4)
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         outputs_size_2 = self.n_axis_neuron

         start_index = 0
2 changes: 1 addition & 1 deletion deepmd/descriptor/se_r.py
@@ -638,7 +638,7 @@ def _filter_r(
         trainable=True,
     ):
         # natom x nei
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         with tf.variable_scope(name, reuse=reuse):
             start_index = 0
             xyz_scatter_total = []
2 changes: 1 addition & 1 deletion deepmd/descriptor/se_t.py
@@ -633,7 +633,7 @@ def _filter(
     ):
         # natom x (nei x 4)
         shape = inputs.get_shape().as_list()
-        outputs_size = [1] + self.filter_neuron
+        outputs_size = [1, *self.filter_neuron]
         with tf.variable_scope(name, reuse=reuse):
             start_index_i = 0
             result = None
2 changes: 1 addition & 1 deletion deepmd/entrypoints/ipi.py
@@ -24,7 +24,7 @@ def _program(name: str, args: List[str]):
     args : list of str
         list of arguments
     """
-    return subprocess.call([os.path.join(ROOT_DIR, name)] + args, close_fds=False)
+    return subprocess.call([os.path.join(ROOT_DIR, name), *args], close_fds=False)


 def dp_ipi():
8 changes: 4 additions & 4 deletions deepmd/fit/dos.py
@@ -98,8 +98,8 @@ def __init__(
         numb_aparam: int = 0,
         numb_dos: int = 300,
         rcond: Optional[float] = None,
-        trainable: List[bool] = None,
-        seed: int = None,
+        trainable: Optional[List[bool]] = None,
+        seed: Optional[int] = None,
         activation_function: str = "tanh",
         precision: str = "default",
         uniform_seed: bool = False,
@@ -380,8 +380,8 @@ def build(
         self,
         inputs: tf.Tensor,
         natoms: tf.Tensor,
-        input_dict: dict = None,
-        reuse: bool = None,
+        input_dict: Optional[dict] = None,
+        reuse: Optional[bool] = None,
         suffix: str = "",
     ) -> tf.Tensor:
         """Build the computational graph for fitting net.
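
Wrapping the defaulted-to-None parameters in Optional[...] here (and in calculator.py above and ener.py below) matches ruff's RUF013 rule against implicit Optional annotations. A minimal sketch:

    from typing import List, Optional


    # RUF013 flags the implicit form, where the annotation contradicts
    # the default:
    #     def fit(trainable: List[bool] = None): ...
    # The explicit form admits None in the type:
    def fit(trainable: Optional[List[bool]] = None) -> List[bool]:
        return [True] if trainable is None else trainable


    print(fit())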
4 changes: 3 additions & 1 deletion deepmd/infer/deep_tensor.py
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 from typing import (
     TYPE_CHECKING,
+    ClassVar,
+    Dict,
     List,
     Optional,
     Tuple,
@@ -39,7 +41,7 @@ class DeepTensor(DeepEval):
         The input map for tf.import_graph_def. Only work with default tf graph
     """

-    tensors = {
+    tensors: ClassVar[Dict[str, str]] = {
         # descriptor attrs
         "t_ntypes": "descrpt_attr/ntypes:0",
         "t_rcut": "descrpt_attr/rcut:0",
4 changes: 2 additions & 2 deletions deepmd/loss/ener.py
@@ -388,9 +388,9 @@ def __init__(
         limit_pref_ae: float = 0.0,
         start_pref_pf: float = 0.0,
         limit_pref_pf: float = 0.0,
-        relative_f: float = None,
+        relative_f: Optional[float] = None,
         enable_atom_ener_coeff: bool = False,
-        use_spin: list = None,
+        use_spin: Optional[list] = None,
     ) -> None:
         self.starter_learning_rate = starter_learning_rate
         self.start_pref_e = start_pref_e
2 changes: 1 addition & 1 deletion deepmd/model/model_stat.py
@@ -58,7 +58,7 @@ def make_stat_input(data, nbatches, merge_sys=True):


 def merge_sys_stat(all_stat):
-    first_key = list(all_stat.keys())[0]
+    first_key = next(iter(all_stat.keys()))
     nsys = len(all_stat[first_key])
     ret = defaultdict(list)
     for ii in range(nsys):
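
Replacing `list(all_stat.keys())[0]` with `next(iter(all_stat.keys()))` is ruff's RUF015 autofix (the same pattern appears twice in trainer.py below): it reads one element from the iterator instead of materialising every key. A minimal sketch:

    all_stat = {"energy": [1.0], "force": [2.0]}

    # RUF015 flags building a full key list just to index item 0:
    first_key_old = list(all_stat.keys())[0]
    # and autofixes it to a single iterator step:
    first_key_new = next(iter(all_stat.keys()))

    assert first_key_old == first_key_new == "energy"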
2 changes: 1 addition & 1 deletion deepmd/nvnmd/entrypoints/wrap.py
@@ -145,7 +145,7 @@ def wrap(self):
         nvnmd_cfg.save(nvnmd_cfg.config_file)
         head = self.wrap_head(nhs, nws)
         # output model
-        hs = [] + head
+        hs = [*head]
         for d in datas:
             hs.extend(d)

4 changes: 2 additions & 2 deletions deepmd/train/run_options.py
@@ -45,7 +45,7 @@


 # http://patorjk.com/software/taag. Font:Big"
-WELCOME = (  # noqa
+WELCOME = (
     r" _____ _____ __ __ _____ _ _ _ ",
     r"| __ \ | __ \ | \/ || __ \ | | (_)| | ",
     r"| | | | ___ ___ | |__) || \ / || | | | ______ | | __ _ | |_ ",
@@ -71,7 +71,7 @@
     f"build float prec: {global_float_prec}",
     f"build variant: {GLOBAL_CONFIG['dp_variant']}",
     f"build with tf inc: {GLOBAL_CONFIG['tf_include_dir']}",
-    f"build with tf lib: {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}",  # noqa
+    f"build with tf lib: {GLOBAL_CONFIG['tf_libs'].replace(';', _sep)}",
 )

(The runs of spaces inside the WELCOME banner art did not survive this plain-text rendering.)
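
The two deleted `# noqa` comments in run_options.py are consistent with ruff's RUF100 check (unused noqa), newly enabled via the `RUF` set: a suppression that no longer matches any reported violation is itself flagged and removed by `ruff --fix`. A sketch of the pattern:

    # Pre-fix (RUF100 reports the directive as unused):
    #     WELCOME = (  # noqa
    # Post-fix, after `ruff --fix` strips it:
    WELCOME = (
        "DeePMD-kit",
    )

    print(WELCOME[0])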
8 changes: 4 additions & 4 deletions deepmd/train/trainer.py
@@ -250,7 +250,7 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix=""):
         if not self.multi_task_mode:
             single_data = data
         else:
-            single_data = data[list(data.keys())[0]]
+            single_data = data[next(iter(data.keys()))]
         if self.ntypes < single_data.get_ntypes():
             raise ValueError(
                 "The number of types of the training data is %d, but that of the "
@@ -373,7 +373,7 @@ def _build_network(self, data, suffix=""):
         if not self.multi_task_mode:
             self._get_place_horders(data.get_data_dict())
         else:
-            self._get_place_horders(data[list(data.keys())[0]].get_data_dict())
+            self._get_place_horders(data[next(iter(data.keys()))].get_data_dict())

         self.place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type")
         self.place_holders["natoms_vec"] = tf.placeholder(
@@ -467,7 +467,7 @@ def _build_training(self):
                 var_list=trainable_variables,
                 name="train_step",
             )
-            train_ops = [apply_op] + self._extra_train_ops
+            train_ops = [apply_op, *self._extra_train_ops]
             self.train_op = tf.group(*train_ops)
         else:
             self.train_op = {}
@@ -479,7 +479,7 @@ def _build_training(self):
                     var_list=trainable_variables,
                     name=f"train_step_{fitting_key}",
                 )
-                train_ops = [apply_op] + self._extra_train_ops
+                train_ops = [apply_op, *self._extra_train_ops]
                 self.train_op[fitting_key] = tf.group(*train_ops)
         log.info("built training")

15 changes: 9 additions & 6 deletions deepmd/utils/argcheck.py
@@ -177,7 +177,7 @@ def descrpt_se_a_args():
     doc_axis_neuron = "Size of the submatrix of G (embedding matrix)."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net is trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -263,7 +263,8 @@ def descrpt_se_a_tpe_args():
     doc_type_nlayer = "number of hidden layers of type embedding net"
     doc_numb_aparam = "dimension of atomic parameter. if set to a value > 0, the atomic parameters are embedded."

-    return descrpt_se_a_args() + [
+    return [
+        *descrpt_se_a_args(),
         Argument("type_nchanl", int, optional=True, default=4, doc=doc_type_nchanl),
         Argument("type_nlayer", int, optional=True, default=2, doc=doc_type_nlayer),
         Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam),
@@ -280,7 +281,7 @@ def descrpt_se_r_args():
     doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net are trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -344,7 +345,7 @@ def descrpt_se_atten_common_args():
     doc_axis_neuron = "Size of the submatrix of G (embedding matrix)."
     doc_activation_function = f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
-    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
+    doc_type_one_side = r"If true, the embedding network parameters vary by types of neighbor atoms only, so there will be $N_\text{types}$ sets of embedding network parameters. Otherwise, the embedding network parameters vary by types of centric atoms and types of neighbor atoms, so there will be $N_\text{types}^2$ sets of embedding network parameters."
     doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_trainable = "If the parameters in the embedding net is trainable"
     doc_seed = "Random seed for parameter initialization"
@@ -397,7 +398,8 @@ def descrpt_se_atten_args():
     doc_smooth_type_embdding = "When using stripped type embedding, whether to dot smooth factor on the network output of type embedding to keep the network smooth, instead of setting `set_davg_zero` to be True."
     doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"

-    return descrpt_se_atten_common_args() + [
+    return [
+        *descrpt_se_atten_common_args(),
         Argument(
             "stripped_type_embedding",
             bool,
@@ -422,7 +424,8 @@
 def descrpt_se_atten_v2_args():
     doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"

-    return descrpt_se_atten_common_args() + [
+    return [
+        *descrpt_se_atten_common_args(),
         Argument(
             "set_davg_zero", bool, optional=True, default=False, doc=doc_set_davg_zero
         ),

(In the three doc_type_one_side pairs above, the changed character is a non-ASCII look-alike that does not survive this plain-text rendering; the removed and added lines are otherwise identical.)
4 changes: 1 addition & 3 deletions deepmd/utils/data_system.py
@@ -618,9 +618,7 @@ def _check_type_map_consistency(self, type_map_list):
                 min_len = min([len(ii), len(ret)])
                 for idx in range(min_len):
                     if ii[idx] != ret[idx]:
-                        raise RuntimeError(
-                            f"inconsistent type map: {str(ret)} {str(ii)}"
-                        )
+                        raise RuntimeError(f"inconsistent type map: {ret!s} {ii!s}")
                 if len(ii) > len(ret):
                     ret = ii
         return ret
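
The f-string rewrites here and in finetune.py and multi_init.py below, from f"{str(x)}" to f"{x!s}", are ruff's RUF010 autofix, which prefers conversion flags (!s, !r, !a) over explicit str()/repr()/ascii() calls inside replacement fields. A minimal sketch:

    ret = ["H", "O"]

    # RUF010 flags the explicit call inside the field:
    msg_old = f"inconsistent type map: {str(ret)}"
    # and autofixes it to the conversion flag:
    msg_new = f"inconsistent type map: {ret!s}"

    assert msg_old == msg_new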
6 changes: 2 additions & 4 deletions deepmd/utils/finetune.py
@@ -56,7 +56,7 @@ def replace_model_params_with_pretrained_model(
         if i not in pretrained_type_map:
             out_line_type.append(i)
     assert not out_line_type, (
-        f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+        f"{out_line_type!s} type(s) not contained in the pretrained model! "
         "Please choose another suitable one."
     )
     if cur_type_map != pretrained_type_map:
@@ -103,9 +103,7 @@ def replace_model_params_with_pretrained_model(
             # keep some params that are irrelevant to model structures (need to discuss) TODO
             if "trainable" in cur_para.keys():
                 target_para["trainable"] = cur_para["trainable"]
-            log.info(
-                f"Change the '{config_key}' from {str(cur_para)} to {str(target_para)}."
-            )
+            log.info(f"Change the '{config_key}' from {cur_para!s} to {target_para!s}.")
             jdata["model"][config_key] = target_para

     return jdata, cur_type_map
4 changes: 2 additions & 2 deletions deepmd/utils/multi_init.py
@@ -54,7 +54,7 @@ def replace_model_params_with_frz_multi_model(
         if i not in pretrained_type_map:
             out_line_type.append(i)
     assert not out_line_type, (
-        f"{str(out_line_type)} type(s) not contained in the pretrained model! "
+        f"{out_line_type!s} type(s) not contained in the pretrained model! "
         "Please choose another suitable one."
     )
     if cur_type_map != pretrained_type_map:
@@ -169,5 +169,5 @@ def _change_sub_config(jdata: Dict[str, Any], src_jdata: Dict[str, Any], sub_key
     # keep some params that are irrelevant to model structures (need to discuss) TODO
     if "trainable" in cur_para.keys():
         target_para["trainable"] = cur_para["trainable"]
-    log.info(f"Change the '{sub_key}' from {str(cur_para)} to {str(target_para)}.")
+    log.info(f"Change the '{sub_key}' from {cur_para!s} to {target_para!s}.")
     jdata[sub_key] = target_para
4 changes: 2 additions & 2 deletions deepmd/utils/network.py
@@ -183,11 +183,11 @@ def embedding_net(
     References
     ----------
     .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identitymappings
-       in deep residual networks. InComputer Vision – ECCV 2016,pages 630–645. Springer
+       in deep residual networks. InComputer Vision - ECCV 2016,pages 630-645. Springer
        International Publishing, 2016.
     """
     input_shape = xx.get_shape().as_list()
-    outputs_size = [input_shape[1]] + network_size
+    outputs_size = [input_shape[1], *network_size]

     for ii in range(1, len(outputs_size)):
         w_initializer = tf.random_normal_initializer(
(The remaining nine changed files, including the pyproject.toml edits described in the commit message, did not load in this view.)
