diff --git a/backend/read_env.py b/backend/read_env.py index edc3600115..3b217926d6 100644 --- a/backend/read_env.py +++ b/backend/read_env.py @@ -43,7 +43,7 @@ def get_argument_from_env() -> tuple[str, list, list, dict, str, str]: """ cmake_args = [] extra_scripts = {} - # get variant option from the environment varibles, available: cpu, cuda, rocm + # get variant option from the environment variables, available: cpu, cuda, rocm dp_variant = os.environ.get("DP_VARIANT", "cpu").lower() if dp_variant == "cpu" or dp_variant == "": cmake_minimum_required_version = "3.16" diff --git a/deepmd/__init__.py b/deepmd/__init__.py index 1ce4beb723..6f2b65ba63 100644 --- a/deepmd/__init__.py +++ b/deepmd/__init__.py @@ -17,7 +17,7 @@ def DeepPotential(*args, **kwargs): - """Factory function that forwards to DeepEval (for compatbility + """Factory function that forwards to DeepEval (for compatibility and performance). Parameters ---------- diff --git a/deepmd/backend/suffix.py b/deepmd/backend/suffix.py index d694b43488..e77aecb5d9 100644 --- a/deepmd/backend/suffix.py +++ b/deepmd/backend/suffix.py @@ -23,7 +23,7 @@ def format_model_suffix( """Check and format the suffixes of a filename. When preferred_backend is not given, this method checks the suffix of the filename - is within the suffixes of the any backends (with the given feature) and doesn't do formating. + is within the suffixes of any backend (with the given feature) and doesn't do formatting. When preferred_backend is given, strict_prefer must be given. If strict_prefer is True and the suffix is not within the suffixes of the preferred backend, or strict_prefer is False and the suffix is not within the suffixes of the any backend with the given feature, diff --git a/deepmd/calculator.py b/deepmd/calculator.py index 032fa2bcfa..6f863ab09b 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -32,7 +32,7 @@ class DP(Calculator): """Implementation of ASE deepmd calculator. - Implemented propertie are `energy`, `forces` and `stress` + Implemented properties are `energy`, `forces` and `stress` Parameters ---------- diff --git a/deepmd/common.py b/deepmd/common.py index fdfeef0e6d..185722f4a8 100644 --- a/deepmd/common.py +++ b/deepmd/common.py @@ -77,7 +77,7 @@ def select_idx_map(atom_types: np.ndarray, select_types: np.ndarray) -> np.ndarr Parameters ---------- atom_types : np.ndarray - array specifing type for each atoms as integer + array specifying type for each atom as integer select_types : np.ndarray types of atoms you want to find indices for @@ -126,7 +126,7 @@ def make_default_mesh(pbc: bool, mixed_type: bool) -> np.ndarray: def j_deprecated( jdata: dict[str, "_DICT_VAL"], key: str, deprecated_key: list[str] = [] ) -> "_DICT_VAL": - """Assert that supplied dictionary conaines specified key. + """Assert that supplied dictionary contains specified key. 
Parameters ---------- @@ -218,7 +218,7 @@ def get_np_precision(precision: "_PRECISION") -> np.dtype: Returns ------- np.dtype - numpy presicion constant + numpy precision constant Raises ------ diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index b615c81d1f..4e7620bdda 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -158,7 +158,7 @@ def forward_common_atomic( Parameters ---------- extended_coord - extended coodinates, shape: nf x (nall x 3) + extended coordinates, shape: nf x (nall x 3) extended_atype extended atom typs, shape: nf x nall for a type < 0 indicating the atomic is virtual. diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index fe049021fe..a621ece27e 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -100,7 +100,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region + coordinates in extended region extended_atype atomic type in extended region nlist @@ -169,7 +169,7 @@ def serialize(self) -> dict: ) return dd - # for subclass overriden + # to be overridden by subclasses base_descriptor_cls = BaseDescriptor """The base descriptor class.""" base_fitting_cls = BaseFitting diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 880c92f504..5d86472674 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -162,7 +162,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region, (nframes, nall * 3) + coordinates in extended region, (nframes, nall * 3) extended_atype atomic type in extended region, (nframes, nall) nlist @@ -341,7 +341,7 @@ class DPZBLLinearEnergyAtomicModel(LinearEnergyAtomicModel): Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. """ diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index 6c0fc88e2c..99a92c23a4 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -152,7 +152,7 @@ def make_atom_mask( self, atype: t_tensor, ) -> t_tensor: - """The atoms with type < 0 are treated as virutal atoms, + """The atoms with type < 0 are treated as virtual atoms, which serves as place-holders for multi-frame calculations with different number of atoms in different frames. @@ -164,7 +164,7 @@ def make_atom_mask( Returns ------- mask - True for real atoms and False for virutal atoms. + True for real atoms and False for virtual atoms. 
""" # supposed to be supported by all backends diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py index 5c75229e49..f834754195 100644 --- a/deepmd/dpmodel/common.py +++ b/deepmd/dpmodel/common.py @@ -30,7 +30,7 @@ "int64": np.int64, "bool": bool, "default": GLOBAL_NP_FLOAT_PRECISION, - # NumPy doesn't have bfloat16 (and does't plan to add) + # NumPy doesn't have bfloat16 (and doesn't plan to add) # ml_dtypes is a solution, but it seems not supporting np.save/np.load # hdf5 hasn't supported bfloat16 as well (see https://forum.hdfgroup.org/t/11975) "bfloat16": ml_dtypes.bfloat16, diff --git a/deepmd/dpmodel/descriptor/descriptor.py b/deepmd/dpmodel/descriptor/descriptor.py index 6d0644f856..746c02eb68 100644 --- a/deepmd/dpmodel/descriptor/descriptor.py +++ b/deepmd/dpmodel/descriptor/descriptor.py @@ -110,7 +110,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index add9cb9f71..2f2b12e03c 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -358,11 +358,11 @@ def get_dim_emb(self) -> int: return self.se_atten.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -385,7 +385,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -459,7 +459,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -602,7 +602,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -793,11 +793,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. 
diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index 285dc724a7..1dbb14961e 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -624,11 +624,11 @@ def get_dim_emb(self) -> int: return self.repformers.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -653,7 +653,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -1021,7 +1021,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 3aa8882db1..4eb14f29cf 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -63,7 +63,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor {self.descrpt_list[0].__class__.__name__} does not match others" + ), f"number of atom types in {ii}th descriptor {self.descrpt_list[0].__class__.__name__} does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type hybrid_sel = self.get_sel() self.nlist_cut_idx: list[np.ndarray] = [] @@ -161,7 +161,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -284,7 +284,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/make_base_descriptor.py b/deepmd/dpmodel/descriptor/make_base_descriptor.py index a9b434d5f5..b9c1e93387 100644 --- a/deepmd/dpmodel/descriptor/make_base_descriptor.py +++ b/deepmd/dpmodel/descriptor/make_base_descriptor.py @@ -116,7 +116,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
""" pass @@ -194,7 +194,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/repformers.py b/deepmd/dpmodel/descriptor/repformers.py index ec8be21a53..ef79ecdd28 100644 --- a/deepmd/dpmodel/descriptor/repformers.py +++ b/deepmd/dpmodel/descriptor/repformers.py @@ -307,11 +307,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -1480,7 +1480,7 @@ def call( """ Parameters ---------- - g1_ext : nf x nall x ng1 extended single-atom chanel + g1_ext : nf x nall x ng1 extended single-atom channel g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) @@ -1489,7 +1489,7 @@ def call( Returns ------- - g1: nf x nloc x ng1 updated single-atom chanel + g1: nf x nloc x ng1 updated single-atom channel g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant """ diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index d29ce8862e..feebe57af7 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -281,7 +281,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -359,7 +359,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -486,7 +486,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -525,7 +525,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- diff --git a/deepmd/dpmodel/descriptor/se_r.py b/deepmd/dpmodel/descriptor/se_r.py index c9d27175d6..6ed1d943c1 100644 --- a/deepmd/dpmodel/descriptor/se_r.py +++ b/deepmd/dpmodel/descriptor/se_r.py @@ -46,7 +46,7 @@ @BaseDescriptor.register("se_e2_r") @BaseDescriptor.register("se_r") class DescrptSeR(NativeOP, BaseDescriptor): - r"""DeepPot-SE_R constructed from only the radial imformation of atomic configurations. + r"""DeepPot-SE_R constructed from only the radial information of atomic configurations. 
Parameters @@ -233,7 +233,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -303,7 +303,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -404,7 +404,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/se_t.py b/deepmd/dpmodel/descriptor/se_t.py index f2ea751c50..4dc4c965fb 100644 --- a/deepmd/dpmodel/descriptor/se_t.py +++ b/deepmd/dpmodel/descriptor/se_t.py @@ -225,7 +225,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -279,7 +279,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. not used by this descriptor. Returns ------- @@ -405,7 +405,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/descriptor/se_t_tebd.py b/deepmd/dpmodel/descriptor/se_t_tebd.py index 147a335926..ca89c23968 100644 --- a/deepmd/dpmodel/descriptor/se_t_tebd.py +++ b/deepmd/dpmodel/descriptor/se_t_tebd.py @@ -199,11 +199,11 @@ def get_dim_emb(self) -> int: return self.se_ttebd.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -226,7 +226,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ raise NotImplementedError @@ -300,7 +300,7 @@ def call( nlist The neighbor list. shape: nf x nloc x nnei mapping - The index mapping from extended to lcoal region. not used by this descriptor. + The index mapping from extended to local region. 
not used by this descriptor. Returns ------- @@ -418,7 +418,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -576,11 +576,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index 62aafc6207..ab5cf38f9d 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -56,7 +56,7 @@ class GeneralFitting(NativeOP, BaseFitting): neuron Number of neurons :math:`N` in each hidden layer of the fitting net bias_atom_e - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt Time-step `dt` in the resnet construction: :math:`y = x + dt * \phi (Wx + b)` @@ -88,9 +88,9 @@ class GeneralFitting(NativeOP, BaseFitting): exclude_types: list[int] Atomic contributions of the excluded atom types are set zero. remove_vaccum_contribution: list[bool], optional - Remove vaccum contribution before the bias is added. The list assigned each + Remove vacuum contribution before the bias is added. The list assigns a value to each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same - length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + length as `ntypes` signaling whether or not to remove the vacuum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. seed: Optional[Union[int, list[int]]] @@ -371,10 +371,10 @@ def _call_common( ) xx = descriptor if self.remove_vaccum_contribution is not None: - # TODO: comput the input for vaccum when setting remove_vaccum_contribution - # Idealy, the input for vaccum should be computed; + # TODO: compute the input for vacuum when setting remove_vaccum_contribution + # Ideally, the input for vacuum should be computed; # we consider it as always zero for convenience. - # Needs a compute_input_stats for vaccum passed from the + # Needs a compute_input_stats for vacuum passed from the # descriptor. xx_zeros = xp.zeros_like(xx) else: @@ -420,7 +420,7 @@ def _call_common( axis=-1, ) - # calcualte the prediction + # calculate the prediction if not self.mixed_types: outs = xp.zeros( [nf, nloc, net_dim_out], dtype=get_xp_precision(xp, self.precision) diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 893853bb38..12d4e9f7af 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -28,7 +28,7 @@ @GeneralFitting.register("invar") @fitting_check_output class InvarFitting(GeneralFitting): - r"""Fitting the energy (or a rotationally invariant porperty of `dim_out`) of the system. The force and the virial can also be trained. + r"""Fitting the energy (or a rotationally invariant property of `dim_out`) of the system. The force and the virial can also be trained. 
Lets take the energy fitting task as an example. The potential energy :math:`E` is a fitting network function of the descriptor :math:`\mathcal{D}`: @@ -90,7 +90,7 @@ class InvarFitting(GeneralFitting): Suppose that we have :math:`N_l` hidden layers in the fitting net, this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. activation_function The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN| precision diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py index 1a8fe44aae..a1b6fe7638 100644 --- a/deepmd/dpmodel/fitting/property_fitting.py +++ b/deepmd/dpmodel/fitting/property_fitting.py @@ -20,7 +20,7 @@ @InvarFitting.register("property") class PropertyFittingNet(InvarFitting): - r"""Fitting the rotationally invariant porperties of `task_dim` of the system. + r"""Fitting the rotationally invariant properties of `task_dim` of the system. Parameters ---------- diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index 2b1e74c8de..c1f3e4630b 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -52,7 +52,7 @@ class DeepEval(DeepEvalBackend): - """NumPy backend implementaion of DeepEval. + """NumPy backend implementation of DeepEval. Parameters ---------- @@ -374,5 +374,5 @@ def _get_output_shape(self, odef, nframes, natoms): raise RuntimeError("unknown category") def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" return json.loads(self.model.get_model_def_script()) diff --git a/deepmd/dpmodel/model/base_model.py b/deepmd/dpmodel/model/base_model.py index 3f71003bad..777697b4b7 100644 --- a/deepmd/dpmodel/model/base_model.py +++ b/deepmd/dpmodel/model/base_model.py @@ -171,7 +171,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/model/dp_model.py b/deepmd/dpmodel/model/dp_model.py index eda0414398..769bba0b20 100644 --- a/deepmd/dpmodel/model/dp_model.py +++ b/deepmd/dpmodel/model/dp_model.py @@ -27,7 +27,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index dc90f10da7..afe2eaffb6 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -190,7 +190,7 @@ def call_lower( Parameters ---------- extended_coord - coodinates in extended region. nf x (nall x 3). + coordinates in extended region. nf x (nall x 3). extended_atype atomic type in extended region. nf x nall. nlist @@ -319,7 +319,7 @@ def format_nlist( the `nlist` is pad with -1. 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), - the nearest sum(sel) neighbors will be preseved. + the nearest sum(sel) neighbors will be preserved. 
Known limitations: @@ -329,7 +329,7 @@ def format_nlist( Parameters ---------- extended_coord - coodinates in extended region. nf x nall x 3 + coordinates in extended region. nf x nall x 3 extended_atype atomic type in extended region. nf x nall nlist @@ -340,7 +340,7 @@ def format_nlist( Returns ------- formated_nlist - the formated nlist. + the formatted nlist. """ n_nf, n_nloc, n_nnei = nlist.shape diff --git a/deepmd/dpmodel/model/transform_output.py b/deepmd/dpmodel/model/transform_output.py index 928c33f3bd..107455a6d5 100644 --- a/deepmd/dpmodel/model/transform_output.py +++ b/deepmd/dpmodel/model/transform_output.py @@ -32,7 +32,7 @@ def fit_output_to_model_output( atom_axis = -(len(shap) + 1) if vdef.reducible: kk_redu = get_reduce_name(kk) - # cast to energy prec brefore reduction + # cast to energy prec before reduction model_ret[kk_redu] = xp.sum( vv.astype(GLOBAL_ENER_FLOAT_PRECISION), axis=atom_axis ) diff --git a/deepmd/dpmodel/output_def.py b/deepmd/dpmodel/output_def.py index 2ceb4f412a..bfee338d64 100644 --- a/deepmd/dpmodel/output_def.py +++ b/deepmd/dpmodel/output_def.py @@ -166,7 +166,7 @@ class OutputVariableDef: r_differentiable If the variable is differentiated with respect to coordinates of atoms. Only reducible variable are differentiable. - Negative derivative w.r.t. coordinates will be calcualted. (e.g. force) + Negative derivative w.r.t. coordinates will be calculated. (e.g. force) c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reducible variable @@ -178,7 +178,7 @@ class OutputVariableDef: category : int The category of the output variable. r_hessian : bool - If hessian is requred + If hessian is required magnetic : bool If the derivatives of variable have magnetic parts. intensive : bool diff --git a/deepmd/dpmodel/utils/neighbor_stat.py b/deepmd/dpmodel/utils/neighbor_stat.py index 744a4476cd..43ca2cadd1 100644 --- a/deepmd/dpmodel/utils/neighbor_stat.py +++ b/deepmd/dpmodel/utils/neighbor_stat.py @@ -21,7 +21,7 @@ class NeighborStatOP(NativeOP): - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py index 339035ff4e..5140a88c97 100644 --- a/deepmd/dpmodel/utils/network.py +++ b/deepmd/dpmodel/utils/network.py @@ -600,7 +600,7 @@ class EN(T_Network): resnet_dt Use time step at the resnet architecture. precision - Floating point precision for the model paramters. + Floating point precision for the model parameters. seed : int, optional Random seed. bias : bool, Optional @@ -704,7 +704,7 @@ class FN(T_EmbeddingNet): resnet_dt Use time step at the resnet architecture. precision - Floating point precision for the model paramters. + Floating point precision for the model parameters. bias_out The last linear layer has bias. seed : int, optional @@ -794,7 +794,7 @@ def deserialize(cls, data: dict) -> "FittingNet": class NetworkCollection: """A collection of networks for multiple elements. - The number of dimesions for types might be 0, 1, or 2. + The number of dimensions for types might be 0, 1, or 2. 
- 0: embedding or fitting with type embedding, in () - 1: embedding with type_one_side, or fitting, in (type_i) - 2: embedding without type_one_side, in (type_i, type_j) diff --git a/deepmd/dpmodel/utils/nlist.py b/deepmd/dpmodel/utils/nlist.py index 3ef17fc6b9..b827032588 100644 --- a/deepmd/dpmodel/utils/nlist.py +++ b/deepmd/dpmodel/utils/nlist.py @@ -48,7 +48,7 @@ def extend_input_and_build_neighbor_list( return extended_coord, extended_atype, mapping, nlist -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def build_neighbor_list( coord: np.ndarray, atype: np.ndarray, @@ -57,7 +57,7 @@ def build_neighbor_list( sel: Union[int, list[int]], distinguish_types: bool = True, ) -> np.ndarray: - """Build neightbor list for a single frame. keeps nsel neighbors. + """Build neighbor list for a single frame. Keeps nsel neighbors. Parameters ---------- @@ -185,7 +185,7 @@ def get_multiple_nlist_key(rcut: float, nsel: int) -> str: return str(rcut) + "_" + str(nsel) -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def build_multiple_neighbor_list( coord: np.ndarray, nlist: np.ndarray, @@ -243,7 +243,7 @@ def build_multiple_neighbor_list( return ret -## translated from torch implemantation by chatgpt +## translated from torch implementation by chatgpt def extend_coord_with_ghosts( coord: np.ndarray, atype: np.ndarray, @@ -272,7 +272,7 @@ def extend_coord_with_ghosts( extended_atype: np.ndarray extended atom type of shape [-1, nall]. index_mapping: np.ndarray - maping extended index to the local index + mapping extended index to the local index """ xp = array_api_compat.array_namespace(coord, atype) diff --git a/deepmd/dpmodel/utils/region.py b/deepmd/dpmodel/utils/region.py index 8102020827..8b24cbf948 100644 --- a/deepmd/dpmodel/utils/region.py +++ b/deepmd/dpmodel/utils/region.py @@ -59,7 +59,7 @@ def normalize_coord( Parameters ---------- coord : np.ndarray - orignal coordinates of shape [*, na, 3]. + original coordinates of shape [*, na, 3]. cell : np.ndarray simulation cell shape [*, 3, 3]. diff --git a/deepmd/driver.py b/deepmd/driver.py index 998edcbc18..30916259aa 100644 --- a/deepmd/driver.py +++ b/deepmd/driver.py @@ -3,7 +3,7 @@ # Derived from https://github.com/deepmodeling/dpdata/blob/18a0ed5ebced8b1f6887038883d46f31ae9990a4/dpdata/plugins/deepmd.py#L361-L443 # under LGPL-3.0-or-later license. -# The original deepmd driver maintained in the dpdata package will be overriden. +# The original deepmd driver maintained in the dpdata package will be overridden. # The class in the dpdata package needs to handle different situations for v1 and v2 interface, # which is too complex with the development of deepmd-kit. # So, it will be a good idea to ship it with DeePMD-kit itself. 
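Since the hunks above touch the docstrings of `extend_coord_with_ghosts` and `build_neighbor_list`, a hedged usage sketch of how the two compose may help. It is illustrative only: the flattened nf x (nloc*3) and nf x 9 shapes and the argument order are read off the docstrings, and the `rcut`/`sel` values are invented.

```python
import numpy as np

from deepmd.dpmodel.utils.nlist import (
    build_neighbor_list,
    extend_coord_with_ghosts,
)

nf, nloc, rcut = 1, 4, 6.0
coord = 5.0 * np.random.rand(nf, nloc * 3)               # assumed nf x (nloc*3)
atype = np.zeros((nf, nloc), dtype=np.int64)             # nf x nloc
cell = np.tile(10.0 * np.eye(3).reshape(1, 9), (nf, 1))  # assumed nf x 9

# Replicate atoms into periodic images within rcut; `mapping` sends each
# extended (ghost) index back to its local index, per the docstring above.
ecoord, eatype, mapping = extend_coord_with_ghosts(coord, atype, cell, rcut)

# distinguish_types pairs with the mixed_types() docstrings elsewhere in
# this patch: a mixed-type descriptor wants a neighbor list that does NOT
# distinguish atomic types.
nlist = build_neighbor_list(
    ecoord,
    eatype,
    nloc,
    rcut,
    sel=[20],
    distinguish_types=False,
)
```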
diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index ad445fdea1..d9ccf392f5 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -248,7 +248,7 @@ def save_txt_file( header : str, optional header string to use in file, by default "" append : bool, optional - if true file will be appended insted of overwriting, by default False + if true, file will be appended instead of overwritten, by default False """ flags = "ab" if append else "w" with fname.open(flags) as fp: @@ -1015,7 +1015,7 @@ def test_polar( detail_file : Optional[str] file where test details will be output atomic : bool - wheter to use glovbal version of polar potential + whether to use global version of polar potential Returns ------- diff --git a/deepmd/env.py b/deepmd/env.py index 605dfeed99..50e52fd719 100644 --- a/deepmd/env.py +++ b/deepmd/env.py @@ -102,7 +102,7 @@ def set_default_nthreads(): def get_default_nthreads() -> tuple[int, int]: - """Get paralellism settings. + """Get parallelism settings. The method will first read the environment variables with the prefix `DP_`. If not found, it will read the environment variables with the prefix `TF_` diff --git a/deepmd/infer/__init__.py b/deepmd/infer/__init__.py index 5678494023..8a8afb165a 100644 --- a/deepmd/infer/__init__.py +++ b/deepmd/infer/__init__.py @@ -18,7 +18,7 @@ def DeepPotential(*args, **kwargs) -> "DeepEval": - """Factory function that forwards to DeepEval (for compatbility). + """Factory function that forwards to DeepEval (for compatibility). Parameters ---------- diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index 4d0134c37c..e08dc88674 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -281,7 +281,7 @@ def get_ntypes_spin(self) -> int: """Get the number of spin atom types of this model. Only used in old implement.""" def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" raise NotImplementedError("Not implemented in this backend.") @@ -548,5 +548,5 @@ def get_ntypes_spin(self) -> int: return self.deep_eval.get_ntypes_spin() def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" return self.deep_eval.get_model_def_script() diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 68100ba739..304aabdadc 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -378,7 +378,7 @@ def make_model_devi( frequency : int The number of steps that elapse between writing coordinates in a trajectory by a MD engine (such as Gromacs / LAMMPS). - This paramter is used to determine the index in the output file. + This parameter is used to determine the index in the output file. real_error : bool, default: False If True, calculate the RMS real error instead of model deviation. 
atomic : bool, default: False diff --git a/deepmd/loggers/loggers.py b/deepmd/loggers/loggers.py index 33b9497507..f42c032cfa 100644 --- a/deepmd/loggers/loggers.py +++ b/deepmd/loggers/loggers.py @@ -23,7 +23,7 @@ __all__ = ["set_log_handles"] -# logger formater +# logger formatter FFORMATTER = logging.Formatter( "[%(asctime)s] %(app_name)s %(levelname)-7s %(name)-45s %(message)s" ) @@ -61,7 +61,7 @@ def filter(self, record): class _MPIMasterFilter(logging.Filter): - """Filter that lets through only messages emited from rank==0.""" + """Filter that lets through only messages emitted from rank==0.""" def __init__(self, rank: int) -> None: super().__init__(name="MPI_master_log") @@ -138,7 +138,7 @@ def _open(self): return _MPIFileStream(self.baseFilename, self.MPI, self.mode) def setStream(self, stream): - """Stream canot be reasigned in MPI mode.""" + """Stream cannot be reassigned in MPI mode.""" raise NotImplementedError("Unable to do for MPI file handler!") @@ -254,7 +254,7 @@ def set_log_handles( fh.setFormatter(FFORMATTER_MPI) elif mpi_log == "workers": rank = MPI.COMM_WORLD.Get_rank() - # if file has suffix than inser rank number before suffix + # if file has suffix then insert rank number before suffix # e.g deepmd.log -> deepmd_.log # if no suffix is present, insert rank as suffix # e.g. deepmdlog -> deepmdlog. diff --git a/deepmd/pt/cxx_op.py b/deepmd/pt/cxx_op.py index d46f20a0bc..b0653522b2 100644 --- a/deepmd/pt/cxx_op.py +++ b/deepmd/pt/cxx_op.py @@ -76,7 +76,7 @@ def load_library(module_name: str) -> bool: "instead." ) from e error_message = ( - "This deepmd-kit package is inconsitent with PyTorch " + "This deepmd-kit package is inconsistent with PyTorch " f"Runtime, thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this PyTorch " "runtime." ) diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 8f0b686e7b..934cafdb47 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -70,7 +70,7 @@ class DeepEval(DeepEvalBackend): - """PyTorch backend implementaion of DeepEval. + """PyTorch backend implementation of DeepEval. Parameters ---------- @@ -601,7 +601,7 @@ def eval_typeebd(self) -> np.ndarray: return to_numpy_array(typeebd) def get_model_def_script(self) -> str: - """Get model defination script.""" + """Get model definition script.""" return self.model_def_script def eval_descriptor( diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index bd3c2b49ab..e26549581e 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -68,7 +68,7 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. """ @@ -150,7 +150,7 @@ def make_atom_mask( self, atype: torch.Tensor, ) -> torch.Tensor: - """The atoms with type < 0 are treated as virutal atoms, + """The atoms with type < 0 are treated as virtual atoms, which serves as place-holders for multi-frame calculations with different number of atoms in different frames. 
@@ -162,7 +162,7 @@ def make_atom_mask( Returns ------- mask - True for real atoms and False for virutal atoms. + True for real atoms and False for virtual atoms. """ # supposed to be supported by all backends @@ -202,7 +202,7 @@ def forward_common_atomic( Parameters ---------- extended_coord - extended coodinates, shape: nf x (nall x 3) + extended coordinates, shape: nf x (nall x 3) extended_atype extended atom typs, shape: nf x nall for a type < 0 indicating the atomic is virtual. diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index edb1253234..48c8d0d859 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -175,7 +175,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region + coordinates in extended region extended_atype atomic type in extended region nlist diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 0aa5afc67f..570fcdcc43 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -199,7 +199,7 @@ def forward_atomic( Parameters ---------- extended_coord - coodinates in extended region, (nframes, nall * 3) + coordinates in extended region, (nframes, nall * 3) extended_atype atomic type in extended region, (nframes, nall) nlist @@ -489,7 +489,7 @@ class DPZBLLinearEnergyAtomicModel(LinearEnergyAtomicModel): Mapping atom type to the name (str) of the type. For example `type_map[1]` gives the name of the type 1. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. """ diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 28a165d501..87e3027bc8 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -59,7 +59,7 @@ class PairTabAtomicModel(BaseAtomicModel): rcond : float, optional The condition number for the regression of atomic energy. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. """ @@ -104,7 +104,7 @@ def __init__( ) # self.model_type = "ener" - # self.model_version = MODEL_VERSION ## this shoud be in the parent class + # self.model_version = MODEL_VERSION ## this should be in the parent class if isinstance(sel, int): self.sel = sel diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py index 03173a7693..5d36606760 100644 --- a/deepmd/pt/model/descriptor/descriptor.py +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -129,7 +129,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
""" assert ( self.__class__ == base_class.__class__ diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 322fa3a12d..d3156f7c84 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -344,11 +344,11 @@ def get_dim_emb(self) -> int: return self.se_atten.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -371,7 +371,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -620,7 +620,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index 632efe5dbf..277aa4917f 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -343,11 +343,11 @@ def get_dim_emb(self) -> int: return self.repformers.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -373,7 +373,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
""" assert ( self.__class__ == base_class.__class__ @@ -819,7 +819,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py index c8730e3465..ba64f53ef7 100644 --- a/deepmd/pt/model/descriptor/hybrid.py +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -70,7 +70,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor does not match others" + ), f"number of atom types in {ii}th descriptor does not match others" # if hybrid sel is larger than sub sel, the nlist needs to be cut for each type self.nlist_cut_idx: list[torch.Tensor] = [] if self.mixed_types() and not all( @@ -168,7 +168,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -308,7 +308,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py index 5270c94112..31132f365e 100644 --- a/deepmd/pt/model/descriptor/repformer_layer.py +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -1105,7 +1105,7 @@ def forward( """ Parameters ---------- - g1_ext : nf x nall x ng1 extended single-atom chanel + g1_ext : nf x nall x ng1 extended single-atom channel g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) @@ -1114,7 +1114,7 @@ def forward( Returns ------- - g1: nf x nloc x ng1 updated single-atom chanel + g1: nf x nloc x ng1 updated single-atom channel g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant """ diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py index 023a84b3ee..81d96d4372 100644 --- a/deepmd/pt/model/descriptor/repformers.py +++ b/deepmd/pt/model/descriptor/repformers.py @@ -60,7 +60,7 @@ def border_op( "See documentation for DPA-2 for details." ) - # Note: this hack cannot actually save a model that can be runned using LAMMPS. + # Note: this hack cannot actually save a model that can be run using LAMMPS. torch.ops.deepmd.border_op = border_op @@ -342,11 +342,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. 
requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 8f3c7605d5..56cb1f5bc6 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -164,7 +164,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -342,7 +342,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -479,11 +479,11 @@ def get_dim_in(self) -> int: return self.dim_in def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py index 8f418c28f9..aab72f7e98 100644 --- a/deepmd/pt/model/descriptor/se_atten.py +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -298,11 +298,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 12677a3daf..36a70ef1a4 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -163,11 +163,11 @@ def get_dim_in(self) -> int: return 0 def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -190,12 +190,12 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ ), "Only descriptors of the same type can share params!" 
- # For SeR descriptors, the user-defined share-level + # For SeR descriptors, the user-defined share-level # shared_level: 0 if shared_level == 0: # link buffers @@ -473,7 +473,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 666eba6baf..7b83bcbd69 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -198,7 +198,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -372,7 +372,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -529,11 +529,11 @@ def get_dim_in(self) -> int: return self.dim_in def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index 9ee9b4dc0b..82ccb06f32 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -215,11 +215,11 @@ def get_dim_emb(self) -> int: return self.se_ttebd.dim_emb def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. @@ -242,7 +242,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -470,7 +470,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -632,11 +632,11 @@ def __getitem__(self, key): raise KeyError(key) def mixed_types(self) -> bool: - """If true, the discriptor + """If true, the descriptor 1. assumes total number of atoms aligned across frames; 2. 
requires a neighbor list that does not distinguish different atomic types. - If false, the discriptor + If false, the descriptor 1. assumes total number of atoms of each atom type aligned across frames; 2. requires a neighbor list that distinguishes different atomic types. diff --git a/deepmd/pt/model/model/dp_linear_model.py b/deepmd/pt/model/model/dp_linear_model.py index ef2e84bd19..d19070fc5b 100644 --- a/deepmd/pt/model/model/dp_linear_model.py +++ b/deepmd/pt/model/model/dp_linear_model.py @@ -140,7 +140,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/dp_model.py b/deepmd/pt/model/model/dp_model.py index bd278ed787..e71c5e08de 100644 --- a/deepmd/pt/model/model/dp_model.py +++ b/deepmd/pt/model/model/dp_model.py @@ -28,7 +28,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/dp_zbl_model.py b/deepmd/pt/model/model/dp_zbl_model.py index 59147e1d4c..e1ef00f5fe 100644 --- a/deepmd/pt/model/model/dp_zbl_model.py +++ b/deepmd/pt/model/model/dp_zbl_model.py @@ -140,7 +140,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/frozen.py b/deepmd/pt/model/model/frozen.py index 431c035339..37149303d4 100644 --- a/deepmd/pt/model/model/frozen.py +++ b/deepmd/pt/model/model/frozen.py @@ -182,7 +182,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 46b7e51109..a9d5e26060 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -221,7 +221,7 @@ def forward_common_lower( Parameters ---------- extended_coord - coodinates in extended region. nf x (nall x 3) + coordinates in extended region. nf x (nall x 3) extended_atype atomic type in extended region. nf x nall nlist @@ -362,7 +362,7 @@ def format_nlist( the `nlist` is pad with -1. 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), - the nearest sum(sel) neighbors will be preseved. + the nearest sum(sel) neighbors will be preserved. Known limitations: @@ -372,7 +372,7 @@ def format_nlist( Parameters ---------- extended_coord - coodinates in extended region. nf x nall x 3 + coordinates in extended region. nf x nall x 3 extended_atype atomic type in extended region. nf x nall nlist @@ -383,7 +383,7 @@ def format_nlist( Returns ------- formated_nlist - the formated nlist. + the formatted nlist. """ mixed_types = self.mixed_types() diff --git a/deepmd/pt/model/model/spin_model.py b/deepmd/pt/model/model/spin_model.py index a9f6e4d75a..bc1bc81a74 100644 --- a/deepmd/pt/model/model/spin_model.py +++ b/deepmd/pt/model/model/spin_model.py @@ -105,9 +105,9 @@ def process_spin_output( """ Split the output both real and virtual atoms, and scale the latter. 
add_mag: whether to add magnetic tensor onto the real tensor. - Default: True. e.g. Ture for forces and False for atomic virials on real atoms. + Default: True. e.g. True for forces and False for atomic virials on real atoms. virtual_scale: whether to scale the magnetic tensor with virtual scale factor. - Default: True. e.g. Ture for forces and False for atomic virials on virtual atoms. + Default: True. e.g. True for forces and False for atomic virials on virtual atoms. """ nframes, nloc_double = out_tensor.shape[:2] nloc = nloc_double // 2 @@ -138,9 +138,9 @@ def process_spin_output_lower( """ Split the extended output of both real and virtual atoms with switch, and scale the latter. add_mag: whether to add magnetic tensor onto the real tensor. - Default: True. e.g. Ture for forces and False for atomic virials on real atoms. + Default: True. e.g. True for forces and False for atomic virials on real atoms. virtual_scale: whether to scale the magnetic tensor with virtual scale factor. - Default: True. e.g. Ture for forces and False for atomic virials on virtual atoms. + Default: True. e.g. True for forces and False for atomic virials on virtual atoms. """ nframes, nall_double = extended_out_tensor.shape[:2] nall = nall_double // 2 diff --git a/deepmd/pt/model/network/init.py b/deepmd/pt/model/network/init.py index 0bab6b66bd..fe3c034637 100644 --- a/deepmd/pt/model/network/init.py +++ b/deepmd/pt/model/network/init.py @@ -17,7 +17,7 @@ # These no_grad_* functions are necessary as wrappers around the parts of these # functions that use `with torch.no_grad()`. The JIT doesn't support context # managers, so these need to be implemented as builtins. Using these wrappers -# lets us keep those builtins small and re-usable. +# lets us keep those builtins small and reusable. def _no_grad_uniform_(tensor, a, b, generator=None): with torch.no_grad(): return tensor.uniform_(a, b, generator=generator) diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 12e1eabf22..88ea108ce7 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -300,7 +300,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index dd32042564..df65f1cd18 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -39,7 +39,7 @@ def __init__( - ntypes: Element count. - embedding_width: Embedding width per atom. - neuron: Number of neurons in each hidden layers of the fitting net. - - bias_atom_e: Average enery per atom for each element. + - bias_atom_e: Average energy per atom for each element. - resnet_dt: Using time-step in the ResNet construction. """ super().__init__() diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index e0c5b0951e..ee8372c3ac 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -117,7 +117,7 @@ def __init__( - ntypes: Element count. - embedding_width: Embedding width per atom. - neuron: Number of neurons in each hidden layers of the fitting net. 
- - bias_atom_e: Average enery per atom for each element. + - bias_atom_e: Average energy per atom for each element. - resnet_dt: Using time-step in the ResNet construction. """ super().__init__() diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 6e9829e4b6..e97ec516ec 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -59,7 +59,7 @@ def share_params(self, base_class, shared_level, resume=False): """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ assert ( self.__class__ == base_class.__class__ @@ -96,7 +96,7 @@ class GeneralFitting(Fitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. numb_fparam : int @@ -121,9 +121,9 @@ class GeneralFitting(Fitting): Now this only supports setting all the parameters in the fitting net at one state. When in list[bool], the trainable will be True only if all the boolean parameters are True. remove_vaccum_contribution: list[bool], optional - Remove vaccum contribution before the bias is added. The list assigned each + Remove vacuum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same - length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + length as `ntypes` signaling whether or not to remove the vacuum contribution for the atom types in the list. type_map: list[str], Optional A list of strings. Give the name to each type of atoms. """ @@ -400,9 +400,9 @@ def _forward_common( xx = descriptor if self.remove_vaccum_contribution is not None: # TODO: compute the input for vaccm when remove_vaccum_contribution is set - # Idealy, the input for vaccum should be computed; + # Ideally, the input for vacuum should be computed; # we consider it as always zero for convenience. - # Needs a compute_input_stats for vaccum passed from the + # Needs a compute_input_stats for vacuum passed from the # descriptor. xx_zeros = torch.zeros_like(xx) else: diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 230046b74b..3bd37c1d6d 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -50,7 +50,7 @@ class InvarFitting(GeneralFitting): neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_e : torch.Tensor, optional - Average enery per atom for each element. + Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. numb_fparam : int @@ -74,7 +74,7 @@ class InvarFitting(GeneralFitting): Specifying atomic energy contribution in vacuum. The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. type_map: list[str], Optional A list of strings. Give the name to each type of atoms.
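The vacuum-removal logic documented in the fitting.py hunks above can be pictured with a short sketch. It is a minimal illustration of the idea in the docstring (evaluate the fitting net on an all-zero descriptor and subtract it before the bias is added), under assumed tensor shapes; it is not the actual DeePMD-kit implementation, and `net`, `descriptor`, and `bias` are hypothetical names:

    import torch

    def forward_remove_vacuum(net, descriptor, bias):
        # descriptor: assumed shape (nframes, nloc, dim_descrpt)
        out = net(descriptor)
        # stand-in for the vacuum input, which the TODO above notes is
        # approximated as always zero for convenience
        out = out - net(torch.zeros_like(descriptor))
        return out + bias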
diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index cc6a4e8745..4017f51468 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -35,7 +35,7 @@ @Fitting.register("property") class PropertyFittingNet(InvarFitting): - """Fitting the rotationally invariant porperties of `task_dim` of the system. + """Fitting the rotationally invariant properties of `task_dim` of the system. Parameters ---------- diff --git a/deepmd/pt/model/task/type_predict.py b/deepmd/pt/model/task/type_predict.py index c696590043..e8a5db62b5 100644 --- a/deepmd/pt/model/task/type_predict.py +++ b/deepmd/pt/model/task/type_predict.py @@ -19,7 +19,7 @@ def __init__(self, feature_dim, ntypes, activation_function="gelu", **kwargs): Args: - feature_dim: Input dm. - - ntypes: Numer of types to predict. + - ntypes: Number of types to predict. - activation_function: Activate function. """ super().__init__() diff --git a/deepmd/pt/train/wrapper.py b/deepmd/pt/train/wrapper.py index 922ac296ea..17fb8477a5 100644 --- a/deepmd/pt/train/wrapper.py +++ b/deepmd/pt/train/wrapper.py @@ -63,7 +63,7 @@ def share_params(self, shared_links, resume=False): """ Share the parameters of classes following rules defined in shared_links during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. """ supported_types = ["descriptor", "fitting_net"] for shared_item in shared_links: diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index c7f44cfb70..581f67196c 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -301,7 +301,7 @@ def get_weighted_sampler(training_data, prob_style, sys_prob=False): else: probs = process_sys_probs(prob_style, training_data.index) log.debug("Generated weighted sampler with prob array: " + str(probs)) - # training_data.total_batch is the size of one epoch, you can increase it to avoid too many rebuilding of iteraters + # training_data.total_batch is the size of one epoch, you can increase it to avoid rebuilding the iterators too many times len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) with torch.device("cpu"): sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) diff --git a/deepmd/pt/utils/env_mat_stat.py b/deepmd/pt/utils/env_mat_stat.py index cc30bd5155..b253a1b55e 100644 --- a/deepmd/pt/utils/env_mat_stat.py +++ b/deepmd/pt/utils/env_mat_stat.py @@ -61,7 +61,7 @@ def compute_stat(self, env_mat: dict[str, torch.Tensor]) -> dict[str, StatItem]: class EnvMatStatSe(EnvMatStat): - """Environmental matrix statistics for the se_a/se_r environemntal matrix. + """Environmental matrix statistics for the se_a/se_r environmental matrix. Parameters ---------- diff --git a/deepmd/pt/utils/neighbor_stat.py b/deepmd/pt/utils/neighbor_stat.py index 7d52bfaae1..64ad695827 100644 --- a/deepmd/pt/utils/neighbor_stat.py +++ b/deepmd/pt/utils/neighbor_stat.py @@ -25,7 +25,7 @@ class NeighborStatOP(torch.nn.Module): - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information.
Parameters ---------- diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index c30ec6dd02..db1e87785b 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -56,7 +56,7 @@ def build_neighbor_list( sel: Union[int, list[int]], distinguish_types: bool = True, ) -> torch.Tensor: - """Build neightbor list for a single frame. keeps nsel neighbors. + """Build neighbor list for a single frame. Keeps nsel neighbors. Parameters ---------- @@ -264,7 +264,7 @@ def build_directional_neighbor_list( rr = torch.linalg.norm(diff, dim=-1) rr, nlist = torch.sort(rr, dim=-1) - # We assume that the central and neighbor atoms are diffferent, + # We assume that the central and neighbor atoms are different, # thus we do not need to exclude self-neighbors. # # if central atom has two zero distances, sorting sometimes can not exclude itself # rr -= torch.eye(nloc_cntl, nall_neig, dtype=rr.dtype, device=rr.device).unsqueeze(0) @@ -429,7 +429,7 @@ def extend_coord_with_ghosts( extended_atype: torch.Tensor extended atom type of shape [-1, nall]. index_mapping: torch.Tensor - maping extended index to the local index + mapping extended index to the local index """ device = coord.device diff --git a/deepmd/pt/utils/region.py b/deepmd/pt/utils/region.py index 6fa77125aa..3272434995 100644 --- a/deepmd/pt/utils/region.py +++ b/deepmd/pt/utils/region.py @@ -92,7 +92,7 @@ def normalize_coord( Parameters ---------- coord : torch.Tensor - orignal coordinates of shape [*, na, 3]. + original coordinates of shape [*, na, 3]. Returns ------- diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 831d2bef76..4028d89fc9 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -266,7 +266,7 @@ def compute_output_stats( Specifying atomic energy contribution in vacuum. Given by key:value pairs. The value is a list specifying the bias. the elements can be None or np.ndarray of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] - The `set_davg_zero` key in the descrptor should be set. + The `set_davg_zero` key in the descriptor should be set. model_forward : Callable[..., torch.Tensor], optional The wrapped forward function of atomic model. If not None, the model will be utilized to generate the original energy prediction, diff --git a/deepmd/tf/cluster/local.py b/deepmd/tf/cluster/local.py index a9392bd326..25fb1cc645 100644 --- a/deepmd/tf/cluster/local.py +++ b/deepmd/tf/cluster/local.py @@ -43,7 +43,7 @@ def get_gpus(): stdout, stderr = p.communicate() if p.returncode != 0: decoded = stderr.decode("UTF-8") - raise RuntimeError(f"Failed to detect availbe GPUs due to:\n{decoded}") + raise RuntimeError(f"Failed to detect available GPUs due to:\n{decoded}") decoded = stdout.decode("UTF-8").strip() num_gpus = int(decoded) return list(range(num_gpus)) if num_gpus > 0 else None diff --git a/deepmd/tf/descriptor/descriptor.py b/deepmd/tf/descriptor/descriptor.py index ba54ca1309..dd86beb21e 100644 --- a/deepmd/tf/descriptor/descriptor.py +++ b/deepmd/tf/descriptor/descriptor.py @@ -222,7 +222,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the + """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- @@ -253,7 +253,7 @@ def enable_compression( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting.
+ """Receive the mixed precision setting. Parameters ---------- @@ -473,7 +473,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/hybrid.py b/deepmd/tf/descriptor/hybrid.py index e4458476c8..3f20e7d856 100644 --- a/deepmd/tf/descriptor/hybrid.py +++ b/deepmd/tf/descriptor/hybrid.py @@ -72,7 +72,7 @@ def __init__( for ii in range(1, self.numb_descrpt): assert ( self.descrpt_list[ii].get_ntypes() == self.descrpt_list[0].get_ntypes() - ), f"number of atom types in {ii}th descrptor does not match others" + ), f"number of atom types in {ii}th descriptor does not match others" def get_rcut(self) -> float: """Returns the cut-off radius.""" @@ -317,7 +317,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters @@ -352,7 +352,7 @@ def enable_compression( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -434,7 +434,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/loc_frame.py b/deepmd/tf/descriptor/loc_frame.py index 74ba755b4c..9b338a5d25 100644 --- a/deepmd/tf/descriptor/loc_frame.py +++ b/deepmd/tf/descriptor/loc_frame.py @@ -72,7 +72,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -443,7 +443,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se.py b/deepmd/tf/descriptor/se.py index 319a65f6da..746ea8c628 100644 --- a/deepmd/tf/descriptor/se.py +++ b/deepmd/tf/descriptor/se.py @@ -35,7 +35,7 @@ class DescrptSe(Descriptor): ----- All of these descriptors have an environmental matrix and an embedding network (:meth:`deepmd.tf.utils.network.embedding_net`), so - they can share some similiar methods without defining them twice. + they can share some similar methods without defining them twice. 
Attributes ---------- @@ -162,7 +162,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py index d5a8ed6815..a0b6b810e4 100644 --- a/deepmd/tf/descriptor/se_a.py +++ b/deepmd/tf/descriptor/se_a.py @@ -237,7 +237,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -448,7 +448,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- @@ -502,7 +502,7 @@ def enable_compression( ) elif len(ret_one_side) != 0 and len(ret_two_side) != 0: raise RuntimeError( - "both one side and two side embedding net varaibles are detected, it is a wrong model." + "both one side and two side embedding net variables are detected, it is a wrong model." ) elif len(ret_two_side) != 0: self.final_type_embedding = get_two_side_type_embedding(self, graph) @@ -548,7 +548,7 @@ def enable_compression( self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std") def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/descriptor/se_a_ebd_v2.py b/deepmd/tf/descriptor/se_a_ebd_v2.py index af43eedbbc..035fc6509c 100644 --- a/deepmd/tf/descriptor/se_a_ebd_v2.py +++ b/deepmd/tf/descriptor/se_a_ebd_v2.py @@ -23,7 +23,7 @@ class DescrptSeAEbdV2(DescrptSeA): r"""A compressible se_a_ebd model. - This model is a warpper for DescriptorSeA, which set tebd_input_mode='strip'. + This model is a wrapper for DescriptorSeA, which sets tebd_input_mode='strip'.
""" def __init__( diff --git a/deepmd/tf/descriptor/se_a_ef.py b/deepmd/tf/descriptor/se_a_ef.py index 9f70464c56..bf891e6032 100644 --- a/deepmd/tf/descriptor/se_a_ef.py +++ b/deepmd/tf/descriptor/se_a_ef.py @@ -348,7 +348,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r diff --git a/deepmd/tf/descriptor/se_a_mask.py b/deepmd/tf/descriptor/se_a_mask.py index e12f6a0fff..5667122809 100644 --- a/deepmd/tf/descriptor/se_a_mask.py +++ b/deepmd/tf/descriptor/se_a_mask.py @@ -157,7 +157,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei = self.nnei_a # to be compat with old option of `stripped_type_embedding` @@ -435,7 +435,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 963e81ecf0..d41226bbb1 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -424,7 +424,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- @@ -702,7 +702,7 @@ def _pass_filter( assert ( input_dict is not None and input_dict.get("type_embedding", None) is not None - ), "se_atten desctiptor must use type_embedding" + ), "se_atten descriptor must use type_embedding" type_embedding = input_dict.get("type_embedding", None) inputs = tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]) output = [] @@ -1429,9 +1429,9 @@ def build_type_exclude_mask_mixed( Notes ----- - This method has the similiar way to build the type exclude mask as + This method has the similar way to build the type exclude mask as :meth:`deepmd.tf.descriptor.descriptor.Descriptor.build_type_exclude_mask`. - The mathmatical expression has been explained in that method. + The mathematical expression has been explained in that method. The difference is that the attention descriptor has provided the type of the neighbors (idx_j) that is not in order, so we use it from an extra input. 
@@ -1516,7 +1516,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py index 8096ef7c96..752642c1d5 100644 --- a/deepmd/tf/descriptor/se_r.py +++ b/deepmd/tf/descriptor/se_r.py @@ -149,7 +149,7 @@ def __init__( # descrpt config self.sel_a = [0 for ii in range(len(self.sel_r))] self.ntypes = len(self.sel_r) - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -325,7 +325,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- diff --git a/deepmd/tf/descriptor/se_t.py b/deepmd/tf/descriptor/se_t.py index f96b1ba778..464839aeac 100644 --- a/deepmd/tf/descriptor/se_t.py +++ b/deepmd/tf/descriptor/se_t.py @@ -145,7 +145,7 @@ def __init__( self.ntypes = len(self.sel_a) assert self.ntypes == len(self.sel_r) self.rcut_a = -1 - # numb of neighbors and numb of descrptors + # numb of neighbors and numb of descriptors self.nnei_a = np.cumsum(self.sel_a)[-1] self.nnei_r = np.cumsum(self.sel_r)[-1] self.nnei = self.nnei_a + self.nnei_r @@ -332,7 +332,7 @@ def enable_compression( check_frequency: int = -1, suffix: str = "", ) -> None: - """Reveive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data. Parameters ---------- diff --git a/deepmd/tf/entrypoints/freeze.py b/deepmd/tf/entrypoints/freeze.py index cee6615abc..2658f565a6 100755 --- a/deepmd/tf/entrypoints/freeze.py +++ b/deepmd/tf/entrypoints/freeze.py @@ -59,7 +59,7 @@ def _transfer_fitting_net_trainable_variables(sess, old_graph_def, raw_graph_def raw_graph_def, # The graph_def is used to retrieve the nodes [ n + "_1" for n in old_graph_nodes - ], # The output node names are used to select the usefull nodes + ], # The output node names are used to select the useful nodes ) except AssertionError: # if there's no additional nodes @@ -275,7 +275,7 @@ def freeze_graph( output_graph_def = tf.graph_util.convert_variables_to_constants( sess, # The session is used to retrieve the weights input_graph, # The graph_def is used to retrieve the nodes - output_node, # The output node names are used to select the usefull nodes + output_node, # The output node names are used to select the useful nodes ) # If we need to transfer the fitting net variables @@ -334,7 +334,7 @@ def freeze( # We import the meta graph and retrieve a Saver try: - # In case paralle training + # In case of parallel training import horovod.tensorflow as HVD except ImportError: pass diff --git a/deepmd/tf/entrypoints/ipi.py b/deepmd/tf/entrypoints/ipi.py index 1183375119..a08a2293a9 100644 --- a/deepmd/tf/entrypoints/ipi.py +++ b/deepmd/tf/entrypoints/ipi.py @@ -13,7 +13,7 @@ def _program(name: str, args: list[str]): - """Execuate a program. + """Execute a program.
Parameters ---------- diff --git a/deepmd/tf/entrypoints/main.py b/deepmd/tf/entrypoints/main.py index d9dff4eb4a..b8bfdef6d8 100644 --- a/deepmd/tf/entrypoints/main.py +++ b/deepmd/tf/entrypoints/main.py @@ -60,7 +60,7 @@ def main(args: Optional[Union[list[str], argparse.Namespace]] = None): args = parse_args(args=args) # do not set log handles for None, it is useless - # log handles for train will be set separatelly + # log handles for train will be set separately # when the use of MPI will be determined in `RunOptions` if args.command not in (None, "train"): set_log_handles(args.log_level, Path(args.log_path) if args.log_path else None) diff --git a/deepmd/tf/entrypoints/train.py b/deepmd/tf/entrypoints/train.py index 66622b3182..3d965ea71c 100755 --- a/deepmd/tf/entrypoints/train.py +++ b/deepmd/tf/entrypoints/train.py @@ -114,7 +114,7 @@ def train( mpi_log=mpi_log, ) if run_opt.is_distrib and len(run_opt.gpus or []) > 1: - # avoid conflict of visible gpus among multipe tf sessions in one process + # avoid conflict of visible gpus among multiple tf sessions in one process reset_default_tf_session_config(cpu_only=True) # load json database diff --git a/deepmd/tf/entrypoints/transfer.py b/deepmd/tf/entrypoints/transfer.py index b93caf3cac..52bf56c4fd 100644 --- a/deepmd/tf/entrypoints/transfer.py +++ b/deepmd/tf/entrypoints/transfer.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -"""Module used for transfering parameters between models.""" +"""Module used for transferring parameters between models.""" import logging import re @@ -43,7 +43,7 @@ def convert_matrix( shape : Sequence[int] shape to cast resulting array to dtype : Optional[type] - type that finall array will be cast to, If None no casting will take place + type that final array will be cast to. If None, no casting will take place Returns ------- @@ -58,7 +58,7 @@ def convert_matrix( def transfer(*, old_model: str, raw_model: str, output: str, **kwargs): - """Transfer operation from old fron graph to new prepared raw graph. + """Transfer operation from old frozen graph to new prepared raw graph. Parameters ---------- @@ -67,7 +67,7 @@ def transfer(*, old_model: str, raw_model: str, output: str, **kwargs): raw_model : str new model that will accept ops from old model output : str - new model with transfered parameters will be saved to this location + new model with transferred parameters will be saved to this location **kwargs additional arguments """ @@ -104,7 +104,7 @@ def load_graph(graph_name: str) -> tf.Graph: def transform_graph(raw_graph: tf.Graph, old_graph: tf.Graph) -> tf.Graph: - """Trasform old graph into new. + """Transform old graph into new.
Parameters ---------- @@ -116,7 +116,7 @@ def transform_graph(raw_graph: tf.Graph, old_graph: tf.Graph) -> tf.Graph: Returns ------- tf.Graph - new graph with parameters transfered form the old one + new graph with parameters transferred from the old one """ old_graph_def = old_graph.as_graph_def() raw_graph_def = raw_graph.as_graph_def() diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py index 5a66498dba..16ad4735fd 100644 --- a/deepmd/tf/env.py +++ b/deepmd/tf/env.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -"""Module that sets tensorflow working environment and exports inportant constants.""" +"""Module that sets tensorflow working environment and exports important constants.""" import ctypes import logging @@ -92,7 +92,7 @@ def filter(self, record): # https://keras.io/getting_started/#tensorflow--keras-2-backwards-compatibility # 2024/04/24: deepmd.tf doesn't import tf.keras any more -# import tensorflow v1 compatability +# import tensorflow v1 compatibility import tensorflow.compat.v1 as tf tf.get_logger().addFilter(TFWarningFilter()) @@ -339,7 +339,7 @@ def get_module(module_name: str) -> "ModuleType": try: module = tf.load_op_library(str(module_file)) except tf.errors.NotFoundError as e: - # check CXX11_ABI_FLAG is compatiblity + # check CXX11_ABI_FLAG compatibility # see https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html # ABI should be the same if "CXX11_ABI_FLAG" in tf.__dict__: @@ -377,7 +377,7 @@ def get_module(module_name: str) -> "ModuleType": "instead." ) from e error_message = ( - "This deepmd-kit package is inconsitent with TensorFlow " + "This deepmd-kit package is inconsistent with TensorFlow " f"Runtime, thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this TensorFlow " "runtime." diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py index 0e5b860fa2..fa8a5b680c 100644 --- a/deepmd/tf/fit/dipole.py +++ b/deepmd/tf/fit/dipole.py @@ -41,11 +41,11 @@ class DipoleFittingSeA(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` embedding_width - The rotation matrix dimension of the descrptor :math:`\mathcal{D}` + The rotation matrix dimension of the descriptor :math:`\mathcal{D}` neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -320,7 +320,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py index ebc347c2fd..099cba0d12 100644 --- a/deepmd/tf/fit/dos.py +++ b/deepmd/tf/fit/dos.py @@ -62,9 +62,9 @@ class DOSFitting(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` neuron Number of neurons :math:`N` in each hidden layer of the fitting net resnet_dt @@ -187,7 +187,7 @@ def get_numb_dos(self) -> int: # not used def compute_output_stats(self, all_stat: dict, mixed_type: bool = False) -> None: - """Compute the ouput statistics. + """Compute the output statistics.
Parameters ---------- @@ -628,7 +628,7 @@ def init_variables( pass def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index b01574cf87..0b9eb97f80 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -109,9 +109,9 @@ class EnerFitting(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` neuron Number of neurons :math:`N` in each hidden layer of the fitting net resnet_dt @@ -132,7 +132,7 @@ class EnerFitting(Fitting): seed Random seed for initializing the network parameters. atom_ener - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set. activation_function The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN| precision @@ -252,7 +252,7 @@ def get_numb_aparam(self) -> int: return self.numb_aparam def compute_output_stats(self, all_stat: dict, mixed_type: bool = False) -> None: - """Compute the ouput statistics. + """Compute the output statistics. Parameters ---------- @@ -822,7 +822,7 @@ def change_energy_bias( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- @@ -985,7 +985,7 @@ def change_energy_bias_lower( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. ntest : int The number of test samples in a system to change the energy bias. diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py index cc79e3402a..b5a21012bd 100644 --- a/deepmd/tf/fit/polar.py +++ b/deepmd/tf/fit/polar.py @@ -46,11 +46,11 @@ class PolarFittingSeA(Fitting): Parameters ---------- ntypes - The ntypes of the descrptor :math:`\mathcal{D}` + The ntypes of the descriptor :math:`\mathcal{D}` dim_descrpt - The dimension of the descrptor :math:`\mathcal{D}` + The dimension of the descriptor :math:`\mathcal{D}` embedding_width - The rotation matrix dimension of the descrptor :math:`\mathcal{D}` + The rotation matrix dimension of the descriptor :math:`\mathcal{D}` neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -221,7 +221,7 @@ def compute_output_stats(self, all_stat): else: # No atomic polar in this system, so it should have global polar if ( not all_stat["find_polarizability"][ss] > 0.0 - ): # This system is jsut a joke? + ): # This system is just a joke? continue # Till here, we have global polar sys_matrix.append( @@ -526,7 +526,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. 
Parameters ---------- @@ -618,7 +618,7 @@ class GlobalPolarFittingSeA: Parameters ---------- descrpt : tf.Tensor - The descrptor + The descriptor neuron : list[int] Number of neurons in each hidden layer of the fitting net resnet_dt : bool @@ -745,7 +745,7 @@ def init_variables( ) def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None: - """Reveive the mixed precision setting. + """Receive the mixed precision setting. Parameters ---------- diff --git a/deepmd/tf/infer/deep_dipole.py b/deepmd/tf/infer/deep_dipole.py index e10d09564d..b493af8552 100644 --- a/deepmd/tf/infer/deep_dipole.py +++ b/deepmd/tf/infer/deep_dipole.py @@ -39,7 +39,7 @@ class DeepDipoleOld(DeepTensor): -------- For developers: `DeepTensor` initializer must be called at the end after `self.tensors` are modified because it uses the data in `self.tensors` dict. - Do not chanage the order! + Do not change the order! """ def __init__( diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index 56df7f782f..9527cb2ae8 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -111,7 +111,7 @@ def __init__( raise RuntimeError( f"model in graph (version {self.model_version}) is incompatible" f"with the model (version {MODEL_VERSION}) supported by the current code." - "See https://deepmd.rtfd.io/compatability/ for details." + "See https://deepmd.rtfd.io/compatibility/ for details." ) # set default to False, as subclasses may not support @@ -190,7 +190,7 @@ def _init_tensors(self): "numb_dos": "fitting_attr/numb_dos:0", # model attrs "sel_type": "model_attr/sel_type:0", - # additonal inputs + # additional inputs "efield": "t_efield:0", "fparam": "t_fparam:0", "aparam": "t_aparam:0", @@ -312,12 +312,12 @@ def sess(self) -> tf.Session: return tf.Session(graph=self.graph, config=default_tf_session_config) def _graph_compatable(self) -> bool: - """Check the model compatability. + """Check the model compatibility. Returns ------- bool - If the model stored in the graph file is compatable with the current code + If the model stored in the graph file is compatible with the current code """ model_version_major = int(self.model_version.split(".")[0]) model_version_minor = int(self.model_version.split(".")[1]) @@ -781,7 +781,7 @@ def _prepare_feed_dict( aparam=None, efield=None, ): - # standarize the shape of inputs + # standardize the shape of inputs natoms, nframes = self._get_natoms_and_nframes( coords, atom_types, @@ -1118,7 +1118,7 @@ def get_has_efield(self) -> bool: return self.has_efield def get_model_def_script(self) -> dict: - """Get model defination script.""" + """Get model definition script.""" t_script = self._get_tensor("train_attr/training_script:0") [script] = run_sess(self.sess, [t_script], feed_dict={}) model_def_script = script.decode("utf-8") @@ -1171,7 +1171,7 @@ def __init__( raise RuntimeError( f"model in graph (version {self.model_version}) is incompatible" f"with the model (version {MODEL_VERSION}) supported by the current code." - "See https://deepmd.rtfd.io/compatability/ for details." + "See https://deepmd.rtfd.io/compatibility/ for details." ) # set default to False, as subclasses may not support @@ -1224,12 +1224,12 @@ def sess(self) -> tf.Session: return tf.Session(graph=self.graph, config=default_tf_session_config) def _graph_compatable(self) -> bool: - """Check the model compatability. + """Check the model compatibility. 
Returns ------- bool - If the model stored in the graph file is compatable with the current code + If the model stored in the graph file is compatible with the current code """ model_version_major = int(self.model_version.split(".")[0]) model_version_minor = int(self.model_version.split(".")[1]) diff --git a/deepmd/tf/infer/deep_tensor.py b/deepmd/tf/infer/deep_tensor.py index a20bbfe513..a1edaaa409 100644 --- a/deepmd/tf/infer/deep_tensor.py +++ b/deepmd/tf/infer/deep_tensor.py @@ -186,7 +186,7 @@ def eval( If atomic == False then of size nframes x output_dim else of size nframes x natoms x output_dim """ - # standarize the shape of inputs + # standardize the shape of inputs if mixed_type: natoms = atom_types[0].size atom_types = np.array(atom_types, dtype=int).reshape([-1, natoms]) @@ -330,7 +330,7 @@ def eval_full( """ assert self._support_gfv, "do not support eval_full with old tensor model" - # standarize the shape of inputs + # standardize the shape of inputs if mixed_type: natoms = atom_types[0].size atom_types = np.array(atom_types, dtype=int).reshape([-1, natoms]) diff --git a/deepmd/tf/loss/ener.py b/deepmd/tf/loss/ener.py index 337046836b..95cc8adafb 100644 --- a/deepmd/tf/loss/ener.py +++ b/deepmd/tf/loss/ener.py @@ -673,7 +673,7 @@ def print_on_training( error_ae_train, ) = train_out - # than test data, if tensorboard log writter is present, commpute summary + # than test data, if tensorboard log writer is present, compute summary # and write tensorboard logs if tb_writer: summary_merged_op = tf.summary.merge( diff --git a/deepmd/tf/model/ener.py b/deepmd/tf/model/ener.py index b21c920d9c..57aaa2acf4 100644 --- a/deepmd/tf/model/ener.py +++ b/deepmd/tf/model/ener.py @@ -56,7 +56,7 @@ class EnerModel(StandardModel): use_srtab The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. sw_rmin The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided. sw_rmin @@ -516,7 +516,7 @@ def change_energy_bias( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. 
""" self.fitting.change_energy_bias( diff --git a/deepmd/tf/model/frozen.py b/deepmd/tf/model/frozen.py index 05700dc64e..7501a5cbd1 100644 --- a/deepmd/tf/model/frozen.py +++ b/deepmd/tf/model/frozen.py @@ -250,7 +250,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/linear.py b/deepmd/tf/model/linear.py index 4c75c2a1d5..7cf3c5194d 100644 --- a/deepmd/tf/model/linear.py +++ b/deepmd/tf/model/linear.py @@ -146,7 +146,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index 833f8364ae..03211d49d5 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -87,7 +87,7 @@ class Model(ABC, make_plugin_registry("model")): use_srtab The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly. smin_alpha - The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. + The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. sw_rmin The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided. sw_rmin @@ -411,7 +411,7 @@ def change_energy_bias( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic energy bias in the target dataset. 
""" raise RuntimeError("Not supported") @@ -524,7 +524,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict @@ -766,7 +766,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/pairtab.py b/deepmd/tf/model/pairtab.py index d54940fec6..80e68d7825 100644 --- a/deepmd/tf/model/pairtab.py +++ b/deepmd/tf/model/pairtab.py @@ -244,7 +244,7 @@ def get_fitting(self) -> Union[Fitting, dict]: def get_loss(self, loss: dict, lr) -> Optional[Union[Loss, dict]]: """Get the loss function(s).""" - # nothing nees to do + # nothing needs to do return def get_rcut(self) -> float: @@ -285,7 +285,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py index c8a57d90b3..a0eaa1385f 100644 --- a/deepmd/tf/model/pairwise_dprc.py +++ b/deepmd/tf/model/pairwise_dprc.py @@ -421,7 +421,7 @@ def update_sel( Parameters ---------- train_data : DeepmdDataSystem - data used to do neighbor statictics + data used to do neighbor statistics type_map : list[str], optional The name of each type of atoms local_jdata : dict diff --git a/deepmd/tf/nvnmd/data/data.py b/deepmd/tf/nvnmd/data/data.py index 55e7c51bc7..e1fcaac9f2 100644 --- a/deepmd/tf/nvnmd/data/data.py +++ b/deepmd/tf/nvnmd/data/data.py @@ -118,7 +118,7 @@ "end": "", } -# change the configuration accordng to the max_nnei +# change the configuration according to the max_nnei jdata_config_v0_ni128 = jdata_config_v0.copy() jdata_config_v0_ni256 = jdata_config_v0.copy() jdata_config_v0_ni256["ctrl"] = { @@ -250,7 +250,7 @@ "end": "", } -# change the configuration accordng to the max_nnei +# change the configuration according to the max_nnei jdata_config_v1_ni128 = jdata_config_v1.copy() jdata_config_v1_ni256 = jdata_config_v1.copy() jdata_config_v1_ni256["ctrl"] = { diff --git a/deepmd/tf/nvnmd/entrypoints/mapt.py b/deepmd/tf/nvnmd/entrypoints/mapt.py index 8ee1967854..7a50ceae30 100644 --- a/deepmd/tf/nvnmd/entrypoints/mapt.py +++ b/deepmd/tf/nvnmd/entrypoints/mapt.py @@ -50,7 +50,7 @@ class MapTable: :math:`h_{ji} = \frac{s(r_{ji})}{r_{ji}}`, and :math:`\mathcal{G}_{ji}` is embedding matrix. 
- The mapping funciton can be define as: + The mapping function can be defined as: | :math:`y = f(x) = y_{k} + (x - x_{k}) * dy_{k}` | :math:`y_{k} = f(x_{k})` @@ -436,7 +436,7 @@ def run_u2s(self): # N = NUM_MAPT N = 512 N2 = int(rc_max**2) - # N+1 ranther than N for calculating defference + # N+1 rather than N for calculating difference keys = list(dic_ph.keys()) vals = list(dic_ph.values()) @@ -446,7 +446,7 @@ def run_u2s(self): u2 = N2 * np.reshape(np.arange(0, N * 16 + 1) / (N * 16), [-1, 1]) # pylint: disable=no-explicit-dtype res_lst2 = run_sess(sess, vals, feed_dict={dic_ph["u"]: u2}) - res_dic2 = dict(zip(keys, res_lst2)) # reference for commpare + res_dic2 = dict(zip(keys, res_lst2)) # reference for comparison # change value for tt in range(ndim): diff --git a/deepmd/tf/nvnmd/entrypoints/wrap.py b/deepmd/tf/nvnmd/entrypoints/wrap.py index 7811f90f75..69077ca752 100755 --- a/deepmd/tf/nvnmd/entrypoints/wrap.py +++ b/deepmd/tf/nvnmd/entrypoints/wrap.py @@ -116,16 +116,16 @@ def wrap(self): # extend data according to the number of bits per row of BRAM nbit = 32 if nvnmd_cfg.version == 0: - datas = [hcfg, hfps, hbps, hswt, hdsw, hfea, hgra] + data = [hcfg, hfps, hbps, hswt, hdsw, hfea, hgra] keys = "cfg fps bps swt dsw fea gra".split() if nvnmd_cfg.version == 1: keys = "cfg fps bps swt dsw std fea gra gtt avc".split() - datas = [hcfg, hfps, hbps, hswt, hdsw, hstd, hfea, hgra, hgtt, havc] + data = [hcfg, hfps, hbps, hswt, hdsw, hstd, hfea, hgra, hgtt, havc] nhs = [] nws = [] - for ii in range(len(datas)): + for ii in range(len(data)): k = keys[ii] - d = datas[ii] + d = data[ii] h = len(d) w = len(d[0]) # nhex w4 = w * 4 # nbit @@ -138,7 +138,7 @@ def wrap(self): if jdata_sys["debug"]: log.info("%s: %d x % d bit" % (k, h, w * 4)) FioTxt().save(f"nvnmd/wrap/h{k}.txt", d) - datas[ii] = d + data[ii] = d # update h & w of nvnmd_cfg nvnmd_cfg.size["NH_DATA"] = nhs nvnmd_cfg.size["NW_DATA"] = nws @@ -146,7 +146,7 @@ def wrap(self): head = self.wrap_head(nhs, nws) # output model hs = [*head] - for d in datas: + for d in data: hs.extend(d) FioBin().save(self.model_file, hs) diff --git a/deepmd/tf/nvnmd/utils/encode.py b/deepmd/tf/nvnmd/utils/encode.py index 21398fbf23..46209e5230 100644 --- a/deepmd/tf/nvnmd/utils/encode.py +++ b/deepmd/tf/nvnmd/utils/encode.py @@ -122,7 +122,7 @@ def check_dec(self, idec, nbit, signed=False, name=""): def extend_list(self, slbin, nfull): r"""Extend the list (slbin) to the length (nfull) - the attched element of list is 0. + the attached element of list is 0. such as, when diff --git a/deepmd/tf/nvnmd/utils/network.py b/deepmd/tf/nvnmd/utils/network.py index 76c80ed4e7..c0572a7fa7 100644 --- a/deepmd/tf/nvnmd/utils/network.py +++ b/deepmd/tf/nvnmd/utils/network.py @@ -240,7 +240,7 @@ def one_layer( x = op_module.quantize_nvnmd(inputs, 1, NBIT_DATA_FL, NBIT_DATA_FL, -1) inputs = tf.ensure_shape(x, [None, shape[1]]) # wx - # normlize weight mode: 0 all | 1 column + # normalize weight mode: 0 all | 1 column norm_mode = 0 if final_layer else 1 wx = op_module.matmul_fitnet_nvnmd( inputs, w, NBIT_DATA_FL, NBIT_SHORT_FL, norm_mode ) diff --git a/deepmd/tf/op/__init__.py b/deepmd/tf/op/__init__.py index 421ef0b123..805dc148a7 100644 --- a/deepmd/tf/op/__init__.py +++ b/deepmd/tf/op/__init__.py @@ -18,7 +18,7 @@ def import_ops(): Notes ----- - Initialy this subdir is unpopulated. CMake will install all the op module python + Initially this subdir is unpopulated. CMake will install all the op module python files and shared libs.
""" for module_file in Path(__file__).parent.glob("*.py"): diff --git a/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py b/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py index 8a4ffb2d0c..b6aae52519 100644 --- a/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py +++ b/deepmd/tf/op/_dotmul_flt_nvnmd_grad.py @@ -15,7 +15,7 @@ def _DotmulFltNvnmdGrad(op, grad): x = op.inputs[0] w = op.inputs[1] - # calcualte + # calculate dx = op_module.mul_flt_nvnmd(grad, w) dw = op_module.mul_flt_nvnmd(grad, x) # add shape for output of matmul_nvnmd diff --git a/deepmd/tf/op/_matmul_flt2fix_nvnmd.py b/deepmd/tf/op/_matmul_flt2fix_nvnmd.py index 319fb90ec8..3b802ec56a 100644 --- a/deepmd/tf/op/_matmul_flt2fix_nvnmd.py +++ b/deepmd/tf/op/_matmul_flt2fix_nvnmd.py @@ -22,7 +22,7 @@ def _MatmulFlt2fixNvnmdGrad(op, grad): else: x_T = tf.transpose(x) w_T = tf.transpose(w) - # calcualte + # calculate # dx = tf.matmul(grad, w_T) # dw = tf.matmul(x_T, grad) dx = op_module.matmul_flt_nvnmd(grad, w_T, 1, 1) diff --git a/deepmd/tf/op/_matmul_flt_nvnmd_grad.py b/deepmd/tf/op/_matmul_flt_nvnmd_grad.py index 6493794b00..94e0dc2d67 100644 --- a/deepmd/tf/op/_matmul_flt_nvnmd_grad.py +++ b/deepmd/tf/op/_matmul_flt_nvnmd_grad.py @@ -24,7 +24,7 @@ def _MatmulFltNvnmdGrad(op, grad): else: x_T = tf.transpose(x) w_T = tf.transpose(w) - # calcualte + # calculate modex = (normx >> 4) & 15 modew = (normw >> 4) & 15 if modex: diff --git a/deepmd/tf/train/run_options.py b/deepmd/tf/train/run_options.py index c36b42e194..c7f7b92674 100644 --- a/deepmd/tf/train/run_options.py +++ b/deepmd/tf/train/run_options.py @@ -82,7 +82,7 @@ class RunOptions: gpus: Optional[list[int]] list of GPUs if any are present else None is_chief: bool - in distribured training it is true for tha main MPI process in serail it is + in distribured training it is true for the main MPI process in serail it is always true world_size: int total worker count @@ -93,7 +93,7 @@ class RunOptions: node_list_ : list[str] the list of nodes of the current mpirun my_device: str - deviice type - gpu or cpu + device type - gpu or cpu """ gpus: Optional[list[int]] @@ -180,7 +180,7 @@ def _setup_logger( else: log.warning( f"Log handles have already been set. It is not advisable to " - f"reset them{', especially when runnig with MPI!' if self._HVD else ''}" + f"reset them{', especially when running with MPI!' if self._HVD else ''}" ) def _try_init_distrib(self): @@ -193,7 +193,7 @@ def _try_init_distrib(self): log.warning("Switch to serial execution due to lack of horovod module.") self.is_distrib = False - # Do real intialization + # Do real initialization if self.is_distrib: self._init_distributed(HVD) self._HVD = HVD diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py index 9f353f2e32..58be9e8176 100644 --- a/deepmd/tf/train/trainer.py +++ b/deepmd/tf/train/trainer.py @@ -409,7 +409,7 @@ def train(self, train_data=None, valid_data=None): stop_batch = self.stop_batch self._init_session() - # Before data shard is enabled, only cheif do evaluation and record it + # Before data shard is enabled, only chief do evaluation and record it # self.print_head() fp = None if self.run_opt.is_chief: @@ -846,7 +846,7 @@ def _init_from_pretrained_model( bias_adjust_mode : str The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. + and do least square on the errors to obtain the target shift as bias. 
'set-by-statistic' : directly use the statistic energy bias in the target dataset. """ try: @@ -940,7 +940,7 @@ def build(self) -> list[tf.Tensor]: def get_train_batch() -> list[np.ndarray]: batch_data = train_data.get_batch() - # convert dict to list of arryas + # convert dict to list of arrays batch_data = tuple([batch_data[kk] for kk in self.data_keys]) return batch_data diff --git a/deepmd/tf/utils/learning_rate.py b/deepmd/tf/utils/learning_rate.py index 519bf20bd0..fee73ca9a3 100644 --- a/deepmd/tf/utils/learning_rate.py +++ b/deepmd/tf/utils/learning_rate.py @@ -58,7 +58,7 @@ def build( Parameters ---------- global_step - The tf Tensor prividing the global training step + The tf Tensor providing the global training step stop_step The stop step. If provided, the decay_rate will be determined automatically and overwritten. diff --git a/deepmd/tf/utils/neighbor_stat.py b/deepmd/tf/utils/neighbor_stat.py index 4052c89821..37028b23bc 100644 --- a/deepmd/tf/utils/neighbor_stat.py +++ b/deepmd/tf/utils/neighbor_stat.py @@ -33,7 +33,7 @@ class NeighborStatOP: - """Class for getting neighbor statics data information. + """Class for getting neighbor statistics data information. Parameters ---------- diff --git a/deepmd/tf/utils/network.py b/deepmd/tf/utils/network.py index 7941b451af..c4a0646705 100644 --- a/deepmd/tf/utils/network.py +++ b/deepmd/tf/utils/network.py @@ -264,7 +264,7 @@ def embedding_net( stddev : float Standard deviation of initializing network parameters bavg : float - Mean of network intial bias + Mean of network initial bias seed : int Random seed for initializing network parameters trainable : boolean diff --git a/deepmd/tf/utils/nlist.py b/deepmd/tf/utils/nlist.py index 0f33ec883b..6e405e9adb 100644 --- a/deepmd/tf/utils/nlist.py +++ b/deepmd/tf/utils/nlist.py @@ -39,7 +39,7 @@ def extend_coord_with_ghosts( extended_atype: tf.Tensor extended atom type of shape [-1, nall]. index_mapping: tf.Tensor - maping extended index to the local index + mapping extended index to the local index """ # generated by GitHub Copilot, converted from PT codes diff --git a/deepmd/tf/utils/sess.py b/deepmd/tf/utils/sess.py index ca98980f89..3c179d6b96 100644 --- a/deepmd/tf/utils/sess.py +++ b/deepmd/tf/utils/sess.py @@ -10,7 +10,7 @@ def run_sess(sess: tf.Session, *args, **kwargs): - """Run session with erorrs caught. + """Run session with errors caught. Parameters ---------- diff --git a/deepmd/tf/utils/tabulate.py b/deepmd/tf/utils/tabulate.py index 1dc6128f62..a136062e2d 100644 --- a/deepmd/tf/utils/tabulate.py +++ b/deepmd/tf/utils/tabulate.py @@ -97,7 +97,7 @@ def __init__( elif activation_fn == ACTIVATION_FN_DICT["sigmoid"]: self.functype = 6 else: - raise RuntimeError("Unknown actication function type!") + raise RuntimeError("Unknown activation function type!") self.activation_fn = activation_fn # self.sess = tf.Session(graph = self.graph) diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index b3f3b26fd0..916e4de1b0 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -254,7 +254,7 @@ def descrpt_local_frame_args(): def descrpt_se_a_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. 
It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors within the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -322,7 +322,7 @@ def descrpt_se_t_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors within the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -391,7 +391,7 @@ def descrpt_se_a_tpe_args(): def descrpt_se_r_args(): doc_sel = 'This parameter set the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. 
In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors within the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -468,7 +468,7 @@ def descrpt_se_atten_common_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors within the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -565,7 +565,7 @@ def descrpt_se_atten_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. " "When `type_one_side` is False, the input is `input_ij = concat([r_ij, tebd_j, tebd_i])`. When `type_one_side` is True, the input is `input_ij = concat([r_ij, tebd_j])`. " - "The output is `out_ij = embeding(input_ij)` for the pair-wise representation of atom i with neighbor j." + "The output is `out_ij = embedding(input_ij)` for the pair-wise representation of atom i with neighbor j." 
"- 'strip': Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. " f"When `type_one_side` is False, the input is `input_t = concat([tebd_j, tebd_i])`. {doc_only_pt_supported} When `type_one_side` is True, the input is `input_t = tebd_j`. " "The output is `out_ij = embeding_t(input_t) * embeding_s(r_ij) + embeding_s(r_ij)` for the pair-wise representation of atom i with neighbor j." @@ -665,7 +665,7 @@ def descrpt_se_e3_tebd_args(): doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_rcut = "The cut-off radius." doc_rcut_smth = "Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`" doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." @@ -687,7 +687,7 @@ def descrpt_se_e3_tebd_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed angular information as the union input for the embedding network. " "The input is `input_jk = concat([angle_jk, tebd_j, tebd_k])`. " - "The output is `out_jk = embeding(input_jk)` for the three-body representation of atom i with neighbors j and k." + "The output is `out_jk = embedding(input_jk)` for the three-body representation of atom i with neighbors j and k." "- 'strip': Use a separated embedding network for the type embedding and combine the output with the angular embedding network output. " "The input is `input_t = concat([tebd_j, tebd_k])`." "The output is `out_jk = embeding_t(input_t) * embeding_s(angle_jk) + embeding_s(angle_jk)` for the three-body representation of atom i with neighbors j and k." @@ -952,7 +952,7 @@ def dpa2_repinit_args(): "The input mode of the type embedding. Supported modes are ['concat', 'strip']." "- 'concat': Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. 
" "When `type_one_side` is False, the input is `input_ij = concat([r_ij, tebd_j, tebd_i])`. When `type_one_side` is True, the input is `input_ij = concat([r_ij, tebd_j])`. " - "The output is `out_ij = embeding(input_ij)` for the pair-wise representation of atom i with neighbor j." + "The output is `out_ij = embedding(input_ij)` for the pair-wise representation of atom i with neighbor j." "- 'strip': Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. " f"When `type_one_side` is False, the input is `input_t = concat([tebd_j, tebd_i])`. {doc_only_pt_supported} When `type_one_side` is True, the input is `input_t = tebd_j`. " "The output is `out_ij = embeding_t(input_t) * embeding_s(r_ij) + embeding_s(r_ij)` for the pair-wise representation of atom i with neighbor j." @@ -1337,7 +1337,7 @@ def descrpt_se_a_ebd_v2_args(): def descrpt_se_a_mask_args(): doc_sel = 'This parameter sets the number of selected neighbors for each type of atom. It can be:\n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.\n\n\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' doc_neuron = "Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built." doc_axis_neuron = "Size of the submatrix of G (embedding matrix)." @@ -1398,7 +1398,7 @@ def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant: "se_atten_v2", "model[standard]/descriptor[se_atten_v2]" ) link_se_a_mask = make_link("se_a_mask", "model[standard]/descriptor[se_a_mask]") - doc_descrpt_type = f"The type of the descritpor. See explanation below. \n\n\ + doc_descrpt_type = f"The type of the descriptor. See explanation below. \n\n\ - {link_lf}: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\ - {link_se_e2_a}: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\ - {link_se_e2_r}: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.\n\n\ @@ -1431,7 +1431,7 @@ def fitting_ener(): doc_trainable = f"Whether the parameters in the fitting net are trainable. This option can be\n\n\ - bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\ - list of bool{doc_only_tf_supported}: Specifies if each layer is trainable. 
Since the fitting net is composed by hidden layers followed by a output layer, the length of this list should be equal to len(`neuron`)+1." - doc_rcond = "The condition number used to determine the inital energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." + doc_rcond = "The condition number used to determine the initial energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." doc_seed = "Random seed for parameter initialization of the fitting net" doc_atom_ener = "Specify the atomic energy in vacuum for each type" doc_layer_name = ( @@ -1506,8 +1506,8 @@ def fitting_dos(): doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' doc_trainable = "Whether the parameters in the fitting net are trainable. This option can be\n\n\ - bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\ -- list of bool: Specifies if each layer is trainable. Since the fitting net is composed by hidden layers followed by a output layer, the length of tihs list should be equal to len(`neuron`)+1." - doc_rcond = "The condition number used to determine the inital energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." +- list of bool: Specifies if each layer is trainable. Since the fitting net is composed of hidden layers followed by an output layer, the length of this list should be equal to len(`neuron`)+1." - doc_rcond = "The condition number used to determine the initial energy shift for each type of atoms. See `rcond` in :py:meth:`numpy.linalg.lstsq` for more details." doc_seed = "Random seed for parameter initialization of the fitting net" doc_numb_dos = ( "The number of gridpoints on which the DOS is evaluated (NEDOS in VASP)" ) @@ -1681,7 +1681,7 @@ def fitting_variant_type_args(): - `ener`: Fit an energy model (potential energy surface).\n\n\ - `dos` : Fit a density of states model. The total density of states / site-projected density of states labels should be provided by `dos.npy` or `atom_dos.npy` in each data system. The file has number of frames lines and number of energy grid columns (times number of atoms in `atom_dos.npy`). See `loss` parameter. \n\n\ - `dipole`: Fit an atomic dipole model. Global dipole labels or atomic dipole labels for all the selected atoms (see `sel_type`) should be provided by `dipole.npy` in each data system. The file either has number of frames lines and 3 times of number of selected atoms columns, or has number of frames lines and 3 columns. See `loss` parameter.\n\n\ -- `polar`: Fit an atomic polarizability model. Global polarizazbility labels or atomic polarizability labels for all the selected atoms (see `sel_type`) should be provided by `polarizability.npy` in each data system. The file eith has number of frames lines and 9 times of number of selected atoms columns, or has number of frames lines and 9 columns. See `loss` parameter.\n\n" +- `polar`: Fit an atomic polarizability model. Global polarizability labels or atomic polarizability labels for all the selected atoms (see `sel_type`) should be provided by `polarizability.npy` in each data system. The file either has number of frames lines and 9 times of number of selected atoms columns, or has number of frames lines and 9 columns. See `loss` parameter.\n\n" return Variant( "type", @@ -1765,7 +1765,7 @@ def model_args(exclude_hybrid=False): doc_type_embedding = "The type embedding." doc_modifier = "The modifier of model output." 
doc_use_srtab = "The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly." - doc_smin_alpha = "The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided." + doc_smin_alpha = "The short-range tabulated interaction will be switched according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided." doc_sw_rmin = "The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided." doc_sw_rmax = "The upper boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided." doc_srtab_add_bias = "Whether add energy bias from the statistics of the data to short-range tabulated atomic energy. It only takes effect when `use_srtab` is provided." @@ -1917,7 +1917,7 @@ def standard_model_args() -> Argument: doc=doc_fitting, ), ], - doc="Stardard model, which contains a descriptor and a fitting.", + doc="Standard model, which contains a descriptor and a fitting.", ) return ca @@ -1962,7 +1962,7 @@ def pairtab_model_args() -> Argument: doc_sel = 'This parameter set the number of selected neighbors. Note that this parameter is a little different from that in other descriptors. Instead of separating each type of atoms, only the summation matters. And this number is highly related with the efficiency, thus one should not make it too large. Usually 200 or less is enough, far away from the GPU limitation 4096. It can be:\n\n\ - `int`. The maximum number of neighbor atoms to be considered. We recommend it to be less than 200. \n\n\ - `list[int]`. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. Only the summation of `sel[i]` matters, and it is recommended to be less than 200.\ - - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wraped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' + - `str`. Can be "auto:factor" or "auto". "factor" is a float number larger than 1. This option will automatically determine the `sel`. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the "factor". Finally the number is wrapped up to 4 divisible. The option "auto" is equivalent to "auto:1.1".' ca = Argument( "pairtab", dict, @@ -2053,7 +2053,7 @@ def learning_rate_variant_type_args(): def learning_rate_args(fold_subdoc: bool = False) -> Argument: doc_scale_by_worker = "When parallel training or batch size scaled, how to alter learning rate. Valid values are `linear`(default), `sqrt` or `none`." 
- doc_lr = "The definitio of learning rate" + doc_lr = "The definition of learning rate" return Argument( "learning_rate", dict, @@ -2328,10 +2328,10 @@ def loss_dos(): doc_start_pref_dos = start_pref("Density of State (DOS)") doc_limit_pref_dos = limit_pref("Density of State (DOS)") doc_start_pref_cdf = start_pref( - "Cumulative Distribution Function (cumulative intergral of DOS)" + "Cumulative Distribution Function (cumulative integral of DOS)" ) doc_limit_pref_cdf = limit_pref( - "Cumulative Distribution Function (cumulative intergral of DOS)" + "Cumulative Distribution Function (cumulative integral of DOS)" ) doc_start_pref_ados = start_pref("atomic DOS (site-projected DOS)") doc_limit_pref_ados = limit_pref("atomic DOS (site-projected DOS)") @@ -2486,7 +2486,7 @@ def training_data_args(): # ! added by Ziyao: new specification style for data doc_auto_prob_style = 'Determine the probability of systems automatically. The method is assigned by this key and can be\n\n\ - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems()\n\n\ - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system\n\n\ -- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' +- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' doc_sys_probs = ( "A list of float if specified. " "Should be of the same length as `systems`, " @@ -2551,7 +2551,7 @@ def validation_data_args(): # ! added by Ziyao: new specification style for dat doc_auto_prob_style = 'Determine the probability of systems automatically. The method is assigned by this key and can be\n\n\ - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems()\n\n\ - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system\n\n\ -- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' +- "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system.' 
doc_sys_probs = ( "A list of float if specified. " "Should be of the same length as `systems`, " @@ -2664,7 +2664,7 @@ def training_args( "doing least square on the errors to add the target shift on the bias." ) doc_disp_training = "Displaying verbose information during training." - doc_time_training = "Timing durining training." + doc_time_training = "Timing during training." doc_profiling = "Export the profiling results to the Chrome JSON file for performance analysis, driven by the legacy TensorFlow profiling API or PyTorch Profiler. The output file will be saved to `profiling_file`." doc_profiling_file = "Output file for profiling." doc_enable_profiler = "Export the profiling results to the TensorBoard log for performance analysis, driven by TensorFlow Profiler (available in TensorFlow 2.3) or PyTorch Profiler. The log will be saved to `tensorboard_log_dir`." diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 0394993854..259fe93bdb 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -160,7 +160,7 @@ def execute_all( Parameters ---------- callable : Callable - The method should accept *args and **kwargs as input and return the similiar array. + The method should accept *args and **kwargs as input and return the similar array. total_size : int Total size natoms : int diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 72e3d58660..7d58d65578 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -24,7 +24,7 @@ class DeepmdData: """Class for a data system. - It loads data from hard disk, and mantains the data as a `data_dict` + It loads data from hard disk, and maintains the data as a `data_dict` Parameters ---------- @@ -43,7 +43,7 @@ class DeepmdData: trn_all_set [DEPRECATED] Deprecated. Now all sets are trained and tested. sort_atoms : bool - Sort atoms by atom types. Required to enable when the data is directly feeded to + Sort atoms by atom types. Required to enable when the data is directly fed to descriptors except mixed types. """ @@ -196,7 +196,7 @@ def reduce(self, key_out: str, key_in: str): assert key_out not in self.data_dict, "output key should not have been added" assert ( self.data_dict[key_in]["repeat"] == 1 - ), "reduced proerties should not have been repeated" + ), "reduced properties should not have been repeated" self.data_dict[key_out] = { "ndof": self.data_dict[key_in]["ndof"], diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 2b5fb6e6db..03a399106f 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -91,12 +91,12 @@ def __init__( - "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems() - "prob_sys_size" : the probability of a system is proportional to the number of batches in the system - "prob_sys_size;stt_idx:end_idx:weight;stt_idx:end_idx:weight;..." : - the list of systems is devided into blocks. A block is specified by `stt_idx:end_idx:weight`, + the list of systems is divided into blocks. A block is specified by `stt_idx:end_idx:weight`, where `stt_idx` is the starting index of the system, `end_idx` is then ending (not including) index of the system, the probabilities of the systems in this block sums up to `weight`, and the relatively probabilities within this block is proportional to the number of batches in the system. sort_atoms : bool - Sort atoms by atom types. Required to enable when the data is directly feeded to + Sort atoms by atom types. 
Required to enable when the data is directly fed to descriptors except mixed types. """ # init data @@ -184,7 +184,7 @@ def __init__( # ! altered by Marián Rynik # test size # now test size can be set as a percentage of systems data or test size - # can be set for each system individualy in the same manner as batch + # can be set for each system individually in the same manner as batch # size. This enables one to use systems with diverse number of # structures and different number of atoms. self.test_size = test_size @@ -277,7 +277,7 @@ def add_dict(self, adict: dict[str, dict[str, Any]]) -> None: "repeat": repeat, } - For the explaination of the keys see `add` + For the explanation of the keys see `add` """ for kk in adict: self.add( @@ -759,7 +759,7 @@ def process_systems(systems: Union[str, list[str]]) -> list[str]: msg = "cannot find valid a data system" log.fatal(msg) raise OSError(msg, help_msg) - # rougly check all items in systems are valid + # roughly check all items in systems are valid for ii in systems: ii = DPPath(ii) if not ii.is_dir(): diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index 99c7edf284..e33e07cee7 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -237,7 +237,7 @@ def make_econf_embedding( def transform_to_spin_rep(res: dict[str, np.ndarray]) -> dict[str, np.ndarray]: - """Tranform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" + """Transform electron occupation of 0/1/2 to -1,-1/-1,1/1,1.""" ret = {} def transform(ii): diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index bc765645dc..4d0d788f8b 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -21,7 +21,7 @@ def compute_stats_from_redu( """Compute the output statistics. Given the reduced output value and the number of atoms for each atom, - compute the least-squares solution as the atomic output bais and std. + compute the least-squares solution as the atomic output bias and std. Parameters ---------- @@ -93,7 +93,7 @@ def compute_stats_from_atomic( """Compute the output statistics. Given the output value and the type of atoms, - compute the atomic output bais and std. + compute the atomic output bias and std. Parameters ---------- diff --git a/deepmd/utils/summary.py b/deepmd/utils/summary.py index e2118bf7e0..a35dd4db93 100644 --- a/deepmd/utils/summary.py +++ b/deepmd/utils/summary.py @@ -48,7 +48,7 @@ class SummaryPrinter(ABC): BUILD: ClassVar = { "installed to": "\n".join(deepmd.__path__), "source": GLOBAL_CONFIG["git_summ"], - "source brach": GLOBAL_CONFIG["git_branch"], + "source branch": GLOBAL_CONFIG["git_branch"], "source commit": GLOBAL_CONFIG["git_hash"], "source commit at": GLOBAL_CONFIG["git_date"], "use float prec": global_float_prec, diff --git a/deepmd/utils/weight_avg.py b/deepmd/utils/weight_avg.py index 7c75d18e68..8328be5fcf 100644 --- a/deepmd/utils/weight_avg.py +++ b/deepmd/utils/weight_avg.py @@ -7,7 +7,7 @@ def weighted_average(errors: list[dict[str, tuple[float, float]]]) -> dict: - """Compute wighted average of prediction errors (MAE or RMSE) for model. + """Compute weighted average of prediction errors (MAE or RMSE) for model. Parameters ---------- diff --git a/doc/README b/doc/README index 2f4ce66792..728481df15 100644 --- a/doc/README +++ b/doc/README @@ -1 +1 @@ -To run the HTML documention build, doxygen have to be installed. +To run the HTML documentation build, doxygen has to be installed. 
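The `compute_stats_from_redu` docstring touched above describes a plain least-squares fit: the per-frame atom-type counts form the design matrix, and one bias per atom type is solved for. A minimal NumPy sketch of that idea, with toy numbers invented here for illustration (this is not the library's implementation):

```python
import numpy as np

# Toy data: 4 frames, 2 atom types; rows are frames, columns are
# per-type atom counts. All values below are made up.
natoms = np.array([[3, 1], [2, 2], [4, 0], [1, 3]], dtype=np.float64)
true_bias = np.array([1.5, -0.7])
rng = np.random.default_rng(0)
output_redu = natoms @ true_bias + rng.normal(scale=1e-3, size=4)

# rcond plays the same role as the `rcond` option documented in the
# argcheck.py hunks: it conditions the least-squares solve.
bias, *_ = np.linalg.lstsq(natoms, output_redu, rcond=1e-3)
print(bias)  # recovers approximately [1.5, -0.7]
```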
diff --git a/doc/development/coding-conventions.rst b/doc/development/coding-conventions.rst index bf186d1231..4f82b34a60 100644 --- a/doc/development/coding-conventions.rst +++ b/doc/development/coding-conventions.rst @@ -72,7 +72,7 @@ Conventions`_ and `Typing Conventions`_ PEPs, clarified and extended as follows: f"something {'this' if x else 'that'}" -* Use f-strings ``s = f"{x:.2f}"`` instead of old style formating with ``"%f" % x``. +* Use f-strings ``s = f"{x:.2f}"`` instead of old style formatting with ``"%f" % x``. string format method ``"{x:.2f}".format()`` may be used sparsely where it is more convenient than f-strings. diff --git a/doc/development/create-a-model-pt.md b/doc/development/create-a-model-pt.md index 257dd8a25d..875067e2b8 100644 --- a/doc/development/create-a-model-pt.md +++ b/doc/development/create-a-model-pt.md @@ -6,7 +6,7 @@ In the following context, we use the PyTorch backend as the example, while it also applies to other backends listed above. ::: -If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainner, etc, you may want to read this section. +If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainer, etc, you may want to read this section. To incorporate your custom model you'll need to: diff --git a/doc/development/create-a-model-tf.md b/doc/development/create-a-model-tf.md index 95a2f66f23..cc7ad1999d 100644 --- a/doc/development/create-a-model-tf.md +++ b/doc/development/create-a-model-tf.md @@ -1,6 +1,6 @@ # Create a model in TensorFlow {{ tensorflow_icon }} -If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainner, etc, you may want to read this section. +If you'd like to create a new model that isn't covered by the existing DeePMD-kit library, but reuse DeePMD-kit's other efficient modules such as data processing, trainer, etc, you may want to read this section. To incorporate your custom model you'll need to: diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index 0c9563b9e9..1ddb6f5fce 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -454,7 +454,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Checke dargs version and Install\n", + "# Check dargs version and Install\n", "!pip show dargs || pip install --upgrade dargs" ] }, @@ -523,7 +523,7 @@ " color: #bbbbff;\n", "}\n", "\n", - "
{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.list[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descritpor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.list[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- list[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.list[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.list[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definitio of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" + "
{
  \"_comment\": \"that's all\",
  \"model\"model:
type: dict
: {
    \"type_map\"type_map:
type: typing.list[str], optional
A list of strings. Give the name to each type of atoms. It is noted that the number of atom type of training system must be less than 128 in a GPU environment. If not given, type.raw in each system should use the same type indexes, and type_map.raw will take no effect.
: [
     \"H\",
     \"C\"
    ],

    \"descriptor\"descriptor:
type: dict
The descriptor of atomic environment.
: {
      \"type\"type:
type: str
The type of the descriptor. See explanation below.
- loc_frame: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.
- se_e2_a: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.
- se_e2_r: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.
- se_e3: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.
- se_a_tpe: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.
- se_atten: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism will be used by this descriptor.
- se_atten_v2: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Attention mechanism with new modifications will be used by this descriptor.
- se_a_mask: Used by the smooth edition of Deep Potential. It can accept a variable number of atoms in a frame (Non-PBC system). aparam are required as an indicator matrix for the real/virtual sign of input atoms.
- hybrid: Concatenate of a list of descriptors as a new descriptor.
: \"se_e2_a\",
      \"sel\"sel:
type: str | typing.list[int], optional, default: auto
This parameter set the number of selected neighbors for each type of atom. It can be:
- list[int]. The length of the list should be the same as the number of atom types in the system. sel[i] gives the selected number of type-i neighbors. sel[i] is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius. It is noted that the total sel value must be less than 4096 in a GPU environment.
- str. Can be \"auto:factor\" or \"auto\". \"factor\" is a float number larger than 1. This option will automatically determine the sel. In detail it counts the maximal number of neighbors with in the cutoff radius for each type of neighbor, then multiply the maximum by the \"factor\". Finally the number is wraped up to 4 divisible. The option \"auto\" is equivalent to \"auto:1.1\".
: \"auto\",
      \"rcut_smth\"rcut_smth:
type: float, optional, default: 0.5
Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth
: 0.5,
      \"rcut\"rcut:
type: float, optional, default: 6.0
The cut-off radius.
: 6.0,
      \"neuron\"neuron:
type: typing.list[int], optional, default: [10, 20, 40]
Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.
: [
       25,
       50,
       100
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: False
Whether to use a \"Timestep\" in the skip connection
: false,
      \"axis_neuron\"axis_neuron:
type: int, optional, default: 4, alias: n_axis_neuron
Size of the submatrix of G (embedding matrix).
: 16,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization
: 1,
      \"_comment\": \" that's all\"
    },
    \"fitting_net\"fitting_net:
type: dict
The fitting of physical properties.
: {
      \"neuron\"neuron:
type: typing.list[int], optional, default: [120, 120, 120], alias: n_neuron
The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built.
: [
       240,
       240,
       240
      ],

      \"resnet_dt\"resnet_dt:
type: bool, optional, default: True
Whether to use a \"Timestep\" in the skip connection
: true,
      \"seed\"seed:
type: NoneType | int, optional
Random seed for parameter initialization of the fitting net
: 1,
      \"_comment\": \" that's all\"
    },
    \"_comment\": \" that's all\"
  },
  \"learning_rate\"learning_rate:
type: dict, optional
The definition of learning rate
: {
    \"type\"type:
type: str, default: exp
The type of the learning rate.
: \"exp\",
    \"decay_steps\"decay_steps:
type: int, optional, default: 5000
The learning rate is decaying every this number of training steps.
: 50,
    \"start_lr\"start_lr:
type: float, optional, default: 0.001
The learning rate at the start of the training.
: 0.001,
    \"stop_lr\"stop_lr:
type: float, optional, default: 1e-08
The desired learning rate at the end of the training.
: 3.51e-08,
    \"_comment\": \"that's all\"
  },
  \"loss\"loss:
type: dict, optional
The definition of loss function. The loss type should be set to tensor, ener or left unset.
: {
    \"type\"type:
type: str, default: ener
The type of the loss. When the fitting type is ener, the loss type should be set to ener or left unset. When the fitting type is dipole or polar, the loss type should be set to tensor.
: \"ener\",
    \"start_pref_e\"start_pref_e:
type: float | int, optional, default: 0.02
The prefactor of energy loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the energy label should be provided by file energy.npy in each data system. If both start_pref_e and limit_pref_e are set to 0, then the energy will be ignored.
: 0.02,
    \"limit_pref_e\"limit_pref_e:
type: float | int, optional, default: 1.0
The prefactor of energy loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_f\"start_pref_f:
type: float | int, optional, default: 1000
The prefactor of force loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the force label should be provided by file force.npy in each data system. If both start_pref_f and limit_pref_f are set to 0, then the force will be ignored.
: 1000,
    \"limit_pref_f\"limit_pref_f:
type: float | int, optional, default: 1.0
The prefactor of force loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 1,
    \"start_pref_v\"start_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the start of the training. Should be larger than or equal to 0. If set to none-zero value, the virial label should be provided by file virial.npy in each data system. If both start_pref_v and limit_pref_v are set to 0, then the virial will be ignored.
: 0,
    \"limit_pref_v\"limit_pref_v:
type: float | int, optional, default: 0.0
The prefactor of virial loss at the limit of the training, Should be larger than or equal to 0. i.e. the training step goes to infinity.
: 0,
    \"_comment\": \" that's all\"
  },
  \"training\"training:
type: dict
The training options.
: {
    \"training_data\"training_data:
type: dict, optional
Configurations of training data.
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for training. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/training_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
- string \"mixed:N\": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.
If MPI is used, the value should be considered as the batch size per task.
: \"auto\",
      \"_comment\": \"that's all\"
    },
    \"validation_data\"validation_data:
type: NoneType | dict, optional, default: None
Configurations of validation data. Similar to that of training data, except that a numb_btch argument may be configured
: {
      \"systems\"systems:
type: str | typing.list[str]
The data systems for validation. This key can be provided with a list that specifies the systems, or be provided with a string by which the prefix of all systems are given and the list of the systems is automatically generated.
: [
       \"../00.data/validation_data\"
      ],

      \"batch_size\"batch_size:
type: str | typing.list[int] | int, optional, default: auto
This key can be
- list: the length of which is the same as the systems _. The batch size of each system is given by the elements of the list.
- int: all systems _ use the same batch size.
- string \"auto\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.
- string \"auto:N\": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.
: \"auto\",
      \"numb_btch\"numb_btch:
type: int, optional, default: 1, alias: numb_batch
An integer that specifies the number of batches to be sampled for each validation period.
: 1,
      \"_comment\": \"that's all\"
    },
    \"numb_steps\"numb_steps:
type: int, alias: stop_batch
Number of training batch. Each training uses one batch of data.
: 10000,
    \"seed\"seed:
type: NoneType | int, optional
The random seed for getting frames from the training data set.
: 10,
    \"disp_file\"disp_file:
type: str, optional, default: lcurve.out
The file for printing learning curve.
: \"lcurve.out\",
    \"disp_freq\"disp_freq:
type: int, optional, default: 1000
The frequency of printing learning curve.
: 200,
    \"save_freq\"save_freq:
type: int, optional, default: 1000
The frequency of saving check point.
: 1000,
    \"_comment\": \"that's all\"
  }
}
" ], "text/plain": [ "" @@ -682,7 +682,7 @@ "DEEPMD INFO See https://deepmd.rtfd.io/credits/ for details.\n", "DEEPMD INFO installed to: /root/miniconda3/envs/deepmd\n", "DEEPMD INFO source : v2.2.7\n", - "DEEPMD INFO source brach: HEAD\n", + "DEEPMD INFO source branch: HEAD\n", "DEEPMD INFO source commit: 839f4fe7\n", "DEEPMD INFO source commit at: 2023-10-27 21:10:24 +0800\n", "DEEPMD INFO build float prec: double\n", @@ -1050,7 +1050,7 @@ "DEEPMD INFO See https://deepmd.rtfd.io/credits/ for details.\n", "DEEPMD INFO installed to: /root/miniconda3/envs/deepmd\n", "DEEPMD INFO source : v2.2.7\n", - "DEEPMD INFO source brach: HEAD\n", + "DEEPMD INFO source branch: HEAD\n", "DEEPMD INFO source commit: 839f4fe7\n", "DEEPMD INFO source commit at: 2023-10-27 21:10:24 +0800\n", "DEEPMD INFO build float prec: double\n", diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 3f65375865..07239cd3b7 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -319,7 +319,7 @@ pip install -U cmake You must enable at least one backend. If you enable two or more backends, these backend libraries must be built in a compatible way, e.g. using the same `_GLIBCXX_USE_CXX11_ABI` flag. -We recommend using [conda pacakges](https://docs.deepmodeling.org/faq/conda.html) from [conda-forge](https://conda-forge.org), which are usually compatible to each other. +We recommend using [conda packages](https://docs.deepmodeling.org/faq/conda.html) from [conda-forge](https://conda-forge.org), which are usually compatible to each other. ::::{tab-set} @@ -427,7 +427,7 @@ See also [ROCm documentation](https://rocm.docs.amd.com/en/latest/conceptual/cma **Type**: `PATH` -Only neccessary for using [LAMMPS plugin mode](./install-lammps.md#install-lammps-plugin-mode). +Only necessary for using [LAMMPS plugin mode](./install-lammps.md#install-lammps-plugin-mode). The path to the [LAMMPS source code](install-lammps.md). LAMMPS 8Apr2021 or later is supported. If not assigned, the plugin mode will not be enabled. diff --git a/doc/install/install-tf.2.12.md b/doc/install/install-tf.2.12.md index 8523345d3d..ab6a9ed00a 100644 --- a/doc/install/install-tf.2.12.md +++ b/doc/install/install-tf.2.12.md @@ -2,7 +2,7 @@ TensorFlow's C++ interface will be compiled from the source code. In this manual, we install TensorFlow 2.12.0. It is noted that the source code of TensorFlow 2.12.0 uses C++ 17, so one needs a C++ compiler that supports C++ 17. -Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be lanuched to use [bazel](https://github.com/bazelbuild/bazel). +Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be launched to use [bazel](https://github.com/bazelbuild/bazel). ```bash wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 -O /some/workspace/bazel/bin/bazel diff --git a/doc/install/install-tf.2.8.md b/doc/install/install-tf.2.8.md index 4145ba01d1..5e9057492b 100644 --- a/doc/install/install-tf.2.8.md +++ b/doc/install/install-tf.2.8.md @@ -1,6 +1,6 @@ # Install TensorFlow's C++ interface -TensorFlow's C++ interface will be compiled from the source code. Firstly one installs Bazel. [bazelisk](https://github.com/bazelbuild/bazelisk) can be lanuched to use [bazel](https://github.com/bazelbuild/bazel). +TensorFlow's C++ interface will be compiled from the source code. Firstly one installs Bazel. 
[bazelisk](https://github.com/bazelbuild/bazelisk) can be used to launch [bazel](https://github.com/bazelbuild/bazel). ```bash wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 -O /some/workspace/bazel/bin/bazel diff --git a/doc/model/dplr.md b/doc/model/dplr.md index 91c2251346..838350e024 100644 --- a/doc/model/dplr.md +++ b/doc/model/dplr.md @@ -168,7 +168,7 @@ pair_style deepmd ener.pb pair_coeff * * bond_style zero bond_coeff * special_bonds lj/coul 1 1 1 angle no ``` Type 1 and 2 (O and H) are `real_atom`s, while type 3 (WCs) are `virtual_atom`s. The model file `ener.pb` stores both the DW and DPLR models, so the position of WCs and the energy can be inferred from it. A virtual bond type is specified by `bond_style zero`. The `special_bonds` command switches off the exclusion of intramolecular interactions. diff --git a/doc/model/dprc.md b/doc/model/dprc.md index d9ce24b600..9f3eee244d 100644 --- a/doc/model/dprc.md +++ b/doc/model/dprc.md @@ -66,7 +66,7 @@ In a DPRc model, QM atoms and MM atoms have different atom types. Assuming we ha "type_map": ["C", "H", "HW", "O", "OW", "P"] ``` -As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\text{QM/MM}$ within the cutoff, so we use a hybrid descriptor to describe them separatedly: +As described in the paper, the DPRc model only corrects $E_\text{QM}$ and $E_\text{QM/MM}$ within the cutoff, so we use a hybrid descriptor to describe them separately: ::::{tab-set} diff --git a/doc/model/train-energy-spin.md b/doc/model/train-energy-spin.md index ec169892f2..eda4ffa835 100644 --- a/doc/model/train-energy-spin.md +++ b/doc/model/train-energy-spin.md @@ -145,7 +145,7 @@ We list the details about spin system data format in TensorFlow backend: ### Spin data format in PyTorch/DP -In the PyTorch backend, spin and magnetic forces are listed in seperate files, and the data format may contain the following files: +In the PyTorch backend, spin and magnetic forces are listed in separate files, and the data format may contain the following files: ``` type.raw diff --git a/doc/model/train-se-a-mask.md b/doc/model/train-se-a-mask.md index 69f344b138..93edfc999e 100644 --- a/doc/model/train-se-a-mask.md +++ b/doc/model/train-se-a-mask.md @@ -64,7 +64,7 @@ To make the `aparam.npy` used for descriptor `se_a_mask`, two variables in `fitt ``` - `neuron`, `resnet_dt` and `seed` are the same as the {ref}`fitting_net ` section for fitting energy. -- {ref}`numb_aparam ` gives the dimesion of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. +- {ref}`numb_aparam ` gives the dimension of the `aparam.npy` file. In this example, it is set to 1 and stores the real/virtual sign of the atoms. For real/virtual atoms, the corresponding sign in `aparam.npy` is set to 1/0. - {ref}`use_aparam_as_mask ` is set to `true` to use the `aparam.npy` as the mask of the atoms in the descriptor `se_a_mask`. Finally, to make a reasonable fitting task with `se_a_mask` descriptor for DP/MM simulations, the loss function with `se_a_mask` is designed to include the atomic forces difference in specific atoms of the input particles only. 
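To make the DPLR wiring in the doc/model/dplr.md hunk above concrete, here is a minimal sketch of the same setup driven from the LAMMPS Python interface (the same calls appear in source/lmp/tests/test_dplr.py later in this patch). The data file `water.data` is a hypothetical placeholder; `ener.pb` and the type 1/3 association come from the doc and are assumed, not verified here.

```python
# Minimal DPLR sketch, assuming the `lammps` Python module and a water
# system with O/H as real atoms (types 1, 2) and WCs as virtual type 3.
from lammps import lammps

lmp = lammps()
lmp.command("units metal")
lmp.command("atom_style full")
lmp.command("read_data water.data")            # hypothetical data file
lmp.command("pair_style deepmd ener.pb")       # ener.pb stores DW + DPLR models
lmp.command("pair_coeff * *")
lmp.command("bond_style zero")                 # virtual bonds to the WCs
lmp.command("bond_coeff *")
# keep intramolecular interactions; lj/coul is the LAMMPS keyword
lmp.command("special_bonds lj/coul 1 1 1 angle no")
lmp.command("kspace_style pppm/dplr 1e-5")
lmp.command("fix 0 all dplr model ener.pb type_associate 1 3 bond_type 1")
```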
diff --git a/doc/nvnmd/nvnmd.md b/doc/nvnmd/nvnmd.md index c415b275ec..279236ec96 100644 --- a/doc/nvnmd/nvnmd.md +++ b/doc/nvnmd/nvnmd.md @@ -78,7 +78,7 @@ where items are defined as: | --------- | --------------------------------------------------------------------- | ---------------------------------------------------------------------------- | | version | the version of network structure | 0 or 1 | | max_nnei | the maximum number of neighbors that do not distinguish element types | 128 or 256 | -| net_size | the size of nueral network | 128 | +| net_size | the size of the neural network | 128 | | sel | the number of neighbors | version 0: integer list of lengths 1 to 4 are acceptable; version 1: integer | | rcut | the cutoff radial | (0, 8.0] | | rcut_smth | the smooth cutoff parameter | (0, 8.0] | @@ -162,7 +162,7 @@ where items are defined as: | Item | Mean | Optional Value | | ---------- | --------------------------------------------------- | ------------------ | -| seed | the randome seed | a integer | +| seed | the random seed | an integer | | stop_batch | the total training steps | a positive integer | | numb_test | the accuracy is test by using {numb_test} sample | a positive integer | | disp_file | the log file where the training message display | a string | @@ -213,7 +213,7 @@ where the frozen model file to import is given via the `-m` command line flag, t # Running MD in Bohrium -After CNN and QNN training, you can upload the ML model to our online NVNMD system and run MD there through Bohrium (https://bohrium.dp.tech). Bohrium is a research platfrom designed for AI for Science Era. For more information, please refer to [Bohrium Introduction](https://bohrium-doc.dp.tech/en/docs/WhatIsBohrium/). +After CNN and QNN training, you can upload the ML model to our online NVNMD system and run MD there through Bohrium (https://bohrium.dp.tech). Bohrium is a research platform designed for the AI for Science Era. For more information, please refer to [Bohrium Introduction](https://bohrium-doc.dp.tech/en/docs/WhatIsBohrium/). ## Registration diff --git a/doc/third-party/lammps-command.md b/doc/third-party/lammps-command.md index 6a16605bfc..4af3fe5096 100644 --- a/doc/third-party/lammps-command.md +++ b/doc/third-party/lammps-command.md @@ -15,7 +15,7 @@ All units in LAMMPS except `lj` are supported. `lj` is not supported. The most commonly used units are `metal`, since the internal units of distance, energy, force, and charge in DeePMD-kit are `\AA`, `eV`, `eV / \AA`, and `proton charge`, respectively. These units are consistent with the `metal` units in LAMMPS. -If one wants to use other units like `real` or `si`, it is welcome to do so. There is no need to do the unit conversion mannualy. The unit conversion is done automatically by LAMMPS. +If one wants to use other units like `real` or `si`, it is welcome to do so. There is no need to do the unit conversion manually. The unit conversion is done automatically by LAMMPS. The only thing that one needs to take care is the unit of the output of `compute deeptensor/atom`. Working with `metal` units for `compute deeptensor/atom` is totally fine, since there is no unit conversion. For other unit styles, we currently assume that the output of the `compute deeptensor/atom` command has the unit of distance and have applied the unit conversion factor of distance. If a user wants to infer quantities with units other than distance, the user is encouraged to open a GitHub feature request, so that the unit conversion factor can be added. 
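Pulling the NVNMD parameter tables above together, here is a sketch of a matching configuration expressed as a Python dict. The key names and acceptable values are taken from the tables; the concrete values and the enclosing file structure are illustrative assumptions only.

```python
# Illustrative NVNMD model section built from the parameter table above.
nvnmd_section = {
    "version": 0,        # network structure version: 0 or 1
    "max_nnei": 128,     # maximum number of neighbors: 128 or 256
    "net_size": 128,     # size of the neural network
    "sel": [60, 60],     # number of neighbors (integer list for version 0)
    "rcut": 6.0,         # cutoff radius, in (0, 8.0]
    "rcut_smth": 0.5,    # smooth cutoff parameter, in (0, 8.0]
}
```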
@@ -95,7 +95,7 @@ Evaluate the interaction of the system by using [Deep Potential][DP] or [Deep Po This pair style takes the deep potential defined in a model file that usually has the .pb extension. The model can be trained and frozen by package [DeePMD-kit](https://github.com/deepmodeling/deepmd-kit), which can have either double or single float precision interface. -The model deviation evalulates the consistency of the force predictions from multiple models. By default, only the maximal, minimal and average model deviations are output. If the key `atomic` is set, then the model deviation of force prediction of each atom will be output. +The model deviation evaluates the consistency of the force predictions from multiple models. By default, only the maximal, minimal and average model deviations are output. If the key `atomic` is set, then the model deviation of force prediction of each atom will be output. The unit follows [LAMMPS units](#units) and the [scale factor](https://docs.lammps.org/pair_hybrid.html) is not applied. By default, the model deviation is output in absolute value. If the keyword `relative` is set, then the relative model deviation of the force will be output, including values output by the keyword `atomic`. The relative model deviation of the force on atom $i$ is defined by $$E_{f,i} = \frac{|D_{f,i}|}{|f_i| + l},$$ where $D_{f,i}$ is the absolute model deviation of the force on atom $i$, $|f_i|$ is the norm of the force, and $l$ is the level given by the keyword `relative`. diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index 669d1319bd..e50109318d 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -106,7 +106,7 @@ $ dp --pt train input.json --finetune multitask_pretrained.pt --model-branch CHO ``` :::{note} -One can check the available model branches in multi-task pre-trained model by refering to the documentation of the pre-trained model or by using the following command: +One can check the available model branches in a multi-task pre-trained model by referring to the documentation of the pre-trained model or by using the following command: ```bash $ dp --pt show multitask_pretrained.pt model-branch diff --git a/doc/troubleshooting/precision.md b/doc/troubleshooting/precision.md index 56dbd51958..5ebef97122 100644 --- a/doc/troubleshooting/precision.md +++ b/doc/troubleshooting/precision.md @@ -14,7 +14,7 @@ Some common reasons are listed below. The unit of training data should follow what is listed in [data section](../data/system.md). Usually, the package to calculate the training data has different units from those of the DeePMD-kit. It is noted that some software label the energy gradient as forces, instead of the negative energy gradient. -It is neccessary to check them carefully to avoid inconsistent data. +It is necessary to check them carefully to avoid inconsistent data. ### SCF coverage and data accuracy @@ -29,7 +29,7 @@ Here is a checklist for the accuracy of data: ### Enough data If the model performs good on the training data, but has bad accuracy on another data, this means some data space is not covered by the training data. -It can be validated by evaluting the [model deviation](../test/model-deviation.md) with multiple models. +It can be validated by evaluating the [model deviation](../test/model-deviation.md) with multiple models. If the model deviation of these data is high for some data, try to collect more data using [DP-GEN](../third-party/out-of-deepmd-kit.md#dp-gen). 
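For reference, a hedged numpy sketch of the force model deviation described above: the spread of force predictions across an ensemble of models, optionally normalized as with the `relative` keyword. The function name, array shapes, and the `relative` argument are illustrative assumptions, not the package's API.

```python
# Sketch: per-atom force model deviation over an ensemble of models.
import numpy as np

def force_model_deviation(forces, relative=None):
    """forces: (n_models, n_atoms, 3) array of per-model force predictions."""
    mean_f = forces.mean(axis=0)  # ensemble mean force, (n_atoms, 3)
    # per-atom deviation: sqrt of the model-averaged squared distance to the mean
    dev = np.sqrt(((forces - mean_f) ** 2).sum(axis=2).mean(axis=0))
    if relative is not None:
        # relative deviation: divide by |f_i| plus the given level l
        dev = dev / (np.linalg.norm(mean_f, axis=1) + relative)
    return dev.max(), dev.min(), dev.mean()  # maximal, minimal, average
```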
### Values of data diff --git a/examples/data_conv/OUTCAR b/examples/data_conv/OUTCAR index 15041df5f0..3eacd478c4 100644 --- a/examples/data_conv/OUTCAR +++ b/examples/data_conv/OUTCAR @@ -39,7 +39,7 @@ LEXCH = PE EATOM = 432.3788 eV, 31.7789 Ry TITEL = PAW_PBE O_h 06Feb2004 LULTRA = F use ultrasoft PP ? IUNSCR = 1 unscreen: 0-lin 1-nonlin 2-no RPACOR = 0.800 partial core radius @@ -93,7 +93,7 @@ LEXCH = PE EATOM = 12.4884 eV, 0.9179 Ry TITEL = PAW_PBE H_h 06Feb2004 LULTRA = F use ultrasoft PP ? IUNSCR = 0 unscreen: 0-lin 1-nonlin 2-no RPACOR = 0.000 partial core radius @@ -297,8 +297,8 @@ Space group operators: Intra band minimization: WEIMIN = 0.0000 energy-eigenvalue tresh-hold EBREAK = 0.16E-09 absolut break condition DEPER = 0.30 relativ break condition TIME = 0.40 timestep for ELM @@ -339,7 +339,7 @@ Space group operators: KINTER = 0 interpolate to denser k-point grid CSHIFT =0.1000 complex shift for real part using Kramers Kronig OMEGAMAX= -1.0 maximum frequency DEG_THRESHOLD= 0.2000000E-02 threshold for treating states as degnerate RTIME = -0.100 relaxation time in fs (WPLASMAI= 0.000 imaginary part of plasma frequency in eV, 0.658/RTIME) DFIELD = 0.0000000 0.0000000 0.0000000 field for delta impulse in time @@ -2475,7 +2475,7 @@ Space group operators: General timing and accounting informations for this job: ======================================================== Total CPU time used (sec): 877.816 diff --git a/examples/water/dplr/lmp/in.lammps b/examples/water/dplr/lmp/in.lammps index 4b5b09f8b2..fe8d799633 100644 --- a/examples/water/dplr/lmp/in.lammps +++ b/examples/water/dplr/lmp/in.lammps @@ -6,7 +6,7 @@ variable KMESH equal 32 variable THERMO_FREQ equal 2 variable DUMP_FREQ equal 20 variable TEMP equal 300.000000 variable PRES equal 1.000000 variable TAU_T equal 0.100000 variable TAU_P equal 0.500000 @@ -33,7 +33,7 @@ pair_style deepmd ener.pb pair_coeff * * bond_style zero bond_coeff * special_bonds lj/coul 1 1 1 angle no # kspace_style "pppm/dplr" should be used. in addition the # gewald(1/distance) should be set the same as that used in diff --git a/pyproject.toml b/pyproject.toml index 6f0404174d..ea7df3d80f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -319,14 +319,14 @@ legacy_tox_ini = """ # be silenced # W504 - line break after binary operator - there is conflict between W503 and W504 in -# some lintners. One recomends line bread after and one before binary operator so we -# swith W504 off and recomend this coding style: 
+# some linters. One recommends line break after and one before binary operator so we +# switch W504 off and recommend this coding style: # a = (b + -> instead of -> a = (b # c) + c) [tool.autopep8] ignore = "W504" -# D413 - Missing blank line after last section - makes no sense only adds empy lines in +# D413 - Missing blank line after last section - makes no sense, only adds empty lines in # docstrings # D416 - Section name should end with a colon - only applicable to RST type docstrings, # we are using numpy style diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index 71b3dca1ea..805c6514e0 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -4,7 +4,7 @@ project(DeePMD) option(ENABLE_TENSORFLOW "Enable TensorFlow interface" OFF) option(ENABLE_PYTORCH "Enable PyTorch interface" OFF) -option(BUILD_TESTING "Build test and enable converage" OFF) +option(BUILD_TESTING "Build test and enable coverage" OFF) set(DEEPMD_C_ROOT "" CACHE PATH "Path to imported DeePMD-kit C library") @@ -272,7 +272,7 @@ endif() # set op prec set(HIGH_PREC_DEF "HIGH_PREC") -# this defination doesn't work, but leaving it empty will cause error +# this definition doesn't work, but leaving it empty will cause an error set(LOW_PREC_DEF "LOW_PREC") set(HIGH_PREC_VARIANT "") set(LOW_PREC_VARIANT "_low") diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 9d0310d99a..270bc94cc5 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -1286,7 +1286,8 @@ class DeepPotModelDevi { const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; - // memory will be continous for std::vector but not std::vector + // memory will be continuous for std::vector but not + // std::vector std::vector energy_flat(numb_models); std::vector force_flat(static_cast(numb_models) * natoms * 3); @@ -1464,7 +1465,8 @@ class DeepPotModelDevi { const VALUETYPE *box_ = !box.empty() ? 
&box[0] : nullptr; const int *atype_ = &atype[0]; - // memory will be continous for std::vector but not std::vector + // memory will be continuous for std::vector but not + // std::vector std::vector energy_flat(numb_models); std::vector force_flat(static_cast(numb_models) * natoms * 3); @@ -2326,7 +2328,7 @@ void inline read_file_to_string(std::string model, std::string &file_content) { int size; const char *c_file_content = DP_ReadFileToChar2(model.c_str(), &size); if (size < 0) { - // negtive size indicates error + // negative size indicates error std::string error_message = std::string(c_file_content, -size); DP_DeleteChar(c_file_content); throw deepmd::hpp::deepmd_exception(error_message); diff --git a/source/api_c/src/c_api.cc b/source/api_c/src/c_api.cc index 9ed37d04aa..56c5f9720f 100644 --- a/source/api_c/src/c_api.cc +++ b/source/api_c/src/c_api.cc @@ -1586,7 +1586,7 @@ const char* DP_ReadFileToChar2(const char* c_model, int* size) { try { deepmd::read_file_to_string(model, file_content); } catch (deepmd::deepmd_exception& ex) { - // use negtive size to indicate error + // use negative size to indicate error std::string error_message = std::string(ex.what()); *size = -error_message.size(); return string_to_char(error_message); diff --git a/source/api_cc/include/DeepTensor.h b/source/api_cc/include/DeepTensor.h index f355413d80..1ec14e3e7f 100644 --- a/source/api_cc/include/DeepTensor.h +++ b/source/api_cc/include/DeepTensor.h @@ -37,7 +37,7 @@ class DeepTensorBase { const std::string& name_scope = "") = 0; /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -75,7 +75,7 @@ class DeepTensorBase { /** @} */ /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -180,7 +180,8 @@ class DeepTensor { /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -193,7 +194,8 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -210,7 +212,7 @@ class DeepTensor { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. 
* @param[out] virial The component-wise virial of the global tensor, size @@ -229,7 +231,7 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -252,7 +254,7 @@ class DeepTensor { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -277,7 +279,7 @@ class DeepTensor { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size diff --git a/source/api_cc/include/DeepTensorTF.h b/source/api_cc/include/DeepTensorTF.h index 3ca316a29f..3fd8338b1f 100644 --- a/source/api_cc/include/DeepTensorTF.h +++ b/source/api_cc/include/DeepTensorTF.h @@ -39,7 +39,8 @@ class DeepTensorTF : public DeepTensorBase { private: /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -52,7 +53,8 @@ class DeepTensorTF : public DeepTensorBase { const std::vector& box); /** * @brief Evaluate the value by using this model. - * @param[out] value The value to evalute, usually would be the atomic tensor. + * @param[out] value The value to evaluate, usually would be the atomic + *tensor. * @param[in] coord The coordinates of atoms. The array should be of size *natoms x 3. * @param[in] atype The atom types. The list should contain natoms ints. @@ -69,7 +71,7 @@ class DeepTensorTF : public DeepTensorBase { const InputNlist& inlist); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -94,7 +96,7 @@ class DeepTensorTF : public DeepTensorBase { const std::vector& box); /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -163,7 +165,7 @@ class DeepTensorTF : public DeepTensorBase { /** * @brief Evaluate the global tensor and component-wise force and virial. 
- * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size @@ -201,7 +203,7 @@ class DeepTensorTF : public DeepTensorBase { /** @} */ /** * @brief Evaluate the global tensor and component-wise force and virial. - * @param[out] global_tensor The global tensor to evalute. + * @param[out] global_tensor The global tensor to evaluate. * @param[out] force The component-wise force of the global tensor, size odim *x natoms x 3. * @param[out] virial The component-wise virial of the global tensor, size diff --git a/source/api_cc/include/commonTF.h b/source/api_cc/include/commonTF.h index 0c14597e30..003b330308 100644 --- a/source/api_cc/include/commonTF.h +++ b/source/api_cc/include/commonTF.h @@ -63,7 +63,7 @@ int session_get_dtype(tensorflow::Session* session, * @param[in] aparam_ Atom parameters. * @param[in] atommap Atom map. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. */ template @@ -93,7 +93,7 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. */ template @@ -126,7 +126,7 @@ int session_input_tensors( * @param[in] nghost Number of ghost atoms. * @param[in] ago Update the internal neighbour list if ago is 0. * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is + * @param[in] aparam_nall Whether the atomic dimension of atomic parameters is * nall. 
*/ template diff --git a/source/api_cc/src/DataModifierTF.cc b/source/api_cc/src/DataModifierTF.cc index aaa2252955..80cf6120a3 100644 --- a/source/api_cc/src/DataModifierTF.cc +++ b/source/api_cc/src/DataModifierTF.cc @@ -306,7 +306,7 @@ void DipoleChargeModifierTF::compute( dfcorr_2[pairs[ii].first * 3 + dd] += delef_[pairs[ii].second * 3 + dd]; } } - // add ele contrinution + // add ele contribution dfcorr_ = dfcorr_2; for (int ii = 0; ii < nloc_real; ++ii) { int oii = real_bkw_map[ii]; diff --git a/source/api_cc/src/DeepPotTF.cc b/source/api_cc/src/DeepPotTF.cc index d7a7edfb60..a990cecf8d 100644 --- a/source/api_cc/src/DeepPotTF.cc +++ b/source/api_cc/src/DeepPotTF.cc @@ -465,10 +465,10 @@ void DeepPotTF::init(const std::string& model, } if (!model_compatable(model_version)) { throw deepmd::deepmd_exception( - "incompatable model: version " + model_version + + "incompatible model: version " + model_version + " in graph, but version " + global_model_version + " supported " - "See https://deepmd.rtfd.io/compatability/ for details."); + "See https://deepmd.rtfd.io/compatibility/ for details."); } dtype = session_get_dtype(session, "descrpt_attr/rcut"); if (dtype == tensorflow::DT_DOUBLE) { diff --git a/source/api_cc/src/DeepTensorTF.cc b/source/api_cc/src/DeepTensorTF.cc index c69b7c018e..1081473f25 100644 --- a/source/api_cc/src/DeepTensorTF.cc +++ b/source/api_cc/src/DeepTensorTF.cc @@ -65,10 +65,10 @@ void DeepTensorTF::init(const std::string &model, } if (!model_compatable(model_version)) { throw deepmd::deepmd_exception( - "incompatable model: version " + model_version + + "incompatible model: version " + model_version + " in graph, but version " + global_model_version + " supported " - "See https://deepmd.rtfd.io/compatability/ for details."); + "See https://deepmd.rtfd.io/compatibility/ for details."); } dtype = session_get_dtype(session, "descrpt_attr/rcut"); if (dtype == tensorflow::DT_DOUBLE) { diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index baa257d60e..e84517ea7a 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -934,7 +934,7 @@ void deepmd::select_map(std::vector& out, for (int ii = 0; ii < in.size() / stride / nframes; ++ii) { #ifdef DEBUG assert(ii < idx_map.size() && "idx goes over the idx map size"); - assert(idx_map[ii] < out.size() && "mappped idx goes over the out size"); + assert(idx_map[ii] < out.size() && "mapped idx goes over the out size"); #endif if (idx_map[ii] >= 0) { int to_ii = idx_map[ii]; diff --git a/source/cmake/Findtensorflow.cmake b/source/cmake/Findtensorflow.cmake index 6321d4872b..d579af7679 100644 --- a/source/cmake/Findtensorflow.cmake +++ b/source/cmake/Findtensorflow.cmake @@ -366,7 +366,7 @@ elseif(NOT DEFINED OP_CXX_ABI) AND ${CPP_CXX_ABI_COMPILE_RESULT_VAR1}) message( WARNING - "Both _GLIBCXX_USE_CXX11_ABI=0 and 1 work. The reason may be that your C++ compiler (e.g. Red Hat Developer Toolset) does not support the custom cxx11 abi flag. For convience, we set _GLIBCXX_USE_CXX11_ABI=1." + "Both _GLIBCXX_USE_CXX11_ABI=0 and 1 work. The reason may be that your C++ compiler (e.g. Red Hat Developer Toolset) does not support the custom cxx11 abi flag. For convenience, we set _GLIBCXX_USE_CXX11_ABI=1." 
) set(OP_CXX_ABI 1) else() diff --git a/source/cmake/tf_version.cpp b/source/cmake/tf_version.cpp index 390bd4c375..6d09e33493 100644 --- a/source/cmake/tf_version.cpp +++ b/source/cmake/tf_version.cpp @@ -6,7 +6,7 @@ int main(int argc, char* argv[]) { // See // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/public/version.h - // TF_VERSION_STRING has been avaiable since TensorFlow v0.6 + // TF_VERSION_STRING has been available since TensorFlow v0.6 std::cout << TF_VERSION_STRING; return 0; } diff --git a/source/gmx/dp_gmx_patch b/source/gmx/dp_gmx_patch index 4dacaea835..8df3f12cc6 100644 --- a/source/gmx/dp_gmx_patch +++ b/source/gmx/dp_gmx_patch @@ -128,6 +128,6 @@ do v) VERSION=${OPTARG} && DEEPMD_PATCH_ROOT=${DEEPMD_PATCH_ROOT}/${VERSION} ;; p) check_version ${VERSION} && dp_gmx_patch ${GMX_ROOT} ;; r) check_version ${VERSION} && dp_gmx_revert ${GMX_ROOT} ;; - *) echo "- ERROR: Invaild option ${opt}" && exit 1 ;; + *) echo "- ERROR: Invalid option ${opt}" && exit 1 ;; esac done diff --git a/source/gmx/src/gmx_plugin.cpp b/source/gmx/src/gmx_plugin.cpp index 15c4fa84ae..53f02f1fbe 100644 --- a/source/gmx/src/gmx_plugin.cpp +++ b/source/gmx/src/gmx_plugin.cpp @@ -103,7 +103,7 @@ void DeepmdPlugin::init_from_json(char* json_file) { std::cout << "Successfully init plugin!" << std::endl; } else { - std::cerr << "Invaild json file: " << json_file << std::endl; + std::cerr << "Invalid json file: " << json_file << std::endl; exit(1); } } diff --git a/source/install/build_tf.py b/source/install/build_tf.py index a9e1e247cd..0239ebfa46 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -19,7 +19,7 @@ if sys.version_info[0] < 3: # noqa: UP036 raise Exception("Python 3 or a more recent version is required.") -# The script should only rely on the stardard Python libraries. +# The script should only rely on the standard Python libraries. import argparse import hashlib @@ -333,7 +333,7 @@ def copytree2(src: Path, dst: Path, *args, **kwargs): call( [ "/bin/cp", - # archieve, recursive, force, do not create one inside + # archive, recursive, force, do not create one inside # https://stackoverflow.com/a/24486142/9567349 "-arfT", str(tmpdst), @@ -386,7 +386,7 @@ def call(commands: list[str], env={}, **kwargs): # online resources to download RESOURCES = { - # bazelisk is used to warpper bazel + # bazelisk is used to wrap bazel "bazelisk-1.11.0": OnlineResource( "bazel-linux-amd64-1.11.0", "https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64", diff --git a/source/lib/include/ComputeDescriptor.h b/source/lib/include/ComputeDescriptor.h index 7c3eaf4cd2..733cb1ee0c 100644 --- a/source/lib/include/ComputeDescriptor.h +++ b/source/lib/include/ComputeDescriptor.h @@ -501,7 +501,7 @@ void compute_descriptor(std::vector &descrpt_a, if (fmt_nlist_a[nei_iter] < 0) { break; } - // drdS, stored in tranposed form + // drdS, stored in transposed form double dtrdST[4][3]; double *rr = &sel_a_diff[nei_iter][0]; double tr[3]; diff --git a/source/lib/include/coord.h b/source/lib/include/coord.h index 699a90898c..6621d714a5 100644 --- a/source/lib/include/coord.h +++ b/source/lib/include/coord.h @@ -18,7 +18,7 @@ void normalize_coord_cpu(FPTYPE* coord, // in_c, in_t, nloc, mem_nall, rc, region // mem_nall is the size of allocated memory for out_c, out_t, mapping // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all copied coords and types. // i.e. 
nall > mem_nall template @@ -66,7 +66,7 @@ void normalize_coord_gpu(FPTYPE* coord, // box_info mem_nall is the size of allocated memory for out_c, out_t, // mapping // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all copied coords and types. // i.e. nall > mem_nall template diff --git a/source/lib/include/neighbor_list.h b/source/lib/include/neighbor_list.h index b99827b552..95f5cb6174 100644 --- a/source/lib/include/neighbor_list.h +++ b/source/lib/include/neighbor_list.h @@ -126,7 +126,7 @@ int max_numneigh(const InputNlist& to_nlist); // c_cpy, nloc, nall, mem_size, rcut, region // mem_size is the size of allocated memory for jlist. // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all neighbors. // i.e. max_list_size > mem_nall template @@ -190,7 +190,7 @@ void use_nlist_map(int* nlist, // c_cpy, nloc, nall, mem_size, rcut, region // mem_size is the size of allocated memory for jlist. // returns -// 0: succssful +// 0: successful // 1: the memory is not large enough to hold all neighbors. // i.e. max_list_size > mem_nall template diff --git a/source/lib/include/prod_force.h b/source/lib/include/prod_force.h index b5ae68bdce..2d88607131 100644 --- a/source/lib/include/prod_force.h +++ b/source/lib/include/prod_force.h @@ -29,7 +29,7 @@ void prod_force_a_cpu(FPTYPE* force, /** * @brief Produce force from net_deriv and in_deriv. * @details This function is used for multi-threading. Only part of atoms - * are computed in this thread. They will be comptued in parallel. + * are computed in this thread. They will be computed in parallel. * * @tparam FPTYPE float or double * @param[out] force Atomic forces. diff --git a/source/lib/src/gpu/tabulate.cu b/source/lib/src/gpu/tabulate.cu index 71ea17ced5..e0723b81af 100644 --- a/source/lib/src/gpu/tabulate.cu +++ b/source/lib/src/gpu/tabulate.cu @@ -272,7 +272,7 @@ __global__ void tabulate_fusion_se_a_grad_fifth_order_polynomial( bool enable_se_atten = two_embed != nullptr; GPU_DYNAMIC_SHARED_MEM_DECL(int, _data); const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, threadIdx.x / WARP_SIZE, 0); int lane_idx = threadIdx.x % WARP_SIZE; int breakpoint = nnei - 1; @@ -531,7 +531,7 @@ __global__ void tabulate_fusion_se_t_grad_fifth_order_polynomial( const int last_layer_size) { GPU_DYNAMIC_SHARED_MEM_DECL(int, _data); const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, threadIdx.x / WARP_SIZE, 0); int lane_idx = threadIdx.x % WARP_SIZE; FPTYPE* iteratorA = (FPTYPE*)&_data[0]; // dy @@ -678,7 +678,7 @@ __global__ void tabulate_fusion_se_r_grad_fifth_order_polynomial( const int nnei, const int last_layer_size) { const int_64 block_idx = blockIdx.x; // nloc - const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usally 128 here~ + const int thread_idx = threadIdx.x; // KTILE * WARP_SIZE, usually 128 here~ int warp_idx = GpuShuffleSync(0xffffffff, thread_idx / WARP_SIZE, 0); int lane_idx = thread_idx % WARP_SIZE; __syncthreads(); diff --git a/source/lib/tests/test_fmt_nlist.cc b/source/lib/tests/test_fmt_nlist.cc index bc79c92ea6..6cd24b556a 100644 --- a/source/lib/tests/test_fmt_nlist.cc +++ 
b/source/lib/tests/test_fmt_nlist.cc @@ -134,7 +134,7 @@ class TestEncodingDecodingNborInfo : public ::testing::Test { void TearDown() override {} }; -// orginal implementation. copy ghost +// original implementation. copy ghost TEST_F(TestFormatNlist, orig_cpy) { std::vector> nlist_a, nlist_r; std::vector fmt_nlist_a, fmt_nlist_r; @@ -155,7 +155,7 @@ TEST_F(TestFormatNlist, orig_cpy) { } } -// orginal implementation. copy ghost should be equal to pbc +// original implementation. copy ghost should be equal to pbc TEST_F(TestFormatNlist, orig_pbc) { std::vector> nlist_a_1, nlist_r_1; build_nlist(nlist_a_1, nlist_r_1, posi, rc, rc, ncell, region); @@ -174,7 +174,7 @@ TEST_F(TestFormatNlist, orig_pbc) { } } -// orginal implementation. copy ghost should be equal to pbc +// original implementation. copy ghost should be equal to pbc TEST_F(TestFormatNlist, orig_cpy_equal_pbc) { std::vector> nlist_a_0, nlist_r_0; build_nlist(nlist_a_0, nlist_r_0, posi_cpy, nloc, rc, rc, nat_stt, ncell, @@ -251,7 +251,7 @@ TEST_F(TestFormatNlist, cpu) { } } -// orginal implementation. copy ghost +// original implementation. copy ghost TEST_F(TestFormatNlistShortSel, orig_cpy) { std::vector> nlist_a, nlist_r; std::vector fmt_nlist_a, fmt_nlist_r; diff --git a/source/lmp/pppm_dplr.cpp b/source/lmp/pppm_dplr.cpp index 613a9f1c93..e1bdb828af 100644 --- a/source/lmp/pppm_dplr.cpp +++ b/source/lmp/pppm_dplr.cpp @@ -92,7 +92,7 @@ void PPPMDPLR::compute(int eflag, int vflag) { return; } - // convert atoms from box to lamda coords + // convert atoms from box to lambda coords if (triclinic == 0) { boxlo = domain->boxlo; @@ -266,7 +266,7 @@ void PPPMDPLR::compute(int eflag, int vflag) { slabcorr(); } - // convert atoms back from lamda to box coords + // convert atoms back from lambda to box coords if (triclinic) { domain->lamda2x(atom->nlocal); diff --git a/source/lmp/tests/test_dplr.py b/source/lmp/tests/test_dplr.py index 2dd3531894..28a573613f 100644 --- a/source/lmp/tests/test_dplr.py +++ b/source/lmp/tests/test_dplr.py @@ -384,7 +384,7 @@ def test_pair_deepmd_lr(lammps): lammps.pair_coeff("* *") lammps.bond_style("zero") lammps.bond_coeff("*") lammps.special_bonds("lj/coul 1 1 1 angle no") lammps.kspace_style("pppm/dplr 1e-5") lammps.kspace_modify(f"gewald {beta:.2f} diff ik mesh {mesh:d} {mesh:d} {mesh:d}") lammps.fix(f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1") @@ -410,7 +410,7 @@ def test_pair_deepmd_lr_efield_constant(lammps): lammps.pair_coeff("* *") lammps.bond_style("zero") lammps.bond_coeff("*") lammps.special_bonds("lj/coul 1 1 1 angle no") lammps.fix( f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1 efield 0 0 1" ) @@ -446,7 +446,7 @@ def test_pair_deepmd_lr_efield_variable(lammps): lammps.pair_coeff("* *") lammps.bond_style("zero") lammps.bond_coeff("*") lammps.special_bonds("lj/coul 1 1 1 angle no") lammps.fix( f"0 all dplr model {pb_file.resolve()} type_associate 1 3 bond_type 1 efield 0 0 v_EFIELD_Z" ) @@ -481,7 +481,7 @@ def test_min_dplr(lammps): lammps.pair_coeff("* *") lammps.bond_style("zero") lammps.bond_coeff("*") lammps.special_bonds("lj/coul 1 1 1 angle no") lammps.kspace_style("pppm/dplr 1e-5") lammps.kspace_modify(f"gewald {beta:.2f} diff ik mesh {mesh:d} {mesh:d} {mesh:d}") lammps.fix(f"0 all dplr model {pb_file.resolve()} type_associate 1 3 
bond_type 1") @@ -505,7 +505,7 @@ def test_pair_deepmd_lr_type_map(lammps_type_map): lammps_type_map.pair_coeff("* * H O") lammps_type_map.bond_style("zero") lammps_type_map.bond_coeff("*") - lammps_type_map.special_bonds("lj/coul 1 1 1 angle no") + lammps_type_map.special_bonds("lj/could 1 1 1 angle no") lammps_type_map.kspace_style("pppm/dplr 1e-5") lammps_type_map.kspace_modify( f"gewald {beta:.2f} diff ik mesh {mesh:d} {mesh:d} {mesh:d}" @@ -535,7 +535,7 @@ def test_pair_deepmd_lr_si(lammps_si): lammps_si.pair_coeff("* *") lammps_si.bond_style("zero") lammps_si.bond_coeff("*") - lammps_si.special_bonds("lj/coul 1 1 1 angle no") + lammps_si.special_bonds("lj/could 1 1 1 angle no") lammps_si.kspace_style("pppm/dplr 1e-5") lammps_si.kspace_modify( f"gewald {beta / constants.dist_metal2si:.6e} diff ik mesh {mesh:d} {mesh:d} {mesh:d}" diff --git a/source/op/tf/descrpt.cc b/source/op/tf/descrpt.cc index 6362b8d37a..db3b0ca8e5 100644 --- a/source/op/tf/descrpt.cc +++ b/source/op/tf/descrpt.cc @@ -293,7 +293,7 @@ class DescrptOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef.cc b/source/op/tf/descrpt_se_a_ef.cc index 96c953f167..18dda3d8b0 100644 --- a/source/op/tf/descrpt_se_a_ef.cc +++ b/source/op/tf/descrpt_se_a_ef.cc @@ -310,7 +310,7 @@ class DescrptSeAEfOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef_para.cc b/source/op/tf/descrpt_se_a_ef_para.cc index 6dc4442ee6..0f34de3f4f 100644 --- a/source/op/tf/descrpt_se_a_ef_para.cc +++ b/source/op/tf/descrpt_se_a_ef_para.cc @@ -310,7 +310,7 @@ class DescrptSeAEfParaOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_ef_vert.cc b/source/op/tf/descrpt_se_a_ef_vert.cc index 9899e29f06..b4eb30d9ee 100644 --- a/source/op/tf/descrpt_se_a_ef_vert.cc +++ b/source/op/tf/descrpt_se_a_ef_vert.cc @@ -310,7 +310,7 @@ class DescrptSeAEfVertOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } // loop over atoms, compute descriptors for each atom diff --git a/source/op/tf/descrpt_se_a_mask.cc b/source/op/tf/descrpt_se_a_mask.cc index e27ea099ab..28e4a575db 100644 --- a/source/op/tf/descrpt_se_a_mask.cc +++ b/source/op/tf/descrpt_se_a_mask.cc @@ -181,7 +181,7 @@ class DescrptSeAMaskOp : public OpKernel { for (int jj = 0; jj < natoms * 3; ++jj) { rij(kk, ii * natoms * 3 + jj) = 0.; } - // Save the neighbor atoms indicies. + // Save the neighbor atoms indices. 
for (int jj = 0; jj < natoms; jj++) { nlist(kk, ii * natoms + jj) = -1; } @@ -304,7 +304,7 @@ class DescrptSeAMaskOp : public OpKernel { for (int jj = 0; jj < natoms * 3; ++jj) { rij(kk, ii * natoms * 3 + jj) = rij_atom[jj]; } - // Save the neighbor atoms indicies. + // Save the neighbor atoms indices. for (int jj = 0; jj < natoms; ++jj) { nlist(kk, ii * natoms + jj) = sorted_nlist[jj]; } diff --git a/source/op/tf/neighbor_stat.cc b/source/op/tf/neighbor_stat.cc index d2a6b3ab31..26f13b0c84 100644 --- a/source/op/tf/neighbor_stat.cc +++ b/source/op/tf/neighbor_stat.cc @@ -243,7 +243,7 @@ class NeighborStatOp : public OpKernel { } else if (nei_mode == -1) { ::build_nlist(d_nlist_a, d_nlist_r, d_coord3, -1, rcut, NULL); } else { - throw deepmd::deepmd_exception("unknow neighbor mode"); + throw deepmd::deepmd_exception("unknown neighbor mode"); } int MAX_NNEI = 0; diff --git a/source/op/tf/pairwise.cc b/source/op/tf/pairwise.cc index 8ed140a14a..ba1e5e6475 100644 --- a/source/op/tf/pairwise.cc +++ b/source/op/tf/pairwise.cc @@ -78,7 +78,7 @@ class PairwiseIdxOp : public OpKernel { backward_qm_maps.push_back(backward_qm_map); forward_qmmm_maps.push_back(forward_qmmm_map); backward_qmmm_maps.push_back(backward_qmmm_map); - // get the maximun + // get the maximum int nghost_qm_ii = nall_qm_ii - nloc_qm_ii, nghost_qmmm_ii = nall_qmmm_ii - nloc_qmmm_ii; nloc_qm.push_back(nloc_qm_ii); diff --git a/source/op/tf/prod_env_mat_multi_device.cc b/source/op/tf/prod_env_mat_multi_device.cc index 7037a00a6c..e374102224 100644 --- a/source/op/tf/prod_env_mat_multi_device.cc +++ b/source/op/tf/prod_env_mat_multi_device.cc @@ -485,7 +485,7 @@ class ProdEnvMatAOp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must be declared outside the if block, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -791,7 +791,7 @@ class ProdEnvMatROp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must be declared outside the if block, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -1144,7 +1144,7 @@ class ProdEnvMatAMixOp : public OpKernel { } } - // must declar out of if, otherwise the memory will be destroyed! + // must be declared outside the if block, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); diff --git a/source/op/tf/prod_env_mat_multi_device_nvnmd.cc b/source/op/tf/prod_env_mat_multi_device_nvnmd.cc index d9f9275b86..57390077ef 100644 --- a/source/op/tf/prod_env_mat_multi_device_nvnmd.cc +++ b/source/op/tf/prod_env_mat_multi_device_nvnmd.cc @@ -45,7 +45,7 @@ REGISTER_OP("ProdEnvMatANvnmdQuantize") .Output("descrpt_deriv: T") .Output("rij: T") .Output("nlist: int32"); -// only sel_a and rcut_r uesd. +// only sel_a and rcut_r used. // ProdEnvMatAMixNvnmd REGISTER_OP("ProdEnvMatAMixNvnmdQuantize") @@ -68,7 +68,7 @@ REGISTER_OP("ProdEnvMatAMixNvnmdQuantize") .Output("nlist: int32") .Output("ntype: int32") .Output("nmask: bool"); -// only sel_a and rcut_r uesd. +// only sel_a and rcut_r used. 
template static int _norm_copy_coord_cpu(std::vector& coord_cpy, @@ -463,7 +463,7 @@ class ProdEnvMatANvnmdQuantizeOp : public OpKernel { const FPTYPE* std = std_tensor.flat().data(); const int* p_type = type_tensor.flat().data(); - // must declar out of if, otherwise the memory will be destroyed! + // must be declared outside the if block, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); @@ -734,7 +734,7 @@ class ProdEnvMatAMixNvnmdQuantizeOp : public OpKernel { } } - // must declar out of if, otherwise the memory will be destroyed! + // must be declared outside the if block, otherwise the memory will be destroyed! Tensor int_temp; Tensor uint64_temp; std::vector tensor_list(7); diff --git a/source/tests/common/dpmodel/test_pairtab_preprocess.py b/source/tests/common/dpmodel/test_pairtab_preprocess.py index da3b9251f7..7f4058dedd 100644 --- a/source/tests/common/dpmodel/test_pairtab_preprocess.py +++ b/source/tests/common/dpmodel/test_pairtab_preprocess.py @@ -72,7 +72,7 @@ def test_preprocess(self): ) # for this test case, the table does not decay to zero at rcut = 0.22, - # in the cubic spline code, we use a fixed size grid, if will be a problem if we introduce variable gird size. + # in the cubic spline code, we use a fixed size grid, it will be a problem if we introduce variable grid size. # we will do post process to overwrite spline coefficient `a3`,`a2`,`a1`,`a0`, to ensure energy decays to `0`. np.testing.assert_allclose( self.tab3.vdata, diff --git a/source/tests/common/test_argument_parser.py b/source/tests/common/test_argument_parser.py index 1404185607..2c67c1f6cb 100644 --- a/source/tests/common/test_argument_parser.py +++ b/source/tests/common/test_argument_parser.py @@ -156,7 +156,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"): namespace = parse_args(cmd_args) except SystemExit as e: raise SystemExit( - f"Encountered expection when parsing arguments ->\n\n" + f"Encountered exception when parsing arguments ->\n\n" f"{buffer.getvalue()}\n" f"passed in arguments were: {cmd_args}\n" f"built from dict {mapping}" @@ -188,7 +188,7 @@ def run_test(self, *, command: str, mapping: "TEST_DICT"): namespace = parse_args(cmd_args) except SystemExit as e: raise SystemExit( - f"Encountered expection when parsing DEFAULT arguments ->\n\n" + f"Encountered exception when parsing DEFAULT arguments ->\n\n" f"{buffer.getvalue()}\n" f"passed in arguments were: {cmd_args}\n" f"built from dict {mapping}" diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index e3bf808978..885662c766 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -500,7 +500,7 @@ def tearDown(self) -> None: def parameterized(*attrs: tuple, **subblock_attrs: tuple) -> Callable: """Parameterized test. - Orginal class will not be actually generated. Avoid inherbiting from it. + Original class will not actually be generated. Avoid inheriting from it. New classes are generated with the name of the original class and the parameters.
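The headers source/lib/include/coord.h and neighbor_list.h earlier in this patch document a return code of 0 on success and 1 when the allocated memory cannot hold all copied coordinates or neighbors, which implies a grow-and-retry loop in the caller. A generic Python sketch of that pattern; `build_list` is a hypothetical stand-in for functions like `copy_coord_cpu` or `build_nlist_cpu`, not the library's API.

```python
# Generic grow-and-retry sketch for the return codes documented above
# (0: successful; 1: the memory is not large enough).
def build_with_retry(build_list, mem_size: int, growth: float = 1.5):
    while True:
        status, result = build_list(mem_size)  # hypothetical builder
        if status == 0:                        # successful
            return result
        mem_size = int(mem_size * growth) + 1  # too small: grow and retry
```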
diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index e2836c7a6c..a4b516ef16 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -90,7 +90,7 @@ def addtional_data(self) -> dict: use_econf_tebd, use_tebd_bias, ) = self.param - # implict argument not input by users + # implicit argument not input by users return { "ntypes": self.ntypes, "padding": padding, diff --git a/source/tests/pt/model/test_descriptor_dpa1.py b/source/tests/pt/model/test_descriptor_dpa1.py index a3d696516a..ddd5dc6c3c 100644 --- a/source/tests/pt/model/test_descriptor_dpa1.py +++ b/source/tests/pt/model/test_descriptor_dpa1.py @@ -249,7 +249,7 @@ def test_descriptor_block(self): coord = self.coord atype = self.atype box = self.cell - # handel type_embedding + # handle type_embedding type_embedding = TypeEmbedNet(ntypes, 8, use_tebd_bias=True).to(env.DEVICE) type_embedding.load_state_dict( torch.load(self.file_type_embed, weights_only=True) diff --git a/source/tests/pt/model/test_dpa1.py b/source/tests/pt/model/test_dpa1.py index d168ceb2ae..4fbbd5c20a 100644 --- a/source/tests/pt/model/test_dpa1.py +++ b/source/tests/pt/model/test_dpa1.py @@ -42,7 +42,7 @@ def test_consistency( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, sm, to, tm, prec, ect in itertools.product( + for idt, sm, to, tm, prec, etc in itertools.product( [False, True], # resnet_dt [False, True], # smooth_type_embedding [False, True], # type_one_side @@ -68,8 +68,8 @@ def test_consistency( smooth_type_embedding=sm, type_one_side=to, tebd_input_mode=tm, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -117,7 +117,7 @@ def test_jit( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, prec, sm, to, tm, ect in itertools.product( + for idt, prec, sm, to, tm, etc in itertools.product( [ False, ], # resnet_dt @@ -145,8 +145,8 @@ def test_jit( smooth_type_embedding=sm, type_one_side=to, tebd_input_mode=tm, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_dpa2.py b/source/tests/pt/model/test_dpa2.py index 2eac49d573..50cb6af744 100644 --- a/source/tests/pt/model/test_dpa2.py +++ b/source/tests/pt/model/test_dpa2.py @@ -64,7 +64,7 @@ def test_consistency( rpz, sm, prec, - ect, + etc, ns, ) in itertools.product( ["concat", "strip"], # repinit_tebd_input_mode @@ -152,8 +152,8 @@ def test_consistency( exclude_types=[], add_tebd_to_repinit_out=False, precision=prec, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ).to(env.DEVICE) @@ -219,7 +219,7 @@ def test_jit( rpz, sm, prec, - ect, + etc, ns, ) in itertools.product( ["concat", "strip"], # repinit_tebd_input_mode @@ -308,8 +308,8 @@ def test_jit( exclude_types=[], add_tebd_to_repinit_out=False, precision=prec, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ).to(env.DEVICE) diff --git a/source/tests/pt/model/test_embedding_net.py 
b/source/tests/pt/model/test_embedding_net.py index 1566eb2416..2cfcaa820e 100644 --- a/source/tests/pt/model/test_embedding_net.py +++ b/source/tests/pt/model/test_embedding_net.py @@ -181,7 +181,7 @@ def test_consistency(self): key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0])) var = dp_vars[key] with torch.no_grad(): - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations param.data.copy_(torch.from_numpy(var)) pt_coord = self.torch_batch["coord"].to(env.DEVICE) diff --git a/source/tests/pt/model/test_fitting_net.py b/source/tests/pt/model/test_fitting_net.py index ecff0d47e6..e08eed4f8b 100644 --- a/source/tests/pt/model/test_fitting_net.py +++ b/source/tests/pt/model/test_fitting_net.py @@ -133,7 +133,7 @@ def test_consistency(self): assert key is not None var = values[key] with torch.no_grad(): - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations param.data.copy_(torch.from_numpy(var)) embedding = torch.from_numpy(self.embedding) embedding = embedding.view(4, -1, self.embedding_width) diff --git a/source/tests/pt/model/test_make_hessian_model.py b/source/tests/pt/model/test_make_hessian_model.py index ef615554ef..df58d115a2 100644 --- a/source/tests/pt/model/test_make_hessian_model.py +++ b/source/tests/pt/model/test_make_hessian_model.py @@ -99,7 +99,7 @@ def test( aparam = torch.rand( [nf, natoms * nap], dtype=dtype, device=env.DEVICE, generator=generator ) - # forward hess and valu models + # forward hess and value models ret_dict0 = self.model_hess.forward_common( coord, atype, box=cell, fparam=fparam, aparam=aparam ) diff --git a/source/tests/pt/model/test_model.py b/source/tests/pt/model/test_model.py index 8fdbdaf413..84f5a113a3 100644 --- a/source/tests/pt/model/test_model.py +++ b/source/tests/pt/model/test_model.py @@ -300,7 +300,7 @@ def test_consistency(self): limit_pref_f=self.limit_pref_f, ) - # Keep statistics consistency between 2 implentations + # Keep statistics consistency between 2 implementations my_em = my_model.get_descriptor() mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4]) stddev = stat_dict["descriptor.stddev"].reshape( @@ -314,7 +314,7 @@ def test_consistency(self): stat_dict["fitting_net.bias_atom_e"], device=DEVICE ) - # Keep parameter value consistency between 2 implentations + # Keep parameter value consistency between 2 implementations for name, param in my_model.named_parameters(): name = name.replace("sea.", "") var_name = torch2tf(name, last_layer_id=len(self.n_neuron)) diff --git a/source/tests/pt/model/test_nlist.py b/source/tests/pt/model/test_nlist.py index c4401b2cdd..7558a2a7d9 100644 --- a/source/tests/pt/model/test_nlist.py +++ b/source/tests/pt/model/test_nlist.py @@ -44,7 +44,7 @@ def setUp(self): self.rcut = 1.01 self.prec = 1e-10 self.nsel = [10, 10] - # genrated by preprocess.build_neighbor_list + # generated by preprocess.build_neighbor_list # ref_nlist, _, _ = legacy_build_neighbor_list( # 2, ecoord[0], eatype[0], # self.rcut, diff --git a/source/tests/pt/model/test_se_atten_v2.py b/source/tests/pt/model/test_se_atten_v2.py index 462b2aca34..b73a848f5c 100644 --- a/source/tests/pt/model/test_se_atten_v2.py +++ b/source/tests/pt/model/test_se_atten_v2.py @@ -42,7 +42,7 @@ def test_consistency( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, to, prec, ect in itertools.product( + for idt, to, prec, etc in itertools.product( 
[False, True], # resnet_dt [False, True], # type_one_side [ @@ -64,8 +64,8 @@ precision=prec, resnet_dt=idt, type_one_side=to, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ).to(env.DEVICE) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) @@ -113,7 +113,7 @@ def test_jit( dstd = rng.normal(size=(self.nt, nnei, 4)) dstd = 0.1 + np.abs(dstd) - for idt, prec, to, ect in itertools.product( + for idt, prec, to, etc in itertools.product( [ False, ], # resnet_dt @@ -135,8 +135,8 @@ precision=prec, resnet_dt=idt, type_one_side=to, - use_econf_tebd=ect, - type_map=["O", "H"] if ect else None, + use_econf_tebd=etc, + type_map=["O", "H"] if etc else None, seed=GLOBAL_SEED, ) dd0.se_atten.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) diff --git a/source/tests/pt/model/test_unused_params.py b/source/tests/pt/model/test_unused_params.py index 3f068d5e5b..98bbe7040e 100644 --- a/source/tests/pt/model/test_unused_params.py +++ b/source/tests/pt/model/test_unused_params.py @@ -38,10 +38,10 @@ def test_unused(self): [True], ): if (not drrd) and (not grrg) and h2: - # skip the case h2 is not envolved + # skip the case h2 is not involved continue if (not grrg) and (not conv): - # skip the case g2 is not envolved + # skip the case g2 is not involved continue model = copy.deepcopy(model_dpa2) model["descriptor"]["repformer"]["nlayers"] = 2 diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index fa9e5c138a..a7fcedcede 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -477,7 +477,7 @@ def test_dp_train(self): trainer.run() state_dict_trained = trainer.wrapper.model.state_dict() - # test fine-tuning using diffferent fitting_net, here using property fitting + # test fine-tuning using different fitting_net, here using property fitting finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt" self.config_property["model"], finetune_links = get_finetune_rules( finetune_model, diff --git a/source/tests/tf/common.py b/source/tests/tf/common.py index 705e9f7faa..2b912c7a10 100644 --- a/source/tests/tf/common.py +++ b/source/tests/tf/common.py @@ -969,7 +969,7 @@ def __init__(self, systems, set_prefix, batch_size, test_size, rcut, run_opt=Non chk_ret = self.data_systems[ii].check_test_size(test_size) if chk_ret is not None: warnings.warn( - "WARNNING: system %s required test size %d is larger than the size %d of the dataset %s" + "WARNING: system %s required test size %d is larger than the size %d of the dataset %s" % (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0]) ) diff --git a/source/tests/tf/test_data_large_batch.py b/source/tests/tf/test_data_large_batch.py index d9bb00de40..63f1cff760 100644 --- a/source/tests/tf/test_data_large_batch.py +++ b/source/tests/tf/test_data_large_batch.py @@ -186,7 +186,7 @@ def test_data_mixed_type(self): sess.run(tf.global_variables_initializer()) [e, f, v] = sess.run([energy, force, virial], feed_dict=feed_dict_test) # print(sess.run(model.type_embedding)) # np.savetxt('tmp.out', sess.run(descrpt.dout, feed_dict = feed_dict_test), fmt='%.10e') # # print(sess.run(model.atype_embed, feed_dict = feed_dict_test)) # print(sess.run(fitting.inputs, feed_dict = feed_dict_test)) # print(sess.run(fitting.outs, feed_dict = feed_dict_test)) 
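The PyTorch tests above exercise every combination of descriptor options by iterating over `itertools.product`. A compact sketch of that sweep pattern; the option names and values here are illustrative, not the exact test parameters.

```python
# Sketch of the combinatorial sweep used in tests like test_dpa1/test_dpa2:
# itertools.product enumerates every combination so each configuration is
# built and checked exactly once.
import itertools

for resnet_dt, type_one_side, precision in itertools.product(
    [False, True],           # resnet_dt
    [False, True],           # type_one_side
    ["float64", "float32"],  # precision
):
    config = dict(
        resnet_dt=resnet_dt, type_one_side=type_one_side, precision=precision
    )
    # build the descriptor under test from `config` and compare outputs
```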
diff --git a/source/tests/tf/test_model_pairtab.py b/source/tests/tf/test_model_pairtab.py index 5caeb0a053..0a09e70430 100644 --- a/source/tests/tf/test_model_pairtab.py +++ b/source/tests/tf/test_model_pairtab.py @@ -42,7 +42,7 @@ def test_model(self): rcut = jdata["model"]["rcut"] def pair_pot(r: float): - # LJ, as exmaple + # LJ, as example return 4 * (1 / r**12 - 1 / r**6) dx = 1e-4
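Note: the pair_pot hunk above belongs to a test that tabulates a Lennard-Jones potential on a fine grid (dx = 1e-4 in the test) for the pairtab model. A sketch of how such a table can be built, with the force column obtained by numerical differentiation; the grid bounds and the column layout are assumptions for illustration:

import numpy as np

def pair_pot(r):
    # LJ with epsilon = sigma = 1, as in the test
    return 4 * (1 / r**12 - 1 / r**6)

dx = 1e-4
rr = np.arange(0.1, 2.0, dx)      # radial grid; bounds illustrative
vv = pair_pot(rr)                 # potential values, vectorized over the grid
ff = -np.gradient(vv, dx)         # force = -dV/dr via central differences
table = np.column_stack([rr, vv]) # columns: r, V(r)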
diff --git a/source/tests/universal/common/cases/atomic_model/utils.py b/source/tests/universal/common/cases/atomic_model/utils.py index bfd2e2cd5f..97a6cf707b 100644 --- a/source/tests/universal/common/cases/atomic_model/utils.py +++ b/source/tests/universal/common/cases/atomic_model/utils.py @@ -40,7 +40,7 @@ class AtomicModelTestCase: expected_has_message_passing: bool """Expected whether having message passing.""" forward_wrapper: Callable[[Any], Any] - """Calss wrapper for forward method.""" + """Class wrapper for forward method.""" aprec_dict: dict[str, Optional[float]] """Dictionary of absolute precision in each test.""" rprec_dict: dict[str, Optional[float]]
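Note: the AtomicModelTestCase fields documented above (aprec_dict / rprec_dict) hold per-test absolute and relative tolerances, with None meaning "use the default". A sketch of how such dictionaries are typically consumed; the helper name and the default tolerance are assumptions, not from the patch:

import numpy as np

def assert_close(key, result, reference, aprec_dict, rprec_dict, default=1e-10):
    # Fall back to the default when a test has no entry or the entry is None.
    atol = aprec_dict.get(key) or default
    rtol = rprec_dict.get(key) or default
    np.testing.assert_allclose(result, reference, rtol=rtol, atol=atol)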