From 4a27dec730274c20dcbff42a11d4e59bb7a2afba Mon Sep 17 00:00:00 2001
From: Duo <50307526+iProzd@users.noreply.github.com>
Date: Wed, 12 Jun 2024 21:02:58 +0800
Subject: [PATCH] feat(pt): consistent fine-tuning with init-model (#3803)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix #3747. Fix #3455.

- Consistent fine-tuning with init-model: in pt, fine-tuning now includes three steps: 1. change model params (for multitask fine-tuning, random fitting and type-related params), 2. init-model, 3. change bias.
- By default, the user input script is used during fine-tuning instead of being overwritten by the one in the pre-trained model. When adding `--use-pretrain-script`, the user can use the script from the pre-trained model.
- Now `type_map` uses the one in the user input instead of being overwritten by the one in the pre-trained model.

Note:
1. After discussion with @wanghan-iapcm, **the behavior of fine-tuning in TF is kept as before**. If needed in the future, it can be implemented then.
2. Fine-tuning using DOSModel in PT needs to be fixed (an issue will be opened; it may be fixed in another PR, cc @anyangml).

## Summary by CodeRabbit

- **New Features**
  - Added support for using model parameters from a pretrained model script.
  - Introduced new methods to handle type-related parameters and fine-tuning configurations.
- **Documentation**
  - Updated documentation to clarify the model section requirements and the new `--use-pretrain-script` option for fine-tuning.
- **Refactor**
  - Simplified and improved the readability of key functions related to model training and fine-tuning.
- **Tests**
  - Added new test methods and utility functions to ensure consistency of type mapping and parameter updates.

---------

Signed-off-by: Duo <50307526+iProzd@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
---
.../dpmodel/atomic_model/base_atomic_model.py | 22 ++ .../dpmodel/atomic_model/dp_atomic_model.py | 18 ++ .../atomic_model/linear_atomic_model.py | 17 + .../atomic_model/make_base_atomic_model.py | 6 + .../atomic_model/pairtab_atomic_model.py | 12 + deepmd/dpmodel/descriptor/descriptor.py | 34 ++ deepmd/dpmodel/descriptor/dpa1.py | 43 ++- deepmd/dpmodel/descriptor/dpa2.py | 70 ++++- deepmd/dpmodel/descriptor/hybrid.py | 42 +++ .../descriptor/make_base_descriptor.py | 24 ++ deepmd/dpmodel/descriptor/se_e2_a.py | 38 ++- deepmd/dpmodel/descriptor/se_r.py | 38 ++- deepmd/dpmodel/descriptor/se_t.py | 38 ++- deepmd/dpmodel/fitting/dipole_fitting.py | 6 +- deepmd/dpmodel/fitting/dos_fitting.py | 4 +- deepmd/dpmodel/fitting/ener_fitting.py | 4 +- deepmd/dpmodel/fitting/general_fitting.py | 37 ++- deepmd/dpmodel/fitting/invar_fitting.py | 6 +- deepmd/dpmodel/fitting/make_base_fitting.py | 15 + .../dpmodel/fitting/polarizability_fitting.py | 37 ++- deepmd/dpmodel/model/make_model.py | 8 + deepmd/dpmodel/model/model.py | 2 + deepmd/dpmodel/utils/type_embed.py | 108 +++++-- deepmd/main.py | 5 + deepmd/pt/entrypoints/main.py | 53 ++-- deepmd/pt/infer/deep_eval.py | 1 - deepmd/pt/infer/inference.py | 1 - .../model/atomic_model/base_atomic_model.py | 36 +++ .../pt/model/atomic_model/dp_atomic_model.py | 19 ++ .../model/atomic_model/linear_atomic_model.py | 17 + .../atomic_model/pairtab_atomic_model.py | 12 + deepmd/pt/model/descriptor/descriptor.py | 38 +++
deepmd/pt/model/descriptor/dpa1.py | 44 ++- deepmd/pt/model/descriptor/dpa2.py | 70 ++++- deepmd/pt/model/descriptor/hybrid.py | 42 +++ deepmd/pt/model/descriptor/se_a.py | 28 +- deepmd/pt/model/descriptor/se_r.py | 28 +- deepmd/pt/model/descriptor/se_t.py | 30 +- deepmd/pt/model/model/__init__.py | 10 +- deepmd/pt/model/model/make_model.py | 13 + deepmd/pt/model/network/network.py | 103 ++++-- deepmd/pt/model/task/__init__.py | 4 + deepmd/pt/model/task/dipole.py | 6 +- deepmd/pt/model/task/dos.py | 4 +- deepmd/pt/model/task/ener.py | 12 +- deepmd/pt/model/task/fitting.py | 39 ++- deepmd/pt/model/task/invar_fitting.py | 6 +- deepmd/pt/model/task/polarizability.py | 42 ++- deepmd/pt/train/training.py | 164 +++++----- deepmd/pt/utils/finetune.py | 157 +++++----- deepmd/pt/utils/utils.py | 2 + deepmd/tf/descriptor/se_a.py | 9 +- deepmd/tf/descriptor/se_atten.py | 9 +- deepmd/tf/descriptor/se_r.py | 9 +- deepmd/tf/descriptor/se_t.py | 9 +- deepmd/tf/fit/dipole.py | 9 +- deepmd/tf/fit/dos.py | 9 +- deepmd/tf/fit/ener.py | 136 +++++++- deepmd/tf/fit/polar.py | 9 +- deepmd/tf/model/model.py | 10 +- deepmd/tf/model/pairwise_dprc.py | 3 +- deepmd/tf/utils/type_embed.py | 29 +- deepmd/utils/finetune.py | 270 ++++++++-------- doc/train/finetuning.md | 18 +- source/tests/common/test_type_index_map.py | 152 +++++++++ .../pt/model/test_atomic_model_atomic_stat.py | 9 + .../pt/model/test_atomic_model_global_stat.py | 9 + .../pt/model/test_linear_atomic_model_stat.py | 17 + source/tests/pt/test_finetune.py | 225 ++++++++++--- source/tests/pt/test_multitask.py | 12 +- source/tests/pt/test_training.py | 33 +- source/tests/universal/common/backend.py | 10 + source/tests/universal/common/cases/cases.py | 13 + .../common/cases/descriptor/utils.py | 295 ++++++++++++++++++ .../common/cases/fitting/__init__.py | 1 + .../universal/common/cases/fitting/fitting.py | 11 + .../universal/common/cases/fitting/utils.py | 193 ++++++++++++ .../universal/common/cases/utils/__init__.py | 1 + .../common/cases/utils/type_embed.py | 11 + .../universal/common/cases/utils/utils.py | 107 +++++++ source/tests/universal/dpmodel/backend.py | 10 + .../dpmodel/descriptor/test_descriptor.py | 35 +++ .../universal/dpmodel/fitting/__init__.py | 1 + .../universal/dpmodel/fitting/test_fitting.py | 65 ++++ .../tests/universal/dpmodel/utils/__init__.py | 1 + .../dpmodel/utils/test_type_embed.py | 20 ++ source/tests/universal/pt/backend.py | 9 + .../pt/descriptor/test_descriptor.py | 35 +++ source/tests/universal/pt/fitting/__init__.py | 1 + .../universal/pt/fitting/test_fitting.py | 65 ++++ source/tests/universal/pt/utils/__init__.py | 1 + .../universal/pt/utils/test_type_embed.py | 24 ++ 92 files changed, 3014 insertions(+), 496 deletions(-) create mode 100644 source/tests/common/test_type_index_map.py create mode 100644 source/tests/universal/common/cases/fitting/__init__.py create mode 100644 source/tests/universal/common/cases/fitting/fitting.py create mode 100644 source/tests/universal/common/cases/fitting/utils.py create mode 100644 source/tests/universal/common/cases/utils/__init__.py create mode 100644 source/tests/universal/common/cases/utils/type_embed.py create mode 100644 source/tests/universal/common/cases/utils/utils.py create mode 100644 source/tests/universal/dpmodel/fitting/__init__.py create mode 100644 source/tests/universal/dpmodel/fitting/test_fitting.py create mode 100644 source/tests/universal/dpmodel/utils/__init__.py create mode 100644 source/tests/universal/dpmodel/utils/test_type_embed.py create mode 100644 
source/tests/universal/pt/fitting/__init__.py create mode 100644 source/tests/universal/pt/fitting/test_fitting.py create mode 100644 source/tests/universal/pt/utils/__init__.py create mode 100644 source/tests/universal/pt/utils/test_type_embed.py diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index c16749405d..0244dc5355 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -20,6 +20,11 @@ AtomExcludeMask, PairExcludeMask, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, + map_pair_exclude_types, +) from .make_base_atomic_model import ( make_base_atomic_model, @@ -113,6 +118,23 @@ def atomic_output_def(self) -> FittingOutputDef: ] ) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.reinit_atom_exclude( + map_atom_exclude_types(self.atom_exclude_types, remap_index) + ) + self.reinit_pair_exclude( + map_pair_exclude_types(self.pair_exclude_types, remap_index) + ) + self.out_bias = self.out_bias[:, remap_index, :] + self.out_std = self.out_std[:, remap_index, :] + def forward_common_atomic( self, extended_coord: np.ndarray, diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index bdff512311..ded716bd15 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -135,6 +135,24 @@ def forward_atomic( ) return ret + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + self.type_map = type_map + self.descriptor.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descriptor + if model_with_new_type_stat is not None + else None, + ) + self.fitting_net.change_type_map(type_map=type_map) + def serialize(self) -> dict: dd = super().serialize() dd.update( diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 07cb6b560e..c923be67b7 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -104,6 +104,23 @@ def get_type_map(self) -> List[str]: """Get the type map.""" return self.type_map + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + for ii, model in enumerate(self.models): + model.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.models[ii] + if model_with_new_type_stat is not None + else None, + ) + def get_model_rcuts(self) -> List[float]: """Get the cut-off radius for each individual models.""" return [model.get_rcut() for model in self.models] diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index 2b47cd81e6..ac6076a8e3 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -140,6 +140,12 @@ def serialize(self) -> dict: def deserialize(cls, data: dict): pass + @abstractmethod + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + pass + def make_atom_mask( self, atype: t_tensor, diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 4d9097a0e9..a75abd9ce2 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -135,6 +135,18 @@ def has_message_passing(self) -> bool: """Returns whether the atomic model has message passing.""" return False + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert type_map == self.type_map, ( + "PairTabAtomicModel does not support changing type map now. " + "This feature is currently not implemented because it would require additional work to change the tab file. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + def serialize(self) -> dict: dd = BaseAtomicModel.serialize(self) dd.update( diff --git a/deepmd/dpmodel/descriptor/descriptor.py b/deepmd/dpmodel/descriptor/descriptor.py index efd804496a..aa9db1e96b 100644 --- a/deepmd/dpmodel/descriptor/descriptor.py +++ b/deepmd/dpmodel/descriptor/descriptor.py @@ -129,3 +129,37 @@ def call( @abstractmethod def has_message_passing(self) -> bool: """Returns whether the descriptor block has message passing.""" + + +def extend_descrpt_stat(des, type_map, des_with_stat=None): + r""" + Extend the statistics of a descriptor block with types from newly provided `type_map`. + + After extending, the type related dimension of the extended statistics will have a length of + `len(old_type_map) + len(type_map)`, where `old_type_map` represents the type map in `des`. + The `get_index_between_two_maps()` function can then be used to correctly select statistics for types + from `old_type_map` or `type_map`. + Positive indices from 0 to `len(old_type_map) - 1` will select old statistics of types in `old_type_map`, + while negative indices from `-len(type_map)` to -1 will select new statistics of types in `type_map`. + + Parameters + ---------- + des : DescriptorBlock + The descriptor block to be extended. + type_map : List[str] + The name of each type of atoms to be extended. + des_with_stat : DescriptorBlock, Optional + The descriptor block has additional statistics of types from newly provided `type_map`. 
+ If None, the default statistics will be used. + Otherwise, the statistics provided in this DescriptorBlock will be used. + + """ + if des_with_stat is not None: + extend_davg = des_with_stat["davg"] + extend_dstd = des_with_stat["dstd"] + else: + extend_shape = [len(type_map), *list(des["davg"].shape[1:])] + extend_davg = np.zeros(extend_shape, dtype=des["davg"].dtype) + extend_dstd = np.ones(extend_shape, dtype=des["dstd"].dtype) + des["davg"] = np.concatenate([des["davg"], extend_davg], axis=0) + des["dstd"] = np.concatenate([des["dstd"], extend_dstd], axis=0) diff --git a/deepmd/dpmodel/descriptor/dpa1.py b/deepmd/dpmodel/descriptor/dpa1.py index ead334fbe0..876062cce6 100644 --- a/deepmd/dpmodel/descriptor/dpa1.py +++ b/deepmd/dpmodel/descriptor/dpa1.py @@ -37,6 +37,10 @@ from deepmd.utils.data_system import ( DeepmdDataSystem, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) from deepmd.utils.path import ( DPPath, ) @@ -49,6 +53,7 @@ ) from .descriptor import ( DescriptorBlock, + extend_descrpt_stat, ) @@ -194,8 +199,6 @@ class DescrptDPA1(NativeOP, BaseDescriptor): Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. - spin (Only support None to keep consistent with other backend references.) (Not used in this version. Not-none option is not implemented.) @@ -327,6 +330,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_atten.get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension.""" ret = self.se_atten.get_dim_out() @@ -382,9 +389,41 @@ def set_stat_mean_and_stddev( mean: np.ndarray, stddev: np.ndarray, ) -> None: + """Update mean and stddev for descriptor.""" self.se_atten.mean = mean self.se_atten.stddev = stddev + def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + """Get mean and stddev for descriptor.""" + return self.se_atten.mean, self.se_atten.stddev + + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_atten + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_atten + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + def call( self, coord_ext, diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index f3e88ddacc..766fe19302 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -32,6 +32,10 @@ from deepmd.utils.data_system import ( DeepmdDataSystem, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) from deepmd.utils.path import ( DPPath, ) @@ -42,6 +46,9 @@ from .base_descriptor import ( BaseDescriptor, ) +from .descriptor import ( + extend_descrpt_stat, +) from .dpa1 import ( DescrptBlockSeAtten, ) @@ -353,7 +360,6 @@ def __init__( Whether to use electronic configuration type embedding. type_map : List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. Returns ------- @@ -501,6 +507,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension of this descriptor.""" ret = self.repformers.dim_out @@ -542,6 +552,47 @@ def share_params(self, base_class, shared_level, resume=False): """ raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + self.exclude_types = map_pair_exclude_types(self.exclude_types, remap_index) + self.ntypes = len(type_map) + repinit = self.repinit + repformers = self.repformers + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + repinit, + type_map, + des_with_stat=model_with_new_type_stat.repinit + if model_with_new_type_stat is not None + else None, + ) + extend_descrpt_stat( + repformers, + type_map, + des_with_stat=model_with_new_type_stat.repformers + if model_with_new_type_stat is not None + else None, + ) + repinit.ntypes = self.ntypes + repformers.ntypes = self.ntypes + repinit.reinit_exclude(self.exclude_types) + repformers.reinit_exclude(self.exclude_types) + repinit["davg"] = repinit["davg"][remap_index] + repinit["dstd"] = repinit["dstd"][remap_index] + repformers["davg"] = repformers["davg"][remap_index] + repformers["dstd"] = repformers["dstd"][remap_index] + @property def dim_out(self): return self.get_dim_out() @@ -555,6 +606,23 @@ def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None) """Update mean and stddev for descriptor elements.""" raise NotImplementedError + def set_stat_mean_and_stddev( + self, + mean: List[np.ndarray], + stddev: List[np.ndarray], + ) -> None: + """Update mean and stddev for descriptor.""" + for ii, descrpt in enumerate([self.repinit, self.repformers]): + descrpt.mean = mean[ii] + descrpt.stddev = stddev[ii] + + def get_stat_mean_and_stddev(self) -> Tuple[List[np.ndarray], List[np.ndarray]]: + """Get mean and stddev for descriptor.""" + return [self.repinit.mean, self.repformers.mean], [ + self.repinit.stddev, + self.repformers.stddev, + ] + def call( self, coord_ext: np.ndarray, diff --git a/deepmd/dpmodel/descriptor/hybrid.py b/deepmd/dpmodel/descriptor/hybrid.py index 6912590317..3b08426b13 100644 --- a/deepmd/dpmodel/descriptor/hybrid.py +++ b/deepmd/dpmodel/descriptor/hybrid.py @@ -124,6 +124,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.descrpt_list[0].get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.descrpt_list[0].get_type_map() + def get_dim_out(self) -> int: """Returns the output dimension.""" return np.sum([descrpt.get_dim_out() for descrpt in self.descrpt_list]).item() @@ -160,11 +164,49 @@ def share_params(self, base_class, shared_level, resume=False): """ raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + for ii, descrpt in enumerate(self.descrpt_list): + descrpt.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descrpt_list[ii] + if model_with_new_type_stat is not None + else None, + ) + def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" for descrpt in self.descrpt_list: descrpt.compute_input_stats(merged, path) + def set_stat_mean_and_stddev( + self, + mean: List[Union[np.ndarray, List[np.ndarray]]], + stddev: List[Union[np.ndarray, List[np.ndarray]]], + ) -> None: + """Update mean and stddev for descriptor.""" + for ii, descrpt in enumerate(self.descrpt_list): + descrpt.set_stat_mean_and_stddev(mean[ii], stddev[ii]) + + def get_stat_mean_and_stddev( + self, + ) -> Tuple[ + List[Union[np.ndarray, List[np.ndarray]]], + List[Union[np.ndarray, List[np.ndarray]]], + ]: + """Get mean and stddev for descriptor.""" + mean_list = [] + stddev_list = [] + for ii, descrpt in enumerate(self.descrpt_list): + mean_item, stddev_item = descrpt.get_stat_mean_and_stddev() + mean_list.append(mean_item) + stddev_list.append(stddev_item) + return mean_list, stddev_list + def call( self, coord_ext, diff --git a/deepmd/dpmodel/descriptor/make_base_descriptor.py b/deepmd/dpmodel/descriptor/make_base_descriptor.py index 328352c7d8..49bf000248 100644 --- a/deepmd/dpmodel/descriptor/make_base_descriptor.py +++ b/deepmd/dpmodel/descriptor/make_base_descriptor.py @@ -78,6 +78,11 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" pass + @abstractmethod + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + pass + @abstractmethod def get_dim_out(self) -> int: """Returns the output descriptor dimension.""" @@ -113,6 +118,25 @@ def share_params(self, base_class, shared_level, resume=False): """ pass + @abstractmethod + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + pass + + @abstractmethod + def set_stat_mean_and_stddev(self, mean, stddev) -> None: + """Update mean and stddev for descriptor.""" + pass + + @abstractmethod + def get_stat_mean_and_stddev(self): + """Get mean and stddev for descriptor.""" + pass + def compute_input_stats( self, merged: Union[Callable[[], List[dict]], List[dict]], diff --git a/deepmd/dpmodel/descriptor/se_e2_a.py b/deepmd/dpmodel/descriptor/se_e2_a.py index d63bda5ab3..504e357aeb 100644 --- a/deepmd/dpmodel/descriptor/se_e2_a.py +++ b/deepmd/dpmodel/descriptor/se_e2_a.py @@ -117,6 +117,8 @@ class DescrptSeA(NativeOP, BaseDescriptor): The precision of the embedding net parameters. Supported options are |PRECISION| spin The deepspin object. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. Not used in this descriptor, only to be compat with input. 
@@ -153,6 +155,7 @@ def __init__( activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, spin: Optional[Any] = None, + type_map: Optional[List[str]] = None, ntypes: Optional[int] = None, # to be compat with input # consistent with argcheck, not used though seed: Optional[int] = None, @@ -176,6 +179,7 @@ def __init__( self.activation_function = activation_function self.precision = precision self.spin = spin + self.type_map = type_map # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) @@ -268,14 +272,43 @@ def share_params(self, base_class, shared_level, resume=False): """ raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_a does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError + def set_stat_mean_and_stddev( + self, + mean: np.ndarray, + stddev: np.ndarray, + ) -> None: + """Update mean and stddev for descriptor.""" + self.davg = mean + self.dstd = stddev + + def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + """Get mean and stddev for descriptor.""" + return self.davg, self.dstd + def cal_g( self, ss, @@ -385,7 +418,7 @@ def serialize(self) -> dict: return { "@class": "Descriptor", "type": "se_e2_a", - "@version": 1, + "@version": 2, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, @@ -407,13 +440,14 @@ def serialize(self) -> dict: "davg": self.davg, "dstd": self.dstd, }, + "type_map": self.type_map, } @classmethod def deserialize(cls, data: dict) -> "DescrptSeA": """Deserialize from dict.""" data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) variables = data.pop("@variables") diff --git a/deepmd/dpmodel/descriptor/se_r.py b/deepmd/dpmodel/descriptor/se_r.py index a0ca23b3b1..938826d16c 100644 --- a/deepmd/dpmodel/descriptor/se_r.py +++ b/deepmd/dpmodel/descriptor/se_r.py @@ -75,6 +75,8 @@ class DescrptSeR(NativeOP, BaseDescriptor): The precision of the embedding net parameters. Supported options are |PRECISION| spin The deepspin object. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. Not used in this descriptor, only to be compat with input. 
@@ -110,6 +112,7 @@ def __init__( activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, spin: Optional[Any] = None, + type_map: Optional[List[str]] = None, ntypes: Optional[int] = None, # to be compat with input # consistent with argcheck, not used though seed: Optional[int] = None, @@ -134,6 +137,7 @@ def __init__( self.activation_function = activation_function self.precision = precision self.spin = spin + self.type_map = type_map self.emask = PairExcludeMask(self.ntypes, self.exclude_types) self.env_protection = env_protection @@ -226,14 +230,43 @@ def share_params(self, base_class, shared_level, resume=False): """ raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_r does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError + def set_stat_mean_and_stddev( + self, + mean: np.ndarray, + stddev: np.ndarray, + ) -> None: + """Update mean and stddev for descriptor.""" + self.davg = mean + self.dstd = stddev + + def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + """Get mean and stddev for descriptor.""" + return self.davg, self.dstd + def cal_g( self, ss, @@ -311,7 +344,7 @@ def serialize(self) -> dict: return { "@class": "Descriptor", "type": "se_r", - "@version": 1, + "@version": 2, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, @@ -332,13 +365,14 @@ def serialize(self) -> dict: "davg": self.davg, "dstd": self.dstd, }, + "type_map": self.type_map, } @classmethod def deserialize(cls, data: dict) -> "DescrptSeR": """Deserialize from dict.""" data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) variables = data.pop("@variables") diff --git a/deepmd/dpmodel/descriptor/se_t.py b/deepmd/dpmodel/descriptor/se_t.py index ef91dabbc4..b91f9a6c6e 100644 --- a/deepmd/dpmodel/descriptor/se_t.py +++ b/deepmd/dpmodel/descriptor/se_t.py @@ -78,6 +78,8 @@ class DescrptSeT(NativeOP, BaseDescriptor): If the weights of embedding net are trainable. seed : int, Optional Random seed for initializing the network parameters. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. ntypes : int Number of element types. Not used in this descriptor, only to be compat with input. 
@@ -97,6 +99,7 @@ def __init__( precision: str = DEFAULT_PRECISION, trainable: bool = True, seed: Optional[int] = None, + type_map: Optional[List[str]] = None, ntypes: Optional[int] = None, # to be compat with input ) -> None: del ntypes @@ -113,6 +116,7 @@ def __init__( self.env_protection = env_protection self.ntypes = len(sel) self.seed = seed + self.type_map = type_map # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) self.trainable = trainable @@ -164,6 +168,18 @@ def dim_out(self): """Returns the output dimension of this descriptor.""" return self.get_dim_out() + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e3 does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + def get_dim_out(self): """Returns the output dimension of this descriptor.""" return self.neuron[-1] @@ -210,10 +226,27 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" raise NotImplementedError + def set_stat_mean_and_stddev( + self, + mean: np.ndarray, + stddev: np.ndarray, + ) -> None: + """Update mean and stddev for descriptor.""" + self.davg = mean + self.dstd = stddev + + def get_stat_mean_and_stddev(self) -> Tuple[np.ndarray, np.ndarray]: + """Get mean and stddev for descriptor.""" + return self.davg, self.dstd + def reinit_exclude( self, exclude_types: List[Tuple[int, int]] = [], @@ -315,7 +348,7 @@ def serialize(self) -> dict: return { "@class": "Descriptor", "type": "se_e3", - "@version": 1, + "@version": 2, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, @@ -332,6 +365,7 @@ def serialize(self) -> dict: "davg": self.davg, "dstd": self.dstd, }, + "type_map": self.type_map, "trainable": self.trainable, } @@ -339,7 +373,7 @@ def serialize(self) -> dict: def deserialize(cls, data: dict) -> "DescrptSeT": """Deserialize from dict.""" data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) variables = data.pop("@variables") diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index 98325f41ee..f922b57367 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -80,6 +80,8 @@ class DipoleFitting(GeneralFitting): c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reduciable variable are differentiable. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" def __init__( @@ -103,6 +105,7 @@ def __init__( exclude_types: List[int] = [], r_differentiable: bool = True, c_differentiable: bool = True, + type_map: Optional[List[str]] = None, old_impl=False, # not used seed: Optional[int] = None, @@ -138,6 +141,7 @@ def __init__( spin=spin, mixed_types=mixed_types, exclude_types=exclude_types, + type_map=type_map, ) self.old_impl = False @@ -157,7 +161,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) var_name = data.pop("var_name", None) assert var_name == "dipole" return super().deserialize(data) diff --git a/deepmd/dpmodel/fitting/dos_fitting.py b/deepmd/dpmodel/fitting/dos_fitting.py index 7c86d392b0..2c113c1f7d 100644 --- a/deepmd/dpmodel/fitting/dos_fitting.py +++ b/deepmd/dpmodel/fitting/dos_fitting.py @@ -44,6 +44,7 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = False, exclude_types: List[int] = [], + type_map: Optional[List[str]] = None, # not used seed: Optional[int] = None, ): @@ -67,12 +68,13 @@ def __init__( precision=precision, mixed_types=mixed_types, exclude_types=exclude_types, + type_map=type_map, ) @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data["numb_dos"] = data.pop("dim_out") data.pop("tot_ener_zero", None) data.pop("var_name", None) diff --git a/deepmd/dpmodel/fitting/ener_fitting.py b/deepmd/dpmodel/fitting/ener_fitting.py index 7f83f1e886..7c262209d9 100644 --- a/deepmd/dpmodel/fitting/ener_fitting.py +++ b/deepmd/dpmodel/fitting/ener_fitting.py @@ -44,6 +44,7 @@ def __init__( spin: Any = None, mixed_types: bool = False, exclude_types: List[int] = [], + type_map: Optional[List[str]] = None, # not used seed: Optional[int] = None, ): @@ -67,12 +68,13 @@ def __init__( spin=spin, mixed_types=mixed_types, exclude_types=exclude_types, + type_map=type_map, ) @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("var_name") data.pop("dim_out") return super().deserialize(data) diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index 5681f5bf0c..2f0b3c7ac6 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -21,6 +21,10 @@ FittingNet, NetworkCollection, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, +) from .base_fitting import ( BaseFitting, @@ -76,6 +80,8 @@ class GeneralFitting(NativeOP, BaseFitting): Remove vaccum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" def __init__( @@ -99,6 +105,7 @@ def __init__( mixed_types: bool = True, exclude_types: List[int] = [], remove_vaccum_contribution: Optional[List[bool]] = None, + type_map: Optional[List[str]] = None, ): self.var_name = var_name self.ntypes = ntypes @@ -110,6 +117,7 @@ def __init__( self.rcond = rcond self.tot_ener_zero = tot_ener_zero self.trainable = trainable + self.type_map = type_map if self.trainable is None: self.trainable = [True for ii in range(len(self.neuron) + 1)] if isinstance(self.trainable, bool): @@ -185,6 +193,32 @@ def get_sel_type(self) -> List[int]: """ return [ii for ii in range(self.ntypes) if ii not in self.exclude_types] + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = np.zeros(extend_shape, dtype=self.bias_atom_e.dtype) + self.bias_atom_e = np.concatenate( + [self.bias_atom_e, extend_bias_atom_e], axis=0 + ) + self.bias_atom_e = self.bias_atom_e[remap_index] + def __setitem__(self, key, value): if key in ["bias_atom_e"]: self.bias_atom_e = value @@ -228,7 +262,7 @@ def serialize(self) -> dict: """Serialize the fitting to dict.""" return { "@class": "Fitting", - "@version": 1, + "@version": 2, "var_name": self.var_name, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -249,6 +283,7 @@ def serialize(self) -> dict: "aparam_avg": self.aparam_avg, "aparam_inv_std": self.aparam_inv_std, }, + "type_map": self.type_map, # not supported "tot_ener_zero": self.tot_ener_zero, "trainable": self.trainable, diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 9bf1731830..91103ecf11 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -106,6 +106,8 @@ class InvarFitting(GeneralFitting): If false, different atomic types uses different fitting net, otherwise different atom types share the same fitting net. exclude_types: List[int] Atomic contributions of the excluded atom types are set zero. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" @@ -131,6 +133,7 @@ def __init__( spin: Any = None, mixed_types: bool = True, exclude_types: List[int] = [], + type_map: Optional[List[str]] = None, ): # seed, uniform_seed are not included if tot_ener_zero: @@ -168,6 +171,7 @@ def __init__( remove_vaccum_contribution=None if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 else [x is not None for x in atom_ener], + type_map=type_map, ) def serialize(self) -> dict: @@ -180,7 +184,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) return super().deserialize(data) def _net_out_dim(self): diff --git a/deepmd/dpmodel/fitting/make_base_fitting.py b/deepmd/dpmodel/fitting/make_base_fitting.py index 72dc83c29e..417ccc892a 100644 --- a/deepmd/dpmodel/fitting/make_base_fitting.py +++ b/deepmd/dpmodel/fitting/make_base_fitting.py @@ -5,6 +5,7 @@ ) from typing import ( Dict, + List, Optional, ) @@ -67,6 +68,20 @@ def compute_output_stats(self, merged): """Update the output bias for fitting net.""" raise NotImplementedError + @abstractmethod + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + pass + + @abstractmethod + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + pass + @abstractmethod def serialize(self) -> dict: """Serialize the obj to dict.""" diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index 70e52c8e7d..67b4888c67 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -23,6 +23,9 @@ OutputVariableDef, fitting_check_output, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) from deepmd.utils.version import ( check_version_compatibility, ) @@ -82,6 +85,8 @@ class PolarFitting(GeneralFitting): The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] shift_diag : bool Whether to shift the diagonal part of the polarizability matrix. The shift operation is carried out after scale. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" def __init__( @@ -107,6 +112,7 @@ def __init__( fit_diag: bool = True, scale: Optional[List[float]] = None, shift_diag: bool = True, + type_map: Optional[List[str]] = None, # not used seed: Optional[int] = None, ): @@ -159,6 +165,7 @@ def __init__( spin=spin, mixed_types=mixed_types, exclude_types=exclude_types, + type_map=type_map, ) self.old_impl = False @@ -185,7 +192,7 @@ def __getitem__(self, key): def serialize(self) -> dict: data = super().serialize() data["type"] = "polar" - data["@version"] = 2 + data["@version"] = 3 data["embedding_width"] = self.embedding_width data["old_impl"] = self.old_impl data["fit_diag"] = self.fit_diag @@ -197,7 +204,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) var_name = data.pop("var_name", None) assert var_name == "polar" return super().deserialize(data) @@ -215,6 +222,32 @@ def output_def(self): ] ) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + super().change_type_map(type_map=type_map) + if has_new_type: + extend_shape = [len(type_map), *list(self.scale.shape[1:])] + extend_scale = np.ones(extend_shape, dtype=self.scale.dtype) + self.scale = np.concatenate([self.scale, extend_scale], axis=0) + extend_shape = [len(type_map), *list(self.constant_matrix.shape[1:])] + extend_constant_matrix = np.zeros( + extend_shape, dtype=self.constant_matrix.dtype + ) + self.constant_matrix = np.concatenate( + [self.constant_matrix, extend_constant_matrix], axis=0 + ) + self.scale = self.scale[remap_index] + self.constant_matrix = self.constant_matrix[remap_index] + def call( self, descriptor: np.ndarray, diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index f8579de9a4..a130437b3d 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -408,6 +408,14 @@ def do_grad_c( """ return self.atomic_model.do_grad_c(var_name) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + self.atomic_model.change_type_map(type_map=type_map) + def serialize(self) -> dict: return self.atomic_model.serialize() diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py index 0df6e94f05..b8faa39dbd 100644 --- a/deepmd/dpmodel/model/model.py +++ b/deepmd/dpmodel/model/model.py @@ -25,7 +25,9 @@ def get_standard_model(data: dict) -> EnergyModel: The data to construct the model. 
""" descriptor_type = data["descriptor"].pop("type") + data["descriptor"]["type_map"] = data["type_map"] fitting_type = data["fitting_net"].pop("type") + data["fitting_net"]["type_map"] = data["type_map"] if descriptor_type == "se_e2_a": descriptor = DescrptSeA( **data["descriptor"], diff --git a/deepmd/dpmodel/utils/type_embed.py b/deepmd/dpmodel/utils/type_embed.py index 201ac91cc6..99508ea7b3 100644 --- a/deepmd/dpmodel/utils/type_embed.py +++ b/deepmd/dpmodel/utils/type_embed.py @@ -13,6 +13,9 @@ from deepmd.dpmodel.utils.network import ( EmbeddingNet, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) from deepmd.utils.version import ( check_version_compatibility, ) @@ -43,7 +46,6 @@ class TypeEmbedNet(NativeOP): Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. """ def __init__( @@ -72,27 +74,9 @@ def __init__( self.type_map = type_map embed_input_dim = ntypes if self.use_econf_tebd: - from deepmd.utils.econf_embd import ( - ECONF_DIM, - electronic_configuration_embedding, - ) - from deepmd.utils.econf_embd import type_map as periodic_table - - assert ( - self.type_map is not None - ), "When using electronic configuration type embedding, type_map must be provided!" - - missing_types = [t for t in self.type_map if t not in periodic_table] - assert not missing_types, ( - "When using electronic configuration type embedding, " - "all element in type_map should be in periodic table! " - f"Found these invalid elements: {missing_types}" - ) - self.econf_tebd = np.array( - [electronic_configuration_embedding[kk] for kk in self.type_map], - dtype=PRECISION_DICT[self.precision], + self.econf_tebd, embed_input_dim = get_econf_tebd( + self.type_map, precision=self.precision ) - embed_input_dim = ECONF_DIM self.embedding_net = EmbeddingNet( embed_input_dim, self.neuron, @@ -159,3 +143,85 @@ def serialize(self) -> dict: "type_map": self.type_map, "embedding": self.embedding_net.serialize(), } + + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + if not self.use_econf_tebd: + do_resnet = self.neuron[0] in [ + self.ntypes, + self.ntypes * 2, + len(type_map), + len(type_map) * 2, + ] + assert ( + not do_resnet or self.activation_function == "Linear" + ), "'activation_function' must be 'Linear' when performing type changing on resnet structure!" 
+ first_layer_matrix = self.embedding_net.layers[0].w + eye_vector = np.eye(self.ntypes, dtype=PRECISION_DICT[self.precision]) + # preprocess for resnet connection + if self.neuron[0] == self.ntypes: + first_layer_matrix += eye_vector + elif self.neuron[0] == self.ntypes * 2: + first_layer_matrix += np.concatenate([eye_vector, eye_vector], axis=-1) + + # randomly initialize params for the unseen types + rng = np.random.default_rng() + if has_new_type: + extend_type_params = rng.random( + [len(type_map), first_layer_matrix.shape[-1]], + dtype=first_layer_matrix.dtype, + ) + first_layer_matrix = np.concatenate( + [first_layer_matrix, extend_type_params], axis=0 + ) + + first_layer_matrix = first_layer_matrix[remap_index] + new_ntypes = len(type_map) + eye_vector = np.eye(new_ntypes, dtype=PRECISION_DICT[self.precision]) + + if self.neuron[0] == new_ntypes: + first_layer_matrix -= eye_vector + elif self.neuron[0] == new_ntypes * 2: + first_layer_matrix -= np.concatenate([eye_vector, eye_vector], axis=-1) + + self.embedding_net.layers[0].num_in = new_ntypes + self.embedding_net.layers[0].w = first_layer_matrix + else: + self.econf_tebd, embed_input_dim = get_econf_tebd( + type_map, precision=self.precision + ) + self.type_map = type_map + self.ntypes = len(type_map) + + +def get_econf_tebd(type_map, precision: str = "default"): + from deepmd.utils.econf_embd import ( + ECONF_DIM, + electronic_configuration_embedding, + ) + from deepmd.utils.econf_embd import type_map as periodic_table + + assert ( + type_map is not None + ), "When using electronic configuration type embedding, type_map must be provided!" + + missing_types = [t for t in type_map if t not in periodic_table] + assert not missing_types, ( + "When using electronic configuration type embedding, " + "all element in type_map should be in periodic table! " + f"Found these invalid elements: {missing_types}" + ) + econf_tebd = np.array( + [electronic_configuration_embedding[kk] for kk in type_map], + dtype=PRECISION_DICT[precision], + ) + embed_input_dim = ECONF_DIM + return econf_tebd, embed_input_dim diff --git a/deepmd/main.py b/deepmd/main.py index 322933333c..4560df9e57 100644 --- a/deepmd/main.py +++ b/deepmd/main.py @@ -255,6 +255,11 @@ def main_parser() -> argparse.ArgumentParser: default=None, help="Finetune the frozen pretrained model.", ) + parser_train.add_argument( + "--use-pretrain-script", + action="store_true", + help="Use model parameters from the script of the pretrained model instead of user input when doing finetuning. 
Note: This behavior is default and unchangeable in TensorFlow.", + ) parser_train.add_argument( "-o", "--output", diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 8e37dbf09b..ef192eab1f 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -51,7 +51,7 @@ DEVICE, ) from deepmd.pt.utils.finetune import ( - change_finetune_model_params, + get_finetune_rules, ) from deepmd.pt.utils.multi_task import ( preprocess_shared_params, @@ -79,10 +79,10 @@ def get_trainer( init_model=None, restart_model=None, finetune_model=None, - model_branch="", force_load=False, init_frz_model=None, shared_links=None, + finetune_links=None, ): multi_task = "model_dict" in config.get("model", {}) @@ -93,23 +93,8 @@ def get_trainer( assert dist.is_nccl_available() dist.init_process_group(backend="nccl") - ckpt = init_model if init_model is not None else restart_model - finetune_links = None - if finetune_model is not None: - config["model"], finetune_links = change_finetune_model_params( - finetune_model, - config["model"], - model_branch=model_branch, - ) - config["model"]["resuming"] = (finetune_model is not None) or (ckpt is not None) - - def prepare_trainer_input_single( - model_params_single, data_dict_single, loss_dict_single, suffix="", rank=0 - ): + def prepare_trainer_input_single(model_params_single, data_dict_single, rank=0): training_dataset_params = data_dict_single["training_data"] - type_split = False - if model_params_single["descriptor"]["type"] in ["se_e2_a"]: - type_split = True validation_dataset_params = data_dict_single.get("validation_data", None) validation_systems = ( validation_dataset_params["systems"] if validation_dataset_params else None @@ -142,18 +127,11 @@ def prepare_trainer_input_single( if validation_systems else None ) - if ckpt or finetune_model: - train_data_single = DpLoaderSet( - training_systems, - training_dataset_params["batch_size"], - model_params_single["type_map"], - ) - else: - train_data_single = DpLoaderSet( - training_systems, - training_dataset_params["batch_size"], - model_params_single["type_map"], - ) + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single["type_map"], + ) return ( train_data_single, validation_data_single, @@ -169,7 +147,6 @@ def prepare_trainer_input_single( ) = prepare_trainer_input_single( config["model"], config["training"], - config["loss"], rank=rank, ) else: @@ -182,8 +159,6 @@ def prepare_trainer_input_single( ) = prepare_trainer_input_single( config["model"]["model_dict"][model_key], config["training"]["data_dict"][model_key], - config["loss_dict"][model_key], - suffix=f"_{model_key}", rank=rank, ) @@ -243,6 +218,16 @@ def train(FLAGS): if multi_task: config["model"], shared_links = preprocess_shared_params(config["model"]) + # update fine-tuning config + finetune_links = None + if FLAGS.finetune is not None: + config["model"], finetune_links = get_finetune_rules( + FLAGS.finetune, + config["model"], + model_branch=FLAGS.model_branch, + change_model_params=FLAGS.use_pretrain_script, + ) + # argcheck if not multi_task: config = update_deepmd_input(config, warning=True, dump="input_v2_compat.json") @@ -286,10 +271,10 @@ def train(FLAGS): FLAGS.init_model, FLAGS.restart, FLAGS.finetune, - FLAGS.model_branch, FLAGS.force_load, FLAGS.init_frz_model, shared_links=shared_links, + finetune_links=finetune_links, ) # save min_nbor_dist if min_nbor_dist is not None: diff --git a/deepmd/pt/infer/deep_eval.py 
b/deepmd/pt/infer/deep_eval.py index 0e3dd292cb..98504c3990 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -118,7 +118,6 @@ def __init__( item.replace(f"model.{head}.", "model.Default.") ] = state_dict[item].clone() state_dict = state_dict_head - self.input_param["resuming"] = True model = get_model(self.input_param).to(DEVICE) model = torch.jit.script(model) self.dp = ModelWrapper(model) diff --git a/deepmd/pt/infer/inference.py b/deepmd/pt/infer/inference.py index 6c13b363bc..dfb7abdb21 100644 --- a/deepmd/pt/infer/inference.py +++ b/deepmd/pt/infer/inference.py @@ -56,7 +56,6 @@ def __init__( state_dict = state_dict_head self.model_params = deepcopy(model_params) - model_params["resuming"] = True self.model = get_model(model_params).to(DEVICE) # Model Wrapper diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 1340028425..fe904a39ab 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -35,6 +35,11 @@ to_numpy_array, to_torch_tensor, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, + map_pair_exclude_types, +) from deepmd.utils.path import ( DPPath, ) @@ -276,6 +281,37 @@ def forward( comm_dict=comm_dict, ) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.reinit_atom_exclude( + map_atom_exclude_types(self.atom_exclude_types, remap_index) + ) + self.reinit_pair_exclude( + map_pair_exclude_types(self.pair_exclude_types, remap_index) + ) + if has_new_type: + extend_shape = [ + self.out_bias.shape[0], + len(type_map), + *list(self.out_bias.shape[2:]), + ] + extend_bias = torch.zeros( + extend_shape, dtype=self.out_bias.dtype, device=self.out_bias.device + ) + self.out_bias = torch.cat([self.out_bias, extend_bias], dim=1) + extend_std = torch.ones( + extend_shape, dtype=self.out_std.dtype, device=self.out_std.device + ) + self.out_std = torch.cat([self.out_std, extend_std], dim=1) + self.out_bias = self.out_bias[:, remap_index, :] + self.out_std = self.out_std[:, remap_index, :] + def serialize(self) -> dict: return { "type_map": self.type_map, diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 90254e8c11..549a6dcaee 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -95,6 +95,25 @@ def mixed_types(self) -> bool: """ return self.descriptor.mixed_types() + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + self.type_map = type_map + self.ntypes = len(type_map) + self.descriptor.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descriptor + if model_with_new_type_stat is not None + else None, + ) + self.fitting_net.change_type_map(type_map=type_map) + def has_message_passing(self) -> bool: """Returns whether the atomic model has message passing.""" return self.descriptor.has_message_passing() diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index db8280cd02..7c619a0424 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -119,6 +119,23 @@ def get_type_map(self) -> List[str]: """Get the type map.""" return self.type_map + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + for ii, model in enumerate(self.models): + model.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.models[ii] + if model_with_new_type_stat is not None + else None, + ) + def get_model_rcuts(self) -> List[float]: """Get the cut-off radius for each individual models.""" return [model.get_rcut() for model in self.models] diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index ff1a83da6a..e5504f86c2 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -164,6 +164,18 @@ def has_message_passing(self) -> bool: """Returns whether the atomic model has message passing.""" return False + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert type_map == self.type_map, ( + "PairTabAtomicModel does not support changing type map now. " + "This feature is currently not implemented because it would require additional work to change the tab file. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + def serialize(self) -> dict: dd = BaseAtomicModel.serialize(self) dd.update( diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py index 28656d716c..0f0d87fe86 100644 --- a/deepmd/pt/model/descriptor/descriptor.py +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -180,3 +180,41 @@ def make_default_type_embedding( aux = {} aux["tebd_dim"] = 8 return TypeEmbedNet(ntypes, aux["tebd_dim"]), aux + + +def extend_descrpt_stat(des, type_map, des_with_stat=None): + r""" + Extend the statistics of a descriptor block with types from newly provided `type_map`. 
+ + After extending, the type related dimension of the extended statistics will have a length of + `len(old_type_map) + len(type_map)`, where `old_type_map` represents the type map in `des`. + The `get_index_between_two_maps()` function can then be used to correctly select statistics for types + from `old_type_map` or `type_map`. + Positive indices from 0 to `len(old_type_map) - 1` will select old statistics of types in `old_type_map`, + while negative indices from `-len(type_map)` to -1 will select new statistics of types in `type_map`. + + Parameters + ---------- + des : DescriptorBlock + The descriptor block to be extended. + type_map : List[str] + The name of each type of atoms to be extended. + des_with_stat : DescriptorBlock, Optional + The descriptor block has additional statistics of types from newly provided `type_map`. + If None, the default statistics will be used. + Otherwise, the statistics provided in this DescriptorBlock will be used. + + """ + if des_with_stat is not None: + extend_davg = des_with_stat["davg"] + extend_dstd = des_with_stat["dstd"] + else: + extend_shape = [len(type_map), *list(des["davg"].shape[1:])] + extend_davg = torch.zeros( + extend_shape, dtype=des["davg"].dtype, device=des["davg"].device + ) + extend_dstd = torch.ones( + extend_shape, dtype=des["dstd"].dtype, device=des["dstd"].device + ) + des["davg"] = torch.cat([des["davg"], extend_davg], dim=0) + des["dstd"] = torch.cat([des["dstd"], extend_dstd], dim=0) diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index 8f19aad961..ff29d14e1d 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -30,6 +30,10 @@ from deepmd.utils.data_system import ( DeepmdDataSystem, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) from deepmd.utils.path import ( DPPath, ) @@ -40,6 +44,9 @@ from .base_descriptor import ( BaseDescriptor, ) +from .descriptor import ( + extend_descrpt_stat, +) from .se_atten import ( DescrptBlockSeAtten, NeighborGatedAttention, @@ -181,7 +188,6 @@ class DescrptDPA1(BaseDescriptor, torch.nn.Module): Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. spin (Only support None to keep consistent with other backend references.) (Not used in this version. Not-none option is not implemented.) @@ -320,6 +326,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.se_atten.get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension.""" ret = self.se_atten.get_dim_out() @@ -409,9 +419,41 @@ def set_stat_mean_and_stddev( mean: torch.Tensor, stddev: torch.Tensor, ) -> None: + """Update mean and stddev for descriptor.""" self.se_atten.mean = mean self.se_atten.stddev = stddev + def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Get mean and stddev for descriptor.""" + return self.se_atten.mean, self.se_atten.stddev + + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_atten + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_atten + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + def serialize(self) -> dict: obj = self.se_atten data = { diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index 322c34734a..ae8c924e9a 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -40,6 +40,10 @@ from deepmd.utils.data_system import ( DeepmdDataSystem, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) from deepmd.utils.path import ( DPPath, ) @@ -50,6 +54,9 @@ from .base_descriptor import ( BaseDescriptor, ) +from .descriptor import ( + extend_descrpt_stat, +) from .repformer_layer import ( RepformerLayer, ) @@ -113,7 +120,6 @@ def __init__( Whether to use electronic configuration type embedding. type_map : List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. Returns ------- @@ -271,6 +277,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension of this descriptor.""" ret = self.repformers.dim_out @@ -345,6 +355,47 @@ def share_params(self, base_class, shared_level, resume=False): else: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + self.exclude_types = map_pair_exclude_types(self.exclude_types, remap_index) + self.ntypes = len(type_map) + repinit = self.repinit + repformers = self.repformers + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + repinit, + type_map, + des_with_stat=model_with_new_type_stat.repinit + if model_with_new_type_stat is not None + else None, + ) + extend_descrpt_stat( + repformers, + type_map, + des_with_stat=model_with_new_type_stat.repformers + if model_with_new_type_stat is not None + else None, + ) + repinit.ntypes = self.ntypes + repformers.ntypes = self.ntypes + repinit.reinit_exclude(self.exclude_types) + repformers.reinit_exclude(self.exclude_types) + repinit["davg"] = repinit["davg"][remap_index] + repinit["dstd"] = repinit["dstd"][remap_index] + repformers["davg"] = repformers["davg"][remap_index] + repformers["dstd"] = repformers["dstd"][remap_index] + @property def dim_out(self): return self.get_dim_out() @@ -378,6 +429,23 @@ def compute_input_stats( for ii, descrpt in enumerate([self.repinit, self.repformers]): descrpt.compute_input_stats(merged, path) + def set_stat_mean_and_stddev( + self, + mean: List[torch.Tensor], + stddev: List[torch.Tensor], + ) -> None: + """Update mean and stddev for descriptor.""" + for ii, descrpt in enumerate([self.repinit, self.repformers]): + descrpt.mean = mean[ii] + descrpt.stddev = stddev[ii] + + def get_stat_mean_and_stddev(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + """Get mean and stddev for descriptor.""" + return [self.repinit.mean, self.repformers.mean], [ + self.repinit.stddev, + self.repformers.stddev, + ] + def serialize(self) -> dict: repinit = self.repinit repformers = self.repformers diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py index 3733cec8e7..d486cda399 100644 --- a/deepmd/pt/model/descriptor/hybrid.py +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -129,6 +129,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.descrpt_list[0].get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.descrpt_list[0].get_type_map() + def get_dim_out(self) -> int: """Returns the output dimension.""" return sum([descrpt.get_dim_out() for descrpt in self.descrpt_list]) @@ -174,11 +178,49 @@ def share_params(self, base_class, shared_level, resume=False): else: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + for ii, descrpt in enumerate(self.descrpt_list): + descrpt.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descrpt_list[ii] + if model_with_new_type_stat is not None + else None, + ) + def compute_input_stats(self, merged: List[dict], path: Optional[DPPath] = None): """Update mean and stddev for descriptor elements.""" for descrpt in self.descrpt_list: descrpt.compute_input_stats(merged, path) + def set_stat_mean_and_stddev( + self, + mean: List[Union[torch.Tensor, List[torch.Tensor]]], + stddev: List[Union[torch.Tensor, List[torch.Tensor]]], + ) -> None: + """Update mean and stddev for descriptor.""" + for ii, descrpt in enumerate(self.descrpt_list): + descrpt.set_stat_mean_and_stddev(mean[ii], stddev[ii]) + + def get_stat_mean_and_stddev( + self, + ) -> Tuple[ + List[Union[torch.Tensor, List[torch.Tensor]]], + List[Union[torch.Tensor, List[torch.Tensor]]], + ]: + """Get mean and stddev for descriptor.""" + mean_list = [] + stddev_list = [] + for ii, descrpt in enumerate(self.descrpt_list): + mean_item, stddev_item = descrpt.get_stat_mean_and_stddev() + mean_list.append(mean_item) + stddev_list.append(stddev_item) + return mean_list, stddev_list + def forward( self, coord_ext: torch.Tensor, diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 01a6d1ab38..e771c03e52 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -88,6 +88,7 @@ def __init__( trainable: bool = True, seed: Optional[int] = None, ntypes: Optional[int] = None, # to be compat with input + type_map: Optional[List[str]] = None, # not implemented spin=None, ): @@ -95,6 +96,7 @@ def __init__( if spin is not None: raise NotImplementedError("old implementation of spin is not supported.") super().__init__() + self.type_map = type_map self.sea = DescrptBlockSeA( rcut, rcut_smth, @@ -133,6 +135,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.sea.get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension.""" return self.sea.get_dim_out() @@ -178,6 +184,18 @@ def dim_out(self): """Returns the output dimension of this descriptor.""" return self.sea.dim_out + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_a does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." 
+ ) + def compute_input_stats( self, merged: Union[Callable[[], List[dict]], List[dict]], @@ -255,15 +273,20 @@ def set_stat_mean_and_stddev( mean: torch.Tensor, stddev: torch.Tensor, ) -> None: + """Update mean and stddev for descriptor.""" self.sea.mean = mean self.sea.stddev = stddev + def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Get mean and stddev for descriptor.""" + return self.sea.mean, self.sea.stddev + def serialize(self) -> dict: obj = self.sea return { "@class": "Descriptor", "type": "se_e2_a", - "@version": 1, + "@version": 2, "rcut": obj.rcut, "rcut_smth": obj.rcut_smth, "sel": obj.sel, @@ -282,6 +305,7 @@ def serialize(self) -> dict: "davg": obj["davg"].detach().cpu().numpy(), "dstd": obj["dstd"].detach().cpu().numpy(), }, + "type_map": self.type_map, ## to be updated when the options are supported. "trainable": True, "type_one_side": obj.type_one_side, @@ -291,7 +315,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "DescrptSeA": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) variables = data.pop("@variables") diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 21fecd4857..e6ebe53c26 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -71,6 +71,7 @@ def __init__( old_impl: bool = False, trainable: bool = True, seed: Optional[int] = None, + type_map: Optional[List[str]] = None, **kwargs, ): super().__init__() @@ -86,6 +87,7 @@ def __init__( self.old_impl = False # this does not support old implementation. self.exclude_types = exclude_types self.ntypes = len(sel) + self.type_map = type_map self.seed = seed # order matters, placed after the assignment of self.ntypes self.reinit_exclude(exclude_types) @@ -146,6 +148,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.ntypes + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension.""" return self.neuron[-1] @@ -211,6 +217,18 @@ def share_params(self, base_class, shared_level, resume=False): else: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_r does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." 
+ ) + def compute_input_stats( self, merged: Union[Callable[[], List[dict]], List[dict]], @@ -371,14 +389,19 @@ def set_stat_mean_and_stddev( mean: torch.Tensor, stddev: torch.Tensor, ) -> None: + """Update mean and stddev for descriptor.""" self.mean = mean self.stddev = stddev + def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Get mean and stddev for descriptor.""" + return self.mean, self.stddev + def serialize(self) -> dict: return { "@class": "Descriptor", "type": "se_r", - "@version": 1, + "@version": 2, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, @@ -396,6 +419,7 @@ def serialize(self) -> dict: "davg": self["davg"].detach().cpu().numpy(), "dstd": self["dstd"].detach().cpu().numpy(), }, + "type_map": self.type_map, ## to be updated when the options are supported. "trainable": True, "type_one_side": True, @@ -405,7 +429,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "DescrptSeR": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) variables = data.pop("@variables") embeddings = data.pop("embeddings") env_mat = data.pop("env_mat") diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 3b67e1657f..caa4c9ce45 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -101,6 +101,8 @@ class DescrptSeT(BaseDescriptor, torch.nn.Module): If the weights of embedding net are trainable. seed : int, Optional Random seed for initializing the network parameters. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -117,6 +119,7 @@ def __init__( precision: str = "float64", trainable: bool = True, seed: Optional[int] = None, + type_map: Optional[List[str]] = None, ntypes: Optional[int] = None, # to be compat with input # not implemented spin=None, @@ -125,6 +128,7 @@ def __init__( if spin is not None: raise NotImplementedError("old implementation of spin is not supported.") super().__init__() + self.type_map = type_map self.seat = DescrptBlockSeT( rcut, rcut_smth, @@ -160,6 +164,10 @@ def get_ntypes(self) -> int: """Returns the number of element types.""" return self.seat.get_ntypes() + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def get_dim_out(self) -> int: """Returns the output dimension.""" return self.seat.get_dim_out() @@ -205,6 +213,18 @@ def dim_out(self): """Returns the output dimension of this descriptor.""" return self.seat.dim_out + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e3 does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." 
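All three se_* descriptors bump their serialized `@version` from 1 to 2 because the dict now carries `type_map`; version-1 checkpoints remain loadable since the minimum accepted version stays 1. A small sketch of the assumed `check_version_compatibility(actual, maximum, minimum)` pattern:

```python
data = serialized.copy()
check_version_compatibility(data.pop("@version", 1), 2, 1)  # accepts 1 and 2
data.pop("@class", None)
data.pop("type", None)
type_map = data.get("type_map", None)  # absent in version-1 checkpoints
```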
+ ) + def compute_input_stats( self, merged: Union[Callable[[], List[dict]], List[dict]], @@ -283,15 +303,20 @@ def set_stat_mean_and_stddev( mean: torch.Tensor, stddev: torch.Tensor, ) -> None: + """Update mean and stddev for descriptor.""" self.seat.mean = mean self.seat.stddev = stddev + def get_stat_mean_and_stddev(self) -> Tuple[torch.Tensor, torch.Tensor]: + """Get mean and stddev for descriptor.""" + return self.seat.mean, self.seat.stddev + def serialize(self) -> dict: obj = self.seat return { "@class": "Descriptor", "type": "se_e3", - "@version": 1, + "@version": 2, "rcut": obj.rcut, "rcut_smth": obj.rcut_smth, "sel": obj.sel, @@ -304,6 +329,7 @@ def serialize(self) -> dict: "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "exclude_types": obj.exclude_types, "env_protection": obj.env_protection, + "type_map": self.type_map, "@variables": { "davg": obj["davg"].detach().cpu().numpy(), "dstd": obj["dstd"].detach().cpu().numpy(), @@ -314,7 +340,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "DescrptSeT": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) variables = data.pop("@variables") diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 1d46720af2..586e3f4a6e 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -107,13 +107,13 @@ def get_zbl_model(model_params): ntypes = len(model_params["type_map"]) # descriptor model_params["descriptor"]["ntypes"] = ntypes - if model_params["descriptor"].get("use_econf_tebd", False): - model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) descriptor = BaseDescriptor(**model_params["descriptor"]) # fitting fitting_net = model_params.get("fitting_net", None) fitting_net["type"] = fitting_net.get("type", "ener") fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) fitting_net["mixed_types"] = descriptor.mixed_types() fitting_net["embedding_width"] = descriptor.get_dim_out() fitting_net["dim_descrpt"] = descriptor.get_dim_out() @@ -154,13 +154,13 @@ def get_standard_model(model_params): ntypes = len(model_params["type_map"]) # descriptor model_params["descriptor"]["ntypes"] = ntypes - if model_params["descriptor"].get("use_econf_tebd", False): - model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) descriptor = BaseDescriptor(**model_params["descriptor"]) # fitting - fitting_net = model_params.get("fitting_net", None) + fitting_net = model_params.get("fitting_net", {}) fitting_net["type"] = fitting_net.get("type", "ener") fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) fitting_net["mixed_types"] = descriptor.mixed_types() if fitting_net["type"] in ["dipole", "polar"]: fitting_net["embedding_width"] = descriptor.get_dim_emb() diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 31e26dc718..38fa0e2530 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -448,6 +448,19 @@ def do_grad_c( """ return self.atomic_model.do_grad_c(var_name) + def change_type_map( + self, type_map: 
List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + self.atomic_model.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.atomic_model + if model_with_new_type_stat is not None + else None, + ) + def serialize(self) -> dict: return self.atomic_model.serialize() diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index c2a719c2b0..0475c35750 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -32,13 +32,16 @@ import torch.utils.checkpoint -from deepmd.dpmodel.common import ( - PRECISION_DICT, +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, ) from deepmd.pt.utils.utils import ( ActivationFn, to_torch_tensor, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) def Tensor(*shape): @@ -619,6 +622,14 @@ def share_params(self, base_class, shared_level, resume=False): else: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + self.embedding.change_type_map(type_map=type_map) + class TypeEmbedNetConsistent(nn.Module): r"""Type embedding network that is consistent with other backends. @@ -645,7 +656,6 @@ class TypeEmbedNetConsistent(nn.Module): Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. """ def __init__( @@ -678,29 +688,10 @@ def __init__( self.econf_tebd = None embed_input_dim = ntypes if self.use_econf_tebd: - from deepmd.utils.econf_embd import ( - ECONF_DIM, - electronic_configuration_embedding, - ) - from deepmd.utils.econf_embd import type_map as periodic_table - - assert ( - self.type_map is not None - ), "When using electronic configuration type embedding, type_map must be provided!" - - missing_types = [t for t in self.type_map if t not in periodic_table] - assert not missing_types, ( - "When using electronic configuration type embedding, " - "all element in type_map should be in periodic table! " - f"Found these invalid elements: {missing_types}" + econf_tebd, embed_input_dim = get_econf_tebd( + self.type_map, precision=self.precision ) - self.econf_tebd = to_torch_tensor( - np.array( - [electronic_configuration_embedding[kk] for kk in self.type_map], - dtype=PRECISION_DICT[self.precision], - ) - ) - embed_input_dim = ECONF_DIM + self.econf_tebd = to_torch_tensor(econf_tebd) self.embedding_net = EmbeddingNet( embed_input_dim, self.neuron, @@ -733,6 +724,68 @@ def forward(self, device: torch.device): ) return embed + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
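The electronic-configuration table construction is now shared through `get_econf_tebd` from `deepmd.dpmodel.utils.type_embed`, replacing the previously inlined lookup. The call shape as used in the diff; the concrete type map here is only an example:

```python
econf_tebd, embed_input_dim = get_econf_tebd(["O", "H"], precision="float64")
econf_tebd = to_torch_tensor(econf_tebd)  # (ntypes, embed_input_dim) embedding table
```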
+ """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + if not self.use_econf_tebd: + do_resnet = self.neuron[0] in [ + self.ntypes, + self.ntypes * 2, + len(type_map), + len(type_map) * 2, + ] + assert ( + not do_resnet or self.activation_function == "Linear" + ), "'activation_function' must be 'Linear' when performing type changing on resnet structure!" + first_layer_matrix = self.embedding_net.layers[0].matrix.data + eye_vector = torch.eye( + self.ntypes, dtype=self.prec, device=first_layer_matrix.device + ) + # preprocess for resnet connection + if self.neuron[0] == self.ntypes: + first_layer_matrix += eye_vector + elif self.neuron[0] == self.ntypes * 2: + first_layer_matrix += torch.concat([eye_vector, eye_vector], dim=-1) + + # randomly initialize params for the unseen types + if has_new_type: + extend_type_params = torch.rand( + [len(type_map), first_layer_matrix.shape[-1]], + device=first_layer_matrix.device, + dtype=first_layer_matrix.dtype, + ) + first_layer_matrix = torch.cat( + [first_layer_matrix, extend_type_params], dim=0 + ) + + first_layer_matrix = first_layer_matrix[remap_index] + new_ntypes = len(type_map) + eye_vector = torch.eye( + new_ntypes, dtype=self.prec, device=first_layer_matrix.device + ) + + if self.neuron[0] == new_ntypes: + first_layer_matrix -= eye_vector + elif self.neuron[0] == new_ntypes * 2: + first_layer_matrix -= torch.concat([eye_vector, eye_vector], dim=-1) + + self.embedding_net.layers[0].num_in = new_ntypes + self.embedding_net.layers[0].matrix = nn.Parameter(data=first_layer_matrix) + else: + econf_tebd, embed_input_dim = get_econf_tebd( + type_map, precision=self.precision + ) + self.econf_tebd = to_torch_tensor(econf_tebd) + self.type_map = type_map + self.ntypes = len(type_map) + @classmethod def deserialize(cls, data: dict): """Deserialize the model. diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py index 9430ede766..8a13b27e20 100644 --- a/deepmd/pt/model/task/__init__.py +++ b/deepmd/pt/model/task/__init__.py @@ -11,6 +11,9 @@ from .dipole import ( DipoleFittingNet, ) +from .dos import ( + DOSFittingNet, +) from .ener import ( EnergyFittingNet, EnergyFittingNetDirect, @@ -35,4 +38,5 @@ "BaseFitting", "TypePredictNet", "PolarFittingNet", + "DOSFittingNet", ] diff --git a/deepmd/pt/model/task/dipole.py b/deepmd/pt/model/task/dipole.py index cddbbf5291..917af1bdcc 100644 --- a/deepmd/pt/model/task/dipole.py +++ b/deepmd/pt/model/task/dipole.py @@ -70,6 +70,8 @@ class DipoleFittingNet(GeneralFitting): c_differentiable If the variable is differentiated with respect to the cell tensor (pbc case). Only reduciable variable are differentiable. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -89,6 +91,7 @@ def __init__( exclude_types: List[int] = [], r_differentiable: bool = True, c_differentiable: bool = True, + type_map: Optional[List[str]] = None, **kwargs, ): self.embedding_width = embedding_width @@ -108,6 +111,7 @@ def __init__( rcond=rcond, seed=seed, exclude_types=exclude_types, + type_map=type_map, **kwargs, ) self.old_impl = False # this only supports the new implementation. 
@@ -128,7 +132,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("var_name", None) return super().deserialize(data) diff --git a/deepmd/pt/model/task/dos.py b/deepmd/pt/model/task/dos.py index c37b05277a..c6a533ce7e 100644 --- a/deepmd/pt/model/task/dos.py +++ b/deepmd/pt/model/task/dos.py @@ -57,6 +57,7 @@ def __init__( precision: str = DEFAULT_PRECISION, exclude_types: List[int] = [], mixed_types: bool = True, + type_map: Optional[List[str]] = None, ): if bias_dos is not None: self.bias_dos = bias_dos @@ -81,6 +82,7 @@ def __init__( seed=seed, exclude_types=exclude_types, trainable=trainable, + type_map=type_map, ) def output_def(self) -> FittingOutputDef: @@ -99,7 +101,7 @@ def output_def(self) -> FittingOutputDef: @classmethod def deserialize(cls, data: dict) -> "DOSFittingNet": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("var_name", None) data.pop("tot_ener_zero", None) diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index ea9e21b1ae..6db937f72c 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -56,6 +56,7 @@ def __init__( precision: str = DEFAULT_PRECISION, mixed_types: bool = True, seed: Optional[int] = None, + type_map: Optional[List[str]] = None, **kwargs, ): super().__init__( @@ -72,13 +73,14 @@ def __init__( precision=precision, mixed_types=mixed_types, seed=seed, + type_map=type_map, **kwargs, ) @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("var_name") data.pop("dim_out") return super().deserialize(data) @@ -181,6 +183,14 @@ def serialize(self) -> dict: def deserialize(cls) -> "EnergyFittingNetDirect": raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> List[str]: + raise NotImplementedError + def forward( self, inputs: torch.Tensor, diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index 73390aebc9..0ca2c5c896 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -37,6 +37,10 @@ to_numpy_array, to_torch_tensor, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, +) dtype = env.GLOBAL_PT_FLOAT_PRECISION device = env.DEVICE @@ -121,6 +125,8 @@ class GeneralFitting(Fitting): Remove vaccum contribution before the bias is added. The list assigned each type. For `mixed_types` provide `[True]`, otherwise it should be a list of the same length as `ntypes` signaling if or not removing the vaccum contribution for the atom types in the list. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" def __init__( @@ -141,6 +147,7 @@ def __init__( exclude_types: List[int] = [], trainable: Union[bool, List[bool]] = True, remove_vaccum_contribution: Optional[List[bool]] = None, + type_map: Optional[List[str]] = None, **kwargs, ): super().__init__() @@ -157,6 +164,7 @@ def __init__( self.prec = PRECISION_DICT[self.precision] self.rcond = rcond self.seed = seed + self.type_map = type_map # order matters, should be place after the assignment of ntypes self.reinit_exclude(exclude_types) self.trainable = trainable @@ -247,11 +255,35 @@ def reinit_exclude( self.exclude_types = exclude_types self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = torch.zeros( + extend_shape, + dtype=self.bias_atom_e.dtype, + device=self.bias_atom_e.device, + ) + self.bias_atom_e = torch.cat([self.bias_atom_e, extend_bias_atom_e], dim=0) + self.bias_atom_e = self.bias_atom_e[remap_index] + def serialize(self) -> dict: """Serialize the fitting to dict.""" return { "@class": "Fitting", - "@version": 1, + "@version": 2, "var_name": self.var_name, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -272,6 +304,7 @@ def serialize(self) -> dict: "aparam_avg": to_numpy_array(self.aparam_avg), "aparam_inv_std": to_numpy_array(self.aparam_inv_std), }, + "type_map": self.type_map, # "tot_ener_zero": self.tot_ener_zero , # "trainable": self.trainable , # "atom_ener": self.atom_ener , @@ -322,6 +355,10 @@ def get_sel_type(self) -> List[int]: sel_type.append(ii) return sel_type + def get_type_map(self) -> List[str]: + """Get the name to each type of atoms.""" + return self.type_map + def __setitem__(self, key, value): if key in ["bias_atom_e"]: value = value.view([self.ntypes, self._net_out_dim()]) diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index ea46a552e5..2a8aab9734 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -75,6 +75,8 @@ class InvarFitting(GeneralFitting): The value is a list specifying the bias. the elements can be None or np.array of output shape. For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] The `set_davg_zero` key in the descrptor should be set. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" @@ -96,6 +98,7 @@ def __init__( seed: Optional[int] = None, exclude_types: List[int] = [], atom_ener: Optional[List[Optional[torch.Tensor]]] = None, + type_map: Optional[List[str]] = None, **kwargs, ): self.dim_out = dim_out @@ -118,6 +121,7 @@ def __init__( remove_vaccum_contribution=None if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 else [x is not None for x in atom_ener], + type_map=type_map, **kwargs, ) @@ -135,7 +139,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) return super().deserialize(data) def output_def(self) -> FittingOutputDef: diff --git a/deepmd/pt/model/task/polarizability.py b/deepmd/pt/model/task/polarizability.py index 18cc7e69a0..66120a1523 100644 --- a/deepmd/pt/model/task/polarizability.py +++ b/deepmd/pt/model/task/polarizability.py @@ -25,6 +25,9 @@ from deepmd.pt.utils.utils import ( to_numpy_array, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) from deepmd.utils.version import ( check_version_compatibility, ) @@ -70,6 +73,9 @@ class PolarFittingNet(GeneralFitting): The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i] shift_diag : bool Whether to shift the diagonal part of the polarizability matrix. The shift operation is carried out after scale. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. + """ def __init__( @@ -90,6 +96,7 @@ def __init__( fit_diag: bool = True, scale: Optional[Union[List[float], float]] = None, shift_diag: bool = True, + type_map: Optional[List[str]] = None, **kwargs, ): self.embedding_width = embedding_width @@ -129,6 +136,7 @@ def __init__( rcond=rcond, seed=seed, exclude_types=exclude_types, + type_map=type_map, **kwargs, ) self.old_impl = False # this only supports the new implementation. @@ -153,10 +161,40 @@ def __getitem__(self, key): else: return super().__getitem__(key) + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + super().change_type_map(type_map=type_map) + if has_new_type: + extend_shape = [len(type_map), *list(self.scale.shape[1:])] + extend_scale = torch.ones( + extend_shape, dtype=self.scale.dtype, device=self.scale.device + ) + self.scale = torch.cat([self.scale, extend_scale], dim=0) + extend_shape = [len(type_map), *list(self.constant_matrix.shape[1:])] + extend_constant_matrix = torch.zeros( + extend_shape, + dtype=self.constant_matrix.dtype, + device=self.constant_matrix.device, + ) + self.constant_matrix = torch.cat( + [self.constant_matrix, extend_constant_matrix], dim=0 + ) + self.scale = self.scale[remap_index] + self.constant_matrix = self.constant_matrix[remap_index] + def serialize(self) -> dict: data = super().serialize() data["type"] = "polar" - data["@version"] = 2 + data["@version"] = 3 data["embedding_width"] = self.embedding_width data["old_impl"] = self.old_impl data["fit_diag"] = self.fit_diag @@ -168,7 +206,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("var_name", None) return super().deserialize(data) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index cceadb38d2..3b8b5a435c 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -31,7 +31,7 @@ TensorLoss, ) from deepmd.pt.model.model import ( - EnergyModel, + DOSModel, get_model, get_zbl_model, ) @@ -119,6 +119,7 @@ def __init__( training_params = config["training"] self.multi_task = "model_dict" in model_params self.finetune_links = finetune_links + self.finetune_update_stat = False self.model_keys = ( list(model_params["model_dict"]) if self.multi_task else ["Default"] ) @@ -218,6 +219,7 @@ def single_model_stat( _validation_data, _stat_file_path, _data_requirement, + finetune_has_new_type=False, ): if _model.get_dim_fparam() > 0: fparam_requirement_items = [ @@ -254,7 +256,7 @@ def get_sample(): ) return sampled - if not resuming and self.rank == 0: + if (not resuming or finetune_has_new_type) and self.rank == 0: _model.compute_or_load_stat( sampled_func=get_sample, stat_file_path=_stat_file_path, @@ -335,16 +337,22 @@ def get_loss(loss_params, start_lr, _ntypes, _model): dp_random.seed(training_params["seed"]) if training_params["seed"] is not None: torch.manual_seed(training_params["seed"]) - if not self.multi_task: - self.model = get_single_model( - model_params, - ) - else: - self.model = {} - for model_key in self.model_keys: - self.model[model_key] = get_single_model( - model_params["model_dict"][model_key], + + def get_model_for_wrapper(_model_params): + if "model_dict" not in _model_params: + _model = get_single_model( + _model_params, ) + else: + _model = {} + model_keys = list(_model_params["model_dict"]) + for _model_key in model_keys: + _model[_model_key] = get_single_model( + _model_params["model_dict"][_model_key], + ) + return _model + + self.model = get_model_for_wrapper(model_params) # Loss if not self.multi_task: @@ -377,6 +385,9 @@ def get_loss(loss_params, start_lr, _ntypes, _model): validation_data, stat_file_path, self.loss.label_requirement, + finetune_has_new_type=self.finetune_links["Default"].get_has_new_type() + if self.finetune_links is not None + else False, ) ( self.training_dataloader, @@ -410,6 +421,11 @@ def get_loss(loss_params, start_lr, _ntypes, 
_model): validation_data[model_key], stat_file_path[model_key], self.loss[model_key].label_requirement, + finetune_has_new_type=self.finetune_links[ + model_key + ].get_has_new_type() + if self.finetune_links is not None + else False, ) ( self.training_dataloader[model_key], @@ -462,12 +478,8 @@ def get_loss(loss_params, start_lr, _ntypes, _model): # resuming and finetune optimizer_state_dict = None if resuming: - ntest = model_params.get("data_bias_nsample", 1) - origin_model = ( - finetune_model if finetune_model is not None else resume_model - ) - log.info(f"Resuming from {origin_model}.") - state_dict = torch.load(origin_model, map_location=DEVICE) + log.info(f"Resuming from {resume_model}.") + state_dict = torch.load(resume_model, map_location=DEVICE) if "model" in state_dict: optimizer_state_dict = ( state_dict["optimizer"] if finetune_model is None else None @@ -502,19 +514,48 @@ def get_loss(loss_params, start_lr, _ntypes, _model): log.warning( f"Force load mode allowed! These keys are not in ckpt and will re-init: {slim_keys}" ) - + # update model params in the pretrained model if finetune_model is not None: new_state_dict = {} target_state_dict = self.wrapper.state_dict() + # pretrained_model + pretrained_model = get_model_for_wrapper( + state_dict["_extra_state"]["model_params"] + ) + pretrained_model_wrapper = ModelWrapper(pretrained_model) + pretrained_model_wrapper.load_state_dict(state_dict) + # update type related params + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + _model_key_from = finetune_rule_single.get_model_branch() + # skip if updated + if ( + finetune_rule_single.get_finetune_tmap() + != pretrained_model_wrapper.model[ + _model_key_from + ].get_type_map() + ): + model_with_new_type_stat = None + if finetune_rule_single.get_has_new_type(): + self.finetune_update_stat = True + model_with_new_type_stat = self.wrapper.model[model_key] + pretrained_model_wrapper.model[ + _model_key_from + ].change_type_map( + finetune_rule_single.get_finetune_tmap(), + model_with_new_type_stat=model_with_new_type_stat, + ) + state_dict = pretrained_model_wrapper.state_dict() - def update_single_finetune_params( + def collect_single_finetune_params( _model_key, - _model_key_from, + _finetune_rule_single, _new_state_dict, _origin_state_dict, _random_state_dict, - _new_fitting=False, ): + _new_fitting = _finetune_rule_single.get_random_fitting() + _model_key_from = _finetune_rule_single.get_model_branch() target_keys = [ i for i in _random_state_dict.keys() @@ -535,76 +576,57 @@ def update_single_finetune_params( _origin_state_dict[new_key].clone().detach() ) - if not self.multi_task: - model_key = "Default" - model_key_from = self.finetune_links[model_key] - new_fitting = model_params.pop("new_fitting", False) - update_single_finetune_params( + # collect model params from the pretrained model + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + collect_single_finetune_params( model_key, - model_key_from, + finetune_rule_single, new_state_dict, state_dict, target_state_dict, - _new_fitting=new_fitting, ) - else: - for model_key in self.model_keys: - if model_key in self.finetune_links: - model_key_from = self.finetune_links[model_key] - new_fitting = model_params["model_dict"][model_key].pop( - "new_fitting", False - ) - else: - model_key_from = model_key - new_fitting = False - update_single_finetune_params( - model_key, - model_key_from, - new_state_dict, - state_dict, - target_state_dict, - 
_new_fitting=new_fitting, - ) state_dict = new_state_dict state_dict["_extra_state"] = self.wrapper.state_dict()[ "_extra_state" ] + self.wrapper.load_state_dict(state_dict) + # change bias for fine-tuning if finetune_model is not None: def single_model_finetune( _model, - _model_params, + _finetune_rule_single, _sample_func, ): - old_type_map, new_type_map = ( - _model_params["type_map"], - _model_params["new_type_map"], - ) - if isinstance(_model, EnergyModel): + # need fix for DOSModel + if not isinstance(_model, DOSModel): _model = _model_change_out_bias( - _model, new_type_map, _sample_func, _model_params + _model, + _sample_func, + _bias_adjust_mode="change-by-statistic" + if not _finetune_rule_single.get_random_fitting() + else "set-by-statistic", ) - else: - # need to updated - pass return _model - # finetune if not self.multi_task: + finetune_rule_single = self.finetune_links["Default"] self.model = single_model_finetune( - self.model, model_params, self.get_sample_func + self.model, finetune_rule_single, self.get_sample_func ) else: for model_key in self.model_keys: - if model_key in self.finetune_links: + finetune_rule_single = self.finetune_links[model_key] + if not finetune_rule_single.get_resuming(): log.info( f"Model branch {model_key} will be fine-tuned. This may take a long time..." ) self.model[model_key] = single_model_finetune( self.model[model_key], - model_params["model_dict"][model_key], + finetune_rule_single, self.get_sample_func[model_key], ) else: @@ -618,7 +640,10 @@ def single_model_finetune( # Multi-task share params if shared_links is not None: - self.wrapper.share_params(shared_links, resume=resuming or self.rank != 0) + self.wrapper.share_params( + shared_links, + resume=(resuming and not self.finetune_update_stat) or self.rank != 0, + ) if dist.is_available() and dist.is_initialized(): torch.cuda.set_device(LOCAL_RANK) @@ -1205,27 +1230,20 @@ def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results) def _model_change_out_bias( _model, - new_type_map, _sample_func, - _model_params, + _bias_adjust_mode="change-by-statistic", ): old_bias = _model.get_out_bias() _model.change_out_bias( _sample_func, - bias_adjust_mode=_model_params.get("bias_adjust_mode", "change-by-statistic"), + bias_adjust_mode=_bias_adjust_mode, ) new_bias = _model.get_out_bias() model_type_map = _model.get_type_map() - sorter = np.argsort(model_type_map) - missing_types = [t for t in new_type_map if t not in model_type_map] - assert ( - not missing_types - ), f"Some types are not in the pre-trained model: {list(missing_types)} !" - idx_type_map = sorter[np.searchsorted(model_type_map, new_type_map, sorter=sorter)] log.info( - f"Change output bias of {new_type_map!s} " - f"from {to_numpy_array(old_bias[:,idx_type_map]).reshape(-1)!s} " - f"to {to_numpy_array(new_bias[:,idx_type_map]).reshape(-1)!s}." + f"Change output bias of {model_type_map!s} " + f"from {to_numpy_array(old_bias).reshape(-1)!s} " + f"to {to_numpy_array(new_bias).reshape(-1)!s}." 
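The bias handling above reduces to a single choice: a fitting net reused from the pretrained model has its existing bias shifted by the statistics of the fine-tuning data, while a randomly re-initialized fitting net has its bias set directly from those statistics. Restated compactly, with names as in the diff:

```python
bias_adjust_mode = (
    "set-by-statistic"          # fitting net was randomly re-initialized
    if finetune_rule_single.get_random_fitting()
    else "change-by-statistic"  # fitting net reused from the pretrained model
)
_model.change_out_bias(_sample_func, bias_adjust_mode=bias_adjust_mode)
```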
) return _model diff --git a/deepmd/pt/utils/finetune.py b/deepmd/pt/utils/finetune.py index 2de4214070..74f01fc2ea 100644 --- a/deepmd/pt/utils/finetune.py +++ b/deepmd/pt/utils/finetune.py @@ -9,47 +9,32 @@ from deepmd.pt.utils import ( env, ) +from deepmd.utils.finetune import ( + FinetuneRuleItem, +) log = logging.getLogger(__name__) -def change_finetune_model_params_single( +def get_finetune_rule_single( _single_param_target, _model_param_pretrained, from_multitask=False, model_branch="Default", model_branch_from="", + change_model_params=False, ): single_config = deepcopy(_single_param_target) - trainable_param = { - "descriptor": True, - "fitting_net": True, - } - for net_type in trainable_param: - if net_type in single_config: - trainable_param[net_type] = single_config[net_type].get("trainable", True) + new_fitting = False + model_branch_chosen = "Default" + if not from_multitask: - old_type_map, new_type_map = ( - _model_param_pretrained["type_map"], - single_config["type_map"], - ) - assert set(new_type_map).issubset( - old_type_map - ), "Only support for smaller type map when finetuning or resuming." - single_config = deepcopy(_model_param_pretrained) - log.info( - f"Change the '{model_branch}' model configurations according to the pretrained one..." - ) - single_config["new_type_map"] = new_type_map + single_config_chosen = deepcopy(_model_param_pretrained) else: model_dict_params = _model_param_pretrained["model_dict"] - new_fitting = False if model_branch_from == "": model_branch_chosen = next(iter(model_dict_params.keys())) new_fitting = True - single_config["bias_adjust_mode"] = ( - "set-by-statistic" # fitting net re-init - ) log.warning( "The fitting net will be re-init instead of using that in the pretrained model! " "The bias_adjust_mode will be set-by-statistic!" @@ -61,54 +46,73 @@ def change_finetune_model_params_single( f"Available ones are {list(model_dict_params.keys())}." ) single_config_chosen = deepcopy(model_dict_params[model_branch_chosen]) - old_type_map, new_type_map = ( - single_config_chosen["type_map"], - single_config["type_map"], - ) - assert set(new_type_map).issubset( - old_type_map - ), "Only support for smaller type map when finetuning or resuming." - for key_item in ["type_map", "descriptor"]: - if key_item in single_config_chosen: - single_config[key_item] = single_config_chosen[key_item] + old_type_map, new_type_map = ( + single_config_chosen["type_map"], + single_config["type_map"], + ) + finetune_rule = FinetuneRuleItem( + p_type_map=old_type_map, + type_map=new_type_map, + model_branch=model_branch_chosen, + random_fitting=new_fitting, + ) + if change_model_params: + trainable_param = { + "descriptor": single_config.get("descriptor", {}).get("trainable", True), + "fitting_net": single_config.get("fitting_net", {}).get("trainable", True), + } + single_config["descriptor"] = single_config_chosen["descriptor"] if not new_fitting: single_config["fitting_net"] = single_config_chosen["fitting_net"] log.info( f"Change the '{model_branch}' model configurations according to the model branch " f"'{model_branch_chosen}' in the pretrained one..." 
) - single_config["new_type_map"] = new_type_map - single_config["model_branch_chosen"] = model_branch_chosen - single_config["new_fitting"] = new_fitting - for net_type in trainable_param: - if net_type in single_config: - single_config[net_type]["trainable"] = trainable_param[net_type] - else: - single_config[net_type] = {"trainable": trainable_param[net_type]} - return single_config + for net_type in trainable_param: + if net_type in single_config: + single_config[net_type]["trainable"] = trainable_param[net_type] + else: + single_config[net_type] = {"trainable": trainable_param[net_type]} + return single_config, finetune_rule -def change_finetune_model_params(finetune_model, model_config, model_branch=""): +def get_finetune_rules( + finetune_model, model_config, model_branch="", change_model_params=True +): """ - Load model_params according to the pretrained one. - This function modifies the fine-tuning input in different modes as follows: + Get fine-tuning rules and (optionally) change the model_params according to the pretrained one. + + This function gets the fine-tuning rules and (optionally) changes input in different modes as follows: 1. Single-task fine-tuning from a single-task pretrained model: - - Updates the model parameters based on the pretrained model. + - The model will be fine-tuned based on the pretrained model. + - (Optional) Updates the model parameters based on the pretrained model. 2. Single-task fine-tuning from a multi-task pretrained model: - - Updates the model parameters based on the selected branch in the pretrained model. - - The chosen branch can be defined from the command-line or `finetune_head` input parameter. - - If not defined, model parameters in the fitting network will be randomly initialized. + - The model will be fine-tuned based on the selected branch in the pretrained model. + The chosen branch can be defined from the command-line or `finetune_head` input parameter. + If not defined, model parameters in the fitting network will be randomly initialized. + - (Optional) Updates the model parameters based on the selected branch in the pretrained model. 3. Multi-task fine-tuning from a single-task pretrained model: - - Updates model parameters in each branch based on the single branch ('Default') in the pretrained model. - - If `finetune_head` is not set to 'Default', - model parameters in the fitting network of the branch will be randomly initialized. + - The model in each branch will be fine-tuned or resumed based on the single branch ('Default') in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model. + - If `finetune_head` is not defined and the model_key is 'Default', + it will resume from the single branch ('Default') in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and the model_key is not 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the single branch ('Default') in the pretrained model. 4. Multi-task fine-tuning from a multi-task pretrained model: - - Updates model parameters in each branch based on the selected branch in the pretrained model. 
- - The chosen branches can be defined from the `finetune_head` input parameter of each model. - - If `finetune_head` is not defined and the model_key is the same as in the pretrained model, - it will resume from the model_key branch without fine-tuning. - - If `finetune_head` is not defined and a new model_key is used, - model parameters in the fitting network of the branch will be randomly initialized. + - The model in each branch will be fine-tuned or resumed based on the chosen branches in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as one of the branches in the pretrained model, + it will be fine-tuned based on the chosen branch in the pretrained model. + - If `finetune_head` is not defined and the model_key is the same as one of those in the pretrained model, + it will resume from the model_key branch in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and a new model_key is used, + it will be fine-tuned based on the chosen branch in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the chosen branches in the pretrained model. Parameters ---------- @@ -118,14 +122,15 @@ def change_finetune_model_params(finetune_model, model_config, model_branch=""): The fine-tuning input parameters. model_branch The model branch chosen in command-line mode, only for single-task fine-tuning. + change_model_params + Whether to change the model parameters according to the pretrained one. Returns ------- model_config: Updated model parameters. finetune_links: - Fine-tuning rules in a dict format, with `model_branch`: `model_branch_from` pairs. - If `model_key` is not in this dict, it will do just resuming instead of fine-tuning. + Fine-tuning rules in a dict format, with `model_branch`: FinetuneRuleItem pairs. """ multi_task = "model_dict" in model_config state_dict = torch.load(finetune_model, map_location=env.DEVICE) @@ -138,18 +143,15 @@ def change_finetune_model_params(finetune_model, model_config, model_branch=""): # use command-line first if model_branch == "" and "finetune_head" in model_config: model_branch = model_config["finetune_head"] - model_config = change_finetune_model_params_single( + model_config, finetune_rule = get_finetune_rule_single( model_config, last_model_params, from_multitask=finetune_from_multi_task, model_branch="Default", model_branch_from=model_branch, + change_model_params=change_model_params, ) - finetune_links["Default"] = ( - model_config["model_branch_chosen"] - if finetune_from_multi_task - else "Default" - ) + finetune_links["Default"] = finetune_rule else: assert model_branch == "", ( "Multi-task fine-tuning does not support command-line branches chosen!" 
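For readers of this hunk, a minimal usage sketch of the new `get_finetune_rules` entry point for single-task fine-tuning from a multi-task checkpoint. The checkpoint path and the `water_branch` head are hypothetical placeholders, and the call needs a working PyTorch backend plus the checkpoint file on disk:

```python
# Hypothetical usage of get_finetune_rules; "pretrained_multitask.pt" and
# "water_branch" are placeholders, not artifacts shipped with DeePMD-kit.
from deepmd.pt.utils.finetune import get_finetune_rules

model_config = {
    "type_map": ["O", "H"],
    "descriptor": {},   # may be left empty when change_model_params=True
    "fitting_net": {},  # (filled from the pretrained model, as with --use-pretrain-script)
}
model_config, finetune_links = get_finetune_rules(
    "pretrained_multitask.pt",    # hypothetical multi-task checkpoint
    model_config,
    model_branch="water_branch",  # hypothetical head name in the checkpoint
    change_model_params=True,
)

rule = finetune_links["Default"]   # single-task target -> one rule under "Default"
print(rule.get_model_branch())     # which pretrained head the model is tuned from
print(rule.get_random_fitting())   # True only if the fitting net is re-initialized
print(rule.get_index_mapping())    # indices of the new type_map inside the pretrained one
```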
@@ -161,6 +163,7 @@ def change_finetune_model_params(finetune_model, model_config, model_branch=""): else: pretrained_keys = last_model_params["model_dict"].keys() for model_key in target_keys: + resuming = False if "finetune_head" in model_config["model_dict"][model_key]: pretrained_key = model_config["model_dict"][model_key]["finetune_head"] assert pretrained_key in pretrained_keys, ( @@ -168,20 +171,24 @@ def change_finetune_model_params(finetune_model, model_config, model_branch=""): f"Available heads are: {list(pretrained_keys)}" ) model_branch_from = pretrained_key - finetune_links[model_key] = model_branch_from elif model_key in pretrained_keys: # not do anything if not defined "finetune_head" in heads that exist in the pretrained model # this will just do resuming model_branch_from = model_key + resuming = True else: # if not defined "finetune_head" in new heads, the fitting net will bre randomly initialized model_branch_from = "" - finetune_links[model_key] = next(iter(pretrained_keys)) - model_config["model_dict"][model_key] = change_finetune_model_params_single( - model_config["model_dict"][model_key], - last_model_params, - from_multitask=finetune_from_multi_task, - model_branch=model_key, - model_branch_from=model_branch_from, + model_config["model_dict"][model_key], finetune_rule = ( + get_finetune_rule_single( + model_config["model_dict"][model_key], + last_model_params, + from_multitask=finetune_from_multi_task, + model_branch=model_key, + model_branch_from=model_branch_from, + change_model_params=change_model_params, + ) ) + finetune_links[model_key] = finetune_rule + finetune_links[model_key].resuming = resuming return model_config, finetune_links diff --git a/deepmd/pt/utils/utils.py b/deepmd/pt/utils/utils.py index 6b4377038f..86cede347a 100644 --- a/deepmd/pt/utils/utils.py +++ b/deepmd/pt/utils/utils.py @@ -85,6 +85,8 @@ def to_torch_tensor( if xx is None: return None assert xx is not None + if not isinstance(xx, np.ndarray): + return xx # Create a reverse mapping of NP_PRECISION_DICT reverse_precision_dict = {v: k for k, v in NP_PRECISION_DICT.items()} # Use the reverse mapping to find keys with the desired value diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py index 108e486da7..babec2d68e 100644 --- a/deepmd/tf/descriptor/se_a.py +++ b/deepmd/tf/descriptor/se_a.py @@ -154,6 +154,8 @@ class DescrptSeA(DescrptSe): Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. References ---------- @@ -181,6 +183,7 @@ def __init__( uniform_seed: bool = False, spin: Optional[Spin] = None, tebd_input_mode: str = "concat", + type_map: Optional[List[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! 
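The multi-task branch handling above reduces to a small decision table. The following standalone sketch restates it for clarity; it is an illustration, not the code in `deepmd/pt/utils/finetune.py`, and the branch names are invented:

```python
# Illustrative decision table for multi-task fine-tuning: which pretrained head
# a model key uses, whether it merely resumes, and whether the fitting net is
# randomly re-initialized. Branch names are made up for the example.
def resolve_branch(model_key, finetune_head, pretrained_keys):
    if finetune_head is not None:
        assert finetune_head in pretrained_keys, "unknown finetune_head"
        return finetune_head, False, False   # explicit head: fine-tune from it
    if model_key in pretrained_keys:
        return model_key, True, False        # same key, no head: just resume
    return pretrained_keys[0], False, True   # new key: random fitting net

pretrained = ["water", "metal"]
print(resolve_branch("water", "water", pretrained))  # ('water', False, False)
print(resolve_branch("water", None, pretrained))     # ('water', True, False)
print(resolve_branch("alloy", None, pretrained))     # ('water', False, True)
```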
**kwargs, ) -> None: @@ -211,6 +214,7 @@ def __init__( self.orig_exclude_types = exclude_types self.exclude_types = set() self.env_protection = env_protection + self.type_map = type_map for tt in exclude_types: assert len(tt) == 2 self.exclude_types.add((tt[0], tt[1])) @@ -1371,7 +1375,7 @@ def deserialize(cls, data: dict, suffix: str = ""): if cls is not DescrptSeA: raise NotImplementedError(f"Not implemented in class {cls.__name__}") data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) embedding_net_variables = cls.deserialize_network( @@ -1428,7 +1432,7 @@ def serialize(self, suffix: str = "") -> dict: return { "@class": "Descriptor", "type": "se_e2_a", - "@version": 1, + "@version": 2, "rcut": self.rcut_r, "rcut_smth": self.rcut_r_smth, "sel": self.sel_a, @@ -1458,5 +1462,6 @@ def serialize(self, suffix: str = "") -> dict: "davg": self.davg.reshape(self.ntypes, self.nnei_a, 4), "dstd": self.dstd.reshape(self.ntypes, self.nnei_a, 4), }, + "type_map": self.type_map, "spin": self.spin, } diff --git a/deepmd/tf/descriptor/se_atten.py b/deepmd/tf/descriptor/se_atten.py index 2bfe71fcf8..312a7481ba 100644 --- a/deepmd/tf/descriptor/se_atten.py +++ b/deepmd/tf/descriptor/se_atten.py @@ -161,6 +161,8 @@ class DescrptSeAtten(DescrptSeA): Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 'strip'. Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'. The default value is `None`, which means the `tebd_input_mode` setting will be used instead. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. Raises ------ @@ -200,6 +202,7 @@ def __init__( concat_output_tebd: bool = True, env_protection: float = 0.0, # not implement!! stripped_type_embedding: Optional[bool] = None, + type_map: Optional[List[str]] = None, # to be compat with input **kwargs, ) -> None: # Ensure compatibility with the deprecated stripped_type_embedding option. @@ -246,6 +249,7 @@ def __init__( activation_function=activation_function, precision=precision, uniform_seed=uniform_seed, + type_map=type_map, ) """ Constructor @@ -1953,6 +1957,7 @@ def serialize(self, suffix: str = "") -> dict: "davg": self.davg.reshape(self.ntypes, self.nnei_a, 4), "dstd": self.dstd.reshape(self.ntypes, self.nnei_a, 4), }, + "type_map": self.type_map, "trainable": self.trainable, "type_one_side": self.type_one_side, "spin": self.spin, @@ -2061,7 +2066,6 @@ class DescrptDPA1Compat(DescrptSeAtten): Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. spin (Only support None to keep consistent with old implementation.) The old implementation of deepspin. 
@@ -2144,10 +2148,10 @@ def __init__( smooth_type_embedding=smooth_type_embedding, tebd_input_mode=tebd_input_mode, env_protection=env_protection, + type_map=type_map, ) self.tebd_dim = tebd_dim self.use_econf_tebd = use_econf_tebd - self.type_map = type_map self.scaling_factor = scaling_factor self.normalize = normalize self.temperature = temperature @@ -2341,7 +2345,6 @@ def serialize(self, suffix: str = "") -> dict: "temperature": self.temperature, "concat_output_tebd": self.concat_output_tebd, "use_econf_tebd": self.use_econf_tebd, - "type_map": self.type_map, "type_embedding": self.type_embedding.serialize(suffix), } ) diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py index ba24142987..e18b857fd9 100644 --- a/deepmd/tf/descriptor/se_r.py +++ b/deepmd/tf/descriptor/se_r.py @@ -85,6 +85,8 @@ class DescrptSeR(DescrptSe): The precision of the embedding net parameters. Supported options are |PRECISION| uniform_seed Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -103,6 +105,7 @@ def __init__( precision: str = "default", uniform_seed: bool = False, spin: Optional[Spin] = None, + type_map: Optional[List[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! **kwargs, ) -> None: @@ -128,6 +131,7 @@ def __init__( self.orig_exclude_types = exclude_types self.exclude_types = set() self.env_protection = env_protection + self.type_map = type_map for tt in exclude_types: assert len(tt) == 2 self.exclude_types.add((tt[0], tt[1])) @@ -726,7 +730,7 @@ def deserialize(cls, data: dict, suffix: str = ""): if cls is not DescrptSeR: raise NotImplementedError(f"Not implemented in class {cls.__name__}") data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) embedding_net_variables = cls.deserialize_network( data.pop("embeddings"), suffix=suffix ) @@ -768,7 +772,7 @@ def serialize(self, suffix: str = "") -> dict: return { "@class": "Descriptor", "type": "se_r", - "@version": 1, + "@version": 2, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel_r, @@ -797,5 +801,6 @@ def serialize(self, suffix: str = "") -> dict: "davg": self.davg.reshape(self.ntypes, self.nnei_r, 1), "dstd": self.dstd.reshape(self.ntypes, self.nnei_r, 1), }, + "type_map": self.type_map, "spin": self.spin, } diff --git a/deepmd/tf/descriptor/se_t.py b/deepmd/tf/descriptor/se_t.py index b1a278703a..b8e024abb3 100644 --- a/deepmd/tf/descriptor/se_t.py +++ b/deepmd/tf/descriptor/se_t.py @@ -90,6 +90,8 @@ class DescrptSeT(DescrptSe): Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed env_protection: float Protection parameter to prevent division by zero errors during environment matrix calculations. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -106,6 +108,7 @@ def __init__( activation_function: str = "tanh", precision: str = "default", uniform_seed: bool = False, + type_map: Optional[List[str]] = None, # to be compat with input env_protection: float = 0.0, # not implement!! 
**kwargs, ) -> None: @@ -133,6 +136,7 @@ def __init__( self.env_protection = env_protection self.orig_exclude_types = exclude_types self.exclude_types = set() + self.type_map = type_map for tt in exclude_types: assert len(tt) == 2 self.exclude_types.add((tt[0], tt[1])) @@ -879,7 +883,7 @@ def deserialize(cls, data: dict, suffix: str = ""): if cls is not DescrptSeT: raise NotImplementedError(f"Not implemented in class {cls.__name__}") data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) embedding_net_variables = cls.deserialize_network( @@ -922,7 +926,7 @@ def serialize(self, suffix: str = "") -> dict: return { "@class": "Descriptor", "type": "se_e3", - "@version": 1, + "@version": 2, "rcut": self.rcut_r, "rcut_smth": self.rcut_r_smth, "sel": self.sel_a, @@ -949,5 +953,6 @@ def serialize(self, suffix: str = "") -> dict: "davg": self.davg.reshape(self.ntypes, self.nnei_a, 4), "dstd": self.dstd.reshape(self.ntypes, self.nnei_a, 4), }, + "type_map": self.type_map, "trainable": self.trainable, } diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py index d99c793415..fd37b63720 100644 --- a/deepmd/tf/fit/dipole.py +++ b/deepmd/tf/fit/dipole.py @@ -65,6 +65,8 @@ class DipoleFittingSeA(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -80,6 +82,7 @@ def __init__( precision: str = "default", uniform_seed: bool = False, mixed_types: bool = False, + type_map: Optional[List[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -105,6 +108,7 @@ def __init__( self.fitting_net_variables = None self.mixed_prec = None self.mixed_types = mixed_types + self.type_map = type_map def get_sel_type(self) -> int: """Get selected type.""" @@ -361,7 +365,7 @@ def serialize(self, suffix: str) -> dict: data = { "@class": "Fitting", "type": "dipole", - "@version": 1, + "@version": 2, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, "embedding_width": self.dim_rot_mat_1, @@ -383,6 +387,7 @@ def serialize(self, suffix: str) -> dict: variables=self.fitting_net_variables, suffix=suffix, ), + "type_map": self.type_map, } return data @@ -401,7 +406,7 @@ def deserialize(cls, data: dict, suffix: str): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( data["nets"], diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py index bc5180b60a..382d11f45e 100644 --- a/deepmd/tf/fit/dos.py +++ b/deepmd/tf/fit/dos.py @@ -100,6 +100,8 @@ class DOSFitting(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
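The serialize()/deserialize() hunks above bump the payload `@version` from 1 to 2 because the dictionaries now carry `type_map`; version-1 payloads stay loadable since the compatibility check accepts the range 1..2 and a missing `type_map` falls back to the constructor default of `None`. A self-contained sketch of that pattern follows, assuming the check's arguments are `(version, maximum, minimum)`; this is not the library implementation:

```python
# Minimal sketch of version-tolerant deserialization mirroring the hunks above;
# _check_version_range stands in for check_version_compatibility and is an
# assumption for illustration, not the DeePMD-kit function itself.
def _check_version_range(version, maximum, minimum=1):
    if not (minimum <= version <= maximum):
        raise ValueError(f"payload @version {version} outside [{minimum}, {maximum}]")

def deserialize_descriptor(data):
    data = dict(data)
    _check_version_range(data.pop("@version", 1), 2, 1)  # accept both v1 and v2
    data.setdefault("type_map", None)  # old (v1) payloads carry no type_map
    return data

print(deserialize_descriptor({"@version": 1, "rcut": 6.0}))
print(deserialize_descriptor({"@version": 2, "rcut": 6.0, "type_map": ["O", "H"]}))
```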
""" def __init__( @@ -120,6 +122,7 @@ def __init__( layer_name: Optional[List[Optional[str]]] = None, use_aparam_as_mask: bool = False, mixed_types: bool = False, + type_map: Optional[List[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -169,6 +172,7 @@ def __init__( len(self.layer_name) == len(self.n_neuron) + 1 ), "length of layer_name should be that of n_neuron + 1" self.mixed_types = mixed_types + self.type_map = type_map def get_numb_fparam(self) -> int: """Get the number of frame parameters.""" @@ -669,7 +673,7 @@ def deserialize(cls, data: dict, suffix: str = ""): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) data["numb_dos"] = data.pop("dim_out") fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( @@ -696,7 +700,7 @@ def serialize(self, suffix: str = "") -> dict: data = { "@class": "Fitting", "type": "dos", - "@version": 1, + "@version": 2, "var_name": "dos", "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -729,6 +733,7 @@ def serialize(self, suffix: str = "") -> dict: "aparam_avg": self.aparam_avg, "aparam_inv_std": self.aparam_inv_std, }, + "type_map": self.type_map, } return data diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index a1eb916a1c..c2aef0610a 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -8,6 +8,9 @@ import numpy as np +from deepmd.infer.deep_eval import ( + DeepEval, +) from deepmd.tf.common import ( cast_precision, get_activation_func, @@ -55,8 +58,8 @@ from deepmd.utils.data import ( DataRequirementItem, ) -from deepmd.utils.finetune import ( - change_energy_bias_lower, +from deepmd.utils.data_system import ( + DeepmdDataSystem, ) from deepmd.utils.out_stat import ( compute_stats_from_redu, @@ -146,6 +149,8 @@ class EnerFitting(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. 
""" def __init__( @@ -168,6 +173,7 @@ def __init__( use_aparam_as_mask: bool = False, spin: Optional[Spin] = None, mixed_types: bool = False, + type_map: Optional[List[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -202,6 +208,7 @@ def __init__( self.fitting_activation_fn = get_activation_func(activation_function) self.fitting_precision = get_precision(precision) self.trainable = trainable + self.type_map = type_map if self.trainable is None: self.trainable = [True for ii in range(len(self.n_neuron) + 1)] if isinstance(self.trainable, bool): @@ -867,7 +874,7 @@ def deserialize(cls, data: dict, suffix: str = ""): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 1, 1) + check_version_compatibility(data.pop("@version", 1), 2, 1) fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( data["nets"], @@ -893,7 +900,7 @@ def serialize(self, suffix: str = "") -> dict: data = { "@class": "Fitting", "type": "ener", - "@version": 1, + "@version": 2, "var_name": "energy", "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -930,6 +937,7 @@ def serialize(self, suffix: str = "") -> dict: "aparam_avg": self.aparam_avg, "aparam_inv_std": self.aparam_inv_std, }, + "type_map": self.type_map, } return data @@ -950,3 +958,123 @@ def input_requirement(self) -> List[DataRequirementItem]: ) ) return data_requirement + + +def change_energy_bias_lower( + data: DeepmdDataSystem, + dp: DeepEval, + origin_type_map: List[str], + full_type_map: List[str], + bias_atom_e: np.ndarray, + bias_adjust_mode="change-by-statistic", + ntest=10, +): + """Change the energy bias according to the input data and the pretrained model. + + Parameters + ---------- + data : DeepmdDataSystem + The training data. + dp : str + The DeepEval object. + origin_type_map : list + The original type_map in dataset, they are targets to change the energy bias. + full_type_map : str + The full type_map in pretrained model + bias_atom_e : np.ndarray + The old energy bias in the pretrained model. + bias_adjust_mode : str + The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] + 'change-by-statistic' : perform predictions on energies of target dataset, + and do least sqaure on the errors to obtain the target shift as bias. + 'set-by-statistic' : directly use the statistic energy bias in the target dataset. + ntest : int + The number of test samples in a system to change the energy bias. + """ + type_numbs = [] + energy_ground_truth = [] + energy_predict = [] + sorter = np.argsort(full_type_map) + idx_type_map = sorter[ + np.searchsorted(full_type_map, origin_type_map, sorter=sorter) + ] + mixed_type = data.mixed_type + numb_type = len(full_type_map) + for sys in data.data_systems: + test_data = sys.get_test() + nframes = test_data["box"].shape[0] + numb_test = min(nframes, ntest) + if mixed_type: + atype = test_data["type"][:numb_test].reshape([numb_test, -1]) + else: + atype = test_data["type"][0] + assert np.array( + [i in idx_type_map for i in list(set(atype.reshape(-1)))] + ).all(), "Some types are not in 'type_map'!" 
+ energy_ground_truth.append( + test_data["energy"][:numb_test].reshape([numb_test, 1]) + ) + if mixed_type: + type_numbs.append( + np.array( + [(atype == i).sum(axis=-1) for i in idx_type_map], + dtype=np.int32, + ).T + ) + else: + type_numbs.append( + np.tile( + np.bincount(atype, minlength=numb_type)[idx_type_map], + (numb_test, 1), + ) + ) + if bias_adjust_mode == "change-by-statistic": + coord = test_data["coord"][:numb_test].reshape([numb_test, -1]) + if sys.pbc: + box = test_data["box"][:numb_test] + else: + box = None + if dp.get_dim_fparam() > 0: + fparam = test_data["fparam"][:numb_test] + else: + fparam = None + if dp.get_dim_aparam() > 0: + aparam = test_data["aparam"][:numb_test] + else: + aparam = None + ret = dp.eval( + coord, + box, + atype, + mixed_type=mixed_type, + fparam=fparam, + aparam=aparam, + ) + energy_predict.append(ret[0].reshape([numb_test, 1])) + type_numbs = np.concatenate(type_numbs) + energy_ground_truth = np.concatenate(energy_ground_truth) + old_bias = bias_atom_e[idx_type_map] + if bias_adjust_mode == "change-by-statistic": + energy_predict = np.concatenate(energy_predict) + bias_diff = energy_ground_truth - energy_predict + delta_bias = np.linalg.lstsq(type_numbs, bias_diff, rcond=None)[0] + unbias_e = energy_predict + type_numbs @ delta_bias + atom_numbs = type_numbs.sum(-1) + rmse_ae = np.sqrt( + np.mean( + np.square((unbias_e.ravel() - energy_ground_truth.ravel()) / atom_numbs) + ) + ) + bias_atom_e[idx_type_map] += delta_bias.reshape(-1) + log.info( + f"RMSE of atomic energy after linear regression is: {rmse_ae} eV/atom." + ) + elif bias_adjust_mode == "set-by-statistic": + statistic_bias = np.linalg.lstsq(type_numbs, energy_ground_truth, rcond=None)[0] + bias_atom_e[idx_type_map] = statistic_bias.reshape(-1) + else: + raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) + log.info( + f"Change energy bias of {origin_type_map!s} from {old_bias!s} to {bias_atom_e[idx_type_map]!s}." + ) + return bias_atom_e diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py index 460813f309..901eaa7c09 100644 --- a/deepmd/tf/fit/polar.py +++ b/deepmd/tf/fit/polar.py @@ -76,6 +76,8 @@ class PolarFittingSeA(Fitting): mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. + type_map: List[str], Optional + A list of strings. Give the name to each type of atoms. """ def __init__( @@ -95,6 +97,7 @@ def __init__( precision: str = "default", uniform_seed: bool = False, mixed_types: bool = False, + type_map: Optional[List[str]] = None, # to be compat with input **kwargs, ) -> None: """Constructor.""" @@ -148,6 +151,7 @@ def __init__( self.fitting_net_variables = None self.mixed_prec = None self.mixed_types = mixed_types + self.type_map = type_map def get_sel_type(self) -> List[int]: """Get selected atom types.""" @@ -554,7 +558,7 @@ def serialize(self, suffix: str) -> dict: data = { "@class": "Fitting", "type": "polar", - "@version": 1, + "@version": 3, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, "embedding_width": self.dim_rot_mat_1, @@ -579,6 +583,7 @@ def serialize(self, suffix: str) -> dict: variables=self.fitting_net_variables, suffix=suffix, ), + "type_map": self.type_map, } return data @@ -598,7 +603,7 @@ def deserialize(cls, data: dict, suffix: str): """ data = data.copy() check_version_compatibility( - data.pop("@version", 1), 2, 1 + data.pop("@version", 1), 3, 1 ) # to allow PT version. 
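With the full function now in view, the "change-by-statistic" branch boils down to one least-squares solve. The following self-contained NumPy sketch reproduces that arithmetic on synthetic numbers and recovers the injected per-type shift:

```python
import numpy as np

# Synthetic demonstration of the "change-by-statistic" bias update.
rng = np.random.default_rng(0)
true_shift = np.array([[0.5], [-0.2]])          # per-type bias shift to recover (eV)
type_numbs = rng.integers(1, 10, size=(50, 2))  # atoms of each type in 50 frames
energy_predict = rng.normal(size=(50, 1))       # stand-in for pretrained-model energies
energy_ground_truth = energy_predict + type_numbs @ true_shift  # reference energies

bias_diff = energy_ground_truth - energy_predict
delta_bias = np.linalg.lstsq(type_numbs, bias_diff, rcond=None)[0]
unbias_e = energy_predict + type_numbs @ delta_bias
atom_numbs = type_numbs.sum(-1)
rmse_ae = np.sqrt(
    np.mean(np.square((unbias_e.ravel() - energy_ground_truth.ravel()) / atom_numbs))
)
print(delta_bias.ravel())  # ~[ 0.5 -0.2]
print(rmse_ae)             # ~0 for this noise-free example
```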
fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py index a1baf85dbc..06c02bba79 100644 --- a/deepmd/tf/model/model.py +++ b/deepmd/tf/model/model.py @@ -657,7 +657,10 @@ def __init__( self.descrpt = descriptor else: self.descrpt = Descriptor( - **descriptor, ntypes=len(self.get_type_map()), spin=self.spin + **descriptor, + ntypes=len(self.get_type_map()), + spin=self.spin, + type_map=type_map, ) if isinstance(fitting_net, Fitting): @@ -672,6 +675,7 @@ def __init__( ntypes=self.descrpt.get_ntypes(), dim_descrpt=self.descrpt.get_dim_out(), mixed_types=type_embedding is not None or self.descrpt.explicit_ntypes, + type_map=type_map, ) self.rcut = self.descrpt.get_rcut() self.ntypes = self.descrpt.get_ntypes() @@ -680,12 +684,11 @@ def __init__( if type_embedding is not None and isinstance(type_embedding, TypeEmbedNet): self.typeebd = type_embedding elif type_embedding is not None: - if type_embedding.get("use_econf_tebd", False): - type_embedding["type_map"] = type_map self.typeebd = TypeEmbedNet( ntypes=self.ntypes, **type_embedding, padding=self.descrpt.explicit_ntypes, + type_map=type_map, ) elif self.descrpt.explicit_ntypes: default_args = type_embedding_args() @@ -695,6 +698,7 @@ def __init__( ntypes=self.ntypes, **default_args_dict, padding=True, + type_map=type_map, ) else: self.typeebd = None diff --git a/deepmd/tf/model/pairwise_dprc.py b/deepmd/tf/model/pairwise_dprc.py index 44e3943e12..6fd8e82f7e 100644 --- a/deepmd/tf/model/pairwise_dprc.py +++ b/deepmd/tf/model/pairwise_dprc.py @@ -88,13 +88,12 @@ def __init__( if isinstance(type_embedding, TypeEmbedNet): self.typeebd = type_embedding else: - if type_embedding.get("use_econf_tebd", False): - type_embedding["type_map"] = type_map self.typeebd = TypeEmbedNet( ntypes=self.ntypes, **type_embedding, # must use se_atten, so it must be True padding=True, + type_map=type_map, ) self.qm_model = Model( diff --git a/deepmd/tf/utils/type_embed.py b/deepmd/tf/utils/type_embed.py index 77a0744ea4..20beda9d3a 100644 --- a/deepmd/tf/utils/type_embed.py +++ b/deepmd/tf/utils/type_embed.py @@ -6,14 +6,12 @@ Union, ) -import numpy as np - -from deepmd.dpmodel.common import ( - PRECISION_DICT, -) from deepmd.dpmodel.utils.network import ( EmbeddingNet, ) +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, +) from deepmd.tf.common import ( get_activation_func, get_precision, @@ -104,7 +102,6 @@ class TypeEmbedNet: Whether to use electronic configuration type embedding. type_map: List[str], Optional A list of strings. Give the name to each type of atoms. - Only used if `use_econf_tebd` is `True` in type embedding net. """ def __init__( @@ -138,25 +135,7 @@ def __init__( self.use_econf_tebd = use_econf_tebd self.type_map = type_map if self.use_econf_tebd: - from deepmd.utils.econf_embd import ( - electronic_configuration_embedding, - ) - from deepmd.utils.econf_embd import type_map as periodic_table - - assert ( - self.type_map is not None - ), "When using electronic configuration type embedding, type_map must be provided!" - - missing_types = [t for t in self.type_map if t not in periodic_table] - assert not missing_types, ( - "When using electronic configuration type embedding, " - "all element in type_map should be in periodic table! 
" - f"Found these invalid elements: {missing_types}" - ) - self.econf_tebd = np.array( - [electronic_configuration_embedding[kk] for kk in self.type_map], - dtype=PRECISION_DICT[precision], - ) + self.econf_tebd, _ = get_econf_tebd(self.type_map, precision=precision) self.model_type = None def build( diff --git a/deepmd/utils/finetune.py b/deepmd/utils/finetune.py index 1150fe2701..9baa1b5aa8 100644 --- a/deepmd/utils/finetune.py +++ b/deepmd/utils/finetune.py @@ -1,140 +1,164 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging from typing import ( - TYPE_CHECKING, List, + Tuple, ) -import numpy as np +log = logging.getLogger(__name__) -from deepmd.infer.deep_eval import ( - DeepEval, -) -from deepmd.utils.data_system import ( - DeepmdDataSystem, -) -if TYPE_CHECKING: - pass +class FinetuneRuleItem: + def __init__( + self, + p_type_map: List[str], + type_map: List[str], + model_branch: str = "Default", + random_fitting: bool = False, + resuming: bool = False, + ): + """ + The rules for fine-tuning the model from pretrained model. -log = logging.getLogger(__name__) + Parameters + ---------- + p_type_map + The type map from the pretrained model. + type_map + The newly defined type map. + model_branch + From which branch the model should be fine-tuned. + random_fitting + If true, the fitting net will be randomly initialized instead of inherit from the pretrained model. + resuming + If true, the model will just resume from model_branch without fine-tuning. + """ + self.p_type_map = p_type_map + self.type_map = type_map + self.model_branch = model_branch + self.random_fitting = random_fitting + self.resuming = resuming + self.update_type = self.p_type_map != self.type_map + + def get_index_mapping(self): + """Returns the mapping index of newly defined types to those in the pretrained model.""" + return get_index_between_two_maps(self.p_type_map, self.type_map)[0] + + def get_has_new_type(self): + """Returns whether there are unseen types in the new type_map.""" + return get_index_between_two_maps(self.p_type_map, self.type_map)[1] + + def get_model_branch(self): + """Returns the chosen model branch.""" + return self.model_branch + + def get_random_fitting(self): + """Returns whether to use random fitting.""" + return self.random_fitting + + def get_resuming(self): + """Returns whether to only do resuming.""" + return self.resuming + def get_update_type(self): + """Returns whether to update the type related params when loading from pretrained model with redundant types.""" + return self.update_type -def change_energy_bias_lower( - data: DeepmdDataSystem, - dp: DeepEval, - origin_type_map: List[str], - full_type_map: List[str], - bias_atom_e: np.ndarray, - bias_adjust_mode="change-by-statistic", - ntest=10, + def get_pretrained_tmap(self): + """Returns the type map in the pretrained model.""" + return self.p_type_map + + def get_finetune_tmap(self): + """Returns the type map in the fine-tuned model.""" + return self.type_map + + +def get_index_between_two_maps( + old_map: List[str], + new_map: List[str], ): - """Change the energy bias according to the input data and the pretrained model. + """Returns the mapping index of types in new_map to those in the old_map. Parameters ---------- - data : DeepmdDataSystem - The training data. - dp : str - The DeepEval object. - origin_type_map : list - The original type_map in dataset, they are targets to change the energy bias. 
- full_type_map : str - The full type_map in pretrained model - bias_atom_e : np.ndarray - The old energy bias in the pretrained model. - bias_adjust_mode : str - The mode for changing energy bias : ['change-by-statistic', 'set-by-statistic'] - 'change-by-statistic' : perform predictions on energies of target dataset, - and do least sqaure on the errors to obtain the target shift as bias. - 'set-by-statistic' : directly use the statistic energy bias in the target dataset. - ntest : int - The number of test samples in a system to change the energy bias. + old_map : List[str] + The old list of atom type names. + new_map : List[str] + The new list of atom type names. + + Returns + ------- + index_map: List[int] + List contains `len(new_map)` indices, where `index_map[i]` is the index of `new_map[i]` in `old_map`. + If `new_map[i]` is not in the `old_map`, the index will be `i - len(new_map)`. + has_new_type: bool + Whether there are unseen types in the new type_map. + If True, some type related params in the model, such as statistics, need to be extended + to have a length of `len(old_map) + len(new_map)` in the type related dimension. + Then positive indices from 0 to `len(old_map) - 1` will select old params of types in `old_map`, + while negative indices from `-len(new_map)` to -1 will select new params of types in `new_map`. """ - type_numbs = [] - energy_ground_truth = [] - energy_predict = [] - sorter = np.argsort(full_type_map) - idx_type_map = sorter[ - np.searchsorted(full_type_map, origin_type_map, sorter=sorter) - ] - mixed_type = data.mixed_type - numb_type = len(full_type_map) - for sys in data.data_systems: - test_data = sys.get_test() - nframes = test_data["box"].shape[0] - numb_test = min(nframes, ntest) - if mixed_type: - atype = test_data["type"][:numb_test].reshape([numb_test, -1]) - else: - atype = test_data["type"][0] - assert np.array( - [i in idx_type_map for i in list(set(atype.reshape(-1)))] - ).all(), "Some types are not in 'type_map'!" - energy_ground_truth.append( - test_data["energy"][:numb_test].reshape([numb_test, 1]) + missing_type = [i for i in new_map if i not in old_map] + has_new_type = False + if len(missing_type) > 0: + has_new_type = True + log.warning( + f"These types are not in the pretrained model and related params will be randomly initialized: {missing_type}." 
) - if mixed_type: - type_numbs.append( - np.array( - [(atype == i).sum(axis=-1) for i in idx_type_map], - dtype=np.int32, - ).T - ) - else: - type_numbs.append( - np.tile( - np.bincount(atype, minlength=numb_type)[idx_type_map], - (numb_test, 1), - ) - ) - if bias_adjust_mode == "change-by-statistic": - coord = test_data["coord"][:numb_test].reshape([numb_test, -1]) - if sys.pbc: - box = test_data["box"][:numb_test] - else: - box = None - if dp.get_dim_fparam() > 0: - fparam = test_data["fparam"][:numb_test] - else: - fparam = None - if dp.get_dim_aparam() > 0: - aparam = test_data["aparam"][:numb_test] - else: - aparam = None - ret = dp.eval( - coord, - box, - atype, - mixed_type=mixed_type, - fparam=fparam, - aparam=aparam, - ) - energy_predict.append(ret[0].reshape([numb_test, 1])) - type_numbs = np.concatenate(type_numbs) - energy_ground_truth = np.concatenate(energy_ground_truth) - old_bias = bias_atom_e[idx_type_map] - if bias_adjust_mode == "change-by-statistic": - energy_predict = np.concatenate(energy_predict) - bias_diff = energy_ground_truth - energy_predict - delta_bias = np.linalg.lstsq(type_numbs, bias_diff, rcond=None)[0] - unbias_e = energy_predict + type_numbs @ delta_bias - atom_numbs = type_numbs.sum(-1) - rmse_ae = np.sqrt( - np.mean( - np.square((unbias_e.ravel() - energy_ground_truth.ravel()) / atom_numbs) - ) - ) - bias_atom_e[idx_type_map] += delta_bias.reshape(-1) - log.info( - f"RMSE of atomic energy after linear regression is: {rmse_ae} eV/atom." - ) - elif bias_adjust_mode == "set-by-statistic": - statistic_bias = np.linalg.lstsq(type_numbs, energy_ground_truth, rcond=None)[0] - bias_atom_e[idx_type_map] = statistic_bias.reshape(-1) - else: - raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) - log.info( - f"Change energy bias of {origin_type_map!s} from {old_bias!s} to {bias_atom_e[idx_type_map]!s}." - ) - return bias_atom_e + index_map = [] + for ii, t in enumerate(new_map): + index_map.append(old_map.index(t) if t in old_map else ii - len(new_map)) + return index_map, has_new_type + + +def map_atom_exclude_types( + atom_exclude_types: List[int], + remap_index: List[int], +): + """Return the remapped atom_exclude_types according to remap_index. + + Parameters + ---------- + atom_exclude_types : List[int] + Exclude the atomic contribution of the given types. + remap_index : List[int] + The indices in the old type list that correspond to the types in the new type list. + + Returns + ------- + remapped_atom_exclude_types: List[int] + Remapped atom_exclude_types that only keeps the types in the new type list. + + """ + remapped_atom_exclude_types = [ + remap_index.index(i) for i in atom_exclude_types if i in remap_index + ] + return remapped_atom_exclude_types + + +def map_pair_exclude_types( + pair_exclude_types: List[Tuple[int, int]], + remap_index: List[int], +): + """Return the remapped atom_exclude_types according to remap_index. + + Parameters + ---------- + pair_exclude_types : List[Tuple[int, int]] + Exclude the pair of atoms of the given types from computing the output + of the atomic model. + remap_index : List[int] + The indices in the old type list that correspond to the types in the new type list. + + Returns + ------- + remapped_pair_exclude_typess: List[Tuple[int, int]] + Remapped pair_exclude_types that only keeps the types in the new type list. 
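A short, self-contained usage example of the three helpers defined in this file; it assumes DeePMD-kit is importable, and the type names are arbitrary:

```python
# Usage example for the new type-map helpers; type names are arbitrary.
from deepmd.utils.finetune import (
    get_index_between_two_maps,
    map_atom_exclude_types,
    map_pair_exclude_types,
)

old_map = ["Al", "F", "N", "H", "S", "O"]  # type_map of the pretrained model
new_map = ["O", "H", "X"]                  # fine-tuning type_map; "X" is unseen

remap_index, has_new_type = get_index_between_two_maps(old_map, new_map)
print(remap_index, has_new_type)           # [5, 3, -1] True  ("X": 2 - len(new_map) = -1)

# Exclude lists are given in old-map indices and translated to new-map indices;
# entries whose type is not kept in the new map are dropped.
print(map_atom_exclude_types([5, 2], remap_index))            # [0]
print(map_pair_exclude_types([(5, 3), (5, 2)], remap_index))  # [(0, 1)]
```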
+ + """ + remapped_pair_exclude_typess = [ + (remap_index.index(pair[0]), remap_index.index(pair[1])) + for pair in pair_exclude_types + if pair[0] in remap_index and pair[1] in remap_index + ] + return remapped_pair_exclude_typess diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index 77630720c7..1cd88191d2 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -29,7 +29,9 @@ The command above will change the energy bias in the last layer of the fitting n according to the training dataset in input.json. :::{warning} -Note that the elements in the training dataset must be contained in the pre-trained dataset. +Note that in TensorFlow, model parameters including the `type_map` will be overwritten based on those in the pre-trained model. +Please ensure you are familiar with the configurations in the pre-trained model, especially `type_map`, before starting the fine-tuning process. +The elements in the training dataset must be contained in the pre-trained dataset. ::: The finetune procedure will inherit the model structures in `pretrained.pb`, @@ -70,7 +72,14 @@ $ dp --pt train input.json --finetune pretrained.pt We do not support fine-tuning from a randomly initialized fitting net in this case, which is the same as implementations in TensorFlow. ::: -The model section in input.json can be simplified as follows: +The model section in input.json **must be the same as that in the pretrained model**. +If you do not know the model params in the pretrained model, you can add `--use-pretrain-script` in the fine-tuning command: + +```bash +$ dp --pt train input.json --finetune pretrained.pt --use-pretrain-script +``` + +The model section will be overwritten (except the `type_map` subsection) by that in the pretrained model and then the input.json can be simplified as follows: ```json "model": { @@ -80,11 +89,6 @@ The model section in input.json can be simplified as follows: } ``` -:::{warning} -The `type_map` will be overwritten based on that in the pre-trained model. Please ensure you are familiar with the `type_map` configuration in the pre-trained model before starting the fine-tuning process. -This issue will be addressed in the future version. 
-::: - #### Fine-tuning from a multi-task pre-trained model Additionally, within the PyTorch implementation and leveraging the flexibility offered by the framework and the multi-task training capabilities provided by DPA2, diff --git a/source/tests/common/test_type_index_map.py b/source/tests/common/test_type_index_map.py new file mode 100644 index 0000000000..cd7e761ac2 --- /dev/null +++ b/source/tests/common/test_type_index_map.py @@ -0,0 +1,152 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, + map_pair_exclude_types, +) + + +class TestTypeIndexMap(unittest.TestCase): + def test_get_index_between_two_maps(self): + tm_1 = [ + "Al", + "F", + "N", + "H", + "S", + "O", + "He", + "C", + "Li", + "Na", + "Be", + "Mg", + "Si", + "B", + "Ne", + "P", + ] # 16 elements + tm_2 = [ + "P", + "Na", + "Si", + "Mg", + "C", + "O", + "Be", + "B", + "Li", + "S", + "Ne", + "N", + "H", + "Al", + "F", + "He", + ] # 16 elements + tm_3 = ["O", "H", "Be", "C", "N", "B", "Li"] # 7 elements + + # self consistence + old_tm = tm_1 + new_tm = tm_1 + expected_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + expected_has_new = False + result_map, result_has_new = get_index_between_two_maps(old_tm, new_tm) + self.assertEqual(len(result_map), len(new_tm)) + self.assertEqual(expected_map, result_map) + self.assertEqual(expected_has_new, result_has_new) + + # test resort + old_tm = tm_1 + new_tm = tm_2 + expected_map = [15, 9, 12, 11, 7, 5, 10, 13, 8, 4, 14, 2, 3, 0, 1, 6] + expected_has_new = False + result_map, result_has_new = get_index_between_two_maps(old_tm, new_tm) + self.assertEqual(len(result_map), len(new_tm)) + self.assertEqual(expected_map, result_map) + self.assertEqual(expected_has_new, result_has_new) + + # test slim + old_tm = tm_1 + new_tm = tm_3 + expected_map = [5, 3, 10, 7, 2, 13, 8] + expected_has_new = False + result_map, result_has_new = get_index_between_two_maps(old_tm, new_tm) + self.assertEqual(len(result_map), len(new_tm)) + self.assertEqual(expected_map, result_map) + self.assertEqual(expected_has_new, result_has_new) + + # test extend + old_tm = tm_3 + new_tm = tm_1 + expected_map = [-16, -15, 4, 1, -12, 0, -10, 3, 6, -7, 2, -5, -4, 5, -2, -1] + expected_has_new = True + result_map, result_has_new = get_index_between_two_maps(old_tm, new_tm) + self.assertEqual(len(result_map), len(new_tm)) + self.assertEqual(expected_map, result_map) + self.assertEqual(expected_has_new, result_has_new) + + def test_map_exclude_types(self): + old_tm = [ + "Al", + "F", + "N", + "H", + "S", + "O", + "He", + "C", + "Li", + "Na", + "Be", + "Mg", + "Si", + "B", + "Ne", + "P", + ] # 16 elements + new_tm = ["O", "H", "Be", "C", "N", "B", "Li"] # 7 elements + remap_index, _ = get_index_between_two_maps(old_tm, new_tm) + remap_index_reverse, _ = get_index_between_two_maps(new_tm, old_tm) + aem_1 = [0] + aem_2 = [0, 5] + aem_3 = [7, 8, 11] + pem_1 = [(0, 0), (0, 5)] + pem_2 = [(0, 0), (0, 5), (5, 8)] + pem_3 = [(0, 0), (0, 5), (8, 7)] + + # test map_atom_exclude_types + expected_aem_1 = [] + result_aem_1 = map_atom_exclude_types(aem_1, remap_index) + self.assertEqual(expected_aem_1, result_aem_1) + + expected_aem_2 = [0] + result_aem_2 = map_atom_exclude_types(aem_2, remap_index) + self.assertEqual(expected_aem_2, result_aem_2) + + expected_aem_3 = [3, 6] + result_aem_3 = map_atom_exclude_types(aem_3, remap_index) + self.assertEqual(expected_aem_3, result_aem_3) + + expected_aem_1_reverse = [5] + 
result_aem_1_reverse = map_atom_exclude_types(aem_1, remap_index_reverse) + self.assertEqual(expected_aem_1_reverse, result_aem_1_reverse) + + # test map_pair_exclude_types + expected_pem_1 = [] + result_pem_1 = map_pair_exclude_types(pem_1, remap_index) + self.assertEqual(expected_pem_1, result_pem_1) + + expected_pem_2 = [(0, 6)] + result_pem_2 = map_pair_exclude_types(pem_2, remap_index) + self.assertEqual(expected_pem_2, result_pem_2) + + expected_pem_3 = [(6, 3)] + result_pem_3 = map_pair_exclude_types(pem_3, remap_index) + self.assertEqual(expected_pem_3, result_pem_3) + + expected_pem_1_reverse = [(5, 5), (5, 13)] + result_pem_1_reverse = map_pair_exclude_types(pem_1, remap_index_reverse) + self.assertEqual(expected_pem_1_reverse, result_pem_1_reverse) diff --git a/source/tests/pt/model/test_atomic_model_atomic_stat.py b/source/tests/pt/model/test_atomic_model_atomic_stat.py index e779eb572c..4aeeb956a4 100644 --- a/source/tests/pt/model/test_atomic_model_atomic_stat.py +++ b/source/tests/pt/model/test_atomic_model_atomic_stat.py @@ -5,6 +5,7 @@ Path, ) from typing import ( + List, Optional, ) @@ -68,6 +69,14 @@ def output_def(self): def serialize(self) -> dict: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> List[str]: + raise NotImplementedError + def forward( self, descriptor: torch.Tensor, diff --git a/source/tests/pt/model/test_atomic_model_global_stat.py b/source/tests/pt/model/test_atomic_model_global_stat.py index 799948b14f..aff3231792 100644 --- a/source/tests/pt/model/test_atomic_model_global_stat.py +++ b/source/tests/pt/model/test_atomic_model_global_stat.py @@ -5,6 +5,7 @@ Path, ) from typing import ( + List, Optional, ) @@ -80,6 +81,14 @@ def output_def(self): def serialize(self) -> dict: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> List[str]: + raise NotImplementedError + def forward( self, descriptor: torch.Tensor, diff --git a/source/tests/pt/model/test_linear_atomic_model_stat.py b/source/tests/pt/model/test_linear_atomic_model_stat.py index f7feeda550..3b02e57df3 100644 --- a/source/tests/pt/model/test_linear_atomic_model_stat.py +++ b/source/tests/pt/model/test_linear_atomic_model_stat.py @@ -5,6 +5,7 @@ Path, ) from typing import ( + List, Optional, ) @@ -61,6 +62,14 @@ def output_def(self): def serialize(self) -> dict: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> List[str]: + raise NotImplementedError + def forward( self, descriptor: torch.Tensor, @@ -105,6 +114,14 @@ def output_def(self): def serialize(self) -> dict: raise NotImplementedError + def change_type_map( + self, type_map: List[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> List[str]: + raise NotImplementedError + def forward( self, descriptor: torch.Tensor, diff --git a/source/tests/pt/test_finetune.py b/source/tests/pt/test_finetune.py index a874d35497..2db3076da2 100644 --- a/source/tests/pt/test_finetune.py +++ b/source/tests/pt/test_finetune.py @@ -1,6 +1,12 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil import tempfile import unittest +from copy import ( + deepcopy, +) from pathlib import ( Path, ) @@ 
-11,23 +17,34 @@ from deepmd.infer.deep_eval import ( DeepEval, ) +from deepmd.pt.entrypoints.main import ( + get_trainer, +) from deepmd.pt.model.model import ( get_model, ) +from deepmd.pt.utils import ( + env, +) from deepmd.pt.utils.dataloader import ( DpLoaderSet, ) +from deepmd.pt.utils.finetune import ( + get_finetune_rules, +) from deepmd.pt.utils.stat import ( make_stat_input, ) from deepmd.pt.utils.utils import ( to_numpy_array, + to_torch_tensor, ) from deepmd.utils.data import ( DataRequirementItem, ) from .model.test_permutation import ( + model_dpa1, model_dpa2, model_se_e2_a, model_zbl, @@ -75,8 +92,20 @@ class FinetuneTest: def test_finetune_change_out_bias(self): + # get data + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) # get model - model = get_model(self.model_config) + model = get_model(self.config["model"]).to(env.DEVICE) atomic_model = model.atomic_model atomic_model["out_bias"] = torch.rand_like(atomic_model["out_bias"]) energy_bias_before = to_numpy_array(atomic_model["out_bias"])[0].ravel() @@ -91,7 +120,7 @@ def test_finetune_change_out_bias(self): # change energy bias model.atomic_model.change_out_bias( - self.sampled, + sampled, bias_adjust_mode="change-by-statistic", ) energy_bias_after = to_numpy_array(atomic_model["out_bias"])[0].ravel() @@ -103,15 +132,15 @@ def test_finetune_change_out_bias(self): ] ntest = 1 atom_nums = np.tile( - np.bincount(to_numpy_array(self.sampled[0]["atype"][0]))[idx_type_map], + np.bincount(to_numpy_array(sampled[0]["atype"][0]))[idx_type_map], (ntest, 1), ) energy = dp.eval( - to_numpy_array(self.sampled[0]["coord"][:ntest]), - to_numpy_array(self.sampled[0]["box"][:ntest]), - to_numpy_array(self.sampled[0]["atype"][0]), + to_numpy_array(sampled[0]["coord"][:ntest]), + to_numpy_array(sampled[0]["box"][:ntest]), + to_numpy_array(sampled[0]["atype"][0]), )[0] - energy_diff = to_numpy_array(self.sampled[0]["energy"][:ntest]) - energy + energy_diff = to_numpy_array(sampled[0]["energy"][:ntest]) - energy finetune_shift = ( energy_bias_after[idx_type_map] - energy_bias_before[idx_type_map] ) @@ -122,57 +151,161 @@ def test_finetune_change_out_bias(self): # check values np.testing.assert_almost_equal(finetune_shift, ground_truth_shift, decimal=10) + self.tearDown() -class TestEnergyModelSeA(unittest.TestCase, FinetuneTest): - def setUp(self): - self.data_file = [str(Path(__file__).parent / "water/data/data_0")] - self.model_config = model_se_e2_a - self.data = DpLoaderSet( + def test_finetune_change_type(self): + if not self.mixed_types: + # skip when not mixed_types + return + # get data + data = DpLoaderSet( self.data_file, batch_size=1, - type_map=self.model_config["type_map"], + type_map=self.config["model"]["type_map"], ) - self.data.add_data_requirement(energy_data_requirement) - self.sampled = make_stat_input( - self.data.systems, - self.data.dataloaders, + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + data.dataloaders, nbatches=1, ) + data_type_map = self.config["model"]["type_map"] + for [old_type_map, new_type_map] in [ + [["H", "X1", "X2", "O", "B"], ["O", "H", "B"]], + [["O", "H", "B"], ["H", "X1", "X2", "O", "B"]], + ]: + old_type_map_index = np.array( + [old_type_map.index(i) for i in data_type_map], dtype=np.int32 + ) + new_type_map_index = np.array( + [new_type_map.index(i) for 
i in data_type_map], dtype=np.int32 + ) + + # get pretrained model with old type map + config_old_type_map = deepcopy(self.config) + config_old_type_map["model"]["type_map"] = old_type_map + trainer = get_trainer(config_old_type_map) + trainer.run() + finetune_model = ( + config_old_type_map["training"].get("save_ckpt", "model.ckpt") + ".pt" + ) + + # finetune load the same type_map + config_old_type_map_finetune = deepcopy(self.config) + config_old_type_map_finetune["model"]["type_map"] = old_type_map + config_old_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_old_type_map_finetune["model"], + ) + trainer_finetune_old = get_trainer( + config_old_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # finetune load the slim type_map + config_new_type_map_finetune = deepcopy(self.config) + config_new_type_map_finetune["model"]["type_map"] = new_type_map + config_new_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_new_type_map_finetune["model"], + ) + trainer_finetune_new = get_trainer( + config_new_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + # test consistency + ntest = 1 + prec = 1e-10 + model_old_result = trainer_finetune_old.model( + sampled[0]["coord"][:ntest], + to_torch_tensor(old_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + model_new_result = trainer_finetune_new.model( + sampled[0]["coord"][:ntest], + to_torch_tensor(new_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + test_keys = ["energy", "force", "virial"] + for key in test_keys: + torch.testing.assert_close( + model_old_result[key], + model_new_result[key], + rtol=prec, + atol=prec, + ) -@unittest.skip("change bias not implemented yet.") -class TestEnergyZBLModelSeA(unittest.TestCase, FinetuneTest): + self.tearDown() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pt"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): def setUp(self): - self.data_file = [str(Path(__file__).parent / "water/data/data_0")] - self.model_config = model_zbl - self.data = DpLoaderSet( - self.data_file, - batch_size=1, - type_map=self.model_config["type_map"], - ) - self.data.add_data_requirement(energy_data_requirement) - self.sampled = make_stat_input( - self.data.systems, - self.data.dataloaders, - nbatches=1, - ) + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False -class TestEnergyModelDPA2(unittest.TestCase, FinetuneTest): +class TestEnergyZBLModelSeA(FinetuneTest, unittest.TestCase): def setUp(self): - self.data_file = [str(Path(__file__).parent / "water/data/data_0")] - self.model_config = model_dpa2 - self.data = DpLoaderSet( - self.data_file, - batch_size=1, - type_map=self.model_config["type_map"], - ) - self.data.add_data_requirement(energy_data_requirement) - self.sampled = make_stat_input( - 
self.data.systems, - self.data.dataloaders, - nbatches=1, - ) + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_zbl) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + + +class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True + + +class TestEnergyModelDPA2(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["repformer"]["nlayers"] = 2 + + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True if __name__ == "__main__": diff --git a/source/tests/pt/test_multitask.py b/source/tests/pt/test_multitask.py index 08b632a2e4..cf9ec9685d 100644 --- a/source/tests/pt/test_multitask.py +++ b/source/tests/pt/test_multitask.py @@ -15,6 +15,9 @@ from deepmd.pt.entrypoints.main import ( get_trainer, ) +from deepmd.pt.utils.finetune import ( + get_finetune_rules, +) from deepmd.pt.utils.multi_task import ( preprocess_shared_params, ) @@ -116,11 +119,16 @@ def test_multitask_train(self): self.origin_config["model"] ) + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt" + self.origin_config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.origin_config["model"], + ) trainer_finetune = get_trainer( deepcopy(self.origin_config), - finetune_model=self.config["training"].get("save_ckpt", "model.ckpt") - + ".pt", + finetune_model=finetune_model, shared_links=shared_links_finetune, + finetune_links=finetune_links, ) # check parameters diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index f0a988607e..c7094712ad 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -15,6 +15,9 @@ from deepmd.pt.entrypoints.main import ( get_trainer, ) +from deepmd.pt.utils.finetune import ( + get_finetune_rules, +) from .model.test_permutation import ( model_dos, @@ -32,13 +35,37 @@ def test_dp_train(self): trainer = get_trainer(deepcopy(self.config)) trainer.run() - # test fine-tuning + # test fine-tuning using same input + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt" + self.config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config["model"], + ) trainer_finetune = get_trainer( 
deepcopy(self.config), - finetune_model=self.config["training"].get("save_ckpt", "model.ckpt") - + ".pt", + finetune_model=finetune_model, + finetune_links=finetune_links, ) trainer_finetune.run() + + # test fine-tuning using empty input + self.config_empty = deepcopy(self.config) + if "descriptor" in self.config_empty["model"]: + self.config_empty["model"]["descriptor"] = {} + if "fitting_net" in self.config_empty["model"]: + self.config_empty["model"]["fitting_net"] = {} + self.config_empty["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config_empty["model"], + change_model_params=True, + ) + trainer_finetune_empty = get_trainer( + deepcopy(self.config_empty), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + trainer_finetune_empty.run() + self.tearDown() def test_trainable(self): diff --git a/source/tests/universal/common/backend.py b/source/tests/universal/common/backend.py index d5747b77b7..44532a4d68 100644 --- a/source/tests/universal/common/backend.py +++ b/source/tests/universal/common/backend.py @@ -21,3 +21,13 @@ def modules_to_test(self) -> list: @abstractmethod def forward_wrapper(self, x): pass + + @classmethod + @abstractmethod + def convert_to_numpy(cls, xx): + pass + + @classmethod + @abstractmethod + def convert_from_numpy(cls, xx): + pass diff --git a/source/tests/universal/common/cases/cases.py b/source/tests/universal/common/cases/cases.py index a8c9a7cd71..7830a20989 100644 --- a/source/tests/universal/common/cases/cases.py +++ b/source/tests/universal/common/cases/cases.py @@ -9,6 +9,9 @@ def setUp(self): self.nloc = 3 self.nall = 4 self.nf, self.nt = 2, 2 + self.dim_descrpt = 100 + self.dim_embed = 20 + rng = np.random.default_rng() self.coord_ext = np.array( [ [0, 0, 0], @@ -32,6 +35,9 @@ def setUp(self): ], dtype=int, ).reshape([1, self.nloc, sum(self.sel)]) + self.mock_descriptor = rng.normal(size=(1, self.nloc, self.dim_descrpt)) + self.mock_gr = rng.normal(size=(1, self.nloc, self.dim_embed, 3)) + self.mock_energy_bias = rng.normal(size=(self.nt, 1)) self.rcut = 2.2 self.rcut_smth = 0.4 # permutations @@ -47,6 +53,13 @@ def setUp(self): self.mapping = np.concatenate( [self.mapping, self.mapping[:, self.perm]], axis=0 ) + self.mock_descriptor = np.concatenate( + [self.mock_descriptor, self.mock_descriptor[:, self.perm[: self.nloc], :]], + axis=0, + ) + self.mock_gr = np.concatenate( + [self.mock_gr, self.mock_gr[:, self.perm[: self.nloc], :, :]], axis=0 + ) # permute the nlist nlist1 = self.nlist[:, self.perm[: self.nloc], :] diff --git a/source/tests/universal/common/cases/descriptor/utils.py b/source/tests/universal/common/cases/descriptor/utils.py index aa1a8c21d4..e1c2b80c15 100644 --- a/source/tests/universal/common/cases/descriptor/utils.py +++ b/source/tests/universal/common/cases/descriptor/utils.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import itertools from copy import ( deepcopy, ) @@ -8,7 +9,13 @@ from deepmd.dpmodel.utils import ( PairExcludeMask, ) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) +from .....seed import ( + GLOBAL_SEED, +) from ..cases import ( TestCaseSingleFrameWithNlist, ) @@ -24,6 +31,7 @@ def setUp(self): "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, + "type_map": ["O", "H"], } def test_forward_consistency(self): @@ -93,3 +101,290 @@ def test_exclude_types( mapping=mapping_device, ) np.testing.assert_allclose(rd0, rd_ex) + + def test_change_type_map(self): + if ( + not self.module.mixed_types() + or getattr(self.module, 
"sel_no_mixed_types", None) is not None + ): + # skip if not mixed_types + return + coord_ext_device = self.coord_ext + atype_ext_device = self.atype_ext + nlist_device = self.nlist + mapping_device = self.mapping + # type_map for data and exclude_types + original_type_map = ["O", "H"] + full_type_map_test = [ + "H", + "He", + "Li", + "Be", + "B", + "C", + "N", + "O", + "F", + "Ne", + "Na", + "Mg", + "Al", + "Si", + "P", + "S", + "Cl", + "Ar", + ] # 18 elements + rng = np.random.default_rng(GLOBAL_SEED) + for old_tm, new_tm, em, econf in itertools.product( + [ + full_type_map_test[:], # 18 elements + full_type_map_test[ + :16 + ], # 16 elements, double of tebd default first dim + full_type_map_test[:8], # 8 elements, tebd default first dim + ["H", "O"], # slimmed types + ], # old_type_map + [ + full_type_map_test[:], # 18 elements + full_type_map_test[ + :16 + ], # 16 elements, double of tebd default first dim + full_type_map_test[:8], # 8 elements, tebd default first dim + ["H", "O"], # slimmed types + ], # new_type_map + [[], [[0, 1]], [[1, 1]]], # exclude_types for original_type_map + [False, True], # use_econf_tebd + ): + # use shuffled type_map + rng.shuffle(old_tm) + rng.shuffle(new_tm) + old_tm_index = np.array( + [old_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + new_tm_index = np.array( + [new_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + old_tm_em = remap_exclude_types(em, original_type_map, old_tm) + old_tm_input = update_input_type_map(self.input_dict, old_tm) + old_tm_input = update_input_use_econf_tebd(old_tm_input, econf) + old_tm_input = update_input_exclude_types(old_tm_input, old_tm_em) + old_tm_module = self.module_class(**old_tm_input) + old_tm_dd = self.forward_wrapper(old_tm_module) + rd_old_tm, _, _, _, sw_old_tm = old_tm_dd( + coord_ext_device, + old_tm_index[atype_ext_device], + nlist_device, + mapping=mapping_device, + ) + old_tm_module.change_type_map(new_tm) + new_tm_dd = self.forward_wrapper(old_tm_module) + rd_new_tm, _, _, _, sw_new_tm = new_tm_dd( + coord_ext_device, + new_tm_index[atype_ext_device], + nlist_device, + mapping=mapping_device, + ) + np.testing.assert_allclose(rd_old_tm, rd_new_tm) + + def test_change_type_map_extend_stat(self): + if ( + not self.module.mixed_types() + or getattr(self.module, "sel_no_mixed_types", None) is not None + ): + # skip if not mixed_types + return + full_type_map_test = [ + "H", + "He", + "Li", + "Be", + "B", + "C", + "N", + "O", + "F", + "Ne", + "Na", + "Mg", + "Al", + "Si", + "P", + "S", + "Cl", + "Ar", + ] # 18 elements + rng = np.random.default_rng(GLOBAL_SEED) + for small_tm, large_tm in itertools.product( + [ + full_type_map_test[:8], # 8 elements, tebd default first dim + ["H", "O"], # slimmed types + ], # small_tm + [ + full_type_map_test[:], # 18 elements + full_type_map_test[ + :16 + ], # 16 elements, double of tebd default first dim + full_type_map_test[:8], # 8 elements, tebd default first dim + ], # large_tm + ): + # use shuffled type_map + rng.shuffle(small_tm) + rng.shuffle(large_tm) + small_tm_input = update_input_type_map(self.input_dict, small_tm) + small_tm_module = self.module_class(**small_tm_input) + + large_tm_input = update_input_type_map(self.input_dict, large_tm) + large_tm_module = self.module_class(**large_tm_input) + + # set random stat + mean_small_tm, std_small_tm = small_tm_module.get_stat_mean_and_stddev() + mean_large_tm, std_large_tm = large_tm_module.get_stat_mean_and_stddev() + if "list" not in self.input_dict: + mean_rand_small_tm, std_rand_small_tm 
= self.get_rand_stat( + rng, mean_small_tm, std_small_tm + ) + mean_rand_large_tm, std_rand_large_tm = self.get_rand_stat( + rng, mean_large_tm, std_large_tm + ) + else: + # for hybrid + mean_rand_small_tm, std_rand_small_tm = [], [] + mean_rand_large_tm, std_rand_large_tm = [], [] + for ii in range(len(mean_small_tm)): + mean_rand_item_small_tm, std_rand_item_small_tm = ( + self.get_rand_stat(rng, mean_small_tm[ii], std_small_tm[ii]) + ) + mean_rand_small_tm.append(mean_rand_item_small_tm) + std_rand_small_tm.append(std_rand_item_small_tm) + mean_rand_item_large_tm, std_rand_item_large_tm = ( + self.get_rand_stat(rng, mean_large_tm[ii], std_large_tm[ii]) + ) + mean_rand_large_tm.append(mean_rand_item_large_tm) + std_rand_large_tm.append(std_rand_item_large_tm) + + small_tm_module.set_stat_mean_and_stddev( + mean_rand_small_tm, std_rand_small_tm + ) + large_tm_module.set_stat_mean_and_stddev( + mean_rand_large_tm, std_rand_large_tm + ) + + # extend the type map + small_tm_module.change_type_map( + large_tm, model_with_new_type_stat=large_tm_module + ) + + # check the stat + mean_result, std_result = small_tm_module.get_stat_mean_and_stddev() + type_index_map = get_index_between_two_maps(small_tm, large_tm)[0] + + if "list" not in self.input_dict: + self.check_expect_stat( + type_index_map, mean_rand_small_tm, mean_rand_large_tm, mean_result + ) + self.check_expect_stat( + type_index_map, std_rand_small_tm, std_rand_large_tm, std_result + ) + else: + # for hybrid + for ii in range(len(mean_small_tm)): + self.check_expect_stat( + type_index_map, + mean_rand_small_tm[ii], + mean_rand_large_tm[ii], + mean_result[ii], + ) + self.check_expect_stat( + type_index_map, + std_rand_small_tm[ii], + std_rand_large_tm[ii], + std_result[ii], + ) + + def get_rand_stat(self, rng, mean, std): + if not isinstance(mean, list): + mean_rand, std_rand = self.get_rand_stat_item(rng, mean, std) + else: + mean_rand, std_rand = [], [] + for ii in range(len(mean)): + mean_rand_item, std_rand_item = self.get_rand_stat_item( + rng, mean[ii], std[ii] + ) + mean_rand.append(mean_rand_item) + std_rand.append(std_rand_item) + return mean_rand, std_rand + + def get_rand_stat_item(self, rng, mean, std): + mean = self.convert_to_numpy(mean) + std = self.convert_to_numpy(std) + mean_rand = rng.random(size=mean.shape) + std_rand = rng.random(size=std.shape) + mean_rand = self.convert_from_numpy(mean_rand) + std_rand = self.convert_from_numpy(std_rand) + return mean_rand, std_rand + + def check_expect_stat(self, type_index_map, stat_small, stat_large, stat_result): + if not isinstance(stat_small, list): + self.check_expect_stat_item( + type_index_map, stat_small, stat_large, stat_result + ) + else: + for ii in range(len(stat_small)): + self.check_expect_stat_item( + type_index_map, stat_small[ii], stat_large[ii], stat_result[ii] + ) + + def check_expect_stat_item( + self, type_index_map, stat_small, stat_large, stat_result + ): + stat_small = self.convert_to_numpy(stat_small) + stat_large = self.convert_to_numpy(stat_large) + stat_result = self.convert_to_numpy(stat_result) + full_stat = np.concatenate([stat_small, stat_large], axis=0) + expected_stat = full_stat[type_index_map] + np.testing.assert_allclose(expected_stat, stat_result) + + +def update_input_type_map(input_dict, type_map): + updated_input_dict = deepcopy(input_dict) + if "list" not in updated_input_dict: + updated_input_dict["type_map"] = type_map + updated_input_dict["ntypes"] = len(type_map) + else: + # for hybrid + for sd in updated_input_dict["list"]: + 
sd["type_map"] = type_map + sd["ntypes"] = len(type_map) + return updated_input_dict + + +def update_input_use_econf_tebd(input_dict, use_econf_tebd): + updated_input_dict = deepcopy(input_dict) + if "list" not in updated_input_dict: + updated_input_dict["use_econf_tebd"] = use_econf_tebd + else: + # for hybrid + for sd in updated_input_dict["list"]: + sd["use_econf_tebd"] = use_econf_tebd + return updated_input_dict + + +def update_input_exclude_types(input_dict, exclude_types): + updated_input_dict = deepcopy(input_dict) + if "list" not in updated_input_dict: + updated_input_dict["exclude_types"] = exclude_types + else: + # for hybrid + for sd in updated_input_dict["list"]: + sd["exclude_types"] = exclude_types + return updated_input_dict + + +def remap_exclude_types(exclude_types, ori_tm, new_tm): + assert set(ori_tm).issubset(set(new_tm)) + new_ori_index = [new_tm.index(i) for i in ori_tm] + updated_em = [ + (new_ori_index[pair[0]], new_ori_index[pair[1]]) for pair in exclude_types + ] + return updated_em diff --git a/source/tests/universal/common/cases/fitting/__init__.py b/source/tests/universal/common/cases/fitting/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/common/cases/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/common/cases/fitting/fitting.py b/source/tests/universal/common/cases/fitting/fitting.py new file mode 100644 index 0000000000..51642b5977 --- /dev/null +++ b/source/tests/universal/common/cases/fitting/fitting.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from .utils import ( + FittingTestCase, +) + + +class FittingTest(FittingTestCase): + def setUp(self) -> None: + FittingTestCase.setUp(self) diff --git a/source/tests/universal/common/cases/fitting/utils.py b/source/tests/universal/common/cases/fitting/utils.py new file mode 100644 index 0000000000..2ab5fd911b --- /dev/null +++ b/source/tests/universal/common/cases/fitting/utils.py @@ -0,0 +1,193 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +from copy import ( + deepcopy, +) + +import numpy as np + +from deepmd.dpmodel.utils import ( + AtomExcludeMask, +) + +from .....seed import ( + GLOBAL_SEED, +) +from ..cases import ( + TestCaseSingleFrameWithNlist, +) + + +class FittingTestCase(TestCaseSingleFrameWithNlist): + """Common test case for descriptor.""" + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + self.input_dict = { + "ntypes": self.nt, + "dim_descrpt": self.dim_descrpt, + "mixed_types": self.mixed_types, + "type_map": ["O", "H"], + } + + def test_forward_consistency(self): + serialize_dict = self.module.serialize() + # set random bias + rng = np.random.default_rng() + serialize_dict["@variables"]["bias_atom_e"] = rng.random( + size=serialize_dict["@variables"]["bias_atom_e"].shape + ) + self.module = self.module.deserialize(serialize_dict) + ret = [] + for module in self.modules_to_test: + module = self.forward_wrapper(module) + ret.append( + module( + self.mock_descriptor, + self.atype_ext[:, : self.nloc], + gr=self.mock_gr, + ) + ) + for kk in ret[0]: + subret = [] + for rr in ret: + if rr is not None: + subret.append(rr[kk]) + if len(subret): + for ii, rr in enumerate(subret[1:]): + if subret[0] is None: + assert rr is None + else: + np.testing.assert_allclose( + subret[0], + rr, + err_msg=f"compare {kk} output between 0 and {ii}", + ) + + def test_exclude_types( + self, + ): + atype_device = self.atype_ext[:, : 
self.nloc] + serialize_dict = self.module.serialize() + # set random bias + rng = np.random.default_rng() + serialize_dict["@variables"]["bias_atom_e"] = rng.random( + size=serialize_dict["@variables"]["bias_atom_e"].shape + ) + self.module = self.module.deserialize(serialize_dict) + ff = self.forward_wrapper(self.module) + var_name = self.module.var_name + if var_name == "polar": + var_name = "polarizability" + + for em in [[0], [1]]: + ex_pair = AtomExcludeMask(self.nt, em) + atom_mask = ex_pair.build_type_exclude_mask(atype_device) + # exclude neighbors in the output + rd = ff( + self.mock_descriptor, + self.atype_ext[:, : self.nloc], + gr=self.mock_gr, + )[var_name] + for _ in range(len(rd.shape) - len(atom_mask.shape)): + atom_mask = atom_mask[..., None] + rd = rd * atom_mask + + # normal nlist but use exclude_types params + serialize_dict_em = deepcopy(serialize_dict) + serialize_dict_em.update({"exclude_types": em}) + ff_ex = self.forward_wrapper(self.module.deserialize(serialize_dict_em)) + rd_ex = ff_ex( + self.mock_descriptor, + self.atype_ext[:, : self.nloc], + gr=self.mock_gr, + )[var_name] + np.testing.assert_allclose(rd, rd_ex) + + def test_change_type_map(self): + if not self.module.mixed_types: + # skip if not mixed_types + return + atype_device = self.atype_ext[:, : self.nloc] + # type_map for data and exclude_types + original_type_map = ["O", "H"] + full_type_map_test = [ + "H", + "He", + "Li", + "Be", + "B", + "C", + "N", + "O", + "F", + "Ne", + "Na", + "Mg", + "Al", + "Si", + "P", + "S", + "Cl", + "Ar", + ] # 18 elements + rng = np.random.default_rng(GLOBAL_SEED) + for old_tm, new_tm, em in itertools.product( + [ + full_type_map_test[:8], # 8 elements + ["H", "O"], # slimmed types + ], # large_type_map + [ + full_type_map_test[:8], # 8 elements + ["H", "O"], # slimmed types + ], # small_type_map + [ + [], + [0], + [1], + ], # exclude_types for original_type_map + ): + # use shuffled type_map + rng.shuffle(old_tm) + rng.shuffle(new_tm) + old_tm_index = np.array( + [old_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + new_tm_index = np.array( + [new_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + old_tm_em = remap_exclude_types(em, original_type_map, old_tm) + old_tm_input = deepcopy(self.input_dict) + old_tm_input["type_map"] = old_tm + old_tm_input["ntypes"] = len(old_tm) + old_tm_input["exclude_types"] = old_tm_em + old_tm_module = self.module_class(**old_tm_input) + serialize_dict = old_tm_module.serialize() + # set random bias + serialize_dict["@variables"]["bias_atom_e"] = rng.random( + size=serialize_dict["@variables"]["bias_atom_e"].shape + ) + old_tm_module = old_tm_module.deserialize(serialize_dict) + var_name = old_tm_module.var_name + if var_name == "polar": + var_name = "polarizability" + old_tm_ff = self.forward_wrapper(old_tm_module) + rd_old_tm = old_tm_ff( + self.mock_descriptor, + old_tm_index[atype_device], + gr=self.mock_gr, + )[var_name] + old_tm_module.change_type_map(new_tm) + new_tm_ff = self.forward_wrapper(old_tm_module) + rd_new_tm = new_tm_ff( + self.mock_descriptor, + new_tm_index[atype_device], + gr=self.mock_gr, + )[var_name] + np.testing.assert_allclose(rd_old_tm, rd_new_tm) + + +def remap_exclude_types(exclude_types, ori_tm, new_tm): + assert set(ori_tm).issubset(set(new_tm)) + updated_em = [new_tm.index(i) for i in ori_tm if ori_tm.index(i) in exclude_types] + return updated_em diff --git a/source/tests/universal/common/cases/utils/__init__.py b/source/tests/universal/common/cases/utils/__init__.py new file mode 
100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/common/cases/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/common/cases/utils/type_embed.py b/source/tests/universal/common/cases/utils/type_embed.py new file mode 100644 index 0000000000..3bb22e3f02 --- /dev/null +++ b/source/tests/universal/common/cases/utils/type_embed.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from .utils import ( + TypeEmbdTestCase, +) + + +class TypeEmbdTest(TypeEmbdTestCase): + def setUp(self) -> None: + TypeEmbdTestCase.setUp(self) diff --git a/source/tests/universal/common/cases/utils/utils.py b/source/tests/universal/common/cases/utils/utils.py new file mode 100644 index 0000000000..9f86ca1feb --- /dev/null +++ b/source/tests/universal/common/cases/utils/utils.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +from copy import ( + deepcopy, +) + +import numpy as np + +from .....seed import ( + GLOBAL_SEED, +) +from ..cases import ( + TestCaseSingleFrameWithNlist, +) + + +class TypeEmbdTestCase(TestCaseSingleFrameWithNlist): + """Common test case for type embedding network.""" + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + self.input_dict = { + "ntypes": self.nt, + "neuron": [8], + "type_map": ["O", "H"], + "use_econf_tebd": False, + } + self.module_input = {} + + def test_change_type_map(self): + atype_ext_device = self.atype_ext + # type_map for data and exclude_types + original_type_map = ["O", "H"] + full_type_map_test = [ + "H", + "He", + "Li", + "Be", + "B", + "C", + "N", + "O", + "F", + "Ne", + "Na", + "Mg", + "Al", + "Si", + "P", + "S", + "Cl", + "Ar", + ] # 18 elements + rng = np.random.default_rng(GLOBAL_SEED) + for old_tm, new_tm, neuron, act, econf in itertools.product( + [ + full_type_map_test[:], # 18 elements + full_type_map_test[ + :16 + ], # 16 elements, double of tebd default first dim + full_type_map_test[:8], # 8 elements, tebd default first dim + ["H", "O"], # slimmed types + ], # old_type_map + [ + full_type_map_test[:], # 18 elements + full_type_map_test[ + :16 + ], # 16 elements, double of tebd default first dim + full_type_map_test[:8], # 8 elements, tebd default first dim + ["H", "O"], # slimmed types + ], # new_type_map + [[8], [8, 16, 32]], # neuron + ["Linear", "tanh"], # activation_function + [False, True], # use_econf_tebd + ): + do_resnet = neuron[0] in [ + len(old_tm), + len(old_tm) * 2, + len(new_tm), + len(new_tm) * 2, + ] + if do_resnet and act != "Linear": + # `activation_function` must be "Linear" when performing type changing on resnet structure + continue + # use shuffled type_map + rng.shuffle(old_tm) + rng.shuffle(new_tm) + old_tm_index = np.array( + [old_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + new_tm_index = np.array( + [new_tm.index(i) for i in original_type_map], dtype=np.int32 + ) + old_tm_input = deepcopy(self.input_dict) + old_tm_input["type_map"] = old_tm + old_tm_input["ntypes"] = len(old_tm) + old_tm_input["neuron"] = neuron + old_tm_input["activation_function"] = act + old_tm_input["use_econf_tebd"] = econf + old_tm_module = self.module_class(**old_tm_input) + old_tm_dd = self.forward_wrapper(old_tm_module) + + rd_old_tm = old_tm_dd(**self.module_input)[old_tm_index[atype_ext_device]] + old_tm_module.change_type_map(new_tm) + new_tm_dd = self.forward_wrapper(old_tm_module) + rd_new_tm = new_tm_dd(**self.module_input)[new_tm_index[atype_ext_device]] + 
np.testing.assert_allclose(rd_old_tm, rd_new_tm) diff --git a/source/tests/universal/dpmodel/backend.py b/source/tests/universal/dpmodel/backend.py index 61982fea98..aff009b71b 100644 --- a/source/tests/universal/dpmodel/backend.py +++ b/source/tests/universal/dpmodel/backend.py @@ -1,4 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np + from deepmd.dpmodel.common import ( NativeOP, ) @@ -17,6 +19,14 @@ class DPTestCase(BackendTestCase): def forward_wrapper(self, x): return x + @classmethod + def convert_to_numpy(cls, xx: np.ndarray) -> np.ndarray: + return xx + + @classmethod + def convert_from_numpy(cls, xx: np.ndarray) -> np.ndarray: + return xx + @property def deserialized_module(self): return self.module.deserialize(self.module.serialize()) diff --git a/source/tests/universal/dpmodel/descriptor/test_descriptor.py b/source/tests/universal/dpmodel/descriptor/test_descriptor.py index 9d0253c54c..38c1672079 100644 --- a/source/tests/universal/dpmodel/descriptor/test_descriptor.py +++ b/source/tests/universal/dpmodel/descriptor/test_descriptor.py @@ -21,30 +21,35 @@ class TestDescriptorSeADP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeA self.module = DescrptSeA(**self.input_dict) class TestDescriptorSeRDP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeR self.module = DescrptSeR(**self.input_dict) class TestDescriptorSeTDP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeT self.module = DescrptSeT(**self.input_dict) class TestDescriptorDPA1DP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptDPA1 self.module = DescrptDPA1(**self.input_dict) class TestDescriptorDPA2DP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptDPA2 self.input_dict = { "ntypes": self.nt, "repinit": { @@ -57,6 +62,7 @@ def setUp(self): "rcut_smth": self.rcut_smth, "nsel": self.sel_mix[0] // 2, }, + "type_map": ["O", "H"], } self.module = DescrptDPA2(**self.input_dict) @@ -64,12 +70,14 @@ def setUp(self): class TestDescriptorHybridDP(unittest.TestCase, DescriptorTest, DPTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptHybrid ddsub0 = { "type": "se_e2_a", "ntypes": self.nt, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, + "type_map": ["O", "H"], } ddsub1 = { "type": "dpa1", @@ -77,6 +85,33 @@ def setUp(self): "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel_mix, + "type_map": ["O", "H"], + } + self.input_dict = { + "list": [ddsub0, ddsub1], + } + self.module = DescrptHybrid(**self.input_dict) + + +class TestDescriptorHybridMixedDP(unittest.TestCase, DescriptorTest, DPTestCase): + def setUp(self): + DescriptorTest.setUp(self) + self.module_class = DescrptHybrid + ddsub0 = { + "type": "dpa1", + "ntypes": self.nt, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel_mix, + "type_map": ["O", "H"], + } + ddsub1 = { + "type": "dpa1", + "ntypes": self.nt, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel_mix, + "type_map": ["O", "H"], } self.input_dict = { "list": [ddsub0, ddsub1], diff --git a/source/tests/universal/dpmodel/fitting/__init__.py b/source/tests/universal/dpmodel/fitting/__init__.py new file mode 100644 index 
0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/dpmodel/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py new file mode 100644 index 0000000000..ab95fae6b8 --- /dev/null +++ b/source/tests/universal/dpmodel/fitting/test_fitting.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +from deepmd.dpmodel.fitting import ( + DipoleFitting, + DOSFittingNet, + EnergyFittingNet, + PolarFitting, +) + +from ....consistent.common import ( + parameterized, +) +from ...common.cases.fitting.fitting import ( + FittingTest, +) +from ..backend import ( + DPTestCase, +) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingEnergyDP(unittest.TestCase, FittingTest, DPTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.module_class = EnergyFittingNet + self.module = EnergyFittingNet(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingDosDP(unittest.TestCase, FittingTest, DPTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.module_class = DOSFittingNet + self.module = DOSFittingNet(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingDipoleDP(unittest.TestCase, FittingTest, DPTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.input_dict.update({"embedding_width": self.dim_embed}) + self.module_class = DipoleFitting + self.module = DipoleFitting(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingPolarDP(unittest.TestCase, FittingTest, DPTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.input_dict.update({"embedding_width": self.dim_embed}) + self.module_class = PolarFitting + self.module = PolarFitting(**self.input_dict) diff --git a/source/tests/universal/dpmodel/utils/__init__.py b/source/tests/universal/dpmodel/utils/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/dpmodel/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/dpmodel/utils/test_type_embed.py b/source/tests/universal/dpmodel/utils/test_type_embed.py new file mode 100644 index 0000000000..1eec54de9d --- /dev/null +++ b/source/tests/universal/dpmodel/utils/test_type_embed.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +from deepmd.dpmodel.utils.type_embed import ( + TypeEmbedNet, +) + +from ...common.cases.utils.type_embed import ( + TypeEmbdTest, +) +from ..backend import ( + DPTestCase, +) + + +class TestTypeEmbd(unittest.TestCase, TypeEmbdTest, DPTestCase): + def setUp(self): + TypeEmbdTest.setUp(self) + self.module_class = TypeEmbedNet + self.module = TypeEmbedNet(**self.input_dict) diff --git a/source/tests/universal/pt/backend.py b/source/tests/universal/pt/backend.py index 61110a0cc6..5ee4791ec8 100644 --- a/source/tests/universal/pt/backend.py +++ b/source/tests/universal/pt/backend.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np import torch from deepmd.pt.utils.utils import ( @@ -32,6 +33,14 @@ def modules_to_test(self): def test_jit(self): self.script_module + @classmethod + def convert_to_numpy(cls, xx: torch.Tensor) -> 
np.ndarray: + return to_numpy_array(xx) + + @classmethod + def convert_from_numpy(cls, xx: np.ndarray) -> torch.Tensor: + return to_torch_tensor(xx) + def forward_wrapper(self, module): def create_wrapper_method(method): def wrapper_method(self, *args, **kwargs): diff --git a/source/tests/universal/pt/descriptor/test_descriptor.py b/source/tests/universal/pt/descriptor/test_descriptor.py index 87107a2f90..9331ad12f5 100644 --- a/source/tests/universal/pt/descriptor/test_descriptor.py +++ b/source/tests/universal/pt/descriptor/test_descriptor.py @@ -21,30 +21,35 @@ class TestDescriptorSeAPT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeA self.module = DescrptSeA(**self.input_dict) class TestDescriptorSeRPT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeR self.module = DescrptSeR(**self.input_dict) class TestDescriptorSeTPT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptSeT self.module = DescrptSeT(**self.input_dict) class TestDescriptorDPA1PT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptDPA1 self.module = DescrptDPA1(**self.input_dict) class TestDescriptorDPA2PT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptDPA2 self.input_dict = { "ntypes": self.nt, "repinit": { @@ -57,6 +62,7 @@ def setUp(self): "rcut_smth": self.rcut_smth, "nsel": self.sel_mix[0] // 2, }, + "type_map": ["O", "H"], } self.module = DescrptDPA2(**self.input_dict) @@ -64,12 +70,14 @@ def setUp(self): class TestDescriptorHybridPT(unittest.TestCase, DescriptorTest, PTTestCase): def setUp(self): DescriptorTest.setUp(self) + self.module_class = DescrptHybrid ddsub0 = { "type": "se_e2_a", "ntypes": self.nt, "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel, + "type_map": ["O", "H"], } ddsub1 = { "type": "dpa1", @@ -77,6 +85,33 @@ def setUp(self): "rcut": self.rcut, "rcut_smth": self.rcut_smth, "sel": self.sel_mix, + "type_map": ["O", "H"], + } + self.input_dict = { + "list": [ddsub0, ddsub1], + } + self.module = DescrptHybrid(**self.input_dict) + + +class TestDescriptorHybridMixedPT(unittest.TestCase, DescriptorTest, PTTestCase): + def setUp(self): + DescriptorTest.setUp(self) + self.module_class = DescrptHybrid + ddsub0 = { + "type": "dpa1", + "ntypes": self.nt, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel_mix, + "type_map": ["O", "H"], + } + ddsub1 = { + "type": "dpa1", + "ntypes": self.nt, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel_mix, + "type_map": ["O", "H"], } self.input_dict = { "list": [ddsub0, ddsub1], diff --git a/source/tests/universal/pt/fitting/__init__.py b/source/tests/universal/pt/fitting/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/pt/fitting/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/pt/fitting/test_fitting.py b/source/tests/universal/pt/fitting/test_fitting.py new file mode 100644 index 0000000000..1b0ffd3eec --- /dev/null +++ b/source/tests/universal/pt/fitting/test_fitting.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +from deepmd.pt.model.task import ( + DipoleFittingNet, + DOSFittingNet, + EnergyFittingNet, + 
PolarFittingNet, +) + +from ....consistent.common import ( + parameterized, +) +from ...common.cases.fitting.fitting import ( + FittingTest, +) +from ..backend import ( + PTTestCase, +) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingEnergyPT(unittest.TestCase, FittingTest, PTTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.module_class = EnergyFittingNet + self.module = EnergyFittingNet(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingDosPT(unittest.TestCase, FittingTest, PTTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.module_class = DOSFittingNet + self.module = DOSFittingNet(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingDipolePT(unittest.TestCase, FittingTest, PTTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.input_dict.update({"embedding_width": self.dim_embed}) + self.module_class = DipoleFittingNet + self.module = DipoleFittingNet(**self.input_dict) + + +@parameterized( + (True, False), # mixed_types +) +class TestFittingPolarPT(unittest.TestCase, FittingTest, PTTestCase): + def setUp(self): + (self.mixed_types,) = self.param + FittingTest.setUp(self) + self.input_dict.update({"embedding_width": self.dim_embed}) + self.module_class = PolarFittingNet + self.module = PolarFittingNet(**self.input_dict) diff --git a/source/tests/universal/pt/utils/__init__.py b/source/tests/universal/pt/utils/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/universal/pt/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/universal/pt/utils/test_type_embed.py b/source/tests/universal/pt/utils/test_type_embed.py new file mode 100644 index 0000000000..0a53eeeccb --- /dev/null +++ b/source/tests/universal/pt/utils/test_type_embed.py @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +from deepmd.pt.model.network.network import ( + TypeEmbedNetConsistent, +) +from deepmd.pt.utils import ( + env, +) + +from ...common.cases.utils.type_embed import ( + TypeEmbdTest, +) +from ..backend import ( + PTTestCase, +) + + +class TestTypeEmbd(unittest.TestCase, TypeEmbdTest, PTTestCase): + def setUp(self): + TypeEmbdTest.setUp(self) + self.module_class = TypeEmbedNetConsistent + self.module = TypeEmbedNetConsistent(**self.input_dict) + self.module_input = {"device": env.DEVICE}
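
As a quick reference for how the new stat-extension checks work: `check_expect_stat_item` concatenates the small-type-map and large-type-map statistics and gathers rows through the index map returned by `get_index_between_two_maps`, so a type that already existed keeps its original statistics while a newly added type picks up the row from the larger model. A minimal, self-contained NumPy sketch of that pattern follows; the helper `index_between_maps` is hypothetical and only mirrors the role of `get_index_between_two_maps` (whose exact index convention may differ), so read it as an illustration rather than the library API.

import numpy as np

def index_between_maps(old_map, new_map):
    # Hypothetical helper: for each type in new_map, take its row from the
    # old statistics if the type already exists, otherwise point into the
    # block of new-map statistics appended after the old ones.
    return [
        old_map.index(tt) if tt in old_map else len(old_map) + ii
        for ii, tt in enumerate(new_map)
    ]

small_map = ["H", "O"]
large_map = ["H", "C", "N", "O"]
rng = np.random.default_rng(0)
stat_small = rng.random((len(small_map), 3))  # per-type stat under the small map
stat_large = rng.random((len(large_map), 3))  # per-type stat under the large map

# Mirror check_expect_stat_item: concatenate both stats, then gather rows
# according to the index map between the two type maps.
type_index = index_between_maps(small_map, large_map)
full_stat = np.concatenate([stat_small, stat_large], axis=0)
extended = full_stat[type_index]

# Types already present keep their original rows; new types take the large-map rows.
assert np.allclose(extended[large_map.index("H")], stat_small[small_map.index("H")])
assert np.allclose(extended[large_map.index("C")], stat_large[large_map.index("C")])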