From 66add48ecea7616feb428e1b76ec76142c80f62e Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 4 Apr 2024 01:36:06 +0800 Subject: [PATCH 01/13] stat: add support for std. out_stat: support output var of any shape. base_atomic_model: store the stat as an attribute. --- .../atomic_model/make_base_atomic_model.py | 3 - deepmd/dpmodel/output_def.py | 4 + .../model/atomic_model/base_atomic_model.py | 168 +++++++++++- .../pt/model/atomic_model/dp_atomic_model.py | 13 +- .../model/atomic_model/linear_atomic_model.py | 6 +- .../atomic_model/pairtab_atomic_model.py | 9 +- deepmd/pt/model/task/invar_fitting.py | 3 +- deepmd/pt/utils/stat.py | 92 +++++-- deepmd/utils/out_stat.py | 15 +- .../tests/pt/model/test_atomic_model_stat.py | 254 ++++++++++++++++++ source/tests/pt/test_stat.py | 6 +- 11 files changed, 509 insertions(+), 64 deletions(-) create mode 100644 source/tests/pt/model/test_atomic_model_stat.py diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index fe5882ef36..3e02a5d076 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -51,9 +51,6 @@ def atomic_output_def(self) -> FittingOutputDef: """ return self.fitting_output_def() - def get_output_keys(self) -> List[str]: - return list(self.atomic_output_def().keys()) - @abstractmethod def get_rcut(self) -> float: """Get the cut-off radius.""" diff --git a/deepmd/dpmodel/output_def.py b/deepmd/dpmodel/output_def.py index cbebb4908a..1c17fae432 100644 --- a/deepmd/dpmodel/output_def.py +++ b/deepmd/dpmodel/output_def.py @@ -224,6 +224,10 @@ def __init__( if not self.r_differentiable: raise ValueError("only r_differentiable variable can calculate hessian") + @property + def size(self): + return self.output_size + class FittingOutputDef: """Defines the shapes and other properties of the fitting network outputs.
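Note: the base_atomic_model diff that follows packs the per-variable output bias and std into single padded buffers of shape [n_out, ntypes, max_out_size], using the new `size` property above. Below is a minimal numpy sketch of that pack/fetch layout; the helper names and example output shapes are illustrative assumptions, not code from this patch.

import numpy as np

ntypes = 2
shapes = {"energy": [1], "dipole": [3]}  # hypothetical output shapes, not from this patch
sizes = {kk: int(np.prod(ss)) for kk, ss in shapes.items()}
max_out_size = max(sizes.values())

# one padded row per output variable, mirroring init_out_stat below
out_bias = np.zeros([len(shapes), ntypes, max_out_size])

def store(kk, vv):
    # flatten the per-type bias into the leading slice of its row (cf. _store_out_stat)
    idx = list(shapes).index(kk)
    out_bias[idx, :, : sizes[kk]] = vv.reshape(ntypes, sizes[kk])

def fetch(kk):
    # recover the original [ntypes, *shape] view (cf. _fetch_out_stat)
    idx = list(shapes).index(kk)
    return out_bias[idx, :, : sizes[kk]].reshape([ntypes] + shapes[kk])

store("dipole", np.arange(6.0).reshape(2, 3))
assert fetch("dipole").shape == (2, 3)
assert fetch("energy").shape == (2, 1)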
diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 2980f0b21b..ed22c5c8e6 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -23,6 +23,7 @@ from deepmd.pt.utils import ( AtomExcludeMask, PairExcludeMask, + env, ) from deepmd.pt.utils.nlist import ( extend_input_and_build_neighbor_list, @@ -35,19 +36,48 @@ ) log = logging.getLogger(__name__) +dtype = env.GLOBAL_PT_FLOAT_PRECISION +device = env.DEVICE BaseAtomicModel_ = make_base_atomic_model(torch.Tensor) -class BaseAtomicModel(BaseAtomicModel_): +class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): def __init__( self, + type_map, atom_exclude_types: List[int] = [], pair_exclude_types: List[Tuple[int, int]] = [], ): - super().__init__() + torch.nn.Module.__init__(self) + BaseAtomicModel_.__init__(self) + self.type_map = type_map self.reinit_atom_exclude(atom_exclude_types) self.reinit_pair_exclude(pair_exclude_types) + self.rcond = None + self.atom_ener = None + + def init_out_stat(self): + """Initialize the output bias.""" + ntypes = self.get_ntypes() + self.bias_keys: List[str] = list(self.fitting_output_def().keys()) + self.max_out_size = max( + [self.atomic_output_def()[kk].size for kk in self.bias_keys] + ) + self.n_out = len(self.bias_keys) + self.out_bias_data = torch.zeros( + [self.n_out, ntypes, self.max_out_size], dtype=dtype, device=device + ) + self.out_std_data = torch.ones( + [self.n_out, ntypes, self.max_out_size], dtype=dtype, device=device + ) + self.register_buffer("out_bias", self.out_bias_data) + self.register_buffer("out_std", self.out_std_data) + + @torch.jit.export + def get_type_map(self) -> List[str]: + """Get the type map.""" + return self.type_map def reinit_atom_exclude( self, @@ -165,6 +195,7 @@ def forward_common_atomic( fparam=fparam, aparam=aparam, ) + ret_dict = self.apply_out_bias(ret_dict, atype) # nf x nloc atom_mask = ext_atom_mask[:, :nloc].to(torch.int32) @@ -210,9 +241,60 @@ def compute_or_load_stat( """ raise NotImplementedError + def compute_or_load_out_stat( + self, + merged: Union[Callable[[], List[dict]], List[dict]], + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], List[dict]], List[dict]] + - List[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `torch.Tensor` + originating from the `i`-th data system. + - Callable[[], List[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. + + """ + self.change_out_bias( + merged, + stat_file_path=stat_file_path, + bias_adjust_mode="set-by-statistic", + ) + + def apply_out_bias( + self, + ret: Dict[str, torch.Tensor], + atype: torch.Tensor, + ): + """Apply the bias to each atomic output. + The developer may override the method to define how the bias is applied + to the atomic output of the model. + + Parameters + ---------- + ret + The returned dict by the forward_atomic method + atype + The atom types. 
nf x nloc + + """ + out_bias, out_std = self._fetch_out_stat(self.bias_keys) + for kk in self.bias_keys: + # nf x nloc x odims, out_bias: ntypes x odims + ret[kk] = ret[kk] + out_bias[kk][atype] + return ret + def change_out_bias( self, sample_merged, + stat_file_path: Optional[DPPath] = None, bias_adjust_mode="change-by-statistic", ) -> None: """Change the output bias according to the input data and the pretrained model. @@ -233,20 +315,28 @@ def change_out_bias( 'set-by-statistic' : directly use the statistic output bias in the target dataset. """ if bias_adjust_mode == "change-by-statistic": - delta_bias = compute_output_stats( + delta_bias, out_std = compute_output_stats( sample_merged, self.get_ntypes(), - keys=self.get_output_keys(), + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, model_forward=self._get_forward_wrapper_func(), - )["energy"] + rcond=self.rcond, + atom_ener=self.atom_ener, + ) self.set_out_bias(delta_bias, add=True) + self._store_out_stat(delta_bias, out_std, add=True) elif bias_adjust_mode == "set-by-statistic": - bias_atom = compute_output_stats( + bias_out, std_out = compute_output_stats( sample_merged, self.get_ntypes(), - keys=self.get_output_keys(), - )["energy"] - self.set_out_bias(bias_atom) + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, + rcond=self.rcond, + atom_ener=self.atom_ener, + ) + self.set_out_bias(bias_out) + self._store_out_stat(bias_out, std_out) else: raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) @@ -279,3 +369,63 @@ def model_forward(coord, atype, box, fparam=None, aparam=None): return {kk: vv.detach() for kk, vv in atomic_ret.items()} return model_forward + + def _varsize( + self, + shape: List[int], + ) -> int: + output_size = 1 + len_shape = len(shape) + for i in range(len_shape): + output_size *= shape[i] + return output_size + + def _get_bias_index( + self, + kk: str, + ) -> int: + res: List[int] = [] + for i, e in enumerate(self.bias_keys): + if e == kk: + res.append(i) + assert len(res) == 1 + return res[0] + + def _store_out_stat( + self, + out_bias: Dict[str, torch.Tensor], + out_std: Dict[str, torch.Tensor], + add: bool = False, + ): + ntypes = self.get_ntypes() + out_bias_data = torch.clone(self.out_bias) + out_std_data = torch.clone(self.out_std) + for kk in out_bias.keys(): + assert kk in out_std.keys() + idx = self._get_bias_index(kk) + size = self._varsize(self.atomic_output_def()[kk].shape) + if not add: + out_bias_data[idx, :, :size] = out_bias[kk].view(ntypes, size) + else: + out_bias_data[idx, :, :size] += out_bias[kk].view(ntypes, size) + out_std_data[idx, :, :size] = out_std[kk].view(ntypes, size) + self.out_bias.copy_(out_bias_data) + self.out_std.copy_(out_std_data) + + def _fetch_out_stat( + self, + keys: List[str], + ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: + ret_bias = {} + ret_std = {} + ntypes = self.get_ntypes() + for kk in keys: + idx = self._get_bias_index(kk) + isize = self._varsize(self.atomic_output_def()[kk].shape) + ret_bias[kk] = self.out_bias[idx, :, :isize].view( + [ntypes] + list(self.atomic_output_def()[kk].shape) # noqa: RUF005 + ) + ret_std[kk] = self.out_std[idx, :, :isize].view( + [ntypes] + list(self.atomic_output_def()[kk].shape) # noqa: RUF005 + ) + return ret_bias, ret_std diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 13b8f09a79..8db74f5f74 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ 
b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -34,7 +34,7 @@ @BaseAtomicModel.register("standard") -class DPAtomicModel(torch.nn.Module, BaseAtomicModel): +class DPAtomicModel(BaseAtomicModel): """Model give atomic prediction of some physical property. Parameters @@ -55,7 +55,7 @@ def __init__( type_map: List[str], **kwargs, ): - torch.nn.Module.__init__(self) + super().__init__(type_map, **kwargs) ntypes = len(type_map) self.type_map = type_map self.ntypes = ntypes @@ -63,9 +63,9 @@ def __init__( self.rcut = self.descriptor.get_rcut() self.sel = self.descriptor.get_sel() self.fitting_net = fitting - # order matters ntypes and type_map should be initialized first. - BaseAtomicModel.__init__(self, **kwargs) + super().init_out_stat() + @torch.jit.export def fitting_output_def(self) -> FittingOutputDef: """Get the output def of the fitting net.""" return ( @@ -79,11 +79,6 @@ def get_rcut(self) -> float: """Get the cut-off radius.""" return self.rcut - @torch.jit.export - def get_type_map(self) -> List[str]: - """Get the type map.""" - return self.type_map - def get_sel(self) -> List[int]: """Get the neighbor selection.""" return self.sel diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index f599399e66..f9fc97dea4 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -39,7 +39,7 @@ ) -class LinearEnergyAtomicModel(torch.nn.Module, BaseAtomicModel): +class LinearEnergyAtomicModel(BaseAtomicModel): """Linear model make linear combinations of several existing models. Parameters @@ -57,7 +57,8 @@ def __init__( type_map: List[str], **kwargs, ): - torch.nn.Module.__init__(self) + super().__init__(type_map, **kwargs) + super().init_out_stat() self.models = torch.nn.ModuleList(models) sub_model_type_maps = [md.get_type_map() for md in models] err_msg = [] @@ -78,7 +79,6 @@ def __init__( self.get_model_rcuts(), dtype=torch.float64, device=env.DEVICE ) self.nsels = torch.tensor(self.get_model_nsels(), device=env.DEVICE) - BaseAtomicModel.__init__(self, **kwargs) def mixed_types(self) -> bool: """If true, the model diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 4db77790e9..d9fa1760a4 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -36,7 +36,7 @@ @BaseAtomicModel.register("pairtab") -class PairTabAtomicModel(torch.nn.Module, BaseAtomicModel): +class PairTabAtomicModel(BaseAtomicModel): """Pairwise tabulation energy model. This model can be used to tabulate the pairwise energy between atoms for either @@ -78,12 +78,12 @@ def __init__( atom_ener: Optional[List[float]] = None, **kwargs, ): - torch.nn.Module.__init__(self) + super().__init__(type_map, **kwargs) + super().init_out_stat() self.tab_file = tab_file self.rcut = rcut self.tab = self._set_pairtab(tab_file, rcut) - BaseAtomicModel.__init__(self, **kwargs) self.rcond = rcond self.atom_ener = atom_ener self.type_map = type_map @@ -227,6 +227,7 @@ def compute_or_load_stat( The path to the stat file. 
""" + # [0] to get the mean (bias) bias_atom_e = compute_output_stats( merged, self.ntypes, @@ -234,7 +235,7 @@ def compute_or_load_stat( stat_file_path=stat_file_path, rcond=self.rcond, atom_ener=self.atom_ener, - )["energy"] + )[0]["energy"] self.bias_atom_e.copy_( torch.tensor(bias_atom_e, device=env.DEVICE).view([self.ntypes, 1]) ) diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 810cf2d675..31f5b5d6c9 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -164,6 +164,7 @@ def compute_output_stats( The path to the stat file. """ + # [0] to get the mean (bias) bias_atom_e = compute_output_stats( merged, self.ntypes, @@ -171,7 +172,7 @@ def compute_output_stats( stat_file_path=stat_file_path, rcond=self.rcond, atom_ener=self.atom_ener, - )[self.var_name] + )[0][self.var_name] self.bias_atom_e.copy_(bias_atom_e.view([self.ntypes, self.dim_out])) def output_def(self) -> FittingOutputDef: diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index d0a614cf79..0f531560f6 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -83,28 +83,58 @@ def _restore_from_file( keys: List[str] = ["energy"], ) -> Optional[dict]: if stat_file_path is None: - return None + return None, None stat_files = [stat_file_path / f"bias_atom_{kk}" for kk in keys] - if any(not (ii.is_file()) for ii in stat_files): - return None - ret = {} + if all(not (ii.is_file()) for ii in stat_files): + return None, None + stat_files = [stat_file_path / f"std_atom_{kk}" for kk in keys] + if all(not (ii.is_file()) for ii in stat_files): + return None, None + ret_bias = {} + ret_std = {} for kk in keys: fp = stat_file_path / f"bias_atom_{kk}" - assert fp.is_file() - ret[kk] = fp.load_numpy() - return ret + # only read the key that exists + if fp.is_file(): + ret_bias[kk] = fp.load_numpy() + for kk in keys: + fp = stat_file_path / f"std_atom_{kk}" + # only read the key that exists + if fp.is_file(): + ret_std[kk] = fp.load_numpy() + return ret_bias, ret_std def _save_to_file( stat_file_path: DPPath, - results: dict, + bias_out: dict, + std_out: dict, ): assert stat_file_path is not None stat_file_path.mkdir(exist_ok=True, parents=True) - for kk, vv in results.items(): + for kk, vv in bias_out.items(): fp = stat_file_path / f"bias_atom_{kk}" fp.save_numpy(vv) + for kk, vv in std_out.items(): + fp = stat_file_path / f"std_atom_{kk}" + fp.save_numpy(vv) + + +def _post_process_stat( + out_bias, + out_std, +): + """Post process the statistics. + + For global statistics, we do not have the std for each type of atoms, + thus fake the output std by ones for all the types. + + """ + new_std = {} + for kk, vv in out_bias.items(): + new_std[kk] = np.ones_like(vv) + return out_bias, new_std def _compute_model_predict( @@ -183,7 +213,7 @@ def compute_output_stats( The difference will then be used to calculate the delta complement energy bias for each type. """ # try to restore the bias from stat file - bias_atom_e = _restore_from_file(stat_file_path, keys) + bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys) # failed to restore the bias from stat file. 
compute if bias_atom_e is None: @@ -210,6 +240,7 @@ def compute_output_stats( merged_output = {kk: to_numpy_array(torch.cat(outputs[kk])) for kk in keys} # shape: (nframes, ntypes) merged_natoms = to_numpy_array(torch.cat(input_natoms)[:, 2:]) + nf = merged_natoms.shape[0] if atom_ener is not None and len(atom_ener) > 0: assigned_atom_ener = np.array( [ee if ee is not None else np.nan for ee in atom_ener] @@ -224,39 +255,46 @@ def compute_output_stats( model_predict = _compute_model_predict(sampled, keys, model_forward) stats_input = {kk: merged_output[kk] - model_predict[kk] for kk in keys} - # [0]: take the first otuput (mean) of compute_stats_from_redu - bias_atom_e = { - kk: compute_stats_from_redu( + bias_atom_e = {} + std_atom_e = {} + for kk in keys: + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( stats_input[kk], merged_natoms, assigned_bias=assigned_atom_ener, rcond=rcond, - )[0] - for kk in keys - } + ) + bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e) + # unbias_e is only used for print rmse if model_forward is None: - unbias_e = {kk: merged_natoms @ bias_atom_e[kk] for kk in keys} + unbias_e = { + kk: merged_natoms @ bias_atom_e[kk].reshape(ntypes, -1) for kk in keys + } else: unbias_e = { - kk: model_predict[kk] + merged_natoms @ bias_atom_e[kk] for kk in keys + kk: model_predict[kk].reshape(ntypes, -1) + + merged_natoms @ bias_atom_e[kk].reshape(ntypes, -1) + for kk in keys } atom_numbs = merged_natoms.sum(-1) + + def rmse(x): + return np.sqrt(np.mean(np.square(x))) + for kk in keys: - rmse_ae = np.sqrt( - np.mean( - np.square( - (unbias_e[kk].ravel() - merged_output[kk].ravel()) / atom_numbs - ) - ) + rmse_ae = rmse( + (unbias_e[kk].reshape(nf, -1) - merged_output[kk].reshape(nf, -1)) + / atom_numbs[:, None] ) log.info( f"RMSE of {kk} per atom after linear regression is: {rmse_ae} in the unit of {kk}." ) if stat_file_path is not None: - _save_to_file(stat_file_path, bias_atom_e) + _save_to_file(stat_file_path, bias_atom_e, std_atom_e) - ret = {kk: to_torch_tensor(bias_atom_e[kk]) for kk in keys} + ret_bias = {kk: to_torch_tensor(vv) for kk, vv in bias_atom_e.items()} + ret_std = {kk: to_torch_tensor(vv) for kk, vv in std_atom_e.items()} - return ret + return ret_bias, ret_std diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index 3956dac654..bf883fd4fe 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -23,11 +23,11 @@ def compute_stats_from_redu( Parameters ---------- output_redu - The reduced output value, shape is [nframes, ndim]. + The reduced output value, shape is [nframes, *(odim0, odim1, ...)]. natoms The number of atoms for each atom, shape is [nframes, ntypes]. assigned_bias - The assigned output bias, shape is [ntypes, ndim]. Set to nan + The assigned output bias, shape is [ntypes, *(odim0, odim1, ...)]. Set to nan if not assigned. rcond Cut-off ratio for small singular values of a. @@ -35,12 +35,15 @@ def compute_stats_from_redu( Returns ------- np.ndarray - The computed output bias, shape is [ntypes, ndim]. + The computed output bias, shape is [ntypes, *(odim0, odim1, ...)]. np.ndarray - The computed output std, shape is [ntypes, ndim]. + The computed output std, shape is [*(odim0, odim1, ...)]. 
""" - output_redu = np.array(output_redu) natoms = np.array(natoms) + nf, _ = natoms.shape + output_redu = np.array(output_redu) + var_shape = list(output_redu.shape[1:]) + output_redu = output_redu.reshape(nf, -1) # check shape assert output_redu.ndim == 2 assert natoms.ndim == 2 @@ -74,6 +77,8 @@ def compute_stats_from_redu( # rest_redu: nframes, ndim rest_redu = output_redu - np.einsum("ij,jk->ik", natoms, computed_output_bias) output_std = rest_redu.std(axis=0) + computed_output_bias = computed_output_bias.reshape([natoms.shape[1]] + var_shape) # noqa: RUF005 + output_std = output_std.reshape(var_shape) return computed_output_bias, output_std diff --git a/source/tests/pt/model/test_atomic_model_stat.py b/source/tests/pt/model/test_atomic_model_stat.py new file mode 100644 index 0000000000..10f24e096d --- /dev/null +++ b/source/tests/pt/model/test_atomic_model_stat.py @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tempfile +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import h5py +import numpy as np +import torch + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, +) +from deepmd.pt.model.atomic_model import ( + BaseAtomicModel, + DPAtomicModel, +) +from deepmd.pt.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pt.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.utils import ( + to_numpy_array, + to_torch_tensor, +) +from deepmd.utils.path import ( + DPPath, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PT_FLOAT_PRECISION + + +class FooFitting(torch.nn.Module, BaseFitting): + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "foo", + [1], + reduciable=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "pix", + [1], + reduciable=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "bar", + [1, 2], + reduciable=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def serialize(self) -> dict: + raise NotImplementedError + + def forward( + self, + descriptor: torch.Tensor, + atype: torch.Tensor, + gr: Optional[torch.Tensor] = None, + g2: Optional[torch.Tensor] = None, + h2: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + ): + nf, nloc, _ = descriptor.shape + ret = {} + ret["foo"] = ( + torch.Tensor( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ) + .view([nf, nloc] + self.output_def()["foo"].shape) # noqa: RUF005 + .to(env.GLOBAL_PT_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["pix"] = ( + torch.Tensor( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ) + .view([nf, nloc] + self.output_def()["pix"].shape) # noqa: RUF005 + .to(env.GLOBAL_PT_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["bar"] = ( + torch.Tensor( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ) + .view([nf, nloc] + self.output_def()["bar"].shape) # noqa: RUF005 + .to(env.GLOBAL_PT_FLOAT_PRECISION) + .to(env.DEVICE) + ) + return ret + + +class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + nf, nloc, nnei = self.nlist.shape + self.merged_output_stat = [ + { + "coord": to_torch_tensor(np.zeros([2, 3, 3])), + "atype": to_torch_tensor( + np.array([[0, 0, 1], [0, 1, 
1]], dtype=np.int32) + ), + "atype_ext": to_torch_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_torch_tensor(np.zeros([2, 3, 3])), + "natoms": to_torch_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 1, 3 + "foo": to_torch_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # no bias of pix + # bias of bar: [1, 5], [3, 2] + "bar": to_torch_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + } + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_torch_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), ret2[kk]) + + # 4. 
test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_torch_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ## model output on foo: [[2, 3, 6], [5, 8, 9]] given bias [1, 3] + ## foo sumed: [11, 22] compared with [5, 7], fit target is [-6, -15] + ## fit bias is [1, -8] + ## old bias + fit bias [2, -5] + ## new model output is [[3, 4, -2], [6, 0, 1]], which sumed to [5, 7] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(to_numpy_array(ret3[kk]), expected_ret3[kk]) + # bar is too complicated to be manually computed. diff --git a/source/tests/pt/test_stat.py b/source/tests/pt/test_stat.py index 2362821dfa..eff73a60c8 100644 --- a/source/tests/pt/test_stat.py +++ b/source/tests/pt/test_stat.py @@ -366,7 +366,7 @@ def test_calc_and_load(self): type_map = self.type_map # compute from sample - ret0 = compute_output_stats( + ret0, _ = compute_output_stats( self.sampled, len(type_map), keys=["energy"], @@ -394,7 +394,7 @@ def raise_error(): # hack!!! # suppose to load stat from file, if from sample, an error will raise. - ret1 = compute_output_stats( + ret1, _ = compute_output_stats( raise_error, len(type_map), keys=["energy"], @@ -412,7 +412,7 @@ def test_assigned(self): type_map = self.type_map # from assigned atom_ener - ret2 = compute_output_stats( + ret2, _ = compute_output_stats( self.sampled, len(type_map), keys=["energy"], From 6dd726d48472d798d4e1fe35a4070a3a053187c2 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 4 Apr 2024 01:45:29 +0800 Subject: [PATCH 02/13] fix bugs --- deepmd/pt/model/atomic_model/base_atomic_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index ed22c5c8e6..0c79234a34 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -324,7 +324,7 @@ def change_out_bias( rcond=self.rcond, atom_ener=self.atom_ener, ) - self.set_out_bias(delta_bias, add=True) + # self.set_out_bias(delta_bias, add=True) self._store_out_stat(delta_bias, out_std, add=True) elif bias_adjust_mode == "set-by-statistic": bias_out, std_out = compute_output_stats( @@ -335,7 +335,7 @@ def change_out_bias( rcond=self.rcond, atom_ener=self.atom_ener, ) - self.set_out_bias(bias_out) + # self.set_out_bias(bias_out) self._store_out_stat(bias_out, std_out) else: raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) From 9e89b25a90bbb8914acfcea6eea080ea7cb36392 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 4 Apr 2024 09:25:07 +0800 Subject: [PATCH 03/13] model atomic model call output stat. 
fix test_finetune --- .../model/atomic_model/base_atomic_model.py | 24 +++++++++++++++---- .../pt/model/atomic_model/dp_atomic_model.py | 3 +-- .../atomic_model/pairtab_atomic_model.py | 16 +------------ deepmd/pt/utils/stat.py | 2 +- source/tests/pt/test_finetune.py | 15 ++++-------- 5 files changed, 27 insertions(+), 33 deletions(-) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 0c79234a34..a27171f16c 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -65,14 +65,30 @@ def init_out_stat(self): [self.atomic_output_def()[kk].size for kk in self.bias_keys] ) self.n_out = len(self.bias_keys) - self.out_bias_data = torch.zeros( + out_bias_data = torch.zeros( [self.n_out, ntypes, self.max_out_size], dtype=dtype, device=device ) - self.out_std_data = torch.ones( + out_std_data = torch.ones( [self.n_out, ntypes, self.max_out_size], dtype=dtype, device=device ) - self.register_buffer("out_bias", self.out_bias_data) - self.register_buffer("out_std", self.out_std_data) + self.register_buffer("out_bias", out_bias_data) + self.register_buffer("out_std", out_std_data) + + def __setitem__(self, key, value): + if key in ["out_bias"]: + self.out_bias = value + elif key in ["out_std"]: + self.out_std = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["out_bias"]: + return self.out_bias + elif key in ["out_std"]: + return self.out_std + else: + raise KeyError(key) @torch.jit.export def get_type_map(self) -> List[str]: diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 8db74f5f74..c9c9e6ed47 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -215,8 +215,7 @@ def wrapped_sampler(): return sampled self.descriptor.compute_input_stats(wrapped_sampler, stat_file_path) - if self.fitting_net is not None: - self.fitting_net.compute_output_stats(wrapped_sampler, stat_file_path) + self.compute_or_load_out_stat(wrapped_sampler, stat_file_path) def set_out_bias(self, out_bias: torch.Tensor, add=False) -> None: """ diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index d9fa1760a4..627dffd620 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -17,9 +17,6 @@ from deepmd.pt.utils import ( env, ) -from deepmd.pt.utils.stat import ( - compute_output_stats, -) from deepmd.utils.pair_tab import ( PairTab, ) @@ -227,18 +224,7 @@ def compute_or_load_stat( The path to the stat file. 
""" - # [0] to get the mean (bias) - bias_atom_e = compute_output_stats( - merged, - self.ntypes, - keys=["energy"], - stat_file_path=stat_file_path, - rcond=self.rcond, - atom_ener=self.atom_ener, - )[0]["energy"] - self.bias_atom_e.copy_( - torch.tensor(bias_atom_e, device=env.DEVICE).view([self.ntypes, 1]) - ) + self.compute_or_load_out_stat(merged, stat_file_path) def set_out_bias(self, out_bias: torch.Tensor, add=False) -> None: """ diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 0f531560f6..bd8ab23e40 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -273,7 +273,7 @@ def compute_output_stats( } else: unbias_e = { - kk: model_predict[kk].reshape(ntypes, -1) + kk: model_predict[kk].reshape(nf, -1) + merged_natoms @ bias_atom_e[kk].reshape(ntypes, -1) for kk in keys } diff --git a/source/tests/pt/test_finetune.py b/source/tests/pt/test_finetune.py index b7120414ba..8f299ce542 100644 --- a/source/tests/pt/test_finetune.py +++ b/source/tests/pt/test_finetune.py @@ -1,9 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import tempfile import unittest -from copy import ( - deepcopy, -) from pathlib import ( Path, ) @@ -41,11 +38,9 @@ class FinetuneTest: def test_finetune_change_out_bias(self): # get model model = get_model(self.model_config) - fitting_net = model.get_fitting_net() - fitting_net["bias_atom_e"] = torch.rand_like(fitting_net["bias_atom_e"]) - energy_bias_before = deepcopy( - to_numpy_array(fitting_net["bias_atom_e"]).reshape(-1) - ) + atomic_model = model.atomic_model + atomic_model["out_bias"] = torch.rand_like(atomic_model["out_bias"]) + energy_bias_before = to_numpy_array(atomic_model["out_bias"])[0].ravel() # prepare original model for test dp = torch.jit.script(model) @@ -60,9 +55,7 @@ def test_finetune_change_out_bias(self): self.sampled, bias_adjust_mode="change-by-statistic", ) - energy_bias_after = deepcopy( - to_numpy_array(fitting_net["bias_atom_e"]).reshape(-1) - ) + energy_bias_after = to_numpy_array(atomic_model["out_bias"])[0].ravel() # get ground-truth energy bias change sorter = np.argsort(full_type_map) From 75591a17270b1e0a428cee2534d8803b6eb49739 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 4 Apr 2024 10:05:20 +0800 Subject: [PATCH 04/13] change the atomic model's init interface of the dpmodel --- deepmd/dpmodel/atomic_model/base_atomic_model.py | 6 ++++++ deepmd/dpmodel/atomic_model/dp_atomic_model.py | 6 +----- deepmd/dpmodel/atomic_model/linear_atomic_model.py | 2 +- deepmd/dpmodel/atomic_model/pairtab_atomic_model.py | 2 +- deepmd/pt/model/atomic_model/base_atomic_model.py | 2 +- source/tests/common/dpmodel/test_dp_atomic_model.py | 2 +- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/deepmd/dpmodel/atomic_model/base_atomic_model.py b/deepmd/dpmodel/atomic_model/base_atomic_model.py index 42d1e67138..dbb344d5ca 100644 --- a/deepmd/dpmodel/atomic_model/base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/base_atomic_model.py @@ -27,13 +27,19 @@ class BaseAtomicModel(BaseAtomicModel_): def __init__( self, + type_map: List[str], atom_exclude_types: List[int] = [], pair_exclude_types: List[Tuple[int, int]] = [], ): super().__init__() + self.type_map = type_map self.reinit_atom_exclude(atom_exclude_types) self.reinit_pair_exclude(pair_exclude_types) + def get_type_map(self) -> List[str]: + """Get the type map.""" + return self.type_map + def reinit_atom_exclude( self, exclude_types: List[int] = [], diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py 
b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index 8a40f8d238..cca46d3710 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -53,7 +53,7 @@ def __init__( self.descriptor = descriptor self.fitting = fitting self.type_map = type_map - super().__init__(**kwargs) + super().__init__(type_map, **kwargs) def fitting_output_def(self) -> FittingOutputDef: """Get the output def of the fitting net.""" @@ -67,10 +67,6 @@ def get_sel(self) -> List[int]: """Get the neighbor selection.""" return self.descriptor.get_sel() - def get_type_map(self) -> List[str]: - """Get the type map.""" - return self.type_map - def mixed_types(self) -> bool: """If true, the model 1. assumes total number of atoms aligned across frames; diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 93a885f3ab..71e4aa542a 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -66,7 +66,7 @@ def __init__( self.mapping_list.append(self.remap_atype(tpmp, self.type_map)) assert len(err_msg) == 0, "\n".join(err_msg) self.mixed_types_list = [model.mixed_types() for model in self.models] - super().__init__(**kwargs) + super().__init__(type_map, **kwargs) def mixed_types(self) -> bool: """If true, the model diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 30ab58928b..1b8dca7b40 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -61,7 +61,7 @@ def __init__( type_map: List[str], **kwargs, ): - super().__init__() + super().__init__(type_map, **kwargs) self.tab_file = tab_file self.rcut = rcut self.type_map = type_map diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index a27171f16c..6465a9ce39 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -45,7 +45,7 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): def __init__( self, - type_map, + type_map: List[str], atom_exclude_types: List[int] = [], pair_exclude_types: List[Tuple[int, int]] = [], ): diff --git a/source/tests/common/dpmodel/test_dp_atomic_model.py b/source/tests/common/dpmodel/test_dp_atomic_model.py index a3cad8b406..96bf24e451 100644 --- a/source/tests/common/dpmodel/test_dp_atomic_model.py +++ b/source/tests/common/dpmodel/test_dp_atomic_model.py @@ -41,7 +41,7 @@ def test_methods(self): md0 = DPAtomicModel(ds, ft, type_map=type_map) - self.assertEqual(md0.get_output_keys(), ["energy", "mask"]) + self.assertEqual(list(md0.atomic_output_def().keys()), ["energy", "mask"]) self.assertEqual(md0.get_type_map(), ["foo", "bar"]) self.assertEqual(md0.get_ntypes(), 2) self.assertAlmostEqual(md0.get_rcut(), self.rcut) From 7ab0a4f71c126f38e4ebbbd2907db8af559179bd Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 4 Apr 2024 22:42:53 +0800 Subject: [PATCH 05/13] fix ut --- deepmd/dpmodel/atomic_model/pairtab_atomic_model.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index 1b8dca7b40..c970278bcf 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -59,6 +59,8 @@ def __init__( rcut: float, sel: Union[int, 
List[int]], type_map: List[str], + rcond: Optional[float] = None, + atom_ener: Optional[List[float]] = None, **kwargs, ): super().__init__(type_map, **kwargs) @@ -69,6 +71,8 @@ def __init__( self.tab = PairTab(self.tab_file, rcut=rcut) self.type_map = type_map self.ntypes = len(type_map) + self.rcond = rcond + self.atom_ener = atom_ener if self.tab_file is not None: self.tab_info, self.tab_data = self.tab.get() From ce7ec1f39be669fcc63a05e9474bc248d9a54d8a Mon Sep 17 00:00:00 2001 From: Han Wang Date: Fri, 5 Apr 2024 22:32:15 +0800 Subject: [PATCH 06/13] fix ut --- source/tests/pt/test_multitask.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/tests/pt/test_multitask.py b/source/tests/pt/test_multitask.py index 8bdb42df52..d3f06f89e0 100644 --- a/source/tests/pt/test_multitask.py +++ b/source/tests/pt/test_multitask.py @@ -128,17 +128,17 @@ def test_multitask_train(self): multi_state_dict[state_key], multi_state_dict_finetuned[state_key], ) - elif "model_2" in state_key and "bias_atom_e" not in state_key: + elif "model_2" in state_key and "out_bias" not in state_key: torch.testing.assert_close( multi_state_dict[state_key], multi_state_dict_finetuned[state_key], ) - elif "model_3" in state_key and "bias_atom_e" not in state_key: + elif "model_3" in state_key and "out_bias" not in state_key: torch.testing.assert_close( multi_state_dict[state_key.replace("model_3", "model_2")], multi_state_dict_finetuned[state_key], ) - elif "model_4" in state_key and "fitting_net" not in state_key: + elif "model_4" in state_key and "atomic_model" not in state_key: torch.testing.assert_close( multi_state_dict[state_key.replace("model_4", "model_2")], multi_state_dict_finetuned[state_key], From 48ee2729c406bc4a8f5b8980c59b92ac8ba05ce8 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sat, 6 Apr 2024 16:50:09 +0800 Subject: [PATCH 07/13] support preset atom bias. add doc str to base atomic model --- .../model/atomic_model/base_atomic_model.py | 32 +++- deepmd/pt/utils/stat.py | 50 +++++- deepmd/utils/out_stat.py | 5 +- .../tests/pt/model/test_atomic_model_stat.py | 170 ++++++++++++++++++ 4 files changed, 242 insertions(+), 15 deletions(-) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 6465a9ce39..ab7cd44ea3 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -43,19 +43,43 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_): + """The base of the atomic model. + + Parameters + ---------- + type_map + Mapping atom type to the name (str) of the type. + For example `type_map[1]` gives the name of type 1. + atom_exclude_types + Exclude the atomic contribution of the given types. + pair_exclude_types + Exclude the pair of atoms of the given types from computing the output + of the atomic model. Implemented by removing the pairs from the nlist. + rcond : float, optional + The condition number for the regression of atomic energy. + preset_out_bias : Dict[str, List[Optional[torch.Tensor]]], optional + Specifying atomic energy contribution in vacuum. Given by key:value pairs. + The value is a list specifying the bias; the elements can be None or an np.array of the output shape. + For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] + The `set_davg_zero` key in the descriptor should be set.
+ + """ + def __init__( self, type_map: List[str], atom_exclude_types: List[int] = [], pair_exclude_types: List[Tuple[int, int]] = [], + rcond: Optional[float] = None, + preset_out_bias: Optional[Dict[str, torch.Tensor]] = None, ): torch.nn.Module.__init__(self) BaseAtomicModel_.__init__(self) self.type_map = type_map self.reinit_atom_exclude(atom_exclude_types) self.reinit_pair_exclude(pair_exclude_types) - self.rcond = None - self.atom_ener = None + self.rcond = rcond + self.atom_ener = preset_out_bias def init_out_stat(self): """Initialize the output bias.""" @@ -338,7 +362,7 @@ def change_out_bias( stat_file_path=stat_file_path, model_forward=self._get_forward_wrapper_func(), rcond=self.rcond, - atom_ener=self.atom_ener, + preset_bias=self.atom_ener, ) # self.set_out_bias(delta_bias, add=True) self._store_out_stat(delta_bias, out_std, add=True) @@ -349,7 +373,7 @@ def change_out_bias( keys=list(self.atomic_output_def().keys()), stat_file_path=stat_file_path, rcond=self.rcond, - atom_ener=self.atom_ener, + preset_bias=self.atom_ener, ) # self.set_out_bias(bias_out) self._store_out_stat(bias_out, std_out) diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index bd8ab23e40..d85741b231 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -2,6 +2,7 @@ import logging from typing import ( Callable, + Dict, List, Optional, Union, @@ -177,13 +178,38 @@ def model_forward_auto_batch_size(*args, **kwargs): return model_predict +def _make_preset_out_bias( + ntypes: int, + ibias: List[Optional[np.array]], +) -> Optional[np.array]: + """Make the preset output bias. + + output: + an np.array of shape [ntypes, *(odim0, odim1, ...)] if any item is not None; + None if all items are None. + """ + if len(ibias) != ntypes: + raise ValueError("the length of preset bias list should be ntypes") + if all(ii is None for ii in ibias): + return None + for refb in ibias: + if refb is not None: + break + refb = np.array(refb) + nbias = [ + np.full_like(refb, np.nan, dtype=np.float64) if ii is None else ii + for ii in ibias + ] + return np.array(nbias) + + def compute_output_stats( merged: Union[Callable[[], List[dict]], List[dict]], ntypes: int, keys: Union[str, List[str]] = ["energy"], stat_file_path: Optional[DPPath] = None, rcond: Optional[float] = None, - atom_ener: Optional[List[float]] = None, + preset_bias: Optional[Dict[str, List[Optional[torch.Tensor]]]] = None, model_forward: Optional[Callable[..., torch.Tensor]] = None, ): """ @@ -204,8 +230,11 @@ def compute_output_stats( The path to the stat file. rcond : float, optional The condition number for the regression of atomic energy. - atom_ener : List[float], optional - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + preset_bias : Dict[str, List[Optional[torch.Tensor]]], optional + Specifying atomic energy contribution in vacuum. Given by key:value pairs. + The value is a list specifying the bias; the elements can be None or an np.array of the output shape. + For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] + The `set_davg_zero` key in the descriptor should be set. + model_forward : Callable[..., torch.Tensor], optional The wrapped forward function of atomic model.
If not None, the model will be utilized to generate the original energy prediction, @@ -241,12 +270,15 @@ def compute_output_stats( # shape: (nframes, ntypes) merged_natoms = to_numpy_array(torch.cat(input_natoms)[:, 2:]) nf = merged_natoms.shape[0] - if atom_ener is not None and len(atom_ener) > 0: - assigned_atom_ener = np.array( - [ee if ee is not None else np.nan for ee in atom_ener] - ) + if preset_bias is not None: + assigned_atom_ener = { + kk: _make_preset_out_bias(ntypes, preset_bias[kk]) + if kk in preset_bias.keys() + else None + for kk in keys + } else: - assigned_atom_ener = None + assigned_atom_ener = {kk: None for kk in keys} if model_forward is None: stats_input = merged_output @@ -261,7 +293,7 @@ def compute_output_stats( bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( stats_input[kk], merged_natoms, - assigned_bias=assigned_atom_ener, + assigned_bias=assigned_atom_ener[kk], rcond=rcond, ) bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e) diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index bf883fd4fe..1dcbcb1280 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -27,8 +27,9 @@ def compute_stats_from_redu( natoms The number of atoms for each atom, shape is [nframes, ntypes]. assigned_bias - The assigned output bias, shape is [ntypes, *(odim0, odim1, ...)]. Set to nan - if not assigned. + The assigned output bias, shape is [ntypes, *(odim0, odim1, ...)]. + Set to a tensor of shape (odim0, odim1, ...) filled with nan if the bias + of the type is not assigned. rcond Cut-off ratio for small singular values of a. diff --git a/source/tests/pt/model/test_atomic_model_stat.py b/source/tests/pt/model/test_atomic_model_stat.py index 10f24e096d..56ecb047f7 100644 --- a/source/tests/pt/model/test_atomic_model_stat.py +++ b/source/tests/pt/model/test_atomic_model_stat.py @@ -252,3 +252,173 @@ def raise_error(): for kk in ["foo", "pix"]: np.testing.assert_almost_equal(to_numpy_array(ret3[kk]), expected_ret3[kk]) # bar is too complicated to be manually computed. + + def test_preset_bias(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + # "foo": np.array(3.0, 2.0]).reshape(2, 1), + "foo": [None, 2], + "bar": np.array([7.0, 5.0, 13.0, 11.0]).reshape(2, 1, 2), + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_torch_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk]) + + # 2. 
test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + # foo sums: [5, 7], + # given bias of type 1 being 2, the bias left for type 0 is [5-2*1, 7-2*2] = [3,3] + # the solution of type 0 is 1.8 + foo_bias = np.array([1.8, preset_out_bias["foo"][1]]).reshape(2, 1) + bar_bias = preset_out_bias["bar"] + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), ret2[kk]) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_torch_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ## model output on foo: [[2.8, 3.8, 5], [5.8, 7., 8.]] given bias [1.8, 2] + ## foo sumed: [11.6, 20.8] compared with [5, 7], fit target is [-6.6, -13.8] + ## fit bias is [-7, 2] (2 is assigned. -7 is fit to [-8.6, -17.8]) + ## old bias[1.8,2] + fit bias[-7, 2] = [-5.2, 4] + ## new model output is [[-4.2, -3.2, 7], [-1.2, 9, 10]] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[-4.2, -3.2, 7.0], [-1.2, 9.0, 10.0]]).reshape( + 2, 3, 1 + ) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(to_numpy_array(ret3[kk]), expected_ret3[kk]) + # bar is too complicated to be manually computed. + + def test_preset_bias_all_none(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + "foo": [None, None], + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_torch_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk]) + + # 2. 
test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk]) From 4c038d84fe6f2f4139a005706ffaddfd4b408b3b Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sat, 6 Apr 2024 17:08:44 +0800 Subject: [PATCH 08/13] solve name conflict --- deepmd/pt/model/atomic_model/base_atomic_model.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index ab7cd44ea3..088c5e00a2 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -79,7 +79,7 @@ def __init__( self.reinit_atom_exclude(atom_exclude_types) self.reinit_pair_exclude(pair_exclude_types) self.rcond = rcond - self.atom_ener = preset_out_bias + self.preset_out_bias = preset_out_bias def init_out_stat(self): """Initialize the output bias.""" @@ -235,7 +235,7 @@ def forward_common_atomic( fparam=fparam, aparam=aparam, ) - ret_dict = self.apply_out_bias(ret_dict, atype) + ret_dict = self.apply_out_stat(ret_dict, atype) # nf x nloc atom_mask = ext_atom_mask[:, :nloc].to(torch.int32) @@ -308,12 +308,12 @@ def compute_or_load_out_stat( bias_adjust_mode="set-by-statistic", ) - def apply_out_bias( + def apply_out_stat( self, ret: Dict[str, torch.Tensor], atype: torch.Tensor, ): - """Apply the bias to each atomic output. + """Apply the stat to each atomic output. The developer may override the method to define how the bias is applied to the atomic output of the model. @@ -362,7 +362,7 @@ def change_out_bias( stat_file_path=stat_file_path, model_forward=self._get_forward_wrapper_func(), rcond=self.rcond, - preset_bias=self.atom_ener, + preset_bias=self.preset_out_bias, ) # self.set_out_bias(delta_bias, add=True) self._store_out_stat(delta_bias, out_std, add=True) @@ -373,7 +373,7 @@ def change_out_bias( keys=list(self.atomic_output_def().keys()), stat_file_path=stat_file_path, rcond=self.rcond, - preset_bias=self.atom_ener, + preset_bias=self.preset_out_bias, ) # self.set_out_bias(bias_out) self._store_out_stat(bias_out, std_out) From 0dae3c9ba87f0b73194d727a1f84ff849f6acf41 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sat, 6 Apr 2024 20:30:33 +0800 Subject: [PATCH 09/13] fix bugs --- deepmd/pt/model/task/invar_fitting.py | 13 +++++++++---- source/tests/pt/test_stat.py | 10 +++++----- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 31f5b5d6c9..01e6a8b95d 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -78,8 +78,11 @@ class InvarFitting(GeneralFitting): Random seed. exclude_types: List[int] Atomic contributions of the excluded atom types are set zero. - atom_ener: List[float], optional - Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set. + atom_ener: List[Optional[torch.Tensor]], optional + Specifying atomic energy contribution in vacuum. 
+ The value is a list specifying the bias; the elements can be None or an np.array of the output shape. + For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.] + The `set_davg_zero` key in the descriptor should be set. """ @@ -100,7 +103,7 @@ def __init__( rcond: Optional[float] = None, seed: Optional[int] = None, exclude_types: List[int] = [], - atom_ener: Optional[List[float]] = None, + atom_ener: Optional[List[Optional[torch.Tensor]]] = None, **kwargs, ): self.dim_out = dim_out @@ -171,7 +174,9 @@ def compute_output_stats( keys=[self.var_name], stat_file_path=stat_file_path, rcond=self.rcond, - atom_ener=self.atom_ener, + preset_bias={self.var_name: self.atom_ener} + if self.atom_ener is not None + else None, )[0][self.var_name] self.bias_atom_e.copy_(bias_atom_e.view([self.ntypes, self.dim_out])) diff --git a/source/tests/pt/test_stat.py b/source/tests/pt/test_stat.py index eff73a60c8..76549f0c7d 100644 --- a/source/tests/pt/test_stat.py +++ b/source/tests/pt/test_stat.py @@ -371,7 +371,7 @@ def test_calc_and_load(self): len(type_map), keys=["energy"], stat_file_path=stat_file_path, - atom_ener=None, + preset_bias=None, model_forward=None, ) # ground truth @@ -399,7 +399,7 @@ def raise_error(): len(type_map), keys=["energy"], stat_file_path=stat_file_path, - atom_ener=None, + preset_bias=None, model_forward=None, ) np.testing.assert_almost_equal( ) def test_assigned(self): - atom_ener = np.array([3.0, 5.0]).reshape(2, 1) + atom_ener = {"energy": np.array([3.0, 5.0]).reshape(2, 1)} stat_file_path = self.stat_file_path type_map = self.type_map @@ -417,11 +417,11 @@ def test_assigned(self): len(type_map), keys=["energy"], stat_file_path=stat_file_path, - atom_ener=atom_ener, + preset_bias=atom_ener, model_forward=None, ) np.testing.assert_almost_equal( - to_numpy_array(ret2["energy"]), atom_ener, decimal=10 + to_numpy_array(ret2["energy"]), atom_ener["energy"], decimal=10 ) From 47146845d3963e55a9553eda18d7ae77a76ef74c Mon Sep 17 00:00:00 2001 From: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Date: Sun, 7 Apr 2024 00:10:41 +0800 Subject: [PATCH 10/13] Update source/tests/pt/test_multitask.py Co-authored-by: Duo <50307526+iProzd@users.noreply.github.com> Signed-off-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- source/tests/pt/test_multitask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/tests/pt/test_multitask.py
b/source/tests/pt/test_multitask.py index 984c9c6079..3c78484e1f 100644 --- a/source/tests/pt/test_multitask.py +++ b/source/tests/pt/test_multitask.py @@ -138,7 +138,11 @@ def test_multitask_train(self): multi_state_dict[state_key.replace("model_3", "model_2")], multi_state_dict_finetuned[state_key], ) - elif "model_4" in state_key and "fitting_net" not in state_key and "out_bias" not in state_key: + elif ( + "model_4" in state_key + and "fitting_net" not in state_key + and "out_bias" not in state_key + ): torch.testing.assert_close( multi_state_dict[state_key.replace("model_4", "model_2")], multi_state_dict_finetuned[state_key], From a3e6f578e8df109cd1f5b9164b368091fc70ea6c Mon Sep 17 00:00:00 2001 From: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Date: Sun, 7 Apr 2024 00:11:58 +0800 Subject: [PATCH 12/13] add doc str --- deepmd/pt/model/atomic_model/base_atomic_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index 088c5e00a2..57ca21a826 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -353,6 +353,8 @@ def change_out_bias( 'change-by-statistic' : perform predictions on labels of target dataset, and do least square on the errors to obtain the target shift as bias. 'set-by-statistic' : directly use the statistic output bias in the target dataset. + stat_file_path : Optional[DPPath] + The path to the stat file. """ if bias_adjust_mode == "change-by-statistic": delta_bias, out_std = compute_output_stats( From 2171e19cec5b5f9c07f8d530e210d1539e5f33ef Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sun, 7 Apr 2024 09:03:15 +0800 Subject: [PATCH 13/13] fix ut --- .../tests/pt/model/test_atomic_model_stat.py | 39 ++++++++++++++----- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/source/tests/pt/model/test_atomic_model_stat.py b/source/tests/pt/model/test_atomic_model_stat.py index 56ecb047f7..e266cf215a 100644 --- a/source/tests/pt/model/test_atomic_model_stat.py +++ b/source/tests/pt/model/test_atomic_model_stat.py @@ -179,9 +179,13 @@ def test_output_stat(self): # nf x nloc at = self.atype_ext[:, :nloc] + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + # 1. test run without bias # nf x na x odim ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) expected_ret0 = {} expected_ret0["foo"] = np.array( [ @@ -202,13 +206,14 @@ def test_output_stat(self): ] ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk]) + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) # 2. test bias is applied md0.compute_or_load_out_stat( self.merged_output_stat, stat_file_path=self.stat_file_path ) ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) # nt x odim foo_bias = np.array([1.0, 3.0]).reshape(2, 1) bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) @@ -217,7 +222,7 @@ def test_output_stat(self): expected_ret1["pix"] = ret0["pix"] expected_ret1["bar"] = ret0["bar"] + bar_bias[at] for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk]) + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) # 3. 
test bias load from file def raise_error(): @@ -225,8 +230,9 @@ def raise_error(): md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), ret2[kk]) + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) # 4. test change bias BaseAtomicModel.change_out_bias( @@ -241,6 +247,7 @@ def raise_error(): ] ] ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) ## model output on foo: [[2, 3, 6], [5, 8, 9]] given bias [1, 3] ## foo sumed: [11, 22] compared with [5, 7], fit target is [-6, -15] ## fit bias is [1, -8] @@ -250,7 +257,7 @@ def raise_error(): expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) expected_ret3["pix"] = ret0["pix"] for kk in ["foo", "pix"]: - np.testing.assert_almost_equal(to_numpy_array(ret3[kk]), expected_ret3[kk]) + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) # bar is too complicated to be manually computed. def test_preset_bias(self): @@ -280,9 +287,13 @@ def test_preset_bias(self): # nf x nloc at = self.atype_ext[:, :nloc] + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + # 1. test run without bias # nf x na x odim ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) expected_ret0 = {} expected_ret0["foo"] = np.array( [ @@ -303,13 +314,14 @@ def test_preset_bias(self): ] ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk]) + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) # 2. test bias is applied md0.compute_or_load_out_stat( self.merged_output_stat, stat_file_path=self.stat_file_path ) ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) # foo sums: [5, 7], # given bias of type 1 being 2, the bias left for type 0 is [5-2*1, 7-2*2] = [3,3] # the solution of type 0 is 1.8 @@ -320,7 +332,7 @@ def test_preset_bias(self): expected_ret1["pix"] = ret0["pix"] expected_ret1["bar"] = ret0["bar"] + bar_bias[at] for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk]) + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) # 3. test bias load from file def raise_error(): @@ -328,8 +340,9 @@ def raise_error(): md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) for kk in ["foo", "pix", "bar"]: - np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), ret2[kk]) + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) # 4. test change bias BaseAtomicModel.change_out_bias( @@ -344,6 +357,7 @@ def raise_error(): ] ] ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) ## model output on foo: [[2.8, 3.8, 5], [5.8, 7., 8.]] given bias [1.8, 2] ## foo sumed: [11.6, 20.8] compared with [5, 7], fit target is [-6.6, -13.8] ## fit bias is [-7, 2] (2 is assigned. -7 is fit to [-8.6, -17.8]) @@ -355,7 +369,7 @@ def raise_error(): ) expected_ret3["pix"] = ret0["pix"] for kk in ["foo", "pix"]: - np.testing.assert_almost_equal(to_numpy_array(ret3[kk]), expected_ret3[kk]) + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) # bar is too complicated to be manually computed. 
     def test_preset_bias_all_none(self):
@@ -383,9 +397,13 @@ def test_preset_bias_all_none(self):
         # nf x nloc
         at = self.atype_ext[:, :nloc]

+        def cvt_ret(x):
+            return {kk: to_numpy_array(vv) for kk, vv in x.items()}
+
         # 1. test run without bias
         # nf x na x odim
         ret0 = md0.forward_common_atomic(*args)
+        ret0 = cvt_ret(ret0)
         expected_ret0 = {}
         expected_ret0["foo"] = np.array(
             [
@@ -406,13 +424,14 @@ def test_preset_bias_all_none(self):
             ]
         ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape)  # noqa: RUF005
         for kk in ["foo", "pix", "bar"]:
-            np.testing.assert_almost_equal(to_numpy_array(ret0[kk]), expected_ret0[kk])
+            np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk])

         # 2. test bias is applied
         md0.compute_or_load_out_stat(
             self.merged_output_stat, stat_file_path=self.stat_file_path
         )
         ret1 = md0.forward_common_atomic(*args)
+        ret1 = cvt_ret(ret1)
         # nt x odim
         foo_bias = np.array([1.0, 3.0]).reshape(2, 1)
         bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2)
@@ -421,4 +440,4 @@ def test_preset_bias_all_none(self):
         expected_ret1["pix"] = ret0["pix"]
         expected_ret1["bar"] = ret0["bar"] + bar_bias[at]
         for kk in ["foo", "pix", "bar"]:
-            np.testing.assert_almost_equal(to_numpy_array(ret1[kk]), expected_ret1[kk])
+            np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk])
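For reference, the bias application checked by the `expected_ret1` assertions above is plain fancy indexing: the per-type table of shape (ntypes, odim) is indexed by `atype` and broadcast onto the (nf, nloc, odim) atomic output. A standalone NumPy sketch (the `atype` values here are illustrative, chosen only to match the per-frame type counts [2, 1] and [1, 2] implied by the tests):

import numpy as np

atype = np.array([[0, 0, 1], [0, 1, 1]])        # nf x nloc, illustrative
foo = np.zeros([2, 3, 1])                       # nf x nloc x odim raw output
foo_bias = np.array([1.0, 3.0]).reshape(2, 1)   # ntypes x odim

# foo_bias[atype] has shape nf x nloc x odim, aligned with each atom
biased = foo + foo_bias[atype]
print(biased[..., 0])                           # [[1. 1. 3.] [1. 3. 3.]]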
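The 'change-by-statistic' mode exercised in step 4 of test_output_stat reduces to one least-squares solve on the prediction errors. A standalone sketch (not the deepmd implementation; the per-frame type counts are inferred from the test comments) that reproduces the hand-computed numbers above:

import numpy as np

cnt = np.array([[2.0, 1.0], [1.0, 2.0]])  # per-frame counts of types 0 and 1
labels = np.array([5.0, 7.0])             # per-frame summed "foo" labels
preds = np.array([11.0, 22.0])            # model sums with the old bias [1, 3]

# least-square a per-type shift against the errors; fit target is [-6, -15]
delta, *_ = np.linalg.lstsq(cnt, labels - preds, rcond=None)
print(delta)                              # -> [ 1. -8.]

old_bias = np.array([1.0, 3.0])
print(old_bias + delta)                   # -> [ 2. -5.], as in expected_ret3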
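The preset-bias path documented in the new `atom_ener` docstring pins the types whose entry is not None and least-squares only the remaining ones. A standalone sketch under the same assumptions (helper structure is illustrative only), reproducing the 1.8 derived in the test_preset_bias comments:

import numpy as np

cnt = np.array([[2.0, 1.0], [1.0, 2.0]])   # per-frame counts of types 0 and 1
foo_sum = np.array([5.0, 7.0])             # per-frame summed "foo" labels

preset = [None, np.array([2.0])]           # type 0 free, type 1 pinned to 2.0
fit_idx = [i for i, p in enumerate(preset) if p is None]
pin_idx = [i for i, p in enumerate(preset) if p is not None]
pinned = np.concatenate([preset[i] for i in pin_idx])

# remove the pinned contribution from the labels: [5 - 2*1, 7 - 2*2] = [3, 3]
residual = foo_sum - cnt[:, pin_idx] @ pinned

# least squares over the free types only; the solution for type 0 is 1.8
b_free, *_ = np.linalg.lstsq(cnt[:, fit_idx], residual, rcond=None)

bias = np.zeros(len(preset))
bias[pin_idx] = pinned
bias[fit_idx] = b_free
print(bias)                                # -> [1.8 2. ]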
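Finally, the `raise_error` callables in the tests work because `compute_or_load_out_stat` computes the statistics from the lazy sampler once, saves them to `stat_file_path`, and on later calls reads them back without touching the sampler. A minimal sketch of that compute-once/load-after pattern (the helper, file name, and stand-in statistic here are hypothetical, not the deepmd API):

from pathlib import Path
from typing import Callable, List, Union

import numpy as np


def compute_or_load(
    merged: Union[Callable[[], List[dict]], List[dict]],
    stat_file: Path,
) -> np.ndarray:
    if stat_file.exists():
        return np.load(stat_file)  # cached: the (possibly slow) sampler is skipped
    data = merged() if callable(merged) else merged
    stat = np.mean([d["energy"] for d in data], axis=0)  # stand-in statistic
    np.save(stat_file, stat)
    return stat


def raise_error() -> List[dict]:
    raise RuntimeError("the sampler must not run when the stat file exists")


path = Path("stat.npy")
compute_or_load(lambda: [{"energy": np.ones(2)}], path)  # computes, writes file
compute_or_load(raise_error, path)  # loads from disk; raise_error never runs
path.unlink()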