diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py
index 2906bed62c..be9068fb9d 100644
--- a/deepmd/dpmodel/fitting/property_fitting.py
+++ b/deepmd/dpmodel/fitting/property_fitting.py
@@ -42,13 +42,8 @@ class PropertyFittingNet(InvarFitting):
     intensive
         Whether the fitting property is intensive.
     property_name:
-        The names of fitting properties, which should be consistent with the property names in the dataset.
-        If the data file is named `humo.npy`, this parameter should be "humo" or ["humo"].
-        If you want to fit two properties at the same time, supposing that the data files are named `humo.npy` and `lumo.npy`,
-        this parameter should be `["humo", "lumo"]`.
-    property_dim:
-        The dimensions of fitting properties, which should be consistent with the property dimensions in the dataset.
-        Note that the order here must be the same as the order of `property_name`.
+        The name of the fitting property, which should be consistent with the property name in the dataset.
+        If the data file is named `humo.npy`, this parameter should be "humo".
     resnet_dt
         Time-step `dt` in the resnet construction:
         :math:`y = x + dt * \phi (Wx + b)`
@@ -78,8 +73,7 @@ def __init__(
         rcond: Optional[float] = None,
         trainable: Union[bool, list[bool]] = True,
         intensive: bool = False,
-        property_name: Union[str, list] = "property",
-        property_dim: Union[int, list] = 1,
+        property_name: str = "property",
         resnet_dt: bool = True,
         numb_fparam: int = 0,
         numb_aparam: int = 0,
@@ -94,14 +88,9 @@ def __init__(
     ) -> None:
         self.task_dim = task_dim
         self.intensive = intensive
-        if isinstance(property_name, str):
-            property_name = [property_name]
         self.property_name = property_name
-        if isinstance(property_dim, int):
-            property_dim = [property_dim]
-        self.property_dim = property_dim
         super().__init__(
-            var_name="property",
+            var_name=property_name,
             ntypes=ntypes,
             dim_descrpt=dim_descrpt,
             dim_out=task_dim,
@@ -143,7 +132,6 @@ def serialize(self) -> dict:
             "task_dim": self.task_dim,
             "intensive": self.intensive,
             "property_name": self.property_name,
-            "property_dim": self.property_dim,
         }
         dd["@version"] = 4
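Reviewer note (not part of the diff): the dpmodel constructor now takes a single property name, and the list-based multi-property interface (`property_name` as a list plus `property_dim`) is gone. A minimal before/after sketch, with hypothetical values for `ntypes`, `dim_descrpt`, and `task_dim`:

    # Old (removed): one net stacking several properties
    # PropertyFittingNet(ntypes=2, dim_descrpt=64, task_dim=2,
    #                    property_name=["humo", "lumo"], property_dim=[1, 1])
    # New: one net per property; task_dim is that property's dimension
    fitting = PropertyFittingNet(
        ntypes=2,
        dim_descrpt=64,
        task_dim=1,
        property_name="humo",
    )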
""" def __init__( @@ -206,7 +201,6 @@ def __init__( r_hessian: bool = False, magnetic: bool = False, intensive: bool = False, - sub_var_name: Optional[Union[list[str], str]] = None, ) -> None: self.name = name self.shape = list(shape) @@ -230,9 +224,6 @@ def __init__( self.r_hessian = r_hessian self.magnetic = magnetic self.intensive = intensive - if isinstance(sub_var_name, str): - sub_var_name = [sub_var_name] - self.sub_var_name = sub_var_name if self.r_hessian: if not self.reducible: raise ValueError("only reducible variable can calculate hessian") diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 4d4145b64e..b906bda32d 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -780,17 +780,10 @@ def test_property( arrays with results and their shapes """ property_name = dp.get_property_name() - property_dim = dp.get_property_dim() - assert isinstance(property_name, list) - assert isinstance(property_dim, list) - assert sum(property_dim) == dp.task_dim - assert ( - len(property_name) == len(property_dim) - ), f"The shape of the `property_name` you provide must be consistent with the `property_dim`, but your `property_name` is {property_name} and your `property_dim` is {property_dim}!" - for name, dim in zip(property_name, property_dim): - data.add(name, dim, atomic=False, must=True, high_prec=True) - if has_atom_property: - data.add(f"atom_{name}", dim, atomic=True, must=False, high_prec=True) + assert isinstance(property_name, str) + data.add(property_name, dp.task_dim, atomic=False, must=True, high_prec=True) + if has_atom_property: + data.add(f"atom_{property_name}", dp.task_dim, atomic=True, must=False, high_prec=True) if dp.get_dim_fparam() > 0: data.add( @@ -841,26 +834,12 @@ def test_property( aproperty = ret[1] aproperty = aproperty.reshape([numb_test, natoms * dp.task_dim]) - concat_property = [] - concat_aproperty = [] - for name, dim in zip(property_name, property_dim): - test_data[name] = test_data[name].reshape([numb_test, dim]) - concat_property.append(test_data[name]) - if has_atom_property: - test_data[f"atom_{name}"] = test_data[f"atom_{name}"].reshape( - [numb_test, natoms * dim] - ) - concat_aproperty.append(test_data[f"atom_{name}"]) - test_data["property"] = np.concatenate(concat_property, axis=1) - if has_atom_property: - test_data["atom_property"] = np.concatenate(concat_aproperty, axis=1) - - diff_property = property - test_data["property"][:numb_test] + diff_property = property - test_data[property_name][:numb_test] mae_property = mae(diff_property) rmse_property = rmse(diff_property) if has_atom_property: - diff_aproperty = aproperty - test_data["atom_property"][:numb_test] + diff_aproperty = aproperty - test_data[f"atom_{property_name}"][:numb_test] mae_aproperty = mae(diff_aproperty) rmse_aproperty = rmse(diff_aproperty) @@ -877,13 +856,13 @@ def test_property( detail_path = Path(detail_file) for ii in range(numb_test): - test_out = test_data["property"][ii].reshape(-1, 1) + test_out = test_data[property_name][ii].reshape(-1, 1) pred_out = property[ii].reshape(-1, 1) frame_output = np.hstack((test_out, pred_out)) save_txt_file( - detail_path.with_suffix(f".property.out.{ii}"), + detail_path.with_suffix(f".{property_name}.out.{ii}"), frame_output, header=f"{system} - {ii}: data_property pred_property", append=append_detail, @@ -891,13 +870,13 @@ def test_property( if has_atom_property: for ii in range(numb_test): - test_out = test_data["atom_property"][ii].reshape(-1, 1) + test_out = 
test_data[f"atom_{property_name}"][ii].reshape(-1, 1) pred_out = aproperty[ii].reshape(-1, 1) frame_output = np.hstack((test_out, pred_out)) save_txt_file( - detail_path.with_suffix(f".aproperty.out.{ii}"), + detail_path.with_suffix(f".a{property_name}.out.{ii}"), frame_output, header=f"{system} - {ii}: data_aproperty pred_aproperty", append=append_detail, diff --git a/deepmd/infer/deep_property.py b/deepmd/infer/deep_property.py index e2d207fc74..26e128e2d2 100644 --- a/deepmd/infer/deep_property.py +++ b/deepmd/infer/deep_property.py @@ -102,6 +102,7 @@ def eval( The properties of the system, in shape (nframes, num_tasks). """ self.change_output_def() + ( coords, cells, @@ -141,13 +142,9 @@ def get_intensive(self) -> bool: """Get whether the property is intensive.""" return self.deep_eval.get_intensive() - def get_property_name(self) -> Union[list[str], str]: - """Get the names of the properties.""" + def get_property_name(self) -> str: + """Get the name of the fitting property.""" return self.deep_eval.get_property_name() - def get_property_dim(self) -> Union[list[int], int]: - """Get the dimensions of the properties.""" - return self.deep_eval.get_property_dim() - __all__ = ["DeepProperty"] diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 94107e35f4..28ee2b62c5 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -184,14 +184,10 @@ def get_dim_aparam(self) -> int: def get_intensive(self) -> bool: return self.dp.model["Default"].get_intensive() - def get_property_name(self) -> Union[list[str], str]: - """Get the names of the properties.""" + def get_property_name(self) -> str: + """Get the name of the property.""" return self.dp.model["Default"].get_property_name() - def get_property_dim(self) -> Union[list[int], int]: - """Get the dimensions of the properties.""" - return self.dp.model["Default"].get_property_dim() - @property def model_type(self) -> type["DeepEvalWrapper"]: """The the evaluator of the model type.""" @@ -208,7 +204,7 @@ def model_type(self) -> type["DeepEvalWrapper"]: return DeepGlobalPolar elif "wfc" in model_output_type: return DeepWFC - elif "property" in model_output_type: + elif self.get_property_name() in model_output_type: return DeepProperty else: raise RuntimeError("Unknown model type") diff --git a/deepmd/pt/loss/property.py b/deepmd/pt/loss/property.py index 6974a2cd8e..faaaf660e1 100644 --- a/deepmd/pt/loss/property.py +++ b/deepmd/pt/loss/property.py @@ -24,8 +24,7 @@ class PropertyLoss(TaskLoss): def __init__( self, task_dim, - property_name: Union[str, list], - property_dim: Union[int, list], + property_name: str, loss_func: str = "smooth_mae", metric: list = ["mae"], beta: float = 1.00, @@ -51,16 +50,7 @@ def __init__( self.loss_func = loss_func self.metric = metric self.beta = beta - if isinstance(property_name, str): - property_name = [property_name] - if isinstance(property_dim, int): - property_dim = [property_dim] self.property_name = property_name - assert self.task_dim == sum(property_dim) - assert ( - len(property_name) == len(property_dim) - ), f"The shape of the `property_name` you provide must be consistent with the `property_dim`, but your `property_name` is {property_name} and your `property_dim` is {property_dim}!" - self.property_name_dim_mapping = dict(zip(property_name, property_dim)) self.out_bias = out_bias self.out_std = out_std @@ -88,18 +78,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False Other losses for display. 
""" model_pred = model(**input_dict) - nbz = model_pred["property"].shape[0] - assert model_pred["property"].shape == (nbz, self.task_dim) - - concat_property = [] - for property_name in self.property_name: - assert label[property_name].shape == ( - nbz, - self.property_name_dim_mapping[property_name], - ) - concat_property.append(label[property_name]) - label["property"] = torch.cat(concat_property, dim=1) - assert label["property"].shape == (nbz, self.task_dim) + nbz = model_pred[self.property_name].shape[0] + assert model_pred[self.property_name].shape == (nbz, self.task_dim) + assert label[self.property_name].shape == (nbz, self.task_dim) if self.out_std is None: out_std = model.atomic_model.out_std[0][0] @@ -129,28 +110,28 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False # loss if self.loss_func == "smooth_mae": loss += F.smooth_l1_loss( - (label["property"] - out_bias) / out_std, - (model_pred["property"] - out_bias) / out_std, + (label[self.property_name] - out_bias) / out_std, + (model_pred[self.property_name] - out_bias) / out_std, reduction="sum", beta=self.beta, ) elif self.loss_func == "mae": loss += F.l1_loss( - (label["property"] - out_bias) / out_std, - (model_pred["property"] - out_bias) / out_std, + (label[self.property_name] - out_bias) / out_std, + (model_pred[self.property_name] - out_bias) / out_std, reduction="sum", ) elif self.loss_func == "mse": loss += F.mse_loss( - (label["property"] - out_bias) / out_std, - (model_pred["property"] - out_bias) / out_std, + (label[self.property_name] - out_bias) / out_std, + (model_pred[self.property_name] - out_bias) / out_std, reduction="sum", ) elif self.loss_func == "rmse": loss += torch.sqrt( F.mse_loss( - (label["property"] - out_bias) / out_std, - (model_pred["property"] - out_bias) / out_std, + (label[self.property_name] - out_bias) / out_std, + (model_pred[self.property_name] - out_bias) / out_std, reduction="mean", ) ) @@ -160,28 +141,28 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False # more loss if "smooth_mae" in self.metric: more_loss["smooth_mae"] = F.smooth_l1_loss( - label["property"], - model_pred["property"], + label[self.property_name], + model_pred[self.property_name], reduction="mean", beta=self.beta, ).detach() if "mae" in self.metric: more_loss["mae"] = F.l1_loss( - label["property"], - model_pred["property"], + label[self.property_name], + model_pred[self.property_name], reduction="mean", ).detach() if "mse" in self.metric: more_loss["mse"] = F.mse_loss( - label["property"], - model_pred["property"], + label[self.property_name], + model_pred[self.property_name], reduction="mean", ).detach() if "rmse" in self.metric: more_loss["rmse"] = torch.sqrt( F.mse_loss( - label["property"], - model_pred["property"], + label[self.property_name], + model_pred[self.property_name], reduction="mean", ) ).detach() @@ -192,14 +173,13 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" label_requirement = [] - for property_name in self.property_name: - label_requirement.append( - DataRequirementItem( - property_name, - ndof=self.property_name_dim_mapping[property_name], - atomic=False, - must=True, - high_prec=True, - ) + label_requirement.append( + DataRequirementItem( + self.property_name, + ndof=self.task_dim, + atomic=False, + must=True, + high_prec=True, ) + ) return label_requirement diff 
diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py
index a64eca0fe9..c1f0ef0b69 100644
--- a/deepmd/pt/model/atomic_model/base_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/base_atomic_model.py
@@ -456,7 +456,6 @@ def change_out_bias(
                 model_forward=self._get_forward_wrapper_func(),
                 rcond=self.rcond,
                 preset_bias=self.preset_out_bias,
-                atomic_output=self.atomic_output_def(),
             )
             self._store_out_stat(delta_bias, out_std, add=True)
         elif bias_adjust_mode == "set-by-statistic":
@@ -467,7 +466,7 @@ def change_out_bias(
                 stat_file_path=stat_file_path,
                 rcond=self.rcond,
                 preset_bias=self.preset_out_bias,
-                atomic_output=self.atomic_output_def(),
+                property_fitting=("property_name" in vars(self.fitting_net)),
             )
             self._store_out_stat(bias_out, std_out)
         else:
diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py
index 4f357ca1ea..491a524da8 100644
--- a/deepmd/pt/model/model/__init__.py
+++ b/deepmd/pt/model/model/__init__.py
@@ -98,11 +98,6 @@ def _get_standard_model_components(model_params, ntypes):
         fitting_net["out_dim"] = descriptor.get_dim_emb()
     if "ener" in fitting_net["type"]:
         fitting_net["return_energy"] = True
-    if "property" in fitting_net["type"]:
-        if isinstance(fitting_net["property_dim"], list):
-            fitting_net["task_dim"] = sum(fitting_net["property_dim"])
-        else:
-            fitting_net["task_dim"] = fitting_net["property_dim"]
     fitting = BaseFitting(**fitting_net)
     return descriptor, fitting, fitting_net["type"]
diff --git a/deepmd/pt/model/model/property_model.py b/deepmd/pt/model/model/property_model.py
index 945b7c96ba..efe0854b5c 100644
--- a/deepmd/pt/model/model/property_model.py
+++ b/deepmd/pt/model/model/property_model.py
@@ -38,8 +38,8 @@ def __init__(
     def translated_output_def(self):
         out_def_data = self.model_output_def().get_data()
         output_def = {
-            "atom_property": out_def_data["property"],
-            "property": out_def_data["property_redu"],
+            f"atom_{self.get_property_name()}": out_def_data[self.get_property_name()],
+            self.get_property_name(): out_def_data[f"{self.get_property_name()}_redu"],
         }
         if "mask" in out_def_data:
             output_def["mask"] = out_def_data["mask"]
@@ -63,8 +63,8 @@ def forward(
             do_atomic_virial=do_atomic_virial,
         )
         model_predict = {}
-        model_predict["atom_property"] = model_ret["property"]
-        model_predict["property"] = model_ret["property_redu"]
+        model_predict[f"atom_{self.get_property_name()}"] = model_ret[self.get_property_name()]
+        model_predict[self.get_property_name()] = model_ret[f"{self.get_property_name()}_redu"]
         if "mask" in model_ret:
             model_predict["mask"] = model_ret["mask"]
         return model_predict
@@ -77,18 +77,13 @@ def get_task_dim(self) -> int:
     @torch.jit.export
     def get_intensive(self) -> bool:
         """Get whether the property is intensive."""
-        return self.model_output_def()["property"].intensive
+        return self.model_output_def()[self.get_property_name()].intensive

     @torch.jit.export
-    def get_property_name(self) -> Union[list[str], str]:
+    def get_property_name(self) -> str:
         """Get the name of the property."""
         return self.get_fitting_net().property_name

-    @torch.jit.export
-    def get_property_dim(self) -> Union[list[int], int]:
-        """Get the dimension of the property."""
-        return self.get_fitting_net().property_dim
-
     @torch.jit.export
     def forward_lower(
         self,
@@ -113,8 +108,8 @@ def forward_lower(
             extra_nlist_sort=self.need_sorted_nlist_for_lower(),
         )
         model_predict = {}
-        model_predict["atom_property"] = model_ret["property"]
-        model_predict["property"] = model_ret["property_redu"]
+        model_predict[f"atom_{self.get_property_name()}"] = model_ret[self.get_property_name()]
+        model_predict[self.get_property_name()] = model_ret[f"{self.get_property_name()}_redu"]
         if "mask" in model_ret:
             model_predict["mask"] = model_ret["mask"]
         return model_predict
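Reviewer note: the model's translated outputs are renamed from the fixed "property"/"atom_property" keys to the user-chosen name. With `property_name="humo"`, a forward pass now returns a dict shaped like:

    {
        "humo": ...,       # reduced per-frame property, was "property"
        "atom_humo": ...,  # per-atom property, was "atom_property"
        "mask": ...,       # unchanged, when present
    }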
model_predict[f"atom_{self.get_property_name()}"] = model_ret[self.get_property_name()] + model_predict[self.get_property_name()] = model_ret[f"{self.get_property_name()}_redu"] if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] return model_predict diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index d48751487d..710da718ec 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -2,7 +2,6 @@ import logging from typing import ( Optional, - Union, ) import torch @@ -46,13 +45,8 @@ class PropertyFittingNet(InvarFitting): task_dim : int The dimension of outputs of fitting net. property_name: - The names of fitting properties, which should be consistent with the property names in the dataset. - If the data file is named `humo.npy`, this parameter should be "humo" or ["humo"]. - If you want to fit two properties at the same time, supposing that the data files are named `humo.npy` and `lumo.npy`, - this parameter should be `["humo", "lumo"]`. - property_dim: - The dimensions of fitting properties, which should be consistent with the property dimensions in the dataset. - Note that the order here must be the same as the order of `property_name`. + The name of fitting property, which should be consistent with the property name in the dataset. + If the data file is named `humo.npy`, this parameter should be "humo". neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_p : torch.Tensor, optional @@ -82,9 +76,8 @@ def __init__( self, ntypes: int, dim_descrpt: int, - task_dim: int, - property_name: Union[str, list], - property_dim: Union[int, list] = 1, + property_name: str, + task_dim: int = 1, neuron: list[int] = [128, 128, 128], bias_atom_p: Optional[torch.Tensor] = None, intensive: bool = False, @@ -100,18 +93,9 @@ def __init__( ) -> None: self.task_dim = task_dim self.intensive = intensive - if isinstance(property_name, str): - property_name = [property_name] - if isinstance(property_dim, int): - property_dim = [property_dim] - assert len(property_name) == len(property_dim), ( - f"The number of property names ({len(property_name)}) must match " - f"the number of property dimensions ({len(property_dim)})." 
diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index ecd660ef2a..bdd1c0befd 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -1241,10 +1241,8 @@ def get_loss(loss_params, start_lr, _ntypes, _model):
     elif loss_type == "property":
         task_dim = _model.get_task_dim()
         property_name = _model.get_property_name()
-        property_dim = _model.get_property_dim()
         loss_params["task_dim"] = task_dim
         loss_params["property_name"] = property_name
-        loss_params["property_dim"] = property_dim
         return PropertyLoss(**loss_params)
     else:
         loss_params["starter_learning_rate"] = start_lr
""" # try to restore the bias from stat file bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys) @@ -291,13 +291,6 @@ def compute_output_stats( # remove the keys that are not in the sample keys = [keys] if isinstance(keys, str) else keys assert isinstance(keys, list) - if "property" in atomic_output.var_defs: - sub_keys = [] - for key in keys: - if atomic_output.var_defs[key].sub_var_name is not None: - sub_keys.extend(atomic_output.var_defs[key].sub_var_name) - del keys - keys = sub_keys new_keys = [ ii for ii in keys @@ -370,7 +363,7 @@ def compute_output_stats( rcond, preset_bias, model_pred_g, - atomic_output, + property_fitting, ) bias_atom_a, std_atom_a = compute_output_stats_atomic( sampled, @@ -381,16 +374,6 @@ def compute_output_stats( # merge global/atomic bias bias_atom_e, std_atom_e = {}, {} - keys = ( - ["property"] - if ( - "property" in atomic_output.var_defs - and ( - ii in keys for ii in atomic_output.var_defs["property"].sub_var_name - ) - ) - else keys - ) for kk in keys: # use atomic bias whenever available if kk in bias_atom_a: @@ -423,7 +406,7 @@ def compute_output_stats_global( rcond: Optional[float] = None, preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, model_pred: Optional[dict[str, np.ndarray]] = None, - atomic_output: Optional[FittingOutputDef] = None, + property_fitting: bool = False, ): """This function only handle stat computation from reduced global labels.""" # return directly if model predict is empty for global @@ -494,7 +477,7 @@ def compute_output_stats_global( std_atom_e = {} for kk in keys: if kk in stats_input: - if "property" in atomic_output.var_defs: + if property_fitting: bias_atom_e[kk], std_atom_e[kk] = compute_stats_property( stats_input[kk], merged_natoms[kk], @@ -510,24 +493,9 @@ def compute_output_stats_global( else: # this key does not have global labels, skip it. continue - if "property" in atomic_output.var_defs: - concat_bias = [] - concat_std = [] - for ii in atomic_output.var_defs["property"].sub_var_name: - assert ii in bias_atom_e.keys() - assert ii in std_atom_e.keys() - concat_bias.append(bias_atom_e[ii]) - concat_std.append(std_atom_e[ii]) - bias_atom_e = {"property": np.concatenate(concat_bias, axis=-1)} - std_atom_e = { - "property": np.tile( - np.concatenate(concat_std, axis=-1), - (bias_atom_e["property"].shape[0], 1), - ) - } + if property_fitting: return bias_atom_e, std_atom_e - bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e) # unbias_e is only used for print rmse diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 511b5378b3..98f2fc5fe4 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -1578,9 +1578,9 @@ def fitting_property(): doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." doc_seed = "Random seed for parameter initialization of the fitting net" + doc_task_dim = "The dimension of outputs of fitting net" doc_intensive = "Whether the fitting property is intensive" - doc_property_name = "The names of fitting properties, which should be consistent with the property names in the dataset." - doc_property_dim = "The dimensions of fitting properties, which should be consistent with the property dimensions in the dataset." + doc_property_name = "The names of fitting property, which should be consistent with the property name in the dataset." 
return [ Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), @@ -1609,6 +1609,7 @@ def fitting_property(): Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt), Argument("precision", str, optional=True, default="default", doc=doc_precision), Argument("seed", [int, None], optional=True, doc=doc_seed), + Argument("task_dim", int, optional=True, default=1, doc=doc_task_dim), Argument("intensive", bool, optional=True, default=False, doc=doc_intensive), Argument( "property_name", @@ -1616,13 +1617,6 @@ def fitting_property(): optional=False, doc=doc_property_name, ), - Argument( - "property_dim", - [int, list], - optional=True, - default=1, - doc=doc_property_dim, - ), ] diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index 2cf11cb80d..0b2c31b5f7 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -179,5 +179,6 @@ def compute_stats_property( computed_output_bias = computed_output_bias.reshape([natoms.shape[1]] + var_shape) # noqa: RUF005 output_std = output_std.reshape(var_shape) + output_std = np.tile(output_std, (computed_output_bias.shape[0], 1)) - return computed_output_bias, output_std + return computed_output_bias, output_std \ No newline at end of file