From 786b52881a81f2c2ade35ebd0ac6e13f86275633 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 16:01:36 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/dpmodel/fitting/property_fitting.py  | 26 +++---
 deepmd/infer/deep_property.py               |  1 +
 deepmd/pt/infer/deep_eval.py                |  6 +-
 deepmd/pt/loss/__init__.py                  |  6 +-
 deepmd/pt/loss/property.py                  | 79 ++++++++++++++-----
 deepmd/pt/model/model/__init__.py           |  2 +-
 deepmd/pt/model/model/denoise_model.py      |  4 +-
 deepmd/pt/model/model/dp_model.py           | 12 +--
 deepmd/pt/model/model/property_model.py     |  2 +-
 deepmd/pt/model/task/__init__.py            |  6 +-
 deepmd/pt/model/task/denoise.py             | 61 ++++++--------
 deepmd/pt/model/task/property.py            | 19 ++---
 deepmd/pt/train/training.py                 |  2 +-
 deepmd/utils/argcheck.py                    | 35 ++++++--
 .../tests/pt/model/test_property_fitting.py |  9 ++-
 15 files changed, 156 insertions(+), 114 deletions(-)

diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py
index ce03ca0be8..056a1af861 100644
--- a/deepmd/dpmodel/fitting/property_fitting.py
+++ b/deepmd/dpmodel/fitting/property_fitting.py
@@ -1,14 +1,20 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import copy
 from typing import (
-    Callable,
-    Union,
     TYPE_CHECKING,
     Any,
+    Callable,
     List,
     Optional,
+    Union,
 )
 
+from deepmd.dpmodel.common import (
+    DEFAULT_PRECISION,
+)
+from deepmd.dpmodel.fitting.invar_fitting import (
+    InvarFitting,
+)
 from deepmd.dpmodel.output_def import (
     FittingOutputDef,
     OutputVariableDef,
@@ -17,17 +23,11 @@
     DPPath,
 )
 
-from deepmd.dpmodel.common import (
-    DEFAULT_PRECISION,
-)
-from deepmd.dpmodel.fitting.invar_fitting import (
-    InvarFitting,
-)
-
 if TYPE_CHECKING:
     from deepmd.dpmodel.fitting.general_fitting import (
         GeneralFitting,
     )
+
 from deepmd.utils.version import (
     check_version_compatibility,
 )
@@ -91,11 +91,7 @@ def deserialize(cls, data: dict) -> "GeneralFitting":
 
     def serialize(self) -> dict:
         """Serialize the fitting to dict."""
-        return {
-            **super().serialize(),
-            "type": "property",
-            "task_num": self.task_num
-        }
+        return {**super().serialize(), "type": "property", "task_num": self.task_num}
 
     def output_def(self) -> FittingOutputDef:
         return FittingOutputDef(
@@ -134,4 +130,4 @@ def compute_output_stats(
         pass
 
     # make jit happy with torch 2.0.0
-    exclude_types: List[int]
\ No newline at end of file
+    exclude_types: List[int]
diff --git a/deepmd/infer/deep_property.py b/deepmd/infer/deep_property.py
index d8eea8608f..f53d7acd25 100644
--- a/deepmd/infer/deep_property.py
+++ b/deepmd/infer/deep_property.py
@@ -133,4 +133,5 @@ def eval(
     def get_numb_task(self) -> int:
         return self.deep_eval.get_numb_task()
 
+
 __all__ = ["DeepProperty"]
diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py
index aaf37b96d8..f83a3c4782 100644
--- a/deepmd/pt/infer/deep_eval.py
+++ b/deepmd/pt/infer/deep_eval.py
@@ -35,12 +35,12 @@
 from deepmd.infer.deep_pot import (
     DeepPot,
 )
-from deepmd.infer.deep_wfc import (
-    DeepWFC,
-)
 from deepmd.infer.deep_property import (
     DeepProperty,
 )
+from deepmd.infer.deep_wfc import (
+    DeepWFC,
+)
 from deepmd.pt.model.model import (
     get_model,
 )
diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py
index c23df6b6d3..0656db8dbe 100644
--- a/deepmd/pt/loss/__init__.py
+++ b/deepmd/pt/loss/__init__.py
@@ -11,12 +11,12 @@
 from .loss import (
     TaskLoss,
 )
-from .tensor import (
-    TensorLoss,
-)
 from .property import (
     PropertyLoss,
 )
+from .tensor import (
+    TensorLoss,
+)
 
 __all__ = [
     "DenoiseLoss",
diff --git a/deepmd/pt/loss/property.py b/deepmd/pt/loss/property.py
index 69826f7f69..5aaaf0fcf4 100644
--- a/deepmd/pt/loss/property.py
+++ b/deepmd/pt/loss/property.py
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
 from typing import (
     List,
 )
@@ -12,15 +13,13 @@
 from deepmd.pt.utils import (
     env,
 )
-from deepmd.pt.utils.env import (
-    GLOBAL_PT_FLOAT_PRECISION,
-)
 from deepmd.utils.data import (
     DataRequirementItem,
 )
-import logging
+
 log = logging.getLogger(__name__)
 
+
 class PropertyLoss(TaskLoss):
     def __init__(
         self,
@@ -33,11 +32,11 @@ def __init__(
 
         Parameters
        ----------
-        starter_learning_rate : float
-            The learning rate at the start of the training.
+        task_num : int
+            The number of properties to be predicted.
-        loss_func: str
+        loss_func : str
             The loss function, such as "smooth_mae", "mae", "rmse"
-        metric: list
+        metric : list
             The metric such as mae,rmse which will be printed.
         **kwargs
             Other keyword arguments.
@@ -62,35 +61,77 @@ def forward(self, model_pred, label, natoms, learning_rate, mae=False):
         -------
         - loss: Loss to minimize.
         """
-        assert label['property'].shape[-1] == self.task_num
-        assert model_pred['property'].shape[-1] == self.task_num
+        assert label["property"].shape[-1] == self.task_num
+        assert model_pred["property"].shape[-1] == self.task_num
         loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0]
         more_loss = {}
-        label_mean = torch.tensor(self.mean, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
-        label_std = torch.tensor(self.std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
+        label_mean = torch.tensor(
+            self.mean, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+        )
+        label_std = torch.tensor(
+            self.std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+        )
 
         # loss
         if self.loss_func == "smooth_mae":
-            loss += F.smooth_l1_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum", beta=self.beta)
+            loss += F.smooth_l1_loss(
+                (label["property"] - label_mean) / label_std,
+                model_pred["property"],
+                reduction="sum",
+                beta=self.beta,
+            )
-        elif self.func == "mae":
+        elif self.loss_func == "mae":
-            loss += F.l1_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum")
+            loss += F.l1_loss(
+                (label["property"] - label_mean) / label_std,
+                model_pred["property"],
+                reduction="sum",
+            )
-        elif self.func == "mse":
+        elif self.loss_func == "mse":
-            loss += F.mse_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum")
+            loss += F.mse_loss(
+                (label["property"] - label_mean) / label_std,
+                model_pred["property"],
+                reduction="sum",
+            )
-        elif self.func == "rmse":
+        elif self.loss_func == "rmse":
-            loss += torch.sqrt(F.mse_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="mean"))
+            loss += torch.sqrt(
+                F.mse_loss(
+                    (label["property"] - label_mean) / label_std,
+                    model_pred["property"],
+                    reduction="mean",
+                )
+            )
         else:
-            raise RuntimeError(f"Unknown loss function : {self.func}")
+            raise RuntimeError(f"Unknown loss function : {self.loss_func}")
 
         # more loss
         if "smooth_mae" in self.metric:
-            more_loss["smooth_mae"] = F.smooth_l1_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean", beta=self.beta).detach()
+            more_loss["smooth_mae"] = F.smooth_l1_loss(
+                label["property"],
+                (model_pred["property"] * label_std) + label_mean,
+                reduction="mean",
+                beta=self.beta,
+            ).detach()
         if "mae" in self.metric:
-            more_loss['mae'] = F.l1_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean").detach()
more_loss["mae"] = F.l1_loss( + label["property"], + (model_pred["property"] * label_std) + label_mean, + reduction="mean", + ).detach() if "mse" in self.metric: - more_loss['mse'] = F.mse_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean").detach() + more_loss["mse"] = F.mse_loss( + label["property"], + (model_pred["property"] * label_std) + label_mean, + reduction="mean", + ).detach() if "rmse" in self.metric: - more_loss['rmse'] = torch.sqrt(F.mse_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean")).detach() + more_loss["rmse"] = torch.sqrt( + F.mse_loss( + label["property"], + (model_pred["property"] * label_std) + label_mean, + reduction="mean", + ) + ).detach() return loss, more_loss diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index c42f369318..efbef794c0 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -140,7 +140,7 @@ def get_standard_model(model_params): fitting_net["type"] = fitting_net.get("type", "ener") fitting_net["ntypes"] = descriptor.get_ntypes() fitting_net["mixed_types"] = descriptor.mixed_types() - if fitting_net["type"] in ["dipole", "polar","denoise"]: + if fitting_net["type"] in ["dipole", "polar", "denoise"]: fitting_net["embedding_width"] = descriptor.get_dim_emb() fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index 249d995f7b..f5db6f5835 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -37,7 +37,7 @@ def forward( fparam=fparam, aparam=aparam, do_atomic_virial=do_atomic_virial, - ) + ) model_predict = model_ret return model_predict @@ -78,4 +78,4 @@ def forward_lower( model_predict["dforce"] = model_ret["dforce"] else: model_predict = model_ret - return model_predict \ No newline at end of file + return model_predict diff --git a/deepmd/pt/model/model/dp_model.py b/deepmd/pt/model/model/dp_model.py index 495a233d0b..1cf8282981 100644 --- a/deepmd/pt/model/model/dp_model.py +++ b/deepmd/pt/model/model/dp_model.py @@ -15,6 +15,9 @@ from deepmd.pt.model.model.model import ( BaseModel, ) +from deepmd.pt.model.task.denoise import ( + DenoiseFittingNet, +) from deepmd.pt.model.task.dipole import ( DipoleFittingNet, ) @@ -31,9 +34,6 @@ from deepmd.pt.model.task.property import ( PropertyFittingNet, ) -from deepmd.pt.model.task.denoise import ( - DenoiseFittingNet -) from .make_model import ( make_model, @@ -51,6 +51,9 @@ def __new__( atomic_model_: Optional[DPAtomicModel] = None, **kwargs, ): + from deepmd.pt.model.model.denoise_model import ( + DenoiseModel, + ) from deepmd.pt.model.model.dipole_model import ( DipoleModel, ) @@ -66,9 +69,6 @@ def __new__( from deepmd.pt.model.model.property_model import ( PropertyModel, ) - from deepmd.pt.model.model.denoise_model import ( - DenoiseModel, - ) if atomic_model_ is not None: fitting = atomic_model_.fitting_net diff --git a/deepmd/pt/model/model/property_model.py b/deepmd/pt/model/model/property_model.py index dc6372fd2e..66f58ca665 100644 --- a/deepmd/pt/model/model/property_model.py +++ b/deepmd/pt/model/model/property_model.py @@ -40,7 +40,7 @@ def forward( ) model_predict = {} model_predict["atom_property"] = model_ret["property"] - model_predict["property"] = model_ret["property_redu"]/atype.shape[-1] + model_predict["property"] = model_ret["property_redu"] / 
         if "mask" in model_ret:
             model_predict["mask"] = model_ret["mask"]
         return model_predict
diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py
index 4f43343f76..9906d435cb 100644
--- a/deepmd/pt/model/task/__init__.py
+++ b/deepmd/pt/model/task/__init__.py
@@ -21,12 +21,12 @@
 from .polarizability import (
     PolarFittingNet,
 )
-from .type_predict import (
-    TypePredictNet,
-)
 from .property import (
     PropertyFittingNet,
 )
+from .type_predict import (
+    TypePredictNet,
+)
 
 __all__ = [
     "FittingNetAttenLcc",
diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py
index 26fb4742c5..6ec85ef71d 100644
--- a/deepmd/pt/model/task/denoise.py
+++ b/deepmd/pt/model/task/denoise.py
@@ -1,11 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 import copy
 import logging
-import os
-import tempfile
-from abc import (
-    abstractmethod,
-)
 from typing import (
     Callable,
     List,
@@ -16,25 +11,27 @@
 import numpy as np
 import torch
 
-from deepmd.infer.deep_eval import (
-    DeepEval,
+from deepmd.dpmodel import (
+    FittingOutputDef,
+    OutputVariableDef,
+    fitting_check_output,
 )
 from deepmd.pt.model.network.mlp import (
     FittingNet,
     NetworkCollection,
 )
 from deepmd.pt.model.network.network import (
-    ResidualDeep,
+    MaskLMHead,
+    NonLinearHead,
 )
-from deepmd.pt.model.task.base_fitting import (
-    BaseFitting,
+from deepmd.pt.model.task.fitting import (
+    Fitting,
 )
 from deepmd.pt.utils import (
     env,
 )
 from deepmd.pt.utils.env import (
     DEFAULT_PRECISION,
-    DEVICE,
     PRECISION_DICT,
 )
 from deepmd.pt.utils.exclude_mask import (
@@ -44,24 +41,6 @@
     to_numpy_array,
     to_torch_tensor,
 )
-from deepmd.utils.data_system import (
-    DeepmdDataSystem,
-)
-from deepmd.utils.finetune import (
-    change_energy_bias_lower,
-)
-from deepmd.pt.model.task.fitting import (
-    Fitting,
-)
-from deepmd.dpmodel import (
-    FittingOutputDef,
-    OutputVariableDef,
-    fitting_check_output,
-)
-from deepmd.pt.model.network.network import (
-    MaskLMHead,
-    NonLinearHead,
-)
 from deepmd.utils.path import (
     DPPath,
 )
@@ -71,6 +50,7 @@
 
 log = logging.getLogger(__name__)
 
+
 @Fitting.register("denoise")
 class DenoiseFittingNet(Fitting):
     """Construct a denoise fitting net.
@@ -139,7 +119,7 @@ def __init__(
         **kwargs,
     ):
         super().__init__()
-        self.var_name = ["updated_coord","logits"]
+        self.var_name = ["updated_coord", "logits"]
         self.ntypes = ntypes
         self.dim_descrpt = dim_descrpt
         self.embedding_width = embedding_width
@@ -364,7 +344,7 @@ def __getitem__(self, key):
     def _net_out_dim(self):
         """Set the FittingNet output dim."""
         return [3, self.ntypes]
-        #pass
+        # pass
 
     def output_def(self):
         return FittingOutputDef(
@@ -482,17 +462,17 @@ def forward(
             device=descriptor.device,
         )
         if self.mixed_types:
-            atom_updated_coord = ((self.filter_layers_coord.networks[0](g2)) * h2).sum(dim=-2) / (sw.sum(dim=-1).unsqueeze(-1)+1e-6)
+            atom_updated_coord = ((self.filter_layers_coord.networks[0](g2)) * h2).sum(
+                dim=-2
+            ) / (sw.sum(dim=-1).unsqueeze(-1) + 1e-6)
             atom_logits = self.filter_layers_logits[0](xx)
-            #Is xx_zeros useful in denoise task??????????????
-            #if xx_zeros is not None:
+            # TODO: determine whether xx_zeros is useful in the denoise task
+            # if xx_zeros is not None:
             #    atom_property -= self.filter_layers.networks[0](xx_zeros)
             outs_coord = (
                 outs_coord + atom_updated_coord
             )  # Shape is [nframes, natoms[0], net_dim_out]
-            outs_logits = (
-                outs_logits + atom_logits
-            )
+            outs_logits = outs_logits + atom_logits
         # TODO:
         else:
             for type_i, ll in enumerate(self.filter_layers_coord.networks):
@@ -517,8 +497,10 @@ def forward(
             # nf x nloc x nod
             outs_coord = outs_coord * mask[:, :, None]
             outs_logits = outs_logits * mask[:, :, None]
-        return {self.var_name[0]: outs_coord.to(env.GLOBAL_PT_FLOAT_PRECISION),
-                self.var_name[1]: outs_logits.to(env.GLOBAL_PT_FLOAT_PRECISION)}
+        return {
+            self.var_name[0]: outs_coord.to(env.GLOBAL_PT_FLOAT_PRECISION),
+            self.var_name[1]: outs_logits.to(env.GLOBAL_PT_FLOAT_PRECISION),
+        }
 
     def compute_output_stats(
         self,
@@ -543,6 +525,7 @@ def compute_output_stats(
         """
         pass
 
+
 @fitting_check_output
 class DenoiseNet(Fitting):
     def __init__(
diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py
index 8524974058..9f3a7a2373 100644
--- a/deepmd/pt/model/task/property.py
+++ b/deepmd/pt/model/task/property.py
@@ -3,12 +3,11 @@
 import logging
 from typing import (
     Callable,
-    Union,
     List,
     Optional,
+    Union,
 )
 
-import numpy as np
 import torch
 
 from deepmd.dpmodel import (
@@ -28,12 +27,12 @@
 from deepmd.pt.utils.env import (
     DEFAULT_PRECISION,
 )
-from deepmd.utils.version import (
-    check_version_compatibility,
-)
 from deepmd.utils.path import (
     DPPath,
 )
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
 
 dtype = env.GLOBAL_PT_FLOAT_PRECISION
 device = env.DEVICE
@@ -85,12 +84,8 @@ def deserialize(cls, data: dict) -> "GeneralFitting":
 
     def serialize(self) -> dict:
         """Serialize the fitting to dict."""
-        return {
-            **super().serialize(),
-            "type": "property",
-            "task_num": self.task_num
-        }
-
+        return {**super().serialize(), "type": "property", "task_num": self.task_num}
+
     def output_def(self) -> FittingOutputDef:
         return FittingOutputDef(
             [
@@ -128,4 +123,4 @@ def compute_output_stats(
         pass
 
     # make jit happy with torch 2.0.0
-    exclude_types: List[int]
\ No newline at end of file
+    exclude_types: List[int]
diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index 4480800ec8..fda9196df1 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -27,8 +27,8 @@
     DenoiseLoss,
     EnergySpinLoss,
     EnergyStdLoss,
-    TensorLoss,
     PropertyLoss,
+    TensorLoss,
 )
 from deepmd.pt.model.model import (
     DPZBLModel,
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index b3124cb859..84f33f3a07 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -1175,6 +1175,7 @@ def fitting_dipole():
         Argument("seed", [int, None], optional=True, doc=doc_seed),
     ]
 
+
 @fitting_args_plugin.register("property")
 def fitting_property():
     doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built."
@@ -1213,6 +1214,7 @@ def fitting_property():
         Argument("task_num", int, optional=True, default=1, doc=doc_task_num),
     ]
 
+
 @fitting_args_plugin.register("denoise")
 def fitting_denoise():
     doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built."
@@ -1249,6 +1251,7 @@ def fitting_denoise():
         Argument("seed", [int, None], optional=True, doc=doc_seed),
     ]
 
+
 # YWolfeee: Delete global polar mode, merge it into polar mode and use loss setting to support.
 def fitting_variant_type_args():
     doc_descrpt_type = "The type of the fitting. See explanation below. \n\n\
@@ -1991,6 +1994,7 @@ def loss_tensor():
         ),
     ]
 
+
 @loss_args_plugin.register("property")
 def loss_property():
     doc_loss_func = "The loss function, such as 'mae','smooth_mae'."
@@ -1997,24 +2001,45 @@ def loss_property():
     doc_metric = "The metric such as mae,rmse which will be printed."
     doc_mean = "The average value of training data."
     doc_std = "The standard deviation of training data."
     doc_beta = "The 'beta' parameter in 'smooth_mae' loss."
     return [
         Argument(
-            "loss_func", str, optional=True, default="smooth_mae", doc=doc_loss_func,
+            "loss_func",
+            str,
+            optional=True,
+            default="smooth_mae",
+            doc=doc_loss_func,
         ),
         Argument(
-            "metric", list, optional=True, default=["mae"], doc=doc_metric,
+            "metric",
+            list,
+            optional=True,
+            default=["mae"],
+            doc=doc_metric,
         ),
         Argument(
-            "mean", [float, int, list], optional=True, default=0, doc=doc_mean,
+            "mean",
+            [float, int, list],
+            optional=True,
+            default=0,
+            doc=doc_mean,
        ),
         Argument(
-            "std", [float, int, list], optional=True, default=1, doc=doc_std,
+            "std",
+            [float, int, list],
+            optional=True,
+            default=1,
+            doc=doc_std,
         ),
         Argument(
-            "beta", [float, int], optional=True, default=1.00, doc=doc_beta,
+            "beta",
+            [float, int],
+            optional=True,
+            default=1.00,
+            doc=doc_beta,
         ),
     ]
 
+
 def loss_variant_type_args():
     doc_loss = "The type of the loss. When the fitting type is `ener`, the loss type should be set to `ener` or left unset. When the fitting type is `dipole` or `polar`, the loss type should be set to `tensor`."
diff --git a/source/tests/pt/model/test_property_fitting.py b/source/tests/pt/model/test_property_fitting.py
index 64d1d2c4f5..00800a6251 100644
--- a/source/tests/pt/model/test_property_fitting.py
+++ b/source/tests/pt/model/test_property_fitting.py
@@ -145,6 +145,7 @@ def test_jit(
         ).to(env.DEVICE)
         torch.jit.script(ft0)
 
+
 class TestInvariance(unittest.TestCase):
     def setUp(self) -> None:
         self.natoms = 5
@@ -229,7 +230,7 @@ def test_permu(self):
         ft0 = PropertyFittingNet(
             self.nt,
             self.dd0.dim_out,
-            task_num = 8,
+            task_num=8,
             numb_fparam=0,
             numb_aparam=0,
             mixed_types=True,
@@ -274,7 +275,7 @@ def test_trans(self):
         ft0 = PropertyFittingNet(
             self.nt,
             self.dd0.dim_out,
-            task_num = 11,
+            task_num=11,
             numb_fparam=0,
             numb_aparam=0,
             mixed_types=True,
@@ -320,7 +321,7 @@ def setUp(self):
         self.ft0 = PropertyFittingNet(
             self.nt,
             self.dd0.dim_out,
-            task_num = 3,
+            task_num=3,
             numb_fparam=0,
             numb_aparam=0,
             mixed_types=True,
@@ -345,4 +346,4 @@ def tearDown(self) -> None:
 
 
 if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
+    unittest.main()
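
A minimal usage sketch for the PropertyLoss touched by this patch (an illustration, not part of the patch itself). The exact __init__ signature is not shown in the diff, so the keyword arguments below are assumptions inferred from the attributes read in forward() (self.task_num, self.loss_func, self.metric, self.mean, self.std, self.beta) and from the loss_property() defaults in deepmd/utils/argcheck.py:

    # Hypothetical smoke test; keyword names are assumptions, see note above.
    import torch

    from deepmd.pt.loss import PropertyLoss

    loss_fn = PropertyLoss(
        task_num=3,
        loss_func="smooth_mae",
        metric=["mae", "rmse"],
        mean=0,
        std=1,
        beta=1.00,
    )
    # Labels are raw property values; predictions are in normalized units,
    # matching the (label - mean) / std comparison inside forward().
    label = {"property": torch.rand(4, 3)}
    model_pred = {"property": torch.rand(4, 3)}
    loss, more_loss = loss_fn.forward(model_pred, label, natoms=5, learning_rate=1e-3)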