[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Mar 18, 2024
1 parent 0dc11d6 commit 786b528
Showing 15 changed files with 156 additions and 114 deletions.
26 changes: 11 additions & 15 deletions deepmd/dpmodel/fitting/property_fitting.py
@@ -1,14 +1,20 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
from typing import (
- Callable,
- Union,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ List,
+ Optional,
+ Union,
)

+ from deepmd.dpmodel.common import (
+ DEFAULT_PRECISION,
+ )
+ from deepmd.dpmodel.fitting.invar_fitting import (
+ InvarFitting,
+ )
from deepmd.dpmodel.output_def import (
FittingOutputDef,
OutputVariableDef,
@@ -17,17 +23,11 @@
DPPath,
)

- from deepmd.dpmodel.common import (
- DEFAULT_PRECISION,
- )
- from deepmd.dpmodel.fitting.invar_fitting import (
- InvarFitting,
- )

if TYPE_CHECKING:
from deepmd.dpmodel.fitting.general_fitting import (
GeneralFitting,
)

from deepmd.utils.version import (
check_version_compatibility,
)
@@ -91,11 +91,7 @@ def deserialize(cls, data: dict) -> "GeneralFitting":

def serialize(self) -> dict:
"""Serialize the fitting to dict."""
- return {
- **super().serialize(),
- "type": "property",
- "task_num": self.task_num
- }
+ return {**super().serialize(), "type": "property", "task_num": self.task_num}

def output_def(self) -> FittingOutputDef:
return FittingOutputDef(
@@ -134,4 +130,4 @@ def compute_output_stats(
pass

# make jit happy with torch 2.0.0
- exclude_types: List[int]
\ No newline at end of file
+ exclude_types: List[int]
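
The hunk above ends at the `exclude_types: List[int]` class-level annotation, whose comment explains it exists to "make jit happy": TorchScript needs an explicit type for attributes it cannot infer. A minimal, self-contained sketch of that pattern, with a hypothetical ToyFitting class that is not part of deepmd-kit:

from typing import List

import torch


class ToyFitting(torch.nn.Module):
    # Class-level annotation: without it, torch.jit.script may fail to type
    # the attribute (e.g. when the list is empty at construction time).
    exclude_types: List[int]

    def __init__(self, exclude_types: List[int]) -> None:
        super().__init__()
        self.exclude_types = exclude_types

    def forward(self, atype: torch.Tensor) -> torch.Tensor:
        # Mask out atoms whose type appears in exclude_types.
        mask = torch.ones_like(atype, dtype=torch.bool)
        for t in self.exclude_types:
            mask = mask & (atype != t)
        return mask


scripted = torch.jit.script(ToyFitting([0]))  # compiles thanks to the annotation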
1 change: 1 addition & 0 deletions deepmd/infer/deep_property.py
@@ -133,4 +133,5 @@ def eval(
def get_numb_task(self) -> int:
return self.deep_eval.get_numb_task()

+
__all__ = ["DeepProperty"]
6 changes: 3 additions & 3 deletions deepmd/pt/infer/deep_eval.py
@@ -35,12 +35,12 @@
from deepmd.infer.deep_pot import (
DeepPot,
)
- from deepmd.infer.deep_wfc import (
- DeepWFC,
- )
from deepmd.infer.deep_property import (
DeepProperty,
)
+ from deepmd.infer.deep_wfc import (
+ DeepWFC,
+ )
from deepmd.pt.model.model import (
get_model,
)
6 changes: 3 additions & 3 deletions deepmd/pt/loss/__init__.py
@@ -11,12 +11,12 @@
from .loss import (
TaskLoss,
)
- from .tensor import (
- TensorLoss,
- )
from .property import (
PropertyLoss,
)
+ from .tensor import (
+ TensorLoss,
+ )

__all__ = [
"DenoiseLoss",
79 changes: 60 additions & 19 deletions deepmd/pt/loss/property.py
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
+ import logging
from typing import (
List,
)
@@ -12,15 +13,13 @@
from deepmd.pt.utils import (
env,
)
- from deepmd.pt.utils.env import (
- GLOBAL_PT_FLOAT_PRECISION,
- )
from deepmd.utils.data import (
DataRequirementItem,
)
- import logging

log = logging.getLogger(__name__)

+
class PropertyLoss(TaskLoss):
def __init__(
self,
@@ -33,11 +32,11 @@ def __init__(
Parameters
----------
starter_learning_rate : float
task_num : float
The learning rate at the start of the training.
- loss_func: str
+ loss_func : str
The loss function, such as "smooth_mae", "mae", "rmse"
- metric: list
+ metric : list
The metric such as mae,rmse which will be printed.
**kwargs
Other keyword arguments.
@@ -62,35 +61,77 @@ def forward(self, model_pred, label, natoms, learning_rate, mae=False):
-------
- loss: Loss to minimize.
"""
- assert label['property'].shape[-1] == self.task_num
- assert model_pred['property'].shape[-1] == self.task_num
+ assert label["property"].shape[-1] == self.task_num
+ assert model_pred["property"].shape[-1] == self.task_num
loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0]
more_loss = {}

- label_mean = torch.tensor(self.mean, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
- label_std = torch.tensor(self.std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
+ label_mean = torch.tensor(
+ self.mean, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+ )
+ label_std = torch.tensor(
+ self.std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+ )

# loss
if self.loss_func == "smooth_mae":
- loss += F.smooth_l1_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum", beta=self.beta)
+ loss += F.smooth_l1_loss(
+ (label["property"] - label_mean) / label_std,
+ model_pred["property"],
+ reduction="sum",
+ beta=self.beta,
+ )
elif self.func == "mae":
- loss += F.l1_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum")
+ loss += F.l1_loss(
+ (label["property"] - label_mean) / label_std,
+ model_pred["property"],
+ reduction="sum",
+ )
elif self.func == "mse":
- loss += F.mse_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="sum")
+ loss += F.mse_loss(
+ (label["property"] - label_mean) / label_std,
+ model_pred["property"],
+ reduction="sum",
+ )
elif self.func == "rmse":
- loss += torch.sqrt(F.mse_loss((label['property'] - label_mean) / label_std, model_pred['property'], reduction="mean"))
+ loss += torch.sqrt(
+ F.mse_loss(
+ (label["property"] - label_mean) / label_std,
+ model_pred["property"],
+ reduction="mean",
+ )
+ )
else:
raise RuntimeError(f"Unknown loss function : {self.func}")

# more loss
if "smooth_mae" in self.metric:
more_loss["smooth_mae"] = F.smooth_l1_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean", beta=self.beta).detach()
more_loss["smooth_mae"] = F.smooth_l1_loss(
label["property"],
(model_pred["property"] * label_std) + label_mean,
reduction="mean",
beta=self.beta,
).detach()
if "mae" in self.metric:
- more_loss['mae'] = F.l1_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean").detach()
+ more_loss["mae"] = F.l1_loss(
+ label["property"],
+ (model_pred["property"] * label_std) + label_mean,
+ reduction="mean",
+ ).detach()
if "mse" in self.metric:
- more_loss['mse'] = F.mse_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean").detach()
+ more_loss["mse"] = F.mse_loss(
+ label["property"],
+ (model_pred["property"] * label_std) + label_mean,
+ reduction="mean",
+ ).detach()
if "rmse" in self.metric:
- more_loss['rmse'] = torch.sqrt(F.mse_loss(label['property'], (model_pred['property'] * label_std) + label_mean, reduction="mean")).detach()
+ more_loss["rmse"] = torch.sqrt(
+ F.mse_loss(
+ label["property"],
+ (model_pred["property"] * label_std) + label_mean,
+ reduction="mean",
+ )
+ ).detach()

return loss, more_loss

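Beyond the formatting, the reshaped code makes the normalization scheme easier to see: the training loss compares the standardized label, (label - mean) / std, against the raw network output, while every reported metric de-standardizes the prediction (pred * std + mean) so it comes out in the property's physical units. (The branches also alternate between self.loss_func and self.func; the sketch below assumes a single attribute.) A standalone illustration with made-up statistics:

import torch
import torch.nn.functional as F

mean, std, beta = 2.0, 0.5, 1.0  # assumed per-task label statistics
label = torch.tensor([[2.3], [1.8]])  # labels in physical units
pred = torch.tensor([[0.7], [-0.5]])  # network output in standardized space

# Training loss: move the label into standardized space.
loss = F.smooth_l1_loss((label - mean) / std, pred, reduction="sum", beta=beta)

# Reported metric: move the prediction back into physical units.
mae = F.l1_loss(label, pred * std + mean, reduction="mean").detach()

print(loss.item(), mae.item())
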
2 changes: 1 addition & 1 deletion deepmd/pt/model/model/__init__.py
@@ -140,7 +140,7 @@ def get_standard_model(model_params):
fitting_net["type"] = fitting_net.get("type", "ener")
fitting_net["ntypes"] = descriptor.get_ntypes()
fitting_net["mixed_types"] = descriptor.mixed_types()
- if fitting_net["type"] in ["dipole", "polar","denoise"]:
+ if fitting_net["type"] in ["dipole", "polar", "denoise"]:
fitting_net["embedding_width"] = descriptor.get_dim_emb()
fitting_net["dim_descrpt"] = descriptor.get_dim_out()
grad_force = "direct" not in fitting_net["type"]
4 changes: 2 additions & 2 deletions deepmd/pt/model/model/denoise_model.py
@@ -37,7 +37,7 @@ def forward(
fparam=fparam,
aparam=aparam,
do_atomic_virial=do_atomic_virial,
-             )
+         )
model_predict = model_ret
return model_predict

@@ -78,4 +78,4 @@ def forward_lower(
model_predict["dforce"] = model_ret["dforce"]
else:
model_predict = model_ret
- return model_predict
\ No newline at end of file
+ return model_predict
12 changes: 6 additions & 6 deletions deepmd/pt/model/model/dp_model.py
@@ -15,6 +15,9 @@
from deepmd.pt.model.model.model import (
BaseModel,
)
+ from deepmd.pt.model.task.denoise import (
+ DenoiseFittingNet,
+ )
from deepmd.pt.model.task.dipole import (
DipoleFittingNet,
)
@@ -31,9 +34,6 @@
from deepmd.pt.model.task.property import (
PropertyFittingNet,
)
- from deepmd.pt.model.task.denoise import (
- DenoiseFittingNet
- )

from .make_model import (
make_model,
Expand All @@ -51,6 +51,9 @@ def __new__(
atomic_model_: Optional[DPAtomicModel] = None,
**kwargs,
):
+ from deepmd.pt.model.model.denoise_model import (
+ DenoiseModel,
+ )

[Code scanning / CodeQL check notice] Cyclic import: import of module deepmd.pt.model.model.denoise_model begins an import cycle.
from deepmd.pt.model.model.dipole_model import (
DipoleModel,
)
@@ -66,9 +69,6 @@ def __new__(
from deepmd.pt.model.model.property_model import (
PropertyModel,
)

[Code scanning / CodeQL check notice] Cyclic import: import of module deepmd.pt.model.model.property_model begins an import cycle.
- from deepmd.pt.model.model.denoise_model import (
- DenoiseModel,
- )

if atomic_model_ is not None:
fitting = atomic_model_.fitting_net
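
Both CodeQL notices point at the same design choice: the concrete model classes are imported inside __new__, at call time, because their modules import this module back, so hoisting the imports to module scope would turn the latent cycle into a hard ImportError. A runnable toy version of that dispatch, with hypothetical class names rather than the deepmd API:

class BaseModel:
    def __new__(cls, fitting_type: str = "ener", **kwargs):
        if cls is BaseModel:
            # In dp_model.py the subclasses are imported right here, lazily,
            # because their modules import this module back.
            mapping = {"property": Propertyish, "denoise": Denoiseish}
            cls = mapping.get(fitting_type, BaseModel)
        return super().__new__(cls)

    def __init__(self, fitting_type: str = "ener", **kwargs) -> None:
        self.fitting_type = fitting_type


class Propertyish(BaseModel):
    pass


class Denoiseish(BaseModel):
    pass


print(type(BaseModel(fitting_type="property")).__name__)  # Propertyish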
2 changes: 1 addition & 1 deletion deepmd/pt/model/model/property_model.py
@@ -40,7 +40,7 @@ def forward(
)
model_predict = {}
model_predict["atom_property"] = model_ret["property"]
model_predict["property"] = model_ret["property_redu"]/atype.shape[-1]
model_predict["property"] = model_ret["property_redu"] / atype.shape[-1]
if "mask" in model_ret:
model_predict["mask"] = model_ret["mask"]
return model_predict
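
The fixed line is doing more than the spacing suggests: model_ret["property_redu"] holds the per-atom property summed over atoms, and dividing by atype.shape[-1] (the number of atoms per frame) turns that extensive sum into a per-frame average. A toy check with assumed (nframes, natoms, task_num) shapes:

import torch

nframes, natoms, task_num = 2, 3, 1
# Per-atom property: frame 0 holds [0, 1, 2], frame 1 holds [3, 4, 5].
atom_property = torch.arange(6, dtype=torch.float64).reshape(nframes, natoms, task_num)
atype = torch.zeros(nframes, natoms, dtype=torch.long)  # one type index per atom

property_redu = atom_property.sum(dim=1)  # summed over atoms -> shape (2, 1)
prop = property_redu / atype.shape[-1]  # divide by natoms, as in the diff

print(prop)  # tensor([[1.], [4.]], dtype=torch.float64): per-atom averages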
6 changes: 3 additions & 3 deletions deepmd/pt/model/task/__init__.py
@@ -21,12 +21,12 @@
from .polarizability import (
PolarFittingNet,
)
- from .type_predict import (
- TypePredictNet,
- )
from .property import (
PropertyFittingNet,
)
+ from .type_predict import (
+ TypePredictNet,
+ )

__all__ = [
"FittingNetAttenLcc",
(The diffs for the remaining 5 changed files are not shown here.)