Feat: pt: support property fitting #3488

Closed · wants to merge 46 commits

Changes from 1 commit

Commits (46)
d1d0a0a
3.16 update:support property fitting(only zero bias and mean pooling)…
Chengqian-Zhang Mar 16, 2024
b9a7be4
3.18 update
Chengqian-Zhang Mar 18, 2024
0d58b71
Merge branch 'deepmodeling:devel' into devel
Chengqian-Zhang Mar 18, 2024
3f95a82
Add DeepProperty and UT
Chengqian-Zhang Mar 18, 2024
0dc11d6
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 18, 2024
786b528
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 18, 2024
cea7476
fix pre-commit
Chengqian-Zhang Mar 18, 2024
5cd0d56
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 18, 2024
4965529
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 18, 2024
a05cace
Add example
Chengqian-Zhang Mar 18, 2024
d93bfdb
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 18, 2024
3050172
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 18, 2024
20cee41
delete input
Chengqian-Zhang Mar 18, 2024
89f6f31
resolve cof
Chengqian-Zhang Mar 18, 2024
501b46d
fix push bug
Chengqian-Zhang Mar 18, 2024
8c5645e
Merge branch 'deepmodeling:devel' into devel
Chengqian-Zhang Mar 19, 2024
c8dad8d
3.19 update
Chengqian-Zhang Mar 19, 2024
d5eaf30
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 19, 2024
70674f6
recover
Chengqian-Zhang Mar 19, 2024
da7e8c2
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 19, 2024
55c5f96
recover
Chengqian-Zhang Mar 19, 2024
b645176
delete denoise
Chengqian-Zhang Mar 19, 2024
03cbaba
delete denoise
Chengqian-Zhang Mar 19, 2024
196eb0d
delete denoise argcheck
Chengqian-Zhang Mar 19, 2024
e137ad5
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 19, 2024
8afc64f
delete denoise file
Chengqian-Zhang Mar 19, 2024
00c8ef2
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 19, 2024
f334119
delete kwargs in property head
Chengqian-Zhang Mar 19, 2024
479b106
fix pre-commit
Chengqian-Zhang Mar 19, 2024
63649db
Merge branch 'devel' into devel
Chengqian-Zhang Mar 20, 2024
5cffff9
task_num->task_dim
Chengqian-Zhang Mar 20, 2024
a364947
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 20, 2024
5bda82e
delete loss
Chengqian-Zhang Mar 20, 2024
b00b678
Merge branch 'deepmodeling:devel' into devel
Chengqian-Zhang Mar 20, 2024
00ec256
Add property loss
Chengqian-Zhang Mar 20, 2024
6396fb3
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 20, 2024
698ef21
fix eval
Chengqian-Zhang Mar 20, 2024
ff61d6b
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 20, 2024
5fafabb
resolve conversation
Chengqian-Zhang Mar 20, 2024
c76e23e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 20, 2024
97bd86f
Add example to tests
Chengqian-Zhang Mar 20, 2024
af6a7fe
Merge branch 'devel' of github.com:Chengqian-Zhang/deepmd-kit into devel
Chengqian-Zhang Mar 20, 2024
cbb9c4b
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 20, 2024
09d4de1
Merge branch 'devel' of https://github.com/deepmodeling/deepmd-kit in…
Chengqian-Zhang Mar 22, 2024
1818271
fix bug of loss_func
Chengqian-Zhang Mar 22, 2024
34ea4d8
Merge branch 'devel' of https://github.com/deepmodeling/deepmd-kit in…
Chengqian-Zhang Mar 22, 2024
3.18 update
Chengqian-Zhang committed Mar 18, 2024
commit b9a7be4a8d340b4816275ed3fc361972eaa67a8c
4 changes: 4 additions & 0 deletions deepmd/pt/loss/__init__.py
@@ -14,11 +14,15 @@
 from .tensor import (
     TensorLoss,
 )
+from .property import (
+    PropertyLoss,
+)

 __all__ = [
     "DenoiseLoss",
     "EnergyStdLoss",
     "EnergySpinLoss",
     "TensorLoss",
     "TaskLoss",
+    "PropertyLoss",
 ]
110 changes: 110 additions & 0 deletions deepmd/pt/loss/property.py
@@ -0,0 +1,110 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import logging
from typing import (
    List,
)

import torch
import torch.nn.functional as F

from deepmd.pt.loss.loss import (
    TaskLoss,
)
from deepmd.pt.utils import (
    env,
)
from deepmd.utils.data import (
    DataRequirementItem,
)

log = logging.getLogger(__name__)


class PropertyLoss(TaskLoss):
    def __init__(
        self,
        task_num,
        loss_func: str = "smooth_mae",
        metric: list = ["mae"],
        **kwargs,
    ):
        r"""Construct a layer to compute loss on properties.

        Parameters
        ----------
        task_num : int
            The dimension of the property to fit.
        loss_func : str
            The loss function, one of "smooth_mae", "mae", "mse", "rmse".
        metric : list
            The metrics to be printed, e.g. ["mae", "rmse"].
        **kwargs
            Other keyword arguments (mean, std, beta).
        """
        super().__init__()
        self.loss_func = loss_func
        self.metric = metric
        self.task_num = task_num
        self.mean = kwargs.get("mean", 0)
        self.std = kwargs.get("std", 1)
        self.beta = kwargs.get("beta", 1.00)

    def forward(self, model_pred, label, natoms, learning_rate, mae=False):
        """Return loss on properties.

        Args:
        - model_pred: Property prediction.
        - label: Target property.
        - natoms: The atom count.

        Returns
        -------
        - loss: Loss to minimize.
        """
        assert label["property"].shape[-1] == self.task_num
        assert model_pred["property"].shape[-1] == self.task_num
        loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0]
        more_loss = {}

        label_mean = torch.tensor(
            self.mean, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
        )
        label_std = torch.tensor(
            self.std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
        )

        # loss on the normalized property
        normed_label = (label["property"] - label_mean) / label_std
        if self.loss_func == "smooth_mae":
            loss += F.smooth_l1_loss(
                normed_label, model_pred["property"], reduction="sum", beta=self.beta
            )
        elif self.loss_func == "mae":
            loss += F.l1_loss(normed_label, model_pred["property"], reduction="sum")
        elif self.loss_func == "mse":
            loss += F.mse_loss(normed_label, model_pred["property"], reduction="sum")
        elif self.loss_func == "rmse":
            loss += torch.sqrt(
                F.mse_loss(normed_label, model_pred["property"], reduction="mean")
            )
        else:
            raise RuntimeError(f"Unknown loss function: {self.loss_func}")

        # metrics, reported against the de-normalized prediction
        denormed_pred = (model_pred["property"] * label_std) + label_mean
        if "smooth_mae" in self.metric:
            more_loss["smooth_mae"] = F.smooth_l1_loss(
                label["property"], denormed_pred, reduction="mean", beta=self.beta
            ).detach()
        if "mae" in self.metric:
            more_loss["mae"] = F.l1_loss(
                label["property"], denormed_pred, reduction="mean"
            ).detach()
        if "mse" in self.metric:
            more_loss["mse"] = F.mse_loss(
                label["property"], denormed_pred, reduction="mean"
            ).detach()
        if "rmse" in self.metric:
            more_loss["rmse"] = torch.sqrt(
                F.mse_loss(label["property"], denormed_pred, reduction="mean")
            ).detach()

        return loss, more_loss

    @property
    def label_requirement(self) -> List[DataRequirementItem]:
        """Return data label requirements needed for this loss calculation."""
        label_requirement = []
        label_requirement.append(
            DataRequirementItem(
                "property",
                ndof=self.task_num,
                atomic=False,
                must=False,
                high_prec=True,
            )
        )
        return label_requirement
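For reviewers who want to exercise the new loss in isolation, here is a minimal sketch, assuming this branch of deepmd-kit is installed on a CPU-only setup (so `env.DEVICE` matches the example tensors); the dict keys and shapes follow the assertions in `forward()` above, the tensors use float64 to match the default `GLOBAL_PT_FLOAT_PRECISION`, and `natoms`/`learning_rate` are placeholders that this loss accepts but does not use.

```python
import torch

from deepmd.pt.loss import PropertyLoss

# Placeholder sizes: 4 frames, a 3-component property per frame.
task_num = 3
loss_fn = PropertyLoss(task_num, loss_func="smooth_mae", metric=["mae", "rmse"])

# Both dicts must carry a "property" tensor whose last dim equals task_num.
model_pred = {"property": torch.randn(4, task_num, dtype=torch.float64)}
label = {"property": torch.randn(4, task_num, dtype=torch.float64)}

# natoms and learning_rate are accepted but unused by this loss.
loss, more_loss = loss_fn(model_pred, label, natoms=8, learning_rate=1e-3)
print(float(loss), {k: float(v) for k, v in more_loss.items()})
```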
1 change: 1 addition & 0 deletions deepmd/pt/model/atomic_model/dp_atomic_model.py
@@ -176,6 +176,7 @@ def forward_atomic(
             gr=rot_mat,
             g2=g2,
             h2=h2,
+            sw=sw,
             fparam=fparam,
             aparam=aparam,
         )
2 changes: 1 addition & 1 deletion deepmd/pt/model/model/__init__.py
@@ -140,7 +140,7 @@ def get_standard_model(model_params):
fitting_net["type"] = fitting_net.get("type", "ener")
fitting_net["ntypes"] = descriptor.get_ntypes()
fitting_net["mixed_types"] = descriptor.mixed_types()
if fitting_net["type"] in ["dipole", "polar"]:
if fitting_net["type"] in ["dipole", "polar","denoise"]:
fitting_net["embedding_width"] = descriptor.get_dim_emb()
fitting_net["dim_descrpt"] = descriptor.get_dim_out()
grad_force = "direct" not in fitting_net["type"]
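For context on how this branch is selected, a hypothetical `model_params` fragment of the kind `get_standard_model()` consumes; the descriptor settings are illustrative placeholders, and the point is only that a `"denoise"` fitting now receives `embedding_width` from the descriptor, just like `"dipole"` and `"polar"`.

```python
# Hypothetical input fragment; all values are placeholders.
model_params = {
    "type_map": ["H", "O"],
    "descriptor": {
        "type": "se_e2_a",
        "rcut": 6.0,
        "sel": [46, 92],
        "neuron": [25, 50, 100],
    },
    # Routed through the branch above: embedding_width is filled in
    # from descriptor.get_dim_emb() for dipole/polar/denoise fittings.
    "fitting_net": {
        "type": "denoise",
        "neuron": [240, 240, 240],
    },
}
```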
81 changes: 81 additions & 0 deletions deepmd/pt/model/model/denoise_model.py
@@ -0,0 +1,81 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from typing import (
    Dict,
    Optional,
)

import torch

from .dp_model import (
    DPModel,
)


class DenoiseModel(DPModel):
    model_type = "denoise"

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

    def forward(
        self,
        coord,
        atype,
        box: Optional[torch.Tensor] = None,
        fparam: Optional[torch.Tensor] = None,
        aparam: Optional[torch.Tensor] = None,
        do_atomic_virial: bool = False,
    ) -> Dict[str, torch.Tensor]:
        model_ret = self.forward_common(
            coord,
            atype,
            box,
            fparam=fparam,
            aparam=aparam,
            do_atomic_virial=do_atomic_virial,
        )
        model_predict = model_ret
        return model_predict

    @torch.jit.export
    def forward_lower(
        self,
        extended_coord,
        extended_atype,
        nlist,
        mapping: Optional[torch.Tensor] = None,
        fparam: Optional[torch.Tensor] = None,
        aparam: Optional[torch.Tensor] = None,
        do_atomic_virial: bool = False,
    ):
        model_ret = self.forward_common_lower(
            extended_coord,
            extended_atype,
            nlist,
            mapping,
            fparam=fparam,
            aparam=aparam,
            do_atomic_virial=do_atomic_virial,
        )
        if self.get_fitting_net() is not None:
            model_predict = {}
            model_predict["atom_energy"] = model_ret["energy"]
            model_predict["energy"] = model_ret["energy_redu"]
            if self.do_grad_r("energy"):
                model_predict["extended_force"] = model_ret["energy_derv_r"].squeeze(-2)
            if self.do_grad_c("energy"):
                model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2)
                if do_atomic_virial:
                    model_predict["extended_virial"] = model_ret[
                        "energy_derv_c"
                    ].squeeze(-3)
            else:
                assert model_ret["dforce"] is not None
                model_predict["dforce"] = model_ret["dforce"]
        else:
            model_predict = model_ret
        return model_predict
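To trace the key remapping that `forward_lower` performs in its fitting-net branch, a self-contained toy (not deepmd-kit code) with illustrative shapes; it mirrors the `do_grad_r` path above, where the per-atom energies, the reduced energy, and the coordinate derivative are renamed for the caller.

```python
import torch

# Stand-in for model_ret; shapes are illustrative (1 frame, 5 extended atoms).
model_ret = {
    "energy": torch.zeros(1, 5, 1),            # per-atom contributions
    "energy_redu": torch.zeros(1, 1),          # reduced over atoms
    "energy_derv_r": torch.zeros(1, 5, 1, 3),  # derivative w.r.t. coordinates
}

# The remapping applied when a fitting net is present and do_grad_r is true.
model_predict = {
    "atom_energy": model_ret["energy"],
    "energy": model_ret["energy_redu"],
    "extended_force": model_ret["energy_derv_r"].squeeze(-2),
}
print({k: tuple(v.shape) for k, v in model_predict.items()})
```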
8 changes: 8 additions & 0 deletions deepmd/pt/model/model/dp_model.py
@@ -31,6 +31,9 @@
 from deepmd.pt.model.task.property import (
     PropertyFittingNet,
 )
+from deepmd.pt.model.task.denoise import (
+    DenoiseFittingNet,
+)

 from .make_model import (
     make_model,
@@ -63,6 +66,9 @@ def __new__(
         from deepmd.pt.model.model.property_model import (
             PropertyModel,
         )
+        from deepmd.pt.model.model.denoise_model import (
+            DenoiseModel,
+        )

         if atomic_model_ is not None:
             fitting = atomic_model_.fitting_net
@@ -84,6 +90,8 @@ def __new__(
             cls = DOSModel
         elif isinstance(fitting, PropertyFittingNet):
             cls = PropertyModel
+        elif isinstance(fitting, DenoiseFittingNet):
+            cls = DenoiseModel
         # else: unknown fitting type, fall back to DPModel
         return super().__new__(cls)
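The dispatch in `DPModel.__new__` above follows the subclass-picking `__new__` pattern; here is a self-contained toy version of the idea (not deepmd-kit code) for readers unfamiliar with it.

```python
class Fitting: ...


class DenoiseFitting(Fitting): ...


class Model:
    def __new__(cls, fitting: Fitting):
        # Pick the concrete subclass from the fitting type,
        # as DPModel.__new__ does above.
        if cls is Model and isinstance(fitting, DenoiseFitting):
            cls = DenoiseLike
        return super().__new__(cls)

    def __init__(self, fitting: Fitting):
        self.fitting = fitting


class DenoiseLike(Model):
    pass


# Constructing the base class yields the subclass matching the fitting.
assert isinstance(Model(DenoiseFitting()), DenoiseLike)
```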

Loading