Feat: add dipole consistency test #3321

Merged 37 commits on Feb 23, 2024.
Diff shown below: changes from 8 commits.

Commits (37):
e9dcf0f  feat: add dipole consistency test (anyangml, Feb 22, 2024)
fb7447c  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
2ad5db7  fix: add serialize, deserialize (anyangml, Feb 22, 2024)
3f38853  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
5e78ce3  fix: UTs (anyangml, Feb 22, 2024)
e857afe  fix: UTs (anyangml, Feb 22, 2024)
cecdb8c  fix: UTs (anyangml, Feb 22, 2024)
501c534  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
a3d4b00  fix: UTs (anyangml, Feb 22, 2024)
c5fd81b  fix: UTs (anyangml, Feb 22, 2024)
eee3048  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
6da24c0  fix: UTs (anyangml, Feb 22, 2024)
e9d64cf  fix: UTs (anyangml, Feb 22, 2024)
8694af5  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
58543e5  fix: UTs (anyangml, Feb 22, 2024)
0c0f28f  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
36188a8  fix: UTs (anyangml, Feb 22, 2024)
b453471  chore: move atom_ener to energyfitting (anyangml, Feb 22, 2024)
13df1db  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
6052f7e  fix: UTs (anyangml, Feb 22, 2024)
511e15e  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
15b7425  fix: UTs (anyangml, Feb 22, 2024)
737cf37  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
186a15e  fix: UTs (anyangml, Feb 23, 2024)
0a84f33  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
abad3dd  chore: refactor (anyangml, Feb 23, 2024)
3e0f3d8  fix: se_r, se_t (anyangml, Feb 23, 2024)
d2cbe46  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
8bbaf25  chore: refactor (anyangml, Feb 23, 2024)
88d81a0  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
4c20c10  chore: typo (anyangml, Feb 23, 2024)
f538039  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
3e2d1f9  chore: refactor (anyangml, Feb 23, 2024)
9762912  chore: typo (anyangml, Feb 23, 2024)
344910a  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
9fe7bbd  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
9ac39b8  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
2 changes: 2 additions & 0 deletions deepmd/dpmodel/fitting/dipole_fitting.py
@@ -102,6 +102,8 @@ def __init__(
r_differentiable: bool = True,
c_differentiable: bool = True,
old_impl=False,
# not used
seed: Optional[int] = None,
):
# seed, uniform_seed are not included
if tot_ener_zero:
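The no-op `seed` argument appears to exist so one shared keyword dict can be splatted into every backend's constructor by the consistency tests; the DP backend simply accepts and ignores it. A minimal sketch of that pattern, assuming the constructor mirrors the `data` and `addtional_data` dicts from test_dipole.py below (values illustrative):

data = {"neuron": [5, 5, 5], "resnet_dt": True, "precision": "float64", "seed": 20240217}
# The DP backend swallows `seed` without using it; the TF/PT backends consume it.
fitting = DipoleFittingDP(
    var_name="dipole", ntypes=2, dim_descrpt=20, embedding_width=30, **data
)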
76 changes: 70 additions & 6 deletions deepmd/tf/fit/dipole.py
@@ -38,8 +38,12 @@ class DipoleFittingSeA(Fitting):

Parameters
----------
descrpt : tf.Tensor
The descrptor
ntypes
The ntypes of the descrptor :math:`\mathcal{D}`
dim_descrpt
The dimension of the descrptor :math:`\mathcal{D}`
embedding_width
The rotation matrix dimension of the descrptor :math:`\mathcal{D}`
neuron : List[int]
Number of neurons in each hidden layer of the fitting net
resnet_dt : bool
@@ -59,7 +63,9 @@ class DipoleFittingSeA(Fitting):

def __init__(
self,
descrpt: tf.Tensor,
ntypes: int,
dim_descrpt: int,
embedding_width: int,
neuron: List[int] = [120, 120, 120],
resnet_dt: bool = True,
sel_type: Optional[List[int]] = None,
@@ -70,8 +76,8 @@ def __init__(
**kwargs,
) -> None:
"""Constructor."""
self.ntypes = descrpt.get_ntypes()
self.dim_descrpt = descrpt.get_dim_out()
self.ntypes = ntypes
self.dim_descrpt = dim_descrpt
self.n_neuron = neuron
self.resnet_dt = resnet_dt
self.sel_type = sel_type
@@ -83,9 +89,10 @@ def __init__(
self.seed = seed
self.uniform_seed = uniform_seed
self.seed_shift = one_layer_rand_seed_shift()
self.activation_function_name = activation_function
self.fitting_activation_fn = get_activation_func(activation_function)
self.fitting_precision = get_precision(precision)
self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
self.dim_rot_mat_1 = embedding_width
self.dim_rot_mat = self.dim_rot_mat_1 * 3
self.useBN = False
self.fitting_net_variables = None
@@ -327,3 +334,60 @@ def get_loss(self, loss: dict, lr) -> Loss:
tensor_size=3,
label_name="dipole",
)

def serialize(self, suffix: str) -> dict:
"""Serialize the model.

Returns
-------
dict
The serialized data
"""
data = {
"var_name": "energy",
"ntypes": self.ntypes,
"dim_descrpt": self.dim_descrpt,
"embedding_width": self.dim_rot_mat_1,
# very bad design: type embedding is not passed to the class
# TODO: refactor the class
"distinguish_types": True,
"dim_out": 3,
"neuron": self.n_neuron,
"resnet_dt": self.resnet_dt,
"activation_function": self.activation_function_name,
"precision": self.fitting_precision.name,
"exclude_types": [],
"nets": self.serialize_network(
ntypes=self.ntypes,
# TODO: consider type embeddings
ndim=1,
in_dim=self.dim_descrpt,
neuron=self.n_neuron,
activation_function=self.activation_function_name,
resnet_dt=self.resnet_dt,
variables=self.fitting_net_variables,
suffix=suffix,
),
}
return data

@classmethod
def deserialize(cls, data: dict, suffix: str):
"""Deserialize the model.

Parameters
----------
data : dict
The serialized data

Returns
-------
Model
The deserialized model
"""
fitting = cls(**data)
fitting.fitting_net_variables = cls.deserialize_network(
data["nets"],
suffix=suffix,
)
return fitting
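The new serialize/deserialize pair is what lets the consistency framework move a fitting between backends as a plain dict. A hedged sketch of the intended roundtrip, assuming the TF variables have already been populated (e.g. after build and a training step):

# Serialize the TF fitting into a backend-neutral dict ...
serialized = tf_fitting.serialize(suffix="_dipole")
# ... then rebuild an equivalent fitting from it. The same dict layout is
# meant to be consumable by the DP/PT deserialize() implementations as well.
restored = DipoleFittingSeA.deserialize(serialized, suffix="_dipole")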
6 changes: 4 additions & 2 deletions deepmd/tf/fit/ener.py
@@ -95,8 +95,10 @@ class EnerFitting(Fitting):

Parameters
----------
descrpt
The descrptor :math:`\mathcal{D}`
ntypes
The ntypes of the descrptor :math:`\mathcal{D}`
dim_descrpt
The dimension of the descrptor :math:`\mathcal{D}`
neuron
Number of neurons :math:`N` in each hidden layer of the fitting net
resnet_dt
32 changes: 32 additions & 0 deletions source/tests/consistent/fitting/common.py
@@ -42,3 +42,35 @@ def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, suffix):
t_atype: atype,
**feed_dict,
}


class DipoleFittingTest:
"""Useful utilities for descriptor tests."""

def build_tf_fitting(self, obj, inputs, rot_mat, natoms, atype, fparam, suffix):
t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs")
t_rot_mat = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_rot_mat")
t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms")
t_atype = tf.placeholder(tf.int32, [None], name="i_atype")
extras = {}
feed_dict = {}
if fparam is not None:
t_fparam = tf.placeholder(
GLOBAL_TF_FLOAT_PRECISION, [None], name="i_fparam"
)
extras["fparam"] = t_fparam
feed_dict[t_fparam] = fparam
t_out = obj.build(
t_inputs,
t_rot_mat,
t_natoms,
{"atype": t_atype, **extras},
suffix=suffix,
)
return [t_out], {
t_inputs: inputs,
t_rot_mat: rot_mat,
t_natoms: natoms,
t_atype: atype,
**feed_dict,
}
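The helper only assembles the graph; it hands back a (fetches, feed_dict) pair for the caller to run. A minimal consumption sketch, assuming deepmd.tf's TF1-style session API (variable names illustrative):

fetches, feed = self.build_tf_fitting(
    obj, inputs, rot_mat, natoms, atype, None, suffix="_dipole"
)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    (dipole_out,) = sess.run(fetches, feed_dict=feed)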
191 changes: 191 additions & 0 deletions source/tests/consistent/fitting/test_dipole.py
@@ -0,0 +1,191 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import unittest
from typing import (
Any,
Tuple,
)

import numpy as np

from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting as DipoleFittingDP
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
)

from ..common import (
INSTALLED_PT,
INSTALLED_TF,
CommonTest,
parameterized,
)
from .common import (
DipoleFittingTest,
)

if INSTALLED_PT:
import torch

from deepmd.pt.model.task.dipole import DipoleFittingNet as DipoleFittingPT
from deepmd.pt.utils.env import DEVICE as PT_DEVICE
else:
DipoleFittingPT = object
if INSTALLED_TF:
from deepmd.tf.fit.dipole import DipoleFittingSeA as DipoleFittingTF
else:
DipoleFittingTF = object
from deepmd.utils.argcheck import (
fitting_dipole,
)


@parameterized(
(True, False), # resnet_dt
("float64", "float32"), # precision
(True, False), # mixed_types
)
class TestDipole(CommonTest, DipoleFittingTest, unittest.TestCase):
@property
def data(self) -> dict:
(
resnet_dt,
precision,
mixed_types,
) = self.param
return {
"neuron": [5, 5, 5],
"resnet_dt": resnet_dt,
"precision": precision,
"seed": 20240217,
}

@property
def skip_tf(self) -> bool:
(
resnet_dt,
precision,
mixed_types,
) = self.param
# TODO: mixed_types
return mixed_types or CommonTest.skip_pt

@property
def skip_pt(self) -> bool:
(
resnet_dt,
precision,
mixed_types,
) = self.param
return CommonTest.skip_pt

tf_class = DipoleFittingTF
dp_class = DipoleFittingDP
pt_class = DipoleFittingPT
args = fitting_dipole()

def setUp(self):
CommonTest.setUp(self)

self.ntypes = 2
self.natoms = np.array([6, 6, 2, 4], dtype=np.int32)
self.inputs = np.ones((1, 6, 20), dtype=GLOBAL_NP_FLOAT_PRECISION)
self.gr = np.ones((1, 6, 30, 3), dtype=GLOBAL_NP_FLOAT_PRECISION)
self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32)
# inconsistent if not sorted
self.atype.sort()

@property
def addtional_data(self) -> dict:
(
resnet_dt,
precision,
mixed_types,
) = self.param
return {
"ntypes": self.ntypes,
"dim_descrpt": self.inputs.shape[-1],
"mixed_types": mixed_types,
"var_name": "dipole",
"embedding_width": 30,
}

def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]:
(
resnet_dt,
precision,
mixed_types,
) = self.param
return self.build_tf_fitting(
obj,
self.inputs.ravel(),
self.gr.ravel(),  # rot_mat (assumed fix; this argument is missing in this snapshot)
self.natoms,
self.atype,
None,
suffix,
)

def eval_pt(self, pt_obj: Any) -> Any:
(
resnet_dt,  # CodeQL notice (test): unused local variable
precision,
mixed_types,
) = self.param
return (
pt_obj(
torch.from_numpy(self.inputs).to(device=PT_DEVICE),
torch.from_numpy(self.atype.reshape(1, -1)).to(device=PT_DEVICE),
torch.from_numpy(self.gr).to(device=PT_DEVICE),
None,
)["dipole"]
.detach()
.cpu()
.numpy()
)

def eval_dp(self, dp_obj: Any) -> Any:
(
resnet_dt,
precision,
mixed_types,
) = self.param
return dp_obj(
self.inputs,
self.atype.reshape(1, -1),
self.gr,
None,
)["dipole"]

def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]:
if backend == self.RefBackend.TF:
# shape is not same
ret = ret[0].reshape(-1, self.natoms[0], 1)
return (ret,)

@property
def rtol(self) -> float:
"""Relative tolerance for comparing the return value."""
(
resnet_dt,
precision,
mixed_types,
) = self.param
if precision == "float64":
return 1e-10
elif precision == "float32":
return 1e-4
else:
raise ValueError(f"Unknown precision: {precision}")

@property
def atol(self) -> float:
"""Absolute tolerance for comparing the return value."""
(
resnet_dt,
precision,
mixed_types,
) = self.param
if precision == "float64":
return 1e-10
elif precision == "float32":
return 1e-4
else:
raise ValueError(f"Unknown precision: {precision}")
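Presumably CommonTest boils each cross-backend check down to an allclose comparison between the reference backend and the backend under test, using the precision-dependent tolerances above. A standalone sketch with hypothetical outputs:

import numpy as np

ref = dp_out.reshape(-1)   # e.g. from eval_dp (reference backend)
new = pt_out.reshape(-1)   # e.g. from eval_pt, after extract_ret
# float64 parameterization: both backends must agree to about 1e-10
np.testing.assert_allclose(new, ref, rtol=1e-10, atol=1e-10)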
1 change: 1 addition & 0 deletions source/tests/tf/data_modifier/dipole.json
@@ -23,6 +23,7 @@
"seed": 1
},
"fitting_net": {
"embedding_width": 100,
"type": "dipole",
"sel_type": [
0
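After this change the test's fitting_net block carries the descriptor's rotation-matrix width (embedding_width) explicitly. A hypothetical complete block; only the keys visible in the diff are confirmed, the rest are illustrative:

"fitting_net": {
    "type": "dipole",
    "embedding_width": 100,
    "sel_type": [0],
    "neuron": [100, 100, 100],
    "resnet_dt": true,
    "seed": 1
}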
3 changes: 3 additions & 0 deletions source/tests/tf/test_data_large_batch.py
@@ -114,6 +114,7 @@ def test_data_mixed_type(self):
descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
typeebd_param = jdata["model"]["type_embedding"]
typeebd = TypeEmbedNet(
@@ -311,6 +312,7 @@ def test_stripped_data_mixed_type(self):
descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
typeebd_param = jdata["model"]["type_embedding"]
typeebd = TypeEmbedNet(
@@ -508,6 +510,7 @@ def test_compressible_data_mixed_type(self):
descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
typeebd_param = jdata["model"]["type_embedding"]
typeebd = TypeEmbedNet(
Expand Down
1 change: 1 addition & 0 deletions source/tests/tf/test_data_modifier_shuffle.py
@@ -153,6 +153,7 @@ def _setUp_jdata(self):
},
"fitting_net": {
"type": "dipole",
"embedding_width": 4,
"sel_type": [1, 3],
"neuron": [10],
"resnet_dt": True,