-
Notifications
You must be signed in to change notification settings - Fork 525
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
chore: support preset bias of atomic model output (#4116)
<!-- This is an auto-generated comment: release notes by coderabbit.ai --> ## Summary by CodeRabbit - **New Features** - Introduced a new `preset_out_bias` parameter for enhanced model configuration, allowing users to define biases. - Added documentation for the `preset_out_bias` parameter in the model arguments for improved clarity. - **Bug Fixes** - Implemented validation to ensure the `preset_out_bias` length matches the model's type map, preventing runtime errors. - **Tests** - Added unit tests for the `get_model` function to validate model attributes and ensure proper error handling for the new bias parameter. <!-- end of auto-generated comment: release notes by coderabbit.ai --> --------- Signed-off-by: Han Wang <[email protected]> Signed-off-by: Jinzhe Zeng <[email protected]> Co-authored-by: Han Wang <[email protected]> Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng <[email protected]>
- Loading branch information
1 parent
a5346f2
commit 85bd386
Showing 5 changed files with 119 additions and 9 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
# SPDX-License-Identifier: LGPL-3.0-or-later | ||
import copy | ||
import unittest | ||
|
||
import numpy as np | ||
import torch | ||
|
||
from deepmd.pt.model.model import ( | ||
get_model, | ||
) | ||
from deepmd.pt.utils import ( | ||
env, | ||
) | ||
|
||
# Precision used for model tensors in these tests.
dtype = torch.float64

# Minimal "se_e2_a" model configuration with a three-type type map.
# ``preset_out_bias`` supplies one bias entry per type in ``type_map``:
# a ``None`` entry leaves that type's bias unset (presumably filled from
# data statistics — confirm against get_model), while list entries pin
# the output bias for types "H" and "B".
model_se_e2_a = {
    "type_map": ["O", "H", "B"],
    "descriptor": {
        "type": "se_e2_a",
        "sel": [46, 92, 4],
        "rcut_smth": 0.50,
        "rcut": 4.00,
        "neuron": [25, 50, 100],
        "resnet_dt": False,
        "axis_neuron": 16,
        "seed": 1,
    },
    "fitting_net": {
        "neuron": [24, 24, 24],
        "resnet_dt": True,
        "seed": 1,
    },
    "data_stat_nbatch": 20,
    # Exclusion lists exercised by test_model_attr / test_notset_model_attr.
    "atom_exclude_types": [1],
    "pair_exclude_types": [[1, 2]],
    "preset_out_bias": {
        "energy": [
            None,
            [1.0],
            [3.0],
        ]
    },
}
|
||
|
||
class TestGetModel(unittest.TestCase): | ||
def test_model_attr(self): | ||
model_params = copy.deepcopy(model_se_e2_a) | ||
self.model = get_model(model_params).to(env.DEVICE) | ||
atomic_model = self.model.atomic_model | ||
self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) | ||
self.assertEqual( | ||
atomic_model.preset_out_bias, | ||
{ | ||
"energy": [ | ||
None, | ||
np.array([1.0]), | ||
np.array([3.0]), | ||
] | ||
}, | ||
) | ||
self.assertEqual(atomic_model.atom_exclude_types, [1]) | ||
self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]]) | ||
|
||
def test_notset_model_attr(self): | ||
model_params = copy.deepcopy(model_se_e2_a) | ||
model_params.pop("atom_exclude_types") | ||
model_params.pop("pair_exclude_types") | ||
model_params.pop("preset_out_bias") | ||
self.model = get_model(model_params).to(env.DEVICE) | ||
atomic_model = self.model.atomic_model | ||
self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) | ||
self.assertEqual(atomic_model.preset_out_bias, None) | ||
self.assertEqual(atomic_model.atom_exclude_types, []) | ||
self.assertEqual(atomic_model.pair_exclude_types, []) | ||
|
||
def test_preset_wrong_len(self): | ||
model_params = copy.deepcopy(model_se_e2_a) | ||
model_params["preset_out_bias"] = {"energy": [None]} | ||
with self.assertRaises(ValueError): | ||
self.model = get_model(model_params).to(env.DEVICE) |