fix pre-commit ci fail
HydrogenSulfate committed Nov 14, 2024
1 parent f0fb516 commit f1fe02a
Showing 2 changed files with 26 additions and 14 deletions.
9 changes: 6 additions & 3 deletions deepmd/pd/utils/utils.py
@@ -4,6 +4,7 @@
 )
 
 from typing import (
+    TYPE_CHECKING,
     overload,
 )
@@ -13,15 +14,17 @@
 import paddle.nn.functional as F
 
 from deepmd.dpmodel.common import PRECISION_DICT as NP_PRECISION_DICT
-from deepmd.pd.model.network.init import (
-    PaddleGenerator,
-)
 
 from .env import (
     DEVICE,
 )
 from .env import PRECISION_DICT as PD_PRECISION_DICT
 
+if TYPE_CHECKING:
+    from deepmd.pd.model.network.init import (
+        PaddleGenerator,
+    )
 
 
 class ActivationFn(paddle.nn.Layer):
     def __init__(self, activation: str | None):
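Context for this hunk: importing PaddleGenerator under typing.TYPE_CHECKING keeps the name visible to type checkers while removing it from the runtime import graph, which is the usual way to satisfy pre-commit import linting and to break import cycles. A minimal standalone sketch of the pattern, assuming only the stdlib; make_generator and its seed parameter are hypothetical, for illustration only:

from __future__ import annotations  # annotations become lazily evaluated

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by mypy/pyright only; never executed, so no runtime import or cycle.
    from deepmd.pd.model.network.init import PaddleGenerator


def make_generator(seed: int) -> PaddleGenerator:  # hypothetical helper
    # With deferred annotations the return type is never evaluated at runtime,
    # so PaddleGenerator does not need to be importable outside type checking.
    raise NotImplementedError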
31 changes: 20 additions & 11 deletions source/tests/pd/model/test_model.py
@@ -1,7 +1,9 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
-import collections
 import json
 import unittest
+from typing import (
+    NamedTuple,
+)
 
 import numpy as np
 import paddle
@@ -54,14 +56,19 @@
     energy_data_requirement,
 )
 
-VariableState = collections.namedtuple("VariableState", ["value", "gradient"])
+
+class VariableState(NamedTuple):
+    value: np.ndarray
+    gradient: np.ndarray
 
 
 def paddle2tf(paddle_name, last_layer_id=None):
     fields = paddle_name.split(".")
     offset = int(fields[3] == "networks") + 1
     element_id = int(fields[2 + offset])
     if fields[1] == "descriptor":
+        if fields[2].startswith("compress_"):
+            return None
         layer_id = int(fields[4 + offset]) + 1
         weight_type = fields[5 + offset]
         ret = "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id)
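Two independent fixes land in the hunk above: a typed NamedTuple replaces the untyped collections.namedtuple, and paddle2tf gains a None sentinel for compression parameters that have no TensorFlow counterpart. A small self-contained sketch of the first change, showing that the two spellings are interchangeable as tuples (names reused from the hunk, values illustrative):

import collections
from typing import NamedTuple

import numpy as np

# Old spelling: fields exist but carry no type information for checkers.
VariableStateOld = collections.namedtuple("VariableStateOld", ["value", "gradient"])


# New spelling: identical tuple behavior, plus per-field annotations.
class VariableState(NamedTuple):
    value: np.ndarray
    gradient: np.ndarray


vs = VariableState(value=np.zeros(3), gradient=np.ones(3))
assert vs.value is vs[0] and vs.gradient is vs[1]  # still a plain tuple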
@@ -78,7 +85,7 @@ def paddle2tf(paddle_name, last_layer_id=None):
 
 
 class DpTrainer:
-    def __init__(self):
+    def __init__(self) -> None:
         with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin:
             content = fin.read()
             config = json.loads(content)
@@ -260,15 +267,15 @@ def _get_feed_dict(self, batch, place_holders):
 
 
 class TestEnergy(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.dp_trainer = DpTrainer()
         self.wanted_step = 0
         for key in dir(self.dp_trainer):
             if not key.startswith("_") or key == "get_intermediate_state":
                 value = getattr(self.dp_trainer, key)
                 setattr(self, key, value)
 
-    def test_consistency(self):
+    def test_consistency(self) -> None:
         batch, head_dict, stat_dict, vs_dict = self.dp_trainer.get_intermediate_state(
             self.wanted_step
         )
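The -> None annotations added to __init__, setUp, and test_consistency all follow one convention: procedures that return nothing say so explicitly, which strict annotation linters (for example ruff's ANN rules, commonly run from pre-commit) require. A minimal sketch of the annotated-test pattern; the class and values are illustrative only, not from this repo:

import unittest


class ExampleTest(unittest.TestCase):  # hypothetical mirror of TestEnergy
    def setUp(self) -> None:
        self.wanted_step = 0

    def test_wanted_step(self) -> None:
        self.assertEqual(self.wanted_step, 0)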
@@ -300,7 +307,7 @@ def test_consistency(self):
             limit_pref_f=self.limit_pref_f,
         )
 
-        # Keep statistics consistency between 2 implentations
+        # Keep statistics consistency between 2 implementations
         my_em = my_model.get_descriptor()
         mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4])
         stddev = stat_dict["descriptor.stddev"].reshape(
@@ -314,10 +321,12 @@ def test_consistency(self):
             stat_dict["fitting_net.bias_atom_e"], place=DEVICE
         )
 
-        # Keep parameter value consistency between 2 implentations
+        # Keep parameter value consistency between 2 implementations
         for name, param in my_model.named_parameters():
             name = name.replace("sea.", "")
             var_name = paddle2tf(name, last_layer_id=len(self.n_neuron))
+            if var_name is None:
+                continue
             var = vs_dict[var_name].value
             with paddle.no_grad():
                 src = paddle.to_tensor(var)
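Because paddle2tf can now return None, every caller filters that sentinel before indexing vs_dict, as the added continue above does. A standalone sketch of the same skip pattern; the mapping rule and parameter names here are hypothetical, for illustration only:

def to_tf_name(pd_name: str) -> str | None:
    # None is the sentinel: the parameter exists only on the Paddle side.
    if pd_name.startswith("compress_"):
        return None
    return "filter_type_all/%s:0" % pd_name


params = {"matrix_1_0": 0.5, "compress_data_0": 1.5}
for pd_name, value in params.items():
    tf_name = to_tf_name(pd_name)
    if tf_name is None:
        continue  # nothing to compare against on the TF side
    print(tf_name, value)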
@@ -399,12 +408,10 @@ def test_consistency(self):
             .detach()
             .numpy(),
         )
-        optimizer = paddle.optimizer.Adam(
-            learning_rate=cur_lr, parameters=my_model.parameters()
-        )
+        optimizer = paddle.optimizer.Adam(cur_lr, parameters=my_model.parameters())
         optimizer.clear_grad()
 
-        def step(step_id):
+        def step(step_id) -> None:

Check notice (Code scanning / CodeQL, test): Unused local variable — Variable step is not used.

             bdata = self.training_data.get_trainning_batch()

Check notice (Code scanning / CodeQL, test): Unused local variable — Variable bdata is not used.

             optimizer.clear_grad()

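The optimizer rewrite above is purely stylistic: learning_rate is the first positional parameter of paddle.optimizer.Adam, so both spellings construct the same optimizer. A runnable sketch of the same setup with a stand-in model (the Linear layer and shapes are illustrative only):

import paddle

model = paddle.nn.Linear(4, 2)  # stand-in for my_model
cur_lr = 1e-3

# Equivalent to Adam(learning_rate=cur_lr, parameters=model.parameters()).
optimizer = paddle.optimizer.Adam(cur_lr, parameters=model.parameters())

loss = model(paddle.rand([8, 4])).mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()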
@@ -414,6 +421,8 @@ def step(step_id):
             for name, param in my_model.named_parameters():
                 name = name.replace("sea.", "")
                 var_name = paddle2tf(name, last_layer_id=len(self.n_neuron))
+                if var_name is None:
+                    continue
                 var_grad = vs_dict[var_name].gradient
                 param_grad = param.grad.cpu()
                 var_grad = paddle.to_tensor(var_grad).to(device="cpu")
