Merge pull request #529 from zhouwei25/deepmd2paddle
DeePMD in Paddle; as an example, only the 'water_se_a' model is ported.
amcadmus authored Apr 19, 2021
2 parents e988870 + f813c77 commit ddcb9d7
Showing 14 changed files with 702 additions and 998 deletions.
24 changes: 12 additions & 12 deletions deepmd/common.py
@@ -19,8 +19,8 @@
import numpy as np
import yaml

-from deepmd.env import op_module, tf
-from deepmd.env import GLOBAL_TF_FLOAT_PRECISION, GLOBAL_NP_FLOAT_PRECISION
+from deepmd.env import op_module, tf, paddle
+from deepmd.env import GLOBAL_TF_FLOAT_PRECISION, GLOBAL_PD_FLOAT_PRECISION, GLOBAL_NP_FLOAT_PRECISION

if TYPE_CHECKING:
    _DICT_VAL = TypeVar("_DICT_VAL")
@@ -34,10 +34,10 @@

# define constants
PRECISION_DICT = {
"default": GLOBAL_TF_FLOAT_PRECISION,
"float16": tf.float16,
"float32": tf.float32,
"float64": tf.float64,
"default": GLOBAL_PD_FLOAT_PRECISION,
"float16": np.float16,
"float32": np.float32,
"float64": np.float64,
}
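The named entries now map to plain NumPy dtypes, while "default" resolves to GLOBAL_PD_FLOAT_PRECISION (a dtype string such as "float32", see deepmd/env.py below). A minimal sketch of how such a table might be consumed, assuming deepmd.common imports cleanly; the helper name resolve_precision and the error message are illustrative and not taken from this diff:

# Illustrative only: look up a dtype by name in the table above.
from deepmd.common import PRECISION_DICT


def resolve_precision(name: str = "default"):
    """Return the dtype registered under `name`, e.g. np.float32."""
    try:
        return PRECISION_DICT[name]
    except KeyError:
        raise ValueError(
            f"unknown precision '{name}', expected one of {list(PRECISION_DICT)}"
        ) from None


# resolve_precision("float64") -> numpy.float64 with the dict shown above;
# resolve_precision() -> GLOBAL_PD_FLOAT_PRECISION (the default entry)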


@@ -68,11 +68,11 @@ def gelu(x: tf.Tensor) -> tf.Tensor:
data_requirement = {}

ACTIVATION_FN_DICT = {
"relu": tf.nn.relu,
"relu6": tf.nn.relu6,
"softplus": tf.nn.softplus,
"sigmoid": tf.sigmoid,
"tanh": tf.nn.tanh,
"relu": paddle.nn.functional.relu,
"relu6": paddle.nn.functional.relu6,
"softplus": paddle.nn.functional.softplus,
"sigmoid": paddle.nn.functional.sigmoid,
"tanh": paddle.nn.functional.tanh,
"gelu": gelu,
}

@@ -367,7 +367,7 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]:

def get_activation_func(
    activation_fn: "_ACTIVATION",
-) -> Callable[[tf.Tensor], tf.Tensor]:
+):
    """Get activation function callable based on string name.
    Parameters
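The dropped return annotation reflects that the dictionary now holds Paddle callables instead of TF ops. A minimal sketch of the lookup this signature points at, assuming the body (not shown in this hunk) simply indexes ACTIVATION_FN_DICT; the error message is an assumption:

# Illustrative lookup consistent with ACTIVATION_FN_DICT above; not copied
# from the unrendered remainder of deepmd/common.py.
from deepmd.common import ACTIVATION_FN_DICT


def get_activation_func(activation_fn: str):
    if activation_fn not in ACTIVATION_FN_DICT:
        raise RuntimeError(f"{activation_fn} is not a valid activation function")
    return ACTIVATION_FN_DICT[activation_fn]


# get_activation_func("tanh") now yields paddle.nn.functional.tanh, so the old
# Callable[[tf.Tensor], tf.Tensor] annotation no longer fits.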
367 changes: 155 additions & 212 deletions deepmd/descriptor/se_a.py

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions deepmd/entrypoints/test.py
@@ -242,6 +242,7 @@ def test_ener(
    numb_test = min(nframes, numb_test)

    coord = test_data["coord"][:numb_test].reshape([numb_test, -1])
+
    box = test_data["box"][:numb_test]
    if dp.has_efield:
        efield = test_data["efield"][:numb_test].reshape([numb_test, -1])
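For context, the lines above take the first numb_test frames and flatten each frame's coordinates into one row. A self-contained illustration with made-up shapes (5 frames of 3 atoms):

import numpy as np

# Hypothetical test data: 5 frames, 3 atoms, 3 Cartesian components per atom.
test_data = {"coord": np.zeros((5, 3, 3))}
numb_test = 4

coord = test_data["coord"][:numb_test].reshape([numb_test, -1])
print(coord.shape)  # (4, 9): one flattened coordinate row per tested frame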
7 changes: 5 additions & 2 deletions deepmd/entrypoints/train.py
@@ -18,6 +18,8 @@
from deepmd.utils.compat import convert_input_v0_v1
from deepmd.utils.data_system import DeepmdDataSystem

+from collections import defaultdict

if TYPE_CHECKING:
    from deepmd.run_options import TFServerV1

@@ -260,6 +262,7 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions):
    # setup data modifier
    modifier: Optional[DipoleChargeModifier]
    modi_data = jdata["model"].get("modifier", None)
+
    if modi_data is not None:
        if modi_data["type"] == "dipole_charge":
            modifier = DipoleChargeModifier(
@@ -287,12 +290,12 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions):
    data.print_summary(run_opt, sys_probs=sys_probs, auto_prob_style=auto_prob_style)
    data.add_dict(data_requirement)

-    # build the model with stats from the first system
+    # # build the model with stats from the first system
    model.build(data, stop_batch)

    # train the model with the provided systems in a cyclic way
    start_time = time.time()
-    model.train(data)
+    model.train(data, stop_batch)
    end_time = time.time()
    log.info("finished training")
    log.info(f"wall time: {(end_time - start_time):.3f} s")
22 changes: 8 additions & 14 deletions deepmd/env.py
@@ -14,6 +14,8 @@

# import tensorflow v1 compatability
try:
+    import paddle
+    import paddle_ops
    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()
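The new imports sit inside the same try block that already guards the TF v1 compat import. A minimal sketch of that guarded-import pattern; the except branch and its message are assumptions, since this hunk only shows the try body:

# Sketch of the guarded-import pattern; the except clause is assumed.
try:
    import paddle
    import paddle_ops  # custom-op package built for the Paddle port
    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()
except ImportError as err:
    raise ImportError(
        "deepmd requires paddle, its custom ops (paddle_ops) and tensorflow"
    ) from err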
@@ -181,11 +183,13 @@ def _get_package_constants(

if GLOBAL_CONFIG["precision"] == "-DHIGH_PREC":
    GLOBAL_TF_FLOAT_PRECISION = tf.float64
+    GLOBAL_PD_FLOAT_PRECISION = "float64"
    GLOBAL_NP_FLOAT_PRECISION = np.float64
    GLOBAL_ENER_FLOAT_PRECISION = np.float64
    global_float_prec = "double"
else:
    GLOBAL_TF_FLOAT_PRECISION = tf.float32
+    GLOBAL_PD_FLOAT_PRECISION = "float32"
    GLOBAL_NP_FLOAT_PRECISION = np.float32
    GLOBAL_ENER_FLOAT_PRECISION = np.float64
    global_float_prec = "float"
@@ -207,19 +211,9 @@ def global_cvt_2_tf_float(xx: tf.Tensor) -> tf.Tensor:
    return tf.cast(xx, GLOBAL_TF_FLOAT_PRECISION)


-def global_cvt_2_ener_float(xx: tf.Tensor) -> tf.Tensor:
-    """Cast tensor to globally set energy precision.
-    Parameters
-    ----------
-    xx : tf.Tensor
-        input tensor
-    Returns
-    -------
-    tf.Tensor
-        output tensor cast to `GLOBAL_ENER_FLOAT_PRECISION`
-    """
-    return tf.cast(xx, GLOBAL_ENER_FLOAT_PRECISION)
+def global_cvt_2_pd_float(xx: paddle.Tensor) -> paddle.Tensor:
+    return paddle.cast(xx, GLOBAL_PD_FLOAT_PRECISION)


+def global_cvt_2_ener_float(xx: paddle.Tensor) -> paddle.Tensor:
+    return paddle.cast(xx, GLOBAL_ENER_FLOAT_PRECISION)
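GLOBAL_PD_FLOAT_PRECISION is kept as a dtype string ("float32" or "float64"), which is the form paddle.cast accepts, while the energy precision stays float64 in both configurations. A small usage sketch, assuming Paddle is installed and the package is built so that deepmd.env imports cleanly:

import paddle

from deepmd.env import (
    GLOBAL_PD_FLOAT_PRECISION,
    global_cvt_2_pd_float,
    global_cvt_2_ener_float,
)

x = paddle.to_tensor([1.0, 2.0], dtype="float32")
print(GLOBAL_PD_FLOAT_PRECISION)         # "float64" under -DHIGH_PREC, else "float32"
print(global_cvt_2_pd_float(x).dtype)    # cast to the global model precision
print(global_cvt_2_ener_float(x).dtype)  # cast to the energy precision (float64)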