Commit: Add Ener Model for Paddle

zhwesky2010 committed Apr 16, 2021
1 parent 0031e55 commit 13b8e6f
Showing 7 changed files with 70 additions and 146 deletions.
15 changes: 5 additions & 10 deletions deepmd/common.py
@@ -68,11 +68,11 @@ def gelu(x: tf.Tensor) -> tf.Tensor:
data_requirement = {}

ACTIVATION_FN_DICT = {
"relu": tf.nn.relu,
"relu6": tf.nn.relu6,
"softplus": tf.nn.softplus,
"sigmoid": tf.sigmoid,
"tanh": tf.tanh,
"relu": paddle.nn.functional.relu,
"relu6": paddle.nn.functional.relu6,
"softplus": paddle.nn.functional.softplus,
"sigmoid": paddle.nn.functional.sigmoid,
"tanh": paddle.nn.functional.tanh,
"gelu": gelu,
}

@@ -385,11 +385,6 @@ def get_activation_func(
RuntimeError
if unknown activation function is specified
"""
- #return paddle.nn.functional.tanh
- def fun(x):
-     return paddle.clip(x, min=-1.0, max=1.0)
- return fun
-
if activation_fn not in ACTIVATION_FN_DICT:
raise RuntimeError(f"{activation_fn} is not a valid activation function")
return ACTIVATION_FN_DICT[activation_fn]
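
Review note: with the TF callables swapped for their paddle.nn.functional equivalents, get_activation_func now simply resolves a name through ACTIVATION_FN_DICT; the hard-coded clip override above is gone. A minimal usage sketch, assuming deepmd.common as the import path (tensor values are made up):

    import paddle
    from deepmd.common import get_activation_func

    act = get_activation_func("tanh")       # resolves to paddle.nn.functional.tanh
    x = paddle.to_tensor([-2.0, 0.0, 2.0])
    y = act(x)                              # elementwise tanh, same shape as x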
55 changes: 13 additions & 42 deletions deepmd/descriptor/se_a.py
@@ -14,7 +14,7 @@
import sys


- class DescrptSeA (paddle.nn.Layer):
+ class DescrptSeA(paddle.nn.Layer):
@docstring_parameter(list_to_doc(ACTIVATION_FN_DICT.keys()), list_to_doc(PRECISION_DICT.keys()))
def __init__ (self,
rcut: float,
@@ -308,12 +308,6 @@ def forward (self,
box = paddle.reshape(box_, [-1, 9])
atype = paddle.reshape(atype_, [-1, natoms[1]])

#print("coord= ", coord.shape)
#print("box= ", box.shape)
#print("atype= ", atype.shape)
#print("natoms= ", natoms.shape)
#print("mesh= ", mesh.shape)

self.descrpt, self.descrpt_deriv, self.rij, self.nlist \
= paddle_ops.prod_env_mat_a(coord,
atype,
@@ -328,16 +322,6 @@ def forward (self,
sel_a = self.sel_a,
sel_r = self.sel_r)

- #self.descrpt = to_tensor(np.load('/workspace/deepmd-kit/examples/water/train/descrpt.npy'), stop_gradient=False)
- #self.descrpt_deriv = to_tensor(np.load('/workspace/deepmd-kit/examples/water/train/descrpt_deriv.npy'))
- #self.rij = to_tensor(np.load('/workspace/deepmd-kit/examples/water/train/rij.npy'))
- #self.nlist = to_tensor(np.load('/workspace/deepmd-kit/examples/water/train/nlist.npy'))
-
- #print("self.descrpt= ", self.descrpt)
- #print("self.descrpt_deriv= ", self.descrpt_deriv)
- #print("self.rij= ", self.rij)
- #print("self.nlist= ", self.nlist)
-
self.descrpt_reshape = paddle.reshape(self.descrpt, [-1, self.ndescrpt])
self.descrpt_reshape.stop_gradient = False

@@ -386,17 +370,14 @@ def prod_force_virial(self,
net_deriv = paddle.grad(atom_ener, self.descrpt_reshape, create_graph=True)[0]
net_deriv_reshape = paddle.reshape (net_deriv, [-1, natoms[0] * self.ndescrpt])

- self.net_deriv_reshape = net_deriv_reshape

- paddle.set_device("cpu")

force \
= paddle_ops.prod_force_se_a (net_deriv_reshape,
self.descrpt_deriv,
self.nlist,
natoms,
n_a_sel = self.nnei_a,
n_r_sel = self.nnei_r)

virial, atom_virial \
= paddle_ops.prod_virial_se_a (net_deriv_reshape,
self.descrpt_deriv,
Expand All @@ -405,8 +386,7 @@ def prod_force_virial(self,
natoms,
n_a_sel = self.nnei_a,
n_r_sel = self.nnei_r)

- paddle.set_device("gpu")

return force, virial, atom_virial
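
Review note: the force/virial path leans on Paddle's imperative autograd. descrpt_reshape is created with stop_gradient = False earlier in forward, so paddle.grad can differentiate the atomic energy with respect to the descriptor; that derivative then feeds the custom prod_force_se_a / prod_virial_se_a ops. A self-contained sketch of the same pattern, with a toy tensor standing in for the descriptor:

    import paddle

    x = paddle.randn([4, 3])
    x.stop_gradient = False            # without this, paddle.grad cannot reach x
    e = paddle.sum(x * x)              # stand-in for the summed atomic energy
    de_dx, = paddle.grad(e, x, create_graph=True)   # same call as in prod_force_virial
    print(de_dx.shape)                 # [4, 3]; equals 2 * x for this toy energy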


@@ -445,14 +425,6 @@ def _compute_dstats_sys_smth (self,
data_atype,
natoms_vec,
mesh) :

#print("pbefore sub_sess run========")
#print("data_coord= ", data_coord)
#print("data_atype= ", data_atype)
#print("natoms_vec= ", natoms_vec)
#print("data_box= ", data_box)
#print("mesh= ", mesh)

input_dict = {}
input_dict['coord'] = paddle.to_tensor(data_coord, dtype=GLOBAL_NP_FLOAT_PRECISION)
input_dict['box'] = paddle.to_tensor(data_box, dtype=GLOBAL_PD_FLOAT_PRECISION)
@@ -472,9 +444,7 @@ def _compute_dstats_sys_smth (self,
rcut_r_smth = self.rcut_r_smth,
sel_a = self.sel_a,
sel_r = self.sel_r)

#print("self.stat_descrpt ", stat_descrpt)
#print("==========after sub_sess run=========")

dd_all = self.stat_descrpt.numpy()
natoms = natoms_vec
dd_all = np.reshape(dd_all, [-1, self.ndescrpt * natoms[0]])
@@ -513,14 +483,14 @@ def _compute_std (self,sumv2, sumv, sumn) :
return val

def _filter(self,
- inputs,
- type_input,
- natoms,
- activation_fn=paddle.nn.functional.relu,
- stddev=1.0,
- bavg=0.0,
- reuse=None,
- seed=None,
+ inputs,
+ type_input,
+ natoms,
+ activation_fn=paddle.nn.functional.tanh,
+ stddev=1.0,
+ bavg=0.0,
+ reuse=None,
+ seed=None,
trainable = True):
# natom x (nei x 4)
shape = inputs.shape
@@ -577,3 +547,4 @@ def _filter(self,
result = paddle.reshape(result, [-1, outputs_size_2 * outputs_size[-1]])

return result, qmat
+
41 changes: 19 additions & 22 deletions deepmd/fit/ener.py
@@ -84,16 +84,16 @@ def __init__ (self,

# stat fparam
if self.numb_fparam > 0:
- self.t_fparam_avg = paddl.to_tensor(np.zeros([1, self.numb_fparam]),
+ self.t_fparam_avg = paddle.to_tensor(np.zeros([1, self.numb_fparam]),
dtype = GLOBAL_PD_FLOAT_PRECISION)
- self.t_fparam_istd = paddl.to_tensor(np.ones([1, self.numb_fparam]),
+ self.t_fparam_istd = paddle.to_tensor(np.ones([1, self.numb_fparam]),
dtype = GLOBAL_PD_FLOAT_PRECISION)

# stat aparam
if self.numb_aparam > 0:
- self.t_aparam_avg = paddl.to_tensor(np.zeros([1, self.numb_aparam]),
+ self.t_aparam_avg = paddle.to_tensor(np.zeros([1, self.numb_aparam]),
dtype = GLOBAL_PD_FLOAT_PRECISION)
- self.t_aparam_istd = tf.get_variable(np.ones([1, self.numb_aparam]),
+ self.t_aparam_istd = paddle.to_tensor(np.ones([1, self.numb_aparam]),
dtype = GLOBAL_PD_FLOAT_PRECISION)


@@ -123,6 +123,15 @@ def compute_output_stats(self,
can be prepared by model.make_stat_input
"""
self.bias_atom_e = self._compute_output_stats(all_stat, rcond = self.rcond)
+ if self.bias_atom_e is not None:
+     assert (len(self.bias_atom_e) == self.ntypes)
+     for type_i in range(self.ntypes):
+         type_bias_ae = self.bias_atom_e[type_i]
+         paddle.seed(self.seed)
+         normal_init_ = paddle.nn.initializer.Normal(mean=type_bias_ae, std=1.0)
+         final_layer = self.ElementNets[type_i][-1]
+         normal_init_(final_layer.bias)
+

@classmethod
def _compute_output_stats(self, all_stat, rcond = 1e-3):
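
Review note: the added block ports the TF bias trick to Paddle: each element network's final-layer bias starts at the per-type atomic-energy bias obtained from the least-squares fit in _compute_output_stats, so training begins near the right energy scale. A minimal sketch of the initializer call it relies on (layer width and bias value are made up):

    import paddle

    paddle.seed(1234)                                  # mirrors paddle.seed(self.seed)
    final_layer = paddle.nn.Linear(240, 1)             # stand-in for ElementNets[type_i][-1]
    init = paddle.nn.initializer.Normal(mean=-93.6, std=1.0)   # mean = type_bias_ae
    init(final_layer.bias)                             # re-draws the bias in place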
@@ -173,9 +182,9 @@ def compute_input_stats(self,
self.fparam_std[ii] = protection
self.fparam_inv_std = 1./self.fparam_std

- self.t_fparam_avg = paddl.to_tensor(self.fparam_avg,
+ self.t_fparam_avg = paddle.to_tensor(self.fparam_avg,
dtype = GLOBAL_PD_FLOAT_PRECISION)
- self.t_fparam_istd = paddl.to_tensor(self.fparam_inv_std,
+ self.t_fparam_istd = paddle.to_tensor(self.fparam_inv_std,
dtype = GLOBAL_PD_FLOAT_PRECISION)

# stat aparam
@@ -198,9 +207,9 @@ def compute_input_stats(self,
self.aparam_std[ii] = protection
self.aparam_inv_std = 1./self.aparam_std

- self.t_aparam_avg = paddl.to_tensor(self.aparam_avg,
+ self.t_aparam_avg = paddle.to_tensor(self.aparam_avg,
dtype = GLOBAL_PD_FLOAT_PRECISION)
- self.t_aparam_istd = tf.get_variable(self.aparam_inv_std,
+ self.t_aparam_istd = paddle.to_tensor(self.aparam_inv_std,
dtype = GLOBAL_PD_FLOAT_PRECISION)
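
Review note: both parameter stats apply the same guard seen above: a per-component std that falls below `protection` is replaced by `protection` before inverting, so 1./std never blows up on a constant input column (the guard's condition line is outside this hunk; this reading follows from the visible assignment). Sketched with NumPy, threshold value illustrative:

    import numpy as np

    std = np.array([2.0, 0.0, 1e-6])
    protection = 1e-2
    std[std < protection] = protection   # clamp tiny/zero stds
    inv_std = 1.0 / std                  # safe elementwise inverse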


@@ -209,7 +218,6 @@ def _compute_std (self, sumv2, sumv, sumn) :


def forward(self, inputs, natoms, input_dict, reuse=None, suffix=''):
- bias_atom_e = self.bias_atom_e
if self.numb_fparam > 0 and (self.fparam_avg is None or self.fparam_inv_std is None):
raise RuntimeError('No data stat result. one should do data statisitic, before build')
if self.numb_aparam > 0 and (self.aparam_avg is None or self.aparam_inv_std is None):
@@ -218,9 +226,6 @@ def forward(self, inputs, natoms, input_dict, reuse=None, suffix=''):
start_index = 0
inputs = paddle.cast(paddle.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)

- if bias_atom_e is not None:
-     assert (len(bias_atom_e) == self.ntypes)
-
if self.numb_fparam > 0:
fparam = input_dict['fparam']
fparam = paddle.reshape(fparam, [-1, self.numb_fparam])
@@ -252,10 +257,6 @@ def forward(self, inputs, natoms, input_dict, reuse=None, suffix=''):
layer = paddle.concat([layer, ext_aparam], axis=1)
start_index += natoms[2 + type_i]

- if bias_atom_e is None:
-     type_bias_ae = 0.0
- else:
-     type_bias_ae = bias_atom_e[type_i]

for ii in range(0, len(self.n_neuron)) :
if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
@@ -264,11 +265,7 @@ def forward(self, inputs, natoms, input_dict, reuse=None, suffix=''):
layer = self.ElementNets[type_i][ii](layer)
final_layer = self.ElementNets[type_i][len(self.n_neuron)](layer)

- if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
-     zero_inputs = paddle.cast(layer, self.fitting_precision)
-     zero_inputs[:, :self.dim_descrpt] = 0.
-     zero_layer = net_i(zero_inputs)
-     final_layer += self.atom_ener[type_i] - zero_layer
+ # if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None: (Not implement)

final_layer = paddle.reshape(final_layer, [inputs.shape[0], natoms[2 + type_i]])

@@ -278,4 +275,4 @@ def forward(self, inputs, natoms, input_dict, reuse=None, suffix=''):
else:
outs = paddle.concat([outs, final_layer], axis=1)

- return paddle.cast(paddle.reshape(outs, [-1]), GLOBAL_PD_FLOAT_PRECISION)
\ No newline at end of file
+ return paddle.cast(paddle.reshape(outs, [-1]), GLOBAL_PD_FLOAT_PRECISION)
9 changes: 5 additions & 4 deletions deepmd/loss/ener.py
@@ -84,17 +84,17 @@ def calculate_loss (self,
l2_force_loss = paddle.mean(paddle.square(diff_f), name = "l2_force_" + suffix)
l2_pref_force_loss = paddle.mean(paddle.multiply(paddle.square(diff_f), atom_pref_reshape), name = "l2_pref_force_" + suffix)

- virial_reshape = paddle.reshape (virial, [-1])
+ virial_reshape = paddle.reshape(virial, [-1])
virial_hat_reshape = paddle.reshape (virial_hat, [-1])
- l2_virial_loss = paddle.mean (paddle.square(virial_hat_reshape - virial_reshape), name = "l2_virial_" + suffix)
+ l2_virial_loss = paddle.mean(paddle.square(virial_hat_reshape - virial_reshape), name = "l2_virial_" + suffix)

atom_ener_reshape = paddle.reshape (atom_ener, [-1])
atom_ener_hat_reshape = paddle.reshape (atom_ener_hat, [-1])
l2_atom_ener_loss = paddle.mean (paddle.square(atom_ener_hat_reshape - atom_ener_reshape), name = "l2_atom_ener_" + suffix)

atom_norm = 1./ global_cvt_2_pd_float(natoms[0])
- atom_norm_ener = 1./ global_cvt_2_pd_float(natoms[0])
- pref_e = global_cvt_2_pd_float(find_energy * (self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * learning_rate / self.starter_learning_rate) )
+ atom_norm_ener = 1./ global_cvt_2_ener_float(natoms[0])
+ pref_e = global_cvt_2_ener_float(find_energy * (self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * learning_rate / self.starter_learning_rate))
pref_f = global_cvt_2_pd_float(find_force * (self.limit_pref_f + (self.start_pref_f - self.limit_pref_f) * learning_rate / self.starter_learning_rate) )
pref_v = global_cvt_2_pd_float(find_virial * (self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * learning_rate / self.starter_learning_rate) )
pref_ae= global_cvt_2_pd_float(find_atom_ener * (self.limit_pref_ae+ (self.start_pref_ae-self.limit_pref_ae) * learning_rate / self.starter_learning_rate) )
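
Review note: the ener/force/virial prefactors keep DeePMD-kit's linear schedule: each weight slides from its start_pref_* value toward its limit_pref_* value as the learning rate decays. The arithmetic, pulled out of the loss for clarity (hypothetical numbers):

    def pref(start, limit, lr, starter_lr):
        # equals `start` at lr == starter_lr, approaches `limit` as lr -> 0
        return limit + (start - limit) * lr / starter_lr

    print(pref(0.02, 1.0, 1e-3, 1e-3))   # 0.02 at the first step
    print(pref(0.02, 1.0, 1e-8, 1e-3))   # ~1.0 late in training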
@@ -136,6 +136,7 @@ def print_header(self):
print_str += prop_fmt % ('rmse_v_tst', 'rmse_v_trn')
if self.has_pf :
print_str += prop_fmt % ('rmse_pf_tst', 'rmse_pf_trn')
+
return print_str


14 changes: 7 additions & 7 deletions deepmd/model/ener.py
@@ -10,6 +10,7 @@

import sys

+
class EnerModel(paddle.nn.Layer) :
model_type = 'ener'

@@ -130,7 +131,7 @@ def forward (self,
reuse = reuse)

self.dout = dout

atom_ener = self.fitting (dout,
natoms,
input_dict,
@@ -143,11 +144,11 @@ def forward (self,
energy_raw = paddle.reshape(energy_raw, [-1, natoms[0]], name = 'o_atom_energy'+suffix)
energy = paddle.sum(paddle.cast(energy_raw, GLOBAL_ENER_FLOAT_PRECISION), axis=1, name='o_energy'+suffix)

- force, virial, atom_virial = self.descrpt.prod_force_virial (atom_ener, natoms)
-
- force = paddle.reshape (force, [-1, 3 * natoms[1]], name = "o_force"+suffix)
- virial = paddle.reshape (virial, [-1, 9], name = "o_virial"+suffix)
- atom_virial = paddle.reshape (atom_virial, [-1, 9 * natoms[1]], name = "o_atom_virial"+suffix)
+ force, virial, atom_virial = self.descrpt.prod_force_virial(atom_ener, natoms)
+ force = paddle.reshape(force, [-1, 3 * natoms[1]], name = "o_force"+suffix)
+ virial = paddle.reshape(virial, [-1, 9], name = "o_virial"+suffix)
+ atom_virial = paddle.reshape(atom_virial, [-1, 9 * natoms[1]], name = "o_atom_virial"+suffix)

model_dict = {}
model_dict['energy'] = energy
@@ -159,4 +160,3 @@ def forward (self,
model_dict['atype'] = atype

return model_dict
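
Review note: EnerModel.forward keeps the TF data flow: the descriptor produces dout, the fitting net maps it to per-atom energies, the total energy is a per-frame sum, and forces/virials come back through the descriptor's autograd-based prod_force_virial. In rough outline (argument lists abbreviated, shapes hypothetical):

    dout = self.descrpt(coord, atype, natoms, box, mesh, input_dict)   # per-atom descriptors
    atom_ener = self.fitting(dout, natoms, input_dict)                 # [nframes * natoms[0]]
    energy = paddle.sum(paddle.reshape(atom_ener, [-1, natoms[0]]), axis=1)  # [nframes]
    force, virial, atom_virial = self.descrpt.prod_force_virial(atom_ener, natoms)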
