
update code to align with newest paddle2
HydrogenSulfate committed Dec 29, 2023
1 parent 45ab1d6 commit f88b6e7
Showing 13 changed files with 23 additions and 965 deletions.
2 changes: 2 additions & 0 deletions .pre-commit-config.yaml
@@ -1,3 +1,5 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v4.4.0
6 changes: 3 additions & 3 deletions deepmd/common.py
@@ -429,13 +429,13 @@ def cast_precision(func: Callable) -> Callable:
     The decorator should be used in a classmethod.

     The decorator will do the following thing:
-    (1) It casts input Tensors from `GLOBAL_TF_FLOAT_PRECISION`
+    (1) It casts input Tensors from `GLOBAL_PD_FLOAT_PRECISION`
     to precision defined by property `precision`.
     (2) It casts output Tensors from `precision` to
-    `GLOBAL_TF_FLOAT_PRECISION`.
+    `GLOBAL_PD_FLOAT_PRECISION`.
     (3) It checks inputs and outputs and only casts when
     input or output is a Tensor and its dtype matches
-    `GLOBAL_TF_FLOAT_PRECISION` and `precision`, respectively.
+    `GLOBAL_PD_FLOAT_PRECISION` and `precision`, respectively.
     If it does not match (e.g. it is an integer), the decorator
     will do nothing on it.
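As context for this rename: the docstring above describes a dtype-casting decorator. Below is a minimal sketch of the idea in Paddle, assuming a module-level `GLOBAL_PD_FLOAT_PRECISION` and a `precision` property on the decorated class; this is an illustration, not the exact DeePMD-kit implementation.

```python
import functools
from typing import Callable

import paddle

# Assumed global, mirroring the docstring above.
GLOBAL_PD_FLOAT_PRECISION = paddle.float64


def cast_precision(func: Callable) -> Callable:
    """Cast Tensor args to ``self.precision``; cast Tensor results back."""

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        def cast_in(x):
            # Only cast Tensors whose dtype matches the global precision.
            if isinstance(x, paddle.Tensor) and x.dtype == GLOBAL_PD_FLOAT_PRECISION:
                return x.astype(self.precision)
            return x

        def cast_out(x):
            # Only cast Tensors whose dtype matches ``self.precision``.
            if isinstance(x, paddle.Tensor) and x.dtype == self.precision:
                return x.astype(GLOBAL_PD_FLOAT_PRECISION)
            return x

        args = [cast_in(a) for a in args]
        kwargs = {k: cast_in(v) for k, v in kwargs.items()}
        out = func(self, *args, **kwargs)
        if isinstance(out, tuple):
            return tuple(cast_out(o) for o in out)
        return cast_out(out)

    return wrapper
```

The key point, per the docstring, is that only Tensors whose dtype exactly matches the global precision are cast on the way in, and only Tensors matching `precision` are cast back on the way out; everything else (e.g. integers) passes through untouched.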
Expand Down
18 changes: 9 additions & 9 deletions deepmd/descriptor/se_a.py
@@ -197,12 +197,12 @@ def __init__(
         assert self.ntypes == len(self.sel_r)
         self.rcut_a = -1
         # numb of neighbors and numb of descrptors
-        self.nnei_a = np.cumsum(self.sel_a)[-1]  # 138: number of atoms in the neighborhood
-        self.nnei_r = np.cumsum(self.sel_r)[-1]  # 0
-        self.nnei = self.nnei_a + self.nnei_r  # 138
-        self.ndescrpt_a = self.nnei_a * 4  # 552: number of atoms * 4 ([s, s/x, s/y, s/z])
-        self.ndescrpt_r = self.nnei_r * 1  # 0
-        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r  # 552
+        self.nnei_a = np.cumsum(self.sel_a)[-1]
+        self.nnei_r = np.cumsum(self.sel_r)[-1]
+        self.nnei = self.nnei_a + self.nnei_r
+        self.ndescrpt_a = self.nnei_a * 4
+        self.ndescrpt_r = self.nnei_r * 1
+        self.ndescrpt = self.ndescrpt_a + self.ndescrpt_r
         self.useBN = False
         self.dstd = None
         self.davg = None
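For reference, a worked example of the counts mentioned in the comments deleted above, assuming `sel_a = [46, 92]` (the example value noted in the train.py hunk further down) and an unused radial selection:

```python
import numpy as np

sel_a = [46, 92]  # assumed per-type neighbor limits (angular part)
sel_r = [0, 0]    # radial-only part unused in this configuration

nnei_a = np.cumsum(sel_a)[-1]       # 138 neighbors in total
nnei_r = np.cumsum(sel_r)[-1]       # 0
nnei = nnei_a + nnei_r              # 138
ndescrpt_a = nnei_a * 4             # 552: one [s, s/x, s/y, s/z] block per neighbor
ndescrpt_r = nnei_r * 1             # 0
ndescrpt = ndescrpt_a + ndescrpt_r  # 552
```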
@@ -211,6 +211,7 @@ def __init__(
         self.mixed_prec = mixed_prec
         # self.place_holders = {}
         # self.nei_type = np.repeat(np.arange(self.ntypes), self.sel_a)
+
         self.avg_zero = paddle.zeros(
             [self.ntypes, self.ndescrpt], dtype=GLOBAL_PD_FLOAT_PRECISION
         )
@@ -588,7 +589,7 @@ def forward(
             suffix=suffix,
             reuse=reuse,
             trainable=self.trainable,
-        )  # [1, all_atom, M1*M2], output_qmat: [1, all_atom, M1*3]
+        )

         return self.dout

@@ -702,11 +703,10 @@ def _pass_filter(
                     reuse=reuse,
                     trainable=trainable,
                     activation_fn=self.filter_activation_fn,
-                )  # [natom, M1*M2], qmat: [natom, M1, 3]
+                )
                 layer = paddle.reshape(
                     layer, [inputs.shape[0], natoms[2 + type_i], self.get_dim_out()]
                 )
-
                 qmat = paddle.reshape(
                     qmat,
                     [
3 changes: 1 addition & 2 deletions deepmd/entrypoints/test.py
@@ -260,7 +260,7 @@ def test_ener(
     data.add("energy", 1, atomic=False, must=False, high_prec=True)
     data.add("force", 3, atomic=True, must=False, high_prec=False)
     data.add("virial", 9, atomic=False, must=False, high_prec=False)
-    if dp.has_efield:  # False
+    if dp.has_efield:
         data.add("efield", 3, atomic=True, must=True, high_prec=False)
     if has_atom_ener:
         data.add("atom_ener", 1, atomic=True, must=True, high_prec=False)
@@ -278,7 +278,6 @@ def test_ener(
     numb_test = min(nframes, numb_test)

     coord = test_data["coord"][:numb_test].reshape([numb_test, -1])
-
     box = test_data["box"][:numb_test]
     if dp.has_efield:
         efield = test_data["efield"][:numb_test].reshape([numb_test, -1])
9 changes: 4 additions & 5 deletions deepmd/entrypoints/train.py
@@ -207,7 +207,7 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
     dp_random.seed(seed)

     # setup data modifier
-    modifier = get_modifier(jdata["model"].get("modifier", None))  # None
+    modifier = get_modifier(jdata["model"].get("modifier", None))

     # check the multi-task mode
     multi_task_mode = "fitting_net_dict" in jdata["model"]
@@ -275,7 +275,6 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
     origin_type_map = get_data(
         jdata["training"]["training_data"], rcut, None, modifier
     ).get_type_map()
-    print("model.build")
     model.build(train_data, stop_batch, origin_type_map=origin_type_map)

     if not is_compress:
@@ -377,7 +376,7 @@ def get_nbor_stat(jdata, rcut, one_type: bool = False):
     if type_map and len(type_map) == 0:
         type_map = None
     multi_task_mode = "data_dict" in jdata["training"]
-    if not multi_task_mode:  # here
+    if not multi_task_mode:
         train_data = get_data(
             jdata["training"]["training_data"], max_rcut, type_map, None
         )
@@ -465,7 +464,7 @@ def update_one_sel(jdata, descriptor):
         return descriptor
     rcut = descriptor["rcut"]
     tmp_sel = get_sel(jdata, rcut, one_type=descriptor["type"] in ("se_atten",))
-    sel = descriptor["sel"]  # [46, 92]
+    sel = descriptor["sel"]
     if isinstance(sel, int):
         # convert to list and finnally convert back to int
         sel = [sel]
@@ -496,7 +495,7 @@ def update_sel(jdata):
     if descrpt_data["type"] == "hybrid":
         for ii in range(len(descrpt_data["list"])):
             descrpt_data["list"][ii] = update_one_sel(jdata, descrpt_data["list"][ii])
-    else:  # here
+    else:
         descrpt_data = update_one_sel(jdata, descrpt_data)
     jdata["model"]["descriptor"] = descrpt_data
     return jdata
16 changes: 2 additions & 14 deletions deepmd/fit/ener.py
@@ -502,7 +502,7 @@ def _build_lower(
         if (not self.uniform_seed) and (self.seed is not None):
             self.seed += self.seed_shift

-        return final_layer  # [natoms, 1]
+        return final_layer

     def forward(
         self,
@@ -621,18 +621,6 @@ def forward(
         start_index = 0
         outs_list = []
         for type_i in range(ntypes_atom):
-            # final_layer = inputs
-            # for layer_j in range(type_i * ntypes_atom, (type_i + 1) * ntypes_atom):
-            #     final_layer = self.one_layers[layer_j](final_layer)
-            # final_layer = self.final_layers[type_i](final_layer)
-            # print(final_layer.shape)
-
-            # # concat the results
-            # if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None:
-            #     zero_layer = inputs_zero
-            #     for layer_j in range(type_i * ntypes_atom, (type_i + 1) * ntypes_atom):
-            #         zero_layer = self.one_layers[layer_j](zero_layer)
-            #     zero_layer = self.final_layers[type_i](zero_layer)
             final_layer = self._build_lower(
                 start_index,
                 natoms[2 + type_i],
@@ -707,7 +695,7 @@ def forward(
             ),
             [paddle.shape(inputs)[0], paddle.sum(natoms[2 : 2 + ntypes_atom]).item()],
         )
-        outs = outs + self.add_type  # type encoding (like a transformer's positional encoding: each type has its own learned feature, added onto the original features)
+        outs = outs + self.add_type
        outs *= atype_filter
        self.atom_ener_after = outs
