
Commit

refine code
HydrogenSulfate committed Dec 4, 2023
1 parent 691d85e commit 7bbc875
Showing 3 changed files with 26 additions and 4 deletions.
2 changes: 1 addition & 1 deletion deepmd/entrypoints/freeze.py
@@ -367,7 +367,7 @@ def freeze_graph(
)
for name, param in st_model.named_buffers():
print(
f"[{name}, {param.shape}] generated name in static_model is: {param.name}"
f"[{name}, {param.dtype}, {param.shape}] generated name in static_model is: {param.name}"
)
# Skip pruning the program so that parameters not used in the forward pass (rcut, ntypes, etc.) are kept and can be read from the C++ side
skip_prune_program = True
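For reference, a minimal sketch (not part of this commit) of what this loop prints: it registers two placeholder metadata buffers on a toy paddle.nn.Layer, analogous to deepmd's buffer_rcut/buffer_ntypes, and prints each buffer's name, dtype, and shape in the same format as above. In the real freeze step, st_model is the static model, so param.name is a generated tensor name (see the listings in DeepPot.cc below).

    import paddle

    class ToyModel(paddle.nn.Layer):
        def __init__(self):
            super().__init__()
            # Placeholder metadata buffers, analogous to deepmd's buffer_rcut / buffer_ntypes.
            self.register_buffer("buffer_rcut", paddle.to_tensor(6.0, dtype="float64"))
            self.register_buffer("buffer_ntypes", paddle.to_tensor(2, dtype="int32"))

        def forward(self, x):
            return x

    model = ToyModel()
    for name, param in model.named_buffers():
        # Same format as the print above, now including the dtype.
        print(
            f"[{name}, {param.dtype}, {param.shape}] "
            f"generated name in static_model is: {param.name}"
        )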
4 changes: 3 additions & 1 deletion deepmd/infer/deep_pot.py
@@ -155,7 +155,9 @@ def __init__(
self.rcut = float(self.model.descrpt.buffer_rcut)
self.dfparam = 0
self.daparam = 0
self.t_tmap = "".join([chr(idx) for idx in self.model.buffer_tmap.tolist()])
# self.t_tmap = "".join([chr(idx) for idx in self.model.buffer_tmap.tolist()])
self.t_tmap = [chr(idx) for idx in self.model.buffer_tmap.tolist()]
self.t_tmap = [c for c in self.t_tmap if c != " "]

# setup modifier
try:
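A quick illustration of the new decoding (standalone, with a hypothetical type map; the real codes come from the frozen model's buffer_tmap): the buffer stores the type map as character codes, and the rewritten lines keep the individual characters while dropping the space separators instead of joining everything into one string.

    # Hypothetical stand-in for self.model.buffer_tmap.tolist(): the codes of "O H".
    buffer_tmap = [ord(c) for c in "O H"]       # [79, 32, 72]

    t_tmap = [chr(idx) for idx in buffer_tmap]  # ['O', ' ', 'H']
    t_tmap = [c for c in t_tmap if c != " "]    # ['O', 'H']
    print(t_tmap)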
24 changes: 22 additions & 2 deletions source/api_cc/src/DeepPot.cc
@@ -719,7 +719,7 @@ void DeepPot::init(const std::string& model,
DPGetDeviceCount(gpu_num); // check current device environment
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
if (use_paddle_inference) {
/*
/* water se_e2_a
tensorflow::DT_DOUBLE = 2
tensorflow::DT_FLOAT = 1
paddle_infer::DataType::FLOAT64 = 7
@@ -749,6 +749,26 @@
[fitting.buffer_dfparam, []] generated name in static_model is: generated_tensor_9
[fitting.buffer_daparam, []] generated name in static_model is: generated_tensor_10
**/
/* spin se_e2_a
[buffer_tmap, [4]] generated name in static_model is: generated_tensor_14
[buffer_model_type, [4]] generated name in static_model is: generated_tensor_15
[buffer_model_version, [1]] generated name in static_model is: generated_tensor_16
[descrpt.buffer_rcut, []] generated name in static_model is: generated_tensor_3
[descrpt.buffer_ntypes, []] generated name in static_model is: generated_tensor_4
[descrpt.avg_zero, [3, 720]] generated name in static_model is: eager_tmp_0
[descrpt.std_ones, [3, 720]] generated name in static_model is: eager_tmp_1
[descrpt.t_rcut, []] generated name in static_model is: generated_tensor_5
[descrpt.buffer_sel, [3]] generated name in static_model is: generated_tensor_6
[descrpt.buffer_ndescrpt, []] generated name in static_model is: generated_tensor_7
[descrpt.buffer_original_sel, [3]] generated name in static_model is: generated_tensor_8
[descrpt.t_avg, [3, 720]] generated name in static_model is: generated_tensor_9
[descrpt.t_std, [3, 720]] generated name in static_model is: generated_tensor_10
[descrpt.spin.buffer_ntypes_spin, [1]] generated name in static_model is: generated_tensor_0
[descrpt.spin.buffer_virtual_len, [1, 1]] generated name in static_model is: generated_tensor_1
[descrpt.spin.buffer_spin_norm, [1, 1]] generated name in static_model is: generated_tensor_2
[fitting.buffer_dfparam, []] generated name in static_model is: generated_tensor_11
[fitting.buffer_daparam, []] generated name in static_model is: generated_tensor_12
[fitting.t_bias_atom_e, [2]] generated name in static_model is: generated_tensor_13
*/
model_version = paddle_get_scalar<std::string>("generated_tensor_14");
dtype = predictor_get_dtype(predictor, "generated_tensor_0");
if (dtype == paddle_infer::DataType::FLOAT64) {
@@ -757,7 +777,7 @@ void DeepPot::init(const std::string& model,
rcut = paddle_get_scalar<float>("generated_tensor_0");
}
ntypes = paddle_get_scalar<int32_t>("generated_tensor_2");
// ntypes_spin = paddle_get_scalar<int64_t>("buffer_ntypes_spin");
// ntypes_spin = paddle_get_scalar<int32_t>("buffer_ntypes_spin");
ntypes_spin = 0;
dfparam = paddle_get_scalar<int64_t>("generated_tensor_9");
daparam = paddle_get_scalar<int64_t>("generated_tensor_10");
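The generated tensor indices differ between graphs (for example, fitting.buffer_dfparam is generated_tensor_9 in the water se_e2_a listing but generated_tensor_11 in the spin one), which is why the listings above are recorded next to the hard-coded names passed to paddle_get_scalar. Below is a hypothetical helper (not part of this commit) that turns such freeze-time log lines into a buffer-name to generated-tensor mapping, assuming the print format from freeze.py above.

    import re

    # Hypothetical helper (not part of this commit): turn the freeze-time log lines
    # recorded in the comment blocks above into {buffer name: generated tensor name},
    # so the indices used with paddle_get_scalar can be looked up per model.
    LOG_LINE = re.compile(
        r"\[(?P<name>[^,]+),.*\] generated name in static_model is: (?P<gen>\S+)"
    )

    def parse_buffer_map(log_lines):
        mapping = {}
        for line in log_lines:
            m = LOG_LINE.search(line)
            if m:
                mapping[m.group("name")] = m.group("gen")
        return mapping

    # Two lines taken from the spin se_e2_a listing above:
    log = [
        "[fitting.buffer_dfparam, []] generated name in static_model is: generated_tensor_11",
        "[fitting.buffer_daparam, []] generated name in static_model is: generated_tensor_12",
    ]
    print(parse_buffer_map(log)["fitting.buffer_dfparam"])  # generated_tensor_11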
