From 7bbc8757aeac8e8c052875d69c661e6d94cdddbc Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Mon, 4 Dec 2023 03:00:33 +0000
Subject: [PATCH] refine code

---
 deepmd/entrypoints/freeze.py |  2 +-
 deepmd/infer/deep_pot.py     |  4 +++-
 source/api_cc/src/DeepPot.cc | 24 ++++++++++++++++++++++--
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/deepmd/entrypoints/freeze.py b/deepmd/entrypoints/freeze.py
index 1308322d4d..e51c14d8a5 100755
--- a/deepmd/entrypoints/freeze.py
+++ b/deepmd/entrypoints/freeze.py
@@ -367,7 +367,7 @@ def freeze_graph(
         )
         for name, param in st_model.named_buffers():
             print(
-                f"[{name}, {param.shape}] generated name in static_model is: {param.name}"
+                f"[{name}, {param.dtype}, {param.shape}] generated name in static_model is: {param.name}"
             )
         # Skip pruning the program so that parameters not involved in the forward pass, such as rcut and ntypes, are kept and can be retrieved on the C++ side
         skip_prune_program = True
diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py
index 0936142ccf..2b0312ddd4 100644
--- a/deepmd/infer/deep_pot.py
+++ b/deepmd/infer/deep_pot.py
@@ -155,7 +155,9 @@ def __init__(
         self.rcut = float(self.model.descrpt.buffer_rcut)
         self.dfparam = 0
         self.daparam = 0
-        self.t_tmap = "".join([chr(idx) for idx in self.model.buffer_tmap.tolist()])
+        # self.t_tmap = "".join([chr(idx) for idx in self.model.buffer_tmap.tolist()])
+        self.t_tmap = [chr(idx) for idx in self.model.buffer_tmap.tolist()]
+        self.t_tmap = [c for c in self.t_tmap if c != " "]
 
         # setup modifier
         try:
diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc
index afd7225a71..0cac6501ae 100644
--- a/source/api_cc/src/DeepPot.cc
+++ b/source/api_cc/src/DeepPot.cc
@@ -719,7 +719,7 @@ void DeepPot::init(const std::string& model,
   DPGetDeviceCount(gpu_num);  // check current device environment
 #endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
   if (use_paddle_inference) {
-    /*
+    /* water se_e2_a
     tensorflow::DT_DOUBLE = 2
     tensorflow::DT_FLOAT = 1
     paddle_infer::DataType::FLOAT64 = 7
@@ -749,6 +749,26 @@ void DeepPot::init(const std::string& model,
     [fitting.buffer_dfparam, []] generated name in static_model is: generated_tensor_9
     [fitting.buffer_daparam, []] generated name in static_model is: generated_tensor_10
     **/
+    /* spin se_e2_a
+    [buffer_tmap, [4]] generated name in static_model is: generated_tensor_14
+    [buffer_model_type, [4]] generated name in static_model is: generated_tensor_15
+    [buffer_model_version, [1]] generated name in static_model is: generated_tensor_16
+    [descrpt.buffer_rcut, []] generated name in static_model is: generated_tensor_3
+    [descrpt.buffer_ntypes, []] generated name in static_model is: generated_tensor_4
+    [descrpt.avg_zero, [3, 720]] generated name in static_model is: eager_tmp_0
+    [descrpt.std_ones, [3, 720]] generated name in static_model is: eager_tmp_1
+    [descrpt.t_rcut, []] generated name in static_model is: generated_tensor_5
+    [descrpt.buffer_sel, [3]] generated name in static_model is: generated_tensor_6
+    [descrpt.buffer_ndescrpt, []] generated name in static_model is: generated_tensor_7
+    [descrpt.buffer_original_sel, [3]] generated name in static_model is: generated_tensor_8
+    [descrpt.t_avg, [3, 720]] generated name in static_model is: generated_tensor_9
+    [descrpt.t_std, [3, 720]] generated name in static_model is: generated_tensor_10
+    [descrpt.spin.buffer_ntypes_spin, [1]] generated name in static_model is: generated_tensor_0
+    [descrpt.spin.buffer_virtual_len, [1, 1]] generated name in static_model is: generated_tensor_1
+    [descrpt.spin.buffer_spin_norm, [1, 1]] generated name in static_model is: generated_tensor_2
+    [fitting.buffer_dfparam, []] generated name in static_model is: generated_tensor_11
+    [fitting.buffer_daparam, []] generated name in static_model is: generated_tensor_12 [fitting.t_bias_atom_e, [2]] generated name in static_model is: generated_tensor_13
+    */
     model_version = paddle_get_scalar("generated_tensor_14");
     dtype = predictor_get_dtype(predictor, "generated_tensor_0");
     if (dtype == paddle_infer::DataType::FLOAT64) {
@@ -757,7 +777,7 @@ void DeepPot::init(const std::string& model,
       rcut = paddle_get_scalar("generated_tensor_0");
     }
     ntypes = paddle_get_scalar("generated_tensor_2");
-    // ntypes_spin = paddle_get_scalar("buffer_ntypes_spin");
+    // ntypes_spin = paddle_get_scalar("buffer_ntypes_spin");
     ntypes_spin = 0;
     dfparam = paddle_get_scalar("generated_tensor_9");
     daparam = paddle_get_scalar("generated_tensor_10");
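
Note on the deep_pot.py change (not part of the patch): buffer_tmap stores the space-separated
type map as integer character codes, so the new code first decodes the codes to characters and
then drops the space separators, leaving one entry per element type. A minimal sketch of that
behaviour, using a hypothetical buffer value in place of self.model.buffer_tmap.tolist():

    # Hypothetical codes standing in for self.model.buffer_tmap.tolist();
    # 79, 32, 72 are the ASCII codes of "O", " ", "H".
    buffer_tmap = [79, 32, 72]

    t_tmap = [chr(idx) for idx in buffer_tmap]  # ['O', ' ', 'H']
    t_tmap = [c for c in t_tmap if c != " "]    # drop separators -> ['O', 'H']
    print(t_tmap)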
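
Note on the DeepPot.cc change (not part of the patch): the two comment blocks in DeepPot::init
record the named_buffers() output printed by freeze.py for a water se_e2_a model and a spin
se_e2_a model. The generated tensor names are numbered differently for the two models (for
example, generated_tensor_9 is fitting.buffer_dfparam in the water table but descrpt.t_avg in
the spin table), which is why both tables are kept next to the hard-coded lookups. An
illustrative mapping copied from the spin table above:

    # Illustrative only: buffer name -> generated tensor name for spin se_e2_a,
    # copied from the comment block recorded in DeepPot.cc.
    spin_se_e2_a_names = {
        "buffer_tmap": "generated_tensor_14",
        "buffer_model_version": "generated_tensor_16",
        "descrpt.buffer_rcut": "generated_tensor_3",
        "descrpt.buffer_ntypes": "generated_tensor_4",
        "descrpt.spin.buffer_ntypes_spin": "generated_tensor_0",
        "fitting.buffer_dfparam": "generated_tensor_11",
        "fitting.buffer_daparam": "generated_tensor_12",
    }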