Commit 0af71a0

update ugly but runnable code
HydrogenSulfate committed Nov 26, 2023
1 parent fc78e6d commit 0af71a0
Showing 39 changed files with 3,045 additions and 2,862 deletions.
3 changes: 3 additions & 0 deletions deepmd/descriptor/se_a.py
@@ -147,6 +147,7 @@ def __init__(
)
self.sel_a = sel
self.rcut_r = rcut
self.register_buffer("buffer_rcut", paddle.to_tensor(rcut, dtype="float64"))
self.rcut_r_smth = rcut_smth
self.filter_neuron = neuron
self.n_axis_neuron = axis_neuron
@@ -175,10 +176,12 @@ def __init__(
self.sel_a.extend(self.sel_a_spin)
else:
self.ntypes_spin = 0
self.register_buffer("buffer_ntypes_spin", paddle.to_tensor(self.ntypes_spin))

# descrpt config
self.sel_r = [0 for ii in range(len(self.sel_a))]
self.ntypes = len(self.sel_a)
self.register_buffer("buffer_ntypes", paddle.to_tensor(self.ntypes))
assert self.ntypes == len(self.sel_r)
self.rcut_a = -1
# number of neighbors and number of descriptors
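Note: the register_buffer calls added above persist scalar hyperparameters such as the cutoff radius and the number of atom types as named tensors in the layer state, so they are serialized together with the weights when the model is exported. A minimal, self-contained sketch of the pattern (the Descriptor class below is illustrative, not the DeePMD one):

import paddle

class Descriptor(paddle.nn.Layer):
    def __init__(self, rcut: float, ntypes: int):
        super().__init__()
        # Buffers live in the state dict but are not trainable, which
        # makes them a convenient carrier for scalar hyperparameters.
        self.register_buffer("buffer_rcut", paddle.to_tensor(rcut, dtype="float64"))
        self.register_buffer("buffer_ntypes", paddle.to_tensor(ntypes))

    def forward(self, x):
        # The buffers need not be read here; they ride along anyway.
        return x

layer = Descriptor(rcut=6.0, ntypes=2)
print(list(layer.state_dict().keys()))  # includes buffer_rcut, buffer_ntypes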
15 changes: 14 additions & 1 deletion deepmd/entrypoints/freeze.py
@@ -424,7 +424,20 @@ def freeze_graph(
False,
],
)
paddle.jit.save(st_model, output)
print(f"st_model.descrpt.buffer_rcut.name = {st_model.descrpt.buffer_rcut.name}")
print(
f"st_model.descrpt.buffer_ntypes.name = {st_model.descrpt.buffer_ntypes.name}"
)
print(
f"st_model.fitting.buffer_dfparam.name = {st_model.fitting.buffer_dfparam.name}"
)
print(
f"st_model.fitting.buffer_daparam.name = {st_model.fitting.buffer_daparam.name}"
)
# Skip pruning the program so that parameters that do not participate in the forward pass (e.g. rcut, ntypes) are kept and can be retrieved on the C++ side.
skip_prune_program = True
print(f"==>> skip_prune_program = {skip_prune_program}")
paddle.jit.save(st_model, output, skip_prune_program=skip_prune_program)
print(f"Saved to path: {output}")


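Note: by default paddle.jit.save prunes the program down to the operators reachable from the declared outputs, which would drop buffers such as buffer_rcut that never feed the forward pass. Passing skip_prune_program=True, as the diff does, keeps them in the serialized program so the C++ predictor can later look them up by name. A hedged sketch of the save path (the Toy layer and output path are illustrative):

import paddle

class Toy(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.register_buffer("buffer_rcut", paddle.to_tensor(6.0, dtype="float64"))
        self.linear = paddle.nn.Linear(3, 1)

    def forward(self, x):
        # buffer_rcut is deliberately unused in forward().
        return self.linear(x)

st_model = paddle.jit.to_static(
    Toy(), input_spec=[paddle.static.InputSpec([None, 3], "float32")]
)
# skip_prune_program=True keeps variables the pruning pass would
# otherwise remove because forward() never reads them.
paddle.jit.save(st_model, "./toy_model", skip_prune_program=True)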
2 changes: 2 additions & 0 deletions deepmd/fit/ener.py
@@ -140,7 +140,9 @@ def __init__(
# .add("precision", str, default = "default")\
# .add("trainable", [list, bool], default = True)
self.numb_fparam = numb_fparam
self.register_buffer("buffer_dfparam", paddle.to_tensor(self.numb_fparam))
self.numb_aparam = numb_aparam
self.register_buffer("buffer_daparam", paddle.to_tensor(self.numb_aparam))
self.n_neuron = neuron
self.resnet_dt = resnet_dt
self.rcond = rcond
10 changes: 8 additions & 2 deletions deepmd/infer/deep_eval.py
@@ -94,17 +94,23 @@ def __init__(
for k, v in load_state_dict.items():
if k in self.model.state_dict():
if load_state_dict[k].dtype != self.model.state_dict()[k].dtype:
# print(f"convert dtype from {load_state_dict[k].dtype} to {self.model.state_dict()[k].dtype}")
print(
f"convert {k}'s dtype from {load_state_dict[k].dtype} to {self.model.state_dict()[k].dtype}"
)
load_state_dict[k] = load_state_dict[k].astype(
self.model.state_dict()[k].dtype
)
if list(load_state_dict[k].shape) != list(
self.model.state_dict()[k].shape
):
# print(f"convert shape from {load_state_dict[k].shape} to {self.model.state_dict()[k].shape}")
print(
f"convert {k}'s shape from {load_state_dict[k].shape} to {self.model.state_dict()[k].shape}"
)
load_state_dict[k] = load_state_dict[k].reshape(
self.model.state_dict()[k].shape
)
# print(f"==>> Load pretraied model successfully from: {str(model_file)}")
# exit()
self.model.set_state_dict(load_state_dict)
self.load_prefix = load_prefix

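Note: the loading loop above coerces each checkpoint entry to the dtype and shape expected by the freshly constructed model before set_state_dict is called, so checkpoints written with a different precision or a flattened layout still load. The same coercion as a standalone helper (a sketch, assuming plain Paddle layers):

import paddle

def coerce_state_dict(loaded: dict, model: paddle.nn.Layer) -> dict:
    """Cast and reshape checkpoint tensors to match the target model."""
    target = model.state_dict()
    for k in list(loaded.keys()):
        if k not in target:
            continue
        if loaded[k].dtype != target[k].dtype:
            loaded[k] = loaded[k].astype(target[k].dtype)
        if list(loaded[k].shape) != list(target[k].shape):
            loaded[k] = loaded[k].reshape(target[k].shape)
    return loaded

# usage: model.set_state_dict(coerce_state_dict(paddle.load(ckpt_path), model))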
17 changes: 17 additions & 0 deletions deepmd/infer/deep_pot.py
@@ -636,6 +636,17 @@ def _eval_inner(
eval_inputs["box"], # [45] paddle.float64
eval_inputs["default_mesh"], # [6] paddle.int32
)
# print(eval_inputs["coord"].shape)
# print(eval_inputs["type"].shape)
# print(eval_inputs["natoms_vec"].shape)
# print(eval_inputs["box"].shape)
# print(eval_inputs["default_mesh"].shape)
# np.save("/workspace/hesensen/deepmd_backend/python_infer_data/coord.npy", eval_inputs["coord"].numpy())
# np.save("/workspace/hesensen/deepmd_backend/python_infer_data/type.npy", eval_inputs["type"].numpy())
# np.save("/workspace/hesensen/deepmd_backend/python_infer_data/natoms_vec.npy", eval_inputs["natoms_vec"].numpy())
# np.save("/workspace/hesensen/deepmd_backend/python_infer_data/box.npy", eval_inputs["box"].numpy())
# np.save("/workspace/hesensen/deepmd_backend/python_infer_data/default_mesh.npy", eval_inputs["default_mesh"].numpy())
# exit()
eval_outputs = {
"atom_ener": eval_outputs[0],
"atom_virial": eval_outputs[1],
@@ -656,6 +667,12 @@
# "xx1": eval_outputs[9],
# "hidden1": eval_outputs[10],
}

# for k, v in eval_outputs.items():
# print(k, v.shape)
# np.save(f"/workspace/hesensen/deepmd_backend/python_infer_data/st_model_{k}.npy", v.numpy())
# print(f"finished save {k}")
# exit()
else:
eval_outputs = self.model(
eval_inputs["coord"], # [2880] paddle.float64
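Note: the commented-out np.save calls are debugging hooks for cross-backend validation: dump every input and output tensor from the Python run, then check the C++ predictor against those files. A generic sketch of that workflow (ref_dir and the helper names are illustrative, not from the repository):

import os
import numpy as np

def dump_tensors(tensors: dict, ref_dir: str, prefix: str = "") -> None:
    """Save each Paddle tensor as .npy for later comparison."""
    os.makedirs(ref_dir, exist_ok=True)
    for name, t in tensors.items():
        np.save(os.path.join(ref_dir, f"{prefix}{name}.npy"), t.numpy())

def compare_with_reference(tensors: dict, ref_dir: str, prefix: str = "",
                           rtol: float = 1e-6, atol: float = 1e-8) -> None:
    """Check another run's outputs against the saved references."""
    for name, t in tensors.items():
        ref = np.load(os.path.join(ref_dir, f"{prefix}{name}.npy"))
        np.testing.assert_allclose(t.numpy(), ref, rtol=rtol, atol=atol)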
2 changes: 1 addition & 1 deletion examples/water/lmp/in.lammps
@@ -12,7 +12,7 @@ mass 1 16
mass 2 2

# See https://deepmd.rtfd.io/lammps/ for usage
pair_style deepmd frozen_model.pb
pair_style deepmd Model_1000000_with_buffer
# If atom names (O H in this example) are not set in the pair_coeff command, the type_map defined by the training parameter will be used by default.
pair_coeff * * O H

Binary file added examples/water/lmp/model.pb
120 changes: 120 additions & 0 deletions source/CMakeLists.txt
@@ -2,6 +2,98 @@
cmake_minimum_required(VERSION 3.16)
project(DeePMD)

macro(safe_set_static_flag)
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endmacro()

if(NOT DEFINED PADDLE_LIB)
message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
set(PADDLE_LIB ${PADDLE_LIB} CACHE PATH "/path/paddle/lib")

include_directories("${PADDLE_LIB}/")
set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/")

include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include")

link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
link_directories("${PADDLE_LIB}/paddle/lib")

# add custom operators
option(USE_TENSORRT "Compile demo with TensorRT." OFF)

if(WITH_GPU)
if(NOT WIN32)
set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
else()
if(CUDA_LIB STREQUAL "")
set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64")
endif()
endif(NOT WIN32)
endif()

if (NOT WIN32)
if (USE_TENSORRT AND WITH_GPU)
include_directories("${TENSORRT_INCLUDE_DIR}")
link_directories("${TENSORRT_LIB_DIR}")
endif()
endif(NOT WIN32)


if(WITH_STATIC_LIB)
set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
if(WIN32)
set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
endif()

if (NOT WIN32)
set(EXTERNAL_LIB "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf xxhash
${EXTERNAL_LIB})
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB})
set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)

if(WITH_GPU)
if(NOT WIN32)
if (USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
if(USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} )
endif()
endif()


option(BUILD_TESTING "Build tests and enable coverage" OFF)
set(DEEPMD_C_ROOT
""
@@ -175,6 +267,7 @@ if(BUILD_CPP_IF)
set(LIB_DEEPMD_CC "deepmd_cc")
set(LIB_DEEPMD_C "deepmd_c")
if(USE_CUDA_TOOLKIT)
set(LIB_DEEPMD_OP_DEVICE "deepmd_paddle_op_cuda")
set(LIB_DEEPMD_OP_DEVICE "deepmd_op_cuda")
elseif(USE_ROCM_TOOLKIT)
set(LIB_DEEPMD_OP_DEVICE "deepmd_op_rocm")
@@ -260,6 +353,33 @@ if(BUILD_CPP_IF)
endif()
endif(BUILD_CPP_IF)

# if(WIN32)
# if(USE_TENSORRT)
# add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
# ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
# COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
# ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
# )
# endif()
# if(WITH_MKL)
# add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
# COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
# COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
# )
# else()
# add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
# )
# endif()
# if(NOT WITH_STATIC_LIB)
# add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
# )
# endif()
# endif()

# uninstall target
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
14 changes: 14 additions & 0 deletions source/api_c/src/c_api.cc
@@ -54,11 +54,25 @@ DP_DeepPot* DP_NewDeepPotWithParam2(const char* c_model,
const char* c_file_content,
const int size_file_content) {
std::string model(c_model);
printf("==>> [DP_NewDeepPotWithParam2]\n");
std::string file_content(c_file_content, c_file_content + size_file_content);
DP_NEW_OK(DP_DeepPot, deepmd::DeepPot dp(model, gpu_rank, file_content);
DP_DeepPot* new_dp = new DP_DeepPot(dp); return new_dp;)
}

// DP_DeepPot* DP_NewDeepPotWithParam3(const char* c_pdmodel_path,
// const char* c_pdiparams_path,
// const int gpu_rank,
// const char* c_file_content,
// const int size_file_content) {
// std::string pdmodel(c_pdmodel_path);
// std::string pdiparams(c_pdiparams_path);
// printf("==>> [DP_NewDeepPotWithParam3 Paddle ver]\n");
// std::string file_content(c_file_content, c_file_content + size_file_content);
// DP_NEW_OK(DP_DeepPot, deepmd::DeepPot dp(pdmodel, pdiparams, gpu_rank, file_content);
// DP_DeepPot* new_dp = new DP_DeepPot(dp); return new_dp;)
// }

DP_DeepPotModelDevi::DP_DeepPotModelDevi() {}
DP_DeepPotModelDevi::DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp)
: dp(dp) {
6 changes: 6 additions & 0 deletions source/api_cc/include/DeepPot.h
@@ -291,12 +291,18 @@ class DeepPot {
void get_type_map(std::string& type_map);

private:
std::shared_ptr<paddle_infer::Predictor> predictor = nullptr;
paddle_infer::Config config;
int math_lib_num_threads;
tensorflow::Session* session;
int num_intra_nthreads, num_inter_nthreads;
tensorflow::GraphDef* graph_def;
bool inited;
template <class VT>
VT get_scalar(const std::string& name) const;
template <class VT>
VT paddle_get_scalar(const std::string& name) const;

// VALUETYPE get_rcut () const;
// int get_ntypes () const;
double rcut;
50 changes: 50 additions & 0 deletions source/api_cc/include/common.h
@@ -14,6 +14,7 @@
#else
#include "tf_public.h"
#endif
#include "paddle/include/paddle_inference_api.h"

namespace deepmd {

@@ -191,6 +192,16 @@ VT session_get_scalar(tensorflow::Session* session,
const std::string name,
const std::string scope = "");

/**
* @brief Get the value of a tensor.
* @param[in] predictor Paddle inference predictor.
* @param[in] name The name of the tensor.
* @return The value of the tensor.
**/
template <typename VT>
VT predictor_get_scalar(const std::shared_ptr<paddle_infer::Predictor>& predictor,
const std::string name_);

/**
* @brief Get the vector of a tensor.
* @param[out] o_vec The output vector.
@@ -215,6 +226,16 @@ int session_get_dtype(tensorflow::Session* session,
const std::string name,
const std::string scope = "");

/**
* @brief Get the type of a tensor.
* @param[in] predictor Paddle inference predictor.
* @param[in] name The name of the tensor.
* @return The type of the tensor.
**/
paddle_infer::DataType predictor_get_dtype(
const std::shared_ptr<paddle_infer::Predictor>& predictor,
const std::string& name_);

/**
* @brief Get input tensors.
* @param[out] input_tensors Input tensors.
@@ -270,6 +291,35 @@ int session_input_tensors(
const int ago,
const std::string scope = "");

/**
* @brief Send input data into paddle tensor handles.
* @param[in] predictor The paddle predictor pointer.
* @param[in] dcoord_ Coordinates of atoms.
* @param[in] ntypes Number of atom types.
* @param[in] datype_ Atom types.
* @param[in] dbox Box of the simulation cell.
* @param[in] dlist Neighbor list.
* @param[in] fparam_ Frame parameters.
* @param[in] aparam_ Atom parameters.
* @param[in] atommap Atom map.
* @param[in] nghost Number of ghost atoms.
* @param[in] ago Update the internal neighbor list if ago is 0.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
int predictor_input_tensors(
const std::shared_ptr<paddle_infer::Predictor>& predictor,
const std::vector<VALUETYPE>& dcoord_,
const int& ntypes,
const std::vector<int>& datype_,
const std::vector<VALUETYPE>& dbox,
InputNlist& dlist,
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam_,
const deepmd::AtomMap& atommap,
const int nghost,
const int ago,
const std::string scope = "");

/**
* @brief Get input tensors for mixed type.
* @param[out] input_tensors Input tensors.
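Note: predictor_get_scalar is declared above to read named scalars such as the cutoff radius back out of the Paddle predictor on the C++ side. The Python half of that round trip can be sketched with paddle.jit.load, assuming the loaded TranslatedLayer still exposes the registered buffers through its state dict (a plausible but unverified assumption; "./toy_model" is the path from the earlier save sketch):

import paddle

model = paddle.jit.load("./toy_model")

# Registered buffers travel with the state dict, so a name-based
# lookup can recover scalars that never participate in forward().
for name, value in model.state_dict().items():
    if "buffer_" in name:
        print(name, float(value))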
