
Commit f1100e4
Update water/se_e2_a + LAMMPS code
HydrogenSulfate committed Sep 23, 2024
1 parent 2b4832a commit f1100e4
Showing 16 changed files with 779 additions and 471 deletions.
21 changes: 12 additions & 9 deletions deepmd/pd/entrypoints/main.py
@@ -53,6 +53,7 @@
)
from deepmd.pd.utils.env import (
DEVICE,
PIR_ENABLED,
)
from deepmd.pd.utils.finetune import (
get_finetune_rules,
@@ -349,17 +350,20 @@ def freeze(FLAGS):
)

"""
** coord [None, 192, 3] paddle.float64
** atype [None, 192] paddle.int64
** box [None, 3, 3] paddle.float64
** coord [None, natoms, 3] paddle.float64
** atype [None, natoms] paddle.int64
** nlist [None, natoms, nnei] paddle.int32
"""
model.atomic_model.buffer_type_map.set_value(
paddle.to_tensor([ord(c) for c in model.atomic_model.type_map], dtype="int32")
)
model = paddle.jit.to_static(
model,
model.forward_lower,
full_graph=True,
input_spec=[
InputSpec([None, 192, 3], dtype="float64", name="coord"),
InputSpec([None, 192], dtype="int64", name="atype"),
InputSpec([None, 3, 3], dtype="float64", name="box"),
InputSpec([-1, -1, 3], dtype="float64", name="coord"),
InputSpec([-1, -1], dtype="int32", name="atype"),
InputSpec([-1, -1, -1], dtype="int32", name="nlist"),
],
)
extra_files = {}
@@ -369,8 +373,7 @@ def freeze(FLAGS):
skip_prune_program=True,
# extra_files,
)
pir_flag = os.getenv("FLAGS_enable_pir_api", "false")
suffix = "json" if pir_flag.lower() in ["true", "1"] else "pdmodel"
suffix = "json" if PIR_ENABLED.lower() in ["true", "1"] else "pdmodel"
log.info(
f"Paddle inference model has been exported to: {FLAGS.output}.{suffix}(.pdiparams)"
)
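The freeze hunk above swaps the fixed 192-atom `InputSpec` shapes for fully dynamic ones, registers the type map as a buffer, and exports `forward_lower` (coord/atype/nlist) instead of the box-based signature. A minimal sketch of exporting a Paddle layer with dynamic `InputSpec` shapes, mirroring the call in the diff; the `ToyModel` layer and output path are hypothetical, not part of this commit:

```python
import paddle
from paddle.static import InputSpec


class ToyModel(paddle.nn.Layer):
    """Stand-in for the DeepMD model; only the input signature matters here."""

    def forward(self, coord, atype, nlist):
        # A trivial reduction so tracing has something to record.
        return (
            paddle.sum(coord)
            + paddle.cast(paddle.sum(atype), "float64")
            + paddle.cast(paddle.sum(nlist), "float64")
        )


model = ToyModel()
static_model = paddle.jit.to_static(
    model,
    full_graph=True,
    input_spec=[
        # -1 marks a dimension whose size is only known at inference time.
        InputSpec([-1, -1, 3], dtype="float64", name="coord"),
        InputSpec([-1, -1], dtype="int32", name="atype"),
        InputSpec([-1, -1, -1], dtype="int32", name="nlist"),
    ],
)
paddle.jit.save(static_model, "./toy_frozen")  # writes .json/.pdmodel + .pdiparams
```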
5 changes: 5 additions & 0 deletions deepmd/pd/model/atomic_model/dp_atomic_model.py
@@ -58,6 +58,11 @@ def __init__(
super().__init__(type_map, **kwargs)
ntypes = len(type_map)
self.type_map = type_map
self.register_buffer(
"buffer_type_map",
paddle.to_tensor([ord(c) for c in self.type_map], dtype="int32"),
)
self.buffer_type_map.name = "type_map"
self.ntypes = ntypes
self.descriptor = descriptor
self.rcut = self.descriptor.get_rcut()
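The new `buffer_type_map` stores the element symbols as int32 code points so the type map is carried inside the exported inference model. A small sketch of the encode/decode round trip for a water type map, assuming single-character element symbols; the decode side is illustrative, not code from this commit:

```python
import paddle

# Water type map: each element symbol is a single character, so ord() per entry works.
type_map = ["O", "H"]
buffer_type_map = paddle.to_tensor([ord(c) for c in type_map], dtype="int32")

# After loading the exported model, the symbols can be recovered with chr().
decoded = [chr(int(v)) for v in buffer_type_map.numpy()]
assert decoded == type_map
```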
7 changes: 3 additions & 4 deletions deepmd/pd/model/model/make_model.py
@@ -429,11 +429,10 @@ def _format_nlist(
axis=-1,
)

# if n_nnei > nnei or extra_nlist_sort:
if False:
if True: # TODO: Fix controlflow + backward in PIR static graph
n_nf, n_nloc, n_nnei = nlist.shape
m_real_nei = nlist >= 0
nlist = paddle.where(m_real_nei, nlist, 0)
nlist = paddle.where(m_real_nei, nlist, paddle.zeros_like(nlist))
# nf x nloc x 3
coord0 = extended_coord[:, :n_nloc, :]
# nf x (nloc x nnei) x 3
@@ -450,7 +449,7 @@
paddle.argsort(rr, axis=-1),
)
nlist = aux.take_along_axis(nlist, axis=2, indices=nlist_mapping)
nlist = paddle.where(rr > rcut, -1, nlist)
nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist)
nlist = nlist[..., :nnei]
else: # not extra_nlist_sort and n_nnei <= nnei:
pass # great!
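The `_format_nlist` hunk forces the sort-and-truncate branch on (the TODO notes that control flow plus backward is not yet supported in the PIR static graph) and replaces scalar fill values with `zeros_like`/`full_like` tensors. A toy sketch of that masking pattern on a hand-written neighbor list; shapes and values are illustrative only:

```python
import paddle

# Toy neighbor list: 1 frame, 2 local atoms, 4 neighbor slots; -1 marks "no neighbor".
nlist = paddle.to_tensor([[[3, -1, 1, -1], [0, 2, -1, -1]]], dtype="int32")

# Replace the -1 padding with index 0 so the list can be used for gathering ...
m_real_nei = nlist >= 0
safe_nlist = paddle.where(m_real_nei, nlist, paddle.zeros_like(nlist))

# ... and restore the -1 padding afterwards, using a tensor fill value.
restored = paddle.where(m_real_nei, safe_nlist, paddle.full_like(safe_nlist, -1))
print(restored.numpy())
```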
1 change: 1 addition & 0 deletions deepmd/pd/utils/env.py
@@ -67,6 +67,7 @@
}
assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISON_DICT.keys())
DEFAULT_PRECISION = "float64"
PIR_ENABLED = os.getenv("FLAGS_enable_pir_api", "false")

# throw warnings if threads not set
set_default_nthreads()
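`PIR_ENABLED` caches the `FLAGS_enable_pir_api` environment variable at import time. A minimal sketch of how the flag selects the export suffix, mirroring the `freeze` change above; the printed file name is illustrative:

```python
import os

PIR_ENABLED = os.getenv("FLAGS_enable_pir_api", "false")
# PIR builds export a .json program description; the legacy format is .pdmodel.
suffix = "json" if PIR_ENABLED.lower() in ["true", "1"] else "pdmodel"
print(f"model would be saved as frozen_model.{suffix} (+ .pdiparams)")
```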
34 changes: 19 additions & 15 deletions deepmd/pd/utils/nlist.py
@@ -318,7 +318,7 @@ def nlist_distinguish_types(
for ii, ss in enumerate(sel):
# nloc x s(nsel)
# to int because bool cannot be sort on GPU
pick_mask = (tnlist == ii).to(paddle.int32)
pick_mask = (tnlist == ii).to(paddle.int64)
# nloc x s(nsel), stable sort, nearer neighbors first
pick_mask, imap = (
paddle.sort(pick_mask, axis=-1, descending=True, stable=True),
@@ -477,32 +477,36 @@ def extend_coord_with_ghosts(
nbuff = paddle.ceil(rcut / to_face).to(paddle.int64)
# 3
nbuff = paddle.amax(nbuff, axis=0) # faster than paddle.max
nbuff_cpu = nbuff.cpu()
# nbuff_cpu = nbuff.cpu()
xi = (
paddle.arange(-nbuff_cpu[0], nbuff_cpu[0] + 1, 1)
.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION)
.cpu()
paddle.arange(-nbuff[0], nbuff[0] + 1, 1).to(
dtype=env.GLOBAL_PD_FLOAT_PRECISION
)
# .cpu()
) # pylint: disable=no-explicit-dtype
yi = (
paddle.arange(-nbuff_cpu[1], nbuff_cpu[1] + 1, 1)
.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION)
.cpu()
paddle.arange(-nbuff[1], nbuff[1] + 1, 1).to(
dtype=env.GLOBAL_PD_FLOAT_PRECISION
)
# .cpu()
) # pylint: disable=no-explicit-dtype
zi = (
paddle.arange(-nbuff_cpu[2], nbuff_cpu[2] + 1, 1)
.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION)
.cpu()
paddle.arange(-nbuff[2], nbuff[2] + 1, 1).to(
dtype=env.GLOBAL_PD_FLOAT_PRECISION
)
# .cpu()
) # pylint: disable=no-explicit-dtype
eye_3 = (
paddle.eye(3, dtype=env.GLOBAL_PD_FLOAT_PRECISION)
.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION)
.cpu()
paddle.eye(3, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to(
dtype=env.GLOBAL_PD_FLOAT_PRECISION
)
# .cpu()
)
xyz = xi.reshape([-1, 1, 1, 1]) * eye_3[0]
xyz = xyz + yi.reshape([1, -1, 1, 1]) * eye_3[1]
xyz = xyz + zi.reshape([1, 1, -1, 1]) * eye_3[2]
xyz = xyz.reshape([-1, 3])
xyz = xyz.to(device=device)
# xyz = xyz.to(device=device)
# ns x 3
# shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))]
shift_idx = xyz[paddle.argsort(aux.norm(xyz, axis=1))]
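The `extend_coord_with_ghosts` hunk comments out the `.cpu()` round trips so the shift grid is built on whatever device the inputs already live on, which traces more cleanly into the static graph. A standalone sketch of the shift-grid construction, with a constant `nbuff` chosen only for illustration:

```python
import paddle

nbuff = [1, 1, 1]  # number of periodic images to pad along each lattice vector
xi = paddle.arange(-nbuff[0], nbuff[0] + 1, 1).astype("float64")
yi = paddle.arange(-nbuff[1], nbuff[1] + 1, 1).astype("float64")
zi = paddle.arange(-nbuff[2], nbuff[2] + 1, 1).astype("float64")
eye_3 = paddle.eye(3, dtype="float64")

# Outer-sum the three 1-D ranges into an (nx*ny*nz, 3) grid of lattice shifts.
xyz = xi.reshape([-1, 1, 1, 1]) * eye_3[0]
xyz = xyz + yi.reshape([1, -1, 1, 1]) * eye_3[1]
xyz = xyz + zi.reshape([1, 1, -1, 1]) * eye_3[2]
shift_idx = xyz.reshape([-1, 3])
print(shift_idx.shape)  # [27, 3] for nbuff = [1, 1, 1]
```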
11 changes: 7 additions & 4 deletions deepmd/pd/utils/region.py
@@ -25,11 +25,14 @@ def phys2inter(
the internal coordinates
"""
try:
if paddle.in_dynamic_mode():
try:
rec_cell = paddle.linalg.inv(cell)
except Exception as e:
rec_cell = paddle.full_like(cell, float("nan"))
rec_cell.stop_gradient = cell.stop_gradient
else:
rec_cell = paddle.linalg.inv(cell)
except Exception:
rec_cell = paddle.full_like(cell, float("nan"))
rec_cell.stop_gradient = False
return paddle.matmul(coord, rec_cell)


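The `phys2inter` hunk keeps the `try/except` fallback only in dynamic (eager) mode, since Python exception handling cannot be captured in the traced static graph. A compact sketch of the same branching around `paddle.linalg.inv`; the `safe_inverse` helper and toy cell are hypothetical, not the library function:

```python
import paddle


def safe_inverse(cell: paddle.Tensor) -> paddle.Tensor:
    """Invert a (batched) 3x3 cell; fall back to NaN only in eager mode."""
    if paddle.in_dynamic_mode():
        try:
            return paddle.linalg.inv(cell)
        except Exception:
            nan_cell = paddle.full_like(cell, float("nan"))
            nan_cell.stop_gradient = cell.stop_gradient
            return nan_cell
    # In static/traced mode the exception path is not representable; call inv directly.
    return paddle.linalg.inv(cell)


cell = paddle.eye(3, dtype="float64").reshape([1, 3, 3])
print(safe_inverse(cell))
```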
1 change: 0 additions & 1 deletion source/api_c/include/deepmd.hpp
@@ -685,7 +685,6 @@ class DeepPot {
<< std::endl;
return;
}
std::cout << "** [deepmd.hpp] DeepPot.init" << std::endl;
dp = DP_NewDeepPotWithParam2(model.c_str(), gpu_rank, file_content.c_str(),
file_content.size());
DP_CHECK_OK(DP_DeepPotCheckOK, dp);
2 changes: 1 addition & 1 deletion source/api_c/src/c_api.cc
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: LGPL-3.0-or-later
#include "c_api.h"

#include <numeric>
#include <string>
@@ -10,6 +9,7 @@
#include "DeepTensor.h"
#include "c_api_internal.h"
#include "common.h"
// #include "/workspace/hesensen/deepmd_backend/deepmd_paddle_new/source/api_c/include/c_api.h"

extern "C" {

4 changes: 2 additions & 2 deletions source/api_cc/CMakeLists.txt
@@ -24,8 +24,8 @@ if(ENABLE_PYTORCH
target_compile_definitions(${libname} PRIVATE BUILD_PYTORCH)
endif()
if(ENABLE_PADDLE AND NOT BUILD_PY_IF)
target_link_libraries(${libname} PRIVATE "${PADDLE_LIBRARIES}")
target_compile_definitions(${libname} PRIVATE BUILD_PADDLE)
target_link_libraries(${libname} PUBLIC "${PADDLE_LIBRARIES}")
target_compile_definitions(${libname} PUBLIC BUILD_PADDLE)
endif()

target_include_directories(
50 changes: 25 additions & 25 deletions source/api_cc/include/DeepPotPD.h
@@ -142,17 +142,17 @@ class DeepPotPD : public DeepPotBase {
*same aparam.
* @param[in] atomic Whether to compute the atomic energy and virial.
**/
// template <typename VALUETYPE, typename ENERGYVTYPE>
// void compute_mixed_type(ENERGYVTYPE& ener,
// std::vector<VALUETYPE>& force,
// std::vector<VALUETYPE>& virial,
// const int& nframes,
// const std::vector<VALUETYPE>& coord,
// const std::vector<int>& atype,
// const std::vector<VALUETYPE>& box,
// const std::vector<VALUETYPE>& fparam,
// const std::vector<VALUETYPE>& aparam,
// const bool atomic);
template <typename VALUETYPE, typename ENERGYVTYPE>
void compute_mixed_type(ENERGYVTYPE& ener,
std::vector<VALUETYPE>& force,
std::vector<VALUETYPE>& virial,
const int& nframes,
const std::vector<VALUETYPE>& coord,
const std::vector<int>& atype,
const std::vector<VALUETYPE>& box,
const std::vector<VALUETYPE>& fparam,
const std::vector<VALUETYPE>& aparam,
const bool atomic);
/**
* @brief Evaluate the energy, force, and virial with the mixed type
*by using this DP.
@@ -178,19 +178,19 @@
*same aparam.
* @param[in] atomic Whether to compute the atomic energy and virial.
**/
// template <typename VALUETYPE, typename ENERGYVTYPE>
// void compute_mixed_type(ENERGYVTYPE& ener,
// std::vector<VALUETYPE>& force,
// std::vector<VALUETYPE>& virial,
// std::vector<VALUETYPE>& atom_energy,
// std::vector<VALUETYPE>& atom_virial,
// const int& nframes,
// const std::vector<VALUETYPE>& coord,
// const std::vector<int>& atype,
// const std::vector<VALUETYPE>& box,
// const std::vector<VALUETYPE>& fparam,
// const std::vector<VALUETYPE>& aparam,
// const bool atomic);
template <typename VALUETYPE, typename ENERGYVTYPE>
void compute_mixed_type(ENERGYVTYPE& ener,
std::vector<VALUETYPE>& force,
std::vector<VALUETYPE>& virial,
std::vector<VALUETYPE>& atom_energy,
std::vector<VALUETYPE>& atom_virial,
const int& nframes,
const std::vector<VALUETYPE>& coord,
const std::vector<int>& atype,
const std::vector<VALUETYPE>& box,
const std::vector<VALUETYPE>& fparam,
const std::vector<VALUETYPE>& aparam,
const bool atomic);

public:
/**
@@ -349,7 +349,7 @@ class DeepPotPD : public DeepPotBase {
int gpu_id = 0;
int do_message_passing = 0; // 1:dpa2 model 0:others
bool gpu_enabled = true;
int dtype = paddle_infer::DataType::FLOAT32;
int dtype = paddle_infer::DataType::FLOAT64;
// paddle::Tensor firstneigh_tensor;
// std::unordered_map<std::string, paddle::Tensor> comm_dict;
/**
17 changes: 7 additions & 10 deletions source/api_cc/src/DeepPot.cc
@@ -12,10 +12,9 @@
#ifdef BUILD_PYTORCH
#include "DeepPotPT.h"
#endif
// #define BUILD_PADDLE
// #ifdef BUILD_PADDLE
#ifdef BUILD_PADDLE
#include "DeepPotPD.h"
// #endif
#endif
#include "device.h"

using namespace deepmd;
@@ -34,7 +33,6 @@ DeepPot::~DeepPot() {}
void DeepPot::init(const std::string& model,
const int& gpu_rank,
const std::string& file_content) {
std::cout << "****** access here" << std::endl;
if (inited) {
std::cerr << "WARNING: deepmd-kit should not be initialized twice, do "
"nothing at the second call of initializer"
@@ -46,11 +44,10 @@ void DeepPot::init(const std::string& model,
backend = deepmd::DPBackend::PyTorch;
} else if (model.length() >= 3 && model.substr(model.length() - 3) == ".pb") {
backend = deepmd::DPBackend::TensorFlow;
// } else if (model.length() >= 3 && (model.substr(model.length() - 5) == ".json" || model.substr(model.length() - 8) == ".pdmodel")) {
} else if (true) {
} else if ((model.length() >= 5 && model.substr(model.length() - 5) == ".json") || (model.length() >= 8 && model.substr(model.length() - 8) == ".pdmodel")) {
backend = deepmd::DPBackend::Paddle;
} else {
throw deepmd::deepmd_exception("Unsupported model file formatt");
throw deepmd::deepmd_exception("Unsupported model file format");
}

if (deepmd::DPBackend::TensorFlow == backend) {
@@ -66,11 +63,11 @@
throw deepmd::deepmd_exception("PyTorch backend is not built");
#endif
} else if (deepmd::DPBackend::Paddle == backend) {
// #ifdef BUILD_PADDLE
#ifdef BUILD_PADDLE
dp = std::make_shared<deepmd::DeepPotPD>(model, gpu_rank, file_content);
// #else
#else
throw deepmd::deepmd_exception("Paddle backend is not built");
// #endif
#endif
} else {
throw deepmd::deepmd_exception("Unknown file type");
}