Skip to content

Commit

Permalink
support dimension of aparam to be nall
Browse files Browse the repository at this point in the history
Signed-off-by: Jinzhe Zeng <[email protected]>
  • Loading branch information
njzjz committed Sep 13, 2023
1 parent 445ec23 commit 4fa5f60
Show file tree
Hide file tree
Showing 10 changed files with 142 additions and 55 deletions.
4 changes: 3 additions & 1 deletion deepmd/descriptor/se_a_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,10 +301,12 @@ def build(
dstd = self.dstd

"""
``aparam'' shape is [nframes, natoms]
``aparam'' shape is [nframes, nall]
aparam[:, :] is the real/virtual sign for each atom.
"""
aparam = input_dict["aparam"]
with tf.variable_scope("fitting_attr" + suffix, reuse=reuse):
t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool)
self.mask = tf.cast(aparam, tf.int32)
self.mask = tf.reshape(self.mask, [-1, natoms[1]])

Expand Down
1 change: 1 addition & 0 deletions deepmd/entrypoints/freeze.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ def _make_node_names(
"spin_attr/ntypes_spin",
"fitting_attr/dfparam",
"fitting_attr/daparam",
"fitting_attr/aparam_nall",
]
elif model_type == "dos":
nodes += [
Expand Down
1 change: 1 addition & 0 deletions deepmd/model/pairwise_dprc.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ def build(
with tf.variable_scope("fitting_attr" + suffix, reuse=reuse):
t_dfparam = tf.constant(0, name="dfparam", dtype=tf.int32)
t_daparam = tf.constant(1, name="daparam", dtype=tf.int32)
t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool)
with tf.variable_scope("descrpt_attr" + suffix, reuse=reuse):
t_ntypes = tf.constant(self.ntypes, name="ntypes", dtype=tf.int32)
t_rcut = tf.constant(
Expand Down
2 changes: 2 additions & 0 deletions source/api_c/include/c_api_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ struct DP_DeepPot {
std::string exception;
int dfparam;
int daparam;
bool aparam_nall;
};

struct DP_DeepPotModelDevi {
Expand All @@ -51,6 +52,7 @@ struct DP_DeepPotModelDevi {
std::string exception;
int dfparam;
int daparam;
bool aparam_nall;
};

struct DP_DeepTensor {
Expand Down
8 changes: 6 additions & 2 deletions source/api_c/include/deepmd.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1051,7 +1051,9 @@ class DeepPot {
}

if (aparam.size() != daparam * nloc &&
aparam.size() != nframes * daparam * nloc) {
aparam.size() != nframes * daparam * nloc &&
aparam.size() != daparam * nall &&
aparam.size() != nframes * daparam * nall) {
throw deepmd::hpp::deepmd_exception(
"the dim of atom parameter provided is not consistent with what the "
"model uses");
Expand Down Expand Up @@ -1460,7 +1462,9 @@ class DeepPotModelDevi {
}

if (aparam.size() != daparam * nloc &&
aparam.size() != nframes * daparam * nloc) {
aparam.size() != nframes * daparam * nloc &&
aparam.size() != daparam * nall &&
aparam.size() != nframes * daparam * nall) {
throw deepmd::hpp::deepmd_exception(
"the dim of atom parameter provided is not consistent with what the "
"model uses");
Expand Down
11 changes: 9 additions & 2 deletions source/api_c/src/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ DP_DeepPot::DP_DeepPot() {}
DP_DeepPot::DP_DeepPot(deepmd::DeepPot& dp) : dp(dp) {
dfparam = dp.dim_fparam();
daparam = dp.dim_aparam();
aparam_nall = dp.is_aparam_nall();
}

DP_DeepPot* DP_NewDeepPot(const char* c_model) {
Expand Down Expand Up @@ -65,6 +66,7 @@ DP_DeepPotModelDevi::DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp)
: dp(dp) {
dfparam = dp.dim_fparam();
daparam = dp.dim_aparam();
aparam_nall = dp.is_aparam_nall();
}

DP_DeepPotModelDevi* DP_NewDeepPotModelDevi(const char** c_models,
Expand Down Expand Up @@ -249,7 +251,10 @@ inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp,
}
std::vector<VALUETYPE> aparam_;
if (aparam) {
aparam_.assign(aparam, aparam + nframes * (natoms - nghost) * dp->daparam);
aparam_.assign(aparam,
aparam + nframes *
(dp->aparam_nall ? natoms : (natoms - nghost)) *
dp->daparam);
}
std::vector<double> e;
std::vector<VALUETYPE> f, v, ae, av;
Expand Down Expand Up @@ -433,7 +438,9 @@ void DP_DeepPotModelDeviComputeNList_variant(DP_DeepPotModelDevi* dp,
}
std::vector<VALUETYPE> aparam_;
if (aparam) {
aparam_.assign(aparam, aparam + (natoms - nghost) * dp->daparam);
aparam_.assign(
aparam,
aparam + (dp->aparam_nall ? natoms : (natoms - nghost)) * dp->daparam);
}
// different from DeepPot
std::vector<double> e;
Expand Down
21 changes: 21 additions & 0 deletions source/api_cc/include/DeepPot.h
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,16 @@ class DeepPot {
**/
void get_type_map(std::string& type_map);

/**
* @brief Get whether the atom dimension of aparam is nall instead of nloc.
* @return Whether the atom dimension of aparam is nall instead of nloc.
**/
bool is_aparam_nall() const {
assert(inited);
return aparam_nall;
};

private:
tensorflow::Session* session;
int num_intra_nthreads, num_inter_nthreads;
Expand All @@ -309,6 +319,7 @@ class DeepPot {
int ntypes_spin;
int dfparam;
int daparam;
bool aparam_nall;
/**
* @brief Validate the size of frame and atomic parameters.
* @param[in] nframes The number of frames.
Expand Down Expand Up @@ -572,6 +583,15 @@ class DeepPotModelDevi {
void compute_relative_std_f(std::vector<VALUETYPE>& std,
const std::vector<VALUETYPE>& avg,
const VALUETYPE eps);
/**
* @brief Get whether the atom dimension of aparam is nall instead of nloc.
* @return Whether the atom dimension of aparam is nall instead of nloc.
**/
bool is_aparam_nall() const {
assert(inited);
return aparam_nall;
};

private:
unsigned numb_models;
Expand All @@ -592,6 +612,7 @@ class DeepPotModelDevi {
int ntypes_spin;
int dfparam;
int daparam;
bool aparam_nall;
template <typename VALUETYPE>
void validate_fparam_aparam(const int& nloc,
const std::vector<VALUETYPE>& fparam,
Expand Down
9 changes: 9 additions & 0 deletions source/api_cc/include/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,8 @@ int session_get_dtype(tensorflow::Session* session,
* @param[in] fparam_ Frame parameters.
* @param[in] aparam_ Atom parameters.
* @param[in] atommap Atom map.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
Expand All @@ -240,6 +242,7 @@ int session_input_tensors(
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam_,
const deepmd::AtomMap& atommap,
const bool aparam_nall = false,
const std::string scope = "");

/**
Expand All @@ -254,6 +257,8 @@ int session_input_tensors(
* @param[in] atommap Atom map.
* @param[in] nghost Number of ghost atoms.
* @param[in] ago Update the internal neighbour list if ago is 0.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
Expand All @@ -269,6 +274,7 @@ int session_input_tensors(
const deepmd::AtomMap& atommap,
const int nghost,
const int ago,
const bool aparam_nall = false,
const std::string scope = "");

/**
Expand All @@ -284,6 +290,8 @@ int session_input_tensors(
* @param[in] atommap Atom map.
* @param[in] nghost Number of ghost atoms.
* @param[in] ago Update the internal neighbour list if ago is 0.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
Expand All @@ -298,6 +306,7 @@ int session_input_tensors_mixed_type(
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam_,
const deepmd::AtomMap& atommap,
const bool aparam_nall = false,
const std::string scope = "");

/**
Expand Down
Loading

0 comments on commit 4fa5f60

Please sign in to comment.