Skip to content

Commit

Permalink
make the new argument in the end
Browse files Browse the repository at this point in the history
Signed-off-by: Jinzhe Zeng <[email protected]>
  • Loading branch information
njzjz committed Sep 13, 2023
1 parent 4fa5f60 commit 8ba972f
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 31 deletions.
14 changes: 7 additions & 7 deletions source/api_cc/include/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -227,9 +227,9 @@ int session_get_dtype(tensorflow::Session* session,
* @param[in] fparam_ Frame parameters.
* @param[in] aparam_ Atom parameters.
* @param[in] atommap Atom map.
* @param[in] scope The scope of the tensors.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
int session_input_tensors(
Expand Down Expand Up @@ -257,9 +257,9 @@ int session_input_tensors(
* @param[in] atommap Atom map.
* @param[in] nghost Number of ghost atoms.
* @param[in] ago Update the internal neighbour list if ago is 0.
* @param[in] scope The scope of the tensors.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
int session_input_tensors(
Expand All @@ -274,8 +274,8 @@ int session_input_tensors(
const deepmd::AtomMap& atommap,
const int nghost,
const int ago,
const bool aparam_nall = false,
const std::string scope = "");
const std::string scope = "",
const bool aparam_nall = false);

/**
* @brief Get input tensors for mixed type.
Expand All @@ -290,9 +290,9 @@ int session_input_tensors(
* @param[in] atommap Atom map.
* @param[in] nghost Number of ghost atoms.
* @param[in] ago Update the internal neighbour list if ago is 0.
* @param[in] scope The scope of the tensors.
* @param[in] aparam_nall Whether the atomic dimension of atomic parameters is
* nall.
* @param[in] scope The scope of the tensors.
*/
template <typename MODELTYPE, typename VALUETYPE>
int session_input_tensors_mixed_type(
Expand All @@ -306,8 +306,8 @@ int session_input_tensors_mixed_type(
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam_,
const deepmd::AtomMap& atommap,
const bool aparam_nall = false,
const std::string scope = "");
const std::string scope = "",
const bool aparam_nall = false);

/**
* @brief Read model file to a string.
Expand Down
36 changes: 18 additions & 18 deletions source/api_cc/src/DeepPot.cc
Original file line number Diff line number Diff line change
Expand Up @@ -591,14 +591,14 @@ void DeepPot::compute(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int ret = session_input_tensors<double>(input_tensors, dcoord_, ntypes,
datype_, dbox, cell_size, fparam,
aparam, atommap, aparam_nall);
aparam, atommap, "", aparam_nall);
assert(ret == nloc);
run_model<double>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes);
} else {
int ret = session_input_tensors<float>(input_tensors, dcoord_, ntypes,
datype_, dbox, cell_size, fparam,
aparam, atommap, aparam_nall);
aparam, atommap, "", aparam_nall);
assert(ret == nloc);
run_model<float>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes);
Expand Down Expand Up @@ -768,14 +768,14 @@ void DeepPot::compute_inner(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int ret = session_input_tensors<double>(
input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam,
atommap, nghost, ago, aparam_nall);
atommap, nghost, ago, "", aparam_nall);
assert(nloc == ret);
run_model<double>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes, nghost);
} else {
int ret = session_input_tensors<float>(input_tensors, dcoord_, ntypes,
datype_, dbox, nlist, fparam, aparam,
atommap, nghost, ago, aparam_nall);
int ret = session_input_tensors<float>(
input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam,
atommap, nghost, ago, "", aparam_nall);
assert(nloc == ret);
run_model<float>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes, nghost);
Expand Down Expand Up @@ -857,13 +857,13 @@ void DeepPot::compute(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int nloc = session_input_tensors<double>(input_tensors, dcoord_, ntypes,
datype_, dbox, cell_size, fparam,
aparam, atommap, aparam_nall);
aparam, atommap, "", aparam_nall);
run_model<double>(dener, dforce_, dvirial, datom_energy_, datom_virial_,
session, input_tensors, atommap, nframes);
} else {
int nloc = session_input_tensors<float>(input_tensors, dcoord_, ntypes,
datype_, dbox, cell_size, fparam,
aparam, atommap, aparam_nall);
aparam, atommap, "", aparam_nall);
run_model<float>(dener, dforce_, dvirial, datom_energy_, datom_virial_,
session, input_tensors, atommap, nframes);
}
Expand Down Expand Up @@ -963,14 +963,14 @@ void DeepPot::compute(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int ret = session_input_tensors<double>(
input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam,
atommap, nghost_real, ago, aparam_nall);
atommap, nghost_real, ago, "", aparam_nall);
assert(nloc_real == ret);
run_model<double>(dener, dforce, dvirial, datom_energy, datom_virial,
session, input_tensors, atommap, nframes, nghost_real);
} else {
int ret = session_input_tensors<float>(
input_tensors, dcoord, ntypes, datype, dbox, nlist, fparam, aparam,
atommap, nghost_real, ago, aparam_nall);
atommap, nghost_real, ago, "", aparam_nall);
assert(nloc_real == ret);
run_model<float>(dener, dforce, dvirial, datom_energy, datom_virial,
session, input_tensors, atommap, nframes, nghost_real);
Expand Down Expand Up @@ -1075,14 +1075,14 @@ void DeepPot::compute_mixed_type(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int ret = session_input_tensors_mixed_type<double>(
input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size,
fparam, aparam, atommap, aparam_nall);
fparam, aparam, atommap, "", aparam_nall);
assert(ret == nloc);
run_model<double>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes);
} else {
int ret = session_input_tensors_mixed_type<float>(
input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size,
fparam, aparam, atommap, aparam_nall);
fparam, aparam, atommap, "", aparam_nall);
assert(ret == nloc);
run_model<float>(dener, dforce_, dvirial, session, input_tensors, atommap,
nframes);
Expand Down Expand Up @@ -1161,13 +1161,13 @@ void DeepPot::compute_mixed_type(ENERGYVTYPE& dener,
if (dtype == tensorflow::DT_DOUBLE) {
int nloc = session_input_tensors_mixed_type<double>(
input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size,
fparam, aparam, atommap, aparam_nall);
fparam, aparam, atommap, "", aparam_nall);
run_model<double>(dener, dforce_, dvirial, datom_energy_, datom_virial_,
session, input_tensors, atommap, nframes);
} else {
int nloc = session_input_tensors_mixed_type<float>(
input_tensors, nframes, dcoord_, ntypes, datype_, dbox, cell_size,
fparam, aparam, atommap, aparam_nall);
fparam, aparam, atommap, "", aparam_nall);
run_model<float>(dener, dforce_, dvirial, datom_energy_, datom_virial_,
session, input_tensors, atommap, nframes);
}
Expand Down Expand Up @@ -1481,11 +1481,11 @@ void DeepPotModelDevi::compute(std::vector<ENERGYTYPE>& all_energy,
if (dtype == tensorflow::DT_DOUBLE) {
ret = session_input_tensors<double>(input_tensors, dcoord, ntypes, datype,
dbox, nlist, fparam, aparam, atommap,
nghost_real, ago, aparam_nall);
nghost_real, ago, "", aparam_nall);
} else {
ret = session_input_tensors<float>(input_tensors, dcoord, ntypes, datype,
dbox, nlist, fparam, aparam, atommap,
nghost_real, ago, aparam_nall);
nghost_real, ago, "", aparam_nall);
}
all_energy.resize(numb_models);
all_force.resize(numb_models);
Expand Down Expand Up @@ -1579,11 +1579,11 @@ void DeepPotModelDevi::compute(
if (dtype == tensorflow::DT_DOUBLE) {
ret = session_input_tensors<double>(input_tensors, dcoord, ntypes, datype,
dbox, nlist, fparam, aparam, atommap,
nghost_real, ago, aparam_nall);
nghost_real, ago, "", aparam_nall);
} else {
ret = session_input_tensors<float>(input_tensors, dcoord, ntypes, datype,
dbox, nlist, fparam, aparam, atommap,
nghost_real, ago, aparam_nall);
nghost_real, ago, "", aparam_nall);
}

all_energy.resize(numb_models);
Expand Down
12 changes: 6 additions & 6 deletions source/api_cc/src/common.cc
Original file line number Diff line number Diff line change
Expand Up @@ -374,8 +374,8 @@ int deepmd::session_input_tensors(
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam__,
const deepmd::AtomMap& atommap,
const bool aparam_nall,
const std::string scope) {
const std::string scope,
const bool aparam_nall) {
int nframes = dcoord_.size() / 3 / datype_.size();
int nall = datype_.size();
int nloc = nall;
Expand Down Expand Up @@ -514,8 +514,8 @@ int deepmd::session_input_tensors(
const deepmd::AtomMap& atommap,
const int nghost,
const int ago,
const bool aparam_nall,
const std::string scope) {
const std::string scope,
const bool aparam_nall) {
int nframes = dcoord_.size() / 3 / datype_.size();
int nall = datype_.size();
int nloc = nall - nghost;
Expand Down Expand Up @@ -651,8 +651,8 @@ int deepmd::session_input_tensors_mixed_type(
const std::vector<VALUETYPE>& fparam_,
const std::vector<VALUETYPE>& aparam__,
const deepmd::AtomMap& atommap,
const bool aparam_nall,
const std::string scope) {
const std::string scope,
const bool aparam_nall) {
int nall = datype_.size() / nframes;
int nloc = nall;
assert(nall * 3 * nframes == dcoord_.size());
Expand Down

0 comments on commit 8ba972f

Please sign in to comment.