diff --git a/source/api_c/include/deepmd.hpp b/source/api_c/include/deepmd.hpp index 4a376e0bec..503a4c4b4b 100644 --- a/source/api_c/include/deepmd.hpp +++ b/source/api_c/include/deepmd.hpp @@ -644,8 +644,8 @@ class DeepPot { const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); + force.resize(static_cast<size_t>(nframes) * natoms * 3); + virial.resize(static_cast<size_t>(nframes) * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; std::vector<VALUETYPE> fparam_, aparam_; @@ -706,10 +706,10 @@ class DeepPot { const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); - atom_energy.resize(nframes * natoms); - atom_virial.resize(nframes * natoms * 9); + force.resize(static_cast<size_t>(nframes) * natoms * 3); + virial.resize(static_cast<size_t>(nframes) * 9); + atom_energy.resize(static_cast<size_t>(nframes) * natoms); + atom_virial.resize(static_cast<size_t>(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; VALUETYPE *atomic_ener_ = &atom_energy[0]; @@ -774,8 +774,8 @@ class DeepPot { const VALUETYPE *box_ = !box.empty() ?
&box[0] : nullptr; const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); + force.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; std::vector fparam_, aparam_; @@ -845,10 +845,10 @@ class DeepPot { const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); - atom_energy.resize(nframes * natoms); - atom_virial.resize(nframes * natoms * 9); + force.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; VALUETYPE *atomic_ener_ = &atom_energy[0]; @@ -910,8 +910,8 @@ class DeepPot { const VALUETYPE *box_ = !box.empty() ? 
&box[0] : nullptr; const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); + force.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; std::vector fparam_, aparam_; @@ -972,10 +972,10 @@ class DeepPot { const int *atype_ = &atype[0]; double *ener_ = _DP_Get_Energy_Pointer(ener, nframes); - force.resize(nframes * natoms * 3); - virial.resize(nframes * 9); - atom_energy.resize(nframes * natoms); - atom_virial.resize(nframes * natoms * 9); + force.resize(static_cast(nframes) * natoms * 3); + virial.resize(static_cast(nframes) * 9); + atom_energy.resize(static_cast(nframes) * natoms); + atom_virial.resize(static_cast(nframes) * natoms * 9); VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; VALUETYPE *atomic_ener_ = &atom_energy[0]; @@ -1079,7 +1079,7 @@ class DeepPot { const int &dparam, const std::vector ¶m) const { if (param.size() == dparam) { - out_param.resize(nframes * dparam); + out_param.resize(static_cast(nframes) * dparam); for (int ii = 0; ii < nframes; ++ii) { std::copy(param.begin(), param.end(), out_param.begin() + ii * dparam); } @@ -1210,7 +1210,7 @@ class DeepPotModelDevi { virial.resize(numb_models); for (int i = 0; i < numb_models; i++) { ener[i] = energy_flat[i]; - force[i].resize(natoms * 3); + force[i].resize(static_cast(natoms) * 3); virial[i].resize(9); for (int j = 0; j < natoms * 3; j++) { force[i][j] = force_flat[i * natoms * 3 + j]; @@ -1292,10 +1292,10 @@ class DeepPotModelDevi { atom_virial.resize(numb_models); for (int i = 0; i < numb_models; i++) { ener[i] = energy_flat[i]; - force[i].resize(natoms * 3); + force[i].resize(static_cast(natoms) * 3); virial[i].resize(9); atom_energy[i].resize(natoms); - atom_virial[i].resize(natoms * 9); + atom_virial[i].resize(static_cast(natoms) * 9); for (int j = 0; j < natoms * 3; j++) { 
force[i][j] = force_flat[i * natoms * 3 + j]; } @@ -1496,7 +1496,7 @@ class DeepPotModelDevi { const int &dparam, const std::vector ¶m) const { if (param.size() == dparam) { - out_param.resize(nframes * dparam); + out_param.resize(static_cast(nframes) * dparam); for (int ii = 0; ii < nframes; ++ii) { std::copy(param.begin(), param.end(), out_param.begin() + ii * dparam); } @@ -1653,8 +1653,8 @@ class DeepTensor { const VALUETYPE *box_ = !box.empty() ? &box[0] : nullptr; const int *atype_ = &atype[0]; global_tensor.resize(odim); - force.resize(odim * natoms * 3); - virial.resize(odim * 9); + force.resize(static_cast(odim) * natoms * 3); + virial.resize(static_cast(odim) * 9); VALUETYPE *global_tensor_ = &global_tensor[0]; VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; @@ -1697,9 +1697,9 @@ class DeepTensor { const int *atype_ = &atype[0]; global_tensor.resize(odim); - force.resize(odim * natoms * 3); - virial.resize(odim * 9); - atom_virial.resize(odim * natoms * 9); + force.resize(static_cast(odim) * natoms * 3); + virial.resize(static_cast(odim) * 9); + atom_virial.resize(static_cast(odim) * natoms * 9); VALUETYPE *global_tensor_ = &global_tensor[0]; VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; @@ -1752,8 +1752,8 @@ class DeepTensor { const VALUETYPE *box_ = !box.empty() ? 
&box[0] : nullptr; const int *atype_ = &atype[0]; global_tensor.resize(odim); - force.resize(odim * natoms * 3); - virial.resize(odim * 9); + force.resize(static_cast(odim) * natoms * 3); + virial.resize(static_cast(odim) * 9); VALUETYPE *global_tensor_ = &global_tensor[0]; VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; @@ -1800,9 +1800,9 @@ class DeepTensor { const int *atype_ = &atype[0]; global_tensor.resize(odim); - force.resize(odim * natoms * 3); - virial.resize(odim * 9); - atom_virial.resize(odim * natoms * 9); + force.resize(static_cast(odim) * natoms * 3); + virial.resize(static_cast(odim) * 9); + atom_virial.resize(static_cast(odim) * natoms * 9); VALUETYPE *global_tensor_ = &global_tensor[0]; VALUETYPE *force_ = &force[0]; VALUETYPE *virial_ = &virial[0]; @@ -1954,7 +1954,7 @@ class DipoleChargeModifier { const int *dpairs = reinterpret_cast(&pairs[0]); const VALUETYPE *delef = &delef_[0]; - dfcorr_.resize(natoms * 3); + dfcorr_.resize(static_cast(natoms) * 3); dvcorr_.resize(9); VALUETYPE *dfcorr = &dfcorr_[0]; VALUETYPE *dvcorr = &dvcorr_[0]; @@ -2071,7 +2071,7 @@ void select_map(std::vector &out, nall2++; } } - out.resize(nall2 * stride); + out.resize(static_cast(nall2) * stride); DP_SelectMapInt(&in[0], &fwd_map[0], stride, nall1, nall2, &out[0]); }; diff --git a/source/api_c/tests/test_deepdipole_hpp.cc b/source/api_c/tests/test_deepdipole_hpp.cc index 49958469e0..f781c34c5b 100644 --- a/source/api_c/tests/test_deepdipole_hpp.cc +++ b/source/api_c/tests/test_deepdipole_hpp.cc @@ -234,7 +234,7 @@ class TestInferDeepDipoleNew : public ::testing::Test { } } - expected_gv.resize(odim * 9); + expected_gv.resize(static_cast(odim) * 9); for (int kk = 0; kk < odim; ++kk) { for (int ii = 0; ii < natoms; ++ii) { for (int dd = 0; dd < 9; ++dd) { diff --git a/source/api_c/tests/test_deeppolar_hpp.cc b/source/api_c/tests/test_deeppolar_hpp.cc index 1fc2075afb..63ebf5d760 100644 --- a/source/api_c/tests/test_deeppolar_hpp.cc +++ 
b/source/api_c/tests/test_deeppolar_hpp.cc @@ -466,7 +466,7 @@ class TestInferDeepPolarNew : public ::testing::Test { } } - expected_gv.resize(odim * 9); + expected_gv.resize(static_cast(odim) * 9); for (int kk = 0; kk < odim; ++kk) { for (int ii = 0; ii < natoms; ++ii) { for (int dd = 0; dd < 9; ++dd) { diff --git a/source/api_c/tests/test_deeppot_a_fparam_aparam_nframes.cc b/source/api_c/tests/test_deeppot_a_fparam_aparam_nframes.cc index b94087916a..383c8f5fb1 100644 --- a/source/api_c/tests/test_deeppot_a_fparam_aparam_nframes.cc +++ b/source/api_c/tests/test_deeppot_a_fparam_aparam_nframes.cc @@ -126,7 +126,7 @@ class TestInferDeepPotAFparamAparamNFrames : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { @@ -767,7 +767,7 @@ class TestInferDeepPotAFparamAparamNFramesSingleParam : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { diff --git a/source/api_c/tests/test_deeppot_a_nframes_hpp.cc b/source/api_c/tests/test_deeppot_a_nframes_hpp.cc index 1177957899..af132c0146 100644 --- a/source/api_c/tests/test_deeppot_a_nframes_hpp.cc +++ b/source/api_c/tests/test_deeppot_a_nframes_hpp.cc @@ -119,7 +119,7 @@ class TestInferDeepPotANFrames : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, 
expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { @@ -728,7 +728,7 @@ class TestInferDeepPotANFramesNoPbc : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { diff --git a/source/api_c/tests/test_utils.h b/source/api_c/tests/test_utils.h index 01636156b2..5167732bc8 100644 --- a/source/api_c/tests/test_utils.h +++ b/source/api_c/tests/test_utils.h @@ -42,7 +42,7 @@ inline void _fold_back(std::vector &out, const int nall, const int ndim, const int nframes = 1) { - out.resize(nframes * nloc * ndim); + out.resize(static_cast(nframes) * nloc * ndim); _fold_back(out.begin(), in.begin(), mapping, nloc, nall, ndim, nframes); } diff --git a/source/api_cc/src/DataModifier.cc b/source/api_cc/src/DataModifier.cc index d687c02e75..c44bbceaa2 100644 --- a/source/api_cc/src/DataModifier.cc +++ b/source/api_cc/src/DataModifier.cc @@ -123,7 +123,7 @@ void DipoleChargeModifier::run_model( auto of = output_f.flat(); auto ov = output_v.flat(); - dforce.resize(nall * 3); + dforce.resize(static_cast(nall) * 3); dvirial.resize(9); for (int ii = 0; ii < nall * 3; ++ii) { dforce[ii] = of(ii); @@ -186,7 +186,7 @@ void DipoleChargeModifier::compute( int nall_real = real_bkw_map.size(); int nloc_real = nall_real - nghost_real; if (nloc_real == 0) { - dfcorr_.resize(nall * 3); + dfcorr_.resize(static_cast(nall) * 3); dvcorr_.resize(9); fill(dfcorr_.begin(), dfcorr_.end(), (VALUETYPE)0.0); 
fill(dvcorr_.begin(), dvcorr_.end(), (VALUETYPE)0.0); @@ -196,8 +196,8 @@ void DipoleChargeModifier::compute( std::vector dcoord_real; std::vector delef_real; std::vector datype_real; - dcoord_real.resize(nall_real * 3); - delef_real.resize(nall_real * 3); + dcoord_real.resize(static_cast(nall_real) * 3); + delef_real.resize(static_cast(nall_real) * 3); datype_real.resize(nall_real); // fwd map select_map(dcoord_real, dcoord_, real_fwd_map, 3); diff --git a/source/api_cc/src/DeepTensor.cc b/source/api_cc/src/DeepTensor.cc index 655819e086..11a131a604 100644 --- a/source/api_cc/src/DeepTensor.cc +++ b/source/api_cc/src/DeepTensor.cc @@ -253,7 +253,7 @@ void DeepTensor::run_model( } // component-wise virial - dvirial_.resize(odim * 9); + dvirial_.resize(static_cast(odim) * 9); for (unsigned ii = 0; ii < odim * 9; ++ii) { dvirial_[ii] = ov(ii); } @@ -266,7 +266,7 @@ void DeepTensor::run_model( std::vector sel_srt = sel_fwd; select_map(sel_srt, sel_fwd, atommap.get_fwd_map(), 1); std::remove(sel_srt.begin(), sel_srt.end(), -1); - datom_tensor_.resize(nsel * odim); + datom_tensor_.resize(static_cast(nsel) * odim); select_map(datom_tensor_, datom_tensor, sel_srt, odim); // component-wise atomic virial diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index 33c433a90a..2f75aaa291 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -171,14 +171,15 @@ void deepmd::select_real_atoms_coord(std::vector& dcoord, // resize to nall_real nall_real = bkw_map.size(); nloc_real = nall_real - nghost_real; - dcoord.resize(nframes * nall_real * 3); + dcoord.resize(static_cast(nframes) * nall_real * 3); datype.resize(nall_real); // fwd map select_map(dcoord, dcoord_, fwd_map, 3, nframes, nall_real, nall); select_map(datype, datype_, fwd_map, 1); // aparam if (daparam > 0) { - aparam.resize(nframes * (aparam_nall ? nall_real : nloc_real)); + aparam.resize(static_cast(nframes) * + (aparam_nall ? 
nall_real : nloc_real)); select_map(aparam, aparam_, fwd_map, daparam, nframes, (aparam_nall ? nall_real : nloc_real), (aparam_nall ? nall : (nall - nghost))); @@ -396,7 +397,7 @@ int deepmd::session_input_tensors( TensorShape coord_shape; coord_shape.AddDim(nframes); - coord_shape.AddDim(nall * 3); + coord_shape.AddDim(static_cast(nall) * 3); TensorShape type_shape; type_shape.AddDim(nframes); type_shape.AddDim(nall); @@ -540,7 +541,7 @@ int deepmd::session_input_tensors( TensorShape coord_shape; coord_shape.AddDim(nframes); - coord_shape.AddDim(nall * 3); + coord_shape.AddDim(static_cast(nall) * 3); TensorShape type_shape; type_shape.AddDim(nframes); type_shape.AddDim(nall); @@ -675,7 +676,7 @@ int deepmd::session_input_tensors_mixed_type( TensorShape coord_shape; coord_shape.AddDim(nframes); - coord_shape.AddDim(nall * 3); + coord_shape.AddDim(static_cast(nall) * 3); TensorShape type_shape; type_shape.AddDim(nframes); type_shape.AddDim(nall); diff --git a/source/api_cc/tests/test_deepdipole.cc b/source/api_cc/tests/test_deepdipole.cc index b8f2195728..86a8a4131f 100644 --- a/source/api_cc/tests/test_deepdipole.cc +++ b/source/api_cc/tests/test_deepdipole.cc @@ -238,7 +238,7 @@ class TestInferDeepDipoleNew : public ::testing::Test { } } - expected_gv.resize(odim * 9); + expected_gv.resize(static_cast(odim) * 9); for (int kk = 0; kk < odim; ++kk) { for (int ii = 0; ii < natoms; ++ii) { for (int dd = 0; dd < 9; ++dd) { diff --git a/source/api_cc/tests/test_deeppolar.cc b/source/api_cc/tests/test_deeppolar.cc index d8ad497054..89014fd245 100644 --- a/source/api_cc/tests/test_deeppolar.cc +++ b/source/api_cc/tests/test_deeppolar.cc @@ -470,7 +470,7 @@ class TestInferDeepPolarNew : public ::testing::Test { } } - expected_gv.resize(odim * 9); + expected_gv.resize(static_cast(odim) * 9); for (int kk = 0; kk < odim; ++kk) { for (int ii = 0; ii < natoms; ++ii) { for (int dd = 0; dd < 9; ++dd) { diff --git a/source/api_cc/tests/test_deeppot_a_fparam_aparam_nframes.cc 
b/source/api_cc/tests/test_deeppot_a_fparam_aparam_nframes.cc index 0f45eaabb0..0851523814 100644 --- a/source/api_cc/tests/test_deeppot_a_fparam_aparam_nframes.cc +++ b/source/api_cc/tests/test_deeppot_a_fparam_aparam_nframes.cc @@ -127,7 +127,7 @@ class TestInferDeepPotAFparamAparamNFrames : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { @@ -768,7 +768,7 @@ class TestInferDeepPotAFparamAparamNFramesSingleParam : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { diff --git a/source/api_cc/tests/test_deeppot_a_nframes.cc b/source/api_cc/tests/test_deeppot_a_nframes.cc index 835971d106..c83a7a0b41 100644 --- a/source/api_cc/tests/test_deeppot_a_nframes.cc +++ b/source/api_cc/tests/test_deeppot_a_nframes.cc @@ -123,7 +123,7 @@ class TestInferDeepPotANFrames : public ::testing::Test { EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { @@ -732,7 +732,7 @@ class TestInferDeepPotANFramesNoPbc : public ::testing::Test { 
EXPECT_EQ(nframes * natoms * 3, expected_f.size()); EXPECT_EQ(nframes * natoms * 9, expected_v.size()); expected_tot_e.resize(nframes); - expected_tot_v.resize(nframes * 9); + expected_tot_v.resize(static_cast(nframes) * 9); std::fill(expected_tot_e.begin(), expected_tot_e.end(), 0.); std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.); for (int kk = 0; kk < nframes; ++kk) { diff --git a/source/api_cc/tests/test_utils.h b/source/api_cc/tests/test_utils.h index 46732ca935..d06823b4e0 100644 --- a/source/api_cc/tests/test_utils.h +++ b/source/api_cc/tests/test_utils.h @@ -42,7 +42,7 @@ inline void _fold_back(std::vector &out, const int nall, const int ndim, const int nframes = 1) { - out.resize(nframes * nloc * ndim); + out.resize(static_cast(nframes) * nloc * ndim); _fold_back(out.begin(), in.begin(), mapping, nloc, nall, ndim, nframes); } diff --git a/source/ipi/driver.cc b/source/ipi/driver.cc index 22d5415bd1..1e3d92eb5e 100644 --- a/source/ipi/driver.cc +++ b/source/ipi/driver.cc @@ -190,11 +190,11 @@ int main(int argc, char *argv[]) { << std::endl; } - dcoord.resize(3 * natoms); - dforce.resize(3 * natoms, 0); - dcoord_tmp.resize(3 * natoms); - dforce_tmp.resize(3 * natoms, 0); - msg_buff = new double[3 * natoms]; + dcoord.resize(3 * static_cast(natoms)); + dforce.resize(3 * static_cast(natoms), 0); + dcoord_tmp.resize(3 * static_cast(natoms)); + dforce_tmp.resize(3 * static_cast(natoms), 0); + msg_buff = new double[3 * static_cast(natoms)]; } // get coord diff --git a/source/lib/src/ewald.cc b/source/lib/src/ewald.cc index 0f3b960d9f..ec201c4d7d 100644 --- a/source/lib/src/ewald.cc +++ b/source/lib/src/ewald.cc @@ -98,7 +98,7 @@ void deepmd::ewald_recp(VALUETYPE& ener, // natoms int natoms = charge.size(); // init returns - force.resize(natoms * 3); + force.resize(static_cast(natoms) * 3); virial.resize(9); ener = 0; fill(force.begin(), force.end(), static_cast(0)); @@ -179,7 +179,7 @@ void deepmd::ewald_recp(VALUETYPE& ener, std::vector > 
thread_force(nthreads); std::vector > thread_virial(nthreads); for (int ii = 0; ii < nthreads; ++ii) { - thread_force[ii].resize(natoms * 3, 0.); + thread_force[ii].resize(static_cast(natoms) * 3, 0.); thread_virial[ii].resize(9, 0.); } // calculate ener, force and virial diff --git a/source/lib/src/neighbor_list.cc b/source/lib/src/neighbor_list.cc index fc797ce6a9..6723e3de66 100644 --- a/source/lib/src/neighbor_list.cc +++ b/source/lib/src/neighbor_list.cc @@ -784,7 +784,7 @@ void copy_coord(std::vector& out_c, build_clist(clist, in_c, nloc, nat_stt, ncell, nat_stt, ncell, region, ncell); // copy local atoms - out_c.resize(nloc * 3); + out_c.resize(static_cast(nloc) * 3); out_t.resize(nloc); mapping.resize(nloc); copy(in_c.begin(), in_c.end(), out_c.begin()); diff --git a/source/lib/src/pairwise.cc b/source/lib/src/pairwise.cc index 3fea27bd71..f5b21d9856 100644 --- a/source/lib/src/pairwise.cc +++ b/source/lib/src/pairwise.cc @@ -93,7 +93,7 @@ void deepmd::dprc_pairwise_map_cpu( // (3, 4, 0, 1, 2, 10, 11), // (3, 4, 5, 6, 7, 10, -1), // (3, 4, 8, 9, -1, 10, -1) - forward_qmmm_map.resize((nfragments - 1) * map_size); + forward_qmmm_map.resize(static_cast(nfragments - 1) * map_size); std::fill(forward_qmmm_map.begin(), forward_qmmm_map.end(), -1); int nqm_real = nloc; // init for nfragments = 1 for (int ii = 0; ii < nfragments - 1; ++ii) { @@ -133,7 +133,7 @@ void deepmd::dprc_pairwise_map_cpu( // (2, 3, 4, 0, 1, -1, -1, -1, -1, -1, 5, 6) // (-1, -1, -1, 0, 1, 2, 3, 4, -1, -1, 5, -1) // (-1, -1, -1, 0, 1, -1, -1, -1, 2, 3, 5, -1) - backward_qmmm_map.resize((nfragments - 1) * nall); + backward_qmmm_map.resize(static_cast(nfragments - 1) * nall); std::fill(backward_qmmm_map.begin(), backward_qmmm_map.end(), -1); for (int ii = 0; ii < nfragments - 1; ++ii) { for (int jj = 0; jj < map_size; ++jj) { diff --git a/source/lib/src/prod_env_mat.cc b/source/lib/src/prod_env_mat.cc index 63813069b0..81984c78e4 100644 --- a/source/lib/src/prod_env_mat.cc +++ 
b/source/lib/src/prod_env_mat.cc @@ -304,7 +304,8 @@ void deepmd::env_mat_nbor_update(InputNlist &inlist, max_nbor_size = _max_nbor_size; // copy nbor list from host to the device - std::vector nbor_list_host(inum * max_nbor_size, 0); + std::vector nbor_list_host(static_cast(inum) * max_nbor_size, + 0); int **_firstneigh = (int **)malloc(sizeof(int *) * inum); for (int ii = 0; ii < inum; ii++) { _firstneigh[ii] = nbor_list_dev + ii * max_nbor_size; @@ -313,7 +314,7 @@ void deepmd::env_mat_nbor_update(InputNlist &inlist, } } memcpy_host_to_device(nbor_list_dev, &nbor_list_host[0], - inum * max_nbor_size); + static_cast(inum) * max_nbor_size); memcpy_host_to_device(gpu_inlist.firstneigh, _firstneigh, inum); free(_firstneigh); } diff --git a/source/lib/tests/test_coord.cc b/source/lib/tests/test_coord.cc index c939dd6fa6..0427521416 100644 --- a/source/lib/tests/test_coord.cc +++ b/source/lib/tests/test_coord.cc @@ -234,7 +234,7 @@ TEST_F(TestCopyCoord, cpu) { // << nloc << " " // << nall << std::endl; - out_c.resize(nall * 3); + out_c.resize(static_cast(nall) * 3); out_t.resize(nall); mapping.resize(nall); @@ -322,7 +322,7 @@ TEST_F(TestCopyCoord, gpu) { deepmd::delete_device_memory(int_data_dev); EXPECT_EQ(ret, 0); EXPECT_EQ(nall, expected_nall); - out_c.resize(nall * 3); + out_c.resize(static_cast(nall) * 3); out_t.resize(nall); mapping.resize(nall); @@ -468,7 +468,7 @@ TEST_F(TestCopyCoordMoreCell, cpu) { // << nloc << " " // << nall << std::endl; - out_c.resize(nall * 3); + out_c.resize(static_cast(nall) * 3); out_t.resize(nall); mapping.resize(nall); @@ -556,7 +556,7 @@ TEST_F(TestCopyCoordMoreCell, gpu) { deepmd::delete_device_memory(int_data_dev); EXPECT_EQ(ret, 0); EXPECT_EQ(nall, expected_nall); - out_c.resize(nall * 3); + out_c.resize(static_cast(nall) * 3); out_t.resize(nall); mapping.resize(nall); diff --git a/source/lib/tests/test_map_aparam.cc b/source/lib/tests/test_map_aparam.cc index 4adf3ffd98..061ae49f28 100644 --- 
a/source/lib/tests/test_map_aparam.cc +++ b/source/lib/tests/test_map_aparam.cc @@ -65,7 +65,7 @@ class TestMapAparam : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); + nlist.resize(static_cast(nloc) * nnei); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -74,7 +74,7 @@ class TestMapAparam : public ::testing::Test { nlist[ii * nnei + jj] = fmt_nlist_a[jj]; } } - aparam.resize(nall * numb_aparam); + aparam.resize(static_cast(nall) * numb_aparam); for (int ii = 0; ii < nall * numb_aparam; ++ii) { aparam[ii] = 10 - 0.1 * ii; } diff --git a/source/lib/tests/test_pair_tab.cc b/source/lib/tests/test_pair_tab.cc index 9f68cd98b9..7002beb1da 100644 --- a/source/lib/tests/test_pair_tab.cc +++ b/source/lib/tests/test_pair_tab.cc @@ -235,10 +235,10 @@ class TestPairTab : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, diff --git a/source/lib/tests/test_prod_force_a.cc b/source/lib/tests/test_prod_force_a.cc index 2031f086b4..f49b173769 100644 --- a/source/lib/tests/test_prod_force_a.cc +++ b/source/lib/tests/test_prod_force_a.cc @@ -82,10 +82,10 @@ class TestProdForceA : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); 
- env_deriv.resize(nloc * ndescrpt * 3); - rij_a.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij_a.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -105,7 +105,7 @@ class TestProdForceA : public ::testing::Test { } } } - net_deriv.resize(nloc * ndescrpt); + net_deriv.resize(static_cast(nloc) * ndescrpt); for (int ii = 0; ii < nloc * ndescrpt; ++ii) { net_deriv[ii] = 10 - ii * 0.01; } diff --git a/source/lib/tests/test_prod_force_grad_a.cc b/source/lib/tests/test_prod_force_grad_a.cc index abb04eaa01..a946639638 100644 --- a/source/lib/tests/test_prod_force_grad_a.cc +++ b/source/lib/tests/test_prod_force_grad_a.cc @@ -93,10 +93,10 @@ class TestProdForceGradA : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij_a.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij_a.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -116,7 +116,7 @@ class TestProdForceGradA : public ::testing::Test { } } } - grad.resize(nloc * 3); + grad.resize(static_cast(nloc) * 3); for (int ii = 0; ii < nloc * 3; ++ii) { grad[ii] = 10 - ii * 0.1; } diff --git a/source/lib/tests/test_prod_force_grad_r.cc b/source/lib/tests/test_prod_force_grad_r.cc index c8a27077c3..e143633bea 100644 --- a/source/lib/tests/test_prod_force_grad_r.cc +++ b/source/lib/tests/test_prod_force_grad_r.cc @@ -67,10 +67,10 @@ class TestProdForceGradR : public ::testing::Test { 
} build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij_a.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij_a.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -90,7 +90,7 @@ class TestProdForceGradR : public ::testing::Test { } } } - grad.resize(nloc * 3); + grad.resize(static_cast(nloc) * 3); for (int ii = 0; ii < nloc * 3; ++ii) { grad[ii] = 10 - ii * 0.1; } diff --git a/source/lib/tests/test_prod_force_r.cc b/source/lib/tests/test_prod_force_r.cc index ff3245742d..544152c759 100644 --- a/source/lib/tests/test_prod_force_r.cc +++ b/source/lib/tests/test_prod_force_r.cc @@ -79,10 +79,10 @@ class TestProdForceR : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij_a.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij_a.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -102,7 +102,7 @@ class TestProdForceR : public ::testing::Test { } } } - net_deriv.resize(nloc * ndescrpt); + net_deriv.resize(static_cast(nloc) * ndescrpt); for (int ii = 0; ii < nloc * ndescrpt; ++ii) { net_deriv[ii] = 10 - ii * 0.01; } diff --git a/source/lib/tests/test_prod_virial_a.cc b/source/lib/tests/test_prod_virial_a.cc index b2f2a11989..c6fe254db8 100644 --- 
a/source/lib/tests/test_prod_virial_a.cc +++ b/source/lib/tests/test_prod_virial_a.cc @@ -118,10 +118,10 @@ class TestProdVirialA : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -144,7 +144,7 @@ class TestProdVirialA : public ::testing::Test { rij[ii * nnei * 3 + jj] = t_rij[jj]; } } - net_deriv.resize(nloc * ndescrpt); + net_deriv.resize(static_cast(nloc) * ndescrpt); for (int ii = 0; ii < nloc * ndescrpt; ++ii) { net_deriv[ii] = 10 - ii * 0.01; } diff --git a/source/lib/tests/test_prod_virial_grad_a.cc b/source/lib/tests/test_prod_virial_grad_a.cc index 09af51d6ed..598df91c86 100644 --- a/source/lib/tests/test_prod_virial_grad_a.cc +++ b/source/lib/tests/test_prod_virial_grad_a.cc @@ -87,10 +87,10 @@ class TestProdVirialGradA : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, diff --git a/source/lib/tests/test_prod_virial_grad_r.cc b/source/lib/tests/test_prod_virial_grad_r.cc index 93a7291176..9b520ed898 100644 --- 
a/source/lib/tests/test_prod_virial_grad_r.cc +++ b/source/lib/tests/test_prod_virial_grad_r.cc @@ -61,10 +61,10 @@ class TestProdVirialGradR : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, diff --git a/source/lib/tests/test_prod_virial_r.cc b/source/lib/tests/test_prod_virial_r.cc index aed4abc512..f0fab48e78 100644 --- a/source/lib/tests/test_prod_virial_r.cc +++ b/source/lib/tests/test_prod_virial_r.cc @@ -118,10 +118,10 @@ class TestProdVirialR : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - env.resize(nloc * ndescrpt); - env_deriv.resize(nloc * ndescrpt * 3); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + env.resize(static_cast(nloc) * ndescrpt); + env_deriv.resize(static_cast(nloc) * ndescrpt * 3); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -144,7 +144,7 @@ class TestProdVirialR : public ::testing::Test { rij[ii * nnei * 3 + jj] = t_rij[jj]; } } - net_deriv.resize(nloc * ndescrpt); + net_deriv.resize(static_cast(nloc) * ndescrpt); for (int ii = 0; ii < nloc * ndescrpt; ++ii) { net_deriv[ii] = 10 - ii * 0.01; } diff --git a/source/lib/tests/test_soft_min_switch.cc b/source/lib/tests/test_soft_min_switch.cc index f7a4f43b1a..fbce26e352 100644 --- 
a/source/lib/tests/test_soft_min_switch.cc +++ b/source/lib/tests/test_soft_min_switch.cc @@ -55,8 +55,8 @@ class TestSoftMinSwitch : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast<size_t>(nloc) * nnei); + rij.resize(static_cast<size_t>(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, diff --git a/source/lib/tests/test_soft_min_switch_force.cc b/source/lib/tests/test_soft_min_switch_force.cc index dacc681792..a49661fdbd 100644 --- a/source/lib/tests/test_soft_min_switch_force.cc +++ b/source/lib/tests/test_soft_min_switch_force.cc @@ -76,8 +76,8 @@ class TestSoftMinSwitchForce : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast<size_t>(nloc) * nnei); + rij.resize(static_cast<size_t>(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -95,7 +95,7 @@ class TestSoftMinSwitchForce : public ::testing::Test { } } sw_value.resize(nloc); - sw_deriv.resize(nloc * nnei * 3); + sw_deriv.resize(static_cast<size_t>(nloc) * nnei * 3); deepmd::soft_min_switch_cpu(&sw_value[0], &sw_deriv[0], &rij[0], &nlist[0], nloc, nnei, alpha, rmin, rmax); diff --git a/source/lib/tests/test_soft_min_switch_force_grad.cc b/source/lib/tests/test_soft_min_switch_force_grad.cc index 7c36296a79..9cef91bed0 100644 --- a/source/lib/tests/test_soft_min_switch_force_grad.cc +++ b/source/lib/tests/test_soft_min_switch_force_grad.cc @@ -56,8 +56,8 @@ class TestSoftMinSwitchForceGrad : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region,
ncell); - nlist.resize(nloc * nnei); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -75,11 +75,11 @@ class TestSoftMinSwitchForceGrad : public ::testing::Test { } } sw_value.resize(nloc); - sw_deriv.resize(nloc * nnei * 3); + sw_deriv.resize(static_cast(nloc) * nnei * 3); deepmd::soft_min_switch_cpu(&sw_value[0], &sw_deriv[0], &rij[0], &nlist[0], nloc, nnei, alpha, rmin, rmax); - grad.resize(nloc * 3); + grad.resize(static_cast(nloc) * 3); for (int ii = 0; ii < nloc; ++ii) { grad[ii] = 1.0 - ii * 0.1; } diff --git a/source/lib/tests/test_soft_min_switch_virial.cc b/source/lib/tests/test_soft_min_switch_virial.cc index 76ddf9fa7f..8b38805528 100644 --- a/source/lib/tests/test_soft_min_switch_virial.cc +++ b/source/lib/tests/test_soft_min_switch_virial.cc @@ -121,8 +121,8 @@ class TestSoftMinSwitchVirial : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -140,7 +140,7 @@ class TestSoftMinSwitchVirial : public ::testing::Test { } } sw_value.resize(nloc); - sw_deriv.resize(nloc * nnei * 3); + sw_deriv.resize(static_cast(nloc) * nnei * 3); deepmd::soft_min_switch_cpu(&sw_value[0], &sw_deriv[0], &rij[0], &nlist[0], nloc, nnei, alpha, rmin, rmax); diff --git a/source/lib/tests/test_soft_min_switch_virial_grad.cc b/source/lib/tests/test_soft_min_switch_virial_grad.cc index 315880b3ac..fef87d4d4e 100644 --- a/source/lib/tests/test_soft_min_switch_virial_grad.cc +++ b/source/lib/tests/test_soft_min_switch_virial_grad.cc @@ -56,8 
+56,8 @@ class TestSoftMinSwitchVirialGrad : public ::testing::Test { } build_nlist(nlist_a_cpy, nlist_r_cpy, posi_cpy, nloc, rc, rc, nat_stt, ncell, ext_stt, ext_end, region, ncell); - nlist.resize(nloc * nnei); - rij.resize(nloc * nnei * 3); + nlist.resize(static_cast(nloc) * nnei); + rij.resize(static_cast(nloc) * nnei * 3); for (int ii = 0; ii < nloc; ++ii) { // format nlist and record format_nlist_i_cpu(fmt_nlist_a, posi_cpy, atype_cpy, ii, @@ -75,11 +75,11 @@ class TestSoftMinSwitchVirialGrad : public ::testing::Test { } } sw_value.resize(nloc); - sw_deriv.resize(nloc * nnei * 3); + sw_deriv.resize(static_cast(nloc) * nnei * 3); deepmd::soft_min_switch_cpu(&sw_value[0], &sw_deriv[0], &rij[0], &nlist[0], nloc, nnei, alpha, rmin, rmax); - grad.resize(nloc * 3); + grad.resize(static_cast(nloc) * 3); for (int ii = 0; ii < nloc; ++ii) { grad[ii] = 1.0 - ii * 0.1; } diff --git a/source/lmp/fix_dplr.cpp b/source/lmp/fix_dplr.cpp index 628f435bb7..ea60023e26 100644 --- a/source/lmp/fix_dplr.cpp +++ b/source/lmp/fix_dplr.cpp @@ -517,7 +517,7 @@ void FixDPLR::pre_force(int vflag) { int odim = dpt.output_dim(); assert(odim == 3); - dipole_recd.resize(nall * 3); + dipole_recd.resize(static_cast(nall) * 3); fill(dipole_recd.begin(), dipole_recd.end(), 0.0); for (int ii = 0; ii < valid_pairs.size(); ++ii) { int idx0 = valid_pairs[ii].first; diff --git a/source/lmp/pair_deepmd.cpp b/source/lmp/pair_deepmd.cpp index 3a6c1c8bbf..90aa453143 100644 --- a/source/lmp/pair_deepmd.cpp +++ b/source/lmp/pair_deepmd.cpp @@ -204,7 +204,7 @@ static void make_uniform_aparam(vector &daparam, const vector &aparam, const int &nlocal) { unsigned dim_aparam = aparam.size(); - daparam.resize(dim_aparam * nlocal); + daparam.resize(static_cast(dim_aparam) * nlocal); for (int ii = 0; ii < nlocal; ++ii) { for (int jj = 0; jj < dim_aparam; ++jj) { daparam[ii * dim_aparam + jj] = aparam[jj]; @@ -247,7 +247,7 @@ void PairDeepMD::make_aparam_from_compute(vector &aparam) { assert(compute); int nlocal = 
atom->nlocal; - aparam.resize(dim_aparam * nlocal); + aparam.resize(static_cast(dim_aparam) * nlocal); if (!(compute->invoked_flag & Compute::INVOKED_PERATOM)) { compute->compute_peratom(); @@ -573,7 +573,7 @@ void PairDeepMD::compute(int eflag, int vflag) { error->one(FLERR, e.what()); } } else { - dforce.resize((extend_inum + extend_nghost) * 3); + dforce.resize(static_cast(extend_inum + extend_nghost) * 3); try { deep_pot.compute(dener, dforce, dvirial, extend_dcoord, extend_dtype, dbox, extend_nghost, extend_lmp_list, @@ -596,7 +596,7 @@ void PairDeepMD::compute(int eflag, int vflag) { error->one(FLERR, e.what()); } } else { - dforce.resize((extend_inum + extend_nghost) * 3); + dforce.resize(static_cast(extend_inum + extend_nghost) * 3); try { deep_pot.compute(dener, dforce, dvirial, extend_dcoord, extend_dtype, dbox, extend_nghost, extend_lmp_list, @@ -1495,7 +1495,7 @@ void PairDeepMD::extend(int &extend_inum, } // extend coord - extend_dcoord.resize(extend_nall * 3); + extend_dcoord.resize(static_cast(extend_nall) * 3); for (int ii = 0; ii < nloc; ii++) { for (int jj = 0; jj < 3; jj++) { extend_dcoord[new_idx_map[ii] * 3 + jj] = dcoord[ii * 3 + jj]; diff --git a/source/lmp/pppm_dplr.cpp b/source/lmp/pppm_dplr.cpp index faa80ee308..613a9f1c93 100644 --- a/source/lmp/pppm_dplr.cpp +++ b/source/lmp/pppm_dplr.cpp @@ -59,7 +59,7 @@ void PPPMDPLR::init() { int nlocal = atom->nlocal; // cout << " ninit pppm/dplr ---------------------- " << nlocal << endl; - fele.resize(nlocal * 3); + fele.resize(static_cast(nlocal) * 3); fill(fele.begin(), fele.end(), 0.0); } @@ -296,7 +296,7 @@ void PPPMDPLR::fieldforce_ik() { int nghost = atom->nghost; int nall = nlocal + nghost; - fele.resize(nlocal * 3); + fele.resize(static_cast(nlocal) * 3); fill(fele.begin(), fele.end(), 0.0); for (i = 0; i < nlocal; i++) { @@ -372,7 +372,7 @@ void PPPMDPLR::fieldforce_ad() { int nghost = atom->nghost; int nall = nlocal + nghost; - fele.resize(nlocal * 3); + fele.resize(static_cast(nlocal) * 
3); fill(fele.begin(), fele.end(), 0.0); for (i = 0; i < nlocal; i++) { diff --git a/source/md/src/Convert.cc b/source/md/src/Convert.cc index 198c746c23..b8014bf974 100644 --- a/source/md/src/Convert.cc +++ b/source/md/src/Convert.cc @@ -53,8 +53,8 @@ void Convert::gro2nnp(vector& coord, assert(posi.size() == idx_map_nnp2gro.size()); assert(velo.size() == idx_map_nnp2gro.size()); int natoms = idx_map_nnp2gro.size(); - coord.resize(3 * natoms); - veloc.resize(3 * natoms); + coord.resize(3 * static_cast(natoms)); + veloc.resize(3 * static_cast(natoms)); for (unsigned ii = 0; ii < natoms; ++ii) { int gro_i = idx_map_nnp2gro[ii]; for (int dd = 0; dd < 3; ++dd) { diff --git a/source/md/src/Tabulated.cc b/source/md/src/Tabulated.cc index 1ecf8ee53d..6e9777ea29 100644 --- a/source/md/src/Tabulated.cc +++ b/source/md/src/Tabulated.cc @@ -24,7 +24,7 @@ void Tabulated::reinit(const VALUETYPE rc, hi = 1. / hh; rc2 = rc * rc; - data.resize(tableLength * stride); + data.resize(static_cast(tableLength) * stride); int ii; for (ii = 0; ii < tableLength - 1; ++ii) { diff --git a/source/op/descrpt.cc b/source/op/descrpt.cc index ef040c3de0..6362b8d37a 100644 --- a/source/op/descrpt.cc +++ b/source/op/descrpt.cc @@ -145,22 +145,22 @@ class DescrptOp : public OpKernel { // Create an output tensor TensorShape descrpt_shape; descrpt_shape.AddDim(nsamples); - descrpt_shape.AddDim(nloc * ndescrpt); + descrpt_shape.AddDim(static_cast(nloc) * ndescrpt); TensorShape descrpt_deriv_shape; descrpt_deriv_shape.AddDim(nsamples); - descrpt_deriv_shape.AddDim(nloc * ndescrpt * 12); + descrpt_deriv_shape.AddDim(static_cast(nloc) * ndescrpt * 12); TensorShape rij_shape; rij_shape.AddDim(nsamples); - rij_shape.AddDim(nloc * nnei * 3); + rij_shape.AddDim(static_cast(nloc) * nnei * 3); TensorShape nlist_shape; nlist_shape.AddDim(nsamples); - nlist_shape.AddDim(nloc * nnei); + nlist_shape.AddDim(static_cast(nloc) * nnei); TensorShape axis_shape; axis_shape.AddDim(nsamples); - axis_shape.AddDim(nloc * 
4); + axis_shape.AddDim(static_cast(nloc) * 4); TensorShape rot_mat_shape; rot_mat_shape.AddDim(nsamples); - rot_mat_shape.AddDim(nloc * 9); + rot_mat_shape.AddDim(static_cast(nloc) * 9); Tensor* descrpt_tensor = NULL; OP_REQUIRES_OK(context, diff --git a/source/op/descrpt_se_a_ef.cc b/source/op/descrpt_se_a_ef.cc index 030c184b46..96c953f167 100644 --- a/source/op/descrpt_se_a_ef.cc +++ b/source/op/descrpt_se_a_ef.cc @@ -161,16 +161,16 @@ class DescrptSeAEfOp : public OpKernel { // Create an output tensor TensorShape descrpt_shape; descrpt_shape.AddDim(nsamples); - descrpt_shape.AddDim(nloc * ndescrpt); + descrpt_shape.AddDim(static_cast(nloc) * ndescrpt); TensorShape descrpt_deriv_shape; descrpt_deriv_shape.AddDim(nsamples); - descrpt_deriv_shape.AddDim(nloc * ndescrpt * 3); + descrpt_deriv_shape.AddDim(static_cast(nloc) * ndescrpt * 3); TensorShape rij_shape; rij_shape.AddDim(nsamples); - rij_shape.AddDim(nloc * nnei * 3); + rij_shape.AddDim(static_cast(nloc) * nnei * 3); TensorShape nlist_shape; nlist_shape.AddDim(nsamples); - nlist_shape.AddDim(nloc * nnei); + nlist_shape.AddDim(static_cast(nloc) * nnei); int context_output_index = 0; Tensor* descrpt_tensor = NULL; diff --git a/source/op/descrpt_se_a_ef_para.cc b/source/op/descrpt_se_a_ef_para.cc index 06f7f138fb..6dc4442ee6 100644 --- a/source/op/descrpt_se_a_ef_para.cc +++ b/source/op/descrpt_se_a_ef_para.cc @@ -161,16 +161,16 @@ class DescrptSeAEfParaOp : public OpKernel { // Create an output tensor TensorShape descrpt_shape; descrpt_shape.AddDim(nsamples); - descrpt_shape.AddDim(nloc * ndescrpt); + descrpt_shape.AddDim(static_cast(nloc) * ndescrpt); TensorShape descrpt_deriv_shape; descrpt_deriv_shape.AddDim(nsamples); - descrpt_deriv_shape.AddDim(nloc * ndescrpt * 3); + descrpt_deriv_shape.AddDim(static_cast(nloc) * ndescrpt * 3); TensorShape rij_shape; rij_shape.AddDim(nsamples); - rij_shape.AddDim(nloc * nnei * 3); + rij_shape.AddDim(static_cast(nloc) * nnei * 3); TensorShape nlist_shape; 
nlist_shape.AddDim(nsamples); - nlist_shape.AddDim(nloc * nnei); + nlist_shape.AddDim(static_cast(nloc) * nnei); int context_output_index = 0; Tensor* descrpt_tensor = NULL; diff --git a/source/op/descrpt_se_a_ef_vert.cc b/source/op/descrpt_se_a_ef_vert.cc index d70e9b201b..9899e29f06 100644 --- a/source/op/descrpt_se_a_ef_vert.cc +++ b/source/op/descrpt_se_a_ef_vert.cc @@ -161,16 +161,16 @@ class DescrptSeAEfVertOp : public OpKernel { // Create an output tensor TensorShape descrpt_shape; descrpt_shape.AddDim(nsamples); - descrpt_shape.AddDim(nloc * ndescrpt); + descrpt_shape.AddDim(static_cast(nloc) * ndescrpt); TensorShape descrpt_deriv_shape; descrpt_deriv_shape.AddDim(nsamples); - descrpt_deriv_shape.AddDim(nloc * ndescrpt * 3); + descrpt_deriv_shape.AddDim(static_cast(nloc) * ndescrpt * 3); TensorShape rij_shape; rij_shape.AddDim(nsamples); - rij_shape.AddDim(nloc * nnei * 3); + rij_shape.AddDim(static_cast(nloc) * nnei * 3); TensorShape nlist_shape; nlist_shape.AddDim(nsamples); - nlist_shape.AddDim(nloc * nnei); + nlist_shape.AddDim(static_cast(nloc) * nnei); int context_output_index = 0; Tensor* descrpt_tensor = NULL; diff --git a/source/op/descrpt_se_a_mask.cc b/source/op/descrpt_se_a_mask.cc index 4f133e5210..e27ea099ab 100644 --- a/source/op/descrpt_se_a_mask.cc +++ b/source/op/descrpt_se_a_mask.cc @@ -95,16 +95,18 @@ class DescrptSeAMaskOp : public OpKernel { // Create an output tensor TensorShape descrpt_shape; descrpt_shape.AddDim(nsamples); - descrpt_shape.AddDim(total_atom_num * total_atom_num * n_descrpt); + descrpt_shape.AddDim(static_cast(total_atom_num) * total_atom_num * + n_descrpt); TensorShape descrpt_deriv_shape; descrpt_deriv_shape.AddDim(nsamples); - descrpt_deriv_shape.AddDim(total_atom_num * total_atom_num * n_descrpt * 3); + descrpt_deriv_shape.AddDim(static_cast(total_atom_num) * + total_atom_num * n_descrpt * 3); TensorShape rij_shape; rij_shape.AddDim(nsamples); - rij_shape.AddDim(total_atom_num * total_atom_num * 3); + 
rij_shape.AddDim(static_cast(total_atom_num) * total_atom_num * 3); TensorShape nlist_shape; nlist_shape.AddDim(nsamples); - nlist_shape.AddDim(total_atom_num * total_atom_num); + nlist_shape.AddDim(static_cast(total_atom_num) * total_atom_num); int context_output_index = 0; Tensor *descrpt_tensor = NULL; diff --git a/source/op/ewald_recp.cc b/source/op/ewald_recp.cc index a0fbc7f580..72f3c3d5dc 100644 --- a/source/op/ewald_recp.cc +++ b/source/op/ewald_recp.cc @@ -70,7 +70,7 @@ class EwaldRecpOp : public OpKernel { energy_shape.AddDim(nsamples); TensorShape force_shape; force_shape.AddDim(nsamples); - force_shape.AddDim(nloc * 3); + force_shape.AddDim(static_cast(nloc) * 3); TensorShape virial_shape; virial_shape.AddDim(nsamples); virial_shape.AddDim(9); diff --git a/source/op/map_aparam.cc b/source/op/map_aparam.cc index d0ff08032d..7ac3b48a4f 100644 --- a/source/op/map_aparam.cc +++ b/source/op/map_aparam.cc @@ -61,7 +61,7 @@ class MapAparamOp : public OpKernel { // Create an output tensor TensorShape output_shape; output_shape.AddDim(nframes); - output_shape.AddDim(nloc * nnei * numb_aparam); + output_shape.AddDim(static_cast(nloc) * nnei * numb_aparam); Tensor* output_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output_tensor)); diff --git a/source/op/neighbor_stat.cc b/source/op/neighbor_stat.cc index d917c60a5f..d2a6b3ab31 100644 --- a/source/op/neighbor_stat.cc +++ b/source/op/neighbor_stat.cc @@ -112,7 +112,7 @@ class NeighborStatOp : public OpKernel { if (nei_mode == 1) { // Tensor FPTYPE_temp; TensorShape FPTYPE_shape; - FPTYPE_shape.AddDim(nall * 3); + FPTYPE_shape.AddDim(static_cast(nall) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, FPTYPE_shape, &tensor_list[0])); @@ -125,20 +125,20 @@ class NeighborStatOp : public OpKernel { double_shape, &tensor_list[1])); // Tensor cpy_temp; TensorShape cpy_shape; - cpy_shape.AddDim(mem_cpy * 3); + cpy_shape.AddDim(static_cast(mem_cpy) * 3); 
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, cpy_shape, &tensor_list[3])); // Tensor t_temp; TensorShape t_shape; - t_shape.AddDim(mem_cpy * 2); + t_shape.AddDim(static_cast(mem_cpy) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, t_shape, &tensor_list[4])); } // Tensor nlist_temp; TensorShape nlist_shape; - nlist_shape.AddDim(nloc * 2); + nlist_shape.AddDim(static_cast(nloc) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, nlist_shape, &tensor_list[5])); @@ -167,7 +167,7 @@ class NeighborStatOp : public OpKernel { rcut, max_cpy_trial, max_nnei_trial); TensorShape min_nbor_dist_shape; - min_nbor_dist_shape.AddDim(nloc * mem_nnei); + min_nbor_dist_shape.AddDim(static_cast(nloc) * mem_nnei); Tensor* min_nbor_dist_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, min_nbor_dist_shape, @@ -253,7 +253,7 @@ class NeighborStatOp : public OpKernel { } // allocate output tensor for deepmd-kit TensorShape min_nbor_dist_shape; - min_nbor_dist_shape.AddDim(nloc * MAX_NNEI); + min_nbor_dist_shape.AddDim(static_cast(nloc) * MAX_NNEI); Tensor* min_nbor_dist_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, min_nbor_dist_shape, diff --git a/source/op/pair_tab.cc b/source/op/pair_tab.cc index e412aa6c2a..5c16e0faa4 100644 --- a/source/op/pair_tab.cc +++ b/source/op/pair_tab.cc @@ -103,10 +103,10 @@ class PairTabOp : public OpKernel { energy_shape.AddDim(nloc); TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast(nall)); TensorShape virial_shape; virial_shape.AddDim(nframes); - virial_shape.AddDim(9 * nall); + virial_shape.AddDim(9 * static_cast(nall)); Tensor* energy_tensor = NULL; Tensor* force_tensor = NULL; Tensor* virial_tensor = NULL; diff --git a/source/op/prod_env_mat_multi_device.cc b/source/op/prod_env_mat_multi_device.cc index 22654b5f3a..a99804cb9e 100644 --- 
a/source/op/prod_env_mat_multi_device.cc +++ b/source/op/prod_env_mat_multi_device.cc @@ -497,7 +497,7 @@ class ProdEnvMatAOp : public OpKernel { if (nei_mode == 1) { // Tensor FPTYPE_temp; TensorShape FPTYPE_shape; - FPTYPE_shape.AddDim(nall * 3); + FPTYPE_shape.AddDim(static_cast(nall) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, FPTYPE_shape, &tensor_list[0])); @@ -510,20 +510,20 @@ class ProdEnvMatAOp : public OpKernel { double_shape, &tensor_list[1])); // Tensor cpy_temp; TensorShape cpy_shape; - cpy_shape.AddDim(mem_cpy * 3); + cpy_shape.AddDim(static_cast(mem_cpy) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, cpy_shape, &tensor_list[3])); // Tensor t_temp; TensorShape t_shape; - t_shape.AddDim(mem_cpy * 2); + t_shape.AddDim(static_cast(mem_cpy) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, t_shape, &tensor_list[4])); } // Tensor nlist_temp; TensorShape nlist_shape; - nlist_shape.AddDim(nloc * 2); + nlist_shape.AddDim(static_cast(nloc) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, nlist_shape, &tensor_list[5])); @@ -794,7 +794,7 @@ class ProdEnvMatROp : public OpKernel { if (nei_mode == 1) { // Tensor FPTYPE_temp; TensorShape FPTYPE_shape; - FPTYPE_shape.AddDim(nall * 3); + FPTYPE_shape.AddDim(static_cast(nall) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, FPTYPE_shape, &tensor_list[0])); @@ -807,20 +807,20 @@ class ProdEnvMatROp : public OpKernel { double_shape, &tensor_list[1])); // Tensor cpy_temp; TensorShape cpy_shape; - cpy_shape.AddDim(mem_cpy * 3); + cpy_shape.AddDim(static_cast(mem_cpy) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, cpy_shape, &tensor_list[3])); // Tensor t_temp; TensorShape t_shape; - t_shape.AddDim(mem_cpy * 2); + t_shape.AddDim(static_cast(mem_cpy) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, t_shape, &tensor_list[4])); } // Tensor nlist_temp; TensorShape 
nlist_shape; - nlist_shape.AddDim(nloc * 2); + nlist_shape.AddDim(static_cast(nloc) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, nlist_shape, &tensor_list[5])); @@ -1066,10 +1066,10 @@ class ProdEnvMatAMixOp : public OpKernel { nlist_shape.AddDim(int_64(nloc) * nnei); TensorShape ntype_shape; ntype_shape.AddDim(nsamples); - ntype_shape.AddDim(nloc * nnei); + ntype_shape.AddDim(static_cast(nloc) * nnei); TensorShape nmask_shape; nmask_shape.AddDim(nsamples); - nmask_shape.AddDim(nloc * nnei); + nmask_shape.AddDim(static_cast(nloc) * nnei); // define output tensor int context_output_index = 0; Tensor* descrpt_tensor = NULL; @@ -1137,7 +1137,7 @@ class ProdEnvMatAMixOp : public OpKernel { if (nei_mode == 1) { // Tensor FPTYPE_temp; TensorShape FPTYPE_shape; - FPTYPE_shape.AddDim(nall * 3); + FPTYPE_shape.AddDim(static_cast(nall) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, FPTYPE_shape, &tensor_list[0])); @@ -1150,20 +1150,20 @@ class ProdEnvMatAMixOp : public OpKernel { double_shape, &tensor_list[1])); // Tensor cpy_temp; TensorShape cpy_shape; - cpy_shape.AddDim(mem_cpy * 3); + cpy_shape.AddDim(static_cast(mem_cpy) * 3); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum::value, cpy_shape, &tensor_list[3])); // Tensor t_temp; TensorShape t_shape; - t_shape.AddDim(mem_cpy * 2); + t_shape.AddDim(static_cast(mem_cpy) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, t_shape, &tensor_list[4])); } // Tensor nlist_temp; TensorShape nlist_shape; - nlist_shape.AddDim(nloc * 2); + nlist_shape.AddDim(static_cast(nloc) * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, nlist_shape, &tensor_list[5])); @@ -1296,7 +1296,7 @@ static int _norm_copy_coord_cpu(std::vector& coord_cpy, normalize_coord_cpu(&tmp_coord[0], nall, region); int tt; for (tt = 0; tt < max_cpy_trial; ++tt) { - coord_cpy.resize(mem_cpy * 3); + coord_cpy.resize(static_cast(mem_cpy) * 3); type_cpy.resize(mem_cpy); 
idx_mapping.resize(mem_cpy); int ret = @@ -1512,7 +1512,7 @@ static int _norm_copy_coord_gpu(OpKernelContext* context, mem_cpy *= 2; // Tensor cpy_temp; TensorShape cpy_shape; - cpy_shape.AddDim(mem_cpy * 3); + cpy_shape.AddDim(static_cast(mem_cpy) * 3); status = context->allocate_temp(DataTypeToEnum::value, cpy_shape, tensor_list + 3); if (!status.ok()) { @@ -1520,7 +1520,7 @@ static int _norm_copy_coord_gpu(OpKernelContext* context, } // Tensor t_temp; TensorShape t_shape; - t_shape.AddDim(mem_cpy * 2); + t_shape.AddDim(static_cast(mem_cpy) * 2); status = context->allocate_temp(DT_INT32, t_shape, tensor_list + 4); if (!status.ok()) { return false; diff --git a/source/op/prod_env_mat_multi_device_nvnmd.cc b/source/op/prod_env_mat_multi_device_nvnmd.cc index 1cbfb968f1..d9f9275b86 100644 --- a/source/op/prod_env_mat_multi_device_nvnmd.cc +++ b/source/op/prod_env_mat_multi_device_nvnmd.cc @@ -156,7 +156,7 @@ static int _norm_copy_coord_cpu(std::vector& coord_cpy, normalize_coord_cpu(&tmp_coord[0], nall, region); int tt; for (tt = 0; tt < max_cpy_trial; ++tt) { - coord_cpy.resize(mem_cpy * 3); + coord_cpy.resize(static_cast(mem_cpy) * 3); type_cpy.resize(mem_cpy); idx_mapping.resize(mem_cpy); int ret = @@ -675,10 +675,10 @@ class ProdEnvMatAMixNvnmdQuantizeOp : public OpKernel { nlist_shape.AddDim(int_64(nloc) * nnei); TensorShape ntype_shape; ntype_shape.AddDim(nsamples); - ntype_shape.AddDim(nloc * nnei); + ntype_shape.AddDim(static_cast(nloc) * nnei); TensorShape nmask_shape; nmask_shape.AddDim(nsamples); - nmask_shape.AddDim(nloc * nnei); + nmask_shape.AddDim(static_cast(nloc) * nnei); // define output tensor int context_output_index = 0; Tensor* descrpt_tensor = NULL; @@ -707,7 +707,7 @@ class ProdEnvMatAMixNvnmdQuantizeOp : public OpKernel { Tensor fake_type_tensor; // all zeros TensorShape fake_type_shape; - fake_type_shape.AddDim(nsamples * nall); + fake_type_shape.AddDim(static_cast(nsamples) * nall); OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, 
fake_type_shape, &fake_type_tensor)); diff --git a/source/op/prod_force.cc b/source/op/prod_force.cc index d8ced591b9..57d1cd1331 100644 --- a/source/op/prod_force.cc +++ b/source/op/prod_force.cc @@ -81,7 +81,7 @@ class ProdForceOp : public OpKernel { // Create an output tensor TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast(nall)); // std::cout << "forcesahpe " << force_shape.dim_size(0) << " " << // force_shape.dim_size(1) << std::endl; Tensor* force_tensor = NULL; diff --git a/source/op/prod_force_grad.cc b/source/op/prod_force_grad.cc index 2d14022279..c1cf63917e 100644 --- a/source/op/prod_force_grad.cc +++ b/source/op/prod_force_grad.cc @@ -92,7 +92,7 @@ class ProdForceGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git a/source/op/prod_force_multi_device.cc b/source/op/prod_force_multi_device.cc index 3eaf005f9a..20cc96dd31 100644 --- a/source/op/prod_force_multi_device.cc +++ b/source/op/prod_force_multi_device.cc @@ -103,7 +103,7 @@ class ProdForceSeAOp : public OpKernel { // Create an output tensor TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast(nall)); Tensor* force_tensor = NULL; int context_output_index = 0; OP_REQUIRES_OK(context, @@ -200,7 +200,7 @@ class ProdForceSeROp : public OpKernel { // Create an output tensor TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast(nall)); Tensor* force_tensor = NULL; int context_output_index = 0; OP_REQUIRES_OK(context, diff --git a/source/op/prod_force_se_a_grad.cc b/source/op/prod_force_se_a_grad.cc index 21dd4fe00a..5aaf030512 100644 --- 
a/source/op/prod_force_se_a_grad.cc +++ b/source/op/prod_force_se_a_grad.cc @@ -85,7 +85,7 @@ class ProdForceSeAGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git a/source/op/prod_force_se_a_mask.cc b/source/op/prod_force_se_a_mask.cc index 32fcf54a79..aa4268434d 100644 --- a/source/op/prod_force_se_a_mask.cc +++ b/source/op/prod_force_se_a_mask.cc @@ -63,7 +63,7 @@ class ProdForceSeAMaskOp : public OpKernel { // Create an output tensor TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast(nall)); // std::cout << "forcesahpe " << force_shape.dim_size(0) << " " << // force_shape.dim_size(1) << std::endl; Tensor *force_tensor = NULL; diff --git a/source/op/prod_force_se_a_mask_grad.cc b/source/op/prod_force_se_a_mask_grad.cc index 6f841b1c7d..dabe405545 100644 --- a/source/op/prod_force_se_a_mask_grad.cc +++ b/source/op/prod_force_se_a_mask_grad.cc @@ -77,7 +77,7 @@ class ProdForceSeAMaskGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast(nloc) * ndescrpt); // allocate the output tensor Tensor *grad_net_tensor = NULL; diff --git a/source/op/prod_force_se_r_grad.cc b/source/op/prod_force_se_r_grad.cc index f0b4b18323..0b5338c241 100644 --- a/source/op/prod_force_se_r_grad.cc +++ b/source/op/prod_force_se_r_grad.cc @@ -77,7 +77,7 @@ class ProdForceSeRGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git 
a/source/op/prod_virial.cc b/source/op/prod_virial.cc index 2719c6c670..10532d74db 100644 --- a/source/op/prod_virial.cc +++ b/source/op/prod_virial.cc @@ -96,7 +96,7 @@ class ProdVirialOp : public OpKernel { context->allocate_output(0, virial_shape, &virial_tensor)); TensorShape atom_virial_shape; atom_virial_shape.AddDim(nframes); - atom_virial_shape.AddDim(9 * nall); + atom_virial_shape.AddDim(9 * static_cast(nall)); Tensor* atom_virial_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(1, atom_virial_shape, &atom_virial_tensor)); diff --git a/source/op/prod_virial_grad.cc b/source/op/prod_virial_grad.cc index b06e273453..02feba4eee 100644 --- a/source/op/prod_virial_grad.cc +++ b/source/op/prod_virial_grad.cc @@ -101,7 +101,7 @@ class ProdVirialGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git a/source/op/prod_virial_multi_device.cc b/source/op/prod_virial_multi_device.cc index 23b312b797..a544b010c5 100644 --- a/source/op/prod_virial_multi_device.cc +++ b/source/op/prod_virial_multi_device.cc @@ -93,7 +93,7 @@ class ProdVirialSeAOp : public OpKernel { virial_shape.AddDim(9); TensorShape atom_virial_shape; atom_virial_shape.AddDim(nframes); - atom_virial_shape.AddDim(9 * nall); + atom_virial_shape.AddDim(9 * static_cast(nall)); int context_output_index = 0; Tensor* virial_tensor = NULL; OP_REQUIRES_OK( @@ -192,7 +192,7 @@ class ProdVirialSeROp : public OpKernel { virial_shape.AddDim(9); TensorShape atom_virial_shape; atom_virial_shape.AddDim(nframes); - atom_virial_shape.AddDim(9 * nall); + atom_virial_shape.AddDim(9 * static_cast(nall)); int context_output_index = 0; Tensor* virial_tensor = NULL; OP_REQUIRES_OK( diff --git a/source/op/prod_virial_se_a_grad.cc b/source/op/prod_virial_se_a_grad.cc index 
a22401d654..d6c55b6969 100644 --- a/source/op/prod_virial_se_a_grad.cc +++ b/source/op/prod_virial_se_a_grad.cc @@ -94,7 +94,7 @@ class ProdVirialSeAGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast<int64_t>(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git a/source/op/prod_virial_se_r_grad.cc b/source/op/prod_virial_se_r_grad.cc index b874c828df..40c2828ca7 100644 --- a/source/op/prod_virial_se_r_grad.cc +++ b/source/op/prod_virial_se_r_grad.cc @@ -86,7 +86,7 @@ class ProdVirialSeRGradOp : public OpKernel { // Create an output tensor TensorShape grad_net_shape; grad_net_shape.AddDim(nframes); - grad_net_shape.AddDim(nloc * ndescrpt); + grad_net_shape.AddDim(static_cast<int64_t>(nloc) * ndescrpt); // allocate the output tensor Tensor* grad_net_tensor = NULL; diff --git a/source/op/soft_min.cc b/source/op/soft_min.cc index 4062ddc4cb..85aade5e7b 100644 --- a/source/op/soft_min.cc +++ b/source/op/soft_min.cc @@ -94,7 +94,7 @@ class SoftMinSwitchOp : public OpKernel { sw_value_shape.AddDim(nloc); TensorShape sw_deriv_shape; sw_deriv_shape.AddDim(nframes); - sw_deriv_shape.AddDim(3 * nnei * nloc); + sw_deriv_shape.AddDim(3 * static_cast<int64_t>(nnei) * nloc); Tensor* sw_value_tensor = NULL; Tensor* sw_deriv_tensor = NULL; tmp_idx = 0; diff --git a/source/op/soft_min_force.cc b/source/op/soft_min_force.cc index a2970f4c3a..0801170597 100644 --- a/source/op/soft_min_force.cc +++ b/source/op/soft_min_force.cc @@ -73,7 +73,7 @@ class SoftMinForceOp : public OpKernel { // Create an output tensor TensorShape force_shape; force_shape.AddDim(nframes); - force_shape.AddDim(3 * nall); + force_shape.AddDim(3 * static_cast<int64_t>(nall)); Tensor* force_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(0, force_shape, &force_tensor)); diff --git a/source/op/soft_min_virial.cc b/source/op/soft_min_virial.cc index
91a94e01c3..26daa78604 100644 --- a/source/op/soft_min_virial.cc +++ b/source/op/soft_min_virial.cc @@ -89,7 +89,7 @@ class SoftMinVirialOp : public OpKernel { context->allocate_output(0, virial_shape, &virial_tensor)); TensorShape atom_virial_shape; atom_virial_shape.AddDim(nframes); - atom_virial_shape.AddDim(9 * nall); + atom_virial_shape.AddDim(9 * static_cast<int64_t>(nall)); Tensor* atom_virial_tensor = NULL; OP_REQUIRES_OK(context, context->allocate_output(1, atom_virial_shape, &atom_virial_tensor));