
Commit

resolve "Multiplication result converted to larger type" (deepmodelin…
Browse files Browse the repository at this point in the history
…g#3159)

Follow-up to deepmodeling#3149.

---------

Signed-off-by: Jinzhe Zeng <[email protected]>
njzjz authored Jan 20, 2024
1 parent 4d82430 commit 1097062
Showing 32 changed files with 202 additions and 117 deletions.
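
The warning named in the title fires when the product of two narrower integers is assigned or compared to a wider type: the multiplication itself is still evaluated in the narrow type and can overflow before the widening happens. A minimal sketch of the pattern and of the fix applied throughout this commit (the sizes are hypothetical):

#include <cstdint>
#include <iostream>

int main() {
  int nframes = 100000;  // hypothetical sizes whose product exceeds INT_MAX
  int dparam = 100000;

  // The flagged pattern: the product is evaluated in 32-bit int, and only
  // the already-overflowed result would be widened. Signed overflow is
  // undefined behavior, so the line is left commented out.
  // std::int64_t bad = nframes * dparam;

  // The fix: widen one operand first, so the whole product is computed in
  // the larger type.
  std::int64_t good = static_cast<std::int64_t>(nframes) * dparam;
  std::cout << good << '\n';  // prints 10000000000
}
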
42 changes: 25 additions & 17 deletions source/api_c/include/deepmd.hpp
@@ -1060,14 +1060,15 @@ class DeepPot {
const int &nloc,
const std::vector<VALUETYPE> &fparam,
const std::vector<VALUETYPE> &aparam) const {
-  if (fparam.size() != dfparam && fparam.size() != nframes * dfparam) {
+  if (fparam.size() != dfparam &&
+      fparam.size() != static_cast<size_t>(nframes) * dfparam) {
throw deepmd::hpp::deepmd_exception(
"the dim of frame parameter provided is not consistent with what the "
"model uses");
}

-  if (aparam.size() != daparam * nloc &&
-      aparam.size() != nframes * daparam * nloc) {
+  if (aparam.size() != static_cast<size_t>(daparam) * nloc &&
+      aparam.size() != static_cast<size_t>(nframes) * daparam * nloc) {
throw deepmd::hpp::deepmd_exception(
"the dim of atom parameter provided is not consistent with what the "
"model uses");
@@ -1081,9 +1082,10 @@ class DeepPot {
if (param.size() == dparam) {
out_param.resize(static_cast<size_t>(nframes) * dparam);
for (int ii = 0; ii < nframes; ++ii) {
-      std::copy(param.begin(), param.end(), out_param.begin() + ii * dparam);
+      std::copy(param.begin(), param.end(),
+                out_param.begin() + static_cast<std::ptrdiff_t>(ii) * dparam);
}
-  } else if (param.size() == nframes * dparam) {
+  } else if (param.size() == static_cast<size_t>(nframes) * dparam) {
out_param = param;
}
}
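
The tiling loop above broadcasts one set of frame parameters to every frame; the new static_cast<std::ptrdiff_t> keeps the iterator offset in the signed type that iterator arithmetic is defined in. A self-contained re-sketch of the same logic (tile is a hypothetical name, not the library API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Replicate a per-frame parameter vector once per frame.
template <typename T>
std::vector<T> tile(const std::vector<T>& param, int nframes) {
  const int dparam = static_cast<int>(param.size());
  std::vector<T> out(static_cast<std::size_t>(nframes) * dparam);
  for (int ii = 0; ii < nframes; ++ii) {
    // Iterator offsets use difference_type, which is signed: ptrdiff_t.
    std::copy(param.begin(), param.end(),
              out.begin() + static_cast<std::ptrdiff_t>(ii) * dparam);
  }
  return out;
}

int main() {
  std::vector<double> fparam{1.0, 2.0};
  auto tiled = tile(fparam, 3);
  assert(tiled.size() == 6 && tiled[4] == 1.0);  // {1,2, 1,2, 1,2}
}
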
@@ -1184,7 +1186,8 @@ class DeepPotModelDevi {

// memory will be contiguous for std::vector but not std::vector<std::vector>
std::vector<double> energy_flat(numb_models);
-  std::vector<VALUETYPE> force_flat(numb_models * natoms * 3);
+  std::vector<VALUETYPE> force_flat(static_cast<size_t>(numb_models) *
+                                    natoms * 3);
std::vector<VALUETYPE> virial_flat(numb_models * 9);
double *ener_ = &energy_flat[0];
VALUETYPE *force_ = &force_flat[0];
@@ -1260,10 +1263,13 @@ class DeepPotModelDevi {
const int *atype_ = &atype[0];

std::vector<double> energy_flat(numb_models);
-  std::vector<VALUETYPE> force_flat(numb_models * natoms * 3);
+  std::vector<VALUETYPE> force_flat(static_cast<size_t>(numb_models) *
+                                    natoms * 3);
std::vector<VALUETYPE> virial_flat(numb_models * 9);
-  std::vector<VALUETYPE> atom_energy_flat(numb_models * natoms);
-  std::vector<VALUETYPE> atom_virial_flat(numb_models * natoms * 9);
+  std::vector<VALUETYPE> atom_energy_flat(static_cast<size_t>(numb_models) *
+                                          natoms);
+  std::vector<VALUETYPE> atom_virial_flat(static_cast<size_t>(numb_models) *
+                                          natoms * 9);
double *ener_ = &energy_flat[0];
VALUETYPE *force_ = &force_flat[0];
VALUETYPE *virial_ = &virial_flat[0];
@@ -1402,8 +1408,8 @@ class DeepPotModelDevi {

for (unsigned ii = 0; ii < numb_models; ++ii) {
for (unsigned jj = 0; jj < nloc; ++jj) {
-      const VALUETYPE *tmp_f = &(xx[ii][jj * stride]);
-      const VALUETYPE *tmp_avg = &(avg[jj * stride]);
+      const VALUETYPE *tmp_f = &(xx[ii][static_cast<size_t>(jj) * stride]);
+      const VALUETYPE *tmp_avg = &(avg[static_cast<size_t>(jj) * stride]);
for (unsigned dd = 0; dd < stride; ++dd) {
VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd];
std[jj] += vdiff * vdiff;
@@ -1432,7 +1438,7 @@ class DeepPotModelDevi {
assert(nloc * stride == ndof);

for (unsigned ii = 0; ii < nloc; ++ii) {
-      const VALUETYPE *tmp_avg = &(avg[ii * stride]);
+      const VALUETYPE *tmp_avg = &(avg[static_cast<size_t>(ii) * stride]);
VALUETYPE f_norm = 0.0;
for (unsigned dd = 0; dd < stride; ++dd) {
f_norm += tmp_avg[dd] * tmp_avg[dd];
@@ -1477,14 +1483,15 @@ class DeepPotModelDevi {
const int &nloc,
const std::vector<VALUETYPE> &fparam,
const std::vector<VALUETYPE> &aparam) const {
-  if (fparam.size() != dfparam && fparam.size() != nframes * dfparam) {
+  if (fparam.size() != dfparam &&
+      fparam.size() != static_cast<size_t>(nframes) * dfparam) {
throw deepmd::hpp::deepmd_exception(
"the dim of frame parameter provided is not consistent with what the "
"model uses");
}

-  if (aparam.size() != daparam * nloc &&
-      aparam.size() != nframes * daparam * nloc) {
+  if (aparam.size() != static_cast<size_t>(daparam) * nloc &&
+      aparam.size() != static_cast<size_t>(nframes) * daparam * nloc) {
throw deepmd::hpp::deepmd_exception(
"the dim of atom parameter provided is not consistent with what the "
"model uses");
@@ -1498,9 +1505,10 @@ class DeepPotModelDevi {
if (param.size() == dparam) {
out_param.resize(static_cast<size_t>(nframes) * dparam);
for (int ii = 0; ii < nframes; ++ii) {
-      std::copy(param.begin(), param.end(), out_param.begin() + ii * dparam);
+      std::copy(param.begin(), param.end(),
+                out_param.begin() + static_cast<std::ptrdiff_t>(ii) * dparam);
}
-  } else if (param.size() == nframes * dparam) {
+  } else if (param.size() == static_cast<size_t>(nframes) * dparam) {
out_param = param;
}
}
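
The validation hunks above (in both DeepPot and DeepPotModelDevi) fix the same comparison: vector::size() returns size_t, so the expected count must be computed in size_t as well, or the int product can overflow first. A sketch of the check in isolation (check_fparam and its arguments are hypothetical names):

#include <cstddef>
#include <stdexcept>
#include <vector>

void check_fparam(const std::vector<double>& fparam, int nframes,
                  int dfparam) {
  // Without the cast, nframes * dfparam is evaluated in int and may
  // overflow before being widened for the comparison against size().
  if (fparam.size() != static_cast<std::size_t>(dfparam) &&
      fparam.size() != static_cast<std::size_t>(nframes) * dfparam) {
    throw std::invalid_argument("inconsistent frame parameter dimension");
  }
}

int main() {
  std::vector<double> fparam(6);
  check_fparam(fparam, 3, 2);  // 3 frames x 2 values per frame: accepted
}
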
2 changes: 1 addition & 1 deletion source/api_c/src/c_api.cc
@@ -1414,7 +1414,7 @@ void DP_SelectMapInt(const int* in,
int* out) {
std::vector<int> in_(in, in + stride * nall1);
std::vector<int> fwd_map_(fwd_map, fwd_map + nall1);
-  std::vector<int> out_(stride * nall2);
+  std::vector<int> out_(static_cast<size_t>(stride) * nall2);
deepmd::select_map(out_, in_, fwd_map_, stride);
if (out) {
std::copy(out_.begin(), out_.end(), out);
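
The buffer in DP_SelectMapInt holds stride values per output atom, and its length is now computed in size_t. A hypothetical simplification of what such a select/map pass does, using the same sizing pattern:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Keep rows of `stride` ints for atoms whose forward-map entry is
// non-negative, writing each row to its new index.
std::vector<int> select_map(const std::vector<int>& in,
                            const std::vector<int>& fwd_map, int stride) {
  int nall2 = 0;
  for (int m : fwd_map) nall2 = std::max(nall2, m + 1);
  std::vector<int> out(static_cast<std::size_t>(stride) * nall2);
  for (std::size_t i = 0; i < fwd_map.size(); ++i) {
    if (fwd_map[i] < 0) continue;  // atom filtered out
    for (int d = 0; d < stride; ++d) {
      out[static_cast<std::size_t>(fwd_map[i]) * stride + d] =
          in[i * stride + d];
    }
  }
  return out;
}

int main() {
  std::vector<int> in{10, 11, 20, 21, 30, 31};  // 3 atoms, stride 2
  std::vector<int> fwd_map{1, -1, 0};           // drop the middle atom
  assert((select_map(in, fwd_map, 2) == std::vector<int>{30, 31, 10, 11}));
}
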
12 changes: 8 additions & 4 deletions source/api_cc/src/AtomMap.cc
@@ -39,8 +39,10 @@ void AtomMap::forward(typename std::vector<VALUETYPE>::iterator out,
int gro_i = idx_map[ii];
for (int dd = 0; dd < stride; ++dd) {
// out[ii*stride+dd] = in[gro_i*stride+dd];
-      *(out + kk * nall * stride + ii * stride + dd) =
-          *(in + kk * nall * stride + gro_i * stride + dd);
+      *(out + static_cast<std::ptrdiff_t>(kk) * nall * stride +
+        static_cast<std::ptrdiff_t>(ii) * stride + dd) =
+          *(in + static_cast<std::ptrdiff_t>(kk) * nall * stride +
+            static_cast<std::ptrdiff_t>(gro_i) * stride + dd);
}
}
}
@@ -58,8 +60,10 @@ void AtomMap::backward(typename std::vector<VALUETYPE>::iterator out,
int gro_i = idx_map[ii];
for (int dd = 0; dd < stride; ++dd) {
// out[gro_i*stride+dd] = in[ii*stride+dd];
-      *(out + kk * nall * stride + gro_i * stride + dd) =
-          *(in + kk * nall * stride + ii * stride + dd);
+      *(out + static_cast<std::ptrdiff_t>(kk) * nall * stride +
+        static_cast<std::ptrdiff_t>(gro_i) * stride + dd) =
+          *(in + static_cast<std::ptrdiff_t>(kk) * nall * stride +
+            static_cast<std::ptrdiff_t>(ii) * stride + dd);
}
}
}
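
AtomMap::forward and backward walk a frame-major [nframes][nall][stride] layout, so the flat index kk * nall * stride can exceed INT_MAX for large systems; the casts keep the whole expression in the wider type. A compact sketch of the forward remap under those assumptions (forward here is a hypothetical free function, not the class method):

#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
void forward(std::vector<T>& out, const std::vector<T>& in,
             const std::vector<int>& idx_map, int nall, int nframes,
             int stride) {
  for (int kk = 0; kk < nframes; ++kk) {
    for (std::size_t ii = 0; ii < idx_map.size(); ++ii) {
      const int gro_i = idx_map[ii];
      for (int dd = 0; dd < stride; ++dd) {
        // All index math in size_t, so no 32-bit intermediate can overflow.
        out[(static_cast<std::size_t>(kk) * nall + ii) * stride + dd] =
            in[(static_cast<std::size_t>(kk) * nall + gro_i) * stride + dd];
      }
    }
  }
}

int main() {
  std::vector<int> idx_map{1, 0};            // swap two atoms
  std::vector<double> in{1, 2, 3, 4, 5, 6};  // 1 frame, 2 atoms, stride 3
  std::vector<double> out(in.size());
  forward(out, in, idx_map, 2, 1, 3);
  assert(out[0] == 4 && out[3] == 1);
}
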
6 changes: 3 additions & 3 deletions source/api_cc/src/DeepPot.cc
@@ -761,8 +761,8 @@ void DeepPotModelDevi::compute_std(

for (unsigned ii = 0; ii < numb_models; ++ii) {
for (unsigned jj = 0; jj < nloc; ++jj) {
-      const VALUETYPE* tmp_f = &(xx[ii][jj * stride]);
-      const VALUETYPE* tmp_avg = &(avg[jj * stride]);
+      const VALUETYPE* tmp_f = &(xx[ii][static_cast<size_t>(jj) * stride]);
+      const VALUETYPE* tmp_avg = &(avg[static_cast<size_t>(jj) * stride]);
for (unsigned dd = 0; dd < stride; ++dd) {
VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd];
std[jj] += vdiff * vdiff;
@@ -833,7 +833,7 @@ void DeepPotModelDevi::compute_relative_std(std::vector<VALUETYPE>& std,
assert(nloc * stride == ndof);

for (unsigned ii = 0; ii < nloc; ++ii) {
-    const VALUETYPE* tmp_avg = &(avg[ii * stride]);
+    const VALUETYPE* tmp_avg = &(avg[static_cast<size_t>(ii) * stride]);
VALUETYPE f_norm = 0.0;
for (unsigned dd = 0; dd < stride; ++dd) {
f_norm += tmp_avg[dd] * tmp_avg[dd];
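
compute_std accumulates squared deviations from the ensemble average over all models for each atom. A simplified, self-contained re-sketch of that computation (the signature and normalization are illustrative, not the exact library formula), with the same size_t indexing as the fix:

#include <cmath>
#include <cstddef>
#include <vector>

// std_dev[j] = sqrt( (1/M) * sum_m |x_m[j] - avg[j]|^2 ) over each
// stride-wide block.
void compute_std(std::vector<double>& std_dev, const std::vector<double>& avg,
                 const std::vector<std::vector<double>>& xx,
                 std::size_t nloc, std::size_t stride) {
  const std::size_t numb_models = xx.size();
  std_dev.assign(nloc, 0.0);
  for (std::size_t m = 0; m < numb_models; ++m) {
    for (std::size_t j = 0; j < nloc; ++j) {
      const double* f = &xx[m][j * stride];  // size_t products, as in the fix
      const double* a = &avg[j * stride];
      for (std::size_t d = 0; d < stride; ++d) {
        const double diff = f[d] - a[d];
        std_dev[j] += diff * diff;
      }
    }
  }
  for (std::size_t j = 0; j < nloc; ++j) {
    std_dev[j] = std::sqrt(std_dev[j] / numb_models);
  }
}

int main() {
  // Two models, one atom, stride 3: force deviation of a single atom.
  std::vector<std::vector<double>> xx{{1, 0, 0}, {3, 0, 0}};
  std::vector<double> avg{2, 0, 0}, std_dev;
  compute_std(std_dev, avg, xx, 1, 3);
  return std_dev[0] == 1.0 ? 0 : 1;  // sqrt((1 + 1) / 2) == 1
}
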
4 changes: 2 additions & 2 deletions source/ipi/src/Convert.cc
@@ -30,7 +30,7 @@ void Convert<VALUETYPE>::forward(std::vector<VALUETYPE>& out,
const int stride) const {
assert(in.size() == stride * idx_map.size());
int natoms = idx_map.size();
-  out.resize(stride * natoms);
+  out.resize(static_cast<size_t>(stride) * natoms);
for (int ii = 0; ii < natoms; ++ii) {
int gro_i = idx_map[ii];
for (int dd = 0; dd < stride; ++dd) {
@@ -45,7 +45,7 @@ void Convert<VALUETYPE>::backward(std::vector<VALUETYPE>& out,
const int stride) const {
int natoms = idx_map.size();
assert(in.size() == stride * idx_map.size());
-  out.resize(stride * natoms);
+  out.resize(static_cast<size_t>(stride) * natoms);
for (int ii = 0; ii < natoms; ++ii) {
int gro_i = idx_map[ii];
for (int dd = 0; dd < stride; ++dd) {
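
Assuming the same gather/scatter convention as AtomMap above, Convert::forward gathers through idx_map while backward scatters through it, so the two are inverse permutations. A minimal round-trip sketch using the same size_t sizing:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const std::vector<int> idx_map{2, 0, 1};
  const int stride = 2;
  const std::size_t natoms = idx_map.size();
  std::vector<double> in{0, 1, 2, 3, 4, 5}, fwd(in.size()), back(in.size());

  // forward: out[ii] = in[idx_map[ii]] (gather)
  for (std::size_t ii = 0; ii < natoms; ++ii)
    for (int dd = 0; dd < stride; ++dd)
      fwd[ii * stride + dd] =
          in[static_cast<std::size_t>(idx_map[ii]) * stride + dd];

  // backward: out[idx_map[ii]] = in[ii] (scatter)
  for (std::size_t ii = 0; ii < natoms; ++ii)
    for (int dd = 0; dd < stride; ++dd)
      back[static_cast<std::size_t>(idx_map[ii]) * stride + dd] =
          fwd[ii * stride + dd];

  assert(back == in);  // the two maps compose to the identity
}
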
22 changes: 12 additions & 10 deletions source/lib/tests/test_env_mat_a.cc
@@ -504,11 +504,12 @@ TEST_F(TestEnvMatA, prod_cpu) {
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
deepmd::convert_nlist(inlist, nlist_a_cpy);

-  std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
-  std::vector<int> nlist(nloc * nnei);
-  std::vector<double> avg(ntypes * ndescrpt, 0);
-  std::vector<double> std(ntypes * ndescrpt, 1);
+  std::vector<double> em(static_cast<size_t>(nloc) * ndescrpt),
+      em_deriv(static_cast<size_t>(nloc) * ndescrpt * 3),
+      rij(static_cast<size_t>(nloc) * nnei * 3);
+  std::vector<int> nlist(static_cast<size_t>(nloc) * nnei);
+  std::vector<double> avg(static_cast<size_t>(ntypes) * ndescrpt, 0);
+  std::vector<double> std(static_cast<size_t>(ntypes) * ndescrpt, 1);
deepmd::prod_env_mat_a_cpu(&em[0], &em_deriv[0], &rij[0], &nlist[0],
&posi_cpy[0], &atype_cpy[0], inlist, max_nbor_size,
&avg[0], &std[0], nloc, nall, rc, rc_smth, sec_a);
@@ -538,11 +539,12 @@ TEST_F(TestEnvMatA, prod_cpu_equal_cpu) {
std::vector<int *> firstneigh(nloc);
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
convert_nlist(inlist, nlist_a_cpy);
-  std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
-  std::vector<int> nlist(nloc * nnei);
-  std::vector<double> avg(ntypes * ndescrpt, 0);
-  std::vector<double> std(ntypes * ndescrpt, 1);
+  std::vector<double> em(static_cast<size_t>(nloc) * ndescrpt),
+      em_deriv(static_cast<size_t>(nloc) * ndescrpt * 3),
+      rij(static_cast<size_t>(nloc) * nnei * 3);
+  std::vector<int> nlist(static_cast<size_t>(nloc) * nnei);
+  std::vector<double> avg(static_cast<size_t>(ntypes) * ndescrpt, 0);
+  std::vector<double> std(static_cast<size_t>(ntypes) * ndescrpt, 1);
deepmd::prod_env_mat_a_cpu(&em[0], &em_deriv[0], &rij[0], &nlist[0],
&posi_cpy[0], &atype_cpy[0], inlist, max_nbor_size,
&avg[0], &std[0], nloc, nall, rc, rc_smth, sec_a);
33 changes: 18 additions & 15 deletions source/lib/tests/test_env_mat_a_mix.cc
@@ -532,11 +532,12 @@ TEST_F(TestEnvMatAMix, prod_cpu) {
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
deepmd::convert_nlist(inlist, nlist_a_cpy);

-  std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
-  std::vector<int> nlist(nloc * nnei);
-  std::vector<int> ntype(nloc * nnei);
-  bool *nmask = new bool[nloc * nnei];
+  std::vector<double> em(static_cast<size_t>(nloc) * ndescrpt),
+      em_deriv(static_cast<size_t>(nloc) * ndescrpt * 3),
+      rij(static_cast<size_t>(nloc) * nnei * 3);
+  std::vector<int> nlist(static_cast<size_t>(nloc) * nnei);
+  std::vector<int> ntype(static_cast<size_t>(nloc) * nnei);
+  bool *nmask = new bool[static_cast<size_t>(nloc) * nnei];
memset(nmask, 0, sizeof(bool) * nloc * nnei);
std::vector<double> avg(ntypes * ndescrpt, 0);
std::vector<double> std(ntypes * ndescrpt, 1);
@@ -575,11 +576,12 @@ TEST_F(TestEnvMatAMix, prod_cpu_equal_cpu) {
std::vector<int *> firstneigh(nloc);
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
convert_nlist(inlist, nlist_a_cpy);
-  std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
-  std::vector<int> nlist(nloc * nnei);
-  std::vector<double> avg(ntypes * ndescrpt, 0);
-  std::vector<double> std(ntypes * ndescrpt, 1);
+  std::vector<double> em(static_cast<size_t>(nloc) * ndescrpt),
+      em_deriv(static_cast<size_t>(nloc) * ndescrpt * 3),
+      rij(static_cast<size_t>(nloc) * nnei * 3);
+  std::vector<int> nlist(static_cast<size_t>(nloc) * nnei);
+  std::vector<double> avg(static_cast<size_t>(ntypes) * ndescrpt, 0);
+  std::vector<double> std(static_cast<size_t>(ntypes) * ndescrpt, 1);
deepmd::prod_env_mat_a_cpu(&em[0], &em_deriv[0], &rij[0], &nlist[0],
&posi_cpy[0], &atype[0], inlist, max_nbor_size,
&avg[0], &std[0], nloc, nall, rc, rc_smth, sec_a,
@@ -652,11 +654,12 @@ TEST_F(TestEnvMatAMix, prod_gpu) {
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]),
gpu_inlist;
convert_nlist(inlist, nlist_a_cpy);
-  std::vector<double> em(nloc * ndescrpt, 0.0),
-      em_deriv(nloc * ndescrpt * 3, 0.0), rij(nloc * nnei * 3, 0.0);
-  std::vector<int> nlist(nloc * nnei, 0);
-  std::vector<int> ntype(nloc * nnei, 0);
-  bool *nmask = new bool[nloc * nnei];
+  std::vector<double> em(static_cast<size_t>(nloc) * ndescrpt, 0.0),
+      em_deriv(nloc * ndescrpt * 3, 0.0),
+      rij(static_cast<size_t>(nloc) * nnei * 3, 0.0);
+  std::vector<int> nlist(static_cast<size_t>(nloc) * nnei, 0);
+  std::vector<int> ntype(static_cast<size_t>(nloc) * nnei, 0);
+  bool *nmask = new bool[static_cast<size_t>(nloc) * nnei];
memset(nmask, 0, sizeof(bool) * nloc * nnei);
std::vector<double> avg(ntypes * ndescrpt, 0);
std::vector<double> std(ntypes * ndescrpt, 1);
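
The nmask buffer above is a raw new bool[] whose length is now computed in size_t, with a companion memset that must cover the same count. A sketch of an alternative that needs the size computation only once (a suggestion under the same sizing assumption, not what the commit does):

#include <cstddef>
#include <vector>

int main() {
  int nloc = 100, nnei = 138;  // hypothetical counts
  // Value-initialized vector<char>: sized in size_t, zeroed on construction,
  // and freed automatically, so no separate memset or delete[] is needed.
  std::vector<char> nmask(static_cast<std::size_t>(nloc) * nnei, 0);
  return nmask.size() == 13800 ? 0 : 1;
}
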
4 changes: 2 additions & 2 deletions source/lib/tests/test_env_mat_a_nvnmd.cc
@@ -274,7 +274,7 @@ TEST_F(TestEnvMatANvnmd, prod_cpu) {
deepmd::convert_nlist(inlist, nlist_a_cpy);

std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
+      rij(static_cast<size_t>(nloc) * nnei * 3);
std::vector<int> nlist(nloc * nnei);
std::vector<double> avg(ntypes * ndescrpt, 0);
std::vector<double> std(ntypes * ndescrpt, 1);
@@ -308,7 +308,7 @@ TEST_F(TestEnvMatANvnmd, prod_cpu_equal_cpu) {
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
convert_nlist(inlist, nlist_a_cpy);
std::vector<double> em(nloc * ndescrpt), em_deriv(nloc * ndescrpt * 3),
-      rij(nloc * nnei * 3);
+      rij(static_cast<size_t>(nloc) * nnei * 3);
std::vector<int> nlist(nloc * nnei);
std::vector<double> avg(ntypes * ndescrpt, 0);
std::vector<double> std(ntypes * ndescrpt, 1);
2 changes: 1 addition & 1 deletion source/lmp/compute_deeptensor_atom.cpp
@@ -178,6 +178,6 @@ void ComputeDeeptensorAtom::compute_peratom() {
------------------------------------------------------------------------- */

double ComputeDeeptensorAtom::memory_usage() {
-  double bytes = nmax * size_peratom_cols * sizeof(double);
+  double bytes = static_cast<size_t>(nmax) * size_peratom_cols * sizeof(double);
return bytes;
}
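
Multiplication is left-associative, so in nmax * size_peratom_cols * sizeof(double) the int-by-int product is evaluated first even though the final factor is a size_t; casting the leftmost operand widens the whole chain. A sketch with hypothetical sizes:

#include <cstddef>
#include <iostream>

int main() {
  int nmax = 2000000;           // hypothetical atom capacity
  int size_peratom_cols = 2000;
  // Without the cast, nmax * size_peratom_cols would be evaluated in int
  // (4e9, overflowing) before sizeof(double) ever enters the expression.
  double bytes =
      static_cast<std::size_t>(nmax) * size_peratom_cols * sizeof(double);
  std::cout << bytes << " bytes\n";  // 3.2e+10
}
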
8 changes: 6 additions & 2 deletions source/op/ewald_recp.cc
@@ -56,10 +56,14 @@ class EwaldRecpOp : public OpKernel {

// check the sizes
OP_REQUIRES(
-      context, (nsamples * nloc * 3 == coord_tensor.shape().dim_size(0)),
+      context,
+      (static_cast<int64_t>(nsamples) * nloc * 3 ==
+       coord_tensor.shape().dim_size(0)),
errors::InvalidArgument("coord number of samples should match"));
OP_REQUIRES(
-      context, (nsamples * nloc * 1 == charge_tensor.shape().dim_size(0)),
+      context,
+      (static_cast<int64_t>(nsamples) * nloc * 1 ==
+       charge_tensor.shape().dim_size(0)),
errors::InvalidArgument("charge number of samples should match"));
OP_REQUIRES(
context, (nsamples * 9 == box_tensor.shape().dim_size(0)),
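
TensorFlow's dim_size() returns a 64-bit value, so the expected element count must also be computed in 64 bits, or the int product can overflow before the comparison. A TensorFlow-free sketch of the same check (check_coord is a hypothetical name):

#include <cstdint>
#include <stdexcept>

void check_coord(std::int64_t dim0, int nsamples, int nloc) {
  if (static_cast<std::int64_t>(nsamples) * nloc * 3 != dim0) {
    throw std::invalid_argument("coord number of samples should match");
  }
}

int main() {
  // 1e6 samples x 1e3 atoms x 3 components = 3e9, beyond INT_MAX.
  check_coord(static_cast<std::int64_t>(1000000) * 1000 * 3, 1000000, 1000);
}
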
2 changes: 1 addition & 1 deletion source/op/matmul_fitnet_nvnmd.cc
@@ -133,7 +133,7 @@ class MatmulFitnetNvnmdOp : public OpKernel {
expo_maxs.resize(K);

if (normw == 0) {
-      find_max_expo(expo_max, (FPTYPE*)&w[0], M * K);
+      find_max_expo(expo_max, (FPTYPE*)&w[0], static_cast<int64_t>(M) * K);
for (kk = 0; kk < K; kk++) {
expo_maxs[kk] = expo_max;
}
6 changes: 4 additions & 2 deletions source/op/matmul_flt_nvnmd.cc
@@ -130,7 +130,8 @@ class MatmulFltNvnmdOp : public OpKernel {
for (hh = 0; hh < H; hh++) {
// find x max exponent
if ((normx & 0x0f) == 0) { // normalize x[:,:]
-        find_max_expo(expo_max1, (FPTYPE *)&x[hh * N * M], N * M);
+        find_max_expo(expo_max1, (FPTYPE *)&x[hh * N * M],
+                      static_cast<int64_t>(N) * M);
for (ii = 0; ii < N; ii++) {
expo_max1s[ii] = expo_max1;
}
@@ -144,7 +145,8 @@

// find w max exponent
if ((normw & 0x0f) == 0) { // normalize w[:,:]
-        find_max_expo(expo_max2, (FPTYPE *)&w[hh * M * K], M * K);
+        find_max_expo(expo_max2, (FPTYPE *)&w[hh * M * K],
+                      static_cast<int64_t>(M) * K);
for (kk = 0; kk < K; kk++) {
expo_max2s[kk] = expo_max2;
}
12 changes: 8 additions & 4 deletions source/op/pair_tab.cc
@@ -89,10 +89,13 @@ class PairTabOp : public OpKernel {
OP_REQUIRES(context, (nall == type_tensor.shape().dim_size(1)),
errors::InvalidArgument("shape of type should be nall"));
OP_REQUIRES(
-      context, (3 * nnei * nloc == rij_tensor.shape().dim_size(1)),
+      context,
+      (3 * static_cast<int64_t>(nnei) * nloc ==
+       rij_tensor.shape().dim_size(1)),
errors::InvalidArgument("shape of rij should be 3 * nloc * nnei"));
OP_REQUIRES(
-      context, (nnei * nloc == nlist_tensor.shape().dim_size(1)),
+      context,
+      (static_cast<int64_t>(nnei) * nloc == nlist_tensor.shape().dim_size(1)),
errors::InvalidArgument("shape of nlist should be nloc * nnei"));
OP_REQUIRES(context, (nloc == scale_tensor.shape().dim_size(1)),
errors::InvalidArgument("shape of scale should be nloc"));
Expand Down Expand Up @@ -134,10 +137,11 @@ class PairTabOp : public OpKernel {
"ntypes provided in table does not match deeppot"));
int nspline = table_info(2) + 0.1;
int tab_stride = 4 * nspline;
-    assert(ntypes * ntypes * tab_stride ==
+    assert(static_cast<int64_t>(ntypes) * ntypes * tab_stride ==
table_data_tensor.shape().dim_size(0));
std::vector<double> d_table_info(4);
-    std::vector<double> d_table_data(ntypes * ntypes * tab_stride);
+    std::vector<double> d_table_data(static_cast<size_t>(ntypes) * ntypes *
+                                     tab_stride);
for (unsigned ii = 0; ii < d_table_info.size(); ++ii) {
d_table_info[ii] = table_info(ii);
}
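
The pair table above stores one block of 4 * nspline spline coefficients per type pair, which is why its total length is ntypes * ntypes * tab_stride. A small sketch of that layout with hypothetical row-major indexing of one block, computed entirely in size_t (the variable names mirror the code; the values are made up):

#include <cstddef>
#include <vector>

int main() {
  int ntypes = 4, nspline = 1000;
  const std::size_t tab_stride = 4 * static_cast<std::size_t>(nspline);
  std::vector<double> table(static_cast<std::size_t>(ntypes) * ntypes *
                            tab_stride);
  // Offset of the coefficient block for the pair (ti, tj), row-major:
  int ti = 1, tj = 2;
  std::size_t off = (static_cast<std::size_t>(ti) * ntypes + tj) * tab_stride;
  return off < table.size() ? 0 : 1;
}
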
2 changes: 1 addition & 1 deletion source/op/prod_env_mat_multi_device.cc
@@ -1098,7 +1098,7 @@ class ProdEnvMatAMixOp : public OpKernel {

Tensor fake_type_tensor; // all zeros
TensorShape fake_type_shape;
-    fake_type_shape.AddDim(nsamples * nall);
+    fake_type_shape.AddDim(static_cast<int64_t>(nsamples) * nall);
OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, fake_type_shape,
&fake_type_tensor));

3 changes: 2 additions & 1 deletion source/op/prod_force.cc
@@ -70,7 +70,8 @@ class ProdForceOp : public OpKernel {
errors::InvalidArgument("number of samples should match"));

OP_REQUIRES(context,
-                (nloc * ndescrpt * 12 == in_deriv_tensor.shape().dim_size(1)),
+                (static_cast<int64_t>(nloc) * ndescrpt * 12 ==
+                 in_deriv_tensor.shape().dim_size(1)),
errors::InvalidArgument("number of descriptors should match"));
OP_REQUIRES(context, (nnei == n_a_sel + n_r_sel),
errors::InvalidArgument("number of neighbors should match"));
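
All of the hunks above apply the same one-operand cast. Where a size is built from untrusted or very large inputs, a checked helper can fail loudly instead of silently wrapping; a hedged, general-purpose variant (not deepmd-kit API):

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

std::size_t checked_count(int a, int b) {
  if (a < 0 || b < 0) throw std::invalid_argument("negative size");
  // Two non-negative ints always fit a 64-bit product, so this cannot wrap.
  const std::uint64_t p =
      static_cast<std::uint64_t>(a) * static_cast<std::uint64_t>(b);
  if (p > std::numeric_limits<std::size_t>::max())
    throw std::overflow_error("count overflows size_t");  // 32-bit targets
  return static_cast<std::size_t>(p);
}

int main() { return checked_count(100000, 100000) == 10000000000ULL ? 0 : 1; }
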
(The remaining changed files in this commit are not rendered on this page.)
