Skip to content

Commit

Permalink
Merge branch 'paddle2' into add_amp_fmt
Browse files Browse the repository at this point in the history
Signed-off-by: HydrogenSulfate <[email protected]>
  • Loading branch information
HydrogenSulfate authored Dec 29, 2023
2 parents 9851bb3 + bb28e11 commit 7c8d7d9
Show file tree
Hide file tree
Showing 5 changed files with 10 additions and 7 deletions.
1 change: 1 addition & 0 deletions deepmd/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
GLOBAL_PD_FLOAT_PRECISION,
GLOBAL_TF_FLOAT_PRECISION,
op_module,
paddle,
tf,
Expand Down
2 changes: 2 additions & 0 deletions deepmd/descriptor/se_a.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,6 +557,7 @@ def forward(
coord = paddle.reshape(coord_, [-1, natoms[1] * 3])
box = paddle.reshape(box_, [-1, 9])
atype = paddle.reshape(atype_, [-1, natoms[1]])

(
self.descrpt,
self.descrpt_deriv,
Expand Down Expand Up @@ -705,6 +706,7 @@ def _pass_filter(
layer = paddle.reshape(
layer, [inputs.shape[0], natoms[2 + type_i], self.get_dim_out()]
)

qmat = paddle.reshape(
qmat,
[
Expand Down
2 changes: 1 addition & 1 deletion deepmd/infer/deep_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ def _get_value(
name of tensor to get
attr_name : Optional[str], optional
if specified, class attribute with this name will be created and tensor will
be assigned to it, by default None.
be assigned to it, by default None
"""
# do not use os.path.join as it doesn't work on Windows
value = None
Expand Down
5 changes: 3 additions & 2 deletions deepmd/utils/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,13 +305,13 @@ def variable_summaries(var: tf.Variable, name: str):
tf.summary.scalar("min", tf.reduce_min(var))
tf.summary.histogram("histogram", var)


def cast_to_dtype(x, dtype: paddle.dtype) -> paddle.Tensor:
    """Return ``x`` cast to ``dtype``, or ``x`` unchanged if it already matches.

    Parameters
    ----------
    x
        Input tensor (assumed to expose a ``.dtype`` attribute — paddle.Tensor).
    dtype : paddle.dtype
        Target data type.

    Returns
    -------
    paddle.Tensor
        ``x`` itself when no conversion is needed, otherwise a cast copy.
    """
    # Skip the cast entirely when the dtype already matches, so callers in
    # mixed-precision paths don't pay for a no-op conversion.
    return x if x.dtype == dtype else paddle.cast(x, dtype)


class OneLayer(nn.Layer):
def __init__(
self,
Expand Down Expand Up @@ -546,4 +546,5 @@ def forward(self, xx):
xx = hidden
if self.mixed_prec is not None:
xx = cast_to_dtype(xx, get_precision(self.mixed_prec["output_prec"]))

return xx
7 changes: 3 additions & 4 deletions source/lib/paddle_src/paddle_prod_env_mat.cu
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,7 @@
#include <cub/block/block_radix_sort.cuh>
#include <cub/block/block_store.cuh>
#include <type_traits>
<<<<<<< HEAD
=======

>>>>>>> paddle2
#include "paddle/extension.h"

#define GOOGLE_CUDA 1
Expand All @@ -21,7 +18,9 @@
#include "region.h"
#include "utilities.h"

typedef long long int_64;

typedef long long int_64;


#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
#define CHECK_INPUT_ON_CPU(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
Expand Down

0 comments on commit 7c8d7d9

Please sign in to comment.