diff --git a/doc/freeze/freeze.md b/doc/freeze/freeze.md index 4cee02c6d0..f0d3c8abd8 100644 --- a/doc/freeze/freeze.md +++ b/doc/freeze/freeze.md @@ -38,7 +38,7 @@ The output model is called `model_branch1.pth`, which is the specifically frozen :::{tab-item} Paddle {{ paddle_icon }} ```bash -$ dp --pd freeze -o model.json +$ dp --pd freeze -o model DEEPMD INFO Paddle inference model has been exported to: model.json(.pdiparams) ``` @@ -48,7 +48,7 @@ In [multi-task mode](../train/multi-task-training-pt.md), you need to choose one to specify which model branch you want to freeze: ```bash -$ dp --pd freeze -o model_branch1.json --head CHOSEN_BRANCH +$ dp --pd freeze -o model_branch1 --head CHOSEN_BRANCH ``` The output model is called `model_branch1.json`, which is the specifically frozen model with the `CHOSEN_BRANCH` head. diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md index 5079135f16..7e1be1a432 100644 --- a/doc/install/install-from-source.md +++ b/doc/install/install-from-source.md @@ -145,7 +145,7 @@ One should remember to activate the virtual environment every time he/she uses D Check the compiler version on your machine -``` +```bash gcc --version ``` @@ -422,10 +422,10 @@ cmake -DENABLE_PYTORCH=TRUE -DUSE_PT_PYTHON_LIBS=TRUE -DCMAKE_INSTALL_PREFIX=$de :::{tab-item} Paddle {{ paddle_icon }} -I assume you have installed the Paddle (either Python or C++ interface) to `$paddle_root`, then execute CMake +I assume you have compiled the Paddle inference library (C++ interface) to `$PADDLE_INFERENCE_DIR`, then execute CMake ```bash -cmake -DENABLE_PYTORCH=TRUE -DCMAKE_PREFIX_PATH=$paddle_root -DCMAKE_INSTALL_PREFIX=$deepmd_root .. +cmake -DENABLE_PADDLE=ON -DCMAKE_PREFIX_PATH=$PADDLE_INFERENCE_DIR -DPADDLE_INFERENCE_DIR=$PADDLE_INFERENCE_DIR -DCMAKE_INSTALL_PREFIX=$deepmd_root .. 
``` ::: diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index 5960fd6398..fc3f7c8c58 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -51,7 +51,9 @@ if(ENABLE_PADDLE) link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_INFERENCE_DIR}/paddle/lib") + # if (USE_ROCM_TOOLKIT) add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1) + # endif() endif(ENABLE_PADDLE) if(BUILD_TESTING) diff --git a/source/install/build_cc_pd.sh b/source/install/build_cc_pd.sh index d45cf5b993..36389c5ec3 100755 --- a/source/install/build_cc_pd.sh +++ b/source/install/build_cc_pd.sh @@ -22,70 +22,33 @@ export LAMMPS_DIR="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/source/b export LAMMPS_SOURCE_ROOT="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/source/build_lammps/lammps-stable_29Aug2024/" # 设置推理时的 GPU 卡号 -export CUDA_VISIBLE_DEVICES=3 -# export FLAGS_benchmark=1 -# export GLOG_v=6 +export CUDA_VISIBLE_DEVICES=1 -# PADDLE_DIR 设置为第二步 clone下来的 Paddle 目录 -export PADDLE_DIR="/workspace/hesensen/PaddleScience_enn_debug/Paddle/" - -# DEEPMD_DIR 设置为本项目的根目录 -export DEEPMD_DIR="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/" +# deepmd_root 设置为本项目的根目录 +export deepmd_root="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/" # PADDLE_INFERENCE_DIR 设置为第二步编译得到的 Paddle 推理库目录 export PADDLE_INFERENCE_DIR="/workspace/hesensen/PaddleScience_enn_debug/Paddle/build/paddle_inference_install_dir/" -# TENSORFLOW_DIR 设置为 tensorflow 的安装目录,可用 pip show tensorflow 确定 -# export TENSORFLOW_DIR="/path/to/tensorflow" - -export LD_LIBRARY_PATH=${PADDLE_DIR}/paddle/fluid/pybind/:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${DEEPMD_DIR}/deepmd/op:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${deepmd_root}/deepmd/op:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/paddle/lib:$LD_LIBRARY_PATH export 
LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/third_party/install/mkldnn/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/third_party/install/mklml/lib:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${DEEPMD_DIR}/source/build:$LD_LIBRARY_PATH -export LIBRARY_PATH=${DEEPMD_DIR}/deepmd/op:$LIBRARY_PATH -# export FLAGS_check_nan_inf=1 -# cd ${DEEPMD_DIR}/source -# rm -rf build # 若改动CMakeLists.txt,则需要打开该注释 -# mkdir build -# cd - - -# DEEPMD_INSTALL_DIR 设置为 deepmd-lammps 的目标安装目录,可自行设置任意路径 -# export DEEPMD_INSTALL_DIR="path/to/deepmd_root" - -# 开始编译 -# cmake -DCMAKE_INSTALL_PREFIX=${DEEPMD_INSTALL_DIR} \ -# -DUSE_CUDA_TOOLKIT=TRUE \ -# -DTENSORFLOW_ROOT=${TENSORFLOW_DIR} \ -# -DPADDLE_LIB=${PADDLE_INFERENCE_DIR} \ -# -DFLOAT_PREC=low .. -# make -j4 && make install -# make lammps - -# cd ${LAMMPS_DIR}/src/ -# \cp -r ${DEEPMD_DIR}/source/build/USER-DEEPMD . -# make yes-kspace -# make yes-extra-fix -# make yes-user-deepmd -# make serial -j -# export PATH=${LAMMPS_DIR}/src:$PATH - -# cd ${DEEPMD_DIR}/examples/water/lmp +export LD_LIBRARY_PATH=${deepmd_root}/source/build:$LD_LIBRARY_PATH -# lmp_serial -in in.lammps +cd ${deepmd_root}/source +rm -rf build # always remove any previous build directory so the rebuild starts clean +mkdir build +cd - BUILD_TMP_DIR=${SCRIPT_PATH}/../build mkdir -p ${BUILD_TMP_DIR} cd ${BUILD_TMP_DIR} -cmake -D ENABLE_TENSORFLOW=OFF \ - -D ENABLE_PYTORCH=OFF \ - -D ENABLE_PADDLE=ON \ - -D PADDLE_LIB=${PADDLE_INFERENCE_DIR} \ +cmake -D ENABLE_PADDLE=ON \ + -D PADDLE_INFERENCE_DIR=${PADDLE_INFERENCE_DIR} \ -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ -D USE_TF_PYTHON_LIBS=TRUE \ -D LAMMPS_SOURCE_ROOT=${LAMMPS_SOURCE_ROOT} \ - -D ENABLE_IPI=OFF \ ${CUDA_ARGS} \ -D LAMMPS_VERSION=stable_29Aug2024 \ .. @@ -95,11 +58,11 @@ cmake --install . #------------------ echo "Congratulations! DeePMD-kit has been installed at ${INSTALL_PREFIX}" -cd ${DEEPMD_DIR}/source +cd ${deepmd_root}/source cd build make lammps cd ${LAMMPS_DIR}/src/ -\cp -r ${DEEPMD_DIR}/source/build/USER-DEEPMD . 
+\cp -r ${deepmd_root}/source/build/USER-DEEPMD . make no-kspace make yes-kspace make no-extra-fix @@ -107,11 +70,11 @@ make yes-extra-fix make no-user-deepmd make yes-user-deepmd # make serial -j -make mpi -j 20 +make mpi -j 10 export PATH=${LAMMPS_DIR}/src:$PATH -cd ${DEEPMD_DIR}/examples/water/lmp +cd ${deepmd_root}/examples/water/lmp echo "START INFERENCE..." # lmp_serial -in paddle_in.lammps 2>&1 | tee paddle_infer.log -mpirun -np 1 lmp_mpi -in paddle_in.lammps 2>&1 | tee paddle_infer.log +mpirun -np 2 lmp_mpi -in paddle_in.lammps 2>&1 | tee paddle_infer.log