From 1423750a4770ff8262add0415f44b8577fa1a7a4 Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Wed, 27 Mar 2024 00:00:56 -0400
Subject: [PATCH] feat: Add `USE_PT_PYTHON_LIBS` CMake variable (#3605)

Like `USE_TF_PYTHON_LIBS`.

Signed-off-by: Jinzhe Zeng
---
 doc/install/install-from-source.md | 13 +++++++++++++
 source/CMakeLists.txt              | 24 ++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/doc/install/install-from-source.md b/doc/install/install-from-source.md
index 8676928e09..9e86ee33b0 100644
--- a/doc/install/install-from-source.md
+++ b/doc/install/install-from-source.md
@@ -52,6 +52,8 @@ If one does not need the GPU support of DeePMD-kit and is concerned about packag
 pip install --upgrade tensorflow-cpu
 ```
 
+One can also [use conda](https://docs.deepmodeling.org/faq/conda.html) to install TensorFlow from [conda-forge](https://conda-forge.org).
+
 To verify the installation, run
 
 ```bash
@@ -72,6 +74,8 @@ pip install torch
 
 Follow [PyTorch documentation](https://pytorch.org/get-started/locally/) to install PyTorch built against different CUDA versions or without CUDA.
 
+One can also [use conda](https://docs.deepmodeling.org/faq/conda.html) to install PyTorch from [conda-forge](https://conda-forge.org).
+
 :::
 
 ::::
@@ -255,6 +259,7 @@ pip install -U cmake
 
 You must enable at least one backend.
 If you enable two or more backends, these backend libraries must be built in a compatible way, e.g. using the same `_GLIBCXX_USE_CXX11_ABI` flag.
+We recommend using [conda packages](https://docs.deepmodeling.org/faq/conda.html) from [conda-forge](https://conda-forge.org), which are usually compatible with each other.
 
 ::::{tab-set}
 
@@ -278,6 +283,13 @@ I assume you have installed the PyTorch (either Python or C++ interface) to `$to
 cmake -DENABLE_PYTORCH=TRUE -DCMAKE_PREFIX_PATH=$torch_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
 ```
 
+You can specify `-DUSE_PT_PYTHON_LIBS=TRUE` to use libtorch from the Python installation,
+but be aware that [PyTorch PyPI packages are still built with `_GLIBCXX_USE_CXX11_ABI=0`](https://github.com/pytorch/pytorch/issues/51039), which may not be compatible with other libraries.
+
+```bash
+cmake -DENABLE_PYTORCH=TRUE -DUSE_PT_PYTHON_LIBS=TRUE -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+```
+
 :::
 
 ::::
@@ -296,6 +308,7 @@ One may add the following arguments to `cmake`:
 | -DCMAKE_HIP_COMPILER_ROCM_ROOT=<value> | Path | Detected automatically | The path to the ROCM toolkit directory. |
 | -DLAMMPS_SOURCE_ROOT=<value> | Path | - | Only necessary for LAMMPS plugin mode. The path to the [LAMMPS source code](install-lammps.md). LAMMPS 8Apr2021 or later is supported. If not assigned, the plugin mode will not be enabled. |
 | -DUSE_TF_PYTHON_LIBS=<value> | `TRUE` or `FALSE` | `FALSE` | {{ tensorflow_icon }} If `TRUE`, build the C++ interface with TensorFlow's Python libraries (TensorFlow's Python interface is required); there is no need to build TensorFlow's C++ interface. |
+| -DUSE_PT_PYTHON_LIBS=<value> | `TRUE` or `FALSE` | `FALSE` | {{ pytorch_icon }} If `TRUE`, build the C++ interface with PyTorch's Python libraries (PyTorch's Python interface is required); there is no need to download PyTorch's C++ libraries. |
 | -DENABLE_NATIVE_OPTIMIZATION=<value> | `TRUE` or `FALSE` | `FALSE` | Enable compilation optimization for the native machine's CPU type. Do not enable it if generated code will run on different CPUs. |
 | -DCMAKE\_<LANG>\_FLAGS=<value> (`<LANG>`=`CXX`, `CUDA` or `HIP`) | str | - | Default compilation flags to be used when compiling `<LANG>` files.
 See [CMake documentation](https://cmake.org/cmake/help/latest/variable/CMAKE_LANG_FLAGS.html). |
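
A note on the ABI caveat in the documentation change above: one can ask an installed PyTorch wheel which C++ ABI it was built with before mixing it with other backend libraries. A minimal sketch, assuming the `torch` Python package is importable in the active environment (`torch.compiled_with_cxx11_abi()` is PyTorch's public query for this flag):

```bash
# Prints True for _GLIBCXX_USE_CXX11_ABI=1 and False for _GLIBCXX_USE_CXX11_ABI=0
# (current PyPI wheels report False, per the PyTorch issue linked in the doc change).
python -c "import torch; print(torch.compiled_with_cxx11_abi())"
```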
diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt
index 931013016d..9560b69a70 100644
--- a/source/CMakeLists.txt
+++ b/source/CMakeLists.txt
@@ -152,6 +152,30 @@ endif()
 if(ENABLE_TENSORFLOW AND NOT DEEPMD_C_ROOT)
   find_package(tensorflow REQUIRED)
 endif()
+if(BUILD_CPP_IF
+   AND USE_PT_PYTHON_LIBS
+   AND NOT CMAKE_CROSSCOMPILING
+   AND NOT SKBUILD)
+  find_package(
+    Python
+    COMPONENTS Interpreter
+    REQUIRED)
+  execute_process(
+    COMMAND ${Python_EXECUTABLE} -c
+            "import torch;print(torch.utils.cmake_prefix_path)"
+    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+    OUTPUT_VARIABLE PYTORCH_CMAKE_PREFIX_PATH
+    RESULT_VARIABLE PYTORCH_CMAKE_PREFIX_PATH_RESULT_VAR
+    ERROR_VARIABLE PYTORCH_CMAKE_PREFIX_PATH_ERROR_VAR
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
+  if(NOT ${PYTORCH_CMAKE_PREFIX_PATH_RESULT_VAR} EQUAL 0)
+    message(
+      FATAL_ERROR
+        "Cannot determine PyTorch CMake prefix path, error code: ${PYTORCH_CMAKE_PREFIX_PATH_RESULT_VAR}, error message: ${PYTORCH_CMAKE_PREFIX_PATH_ERROR_VAR}"
+    )
+  endif()
+  list(APPEND CMAKE_PREFIX_PATH ${PYTORCH_CMAKE_PREFIX_PATH})
+endif()
 if(ENABLE_PYTORCH AND NOT DEEPMD_C_ROOT)
   find_package(Torch REQUIRED)
   string(REGEX MATCH "_GLIBCXX_USE_CXX11_ABI=([0-9]+)" CXXABI_PT_MATCH
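
For reference, the CMake branch added above amounts to asking the installed `torch` Python package for the CMake prefix of its bundled libtorch and appending it to `CMAKE_PREFIX_PATH`. A minimal manual sketch of the same lookup, assuming `python` resolves to an interpreter with `torch` installed and `$deepmd_root` is set as in the documentation change:

```bash
# Locate the CMake config shipped inside the torch wheel ...
torch_prefix=$(python -c "import torch; print(torch.utils.cmake_prefix_path)")
# ... and hand it to CMake explicitly; roughly what -DUSE_PT_PYTHON_LIBS=TRUE automates.
cmake -DENABLE_PYTORCH=TRUE -DCMAKE_PREFIX_PATH=$torch_prefix -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
```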