Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Pytorch optimization #974

Merged
merged 17 commits into from
Sep 28, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/linuxWF.yml
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,8 @@ jobs:
# added by luigibonati
mkdir -p $HOME/opt/
cd $HOME/opt/ # GB: install in $HOME/opt/libtorch
wget --no-check-certificate https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-1.13.1%2Bcpu.zip ;
unzip libtorch-cxx11-abi-shared-with-deps-1.13.1+cpu.zip ;
wget --no-check-certificate https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.0.0%2Bcpu.zip ;
unzip libtorch-cxx11-abi-shared-with-deps-2.0.0+cpu.zip ;
echo "LIBRARY_PATH=$PWD/libtorch/lib:$LIBRARY_PATH" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=$PWD/libtorch/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV
echo "CPATH=$PWD/libtorch/include/torch/csrc/api/include/:$PWD/libtorch/include/:$PWD/libtorch/include/torch:$CPATH" >> $GITHUB_ENV
Expand Down
244 changes: 219 additions & 25 deletions configure
Original file line number Diff line number Diff line change
Expand Up @@ -9555,7 +9555,197 @@ fi

#added by luigibonati
if test $libtorch = true ; then
# test program
# disable as-needed in linking libraries (both static and shared)

save_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="$CXXFLAGS -Wl,--no-as-needed"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -Wl,--no-as-needed" >&5
$as_echo_n "checking whether $CXX accepts -Wl,--no-as-needed... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

int
main ()
{

;
return 0;
}
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

int
main ()
{

;
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: not linking" >&5
$as_echo "not linking" >&6; }; CXXFLAGS="$save_CXXFLAGS"
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext

else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }; CXXFLAGS="$save_CXXFLAGS"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext

LDSHARED="$LDSHARED -Wl,--no-as-needed "

# CUDA and CPU builds of libtorch ship different sets of libraries
# first try linking a CUDA test program

found=ko
__PLUMED_HAS_LIBTORCH=no
if test "${libsearch}" = true ; then
testlibs=" torch_cpu c10 c10_cuda torch_cuda "
else
testlibs=""
fi
save_LIBS="$LIBS"

# check if multiple libraries are required simultaneously
multiple="no"
if test "true" = "true"; then
multiple="yes"
all_LIBS=""
for testlib in $testlibs;
do
all_LIBS="$all_LIBS -l$testlib"
done
testlibs=" " # to check only without libraries, and later with all together
fi

# check without libraries
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cuda without extra libs" >&5
$as_echo_n "checking libtorch_cuda without extra libs... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
#include <torch/cuda.h>
int main() {
std::cerr << "CUDA is available: " << torch::cuda::is_available() << std::endl;
torch::Tensor tensor = torch::rand({2, 3});
torch::Device device = torch::kCPU;
device = torch::kCUDA;
tensor.to(device);
return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext

if test "$found" = "ko" ; then
if test "$multiple" = "yes" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cuda with $all_LIBS" >&5
$as_echo_n "checking libtorch_cuda with $all_LIBS... " >&6; }
LIBS="$all_LIBS $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
#include <torch/cuda.h>
int main() {
std::cerr << "CUDA is available: " << torch::cuda::is_available() << std::endl;
torch::Tensor tensor = torch::rand({2, 3});
torch::Device device = torch::kCPU;
device = torch::kCUDA;
tensor.to(device);
return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
else
for testlib in $testlibs
do
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cuda with -l$testlib" >&5
$as_echo_n "checking libtorch_cuda with -l$testlib... " >&6; }
LIBS="-l$testlib $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
#include <torch/cuda.h>
int main() {
std::cerr << "CUDA is available: " << torch::cuda::is_available() << std::endl;
torch::Tensor tensor = torch::rand({2, 3});
torch::Device device = torch::kCPU;
device = torch::kCUDA;
tensor.to(device);
return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
found=ok
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
if test $found = ok ; then
break
fi
LIBS="$save_LIBS"
done
fi
fi

if test $found = ok ; then
$as_echo "#define __PLUMED_HAS_LIBTORCH 1" >>confdefs.h

__PLUMED_HAS_LIBTORCH=yes
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot enable __PLUMED_HAS_LIBTORCH" >&5
$as_echo "$as_me: WARNING: cannot enable __PLUMED_HAS_LIBTORCH" >&2;}
LIBS="$save_LIBS"
fi


# if not found, fall back to testing the CPU version
if test "$__PLUMED_HAS_LIBTORCH" = no; then
# AC_MSG_NOTICE([CUDA-enabled libtorch not found (or devices not available), trying with CPU version.])

found=ko
__PLUMED_HAS_LIBTORCH=no
Expand All @@ -9579,17 +9769,18 @@ if test $libtorch = true ; then
fi

# check without libraries
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch without extra libs" >&5
$as_echo_n "checking libtorch without extra libs... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cpu without extra libs" >&5
$as_echo_n "checking libtorch_cpu without extra libs... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});
return 0;
}
#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});

return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
Expand All @@ -9606,18 +9797,19 @@ rm -f core conftest.err conftest.$ac_objext \

if test "$found" = "ko" ; then
if test "$multiple" = "yes" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch with $all_LIBS" >&5
$as_echo_n "checking libtorch with $all_LIBS... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cpu with $all_LIBS" >&5
$as_echo_n "checking libtorch_cpu with $all_LIBS... " >&6; }
LIBS="$all_LIBS $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});
return 0;
}
#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});

return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
Expand All @@ -9634,18 +9826,19 @@ rm -f core conftest.err conftest.$ac_objext \
else
for testlib in $testlibs
do
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch with -l$testlib" >&5
$as_echo_n "checking libtorch with -l$testlib... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libtorch_cpu with -l$testlib" >&5
$as_echo_n "checking libtorch_cpu with -l$testlib... " >&6; }
LIBS="-l$testlib $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});
return 0;
}
#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});

return 0;
}

_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
Expand Down Expand Up @@ -9677,6 +9870,7 @@ $as_echo "$as_me: WARNING: cannot enable __PLUMED_HAS_LIBTORCH" >&2;}
LIBS="$save_LIBS"
fi

fi
fi

# in non-debug mode, add -DNDEBUG
Expand Down
40 changes: 32 additions & 8 deletions configure.ac
Original file line number Diff line number Diff line change
Expand Up @@ -879,15 +879,39 @@ fi

#added by luigibonati
if test $libtorch = true ; then
# test program
PLUMED_CHECK_CXX_PACKAGE([libtorch],[
#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});
return 0;
}
# disable as-needed in linking libraries (both static and shared)
PLUMED_CHECK_CXXFLAG([-Wl,--no-as-needed])
LDSHARED="$LDSHARED -Wl,--no-as-needed "

# CUDA and CPU builds of libtorch ship different sets of libraries
# first try linking a CUDA test program
PLUMED_CHECK_CXX_PACKAGE([libtorch_cuda],[
#include <torch/torch.h>
#include <torch/script.h>
#include <torch/cuda.h>
int main() {
std::cerr << "CUDA is available: " << torch::cuda::is_available() << std::endl;
torch::Tensor tensor = torch::rand({2, 3});
torch::Device device = torch::kCPU;
device = torch::kCUDA;
tensor.to(device);
return 0;
}
], [__PLUMED_HAS_LIBTORCH], [ torch_cpu c10 c10_cuda torch_cuda ], [true])

# if not found, fall back to testing the CPU version
if test "$__PLUMED_HAS_LIBTORCH" = no; then
# AC_MSG_NOTICE([CUDA-enabled libtorch not found (or devices not available), trying with CPU version.])
PLUMED_CHECK_CXX_PACKAGE([libtorch_cpu],[
#include <torch/torch.h>
#include <torch/script.h>
int main() {
torch::Tensor tensor = torch::rand({2, 3});

return 0;
}
], [__PLUMED_HAS_LIBTORCH], [ torch_cpu c10 ], [true])
fi
fi

# in non-debug mode, add -DNDEBUG
Expand Down
Loading