Skip to content

Commit

Permalink
Re-applied all changes but preferred gcc version
Browse files Browse the repository at this point in the history
  • Loading branch information
stemann committed Sep 12, 2022
1 parent 931edf6 commit e7dc80e
Showing 1 changed file with 28 additions and 14 deletions.
42 changes: 28 additions & 14 deletions T/Torch/build_tarballs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ atomic_patch -p1 ../patches/pytorch-aten-qnnpack-cmake-windows.patch
cmake_extra_args=""
include_paths=""
if [[ $bb_full_target == *cxx11* ]]; then
cmake_extra_args+="-DGLIBCXX_USE_CXX11_ABI=1 "
fi
if [[ $target == i686-linux-gnu*
|| $target == x86_64-linux-gnu*
|| $target == x86_64-apple-darwin*
Expand All @@ -52,19 +56,22 @@ elif [[ $bb_full_target == armv6l-linux-gnu* ]]; then
cmake_extra_args+="-DBLAS=OpenBLAS "
fi
if [[ $target == x86_64* ]]; then
if [[ $target == x86_64* ]]; then # Restricting PYTORCH_QNNPACK to x86_64: Adapted from https://salsa.debian.org/deeplearning-team/pytorch/-/blob/master/debian/rules
cmake_extra_args+="-DUSE_PYTORCH_QNNPACK=ON "
else
cmake_extra_args+="-DUSE_PYTORCH_QNNPACK=OFF "
fi
if [[ $target == aarch64-linux-gnu* || $target == *-w64-mingw32* || $target == *-freebsd* ]]; then
if [[ $target == aarch64-linux-gnu* # Disabled use of breakpad on aarch64-linux-gnu: Fails to build embedded breakpad library.
|| $target == *-w64-mingw32* # Disabling breakpad enables configure on Windows - in combination with pytorch-aten-qnnpack-cmake-windows.patch
|| $target == *-freebsd*
]]; then
cmake_extra_args+="-DUSE_BREAKPAD=OFF "
else
cmake_extra_args+="-DUSE_BREAKPAD=ON "
fi
if [[ $target == *-linux-musl*
if [[ $target == *-linux-musl* # Disabled use of TensorPipe on linux-musl: Fails to build embedded TensorPipe library.
|| $target == *-w64-mingw32* # TensorPipe cannot be used on Windows
]]; then
cmake_extra_args+="-DUSE_TENSORPIPE=OFF "
Expand All @@ -81,9 +88,9 @@ if [[ $target != arm-* && $target == *-linux-musl* ]]; then
cmake_extra_args+="-DUSE_SYSTEM_GLOO=ON "
fi
if [[ $target == aarch64-linux-* # A compiler with AVX512 support is required for FBGEMM
|| $target == arm-linux-* # A compiler with AVX512 support is required for FBGEMM
|| $target == i686-linux-* # x64 operating system is required for FBGEMM
if [[ $target == aarch64-* # A compiler with AVX512 support is required for FBGEMM
|| $target == arm-* # A compiler with AVX512 support is required for FBGEMM
|| $target == i686-* # x64 operating system is required for FBGEMM
|| $target == x86_64-w64-mingw32*
]]; then
cmake_extra_args+="-DUSE_FBGEMM=OFF -DUSE_FAKELOWP=OFF "
Expand All @@ -103,7 +110,7 @@ if [[ $bb_full_target == *cuda* ]]; then
apk del cmake
apk add 'cmake<3.17' --repository=http://dl-cdn.alpinelinux.org/alpine/v3.11/main
export PATH=$PATH:$cuda_full_path/bin
export CUDACXX="ccache nvcc"
export CUDACXX=$cuda_full_path/bin/nvcc
export CUDAHOSTCXX=$CXX
mkdir $WORKSPACE/tmpdir
export TMPDIR=$WORKSPACE/tmpdir
Expand Down Expand Up @@ -193,8 +200,9 @@ install_license ../LICENSE
platforms = supported_platforms()
filter!(p -> !(Sys.islinux(p) && libc(p) == "musl"), platforms) # musl fails due to conflicting declaration of C function ‘void __assert_fail(const char*, const char*, int, const char*) - between /opt/x86_64-linux-musl/x86_64-linux-musl/include/c++/8.1.0/cassert:44 and /opt/x86_64-linux-musl/x86_64-linux-musl/sys-root/usr/include/assert.h
filter!(!Sys.iswindows, platforms) # ONNX does not support cross-compiling for w64-mingw32 on linux
filter!(p -> arch(p) != "armv6l", platforms) # armv6l is not supported by XNNPACK
filter!(p -> arch(p) != "armv7l", platforms) # armv7l is not supported by XNNPACK
filter!(p -> arch(p) != "powerpc64le", platforms) # PowerPC64LE is not supported by XNNPACK
filter!(p -> !(Sys.isapple(p) && arch(p) == "aarch64"), platforms) # aarch64-apple not supported by CPUInfo_jll v0.0.20200612 referenced by XNNPACK_jll v0.0.20200323
filter!(!Sys.isfreebsd, platforms) # Build fails: Clang v12 crashes compiling aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp.

mkl_platforms = [
Expand Down Expand Up @@ -225,6 +233,12 @@ for p in cuda_platforms
push!(platforms, p)
end

platforms = expand_cxxstring_abis(platforms)
mkl_platforms = expand_cxxstring_abis(mkl_platforms)
blis_platforms = expand_cxxstring_abis(blis_platforms)
openblas_platforms = expand_cxxstring_abis(openblas_platforms)
cuda_platforms = expand_cxxstring_abis(cuda_platforms)

# The products that we will ensure are always built
products = [
LibraryProduct(["libtorch", "torch"], :libtorch),
Expand All @@ -235,17 +249,17 @@ products = [
dependencies = [
Dependency(PackageSpec(name="CompilerSupportLibraries_jll", uuid="e66e0078-7015-5450-92f7-15fbd957f2ae")),
Dependency("blis_jll"; platforms = blis_platforms),
Dependency("CPUInfo_jll"),
Dependency("CUDNN_jll"; platforms = cuda_platforms),
Dependency("Gloo_jll"; platforms = filter(p -> nbits(p) == 64, platforms)),
Dependency("CPUInfo_jll"; compat = "0.0.20201217"),
Dependency("CUDNN_jll", v"8.2.4"; compat = "8", platforms = cuda_platforms),
Dependency("Gloo_jll"; compat = "0.0.20210521", platforms = filter(p -> nbits(p) == 64, platforms)),
Dependency("LAPACK_jll"; platforms = openblas_platforms),
Dependency("MKL_jll"; platforms = mkl_platforms),
BuildDependency("MKL_Headers_jll"; platforms = mkl_platforms),
Dependency("OpenBLAS_jll"; platforms = openblas_platforms),
Dependency("PThreadPool_jll"),
Dependency("SLEEF_jll"),
Dependency("PThreadPool_jll"; compat = "0.0.20210414"),
Dependency("SLEEF_jll", v"3.5.2"; compat = "3"),
# Dependency("TensorRT_jll"; platforms = cuda_platforms), # Building with TensorRT is not supported: https://github.com/pytorch/pytorch/issues/60228
Dependency("XNNPACK_jll", v"0.0.20200323"),
Dependency("XNNPACK_jll"; compat = "0.0.20210622"),
BuildDependency(PackageSpec("protoc_jll", Base.UUID("c7845625-083e-5bbe-8504-b32d602b7110"), v"3.13.0")),
HostBuildDependency(PackageSpec("protoc_jll", Base.UUID("c7845625-083e-5bbe-8504-b32d602b7110"), v"3.13.0")),
]
Expand Down

0 comments on commit e7dc80e

Please sign in to comment.