[arch-commits] Commit in python-pytorch/repos/community-x86_64 (8 files)

Sven-Hendrik Haase svenstaro at archlinux.org
Tue Mar 17 10:17:11 UTC 2020


    Date: Tuesday, March 17, 2020 @ 10:17:11
  Author: svenstaro
Revision: 600134

archrelease: copy trunk to community-x86_64

Added:
  python-pytorch/repos/community-x86_64/PKGBUILD
    (from rev 600133, python-pytorch/trunk/PKGBUILD)
  python-pytorch/repos/community-x86_64/fix_include_system.patch
    (from rev 600133, python-pytorch/trunk/fix_include_system.patch)
  python-pytorch/repos/community-x86_64/nccl_version.patch
    (from rev 600133, python-pytorch/trunk/nccl_version.patch)
  python-pytorch/repos/community-x86_64/torch_cuda_api.patch
    (from rev 600133, python-pytorch/trunk/torch_cuda_api.patch)
Deleted:
  python-pytorch/repos/community-x86_64/PKGBUILD
  python-pytorch/repos/community-x86_64/fix_include_system.patch
  python-pytorch/repos/community-x86_64/nccl_version.patch
  python-pytorch/repos/community-x86_64/torch_cuda_api.patch

--------------------------+
 PKGBUILD                 |  380 ++++++++++++++++++++++-----------------------
 fix_include_system.patch |   22 +-
 nccl_version.patch       |   92 +++++-----
 torch_cuda_api.patch     |   26 +--
 4 files changed, 259 insertions(+), 261 deletions(-)

Deleted: PKGBUILD
===================================================================
--- PKGBUILD	2020-03-17 10:17:05 UTC (rev 600133)
+++ PKGBUILD	2020-03-17 10:17:11 UTC (rev 600134)
@@ -1,191 +0,0 @@
-# Maintainer: Sven-Hendrik Haase <svenstaro at gmail.com>
-# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
-
-pkgbase="python-pytorch"
-pkgname=("python-pytorch" "python-pytorch-opt" "python-pytorch-cuda" "python-pytorch-opt-cuda")
-_pkgname="pytorch"
-pkgver=1.4.0
-pkgrel=5
-pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration"
-arch=('x86_64')
-url="https://pytorch.org"
-license=('BSD')
-depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 'python-yaml'
-         'python-numpy' 'protobuf' 'ffmpeg' 'python-future' 'qt5-base' 'intel-dnnl' 'intel-mkl')
-makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda'
-             'cudnn' 'git' 'magma' 'ninja' 'pkgconfig')
-source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$pkgver"
-        fix_include_system.patch
-        nccl_version.patch
-        torch_cuda_api.patch
-        https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/30332.patch
-        https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/30333.patch)
-sha256sums=('SKIP'
-            '147bdaeac8ec46ea46382e6146878bd8f8d51e05d5bd6f930dfd8e2b520859b9'
-            '1a276bd827a0c76dab908cbc6605fa4c9fc2cc2b9431b6578a41133ae27dba2b'
-            '8965f003f5812c5ab1bd27ab66d916560ea4a644364727b9755dc0dea752ad77'
-            '3170551116798dc496636a87f00b86c6463895bb6d174df616c5224adfb74ff3'
-            'c8c305c892be85c47872ae1f1ecd5b3b8af12876fbfe3641045c8c839e5126da')
-
-get_pyver () {
-  python -c 'import sys; print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
-}
-
-prepare() {
-  cd "${_pkgname}-${pkgver}"
-
-  # This is the lazy way, since pytorch has so many submodules and they keep
-  # changing them around, but so far we've run into more problems doing it the
-  # manual way than the lazy way. This lazy way (not explicitly specifying all
-  # submodules) makes building less efficient, but for now I'll take it.
-  # It will result in the same package, don't worry.
-  git submodule update --init --recursive
-
-  # https://github.com/pytorch/pytorch/issues/26555
-  sed -i 's#^  ${CMAKE_CURRENT_SOURCE_DIR}/tensor_iterator_test.cpp##g' aten/src/ATen/test/CMakeLists.txt
-
-  sed -i "s/intel64//g" cmake/Modules/FindMKL.cmake
-
-  # https://bugs.archlinux.org/task/64981
-  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
-
-  # FindNCCL patch to export correct nccl version
-  patch -Np1 -i "${srcdir}"/nccl_version.patch
-
-  # correctly export the torch CUDA API to fix an NCCL runtime error
-  patch -Np1 -i "${srcdir}"/torch_cuda_api.patch
-
-  # https://github.com/pytorch/pytorch/issues/32277
-  patch -Np1 -i "$srcdir"/30332.patch
-  patch -Np1 -i "$srcdir"/30333.patch
-
-  # remove local nccl
-  rm -rf third_party/nccl/nccl
-
-  cd ..
-
-  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
-  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
-  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
-
-  export VERBOSE=1
-  export PYTORCH_BUILD_VERSION="${pkgver}"
-  export PYTORCH_BUILD_NUMBER=1
-
-  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list of flags that can be set via env vars.
-  export USE_MKLDNN=ON
-  # export BUILD_CUSTOM_PROTOBUF=OFF
-  # export BUILD_SHARED_LIBS=OFF
-  export USE_FFMPEG=ON
-  export USE_GFLAGS=ON
-  export USE_GLOG=ON
-  export BUILD_BINARY=ON
-  export USE_OPENCV=ON
-  export USE_SYSTEM_NCCL=ON
-  export NCCL_VERSION=$(pkg-config nccl --modversion)
-  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' /usr/include/nccl.h)
-  export CUDAHOSTCXX=g++-8
-  export CUDA_HOME=/opt/cuda
-  export CUDNN_LIB_DIR=/usr/lib
-  export CUDNN_INCLUDE_DIR=/usr/include
-  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
-  export TORCH_CUDA_ARCH_LIST="3.2;3.5;3.7;5.0;5.2;5.3;6.0;6.0+PTX;6.1;6.1+PTX;6.2;6.2+PTX;7.0;7.0+PTX;7.2;7.2+PTX;7.5;7.5+PTX"
-}
-
-build() {
-  echo "Building without cuda and without non-x86-64 optimizations"
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-  python setup.py build
-
-
-  echo "Building without cuda and with non-x86-64 optimizations"
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  python setup.py build
-
-
-  echo "Building with cuda and without non-x86-64 optimizations"
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  python setup.py build
-
-
-  echo "Building with cuda and with non-x86-64 optimizations"
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  python setup.py build
-}
-
-_package() {
-  # Prevent setup.py from re-running CMake and rebuilding
-  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
-
-  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
-
-  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
-
-  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
-  install -d "${pkgdir}/usr/lib"
-
-  # put CMake files in correct place
-  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
-
-  # put C++ API in correct place
-  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
-  mv "${pkgdir}/${pytorchpath}/lib"/*.so* "${pkgdir}/usr/lib/"
-
-  # clean up duplicates
-  # TODO: move towards direct shared library dependency on:
-  #   c10, caffe2, libcpuinfo, CUDA RT, gloo, GTest, Intel MKL,
-  #   NVRTC, ONNX, protobuf, libthreadpool, QNNPACK
-  rm -rf "${pkgdir}/usr/include/pybind11"
-
-  # python module is hardcoded to look there at runtime
-  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
-  find "${pkgdir}"/usr/lib -type f -name "*.so*" -print0 | while read -rd $'\0' _lib; do
-    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
-  done
-}
-
-package_python-pytorch() {
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-  _package
-}
-
-package_python-pytorch-opt() {
-  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CPU optimizations)"
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
-  _package
-}
-
-package_python-pytorch-cuda() {
-  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CUDA)"
-  depends+=(cuda cudnn magma)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  _package
-}
-
-package_python-pytorch-opt-cuda() {
-  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CUDA and CPU optimizations)"
-  depends+=(cuda cudnn magma)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch python-pytorch-cuda)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
-  _package
-}
-
-# vim:set ts=2 sw=2 et:

Copied: python-pytorch/repos/community-x86_64/PKGBUILD (from rev 600133, python-pytorch/trunk/PKGBUILD)
===================================================================
--- PKGBUILD	                        (rev 0)
+++ PKGBUILD	2020-03-17 10:17:11 UTC (rev 600134)
@@ -0,0 +1,189 @@
+# Maintainer: Sven-Hendrik Haase <svenstaro at gmail.com>
+# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
+
+pkgbase=python-pytorch
+pkgname=("python-pytorch" "python-pytorch-opt" "python-pytorch-cuda" "python-pytorch-opt-cuda")
+_pkgname="pytorch"
+pkgver=1.4.0
+pkgrel=6
+pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+arch=('x86_64')
+url="https://pytorch.org"
+license=('BSD')
+depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 'python-yaml'
+         'python-numpy' 'protobuf' 'ffmpeg' 'python-future' 'qt5-base' 'intel-dnnl' 'intel-mkl')
+makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda'
+             'cudnn' 'git' 'magma' 'ninja' 'pkgconfig' 'doxygen')
+source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$pkgver"
+        fix_include_system.patch
+        nccl_version.patch
+        torch_cuda_api.patch
+        https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/30332.patch
+        https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/30333.patch)
+sha256sums=('SKIP'
+            '147bdaeac8ec46ea46382e6146878bd8f8d51e05d5bd6f930dfd8e2b520859b9'
+            '1a276bd827a0c76dab908cbc6605fa4c9fc2cc2b9431b6578a41133ae27dba2b'
+            '8965f003f5812c5ab1bd27ab66d916560ea4a644364727b9755dc0dea752ad77'
+            '3170551116798dc496636a87f00b86c6463895bb6d174df616c5224adfb74ff3'
+            'c8c305c892be85c47872ae1f1ecd5b3b8af12876fbfe3641045c8c839e5126da')
+
+get_pyver () {
+  python -c 'import sys; print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
+}
+
+prepare() {
+  cd "${_pkgname}-${pkgver}"
+
+  # This is the lazy way, since pytorch has so many submodules and they keep
+  # changing them around, but so far we've run into more problems doing it the
+  # manual way than the lazy way. This lazy way (not explicitly specifying all
+  # submodules) makes building less efficient, but for now I'll take it.
+  # It will result in the same package, don't worry.
+  git submodule update --init --recursive
+
+  # https://github.com/pytorch/pytorch/issues/26555
+  sed -i 's#^  ${CMAKE_CURRENT_SOURCE_DIR}/tensor_iterator_test.cpp##g' aten/src/ATen/test/CMakeLists.txt
+
+  # https://bugs.archlinux.org/task/64981
+  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
+
+  # FindNCCL patch to export correct nccl version
+  patch -Np1 -i "${srcdir}"/nccl_version.patch
+
+  # correctly export the torch CUDA API to fix an NCCL runtime error
+  patch -Np1 -i "${srcdir}"/torch_cuda_api.patch
+
+  # https://github.com/pytorch/pytorch/issues/32277
+  patch -Np1 -i "$srcdir"/30332.patch
+  patch -Np1 -i "$srcdir"/30333.patch
+
+  # remove local nccl
+  rm -rf third_party/nccl/nccl
+
+  cd ..
+
+  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
+  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
+  cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
+
+  export VERBOSE=1
+  export PYTORCH_BUILD_VERSION="${pkgver}"
+  export PYTORCH_BUILD_NUMBER=1
+
+  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list of flags that can be set via env vars.
+  export USE_MKLDNN=ON
+  # export BUILD_CUSTOM_PROTOBUF=OFF
+  # export BUILD_SHARED_LIBS=OFF
+  export USE_FFMPEG=ON
+  export USE_GFLAGS=ON
+  export USE_GLOG=ON
+  export BUILD_BINARY=ON
+  export USE_OPENCV=ON
+  export USE_SYSTEM_NCCL=ON
+  export NCCL_VERSION=$(pkg-config nccl --modversion)
+  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' /usr/include/nccl.h)
+  export CUDAHOSTCXX=g++-8
+  export CUDA_HOME=/opt/cuda
+  export CUDNN_LIB_DIR=/usr/lib
+  export CUDNN_INCLUDE_DIR=/usr/include
+  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+  export TORCH_CUDA_ARCH_LIST="3.2;3.5;3.7;5.0;5.2;5.3;6.0;6.0+PTX;6.1;6.1+PTX;6.2;6.2+PTX;7.0;7.0+PTX;7.2;7.2+PTX;7.5;7.5+PTX"
+}
+
+build() {
+  echo "Building without cuda and without non-x86-64 optimizations"
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+  python setup.py build
+
+
+  echo "Building without cuda and with non-x86-64 optimizations"
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  python setup.py build
+
+
+  echo "Building with cuda and without non-x86-64 optimizations"
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  python setup.py build
+
+
+  echo "Building with cuda and with non-x86-64 optimizations"
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  python setup.py build
+}
+
+_package() {
+  # Prevent setup.py from re-running CMake and rebuilding
+  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
+
+  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
+
+  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
+  install -d "${pkgdir}/usr/lib"
+
+  # put CMake files in correct place
+  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
+
+  # put C++ API in correct place
+  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
+  mv "${pkgdir}/${pytorchpath}/lib"/*.so* "${pkgdir}/usr/lib/"
+
+  # clean up duplicates
+  # TODO: move towards direct shared library dependency on:
+  #   c10, caffe2, libcpuinfo, CUDA RT, gloo, GTest, Intel MKL,
+  #   NVRTC, ONNX, protobuf, libthreadpool, QNNPACK
+  rm -rf "${pkgdir}/usr/include/pybind11"
+
+  # python module is hardcoded to look there at runtime
+  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
+  find "${pkgdir}"/usr/lib -type f -name "*.so*" -print0 | while read -rd $'\0' _lib; do
+    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
+  done
+}
+
+package_python-pytorch() {
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+  _package
+}
+
+package_python-pytorch-opt() {
+  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CPU optimizations)"
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
+  _package
+}
+
+package_python-pytorch-cuda() {
+  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CUDA)"
+  depends+=(cuda cudnn magma)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  _package
+}
+
+package_python-pytorch-opt-cuda() {
+  pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration (with CUDA and CPU optimizations)"
+  depends+=(cuda cudnn magma)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch python-pytorch-cuda)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
+  _package
+}
+
+# vim:set ts=2 sw=2 et:

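Side note for anyone reproducing this build locally: a minimal sketch follows, assuming a standard Arch build environment with base-devel and the asp tool installed. The tool choice and commands are illustrative only and are not part of this commit.

    # Illustrative reproduction of the build from trunk (assumes 'asp' and base-devel):
    asp export python-pytorch   # fetch the PKGBUILD plus the patches listed in source=()
    cd python-pytorch
    makepkg -s                  # runs prepare()/build() and produces all four split packages
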
Deleted: fix_include_system.patch
===================================================================
--- fix_include_system.patch	2020-03-17 10:17:05 UTC (rev 600133)
+++ fix_include_system.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -1,11 +0,0 @@
---- a/utils/cpp_extension.py	2019-12-29 12:59:36.083692770 -0300
-+++ b/utils/cpp_extension.py	2020-01-08 09:52:16.435316701 -0300
-@@ -1098,7 +1098,7 @@
-     if BUILD_NAMEDTENSOR:
-         common_cflags.append('-DBUILD_NAMEDTENSOR')
-     common_cflags += ['-I{}'.format(include) for include in user_includes]
--    common_cflags += ['-isystem {}'.format(include) for include in system_includes]
-+    common_cflags += ['-I{}'.format(include) for include in system_includes]
- 
-     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
-

Copied: python-pytorch/repos/community-x86_64/fix_include_system.patch (from rev 600133, python-pytorch/trunk/fix_include_system.patch)
===================================================================
--- fix_include_system.patch	                        (rev 0)
+++ fix_include_system.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -0,0 +1,11 @@
+--- a/utils/cpp_extension.py	2019-12-29 12:59:36.083692770 -0300
++++ b/utils/cpp_extension.py	2020-01-08 09:52:16.435316701 -0300
+@@ -1098,7 +1098,7 @@
+     if BUILD_NAMEDTENSOR:
+         common_cflags.append('-DBUILD_NAMEDTENSOR')
+     common_cflags += ['-I{}'.format(include) for include in user_includes]
+-    common_cflags += ['-isystem {}'.format(include) for include in system_includes]
++    common_cflags += ['-I{}'.format(include) for include in system_includes]
+ 
+     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
+

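The one-line change above swaps GCC's -isystem flag for a plain -I when adding system include paths. A common reason for such a change (and presumably what the bug linked from prepare() is about) is that passing /usr/include via -isystem reorders the system include search and can break #include_next in libstdc++'s wrapper headers. A hedged reproduction sketch, assuming GCC on Arch and not taken from the commit itself:

    # Assumed reproduction, not from the commit: -isystem /usr/include is known to
    # break #include_next in libstdc++ wrapper headers on some GCC setups.
    echo '#include <cstdlib>' > probe.cpp
    g++ -isystem /usr/include -c probe.cpp   # may fail: "stdlib.h: No such file or directory"
    g++ -I /usr/include -c probe.cpp         # the form the patch falls back to
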
Deleted: nccl_version.patch
===================================================================
--- nccl_version.patch	2020-03-17 10:17:05 UTC (rev 600133)
+++ nccl_version.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -1,46 +0,0 @@
-diff --git a/cmake/Modules/FindNCCL.cmake b/cmake/Modules/FindNCCL.cmake
-index 5bd3ccd606..f92eda98f5 100644
---- a/cmake/Modules/FindNCCL.cmake
-+++ b/cmake/Modules/FindNCCL.cmake
-@@ -48,37 +48,39 @@ find_library(NCCL_LIBRARIES
- 
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
- 
- if(NCCL_FOUND)  # obtaining NCCL version and some sanity checks
-   set (NCCL_HEADER_FILE "${NCCL_INCLUDE_DIRS}/nccl.h")
-   message (STATUS "Determining NCCL version from ${NCCL_HEADER_FILE}...")
-   set (OLD_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
-   list (APPEND CMAKE_REQUIRED_INCLUDES ${NCCL_INCLUDE_DIRS})
-   include(CheckCXXSymbolExists)
--  check_cxx_symbol_exists(NCCL_VERSION_CODE nccl.h NCCL_VERSION_DEFINED)
-+  set(NCCL_VERSION_CODE $ENV{NCCL_VER_CODE})
-+  set(NCCL_VERSION_DEFINED $ENV{NCCL_VER_CODE})
- 
--  if (NCCL_VERSION_DEFINED)
-+  if (DEFINED NCCL_VERSION_DEFINED)
-     set(file "${PROJECT_BINARY_DIR}/detect_nccl_version.cc")
-     file(WRITE ${file} "
-       #include <iostream>
-       #include <nccl.h>
-       int main()
-       {
-         std::cout << NCCL_MAJOR << '.' << NCCL_MINOR << '.' << NCCL_PATCH << std::endl;
- 
-         int x;
-         ncclGetVersion(&x);
-         return x == NCCL_VERSION_CODE;
-       }
- ")
-     try_run(NCCL_VERSION_MATCHED compile_result ${PROJECT_BINARY_DIR} ${file}
-+          CMAKE_FLAGS -DINCLUDE_DIRECTORIES=/opt/cuda/include
-           RUN_OUTPUT_VARIABLE NCCL_VERSION_FROM_HEADER
-           LINK_LIBRARIES ${NCCL_LIBRARIES})
-     if (NOT NCCL_VERSION_MATCHED)
-       message(FATAL_ERROR "Found NCCL header version and library version do not match! \
- (include: ${NCCL_INCLUDE_DIRS}, library: ${NCCL_LIBRARIES}) Please set NCCL_INCLUDE_DIR and NCCL_LIB_DIR manually.")
-     endif()
-     message(STATUS "NCCL version: ${NCCL_VERSION_FROM_HEADER}")
-   else()
-     message(STATUS "NCCL version < 2.3.5-5")
-   endif ()

Copied: python-pytorch/repos/community-x86_64/nccl_version.patch (from rev 600133, python-pytorch/trunk/nccl_version.patch)
===================================================================
--- nccl_version.patch	                        (rev 0)
+++ nccl_version.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -0,0 +1,46 @@
+diff --git a/cmake/Modules/FindNCCL.cmake b/cmake/Modules/FindNCCL.cmake
+index 5bd3ccd606..f92eda98f5 100644
+--- a/cmake/Modules/FindNCCL.cmake
++++ b/cmake/Modules/FindNCCL.cmake
+@@ -48,37 +48,39 @@ find_library(NCCL_LIBRARIES
+ 
+ include(FindPackageHandleStandardArgs)
+ find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
+ 
+ if(NCCL_FOUND)  # obtaining NCCL version and some sanity checks
+   set (NCCL_HEADER_FILE "${NCCL_INCLUDE_DIRS}/nccl.h")
+   message (STATUS "Determining NCCL version from ${NCCL_HEADER_FILE}...")
+   set (OLD_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
+   list (APPEND CMAKE_REQUIRED_INCLUDES ${NCCL_INCLUDE_DIRS})
+   include(CheckCXXSymbolExists)
+-  check_cxx_symbol_exists(NCCL_VERSION_CODE nccl.h NCCL_VERSION_DEFINED)
++  set(NCCL_VERSION_CODE $ENV{NCCL_VER_CODE})
++  set(NCCL_VERSION_DEFINED $ENV{NCCL_VER_CODE})
+ 
+-  if (NCCL_VERSION_DEFINED)
++  if (DEFINED NCCL_VERSION_DEFINED)
+     set(file "${PROJECT_BINARY_DIR}/detect_nccl_version.cc")
+     file(WRITE ${file} "
+       #include <iostream>
+       #include <nccl.h>
+       int main()
+       {
+         std::cout << NCCL_MAJOR << '.' << NCCL_MINOR << '.' << NCCL_PATCH << std::endl;
+ 
+         int x;
+         ncclGetVersion(&x);
+         return x == NCCL_VERSION_CODE;
+       }
+ ")
+     try_run(NCCL_VERSION_MATCHED compile_result ${PROJECT_BINARY_DIR} ${file}
++          CMAKE_FLAGS -DINCLUDE_DIRECTORIES=/opt/cuda/include
+           RUN_OUTPUT_VARIABLE NCCL_VERSION_FROM_HEADER
+           LINK_LIBRARIES ${NCCL_LIBRARIES})
+     if (NOT NCCL_VERSION_MATCHED)
+       message(FATAL_ERROR "Found NCCL header version and library version do not match! \
+ (include: ${NCCL_INCLUDE_DIRS}, library: ${NCCL_LIBRARIES}) Please set NCCL_INCLUDE_DIR and NCCL_LIB_DIR manually.")
+     endif()
+     message(STATUS "NCCL version: ${NCCL_VERSION_FROM_HEADER}")
+   else()
+     message(STATUS "NCCL version < 2.3.5-5")
+   endif ()

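The patch above replaces the check_cxx_symbol_exists probe with values taken from the environment, so the patched FindNCCL.cmake picks up exactly what the PKGBUILD exports in prepare() (NCCL_VERSION and NCCL_VER_CODE, the latter derived from /usr/include/nccl.h). A quick manual check of those values, purely illustrative and not part of the commit:

    # Illustrative: the same values the PKGBUILD exports for the patched FindNCCL.cmake
    pkg-config nccl --modversion                                              # -> NCCL_VERSION
    sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\)/\1/p' /usr/include/nccl.h   # -> NCCL_VER_CODE
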
Deleted: torch_cuda_api.patch
===================================================================
--- torch_cuda_api.patch	2020-03-17 10:17:05 UTC (rev 600133)
+++ torch_cuda_api.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -1,13 +0,0 @@
-diff --git a/torch/csrc/cuda/nccl.h b/torch/csrc/cuda/nccl.h
-index 9f276f76fa..8a9062e857 100644
---- a/torch/csrc/cuda/nccl.h
-+++ b/torch/csrc/cuda/nccl.h
-@@ -19,7 +21,7 @@ namespace nccl {
- // Don't use them outside of these files.
- namespace detail {
- 
--void throw_nccl_error(ncclResult_t status);
-+TORCH_CUDA_API void throw_nccl_error(ncclResult_t status);
- 
- static inline void NCCL_CHECK(ncclResult_t status) {
-   if (status != ncclSuccess) {

Copied: python-pytorch/repos/community-x86_64/torch_cuda_api.patch (from rev 600133, python-pytorch/trunk/torch_cuda_api.patch)
===================================================================
--- torch_cuda_api.patch	                        (rev 0)
+++ torch_cuda_api.patch	2020-03-17 10:17:11 UTC (rev 600134)
@@ -0,0 +1,13 @@
+diff --git a/torch/csrc/cuda/nccl.h b/torch/csrc/cuda/nccl.h
+index 9f276f76fa..8a9062e857 100644
+--- a/torch/csrc/cuda/nccl.h
++++ b/torch/csrc/cuda/nccl.h
+@@ -19,7 +21,7 @@ namespace nccl {
+ // Don't use them outside of these files.
+ namespace detail {
+ 
+-void throw_nccl_error(ncclResult_t status);
++TORCH_CUDA_API void throw_nccl_error(ncclResult_t status);
+ 
+ static inline void NCCL_CHECK(ncclResult_t status) {
+   if (status != ncclSuccess) {


