[arch-commits] Commit in python-pytorch/repos/community-x86_64 (6 files)
Sven-Hendrik Haase
svenstaro at archlinux.org
Thu Dec 6 05:24:18 UTC 2018
Date: Thursday, December 6, 2018 @ 05:24:18
Author: svenstaro
Revision: 411616
archrelease: copy trunk to community-x86_64
Added:
python-pytorch/repos/community-x86_64/12116.patch
(from rev 411615, python-pytorch/trunk/12116.patch)
python-pytorch/repos/community-x86_64/PKGBUILD
(from rev 411615, python-pytorch/trunk/PKGBUILD)
python-pytorch/repos/community-x86_64/opencv4.patch
(from rev 411615, python-pytorch/trunk/opencv4.patch)
Deleted:
python-pytorch/repos/community-x86_64/12116.patch
python-pytorch/repos/community-x86_64/PKGBUILD
python-pytorch/repos/community-x86_64/opencv4.patch
---------------+
12116.patch | 140 ++++++++++++------------
PKGBUILD | 315 +++++++++++++++++++++++++++-----------------------------
opencv4.patch | 144 ++++++++++++-------------
3 files changed, 299 insertions(+), 300 deletions(-)
Deleted: 12116.patch
===================================================================
--- 12116.patch 2018-12-06 05:23:46 UTC (rev 411615)
+++ 12116.patch 2018-12-06 05:24:18 UTC (rev 411616)
@@ -1,70 +0,0 @@
-From cea0d3269daf34fa32b55237d393de7c47928f65 Mon Sep 17 00:00:00 2001
-From: Xiaodong Wang <xdwang at fb.com>
-Date: Wed, 26 Sep 2018 13:44:53 -0700
-Subject: [PATCH] Caffe 2 adoption
-
-Summary:
-Adapt Caffe 2 to platform007 (gcc 8):
-* gcc 8 + nvcc template symbol lookup (D9319742):
-context_.template CopySameDevice<T> ==> this->context_.template CopySameDevice<T>
-* New gcc 8 warning (error):
- * -Werror=sizeof-pointer-div
- * Unnecessary parenthesis
-
-Differential Revision: D10045844
-
-fbshipit-source-id: 0b0569401fac69f83b8c2be758eea7c8fa18a019
----
- aten/src/THC/generic/THCTensor.cpp | 2 +-
- caffe2/operators/rnn/recurrent_op_cudnn.cc | 8 ++++----
- 2 files changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/aten/src/THC/generic/THCTensor.cpp b/aten/src/THC/generic/THCTensor.cpp
-index a7779047863..673870288e8 100644
---- a/aten/src/THC/generic/THCTensor.cpp
-+++ b/aten/src/THC/generic/THCTensor.cpp
-@@ -596,7 +596,7 @@ int THCTensor_(checkGPU)(THCState *state, unsigned int nTensors, ...)
- {
- int curDev = -1;
- THCudaCheck(cudaGetDevice(&curDev));
-- va_list(args);
-+ va_list args;
- va_start(args, nTensors);
- int valid = 1;
- for (unsigned int i = 0; i < nTensors; i++) {
-diff --git a/caffe2/operators/rnn/recurrent_op_cudnn.cc b/caffe2/operators/rnn/recurrent_op_cudnn.cc
-index 4b349655843..fa37874bd3e 100644
---- a/caffe2/operators/rnn/recurrent_op_cudnn.cc
-+++ b/caffe2/operators/rnn/recurrent_op_cudnn.cc
-@@ -458,13 +458,13 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
- if (mode == SET_PARAM) {
- CAFFE_ENFORCE_EQ(
- biasDims[0] * biasDims[1] * biasDims[2], Input(2).size());
-- context_.template CopySameDevice<T>(
-+ this->context_.template CopySameDevice<T>(
- biasDims[0] * biasDims[1] * biasDims[2],
- Input(2).template data<T>(),
- static_cast<T*>(bias));
- } else {
- Output(0)->Resize(biasDims);
-- context_.template CopySameDevice<T>(
-+ this->context_.template CopySameDevice<T>(
- biasDims[0] * biasDims[1] * biasDims[2],
- static_cast<T*>(bias),
- Output(0)->template mutable_data<T>());
-@@ -495,13 +495,13 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
- CAFFE_ENFORCE_EQ(numDims, 3);
- if (mode == SET_PARAM) {
- CAFFE_ENFORCE_EQ(matDims[0] * matDims[1] * matDims[2], Input(2).size());
-- context_.template CopySameDevice<T>(
-+ this->context_.template CopySameDevice<T>(
- matDims[0] * matDims[1] * matDims[2],
- Input(2).template data<T>(),
- static_cast<T*>(pmatrix));
- } else {
- Output(0)->Resize(matDims);
-- context_.template CopySameDevice<T>(
-+ this->context_.template CopySameDevice<T>(
- matDims[0] * matDims[1] * matDims[2],
- static_cast<T*>(pmatrix),
- Output(0)->template mutable_data<T>());
Copied: python-pytorch/repos/community-x86_64/12116.patch (from rev 411615, python-pytorch/trunk/12116.patch)
===================================================================
--- 12116.patch (rev 0)
+++ 12116.patch 2018-12-06 05:24:18 UTC (rev 411616)
@@ -0,0 +1,70 @@
+From cea0d3269daf34fa32b55237d393de7c47928f65 Mon Sep 17 00:00:00 2001
+From: Xiaodong Wang <xdwang at fb.com>
+Date: Wed, 26 Sep 2018 13:44:53 -0700
+Subject: [PATCH] Caffe 2 adoption
+
+Summary:
+Adapt Caffe 2 to platform007 (gcc 8):
+* gcc 8 + nvcc template symbol lookup (D9319742):
+context_.template CopySameDevice<T> ==> this->context_.template CopySameDevice<T>
+* New gcc 8 warning (error):
+ * -Werror=sizeof-pointer-div
+ * Unnecessary parenthesis
+
+Differential Revision: D10045844
+
+fbshipit-source-id: 0b0569401fac69f83b8c2be758eea7c8fa18a019
+---
+ aten/src/THC/generic/THCTensor.cpp | 2 +-
+ caffe2/operators/rnn/recurrent_op_cudnn.cc | 8 ++++----
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/aten/src/THC/generic/THCTensor.cpp b/aten/src/THC/generic/THCTensor.cpp
+index a7779047863..673870288e8 100644
+--- a/aten/src/THC/generic/THCTensor.cpp
++++ b/aten/src/THC/generic/THCTensor.cpp
+@@ -596,7 +596,7 @@ int THCTensor_(checkGPU)(THCState *state, unsigned int nTensors, ...)
+ {
+ int curDev = -1;
+ THCudaCheck(cudaGetDevice(&curDev));
+- va_list(args);
++ va_list args;
+ va_start(args, nTensors);
+ int valid = 1;
+ for (unsigned int i = 0; i < nTensors; i++) {
+diff --git a/caffe2/operators/rnn/recurrent_op_cudnn.cc b/caffe2/operators/rnn/recurrent_op_cudnn.cc
+index 4b349655843..fa37874bd3e 100644
+--- a/caffe2/operators/rnn/recurrent_op_cudnn.cc
++++ b/caffe2/operators/rnn/recurrent_op_cudnn.cc
+@@ -458,13 +458,13 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
+ if (mode == SET_PARAM) {
+ CAFFE_ENFORCE_EQ(
+ biasDims[0] * biasDims[1] * biasDims[2], Input(2).size());
+- context_.template CopySameDevice<T>(
++ this->context_.template CopySameDevice<T>(
+ biasDims[0] * biasDims[1] * biasDims[2],
+ Input(2).template data<T>(),
+ static_cast<T*>(bias));
+ } else {
+ Output(0)->Resize(biasDims);
+- context_.template CopySameDevice<T>(
++ this->context_.template CopySameDevice<T>(
+ biasDims[0] * biasDims[1] * biasDims[2],
+ static_cast<T*>(bias),
+ Output(0)->template mutable_data<T>());
+@@ -495,13 +495,13 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
+ CAFFE_ENFORCE_EQ(numDims, 3);
+ if (mode == SET_PARAM) {
+ CAFFE_ENFORCE_EQ(matDims[0] * matDims[1] * matDims[2], Input(2).size());
+- context_.template CopySameDevice<T>(
++ this->context_.template CopySameDevice<T>(
+ matDims[0] * matDims[1] * matDims[2],
+ Input(2).template data<T>(),
+ static_cast<T*>(pmatrix));
+ } else {
+ Output(0)->Resize(matDims);
+- context_.template CopySameDevice<T>(
++ this->context_.template CopySameDevice<T>(
+ matDims[0] * matDims[1] * matDims[2],
+ static_cast<T*>(pmatrix),
+ Output(0)->template mutable_data<T>());
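As background for the 12116.patch hunks above: inside a class template, unqualified lookup does not search base classes that depend on a template parameter, so a member such as context_ inherited from a dependent base has to be written as this->context_. Standard C++ requires the qualification; per the patch summary, it is the gcc 8 / nvcc combination that started rejecting the unqualified form in these files. The standalone sketch below is illustrative only (a simplified mock, not the real Caffe2 classes) and just demonstrates the lookup rule:

#include <cstdio>

// Minimal illustration of two-phase name lookup in class templates:
// members inherited from a dependent base are invisible to unqualified
// lookup, so they must be reached through this-> (or Base::).
template <typename Context>
struct OperatorBase {
  Context context_;
};

struct CPUContext {
  // Member template, mirroring the CopySameDevice<T> pattern in the patch.
  template <typename T>
  void CopySameDevice(int n) { std::printf("copying %d elements\n", n); }
};

template <typename T, typename Context>
struct RecurrentParamAccessOp : OperatorBase<Context> {
  void RunOnDevice() {
    // context_.template CopySameDevice<T>(3);     // error: 'context_' not found
    //                                             // (unqualified lookup skips the dependent base)
    this->context_.template CopySameDevice<T>(3);  // OK: lookup deferred to instantiation
  }
};

int main() {
  RecurrentParamAccessOp<float, CPUContext> op;
  op.RunOnDevice();
}

The other hunk simply replaces the declaration va_list(args); with va_list args;, which is equivalent but avoids the "unnecessary parenthesis" diagnostic mentioned in the patch summary.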
Deleted: PKGBUILD
===================================================================
--- PKGBUILD 2018-12-06 05:23:46 UTC (rev 411615)
+++ PKGBUILD 2018-12-06 05:24:18 UTC (rev 411616)
@@ -1,158 +0,0 @@
-# Maintainer: Sven-Hendrik Haase <sh at lutzhaase.com>
-# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
-
-pkgbase="python-pytorch"
-pkgname=("python-pytorch" "python-pytorch-cuda")
-_pkgname="pytorch"
-pkgver=1.0rc1
-pkgrel=5
-pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration"
-arch=('x86_64')
-url="https://pytorch.org"
-license=('BSD')
-depends=('python' 'python-yaml' 'python-numpy' 'opencv' 'nccl')
-makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda' 'cudnn' 'git')
-source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$pkgver"
- "git+https://github.com/catchorg/Catch2"
- "git+https://github.com/pybind/pybind11"
- "git+https://github.com/NVlabs/cub"
- "git+https://github.com/eigenteam/eigen-git-mirror"
- "git+https://github.com/google/googletest"
- "git+https://github.com/NervanaSystems/nervanagpu"
- "git+https://github.com/google/benchmark"
- "git+https://github.com/google/protobuf"
- "git+https://github.com/Yangqing/ios-cmake"
- "git+https://github.com/Maratyszcza/NNPACK"
- "git+https://github.com/facebookincubator/gloo"
- "git+https://github.com/Maratyszcza/pthreadpool"
- "git+https://github.com/Maratyszcza/FXdiv"
- "git+https://github.com/Maratyszcza/FP16"
- "git+https://github.com/Maratyszcza/psimd"
- "git+https://github.com/facebook/zstd"
- "git+https://github.com/Maratyszcza/cpuinfo"
- "git+https://github.com/PeachPy/enum34"
- "git+https://github.com/Maratyszcza/PeachPy"
- "git+https://github.com/benjaminp/six"
- "git+https://github.com/ARM-software/ComputeLibrary"
- "git+https://github.com/onnx/onnx"
- "git+https://github.com/USCILab/cereal"
- "git+https://github.com/onnx/onnx-tensorrt"
- "git+https://github.com/shibatch/sleef"
- "git+https://github.com/intel/ideep"
- 12116.patch
- opencv4.patch
- )
-sha256sums=('SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- '2a2cd21dbdf7253185c8835a3f36b543a4b1655d837e01f7cfd27ab81819f2d5'
- '8cf4226099f92315f14c83066f77e44443bc3d35aedf94d99b910f035cc9bc90')
-
-prepare() {
- cd "${_pkgname}-${pkgver}"
-
- patch -Np1 -i "${srcdir}"/12116.patch
- patch -Np1 -i "${srcdir}"/opencv4.patch
-
- git submodule init
- git config submodule."third_party/catch".url "${srcdir}"/Catch2
- git config submodule."third_party/pybind11".url "${srcdir}"/pybind11
- git config submodule."third_party/cub".url "${srcdir}"/cub
- git config submodule."third_party/eigen".url "${srcdir}"/eigen-git-mirror
- git config submodule."third_party/googletest".url "${srcdir}"/googletest
- git config submodule."third_party/nervanagpu".url "${srcdir}"/nervanagpu
- git config submodule."third_party/benchmark".url "${srcdir}"/benchmark
- git config submodule."third_party/protobuf".url "${srcdir}"/protobuf
- git config submodule."third_party/ios-cmake".url "${srcdir}"/ios-cmake
- git config submodule."third_party/NNPACK".url "${srcdir}"/NNPACK
- git config submodule."third_party/gloo".url "${srcdir}"/gloo
- git config submodule."third_party/NNPACK_deps/pthread_ool".url "${srcdir}"/pthreadpool
- git config submodule."third_party/NNPACK_deps/FXdiv".url "${srcdir}"/FXdiv
- git config submodule."third_party/NNPACK_deps/FP16".url "${srcdir}"/FP16
- git config submodule."third_party/NNPACK_deps/psimd".url "${srcdir}"/psimd
- git config submodule."third_party/zstd".url "${srcdir}"/zstd
- git config submodule."third_party/cpuinfo".url "${srcdir}"/cpuinfo
- git config submodule."third_party/python-enum".url "${srcdir}"/enum34
- git config submodule."third_party/python-peachpy".url "${srcdir}"/PeachPy
- git config submodule."third_party/python-six".url "${srcdir}"/six
- git config submodule."third_party/ComputeLibrary".url "${srcdir}"/ComputeLibrary
- git config submodule."third_party/onnx".url "${srcdir}"/onnx
- git config submodule."third_party/cereal".url "${srcdir}"/cereal
- git config submodule."third_party/onnx-tensorrt".url "${srcdir}"/onnx-tensorrt
- git config submodule."third_party/sleef".url "${srcdir}"/sleef
- git config submodule."third_party/ideep".url "${srcdir}"/ideep
- git submodule update
-
- cd ..
-
- cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
-}
-
-build() {
- # Uncomment and modify the following line to enable Intel MKL and magma support
- # export CMAKE_PREFIX_PATH=/opt/intel/mkl/include:/opt/intel/mkl/lib/intel64:/opt/magma \
-
- echo "Building without cuda"
- export NO_CUDA=1
- export WITH_CUDNN=0
- export USE_OPENCV=1
- export BUILD_BINARY=1
-
- cd "$srcdir/${_pkgname}-${pkgver}"
- python setup.py build
-
- echo "Building with cuda"
- export CC=gcc-7
- export CXX=g++-7
- export NO_CUDA=0
- export WITH_CUDNN=1
- export CUDAHOSTCXX=g++-7
- export CUDA_HOME=/opt/cuda
- export CUDNN_LIB_DIR=/opt/cuda/lib64
- export CUDNN_INCLUDE_DIR=/opt/cuda/include
- export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5"
-
- cd "$srcdir/${_pkgname}-${pkgver}-cuda"
- python setup.py build
-}
-
-package_python-pytorch() {
- cd "$srcdir/${_pkgname}-${pkgver}"
- python setup.py install --root="$pkgdir"/ --optimize=1 --skip-build
- install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE.txt"
-}
-
-package_python-pytorch-cuda() {
- depends+=('cuda' 'cudnn')
- provides=('python-pytorch')
- conflicts=('python-pytorch')
- cd "$srcdir/${_pkgname}-${pkgver}-cuda"
- python setup.py install --root="$pkgdir"/ --optimize=1 --skip-build
- install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE.txt"
-}
-
-# vim:set ts=2 sw=2 et:
Copied: python-pytorch/repos/community-x86_64/PKGBUILD (from rev 411615, python-pytorch/trunk/PKGBUILD)
===================================================================
--- PKGBUILD (rev 0)
+++ PKGBUILD 2018-12-06 05:24:18 UTC (rev 411616)
@@ -0,0 +1,157 @@
+# Maintainer: Sven-Hendrik Haase <sh at lutzhaase.com>
+# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
+
+pkgbase="python-pytorch"
+pkgname=("python-pytorch" "python-pytorch-cuda")
+_pkgname="pytorch"
+pkgver=1.0rc1
+pkgrel=6
+pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+arch=('x86_64')
+url="https://pytorch.org"
+license=('BSD')
+depends=('python' 'python-yaml' 'python-numpy' 'opencv' 'nccl')
+makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda' 'cudnn' 'git')
+source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$pkgver"
+ "git+https://github.com/catchorg/Catch2"
+ "git+https://github.com/pybind/pybind11"
+ "git+https://github.com/NVlabs/cub"
+ "git+https://github.com/eigenteam/eigen-git-mirror"
+ "git+https://github.com/google/googletest"
+ "git+https://github.com/NervanaSystems/nervanagpu"
+ "git+https://github.com/google/benchmark"
+ "git+https://github.com/google/protobuf"
+ "git+https://github.com/Yangqing/ios-cmake"
+ "git+https://github.com/Maratyszcza/NNPACK"
+ "git+https://github.com/facebookincubator/gloo"
+ "git+https://github.com/Maratyszcza/pthreadpool"
+ "git+https://github.com/Maratyszcza/FXdiv"
+ "git+https://github.com/Maratyszcza/FP16"
+ "git+https://github.com/Maratyszcza/psimd"
+ "git+https://github.com/facebook/zstd"
+ "git+https://github.com/Maratyszcza/cpuinfo"
+ "git+https://github.com/PeachPy/enum34"
+ "git+https://github.com/Maratyszcza/PeachPy"
+ "git+https://github.com/benjaminp/six"
+ "git+https://github.com/ARM-software/ComputeLibrary"
+ "git+https://github.com/onnx/onnx"
+ "git+https://github.com/USCILab/cereal"
+ "git+https://github.com/onnx/onnx-tensorrt"
+ "git+https://github.com/shibatch/sleef"
+ "git+https://github.com/intel/ideep"
+ 12116.patch
+ opencv4.patch)
+sha256sums=('SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+ '2a2cd21dbdf7253185c8835a3f36b543a4b1655d837e01f7cfd27ab81819f2d5'
+ '8cf4226099f92315f14c83066f77e44443bc3d35aedf94d99b910f035cc9bc90')
+
+prepare() {
+ cd "${_pkgname}-${pkgver}"
+
+ patch -Np1 -i "${srcdir}"/12116.patch
+ patch -Np1 -i "${srcdir}"/opencv4.patch
+
+ git submodule init
+ git config submodule."third_party/catch".url "${srcdir}"/Catch2
+ git config submodule."third_party/pybind11".url "${srcdir}"/pybind11
+ git config submodule."third_party/cub".url "${srcdir}"/cub
+ git config submodule."third_party/eigen".url "${srcdir}"/eigen-git-mirror
+ git config submodule."third_party/googletest".url "${srcdir}"/googletest
+ git config submodule."third_party/nervanagpu".url "${srcdir}"/nervanagpu
+ git config submodule."third_party/benchmark".url "${srcdir}"/benchmark
+ git config submodule."third_party/protobuf".url "${srcdir}"/protobuf
+ git config submodule."third_party/ios-cmake".url "${srcdir}"/ios-cmake
+ git config submodule."third_party/NNPACK".url "${srcdir}"/NNPACK
+ git config submodule."third_party/gloo".url "${srcdir}"/gloo
+ git config submodule."third_party/NNPACK_deps/pthread_ool".url "${srcdir}"/pthreadpool
+ git config submodule."third_party/NNPACK_deps/FXdiv".url "${srcdir}"/FXdiv
+ git config submodule."third_party/NNPACK_deps/FP16".url "${srcdir}"/FP16
+ git config submodule."third_party/NNPACK_deps/psimd".url "${srcdir}"/psimd
+ git config submodule."third_party/zstd".url "${srcdir}"/zstd
+ git config submodule."third_party/cpuinfo".url "${srcdir}"/cpuinfo
+ git config submodule."third_party/python-enum".url "${srcdir}"/enum34
+ git config submodule."third_party/python-peachpy".url "${srcdir}"/PeachPy
+ git config submodule."third_party/python-six".url "${srcdir}"/six
+ git config submodule."third_party/ComputeLibrary".url "${srcdir}"/ComputeLibrary
+ git config submodule."third_party/onnx".url "${srcdir}"/onnx
+ git config submodule."third_party/cereal".url "${srcdir}"/cereal
+ git config submodule."third_party/onnx-tensorrt".url "${srcdir}"/onnx-tensorrt
+ git config submodule."third_party/sleef".url "${srcdir}"/sleef
+ git config submodule."third_party/ideep".url "${srcdir}"/ideep
+ git submodule update
+
+ cd ..
+
+ cp -a "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
+}
+
+build() {
+ # Uncomment and modify the following line to enable Intel MKL and magma support
+ # export CMAKE_PREFIX_PATH=/opt/intel/mkl/include:/opt/intel/mkl/lib/intel64:/opt/magma \
+
+ echo "Building without cuda"
+ export NO_CUDA=1
+ export WITH_CUDNN=0
+ export USE_OPENCV=1
+ export BUILD_BINARY=1
+
+ cd "$srcdir/${_pkgname}-${pkgver}"
+ python setup.py build
+
+ echo "Building with cuda"
+ export CC=gcc-7
+ export CXX=g++-7
+ export NO_CUDA=0
+ export WITH_CUDNN=1
+ export CUDAHOSTCXX=g++-7
+ export CUDA_HOME=/opt/cuda
+ export CUDNN_LIB_DIR=/opt/cuda/lib64
+ export CUDNN_INCLUDE_DIR=/opt/cuda/include
+ export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5"
+
+ cd "$srcdir/${_pkgname}-${pkgver}-cuda"
+ python setup.py build
+}
+
+package_python-pytorch() {
+ cd "$srcdir/${_pkgname}-${pkgver}"
+ python setup.py install --root="$pkgdir"/ --optimize=1 --skip-build
+ install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE.txt"
+}
+
+package_python-pytorch-cuda() {
+ depends+=('cuda' 'cudnn')
+ provides=('python-pytorch')
+ conflicts=('python-pytorch')
+ cd "$srcdir/${_pkgname}-${pkgver}-cuda"
+ python setup.py install --root="$pkgdir"/ --optimize=1 --skip-build
+ install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE.txt"
+}
+
+# vim:set ts=2 sw=2 et:
Deleted: opencv4.patch
===================================================================
--- opencv4.patch 2018-12-06 05:23:46 UTC (rev 411615)
+++ opencv4.patch 2018-12-06 05:24:18 UTC (rev 411616)
@@ -1,72 +0,0 @@
-diff --git a/binaries/convert_encoded_to_raw_leveldb.cc b/binaries/convert_encoded_to_raw_leveldb.cc
-index c8ad32015..dea93d75a 100644
---- a/binaries/convert_encoded_to_raw_leveldb.cc
-+++ b/binaries/convert_encoded_to_raw_leveldb.cc
-@@ -109,7 +109,7 @@ void ConvertToRawDataset(
- cv::Mat img = cv::imdecode(
- cv::Mat(1, &encoded_size, CV_8UC1,
- const_cast<char*>(encoded_image.data())),
-- caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
-+ caffe2::FLAGS_color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
- cv::Mat resized_img;
- int scaled_width, scaled_height;
- if (caffe2::FLAGS_warp) {
-diff --git a/binaries/make_image_db.cc b/binaries/make_image_db.cc
-index 196000308..c47b52d99 100644
---- a/binaries/make_image_db.cc
-+++ b/binaries/make_image_db.cc
-@@ -140,8 +140,8 @@ class Converter {
- // Load image
- cv::Mat img = cv::imread(
- input_folder + pair.first,
-- caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR
-- : CV_LOAD_IMAGE_GRAYSCALE);
-+ caffe2::FLAGS_color ? cv::IMREAD_COLOR
-+ : cv::IMREAD_GRAYSCALE);
-
- // Resize image
- cv::Mat resized_img;
-diff --git a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
-index 0e3c0c772..720339287 100644
---- a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
-+++ b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
-@@ -93,7 +93,7 @@ void DecoderThread::makeJPEG(int idx) {
- size_t src_len = PyString_GET_SIZE(pySrc);
- vector<uchar> src_vec(src, src + src_len);
-
-- cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), CV_LOAD_IMAGE_COLOR);
-+ cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), cv::IMREAD_COLOR);
- assert(decoded_mat.channels() == 3);
-
- /*
-diff --git a/caffe2/image/image_input_op.h b/caffe2/image/image_input_op.h
-index 2ce313758..9deb7ed2c 100644
---- a/caffe2/image/image_input_op.h
-+++ b/caffe2/image/image_input_op.h
-@@ -465,7 +465,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
- datum.data().size(),
- CV_8UC1,
- const_cast<char*>(datum.data().data())),
-- color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
-+ color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
- if (src.rows == 0 or src.cols == 0) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
-@@ -540,7 +540,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
- &encoded_size,
- CV_8UC1,
- const_cast<char*>(encoded_image_str.data())),
-- color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
-+ color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
- if (src.rows == 0 or src.cols == 0) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
-@@ -681,7 +681,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
- if (out_c == src.channels()) {
- *img = src;
- } else {
-- cv::cvtColor(src, *img, (out_c == 1) ? CV_BGR2GRAY : CV_GRAY2BGR);
-+ cv::cvtColor(src, *img, (out_c == 1) ? cv::COLOR_BGR2GRAY : cv::COLOR_GRAY2BGR);
- }
-
- // Note(Yangqing): I believe that the mat should be created continuous.
Copied: python-pytorch/repos/community-x86_64/opencv4.patch (from rev 411615, python-pytorch/trunk/opencv4.patch)
===================================================================
--- opencv4.patch (rev 0)
+++ opencv4.patch 2018-12-06 05:24:18 UTC (rev 411616)
@@ -0,0 +1,72 @@
+diff --git a/binaries/convert_encoded_to_raw_leveldb.cc b/binaries/convert_encoded_to_raw_leveldb.cc
+index c8ad32015..dea93d75a 100644
+--- a/binaries/convert_encoded_to_raw_leveldb.cc
++++ b/binaries/convert_encoded_to_raw_leveldb.cc
+@@ -109,7 +109,7 @@ void ConvertToRawDataset(
+ cv::Mat img = cv::imdecode(
+ cv::Mat(1, &encoded_size, CV_8UC1,
+ const_cast<char*>(encoded_image.data())),
+- caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++ caffe2::FLAGS_color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+ cv::Mat resized_img;
+ int scaled_width, scaled_height;
+ if (caffe2::FLAGS_warp) {
+diff --git a/binaries/make_image_db.cc b/binaries/make_image_db.cc
+index 196000308..c47b52d99 100644
+--- a/binaries/make_image_db.cc
++++ b/binaries/make_image_db.cc
+@@ -140,8 +140,8 @@ class Converter {
+ // Load image
+ cv::Mat img = cv::imread(
+ input_folder + pair.first,
+- caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR
+- : CV_LOAD_IMAGE_GRAYSCALE);
++ caffe2::FLAGS_color ? cv::IMREAD_COLOR
++ : cv::IMREAD_GRAYSCALE);
+
+ // Resize image
+ cv::Mat resized_img;
+diff --git a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
+index 0e3c0c772..720339287 100644
+--- a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
++++ b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
+@@ -93,7 +93,7 @@ void DecoderThread::makeJPEG(int idx) {
+ size_t src_len = PyString_GET_SIZE(pySrc);
+ vector<uchar> src_vec(src, src + src_len);
+
+- cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), CV_LOAD_IMAGE_COLOR);
++ cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), cv::IMREAD_COLOR);
+ assert(decoded_mat.channels() == 3);
+
+ /*
+diff --git a/caffe2/image/image_input_op.h b/caffe2/image/image_input_op.h
+index 2ce313758..9deb7ed2c 100644
+--- a/caffe2/image/image_input_op.h
++++ b/caffe2/image/image_input_op.h
+@@ -465,7 +465,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+ datum.data().size(),
+ CV_8UC1,
+ const_cast<char*>(datum.data().data())),
+- color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++ color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+ if (src.rows == 0 or src.cols == 0) {
+ num_decode_errors_in_batch_++;
+ src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
+@@ -540,7 +540,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+ &encoded_size,
+ CV_8UC1,
+ const_cast<char*>(encoded_image_str.data())),
+- color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++ color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+ if (src.rows == 0 or src.cols == 0) {
+ num_decode_errors_in_batch_++;
+ src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
+@@ -681,7 +681,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+ if (out_c == src.channels()) {
+ *img = src;
+ } else {
+- cv::cvtColor(src, *img, (out_c == 1) ? CV_BGR2GRAY : CV_GRAY2BGR);
++ cv::cvtColor(src, *img, (out_c == 1) ? cv::COLOR_BGR2GRAY : cv::COLOR_GRAY2BGR);
+ }
+
+ // Note(Yangqing): I believe that the mat should be created continuous.
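For context on opencv4.patch: the legacy C-style constants (CV_LOAD_IMAGE_COLOR, CV_LOAD_IMAGE_GRAYSCALE, CV_BGR2GRAY, CV_GRAY2BGR) are no longer available through OpenCV 4's default C++ headers, which is why the patch rewrites them to the namespaced cv:: enumerators. Below is a small self-contained usage sketch of the new spellings; it is not part of the package, and the image path is just a command-line placeholder:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

// Decode an image in color and convert it to grayscale using the
// OpenCV 4 enumerators that the patch switches to.
int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <image-file>\n";
    return 1;
  }
  cv::Mat img = cv::imread(argv[1], cv::IMREAD_COLOR);  // was CV_LOAD_IMAGE_COLOR
  if (img.empty()) {
    std::cerr << "could not decode " << argv[1] << "\n";
    return 1;
  }
  cv::Mat gray;
  cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);          // was CV_BGR2GRAY
  std::cout << "decoded " << img.cols << "x" << img.rows
            << " image, converted to grayscale\n";
  return 0;
}

cv::imdecode accepts the same ImreadModes enumerators as cv::imread, so the hunks in convert_encoded_to_raw_leveldb.cc, pyext.cpp, and image_input_op.h are the same substitution.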