[arch-commits] Commit in python-pytorch/trunk (PKGBUILD opencv4.patch)

Sven-Hendrik Haase svenstaro at archlinux.org
Mon Nov 19 19:43:32 UTC 2018


    Date: Monday, November 19, 2018 @ 19:43:31
  Author: svenstaro
Revision: 408960

upgpkg: python-pytorch 1.0rc1-5

opencv 4 rebuild
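
OpenCV 4 removed the old C-style imgcodecs/imgproc constants
(CV_LOAD_IMAGE_*, CV_BGR2GRAY, ...), so caffe2's image utilities no longer
build against it. The opencv4.patch added below switches those call sites to
the namespaced OpenCV 4 names. As a minimal standalone sketch of the new
flags (illustrative only, not taken from the package sources; "input.jpg" is
a placeholder path):

    // Build with e.g.: g++ example.cpp $(pkg-config --cflags --libs opencv4)
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>

    int main() {
      // OpenCV 3's CV_LOAD_IMAGE_COLOR / CV_LOAD_IMAGE_GRAYSCALE are gone;
      // the namespaced cv::IMREAD_* flags replace them.
      cv::Mat img = cv::imread("input.jpg", cv::IMREAD_COLOR);
      if (img.empty()) return 1;  // file missing or decode failed

      // CV_BGR2GRAY likewise becomes cv::COLOR_BGR2GRAY.
      cv::Mat gray;
      cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
      return 0;
    }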

Added:
  python-pytorch/trunk/opencv4.patch
Modified:
  python-pytorch/trunk/PKGBUILD

---------------+
 PKGBUILD      |    7 +++--
 opencv4.patch |   72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)

Modified: PKGBUILD
===================================================================
--- PKGBUILD	2018-11-19 19:31:26 UTC (rev 408959)
+++ PKGBUILD	2018-11-19 19:43:31 UTC (rev 408960)
@@ -5,7 +5,7 @@
 pkgname=("python-pytorch" "python-pytorch-cuda")
 _pkgname="pytorch"
 pkgver=1.0rc1
-pkgrel=4
+pkgrel=5
 pkgdesc="Tensors and Dynamic neural networks in Python with strong GPU acceleration"
 arch=('x86_64')
 url="https://pytorch.org"
@@ -40,6 +40,7 @@
         "git+https://github.com/shibatch/sleef"
         "git+https://github.com/intel/ideep"
         12116.patch
+        opencv4.patch
         )
 sha256sums=('SKIP'
             'SKIP'
@@ -68,12 +69,14 @@
             'SKIP'
             'SKIP'
             'SKIP'
-            '2a2cd21dbdf7253185c8835a3f36b543a4b1655d837e01f7cfd27ab81819f2d5')
+            '2a2cd21dbdf7253185c8835a3f36b543a4b1655d837e01f7cfd27ab81819f2d5'
+            '8cf4226099f92315f14c83066f77e44443bc3d35aedf94d99b910f035cc9bc90')
 
 prepare() {
   cd "${_pkgname}-${pkgver}"
 
   patch -Np1 -i "${srcdir}"/12116.patch
+  patch -Np1 -i "${srcdir}"/opencv4.patch
 
   git submodule init
   git config submodule."third_party/catch".url "${srcdir}"/Catch2

Added: opencv4.patch
===================================================================
--- opencv4.patch	                        (rev 0)
+++ opencv4.patch	2018-11-19 19:43:31 UTC (rev 408960)
@@ -0,0 +1,72 @@
+diff --git a/binaries/convert_encoded_to_raw_leveldb.cc b/binaries/convert_encoded_to_raw_leveldb.cc
+index c8ad32015..dea93d75a 100644
+--- a/binaries/convert_encoded_to_raw_leveldb.cc
++++ b/binaries/convert_encoded_to_raw_leveldb.cc
+@@ -109,7 +109,7 @@ void ConvertToRawDataset(
+     cv::Mat img = cv::imdecode(
+         cv::Mat(1, &encoded_size, CV_8UC1,
+         const_cast<char*>(encoded_image.data())),
+-        caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++        caffe2::FLAGS_color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+     cv::Mat resized_img;
+     int scaled_width, scaled_height;
+     if (caffe2::FLAGS_warp) {
+diff --git a/binaries/make_image_db.cc b/binaries/make_image_db.cc
+index 196000308..c47b52d99 100644
+--- a/binaries/make_image_db.cc
++++ b/binaries/make_image_db.cc
+@@ -140,8 +140,8 @@ class Converter {
+         // Load image
+         cv::Mat img = cv::imread(
+             input_folder + pair.first,
+-            caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR
+-                                : CV_LOAD_IMAGE_GRAYSCALE);
++            caffe2::FLAGS_color ? cv::IMREAD_COLOR
++                                : cv::IMREAD_GRAYSCALE);
+ 
+         // Resize image
+         cv::Mat resized_img;
+diff --git a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
+index 0e3c0c772..720339287 100644
+--- a/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
++++ b/caffe2/contrib/cuda-convnet2/make-data/pyext/src/pyext.cpp
+@@ -93,7 +93,7 @@ void DecoderThread::makeJPEG(int idx) {
+     size_t src_len = PyString_GET_SIZE(pySrc);
+     vector<uchar> src_vec(src, src + src_len);
+ 
+-    cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), CV_LOAD_IMAGE_COLOR);
++    cv::Mat decoded_mat = cv::imdecode(cv::Mat(src_vec), cv::IMREAD_COLOR);
+     assert(decoded_mat.channels() == 3);
+ 
+     /*
+diff --git a/caffe2/image/image_input_op.h b/caffe2/image/image_input_op.h
+index 2ce313758..9deb7ed2c 100644
+--- a/caffe2/image/image_input_op.h
++++ b/caffe2/image/image_input_op.h
+@@ -465,7 +465,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+                 datum.data().size(),
+                 CV_8UC1,
+                 const_cast<char*>(datum.data().data())),
+-            color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++            color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+         if (src.rows == 0 or src.cols == 0) {
+           num_decode_errors_in_batch_++;
+           src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
+@@ -540,7 +540,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+                 &encoded_size,
+                 CV_8UC1,
+                 const_cast<char*>(encoded_image_str.data())),
+-            color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
++            color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+         if (src.rows == 0 or src.cols == 0) {
+           num_decode_errors_in_batch_++;
+           src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
+@@ -681,7 +681,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
+   if (out_c == src.channels()) {
+     *img = src;
+   } else {
+-    cv::cvtColor(src, *img, (out_c == 1) ? CV_BGR2GRAY : CV_GRAY2BGR);
++    cv::cvtColor(src, *img, (out_c == 1) ? cv::COLOR_BGR2GRAY : cv::COLOR_GRAY2BGR);
+   }
+ 
+   // Note(Yangqing): I believe that the mat should be created continuous.
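
For reference, the in-memory decode pattern these hunks update, written
against OpenCV 4 as a standalone sketch (illustrative only; decode_buffer is
a hypothetical helper, and caffe2's real code reads the encoded bytes from a
database record rather than taking a std::string argument):

    #include <opencv2/imgcodecs.hpp>
    #include <string>

    // Decode an encoded image held in a string buffer, as caffe2's image
    // ops do, using the OpenCV 4 flag names.
    cv::Mat decode_buffer(const std::string& encoded, bool color) {
      if (encoded.empty()) return cv::Mat();  // nothing to decode
      int size = static_cast<int>(encoded.size());
      return cv::imdecode(
          cv::Mat(1, &size, CV_8UC1, const_cast<char*>(encoded.data())),
          color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
    }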


