[arch-commits] Commit in ffmpeg/repos/extra-x86_64 (7 files)
Maxime Gauduin
alucryd at gemini.archlinux.org
Tue Sep 6 12:32:45 UTC 2022
Date: Tuesday, September 6, 2022 @ 12:32:45
Author: alucryd
Revision: 455118
archrelease: copy trunk to extra-x86_64
Added:
ffmpeg/repos/extra-x86_64/PKGBUILD
(from rev 455117, ffmpeg/trunk/PKGBUILD)
ffmpeg/repos/extra-x86_64/add-av_stream_get_first_dts-for-chromium.patch
(from rev 455117, ffmpeg/trunk/add-av_stream_get_first_dts-for-chromium.patch)
ffmpeg/repos/extra-x86_64/ffmpeg-vmaf2.x.patch
(from rev 455117, ffmpeg/trunk/ffmpeg-vmaf2.x.patch)
ffmpeg/repos/extra-x86_64/keys/
Deleted:
ffmpeg/repos/extra-x86_64/PKGBUILD
ffmpeg/repos/extra-x86_64/add-av_stream_get_first_dts-for-chromium.patch
ffmpeg/repos/extra-x86_64/ffmpeg-vmaf2.x.patch
------------------------------------------------+
 PKGBUILD                                       |  376 ++--
 add-av_stream_get_first_dts-for-chromium.patch |   62
 ffmpeg-vmaf2.x.patch                           | 1910 +++++++++++------------
3 files changed, 1173 insertions(+), 1175 deletions(-)
Deleted: PKGBUILD
===================================================================
--- PKGBUILD 2022-09-06 12:32:32 UTC (rev 455117)
+++ PKGBUILD 2022-09-06 12:32:45 UTC (rev 455118)
@@ -1,189 +0,0 @@
-# Maintainer: Maxime Gauduin <alucryd at archlinux.org>
-# Contributor: Bartłomiej Piotrowski <bpiotrowski at archlinux.org>
-# Contributor: Ionut Biru <ibiru at archlinux.org>
-# Contributor: Tom Newsom <Jeepster at gmx.co.uk>
-# Contributor: Paul Mattal <paul at archlinux.org>
-
-pkgname=ffmpeg
-pkgver=5.1.1
-pkgrel=1
-epoch=2
-pkgdesc='Complete solution to record, convert and stream audio and video'
-arch=(x86_64)
-url=https://ffmpeg.org/
-license=(GPL3)
-depends=(
- alsa-lib
- aom
- bzip2
- fontconfig
- fribidi
- gmp
- gnutls
- gsm
- jack
- lame
- libass.so
- libavc1394
- libbluray.so
- libdav1d.so
- libdrm
- libfreetype.so
- libiec61883
- libmfx
- libmodplug
- libpulse
- librav1e.so
- libraw1394
- librsvg-2.so
- libsoxr
- libssh
- libtheora
- libva.so
- libva-drm.so
- libva-x11.so
- libvdpau
- libvidstab.so
- libvorbisenc.so
- libvorbis.so
- libvpx.so
- libwebp
- libx11
- libx264.so
- libx265.so
- libxcb
- libxext
- libxml2
- libxv
- libxvidcore.so
- libzimg.so
- opencore-amr
- openjpeg2
- opus
- sdl2
- speex
- srt
- svt-av1
- v4l-utils
- vmaf
- xz
- zlib
-)
-makedepends=(
- amf-headers
- avisynthplus
- clang
- ffnvcodec-headers
- git
- ladspa
- nasm
-)
-optdepends=(
- 'avisynthplus: AviSynthPlus support'
- 'intel-media-sdk: Intel QuickSync support'
- 'ladspa: LADSPA filters'
- 'nvidia-utils: Nvidia NVDEC/NVENC support'
-)
-provides=(
- libavcodec.so
- libavdevice.so
- libavfilter.so
- libavformat.so
- libavutil.so
- libpostproc.so
- libswresample.so
- libswscale.so
-)
-options=(
- debug
-)
-_tag=1bad30dbe34f2d100b43e8f773d3fe0b5eb23523
-source=(
- git+https://git.ffmpeg.org/ffmpeg.git#tag=${_tag}
- add-av_stream_get_first_dts-for-chromium.patch
-)
-b2sums=('SKIP'
- '555274228e09a233d92beb365d413ff5c718a782008075552cafb2130a3783cf976b51dfe4513c15777fb6e8397a34122d475080f2c4483e8feea5c0d878e6de')
-
-prepare() {
- cd ffmpeg
- git cherry-pick -n 412922cc6fa790897ef6bb2be5d6f9a5f030754d # remove default IPFS gateway
- patch -Np1 -i ../add-av_stream_get_first_dts-for-chromium.patch # https://crbug.com/1251779
-}
-
-pkgver() {
- cd ffmpeg
- git describe --tags | sed 's/^n//'
-}
-
-build() {
- cd ffmpeg
-
- ./configure \
- --prefix=/usr \
- --disable-debug \
- --disable-static \
- --disable-stripping \
- --enable-amf \
- --enable-avisynth \
- --enable-cuda-llvm \
- --enable-lto \
- --enable-fontconfig \
- --enable-gmp \
- --enable-gnutls \
- --enable-gpl \
- --enable-ladspa \
- --enable-libaom \
- --enable-libass \
- --enable-libbluray \
- --enable-libdav1d \
- --enable-libdrm \
- --enable-libfreetype \
- --enable-libfribidi \
- --enable-libgsm \
- --enable-libiec61883 \
- --enable-libjack \
- --enable-libmfx \
- --enable-libmodplug \
- --enable-libmp3lame \
- --enable-libopencore_amrnb \
- --enable-libopencore_amrwb \
- --enable-libopenjpeg \
- --enable-libopus \
- --enable-libpulse \
- --enable-librav1e \
- --enable-librsvg \
- --enable-libsoxr \
- --enable-libspeex \
- --enable-libsrt \
- --enable-libssh \
- --enable-libsvtav1 \
- --enable-libtheora \
- --enable-libv4l2 \
- --enable-libvidstab \
- --enable-libvmaf \
- --enable-libvorbis \
- --enable-libvpx \
- --enable-libwebp \
- --enable-libx264 \
- --enable-libx265 \
- --enable-libxcb \
- --enable-libxml2 \
- --enable-libxvid \
- --enable-libzimg \
- --enable-nvdec \
- --enable-nvenc \
- --enable-shared \
- --enable-version3
-
- make
- make tools/qt-faststart
- make doc/ff{mpeg,play}.1
-}
-
-package() {
- make DESTDIR="${pkgdir}" -C ffmpeg install install-man
- install -Dm 755 ffmpeg/tools/qt-faststart "${pkgdir}"/usr/bin/
-}
-
-# vim: ts=2 sw=2 et:
Copied: ffmpeg/repos/extra-x86_64/PKGBUILD (from rev 455117, ffmpeg/trunk/PKGBUILD)
===================================================================
--- PKGBUILD (rev 0)
+++ PKGBUILD 2022-09-06 12:32:45 UTC (rev 455118)
@@ -0,0 +1,187 @@
+# Maintainer: Maxime Gauduin <alucryd at archlinux.org>
+# Contributor: Bartłomiej Piotrowski <bpiotrowski at archlinux.org>
+# Contributor: Ionut Biru <ibiru at archlinux.org>
+# Contributor: Tom Newsom <Jeepster at gmx.co.uk>
+# Contributor: Paul Mattal <paul at archlinux.org>
+
+pkgname=ffmpeg
+pkgver=5.1.1
+pkgrel=2
+epoch=2
+pkgdesc='Complete solution to record, convert and stream audio and video'
+arch=(x86_64)
+url=https://ffmpeg.org/
+license=(GPL3)
+depends=(
+ alsa-lib
+ aom
+ bzip2
+ fontconfig
+ fribidi
+ gmp
+ gnutls
+ gsm
+ jack
+ lame
+ libass.so
+ libavc1394
+ libbluray.so
+ libdav1d.so
+ libdrm
+ libfreetype.so
+ libiec61883
+ libmfx
+ libmodplug
+ libpulse
+ librav1e.so
+ libraw1394
+ librsvg-2.so
+ libsoxr
+ libssh
+ libtheora
+ libva.so
+ libva-drm.so
+ libva-x11.so
+ libvdpau
+ libvidstab.so
+ libvorbisenc.so
+ libvorbis.so
+ libvpx.so
+ libwebp
+ libx11
+ libx264.so
+ libx265.so
+ libxcb
+ libxext
+ libxml2
+ libxv
+ libxvidcore.so
+ libzimg.so
+ opencore-amr
+ openjpeg2
+ opus
+ sdl2
+ speex
+ srt
+ svt-av1
+ v4l-utils
+ vmaf
+ xz
+ zlib
+)
+makedepends=(
+ amf-headers
+ avisynthplus
+ clang
+ ffnvcodec-headers
+ git
+ ladspa
+ nasm
+)
+optdepends=(
+ 'avisynthplus: AviSynthPlus support'
+ 'intel-media-sdk: Intel QuickSync support'
+ 'ladspa: LADSPA filters'
+ 'nvidia-utils: Nvidia NVDEC/NVENC support'
+)
+provides=(
+ libavcodec.so
+ libavdevice.so
+ libavfilter.so
+ libavformat.so
+ libavutil.so
+ libpostproc.so
+ libswresample.so
+ libswscale.so
+)
+options=(
+ debug
+)
+_tag=8536e629f0c35c0e8a2b67e65d3bc60a088fe413
+source=(
+ git+https://git.ffmpeg.org/ffmpeg.git?signed#tag=${_tag}
+ add-av_stream_get_first_dts-for-chromium.patch
+)
+b2sums=('SKIP'
+ '555274228e09a233d92beb365d413ff5c718a782008075552cafb2130a3783cf976b51dfe4513c15777fb6e8397a34122d475080f2c4483e8feea5c0d878e6de')
+validpgpkeys=(DD1EC9E8DE085C629B3E1846B18E8928B3948D64) # Michael Niedermayer <michael at niedermayer.cc>
+
+prepare() {
+ cd ffmpeg
+ patch -Np1 -i ../add-av_stream_get_first_dts-for-chromium.patch # https://crbug.com/1251779
+}
+
+pkgver() {
+ cd ffmpeg
+ git describe --tags | sed 's/^n//'
+}
+
+build() {
+ cd ffmpeg
+ ./configure \
+ --prefix=/usr \
+ --disable-debug \
+ --disable-static \
+ --disable-stripping \
+ --enable-amf \
+ --enable-avisynth \
+ --enable-cuda-llvm \
+ --enable-lto \
+ --enable-fontconfig \
+ --enable-gmp \
+ --enable-gnutls \
+ --enable-gpl \
+ --enable-ladspa \
+ --enable-libaom \
+ --enable-libass \
+ --enable-libbluray \
+ --enable-libdav1d \
+ --enable-libdrm \
+ --enable-libfreetype \
+ --enable-libfribidi \
+ --enable-libgsm \
+ --enable-libiec61883 \
+ --enable-libjack \
+ --enable-libmfx \
+ --enable-libmodplug \
+ --enable-libmp3lame \
+ --enable-libopencore_amrnb \
+ --enable-libopencore_amrwb \
+ --enable-libopenjpeg \
+ --enable-libopus \
+ --enable-libpulse \
+ --enable-librav1e \
+ --enable-librsvg \
+ --enable-libsoxr \
+ --enable-libspeex \
+ --enable-libsrt \
+ --enable-libssh \
+ --enable-libsvtav1 \
+ --enable-libtheora \
+ --enable-libv4l2 \
+ --enable-libvidstab \
+ --enable-libvmaf \
+ --enable-libvorbis \
+ --enable-libvpx \
+ --enable-libwebp \
+ --enable-libx264 \
+ --enable-libx265 \
+ --enable-libxcb \
+ --enable-libxml2 \
+ --enable-libxvid \
+ --enable-libzimg \
+ --enable-nvdec \
+ --enable-nvenc \
+ --enable-shared \
+ --enable-version3
+ make
+ make tools/qt-faststart
+ make doc/ff{mpeg,play}.1
+}
+
+package() {
+ make DESTDIR="${pkgdir}" -C ffmpeg install install-man
+ install -Dm 755 ffmpeg/tools/qt-faststart "${pkgdir}"/usr/bin/
+}
+
+# vim: ts=2 sw=2 et:
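
The PKGBUILD above now fetches the git source with the ?signed query and pins
Michael Niedermayer's key in validpgpkeys, so makepkg checks the checkout's
PGP signature against that key. A rough manual equivalent of the same check
(a sketch only; the tag name n5.1.1 and the keyserver lookup are assumptions,
not part of this commit):

  # Sketch: manual check of the signed source; key and clone URL are taken
  # from the PKGBUILD above, the tag name is assumed from pkgver 5.1.1.
  gpg --recv-keys DD1EC9E8DE085C629B3E1846B18E8928B3948D64
  git clone https://git.ffmpeg.org/ffmpeg.git
  git -C ffmpeg verify-tag n5.1.1

makepkg performs the equivalent verification automatically for the ?signed
source, using the validpgpkeys entry above.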
Deleted: add-av_stream_get_first_dts-for-chromium.patch
===================================================================
--- add-av_stream_get_first_dts-for-chromium.patch 2022-09-06 12:32:32 UTC (rev 455117)
+++ add-av_stream_get_first_dts-for-chromium.patch 2022-09-06 12:32:45 UTC (rev 455118)
@@ -1,31 +0,0 @@
-diff '--color=auto' -rupN ffmpeg.orig/libavformat/avformat.h ffmpeg/libavformat/avformat.h
---- ffmpeg.orig/libavformat/avformat.h 2022-08-19 17:42:47.323422603 +0200
-+++ ffmpeg/libavformat/avformat.h 2022-08-19 17:42:51.347130436 +0200
-@@ -1128,6 +1128,10 @@ struct AVCodecParserContext *av_stream_g
- */
- int64_t av_stream_get_end_pts(const AVStream *st);
-
-+// Chromium: We use the internal field first_dts vvv
-+int64_t av_stream_get_first_dts(const AVStream *st);
-+// Chromium: We use the internal field first_dts ^^^
-+
- #define AV_PROGRAM_RUNNING 1
-
- /**
-diff '--color=auto' -rupN ffmpeg.orig/libavformat/mux_utils.c ffmpeg/libavformat/mux_utils.c
---- ffmpeg.orig/libavformat/mux_utils.c 2022-08-19 17:42:47.346758108 +0200
-+++ ffmpeg/libavformat/mux_utils.c 2022-08-19 17:47:28.549589002 +0200
-@@ -37,6 +37,13 @@ int64_t av_stream_get_end_pts(const AVSt
- return AV_NOPTS_VALUE;
- }
-
-+// Chromium: We use the internal field first_dts vvv
-+int64_t av_stream_get_first_dts(const AVStream *st)
-+{
-+ return cffstream(st)->first_dts;
-+}
-+// Chromium: We use the internal field first_dts ^^^
-+
- int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
- int std_compliance)
- {
Copied: ffmpeg/repos/extra-x86_64/add-av_stream_get_first_dts-for-chromium.patch (from rev 455117, ffmpeg/trunk/add-av_stream_get_first_dts-for-chromium.patch)
===================================================================
--- add-av_stream_get_first_dts-for-chromium.patch (rev 0)
+++ add-av_stream_get_first_dts-for-chromium.patch 2022-09-06 12:32:45 UTC (rev 455118)
@@ -0,0 +1,31 @@
+diff '--color=auto' -rupN ffmpeg.orig/libavformat/avformat.h ffmpeg/libavformat/avformat.h
+--- ffmpeg.orig/libavformat/avformat.h 2022-08-19 17:42:47.323422603 +0200
++++ ffmpeg/libavformat/avformat.h 2022-08-19 17:42:51.347130436 +0200
+@@ -1128,6 +1128,10 @@ struct AVCodecParserContext *av_stream_g
+ */
+ int64_t av_stream_get_end_pts(const AVStream *st);
+
++// Chromium: We use the internal field first_dts vvv
++int64_t av_stream_get_first_dts(const AVStream *st);
++// Chromium: We use the internal field first_dts ^^^
++
+ #define AV_PROGRAM_RUNNING 1
+
+ /**
+diff '--color=auto' -rupN ffmpeg.orig/libavformat/mux_utils.c ffmpeg/libavformat/mux_utils.c
+--- ffmpeg.orig/libavformat/mux_utils.c 2022-08-19 17:42:47.346758108 +0200
++++ ffmpeg/libavformat/mux_utils.c 2022-08-19 17:47:28.549589002 +0200
+@@ -37,6 +37,13 @@ int64_t av_stream_get_end_pts(const AVSt
+ return AV_NOPTS_VALUE;
+ }
+
++// Chromium: We use the internal field first_dts vvv
++int64_t av_stream_get_first_dts(const AVStream *st)
++{
++ return cffstream(st)->first_dts;
++}
++// Chromium: We use the internal field first_dts ^^^
++
+ int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
+ int std_compliance)
+ {
Deleted: ffmpeg-vmaf2.x.patch
===================================================================
--- ffmpeg-vmaf2.x.patch 2022-09-06 12:32:32 UTC (rev 455117)
+++ ffmpeg-vmaf2.x.patch 2022-09-06 12:32:45 UTC (rev 455118)
@@ -1,955 +0,0 @@
-diff --git a/configure b/configure
-index 94f513288a..493493b4c5 100755
---- a/configure
-+++ b/configure
-@@ -3751,7 +3751,7 @@ vaguedenoiser_filter_deps="gpl"
- vflip_vulkan_filter_deps="vulkan spirv_compiler"
- vidstabdetect_filter_deps="libvidstab"
- vidstabtransform_filter_deps="libvidstab"
--libvmaf_filter_deps="libvmaf pthreads"
-+libvmaf_filter_deps="libvmaf"
- zmq_filter_deps="libzmq"
- zoompan_filter_deps="swscale"
- zscale_filter_deps="libzimg const_nan"
-@@ -6626,7 +6626,7 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame
- enabled libuavs3d && require_pkg_config libuavs3d "uavs3d >= 1.1.41" uavs3d.h uavs3d_decode
- enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl
- enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
--enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.5.2" libvmaf.h compute_vmaf
-+enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 2.0.0" libvmaf.h vmaf_init
- enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc
- enabled libvorbis && require_pkg_config libvorbis vorbis vorbis/codec.h vorbis_info_init &&
- require_pkg_config libvorbisenc vorbisenc vorbis/vorbisenc.h vorbis_encode_init
-diff --git a/doc/filters.texi b/doc/filters.texi
-index 248c09caf8..9a890d1555 100644
---- a/doc/filters.texi
-+++ b/doc/filters.texi
-@@ -14666,68 +14666,60 @@ ffmpeg -i input.mov -vf lensfun=make=Canon:model="Canon EOS 100D":lens_model="Ca
-
- @section libvmaf
-
--Obtain the VMAF (Video Multi-Method Assessment Fusion)
--score between two input videos.
-+Calulate the VMAF (Video Multi-Method Assessment Fusion) score for a
-+reference/distorted pair of input videos.
-
--The first input is the encoded video, and the second input is the reference video.
-+The first input is the distorted video, and the second input is the reference video.
-
- The obtained VMAF score is printed through the logging system.
-
- It requires Netflix's vmaf library (libvmaf) as a pre-requisite.
- After installing the library it can be enabled using:
- @code{./configure --enable-libvmaf}.
--If no model path is specified it uses the default model: @code{vmaf_v0.6.1.pkl}.
-
- The filter has following options:
-
- @table @option
--@item model_path
--Set the model path which is to be used for SVM.
--Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"}
--
--@item log_path
--Set the file path to be used to store logs.
-+@item model
-+A `|` delimited list of vmaf models. Each model can be configured with a number of parameters.
-+Default value: @code{"version=vmaf_v0.6.1"}
-
--@item log_fmt
--Set the format of the log file (csv, json or xml).
-+@item model_path
-+Deprecated, use model='path=...'.
-
- @item enable_transform
--This option can enable/disable the @code{score_transform} applied to the final predicted VMAF score,
--if you have specified score_transform option in the input parameter file passed to @code{run_vmaf_training.py}
--Default value: @code{false}
-+Deprecated, use model='enable_transform=true'.
-
- @item phone_model
--Invokes the phone model which will generate VMAF scores higher than in the
--regular model, which is more suitable for laptop, TV, etc. viewing conditions.
--Default value: @code{false}
-+Deprecated, use model='enable_transform=true'.
-+
-+@item enable_conf_interval
-+Deprecated, use model='enable_conf_interval=true'.
-+
-+@item feature
-+A `|` delimited list of features. Each feature can be configured with a number of parameters.
-
- @item psnr
--Enables computing psnr along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=psnr'.
-
- @item ssim
--Enables computing ssim along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=ssim'.
-
- @item ms_ssim
--Enables computing ms_ssim along with vmaf.
--Default value: @code{false}
-+Deprecated, use feature='name=ms_ssim'.
-
--@item pool
--Set the pool method to be used for computing vmaf.
--Options are @code{min}, @code{harmonic_mean} or @code{mean} (default).
-+@item log_path
-+Set the file path to be used to store log files.
-+
-+@item log_fmt
-+Set the format of the log file (xml, json, csv, or sub).
-
- @item n_threads
--Set number of threads to be used when computing vmaf.
--Default value: @code{0}, which makes use of all available logical processors.
-+Set number of threads to be used when initializing libvmaf.
-+Default value: @code{0}, no threads.
-
- @item n_subsample
--Set interval for frame subsampling used when computing vmaf.
--Default value: @code{1}
--
--@item enable_conf_interval
--Enables confidence interval.
--Default value: @code{false}
-+Set frame subsampling interval to be used.
- @end table
-
- This filter also supports the @ref{framesync} options.
-@@ -14735,23 +14727,31 @@ This filter also supports the @ref{framesync} options.
- @subsection Examples
- @itemize
- @item
--On the below examples the input file @file{main.mpg} being processed is
--compared with the reference file @file{ref.mpg}.
-+In the examples below, a distorted video @file{distorted.mpg} is
-+compared with a reference file @file{reference.mpg}.
-
-+@item
-+Basic usage:
-+@example
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf=log_path=output.xml -f null -
-+@end example
-+
-+@item
-+Example with multiple models:
- @example
--ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null -
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='model=version=vmaf_v0.6.1\\:name=vmaf|version=vmaf_v0.6.1neg\\:name=vmaf_neg' -f null -
- @end example
-
- @item
--Example with options:
-+Example with multiple addtional features:
- @example
--ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
-+ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='feature=name=psnr|name=ciede' -f null -
- @end example
-
- @item
- Example with options and different containers:
- @example
--ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
-+ffmpeg -i distorted.mpg -i reference.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=log_fmt=json:log_path=output.json" -f null -
- @end example
- @end itemize
-
-diff --git a/libavfilter/vf_libvmaf.c b/libavfilter/vf_libvmaf.c
-index 5d492126eb..eee1c280ef 100644
---- a/libavfilter/vf_libvmaf.c
-+++ b/libavfilter/vf_libvmaf.c
-@@ -24,8 +24,8 @@
- * Calculate the VMAF between two input videos.
- */
-
--#include <pthread.h>
- #include <libvmaf.h>
-+
- #include "libavutil/avstring.h"
- #include "libavutil/opt.h"
- #include "libavutil/pixdesc.h"
-@@ -39,23 +39,9 @@
- typedef struct LIBVMAFContext {
- const AVClass *class;
- FFFrameSync fs;
-- const AVPixFmtDescriptor *desc;
-- int width;
-- int height;
-- double vmaf_score;
-- int vmaf_thread_created;
-- pthread_t vmaf_thread;
-- pthread_mutex_t lock;
-- pthread_cond_t cond;
-- int eof;
-- AVFrame *gmain;
-- AVFrame *gref;
-- int frame_set;
- char *model_path;
- char *log_path;
- char *log_fmt;
-- int disable_clip;
-- int disable_avx;
- int enable_transform;
- int phone_model;
- int psnr;
-@@ -65,184 +51,487 @@ typedef struct LIBVMAFContext {
- int n_threads;
- int n_subsample;
- int enable_conf_interval;
-- int error;
-+ char *model_cfg;
-+ char *feature_cfg;
-+ VmafContext *vmaf;
-+ VmafModel **model;
-+ unsigned model_cnt;
-+ unsigned frame_cnt;
-+ unsigned bpc;
- } LIBVMAFContext;
-
- #define OFFSET(x) offsetof(LIBVMAFContext, x)
- #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
- static const AVOption libvmaf_options[] = {
-- {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
-- {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-- {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-- {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-- {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-+ {"model_path", "use model='path=...'.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
-+ {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
-+ {"enable_transform", "use model='enable_transform=true'.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"phone_model", "use model='enable_transform=true'.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"psnr", "use feature='name=psnr'.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"ssim", "use feature='name=ssim'.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"ms_ssim", "use feature='name=ms_ssim'.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
- {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
- {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
- {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
-- {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
-+ {"enable_conf_interval", "model='enable_conf_interval=true'.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
-+ {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
-+ {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
- { NULL }
- };
-
- FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
-
--#define read_frame_fn(type, bits) \
-- static int read_frame_##bits##bit(float *ref_data, float *main_data, \
-- float *temp_data, int stride, void *ctx) \
--{ \
-- LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
-- int ret; \
-- \
-- pthread_mutex_lock(&s->lock); \
-- \
-- while (!s->frame_set && !s->eof) { \
-- pthread_cond_wait(&s->cond, &s->lock); \
-- } \
-- \
-- if (s->frame_set) { \
-- int ref_stride = s->gref->linesize[0]; \
-- int main_stride = s->gmain->linesize[0]; \
-- \
-- const type *ref_ptr = (const type *) s->gref->data[0]; \
-- const type *main_ptr = (const type *) s->gmain->data[0]; \
-- \
-- float *ptr = ref_data; \
-- float factor = 1.f / (1 << (bits - 8)); \
-- \
-- int h = s->height; \
-- int w = s->width; \
-- \
-- int i,j; \
-- \
-- for (i = 0; i < h; i++) { \
-- for ( j = 0; j < w; j++) { \
-- ptr[j] = ref_ptr[j] * factor; \
-- } \
-- ref_ptr += ref_stride / sizeof(*ref_ptr); \
-- ptr += stride / sizeof(*ptr); \
-- } \
-- \
-- ptr = main_data; \
-- \
-- for (i = 0; i < h; i++) { \
-- for (j = 0; j < w; j++) { \
-- ptr[j] = main_ptr[j] * factor; \
-- } \
-- main_ptr += main_stride / sizeof(*main_ptr); \
-- ptr += stride / sizeof(*ptr); \
-- } \
-- } \
-- \
-- ret = !s->frame_set; \
-- \
-- av_frame_unref(s->gref); \
-- av_frame_unref(s->gmain); \
-- s->frame_set = 0; \
-- \
-- pthread_cond_signal(&s->cond); \
-- pthread_mutex_unlock(&s->lock); \
-- \
-- if (ret) { \
-- return 2; \
-- } \
-- \
-- return 0; \
-+static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
-+{
-+ switch (av_pix_fmt) {
-+ case AV_PIX_FMT_YUV420P:
-+ case AV_PIX_FMT_YUV420P10LE:
-+ case AV_PIX_FMT_YUV420P12LE:
-+ case AV_PIX_FMT_YUV420P16LE:
-+ return VMAF_PIX_FMT_YUV420P;
-+ case AV_PIX_FMT_YUV422P:
-+ case AV_PIX_FMT_YUV422P10LE:
-+ case AV_PIX_FMT_YUV422P12LE:
-+ case AV_PIX_FMT_YUV422P16LE:
-+ return VMAF_PIX_FMT_YUV422P;
-+ case AV_PIX_FMT_YUV444P:
-+ case AV_PIX_FMT_YUV444P10LE:
-+ case AV_PIX_FMT_YUV444P12LE:
-+ case AV_PIX_FMT_YUV444P16LE:
-+ return VMAF_PIX_FMT_YUV444P;
-+ default:
-+ return VMAF_PIX_FMT_UNKNOWN;
-+ }
- }
-
--read_frame_fn(uint8_t, 8);
--read_frame_fn(uint16_t, 10);
-+static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
-+{
-+ int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
-+ src->width, src->height);
-+ if (err)
-+ return AVERROR(ENOMEM);
-+
-+ for (unsigned i = 0; i < 3; i++) {
-+ uint8_t *src_data = src->data[i];
-+ uint8_t *dst_data = dst->data[i];
-+ for (unsigned j = 0; j < dst->h[i]; j++) {
-+ memcpy(dst_data, src_data, sizeof(*dst_data) * dst->w[i]);
-+ src_data += src->linesize[i];
-+ dst_data += dst->stride[i];
-+ }
-+ }
-+
-+ return 0;
-+}
-
--static void compute_vmaf_score(LIBVMAFContext *s)
-+static int do_vmaf(FFFrameSync *fs)
- {
-- int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
-- int stride, void *ctx);
-- char *format;
-+ AVFilterContext *ctx = fs->parent;
-+ LIBVMAFContext *s = ctx->priv;
-+ VmafPicture pic_ref, pic_dist;
-+ AVFrame *ref, *dist;
-+ int err = 0;
-
-- if (s->desc->comp[0].depth <= 8) {
-- read_frame = read_frame_8bit;
-- } else {
-- read_frame = read_frame_10bit;
-+ int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
-+ if (ret < 0)
-+ return ret;
-+ if (ctx->is_disabled || !ref)
-+ return ff_filter_frame(ctx->outputs[0], dist);
-+
-+ err = copy_picture_data(ref, &pic_ref, s->bpc);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ err = copy_picture_data(dist, &pic_dist, s->bpc);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
-+ vmaf_picture_unref(&pic_ref);
-+ return AVERROR(ENOMEM);
- }
-
-- format = (char *) s->desc->name;
-+ err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
-+ if (err) {
-+ av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
-+ return AVERROR(EINVAL);
-+ }
-
-- s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
-- read_frame, s, s->model_path, s->log_path,
-- s->log_fmt, 0, 0, s->enable_transform,
-- s->phone_model, s->psnr, s->ssim,
-- s->ms_ssim, s->pool,
-- s->n_threads, s->n_subsample, s->enable_conf_interval);
-+ return ff_filter_frame(ctx->outputs[0], dist);
- }
-
--static void *call_vmaf(void *ctx)
-+
-+static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
- {
-- LIBVMAFContext *s = (LIBVMAFContext *) ctx;
-- compute_vmaf_score(s);
-- if (!s->error) {
-- av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
-- } else {
-- pthread_mutex_lock(&s->lock);
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ AVDictionary **dict = NULL;
-+ char *str_copy = NULL;
-+ char *saveptr = NULL;
-+ unsigned cnt2;
-+ int err = 0;
-+
-+ if (!str)
-+ return NULL;
-+
-+ cnt2 = 1;
-+ for (char *p = str; *p; p++) {
-+ if (*p == '|')
-+ cnt2++;
-+ }
-+
-+ dict = av_calloc(cnt2, sizeof(*dict));
-+ if (!dict)
-+ goto fail;
-+
-+ str_copy = av_strdup(str);
-+ if (!str_copy)
-+ goto fail;
-+
-+ *cnt = 0;
-+ for (unsigned i = 0; i < cnt2; i++) {
-+ char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
-+ if (!s)
-+ continue;
-+ err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
-+ if (err)
-+ goto fail;
-+ }
-+
-+ av_free(str_copy);
-+ return dict;
-+
-+fail:
-+ if (dict) {
-+ for (unsigned i = 0; i < *cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
-+ }
-+ av_free(dict);
- }
-- pthread_exit(NULL);
-+
-+ av_free(str_copy);
-+ *cnt = 0;
- return NULL;
- }
-
--static int do_vmaf(FFFrameSync *fs)
-+static int parse_features(AVFilterContext *ctx)
- {
-- AVFilterContext *ctx = fs->parent;
- LIBVMAFContext *s = ctx->priv;
-- AVFrame *master, *ref;
-- int ret;
-+ AVDictionary **dict = NULL;
-+ unsigned dict_cnt;
-+ int err = 0;
-
-- ret = ff_framesync_dualinput_get(fs, &master, &ref);
-- if (ret < 0)
-- return ret;
-- if (!ref)
-- return ff_filter_frame(ctx->outputs[0], master);
-+ if (!s->feature_cfg)
-+ return 0;
-+
-+ dict = delimited_dict_parse(s->feature_cfg, &dict_cnt);
-+ if (!dict) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not parse feature config: %s\n", s->feature_cfg);
-+ return AVERROR(EINVAL);
-+ }
-
-- pthread_mutex_lock(&s->lock);
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ char *feature_name = NULL;
-+ VmafFeatureDictionary *feature_opts_dict = NULL;
-+ AVDictionaryEntry *e = NULL;
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ if (av_stristr(e->key, "name")) {
-+ feature_name = e->value;
-+ continue;
-+ }
-+
-+ err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key,
-+ e->value);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not set feature option: %s.%s=%s\n",
-+ feature_name, e->key, e->value);
-+ goto exit;
-+ }
-+ }
-+
-+ err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem during vmaf_use_feature: %s\n", feature_name);
-+ goto exit;
-+ }
-+ }
-
-- while (s->frame_set && !s->error) {
-- pthread_cond_wait(&s->cond, &s->lock);
-+exit:
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
- }
-+ av_free(dict);
-+ return err;
-+}
-+
-+static int parse_models(AVFilterContext *ctx)
-+{
-+ LIBVMAFContext *s = ctx->priv;
-+ AVDictionary **dict;
-+ unsigned dict_cnt;
-+ int err = 0;
-+
-+ if (!s->model_cfg) return 0;
-
-- if (s->error) {
-+ dict_cnt = 0;
-+ dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
-+ if (!dict) {
- av_log(ctx, AV_LOG_ERROR,
-- "libvmaf encountered an error, check log for details\n");
-- pthread_mutex_unlock(&s->lock);
-+ "could not parse model config: %s\n", s->model_cfg);
- return AVERROR(EINVAL);
- }
-
-- av_frame_ref(s->gref, ref);
-- av_frame_ref(s->gmain, master);
-+ s->model_cnt = dict_cnt;
-+ s->model = av_calloc(s->model_cnt, sizeof(*s->model));
-+ if (!s->model)
-+ return AVERROR(ENOMEM);
-+
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ VmafModelConfig model_cfg = { 0 };
-+ AVDictionaryEntry *e = NULL;
-+ char *version = NULL;
-+ char *path = NULL;
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ if (av_stristr(e->key, "disable_clip")) {
-+ model_cfg.flags |= av_stristr(e->value, "true") ?
-+ VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "enable_transform")) {
-+ model_cfg.flags |= av_stristr(e->value, "true") ?
-+ VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "name")) {
-+ model_cfg.name = e->value;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "version")) {
-+ version = e->value;
-+ continue;
-+ }
-+
-+ if (av_stristr(e->key, "path")) {
-+ path = e->value;
-+ continue;
-+ }
-+ }
-+
-+ if (version) {
-+ err = vmaf_model_load(&s->model[i], &model_cfg, version);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with version: %s\n",
-+ version);
-+ goto exit;
-+ }
-+ }
-+
-+ if (path && !s->model[i]) {
-+ err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with path: %s\n",
-+ path);
-+ goto exit;
-+ }
-+ }
-+
-+ if (!s->model[i]) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not load libvmaf model with config: %s\n",
-+ s->model_cfg);
-+ goto exit;
-+ }
-+
-+ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
-+ VmafFeatureDictionary *feature_opts_dict = NULL;
-+ char *feature_opt = NULL;
-+
-+ char *feature_name = av_strtok(e->key, ".", &feature_opt);
-+ if (!feature_opt)
-+ continue;
-+
-+ err = vmaf_feature_dictionary_set(&feature_opts_dict,
-+ feature_opt, e->value);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not set feature option: %s.%s=%s\n",
-+ feature_name, feature_opt, e->value);
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+
-+ err = vmaf_model_feature_overload(s->model[i], feature_name,
-+ feature_opts_dict);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "could not overload feature: %s\n", feature_name);
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+ }
-+ }
-+
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem during vmaf_use_features_from_model\n");
-+ err = AVERROR(EINVAL);
-+ goto exit;
-+ }
-+ }
-+
-+exit:
-+ for (unsigned i = 0; i < dict_cnt; i++) {
-+ if (dict[i])
-+ av_dict_free(&dict[i]);
-+ }
-+ av_free(dict);
-+ return err;
-+}
-+
-+static enum VmafLogLevel log_level_map(int log_level)
-+{
-+ switch (log_level) {
-+ case AV_LOG_QUIET:
-+ return VMAF_LOG_LEVEL_NONE;
-+ case AV_LOG_ERROR:
-+ return VMAF_LOG_LEVEL_ERROR;
-+ case AV_LOG_WARNING:
-+ return VMAF_LOG_LEVEL_WARNING;
-+ case AV_LOG_INFO:
-+ return VMAF_LOG_LEVEL_INFO;
-+ case AV_LOG_DEBUG:
-+ return VMAF_LOG_LEVEL_DEBUG;
-+ default:
-+ return VMAF_LOG_LEVEL_INFO;
-+ }
-+}
-+
-+static int parse_deprecated_options(AVFilterContext *ctx)
-+{
-+ LIBVMAFContext *s = ctx->priv;
-+ VmafModel *model = NULL;
-+ VmafModelCollection *model_collection = NULL;
-+ enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT;
-+ int err = 0;
-+
-+ VmafModelConfig model_cfg = {
-+ .name = "vmaf",
-+ .flags = flags,
-+ };
-+
-+ if (s->enable_transform || s->phone_model)
-+ flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM;
-+
-+ if (!s->model_path)
-+ goto extra_metrics_only;
-+
-+ if (s->enable_conf_interval) {
-+ err = vmaf_model_collection_load_from_path(&model, &model_collection,
-+ &model_cfg, s->model_path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading model file: %s\n", s->model_path);
-+ goto exit;
-+ }
-+
-+ err = vmaf_use_features_from_model_collection(s->vmaf, model_collection);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractors from model file: %s\n",
-+ s->model_path);
-+ goto exit;
-+ }
-+ } else {
-+ err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading model file: %s\n", s->model_path);
-+ goto exit;
-+ }
-+ err = vmaf_use_features_from_model(s->vmaf, model);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractors from model file: %s\n",
-+ s->model_path);
-+ goto exit;
-+ }
-+ }
-+
-+extra_metrics_only:
-+ if (s->psnr) {
-+ VmafFeatureDictionary *d = NULL;
-+ vmaf_feature_dictionary_set(&d, "enable_chroma", "false");
-+
-+ err = vmaf_use_feature(s->vmaf, "psnr", d);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: psnr\n");
-+ goto exit;
-+ }
-+ }
-
-- s->frame_set = 1;
-+ if (s->ssim) {
-+ err = vmaf_use_feature(s->vmaf, "float_ssim", NULL);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: ssim\n");
-+ goto exit;
-+ }
-+ }
-
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ if (s->ms_ssim) {
-+ err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem loading feature extractor: ms_ssim\n");
-+ goto exit;
-+ }
-+ }
-
-- return ff_filter_frame(ctx->outputs[0], master);
-+exit:
-+ return err;
- }
-
- static av_cold int init(AVFilterContext *ctx)
- {
- LIBVMAFContext *s = ctx->priv;
-+ int err = 0;
-
-- s->gref = av_frame_alloc();
-- s->gmain = av_frame_alloc();
-- if (!s->gref || !s->gmain)
-- return AVERROR(ENOMEM);
-+ VmafConfiguration cfg = {
-+ .log_level = log_level_map(av_log_get_level()),
-+ .n_subsample = s->n_subsample,
-+ .n_threads = s->n_threads,
-+ };
-+
-+ err = vmaf_init(&s->vmaf, cfg);
-+ if (err)
-+ return AVERROR(EINVAL);
-+
-+ err = parse_deprecated_options(ctx);
-+ if (err)
-+ return err;
-
-- s->error = 0;
-+ err = parse_models(ctx);
-+ if (err)
-+ return err;
-
-- s->vmaf_thread_created = 0;
-- pthread_mutex_init(&s->lock, NULL);
-- pthread_cond_init (&s->cond, NULL);
-+ err = parse_features(ctx);
-+ if (err)
-+ return err;
-
- s->fs.on_event = do_vmaf;
- return 0;
-@@ -256,26 +545,31 @@ static const enum AVPixelFormat pix_fmts[] = {
-
- static int config_input_ref(AVFilterLink *inlink)
- {
-- AVFilterContext *ctx = inlink->dst;
-+ AVFilterContext *ctx = inlink->dst;
- LIBVMAFContext *s = ctx->priv;
-- int th;
-+ const AVPixFmtDescriptor *desc;
-+ int err = 0;
-
-- if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
-- ctx->inputs[0]->h != ctx->inputs[1]->h) {
-- av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
-- return AVERROR(EINVAL);
-+ if (ctx->inputs[0]->w != ctx->inputs[1]->w) {
-+ av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
-+ err |= AVERROR(EINVAL);
- }
-
-- s->desc = av_pix_fmt_desc_get(inlink->format);
-- s->width = ctx->inputs[0]->w;
-- s->height = ctx->inputs[0]->h;
-+ if (ctx->inputs[0]->h != ctx->inputs[1]->h) {
-+ av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
-+ err |= AVERROR(EINVAL);
-+ }
-
-- th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
-- if (th) {
-- av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
-- return AVERROR(EINVAL);
-+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
-+ av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
-+ err |= AVERROR(EINVAL);
- }
-- s->vmaf_thread_created = 1;
-+
-+ if (err)
-+ return err;
-+
-+ desc = av_pix_fmt_desc_get(inlink->format);
-+ s->bpc = desc->comp[0].depth;
-
- return 0;
- }
-@@ -307,28 +601,80 @@ static int activate(AVFilterContext *ctx)
- return ff_framesync_activate(&s->fs);
- }
-
-+static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
-+{
-+ if (log_fmt) {
-+ if (av_stristr(log_fmt, "xml"))
-+ return VMAF_OUTPUT_FORMAT_XML;
-+ if (av_stristr(log_fmt, "json"))
-+ return VMAF_OUTPUT_FORMAT_JSON;
-+ if (av_stristr(log_fmt, "csv"))
-+ return VMAF_OUTPUT_FORMAT_CSV;
-+ if (av_stristr(log_fmt, "sub"))
-+ return VMAF_OUTPUT_FORMAT_SUB;
-+ }
-+
-+ return VMAF_OUTPUT_FORMAT_XML;
-+}
-+
-+static enum VmafPoolingMethod pool_method_map(const char *pool_method)
-+{
-+ if (pool_method) {
-+ if (av_stristr(pool_method, "min"))
-+ return VMAF_POOL_METHOD_MIN;
-+ if (av_stristr(pool_method, "mean"))
-+ return VMAF_POOL_METHOD_MEAN;
-+ if (av_stristr(pool_method, "harmonic_mean"))
-+ return VMAF_POOL_METHOD_HARMONIC_MEAN;
-+ }
-+
-+ return VMAF_POOL_METHOD_MEAN;
-+}
-+
- static av_cold void uninit(AVFilterContext *ctx)
- {
- LIBVMAFContext *s = ctx->priv;
-+ int err = 0;
-
- ff_framesync_uninit(&s->fs);
-
-- pthread_mutex_lock(&s->lock);
-- s->eof = 1;
-- pthread_cond_signal(&s->cond);
-- pthread_mutex_unlock(&s->lock);
-+ if (!s->frame_cnt)
-+ goto clean_up;
-
-- if (s->vmaf_thread_created)
-- {
-- pthread_join(s->vmaf_thread, NULL);
-- s->vmaf_thread_created = 0;
-+ err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem flushing libvmaf context.\n");
- }
-
-- av_frame_free(&s->gref);
-- av_frame_free(&s->gmain);
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ double vmaf_score;
-+ err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
-+ &vmaf_score, 0, s->frame_cnt - 1);
-+ if (err) {
-+ av_log(ctx, AV_LOG_ERROR,
-+ "problem getting pooled vmaf score.\n");
-+ }
-+
-+ av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
-+ }
-+
-+ if (s->vmaf) {
-+ if (s->log_path && !err)
-+ vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
-+ }
-+
-+clean_up:
-+ if (s->model) {
-+ for (unsigned i = 0; i < s->model_cnt; i++) {
-+ if (s->model[i])
-+ vmaf_model_destroy(s->model[i]);
-+ }
-+ av_free(s->model);
-+ }
-
-- pthread_mutex_destroy(&s->lock);
-- pthread_cond_destroy(&s->cond);
-+ if (s->vmaf)
-+ vmaf_close(s->vmaf);
- }
-
- static const AVFilterPad libvmaf_inputs[] = {
---
-2.20.1
-
Copied: ffmpeg/repos/extra-x86_64/ffmpeg-vmaf2.x.patch (from rev 455117, ffmpeg/trunk/ffmpeg-vmaf2.x.patch)
===================================================================
--- ffmpeg-vmaf2.x.patch (rev 0)
+++ ffmpeg-vmaf2.x.patch 2022-09-06 12:32:45 UTC (rev 455118)
@@ -0,0 +1,955 @@
+diff --git a/configure b/configure
+index 94f513288a..493493b4c5 100755
+--- a/configure
++++ b/configure
+@@ -3751,7 +3751,7 @@ vaguedenoiser_filter_deps="gpl"
+ vflip_vulkan_filter_deps="vulkan spirv_compiler"
+ vidstabdetect_filter_deps="libvidstab"
+ vidstabtransform_filter_deps="libvidstab"
+-libvmaf_filter_deps="libvmaf pthreads"
++libvmaf_filter_deps="libvmaf"
+ zmq_filter_deps="libzmq"
+ zoompan_filter_deps="swscale"
+ zscale_filter_deps="libzimg const_nan"
+@@ -6626,7 +6626,7 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame
+ enabled libuavs3d && require_pkg_config libuavs3d "uavs3d >= 1.1.41" uavs3d.h uavs3d_decode
+ enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl
+ enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
+-enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.5.2" libvmaf.h compute_vmaf
++enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 2.0.0" libvmaf.h vmaf_init
+ enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc
+ enabled libvorbis && require_pkg_config libvorbis vorbis vorbis/codec.h vorbis_info_init &&
+ require_pkg_config libvorbisenc vorbisenc vorbis/vorbisenc.h vorbis_encode_init
+diff --git a/doc/filters.texi b/doc/filters.texi
+index 248c09caf8..9a890d1555 100644
+--- a/doc/filters.texi
++++ b/doc/filters.texi
+@@ -14666,68 +14666,60 @@ ffmpeg -i input.mov -vf lensfun=make=Canon:model="Canon EOS 100D":lens_model="Ca
+
+ @section libvmaf
+
+-Obtain the VMAF (Video Multi-Method Assessment Fusion)
+-score between two input videos.
++Calulate the VMAF (Video Multi-Method Assessment Fusion) score for a
++reference/distorted pair of input videos.
+
+-The first input is the encoded video, and the second input is the reference video.
++The first input is the distorted video, and the second input is the reference video.
+
+ The obtained VMAF score is printed through the logging system.
+
+ It requires Netflix's vmaf library (libvmaf) as a pre-requisite.
+ After installing the library it can be enabled using:
+ @code{./configure --enable-libvmaf}.
+-If no model path is specified it uses the default model: @code{vmaf_v0.6.1.pkl}.
+
+ The filter has following options:
+
+ @table @option
+-@item model_path
+-Set the model path which is to be used for SVM.
+-Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"}
+-
+-@item log_path
+-Set the file path to be used to store logs.
++@item model
++A `|` delimited list of vmaf models. Each model can be configured with a number of parameters.
++Default value: @code{"version=vmaf_v0.6.1"}
+
+-@item log_fmt
+-Set the format of the log file (csv, json or xml).
++@item model_path
++Deprecated, use model='path=...'.
+
+ @item enable_transform
+-This option can enable/disable the @code{score_transform} applied to the final predicted VMAF score,
+-if you have specified score_transform option in the input parameter file passed to @code{run_vmaf_training.py}
+-Default value: @code{false}
++Deprecated, use model='enable_transform=true'.
+
+ @item phone_model
+-Invokes the phone model which will generate VMAF scores higher than in the
+-regular model, which is more suitable for laptop, TV, etc. viewing conditions.
+-Default value: @code{false}
++Deprecated, use model='enable_transform=true'.
++
++@item enable_conf_interval
++Deprecated, use model='enable_conf_interval=true'.
++
++@item feature
++A `|` delimited list of features. Each feature can be configured with a number of parameters.
+
+ @item psnr
+-Enables computing psnr along with vmaf.
+-Default value: @code{false}
++Deprecated, use feature='name=psnr'.
+
+ @item ssim
+-Enables computing ssim along with vmaf.
+-Default value: @code{false}
++Deprecated, use feature='name=ssim'.
+
+ @item ms_ssim
+-Enables computing ms_ssim along with vmaf.
+-Default value: @code{false}
++Deprecated, use feature='name=ms_ssim'.
+
+-@item pool
+-Set the pool method to be used for computing vmaf.
+-Options are @code{min}, @code{harmonic_mean} or @code{mean} (default).
++@item log_path
++Set the file path to be used to store log files.
++
++@item log_fmt
++Set the format of the log file (xml, json, csv, or sub).
+
+ @item n_threads
+-Set number of threads to be used when computing vmaf.
+-Default value: @code{0}, which makes use of all available logical processors.
++Set number of threads to be used when initializing libvmaf.
++Default value: @code{0}, no threads.
+
+ @item n_subsample
+-Set interval for frame subsampling used when computing vmaf.
+-Default value: @code{1}
+-
+-@item enable_conf_interval
+-Enables confidence interval.
+-Default value: @code{false}
++Set frame subsampling interval to be used.
+ @end table
+
+ This filter also supports the @ref{framesync} options.
+@@ -14735,23 +14727,31 @@ This filter also supports the @ref{framesync} options.
+ @subsection Examples
+ @itemize
+ @item
+-On the below examples the input file @file{main.mpg} being processed is
+-compared with the reference file @file{ref.mpg}.
++In the examples below, a distorted video @file{distorted.mpg} is
++compared with a reference file @file{reference.mpg}.
+
++@item
++Basic usage:
++@example
++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf=log_path=output.xml -f null -
++@end example
++
++@item
++Example with multiple models:
+ @example
+-ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null -
++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='model=version=vmaf_v0.6.1\\:name=vmaf|version=vmaf_v0.6.1neg\\:name=vmaf_neg' -f null -
+ @end example
+
+ @item
+-Example with options:
++Example with multiple addtional features:
+ @example
+-ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
++ffmpeg -i distorted.mpg -i reference.mpg -lavfi libvmaf='feature=name=psnr|name=ciede' -f null -
+ @end example
+
+ @item
+ Example with options and different containers:
+ @example
+-ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
++ffmpeg -i distorted.mpg -i reference.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=log_fmt=json:log_path=output.json" -f null -
+ @end example
+ @end itemize
+
+diff --git a/libavfilter/vf_libvmaf.c b/libavfilter/vf_libvmaf.c
+index 5d492126eb..eee1c280ef 100644
+--- a/libavfilter/vf_libvmaf.c
++++ b/libavfilter/vf_libvmaf.c
+@@ -24,8 +24,8 @@
+ * Calculate the VMAF between two input videos.
+ */
+
+-#include <pthread.h>
+ #include <libvmaf.h>
++
+ #include "libavutil/avstring.h"
+ #include "libavutil/opt.h"
+ #include "libavutil/pixdesc.h"
+@@ -39,23 +39,9 @@
+ typedef struct LIBVMAFContext {
+ const AVClass *class;
+ FFFrameSync fs;
+- const AVPixFmtDescriptor *desc;
+- int width;
+- int height;
+- double vmaf_score;
+- int vmaf_thread_created;
+- pthread_t vmaf_thread;
+- pthread_mutex_t lock;
+- pthread_cond_t cond;
+- int eof;
+- AVFrame *gmain;
+- AVFrame *gref;
+- int frame_set;
+ char *model_path;
+ char *log_path;
+ char *log_fmt;
+- int disable_clip;
+- int disable_avx;
+ int enable_transform;
+ int phone_model;
+ int psnr;
+@@ -65,184 +51,487 @@ typedef struct LIBVMAFContext {
+ int n_threads;
+ int n_subsample;
+ int enable_conf_interval;
+- int error;
++ char *model_cfg;
++ char *feature_cfg;
++ VmafContext *vmaf;
++ VmafModel **model;
++ unsigned model_cnt;
++ unsigned frame_cnt;
++ unsigned bpc;
+ } LIBVMAFContext;
+
+ #define OFFSET(x) offsetof(LIBVMAFContext, x)
+ #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+ static const AVOption libvmaf_options[] = {
+- {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
+- {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
+- {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
+- {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+- {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+- {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+- {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
+- {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
++ {"model_path", "use model='path=...'.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
++ {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
++ {"enable_transform", "use model='enable_transform=true'.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"phone_model", "use model='enable_transform=true'.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"psnr", "use feature='name=psnr'.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"ssim", "use feature='name=ssim'.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"ms_ssim", "use feature='name=ms_ssim'.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
+ {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
+ {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
+ {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
+- {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
++ {"enable_conf_interval", "model='enable_conf_interval=true'.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
++ {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
++ {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
+ { NULL }
+ };
+
+ FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
+
+-#define read_frame_fn(type, bits) \
+- static int read_frame_##bits##bit(float *ref_data, float *main_data, \
+- float *temp_data, int stride, void *ctx) \
+-{ \
+- LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
+- int ret; \
+- \
+- pthread_mutex_lock(&s->lock); \
+- \
+- while (!s->frame_set && !s->eof) { \
+- pthread_cond_wait(&s->cond, &s->lock); \
+- } \
+- \
+- if (s->frame_set) { \
+- int ref_stride = s->gref->linesize[0]; \
+- int main_stride = s->gmain->linesize[0]; \
+- \
+- const type *ref_ptr = (const type *) s->gref->data[0]; \
+- const type *main_ptr = (const type *) s->gmain->data[0]; \
+- \
+- float *ptr = ref_data; \
+- float factor = 1.f / (1 << (bits - 8)); \
+- \
+- int h = s->height; \
+- int w = s->width; \
+- \
+- int i,j; \
+- \
+- for (i = 0; i < h; i++) { \
+- for ( j = 0; j < w; j++) { \
+- ptr[j] = ref_ptr[j] * factor; \
+- } \
+- ref_ptr += ref_stride / sizeof(*ref_ptr); \
+- ptr += stride / sizeof(*ptr); \
+- } \
+- \
+- ptr = main_data; \
+- \
+- for (i = 0; i < h; i++) { \
+- for (j = 0; j < w; j++) { \
+- ptr[j] = main_ptr[j] * factor; \
+- } \
+- main_ptr += main_stride / sizeof(*main_ptr); \
+- ptr += stride / sizeof(*ptr); \
+- } \
+- } \
+- \
+- ret = !s->frame_set; \
+- \
+- av_frame_unref(s->gref); \
+- av_frame_unref(s->gmain); \
+- s->frame_set = 0; \
+- \
+- pthread_cond_signal(&s->cond); \
+- pthread_mutex_unlock(&s->lock); \
+- \
+- if (ret) { \
+- return 2; \
+- } \
+- \
+- return 0; \
++static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
++{
++ switch (av_pix_fmt) {
++ case AV_PIX_FMT_YUV420P:
++ case AV_PIX_FMT_YUV420P10LE:
++ case AV_PIX_FMT_YUV420P12LE:
++ case AV_PIX_FMT_YUV420P16LE:
++ return VMAF_PIX_FMT_YUV420P;
++ case AV_PIX_FMT_YUV422P:
++ case AV_PIX_FMT_YUV422P10LE:
++ case AV_PIX_FMT_YUV422P12LE:
++ case AV_PIX_FMT_YUV422P16LE:
++ return VMAF_PIX_FMT_YUV422P;
++ case AV_PIX_FMT_YUV444P:
++ case AV_PIX_FMT_YUV444P10LE:
++ case AV_PIX_FMT_YUV444P12LE:
++ case AV_PIX_FMT_YUV444P16LE:
++ return VMAF_PIX_FMT_YUV444P;
++ default:
++ return VMAF_PIX_FMT_UNKNOWN;
++ }
+ }
+
+-read_frame_fn(uint8_t, 8);
+-read_frame_fn(uint16_t, 10);
++static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
++{
++ int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
++ src->width, src->height);
++ if (err)
++ return AVERROR(ENOMEM);
++
++ for (unsigned i = 0; i < 3; i++) {
++ uint8_t *src_data = src->data[i];
++ uint8_t *dst_data = dst->data[i];
++ for (unsigned j = 0; j < dst->h[i]; j++) {
++ memcpy(dst_data, src_data, sizeof(*dst_data) * dst->w[i]);
++ src_data += src->linesize[i];
++ dst_data += dst->stride[i];
++ }
++ }
++
++ return 0;
++}
+
+-static void compute_vmaf_score(LIBVMAFContext *s)
++static int do_vmaf(FFFrameSync *fs)
+ {
+- int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
+- int stride, void *ctx);
+- char *format;
++ AVFilterContext *ctx = fs->parent;
++ LIBVMAFContext *s = ctx->priv;
++ VmafPicture pic_ref, pic_dist;
++ AVFrame *ref, *dist;
++ int err = 0;
+
+- if (s->desc->comp[0].depth <= 8) {
+- read_frame = read_frame_8bit;
+- } else {
+- read_frame = read_frame_10bit;
++ int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
++ if (ret < 0)
++ return ret;
++ if (ctx->is_disabled || !ref)
++ return ff_filter_frame(ctx->outputs[0], dist);
++
++ err = copy_picture_data(ref, &pic_ref, s->bpc);
++ if (err) {
++ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
++ return AVERROR(ENOMEM);
++ }
++
++ err = copy_picture_data(dist, &pic_dist, s->bpc);
++ if (err) {
++ av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
++ vmaf_picture_unref(&pic_ref);
++ return AVERROR(ENOMEM);
+ }
+
+- format = (char *) s->desc->name;
++ err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
++ if (err) {
++ av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
++ return AVERROR(EINVAL);
++ }
+
+- s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
+- read_frame, s, s->model_path, s->log_path,
+- s->log_fmt, 0, 0, s->enable_transform,
+- s->phone_model, s->psnr, s->ssim,
+- s->ms_ssim, s->pool,
+- s->n_threads, s->n_subsample, s->enable_conf_interval);
++ return ff_filter_frame(ctx->outputs[0], dist);
+ }
+
+-static void *call_vmaf(void *ctx)
++
++static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
+ {
+- LIBVMAFContext *s = (LIBVMAFContext *) ctx;
+- compute_vmaf_score(s);
+- if (!s->error) {
+- av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
+- } else {
+- pthread_mutex_lock(&s->lock);
+- pthread_cond_signal(&s->cond);
+- pthread_mutex_unlock(&s->lock);
++ AVDictionary **dict = NULL;
++ char *str_copy = NULL;
++ char *saveptr = NULL;
++ unsigned cnt2;
++ int err = 0;
++
++ if (!str)
++ return NULL;
++
++ cnt2 = 1;
++ for (char *p = str; *p; p++) {
++ if (*p == '|')
++ cnt2++;
++ }
++
++ dict = av_calloc(cnt2, sizeof(*dict));
++ if (!dict)
++ goto fail;
++
++ str_copy = av_strdup(str);
++ if (!str_copy)
++ goto fail;
++
++ *cnt = 0;
++ for (unsigned i = 0; i < cnt2; i++) {
++ char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
++ if (!s)
++ continue;
++ err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
++ if (err)
++ goto fail;
++ }
++
++ av_free(str_copy);
++ return dict;
++
++fail:
++ if (dict) {
++ for (unsigned i = 0; i < *cnt; i++) {
++ if (dict[i])
++ av_dict_free(&dict[i]);
++ }
++ av_free(dict);
+ }
+- pthread_exit(NULL);
++
++ av_free(str_copy);
++ *cnt = 0;
+ return NULL;
+ }
+
+-static int do_vmaf(FFFrameSync *fs)
++static int parse_features(AVFilterContext *ctx)
+ {
+- AVFilterContext *ctx = fs->parent;
+ LIBVMAFContext *s = ctx->priv;
+- AVFrame *master, *ref;
+- int ret;
++ AVDictionary **dict = NULL;
++ unsigned dict_cnt;
++ int err = 0;
+
+- ret = ff_framesync_dualinput_get(fs, &master, &ref);
+- if (ret < 0)
+- return ret;
+- if (!ref)
+- return ff_filter_frame(ctx->outputs[0], master);
++ if (!s->feature_cfg)
++ return 0;
++
++ dict = delimited_dict_parse(s->feature_cfg, &dict_cnt);
++ if (!dict) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not parse feature config: %s\n", s->feature_cfg);
++ return AVERROR(EINVAL);
++ }
+
+- pthread_mutex_lock(&s->lock);
++ for (unsigned i = 0; i < dict_cnt; i++) {
++ char *feature_name = NULL;
++ VmafFeatureDictionary *feature_opts_dict = NULL;
++ AVDictionaryEntry *e = NULL;
++
++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
++ if (av_stristr(e->key, "name")) {
++ feature_name = e->value;
++ continue;
++ }
++
++ err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key,
++ e->value);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not set feature option: %s.%s=%s\n",
++ feature_name, e->key, e->value);
++ goto exit;
++ }
++ }
++
++ err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem during vmaf_use_feature: %s\n", feature_name);
++ goto exit;
++ }
++ }
+
+- while (s->frame_set && !s->error) {
+- pthread_cond_wait(&s->cond, &s->lock);
++exit:
++ for (unsigned i = 0; i < dict_cnt; i++) {
++ if (dict[i])
++ av_dict_free(&dict[i]);
+ }
++ av_free(dict);
++ return err;
++}
++
++static int parse_models(AVFilterContext *ctx)
++{
++ LIBVMAFContext *s = ctx->priv;
++ AVDictionary **dict;
++ unsigned dict_cnt;
++ int err = 0;
++
++ if (!s->model_cfg) return 0;
+
+- if (s->error) {
++ dict_cnt = 0;
++ dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
++ if (!dict) {
+ av_log(ctx, AV_LOG_ERROR,
+- "libvmaf encountered an error, check log for details\n");
+- pthread_mutex_unlock(&s->lock);
++ "could not parse model config: %s\n", s->model_cfg);
+ return AVERROR(EINVAL);
+ }
+
+- av_frame_ref(s->gref, ref);
+- av_frame_ref(s->gmain, master);
++ s->model_cnt = dict_cnt;
++ s->model = av_calloc(s->model_cnt, sizeof(*s->model));
++ if (!s->model)
++ return AVERROR(ENOMEM);
++
++ for (unsigned i = 0; i < dict_cnt; i++) {
++ VmafModelConfig model_cfg = { 0 };
++ AVDictionaryEntry *e = NULL;
++ char *version = NULL;
++ char *path = NULL;
++
++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
++ if (av_stristr(e->key, "disable_clip")) {
++ model_cfg.flags |= av_stristr(e->value, "true") ?
++ VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
++ continue;
++ }
++
++ if (av_stristr(e->key, "enable_transform")) {
++ model_cfg.flags |= av_stristr(e->value, "true") ?
++ VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
++ continue;
++ }
++
++ if (av_stristr(e->key, "name")) {
++ model_cfg.name = e->value;
++ continue;
++ }
++
++ if (av_stristr(e->key, "version")) {
++ version = e->value;
++ continue;
++ }
++
++ if (av_stristr(e->key, "path")) {
++ path = e->value;
++ continue;
++ }
++ }
++
++ if (version) {
++ err = vmaf_model_load(&s->model[i], &model_cfg, version);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not load libvmaf model with version: %s\n",
++ version);
++ goto exit;
++ }
++ }
++
++ if (path && !s->model[i]) {
++ err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not load libvmaf model with path: %s\n",
++ path);
++ goto exit;
++ }
++ }
++
++ if (!s->model[i]) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not load libvmaf model with config: %s\n",
++ s->model_cfg);
++ goto exit;
++ }
++
++ while (e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX)) {
++ VmafFeatureDictionary *feature_opts_dict = NULL;
++ char *feature_opt = NULL;
++
++ char *feature_name = av_strtok(e->key, ".", &feature_opt);
++ if (!feature_opt)
++ continue;
++
++ err = vmaf_feature_dictionary_set(&feature_opts_dict,
++ feature_opt, e->value);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not set feature option: %s.%s=%s\n",
++ feature_name, feature_opt, e->value);
++ err = AVERROR(EINVAL);
++ goto exit;
++ }
++
++ err = vmaf_model_feature_overload(s->model[i], feature_name,
++ feature_opts_dict);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "could not overload feature: %s\n", feature_name);
++ err = AVERROR(EINVAL);
++ goto exit;
++ }
++ }
++ }
++
++ for (unsigned i = 0; i < s->model_cnt; i++) {
++ err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem during vmaf_use_features_from_model\n");
++ err = AVERROR(EINVAL);
++ goto exit;
++ }
++ }
++
++exit:
++ for (unsigned i = 0; i < dict_cnt; i++) {
++ if (dict[i])
++ av_dict_free(&dict[i]);
++ }
++ av_free(dict);
++ return err;
++}
++
++static enum VmafLogLevel log_level_map(int log_level)
++{
++ switch (log_level) {
++ case AV_LOG_QUIET:
++ return VMAF_LOG_LEVEL_NONE;
++ case AV_LOG_ERROR:
++ return VMAF_LOG_LEVEL_ERROR;
++ case AV_LOG_WARNING:
++ return VMAF_LOG_LEVEL_WARNING;
++ case AV_LOG_INFO:
++ return VMAF_LOG_LEVEL_INFO;
++ case AV_LOG_DEBUG:
++ return VMAF_LOG_LEVEL_DEBUG;
++ default:
++ return VMAF_LOG_LEVEL_INFO;
++ }
++}
++
++static int parse_deprecated_options(AVFilterContext *ctx)
++{
++ LIBVMAFContext *s = ctx->priv;
++ VmafModel *model = NULL;
++ VmafModelCollection *model_collection = NULL;
++ enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT;
++ int err = 0;
++
++ VmafModelConfig model_cfg = {
++ .name = "vmaf",
++ .flags = flags,
++ };
++
++ if (s->enable_transform || s->phone_model)
++ flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM;
++
++ if (!s->model_path)
++ goto extra_metrics_only;
++
++ if (s->enable_conf_interval) {
++ err = vmaf_model_collection_load_from_path(&model, &model_collection,
++ &model_cfg, s->model_path);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading model file: %s\n", s->model_path);
++ goto exit;
++ }
++
++ err = vmaf_use_features_from_model_collection(s->vmaf, model_collection);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading feature extractors from model file: %s\n",
++ s->model_path);
++ goto exit;
++ }
++ } else {
++ err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading model file: %s\n", s->model_path);
++ goto exit;
++ }
++ err = vmaf_use_features_from_model(s->vmaf, model);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading feature extractors from model file: %s\n",
++ s->model_path);
++ goto exit;
++ }
++ }
++
++extra_metrics_only:
++ if (s->psnr) {
++ VmafFeatureDictionary *d = NULL;
++ vmaf_feature_dictionary_set(&d, "enable_chroma", "false");
++
++ err = vmaf_use_feature(s->vmaf, "psnr", d);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading feature extractor: psnr\n");
++ goto exit;
++ }
++ }
+
+- s->frame_set = 1;
++ if (s->ssim) {
++ err = vmaf_use_feature(s->vmaf, "float_ssim", NULL);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading feature extractor: ssim\n");
++ goto exit;
++ }
++ }
+
+- pthread_cond_signal(&s->cond);
+- pthread_mutex_unlock(&s->lock);
++ if (s->ms_ssim) {
++ err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem loading feature extractor: ms_ssim\n");
++ goto exit;
++ }
++ }
+
+- return ff_filter_frame(ctx->outputs[0], master);
++exit:
++ return err;
+ }
+
+ static av_cold int init(AVFilterContext *ctx)
+ {
+ LIBVMAFContext *s = ctx->priv;
++ int err = 0;
+
+- s->gref = av_frame_alloc();
+- s->gmain = av_frame_alloc();
+- if (!s->gref || !s->gmain)
+- return AVERROR(ENOMEM);
++ VmafConfiguration cfg = {
++ .log_level = log_level_map(av_log_get_level()),
++ .n_subsample = s->n_subsample,
++ .n_threads = s->n_threads,
++ };
++
++ err = vmaf_init(&s->vmaf, cfg);
++ if (err)
++ return AVERROR(EINVAL);
++
++ err = parse_deprecated_options(ctx);
++ if (err)
++ return err;
+
+- s->error = 0;
++ err = parse_models(ctx);
++ if (err)
++ return err;
+
+- s->vmaf_thread_created = 0;
+- pthread_mutex_init(&s->lock, NULL);
+- pthread_cond_init (&s->cond, NULL);
++ err = parse_features(ctx);
++ if (err)
++ return err;
+
+ s->fs.on_event = do_vmaf;
+ return 0;
+@@ -256,26 +545,31 @@ static const enum AVPixelFormat pix_fmts[] = {
+
+ static int config_input_ref(AVFilterLink *inlink)
+ {
+- AVFilterContext *ctx = inlink->dst;
++ AVFilterContext *ctx = inlink->dst;
+ LIBVMAFContext *s = ctx->priv;
+- int th;
++ const AVPixFmtDescriptor *desc;
++ int err = 0;
+
+- if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
+- ctx->inputs[0]->h != ctx->inputs[1]->h) {
+- av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
+- return AVERROR(EINVAL);
++ if (ctx->inputs[0]->w != ctx->inputs[1]->w) {
++ av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
++ err |= AVERROR(EINVAL);
+ }
+
+- s->desc = av_pix_fmt_desc_get(inlink->format);
+- s->width = ctx->inputs[0]->w;
+- s->height = ctx->inputs[0]->h;
++ if (ctx->inputs[0]->h != ctx->inputs[1]->h) {
++ av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
++ err |= AVERROR(EINVAL);
++ }
+
+- th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
+- if (th) {
+- av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
+- return AVERROR(EINVAL);
++ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
++ av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
++ err |= AVERROR(EINVAL);
+ }
+- s->vmaf_thread_created = 1;
++
++ if (err)
++ return err;
++
++ desc = av_pix_fmt_desc_get(inlink->format);
++ s->bpc = desc->comp[0].depth;
+
+ return 0;
+ }
+@@ -307,28 +601,80 @@ static int activate(AVFilterContext *ctx)
+ return ff_framesync_activate(&s->fs);
+ }
+
++static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
++{
++ if (log_fmt) {
++ if (av_stristr(log_fmt, "xml"))
++ return VMAF_OUTPUT_FORMAT_XML;
++ if (av_stristr(log_fmt, "json"))
++ return VMAF_OUTPUT_FORMAT_JSON;
++ if (av_stristr(log_fmt, "csv"))
++ return VMAF_OUTPUT_FORMAT_CSV;
++ if (av_stristr(log_fmt, "sub"))
++ return VMAF_OUTPUT_FORMAT_SUB;
++ }
++
++ return VMAF_OUTPUT_FORMAT_XML;
++}
++
++static enum VmafPoolingMethod pool_method_map(const char *pool_method)
++{
++ if (pool_method) {
++ if (av_stristr(pool_method, "min"))
++ return VMAF_POOL_METHOD_MIN;
++ if (av_stristr(pool_method, "mean"))
++ return VMAF_POOL_METHOD_MEAN;
++ if (av_stristr(pool_method, "harmonic_mean"))
++ return VMAF_POOL_METHOD_HARMONIC_MEAN;
++ }
++
++ return VMAF_POOL_METHOD_MEAN;
++}
++
+ static av_cold void uninit(AVFilterContext *ctx)
+ {
+ LIBVMAFContext *s = ctx->priv;
++ int err = 0;
+
+ ff_framesync_uninit(&s->fs);
+
+- pthread_mutex_lock(&s->lock);
+- s->eof = 1;
+- pthread_cond_signal(&s->cond);
+- pthread_mutex_unlock(&s->lock);
++ if (!s->frame_cnt)
++ goto clean_up;
+
+- if (s->vmaf_thread_created)
+- {
+- pthread_join(s->vmaf_thread, NULL);
+- s->vmaf_thread_created = 0;
++ err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem flushing libvmaf context.\n");
+ }
+
+- av_frame_free(&s->gref);
+- av_frame_free(&s->gmain);
++ for (unsigned i = 0; i < s->model_cnt; i++) {
++ double vmaf_score;
++ err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
++ &vmaf_score, 0, s->frame_cnt - 1);
++ if (err) {
++ av_log(ctx, AV_LOG_ERROR,
++ "problem getting pooled vmaf score.\n");
++ }
++
++ av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
++ }
++
++ if (s->vmaf) {
++ if (s->log_path && !err)
++ vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
++ }
++
++clean_up:
++ if (s->model) {
++ for (unsigned i = 0; i < s->model_cnt; i++) {
++ if (s->model[i])
++ vmaf_model_destroy(s->model[i]);
++ }
++ av_free(s->model);
++ }
+
+- pthread_mutex_destroy(&s->lock);
+- pthread_cond_destroy(&s->cond);
++ if (s->vmaf)
++ vmaf_close(s->vmaf);
+ }
+
+ static const AVFilterPad libvmaf_inputs[] = {
+--
+2.20.1
+