[arch-commits] Commit in libopenshot/repos/community-staging-x86_64 (4 files)

Evangelos Foutras <foutrelis at archlinux.org>
Thu Nov 12 12:08:20 UTC 2020

    Date: Thursday, November 12, 2020 @ 12:08:19
  Author: foutrelis
Revision: 751677

archrelease: copy trunk to community-staging-x86_64

Added:
  libopenshot/repos/community-staging-x86_64/PKGBUILD
    (from rev 751676, libopenshot/trunk/PKGBUILD)
  libopenshot/repos/community-staging-x86_64/ffmpeg-4.0.patch
    (from rev 751676, libopenshot/trunk/ffmpeg-4.0.patch)
Deleted:
  libopenshot/repos/community-staging-x86_64/PKGBUILD
  libopenshot/repos/community-staging-x86_64/ffmpeg-4.0.patch

------------------+
 PKGBUILD         |  122 ++--
 ffmpeg-4.0.patch | 1606 ++++++++++++++++++++++++++---------------------------
 2 files changed, 864 insertions(+), 864 deletions(-)

Deleted: PKGBUILD
===================================================================
--- PKGBUILD	2020-11-12 12:08:14 UTC (rev 751676)
+++ PKGBUILD	2020-11-12 12:08:19 UTC (rev 751677)
@@ -1,61 +0,0 @@
-# Maintainer: David Runge <dvzrv at archlinux.org>
-# Contributor: Martin Wimpress <code at flexion.org>
-# Contributor: Foster McLane <fkmclane at gmail.com>
-# Contributor: Jonathan Thomas <jonathan at openshot.org>
-
-pkgname=libopenshot
-pkgver=0.2.5
-pkgrel=6
-pkgdesc="A video editing, animation, and playback library for C++, Python, and Ruby"
-arch=('x86_64')
-url="https://github.com/openshot/libopenshot"
-license=('LGPL3')
-# TODO: package cppzmq and resvg
-depends=('gcc-libs' 'glibc' 'libmagick' 'python' 'qt5-base' 'qt5-multimedia'
-'zeromq')
-makedepends=('cmake' 'doxygen' 'ffmpeg' 'jsoncpp' 'libopenshot-audio' 'swig'
-'unittestpp' 'x264')
-provides=('libopenshot.so')
-source=("$pkgname-$pkgver.tar.gz::https://github.com/OpenShot/libopenshot/archive/v$pkgver.tar.gz"
-        "$pkgname-0.2.5-gcc10.patch::https://github.com/OpenShot/libopenshot/pull/512/commits/13290364e7bea54164ab83d973951f2898ad9e23.patch")
-sha512sums=('b7cdf72897e6edaa8cc00e17dbe30f5b22a6b5d69aab64ddafb184458b41ef0332db1f3e2c6f039492bf7adb521d9758834d0bf6c24e6421a55970d8cf8caba7'
-            'ec492cf09563671b79850035c8138df3bbc4b2f9bd1261aa410a85e0ed977e1a2fcc1299a855490f93fe677e6b9c232d323d3fbf56be8470b420280f352b18dc')
-
-prepare() {
-  cd "${pkgname}-${pkgver}"
-  # fix build with gcc >= 10
-  # https://github.com/OpenShot/libopenshot/pull/512
-  patch -Np1 -i "../$pkgname-0.2.5-gcc10.patch"
-}
-
-build() {
-  cd "${pkgname}-${pkgver}"
-  local python_version=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))')
-  export PYTHON_LIBRARIES="/usr/lib/libpython3.so"
-  export PYTHON_INCLUDE_DIRS="/usr/include/python${python_version}"
-  cmake -DCMAKE_INSTALL_PREFIX='/usr' \
-        -DCMAKE_BUILD_TYPE='None' \
-        -DENABLE_RUBY=OFF \
-        -DMAGICKCORE_HDRI_ENABLE=1 \
-        -DMAGICKCORE_QUANTUM_DEPTH=16 \
-        -DPYTHON_LIBRARIES="/usr/lib/libpython3.so" \
-        -DPYTHON_INCLUDE_DIRS="/usr/include/python${python_version}" \
-        -DUSE_SYSTEM_JSONCPP=ON \
-        -Wno-dev \
-        -B build \
-        -S .
-  make -C build
-}
-
-check() {
-  cd "${pkgname}-${pkgver}"
-  make -C build test
-}
-
-package() {
-  depends+=('libavcodec.so' 'libavformat.so' 'libavutil.so' 'libjsoncpp.so'
-  'libopenshot-audio.so' 'libswscale.so' 'libswresample.so' 'libx264.so')
-  cd "${pkgname}-${pkgver}"
-  make -C build DESTDIR="${pkgdir}" install
-  install -vDm 644 {AUTHORS,README.md} -t "${pkgdir}/usr/share/doc/${pkgname}"
-}

Copied: libopenshot/repos/community-staging-x86_64/PKGBUILD (from rev 751676, libopenshot/trunk/PKGBUILD)
===================================================================
--- PKGBUILD	                        (rev 0)
+++ PKGBUILD	2020-11-12 12:08:19 UTC (rev 751677)
@@ -0,0 +1,61 @@
+# Maintainer: David Runge <dvzrv at archlinux.org>
+# Contributor: Martin Wimpress <code at flexion.org>
+# Contributor: Foster McLane <fkmclane at gmail.com>
+# Contributor: Jonathan Thomas <jonathan at openshot.org>
+
+pkgname=libopenshot
+pkgver=0.2.5
+pkgrel=7
+pkgdesc="A video editing, animation, and playback library for C++, Python, and Ruby"
+arch=('x86_64')
+url="https://github.com/openshot/libopenshot"
+license=('LGPL3')
+# TODO: package cppzmq and resvg
+depends=('gcc-libs' 'glibc' 'libmagick' 'python' 'qt5-base' 'qt5-multimedia'
+'zeromq')
+makedepends=('cmake' 'doxygen' 'ffmpeg' 'jsoncpp' 'libopenshot-audio' 'swig'
+'unittestpp' 'x264')
+provides=('libopenshot.so')
+source=("$pkgname-$pkgver.tar.gz::https://github.com/OpenShot/libopenshot/archive/v$pkgver.tar.gz"
+        "$pkgname-0.2.5-gcc10.patch::https://github.com/OpenShot/libopenshot/pull/512/commits/13290364e7bea54164ab83d973951f2898ad9e23.patch")
+sha512sums=('b7cdf72897e6edaa8cc00e17dbe30f5b22a6b5d69aab64ddafb184458b41ef0332db1f3e2c6f039492bf7adb521d9758834d0bf6c24e6421a55970d8cf8caba7'
+            'ec492cf09563671b79850035c8138df3bbc4b2f9bd1261aa410a85e0ed977e1a2fcc1299a855490f93fe677e6b9c232d323d3fbf56be8470b420280f352b18dc')
+
+prepare() {
+  cd "${pkgname}-${pkgver}"
+  # fix build with gcc >= 10
+  # https://github.com/OpenShot/libopenshot/pull/512
+  patch -Np1 -i "../$pkgname-0.2.5-gcc10.patch"
+}
+
+build() {
+  cd "${pkgname}-${pkgver}"
+  local python_version=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))')
+  export PYTHON_LIBRARIES="/usr/lib/libpython3.so"
+  export PYTHON_INCLUDE_DIRS="/usr/include/python${python_version}"
+  cmake -DCMAKE_INSTALL_PREFIX='/usr' \
+        -DCMAKE_BUILD_TYPE='None' \
+        -DENABLE_RUBY=OFF \
+        -DMAGICKCORE_HDRI_ENABLE=1 \
+        -DMAGICKCORE_QUANTUM_DEPTH=16 \
+        -DPYTHON_LIBRARIES="/usr/lib/libpython3.so" \
+        -DPYTHON_INCLUDE_DIRS="/usr/include/python${python_version}" \
+        -DUSE_SYSTEM_JSONCPP=ON \
+        -Wno-dev \
+        -B build \
+        -S .
+  make -C build
+}
+
+check() {
+  cd "${pkgname}-${pkgver}"
+  make -C build test
+}
+
+package() {
+  depends+=('libavcodec.so' 'libavformat.so' 'libavutil.so' 'libjsoncpp.so'
+  'libopenshot-audio.so' 'libswscale.so' 'libswresample.so' 'libx264.so')
+  cd "${pkgname}-${pkgver}"
+  make -C build DESTDIR="${pkgdir}" install
+  install -vDm 644 {AUTHORS,README.md} -t "${pkgdir}/usr/share/doc/${pkgname}"
+}

Deleted: ffmpeg-4.0.patch
===================================================================
--- ffmpeg-4.0.patch	2020-11-12 12:08:14 UTC (rev 751676)
+++ ffmpeg-4.0.patch	2020-11-12 12:08:19 UTC (rev 751677)
@@ -1,803 +0,0 @@
-diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
-index 4af6cc9..63d543d 100644
---- a/cmake/Modules/FindFFmpeg.cmake
-+++ b/cmake/Modules/FindFFmpeg.cmake
-@@ -77,14 +77,14 @@ FIND_LIBRARY( SWSCALE_LIBRARY swscale swscale-2 swscale-4
- 		   	 $ENV{FFMPEGDIR}/lib/ffmpeg/
- 		   	 $ENV{FFMPEGDIR}/bin/ )
- 
--#FindAvresample
--FIND_PATH( AVRESAMPLE_INCLUDE_DIR libavresample/avresample.h
-+#FindSwresample
-+FIND_PATH( SWRESAMPLE_INCLUDE_DIR libswresample/swresample.h
- 		   PATHS /usr/include/
- 		   	 /usr/include/ffmpeg/
- 		   	 $ENV{FFMPEGDIR}/include/
- 		   	 $ENV{FFMPEGDIR}/include/ffmpeg/ )
- 
--FIND_LIBRARY( AVRESAMPLE_LIBRARY avresample avresample-2 avresample-3
-+FIND_LIBRARY( SWRESAMPLE_LIBRARY swresample
- 		   PATHS /usr/lib/
- 		   	 /usr/lib/ffmpeg/
- 		   	 $ENV{FFMPEGDIR}/lib/
-@@ -113,31 +113,31 @@ IF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
-     SET ( SWSCALE_FOUND TRUE )
- ENDIF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
- 
--IF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
--    SET ( AVRESAMPLE_FOUND TRUE )
--ENDIF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
-+IF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
-+    SET ( SWRESAMPLE_FOUND TRUE )
-+ENDIF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
- 
--IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
-+IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
- 
- 	SET ( FFMPEG_FOUND TRUE )
- 
- 	SET ( FFMPEG_INCLUDE_DIR
--		  ${AVFORMAT_INCLUDE_DIR}
--		  ${AVCODEC_INCLUDE_DIR}
--		  ${AVUTIL_INCLUDE_DIR}
--		  ${AVDEVICE_INCLUDE_DIR}
--		  ${SWSCALE_INCLUDE_DIR}
--		  ${AVRESAMPLE_INCLUDE_DIR} )
--	
-+			${AVFORMAT_INCLUDE_DIR}
-+			${AVCODEC_INCLUDE_DIR}
-+			${AVUTIL_INCLUDE_DIR}
-+			${AVDEVICE_INCLUDE_DIR}
-+			${SWSCALE_INCLUDE_DIR}
-+			${SWRESAMPLE_INCLUDE_DIR} )
-+
- 	SET ( FFMPEG_LIBRARIES 
--		  ${AVFORMAT_LIBRARY}
--		  ${AVCODEC_LIBRARY}
--		  ${AVUTIL_LIBRARY}
--		  ${AVDEVICE_LIBRARY}
--		  ${SWSCALE_LIBRARY}
--		  ${AVRESAMPLE_LIBRARY} )
--
--ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
-+			${AVFORMAT_LIBRARY}
-+			${AVCODEC_LIBRARY}
-+			${AVUTIL_LIBRARY}
-+			${AVDEVICE_LIBRARY}
-+			${SWSCALE_LIBRARY}
-+			${SWRESAMPLE_LIBRARY} )
-+
-+ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
- 
- MARK_AS_ADVANCED(
-   FFMPEG_LIBRARY_DIR
-diff --git a/include/CrashHandler.h b/include/CrashHandler.h
-index e3a4bbe..12c79a8 100644
---- a/include/CrashHandler.h
-+++ b/include/CrashHandler.h
-@@ -53,13 +53,15 @@ namespace openshot {
- 	class CrashHandler {
- 	private:
- 		/// Default constructor
--		CrashHandler(){}; 						 // Don't allow user to create an instance of this singleton
-+		CrashHandler(){return;}; 						 // Don't allow user to create an instance of this singleton
- 
- 		/// Default copy method
--		CrashHandler(CrashHandler const&){};             // Don't allow the user to copy this instance
-+		//CrashHandler(CrashHandler const&){};             // Don't allow the user to copy this instance
-+		CrashHandler(CrashHandler const&) = delete;             // Don't allow the user to copy this instance
- 
- 		/// Default assignment operator
--		CrashHandler & operator=(CrashHandler const&){};  // Don't allow the user to assign this instance
-+		//CrashHandler & operator=(CrashHandler const&){};  // Don't allow the user to assign this instance
-+		CrashHandler & operator=(CrashHandler const&) = delete;  // Don't allow the user to assign this instance
- 
- 		/// Private variable to keep track of singleton instance
- 		static CrashHandler *m_pInstance;
-diff --git a/include/FFmpegReader.h b/include/FFmpegReader.h
-index 6072756..e2c4863 100644
---- a/include/FFmpegReader.h
-+++ b/include/FFmpegReader.h
-@@ -105,6 +105,7 @@ namespace openshot
- 		bool check_interlace;
- 		bool check_fps;
- 		bool has_missing_frames;
-+		bool use_omp_threads;
- 
- 		CacheMemory working_cache;
- 		CacheMemory missing_frames;
-diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h
-index 578c658..346da54 100644
---- a/include/FFmpegUtilities.h
-+++ b/include/FFmpegUtilities.h
-@@ -43,7 +43,15 @@
- 		#include <libavcodec/avcodec.h>
- 		#include <libavformat/avformat.h>
- 		#include <libswscale/swscale.h>
-+		// Change this to the first version swrescale works
-+	#if (LIBAVFORMAT_VERSION_MAJOR >= 57)
-+			#define USE_SW
-+	#endif
-+	#ifdef USE_SW
-+		#include <libswresample/swresample.h>
-+	#else
- 		#include <libavresample/avresample.h>
-+	#endif
- 		#include <libavutil/mathematics.h>
- 		#include <libavutil/pixfmt.h>
- 		#include <libavutil/pixdesc.h>
-@@ -106,7 +114,65 @@
- 		#define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
- 	#endif
- 
--	#if IS_FFMPEG_3_2
-+	#ifdef USE_SW
-+		#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
-+			swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count)
-+		#define SWR_ALLOC() swr_alloc()
-+		#define SWR_CLOSE(ctx) {}
-+		#define SWR_FREE(ctx) swr_free(ctx)
-+		#define SWR_INIT(ctx)  swr_init(ctx)
-+		#define SWRCONTEXT SwrContext
-+	#else
-+		#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
-+			avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count)
-+		#define SWR_ALLOC() avresample_alloc_context()
-+		#define SWR_CLOSE(ctx) avresample_close(ctx)
-+		#define SWR_FREE(ctx) avresample_free(ctx)
-+		#define SWR_INIT(ctx)  avresample_open(ctx)
-+		#define SWRCONTEXT AVAudioResampleContext
-+	#endif
-+
-+
-+	#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
-+		#define AV_REGISTER_ALL
-+		#define AVCODEC_REGISTER_ALL
-+		#define AV_FILENAME url
-+		#define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
-+		#define AV_ALLOCATE_FRAME() av_frame_alloc()
-+		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
-+		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
-+    	#define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
-+		#define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
-+		#define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
-+		#define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
-+		#define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
-+		auto AV_GET_CODEC_CONTEXT = [](AVStream* av_stream, AVCodec* av_codec) { \
-+			AVCodecContext *context = avcodec_alloc_context3(av_codec); \
-+			avcodec_parameters_to_context(context, av_stream->codecpar); \
-+			return context; \
-+		};
-+		#define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
-+		#define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
-+		#define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
-+		#define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
-+		#define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
-+		#define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
-+		#define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
-+		#define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
-+		#define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
-+		#define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) 	av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
-+		#define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) 	av_st = avformat_new_stream(oc, NULL);\
-+			if (!av_st) \
-+				throw OutOfMemory("Could not allocate memory for the video stream.", path); \
-+			c = avcodec_alloc_context3(av_codec); \
-+			st_codec = c; \
-+			av_st->codecpar->codec_id = av_codec->id;
-+		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
-+	#elif IS_FFMPEG_3_2
-+		#define AV_REGISTER_ALL av_register_all();
-+		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
-+		#define AV_FILENAME filename
-+		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- 		#define AV_ALLOCATE_FRAME() av_frame_alloc()
- 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
- 		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
-@@ -138,6 +204,10 @@
- 			av_st->codecpar->codec_id = av_codec->id;
- 		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
- 	#elif LIBAVFORMAT_VERSION_MAJOR >= 55
-+		#define AV_REGISTER_ALL av_register_all();
-+		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
-+		#define AV_FILENAME filename
-+		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- 		#define AV_ALLOCATE_FRAME() av_frame_alloc()
- 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
- 		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
-@@ -164,6 +234,10 @@
- 			c = av_st->codec;
- 		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
- 	#else
-+		#define AV_REGISTER_ALL av_register_all();
-+		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
-+		#define AV_FILENAME filename
-+		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- 		#define AV_ALLOCATE_FRAME() avcodec_alloc_frame()
- 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
- 		#define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame)
-diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
-index 8343002..7eefacb 100644
---- a/include/FFmpegWriter.h
-+++ b/include/FFmpegWriter.h
-@@ -174,8 +174,8 @@ namespace openshot
- 	    int initial_audio_input_frame_size;
- 	    int audio_input_position;
- 	    int audio_encoder_buffer_size;
--	    AVAudioResampleContext *avr;
--	    AVAudioResampleContext *avr_planar;
-+	    SWRCONTEXT *avr;
-+	    SWRCONTEXT *avr_planar;
- 
- 	    /* Resample options */
- 	    int original_sample_rate;
-diff --git a/include/Frame.h b/include/Frame.h
-index a7ad509..eba7f8b 100644
---- a/include/Frame.h
-+++ b/include/Frame.h
-@@ -62,7 +62,7 @@
- #include "AudioResampler.h"
- #include "Fraction.h"
- 
--
-+#pragma SWIG nowarn=362
- using namespace std;
- 
- namespace openshot
-diff --git a/include/FrameMapper.h b/include/FrameMapper.h
-index e70fdbc..043b5e4 100644
---- a/include/FrameMapper.h
-+++ b/include/FrameMapper.h
-@@ -146,7 +146,7 @@ namespace openshot
- 		ReaderBase *reader;		// The source video reader
- 		CacheMemory final_cache; 		// Cache of actual Frame objects
- 		bool is_dirty; 			// When this is true, the next call to GetFrame will re-init the mapping
--		AVAudioResampleContext *avr;	// Audio resampling context object
-+		SWRCONTEXT *avr;	// Audio resampling context object
- 
- 		// Internal methods used by init
- 		void AddField(int64_t frame);
-diff --git a/include/OpenMPUtilities.h b/include/OpenMPUtilities.h
-index 8a95a95..c0f5597 100644
---- a/include/OpenMPUtilities.h
-+++ b/include/OpenMPUtilities.h
-@@ -29,8 +29,26 @@
- #define OPENSHOT_OPENMP_UTILITIES_H
- 
- #include <omp.h>
-+#include <stdlib.h>
-+#include <string.h>
- 
--	// Calculate the # of OpenMP Threads to allow
--	#define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
-+// Calculate the # of OpenMP Threads to allow
-+#define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
-+
-+using namespace std;
-+
-+namespace openshot {
-+
-+	// Check if OS2_OMP_THREADS environment variable is present, and return
-+	// if multiple threads should be used with OMP
-+	static bool IsOMPEnabled() {
-+		char* OS2_OMP_THREADS = getenv("OS2_OMP_THREADS");
-+		if (OS2_OMP_THREADS != NULL && strcmp(OS2_OMP_THREADS, "0") == 0)
-+			return false;
-+		else
-+			return true;
-+	}
-+
-+}
- 
- #endif
-diff --git a/include/ZmqLogger.h b/include/ZmqLogger.h
-index c134f2c..e825ed0 100644
---- a/include/ZmqLogger.h
-+++ b/include/ZmqLogger.h
-@@ -72,11 +72,19 @@ namespace openshot {
- 		/// Default constructor
- 		ZmqLogger(){}; 						 // Don't allow user to create an instance of this singleton
- 
-+#if __GNUC__ >=7
- 		/// Default copy method
--		ZmqLogger(ZmqLogger const&){};             // Don't allow the user to copy this instance
-+		ZmqLogger(ZmqLogger const&) = delete; // Don't allow the user to assign this instance
- 
- 		/// Default assignment operator
--		ZmqLogger & operator=(ZmqLogger const&){};  // Don't allow the user to assign this instance
-+		ZmqLogger & operator=(ZmqLogger const&) = delete;  // Don't allow the user to assign this instance
-+#else
-+		/// Default copy method
-+		ZmqLogger(ZmqLogger const&) {}; // Don't allow the user to assign this instance
-+
-+		/// Default assignment operator
-+		ZmqLogger & operator=(ZmqLogger const&);  // Don't allow the user to assign this instance
-+#endif
- 
- 		/// Private variable to keep track of singleton instance
- 		static ZmqLogger * m_pInstance;
-diff --git a/src/Clip.cpp b/src/Clip.cpp
-index 913fd71..63e7741 100644
---- a/src/Clip.cpp
-+++ b/src/Clip.cpp
-@@ -925,13 +925,14 @@ void Clip::SetJsonValue(Json::Value root) {
- 
- 			if (!existing_effect["type"].isNull()) {
- 				// Create instance of effect
--				e = EffectInfo().CreateEffect(existing_effect["type"].asString());
-+				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
- 
--				// Load Json into Effect
--				e->SetJsonValue(existing_effect);
-+					// Load Json into Effect
-+					e->SetJsonValue(existing_effect);
- 
--				// Add Effect to Timeline
--				AddEffect(e);
-+					// Add Effect to Timeline
-+					AddEffect(e);
-+				}
- 			}
- 		}
- 	}
-diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp
-index 23bc9d0..f9e4c40 100644
---- a/src/EffectInfo.cpp
-+++ b/src/EffectInfo.cpp
-@@ -82,6 +82,7 @@ EffectBase* EffectInfo::CreateEffect(string effect_type) {
- 
- 	else if (effect_type == "Wave")
- 		return new Wave();
-+	return NULL;
- }
- 
- // Generate Json::JsonValue for this object
-diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
-index 5a4c936..3e29cf7 100644
---- a/src/FFmpegReader.cpp
-+++ b/src/FFmpegReader.cpp
-@@ -37,11 +37,12 @@ FFmpegReader::FFmpegReader(string path)
- 	  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
- 	  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
- 	  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
--	  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
-+	  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
-+	  packet(NULL), use_omp_threads(true) {
- 
- 	// Initialize FFMpeg, and register all formats and codecs
--	av_register_all();
--	avcodec_register_all();
-+	AV_REGISTER_ALL
-+	AVCODEC_REGISTER_ALL
- 
- 	// Init cache
- 	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
-@@ -58,11 +59,12 @@ FFmpegReader::FFmpegReader(string path, bool inspect_reader)
- 		  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
- 		  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
- 		  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
--		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
-+		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
-+		  packet(NULL), use_omp_threads(true) {
- 
- 	// Initialize FFMpeg, and register all formats and codecs
--	av_register_all();
--	avcodec_register_all();
-+	AV_REGISTER_ALL
-+	AVCODEC_REGISTER_ALL
- 
- 	// Init cache
- 	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
-@@ -227,6 +229,9 @@ void FFmpegReader::Open()
- 		missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
- 		final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
- 
-+		// Initialize OMP threading support
-+		use_omp_threads = openshot::IsOMPEnabled();
-+
- 		// Mark as "open"
- 		is_open = true;
- 	}
-@@ -606,6 +611,12 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
- 
- 						// Process Video Packet
- 						ProcessVideoPacket(requested_frame);
-+
-+						if (!use_omp_threads) {
-+							// Wait on each OMP task to complete before moving on to the next one. This slows
-+							// down processing considerably, but might be more stable on some systems.
-+							#pragma omp taskwait
-+						}
- 					}
- 
- 				}
-@@ -638,7 +649,6 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
- 				}
- 
- 				// Check if working frames are 'finished'
--				bool is_cache_found = false;
- 				if (!is_seeking) {
- 					// Check for any missing frames
- 					CheckMissingFrame(requested_frame);
-@@ -648,7 +658,7 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
- 				}
- 
- 				// Check if requested 'final' frame is available
--				is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
-+				bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
- 
- 				// Increment frames processed
- 				packets_processed++;
-@@ -978,7 +988,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
- 	int data_size = 0;
- 
- 	// re-initialize buffer size (it gets changed in the avcodec_decode_audio2 method call)
--	int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
-+	int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE;
- 	#pragma omp critical (ProcessAudioPacket)
- 	{
- 	#if IS_FFMPEG_3_2
-@@ -1083,7 +1093,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
- 
- 
- 	// Allocate audio buffer
--	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
-+	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];
- 
- 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16, "", -1);
- 
-@@ -1093,11 +1103,11 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
- 	audio_converted->nb_samples = audio_frame->nb_samples;
- 	av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
- 
--	AVAudioResampleContext *avr = NULL;
-+	SWRCONTEXT *avr = NULL;
- 	int nb_samples = 0;
- 
- 	// setup resample context
--	avr = avresample_alloc_context();
-+	avr = SWR_ALLOC();
- 	av_opt_set_int(avr,  "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
- 	av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
- 	av_opt_set_int(avr,  "in_sample_fmt",     AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
-@@ -1106,10 +1116,10 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
- 	av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
- 	av_opt_set_int(avr,  "in_channels",       info.channels,    0);
- 	av_opt_set_int(avr, "out_channels",       info.channels,    0);
--	int r = avresample_open(avr);
-+	int r = SWR_INIT(avr);
- 
- 	// Convert audio samples
--	nb_samples = avresample_convert(avr, 	// audio resample context
-+	nb_samples = SWR_CONVERT(avr, 	// audio resample context
- 			audio_converted->data, 			// output data pointers
- 			audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
- 			audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
-@@ -1121,8 +1131,8 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
- 	memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);
- 
- 	// Deallocate resample buffer
--	avresample_close(avr);
--	avresample_free(&avr);
-+	SWR_CLOSE(avr);
-+	SWR_FREE(&avr);
- 	avr = NULL;
- 
- 	// Free AVFrames
-@@ -1348,7 +1358,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
- 		{
- 			seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
- 			if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
--				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->filename);
-+				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
- 			} else
- 			{
- 				// VIDEO SEEK
-@@ -1362,7 +1372,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
- 		{
- 			seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
- 			if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
--				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->filename);
-+				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
- 			} else
- 			{
- 				// AUDIO SEEK
-diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
-index 4416040..d106901 100644
---- a/src/FFmpegWriter.cpp
-+++ b/src/FFmpegWriter.cpp
-@@ -46,7 +46,7 @@ FFmpegWriter::FFmpegWriter(string path) :
- 	info.has_video = false;
- 
- 	// Initialize FFMpeg, and register all formats and codecs
--	av_register_all();
-+	AV_REGISTER_ALL
- 
- 	// auto detect format
- 	auto_detect_format();
-@@ -299,7 +299,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value)
- /// Determine if codec name is valid
- bool FFmpegWriter::IsValidCodec(string codec_name) {
- 	// Initialize FFMpeg, and register all formats and codecs
--	av_register_all();
-+	AV_REGISTER_ALL
- 
- 	// Find the codec (if any)
- 	if (avcodec_find_encoder_by_name(codec_name.c_str()) == NULL)
-@@ -342,7 +342,7 @@ void FFmpegWriter::WriteHeader()
- 	}
- 
-     // Force the output filename (which doesn't always happen for some reason)
--    snprintf(oc->filename, sizeof(oc->filename), "%s", path.c_str());
-+		snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", path.c_str());
- 
- 	// Write the stream header, if any
- 	// TODO: add avoptions / parameters instead of NULL
-@@ -559,8 +559,10 @@ void FFmpegWriter::flush_encoders()
- {
- 	if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1)
- 		return;
-+#if (LIBAVFORMAT_VERSION_MAJOR < 58)
- 	if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
- 		return;
-+#endif
- 
-     int error_code = 0;
-     int stop_encoding = 1;
-@@ -734,14 +736,14 @@ void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st)
- 
- 	// Deallocate resample buffer
- 	if (avr) {
--		avresample_close(avr);
--		avresample_free(&avr);
-+		SWR_CLOSE(avr);
-+		SWR_FREE(&avr);
- 		avr = NULL;
- 	}
- 
- 	if (avr_planar) {
--		avresample_close(avr_planar);
--		avresample_free(&avr_planar);
-+		SWR_CLOSE(avr_planar);
-+		SWR_FREE(&avr_planar);
- 		avr_planar = NULL;
- 	}
- }
-@@ -881,7 +883,11 @@ AVStream* FFmpegWriter::add_audio_stream()
- 
- 	// some formats want stream headers to be separate
- 	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-+#if (LIBAVCODEC_VERSION_MAJOR >= 57)
-+		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-+#else
- 		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-+#endif
- 
- 	AV_COPY_PARAMS_FROM_CONTEXT(st, c);
- 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_audio_stream", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->channels", c->channels, "c->sample_fmt", c->sample_fmt, "c->channel_layout", c->channel_layout, "c->sample_rate", c->sample_rate);
-@@ -953,7 +959,11 @@ AVStream* FFmpegWriter::add_video_stream()
- 		c->mb_decision = 2;
- 	// some formats want stream headers to be separate
- 	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-+#if (LIBAVCODEC_VERSION_MAJOR >= 57)
-+		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-+#else
- 		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-+#endif
- 
- 	// Find all supported pixel formats for this codec
-     const PixelFormat* supported_pixel_formats = codec->pix_fmts;
-@@ -970,10 +980,12 @@ AVStream* FFmpegWriter::add_video_stream()
-             // Raw video should use RGB24
-         	c->pix_fmt = PIX_FMT_RGB24;
- 
-+#if (LIBAVFORMAT_VERSION_MAJOR < 58)
-         if (strcmp(fmt->name, "gif") != 0)
- 			// If not GIF format, skip the encoding process
- 			// Set raw picture flag (so we don't encode this video)
- 			oc->oformat->flags |= AVFMT_RAWPICTURE;
-+#endif
-         } else {
-         	// Set the default codec
-         	c->pix_fmt = PIX_FMT_YUV420P;
-@@ -981,7 +993,11 @@ AVStream* FFmpegWriter::add_video_stream()
-     }
- 
- 	AV_COPY_PARAMS_FROM_CONTEXT(st, c);
-+#if (LIBAVFORMAT_VERSION_MAJOR < 58)
- 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "AVFMT_RAWPICTURE", AVFMT_RAWPICTURE, "", -1);
-+#else
-+	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1);
-+#endif
- 
- 	return st;
- }
-@@ -1056,7 +1072,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st)
- 		av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
- 	}
- 
--	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
-+	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
- 
- }
- 
-@@ -1222,7 +1238,7 @@ void FFmpegWriter::write_audio_packets(bool final)
- 
- 			// setup resample context
- 			if (!avr) {
--				avr = avresample_alloc_context();
-+				avr = SWR_ALLOC();
- 				av_opt_set_int(avr,  "in_channel_layout", channel_layout_in_frame, 0);
- 				av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
- 				av_opt_set_int(avr,  "in_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
-@@ -1231,12 +1247,12 @@ void FFmpegWriter::write_audio_packets(bool final)
- 				av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
- 				av_opt_set_int(avr,  "in_channels",       channels_in_frame,    0);
- 				av_opt_set_int(avr, "out_channels",       info.channels,    0);
--				avresample_open(avr);
-+				SWR_INIT(avr);
- 			}
- 			int nb_samples = 0;
- 
- 			// Convert audio samples
--			nb_samples = avresample_convert(avr, 	// audio resample context
-+			nb_samples = SWR_CONVERT(avr, 	// audio resample context
- 					audio_converted->data, 			// output data pointers
- 					audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
- 					audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
-@@ -1297,7 +1313,7 @@ void FFmpegWriter::write_audio_packets(bool final)
- 
- 				// setup resample context
- 				if (!avr_planar) {
--					avr_planar = avresample_alloc_context();
-+					avr_planar = SWR_ALLOC();
- 					av_opt_set_int(avr_planar,  "in_channel_layout", info.channel_layout, 0);
- 					av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
- 					av_opt_set_int(avr_planar,  "in_sample_fmt",     output_sample_fmt,     0);
-@@ -1306,7 +1322,7 @@ void FFmpegWriter::write_audio_packets(bool final)
- 					av_opt_set_int(avr_planar, "out_sample_rate",    info.sample_rate,    0);
- 					av_opt_set_int(avr_planar,  "in_channels",       info.channels,    0);
- 					av_opt_set_int(avr_planar, "out_channels",       info.channels,    0);
--					avresample_open(avr_planar);
-+					SWR_INIT(avr_planar);
- 				}
- 
- 				// Create input frame (and allocate arrays)
-@@ -1329,7 +1345,7 @@ void FFmpegWriter::write_audio_packets(bool final)
- 				av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);
- 
- 				// Convert audio samples
--				int nb_samples = avresample_convert(avr_planar, 	// audio resample context
-+				int nb_samples = SWR_CONVERT(avr_planar, 	// audio resample context
- 						frame_final->data, 			// output data pointers
- 						frame_final->linesize[0], 	// output plane size, in bytes. (0 if unknown)
- 						frame_final->nb_samples,	// maximum number of samples that the output buffer can hold
-@@ -1560,6 +1576,9 @@ void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame)
- // write video frame
- bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* frame_final)
- {
-+#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
-+	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1, "", -1, "", -1);
-+#else
- 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags & AVFMT_RAWPICTURE", oc->oformat->flags & AVFMT_RAWPICTURE, "", -1, "", -1, "", -1, "", -1);
- 
- 	if (oc->oformat->flags & AVFMT_RAWPICTURE) {
-@@ -1587,7 +1606,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* fra
- 		// Deallocate packet
- 		AV_FREE_PACKET(&pkt);
- 
--	} else {
-+	} else
-+#endif
-+        {
- 
- 		AVPacket pkt;
- 		av_init_packet(&pkt);
-diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
-index f49cbc4..c4c68f5 100644
---- a/src/FrameMapper.cpp
-+++ b/src/FrameMapper.cpp
-@@ -650,8 +650,8 @@ void FrameMapper::Close()
- 
- 		// Deallocate resample buffer
- 		if (avr) {
--			avresample_close(avr);
--			avresample_free(&avr);
-+			SWR_CLOSE(avr);
-+			SWR_FREE(&avr);
- 			avr = NULL;
- 		}
- 	}
-@@ -741,8 +741,8 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow
- 
- 	// Deallocate resample buffer
- 	if (avr) {
--		avresample_close(avr);
--		avresample_free(&avr);
-+		SWR_CLOSE(avr);
-+		SWR_FREE(&avr);
- 		avr = NULL;
- 	}
- 
-@@ -817,7 +817,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
- 
-     // setup resample context
-     if (!avr) {
--        avr = avresample_alloc_context();
-+        avr = SWR_ALLOC();
-         av_opt_set_int(avr,  "in_channel_layout", channel_layout_in_frame, 0);
-         av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
-         av_opt_set_int(avr,  "in_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
-@@ -826,11 +826,11 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
-         av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
-         av_opt_set_int(avr,  "in_channels",       channels_in_frame,    0);
-         av_opt_set_int(avr, "out_channels",       info.channels,    0);
--        avresample_open(avr);
-+        SWR_INIT(avr);
-     }
- 
-     // Convert audio samples
--    nb_samples = avresample_convert(avr, 	// audio resample context
-+    nb_samples = SWR_CONVERT(avr, 	// audio resample context
-             audio_converted->data, 			// output data pointers
-             audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
-             audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
-diff --git a/src/Timeline.cpp b/src/Timeline.cpp
-index 1b4f475..ed3c3df 100644
---- a/src/Timeline.cpp
-+++ b/src/Timeline.cpp
-@@ -1000,13 +1000,14 @@ void Timeline::SetJsonValue(Json::Value root) {
- 
- 			if (!existing_effect["type"].isNull()) {
- 				// Create instance of effect
--				e = EffectInfo().CreateEffect(existing_effect["type"].asString());
-+				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
- 
--				// Load Json into Effect
--				e->SetJsonValue(existing_effect);
-+					// Load Json into Effect
-+					e->SetJsonValue(existing_effect);
- 
--				// Add Effect to Timeline
--				AddEffect(e);
-+					// Add Effect to Timeline
-+					AddEffect(e);
-+				}
- 			}
- 		}
- 	}
-@@ -1270,13 +1271,14 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
- 		EffectBase *e = NULL;
- 
- 		// Init the matching effect object
--		e = EffectInfo().CreateEffect(effect_type);
-+		if (e = EffectInfo().CreateEffect(effect_type)) {
- 
--		// Load Json into Effect
--		e->SetJsonValue(change["value"]);
-+			// Load Json into Effect
-+			e->SetJsonValue(change["value"]);
- 
--		// Add Effect to Timeline
--		AddEffect(e);
-+			// Add Effect to Timeline
-+			AddEffect(e);
-+		}
- 
- 	} else if (change_type == "update") {
- 
-diff --git a/tests/ReaderBase_Tests.cpp b/tests/ReaderBase_Tests.cpp
-index 9d43530..70ca90d 100644
---- a/tests/ReaderBase_Tests.cpp
-+++ b/tests/ReaderBase_Tests.cpp
-@@ -44,9 +44,9 @@ TEST(ReaderBase_Derived_Class)
- 		std::shared_ptr<Frame> GetFrame(int64_t number) { std::shared_ptr<Frame> f(new Frame()); return f; }
- 		void Close() { };
- 		void Open() { };
--		string Json() { };
-+		string Json() { return NULL; };
- 		void SetJson(string value) { };
--		Json::Value JsonValue() { };
-+		Json::Value JsonValue() { return (int) NULL; };
- 		void SetJsonValue(Json::Value root) { };
- 		bool IsOpen() { return true; };
- 		string Name() { return "TestReader"; };

Copied: libopenshot/repos/community-staging-x86_64/ffmpeg-4.0.patch (from rev 751676, libopenshot/trunk/ffmpeg-4.0.patch)
===================================================================
--- ffmpeg-4.0.patch	                        (rev 0)
+++ ffmpeg-4.0.patch	2020-11-12 12:08:19 UTC (rev 751677)
@@ -0,0 +1,803 @@
+diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
+index 4af6cc9..63d543d 100644
+--- a/cmake/Modules/FindFFmpeg.cmake
++++ b/cmake/Modules/FindFFmpeg.cmake
+@@ -77,14 +77,14 @@ FIND_LIBRARY( SWSCALE_LIBRARY swscale swscale-2 swscale-4
+ 		   	 $ENV{FFMPEGDIR}/lib/ffmpeg/
+ 		   	 $ENV{FFMPEGDIR}/bin/ )
+ 
+-#FindAvresample
+-FIND_PATH( AVRESAMPLE_INCLUDE_DIR libavresample/avresample.h
++#FindSwresample
++FIND_PATH( SWRESAMPLE_INCLUDE_DIR libswresample/swresample.h
+ 		   PATHS /usr/include/
+ 		   	 /usr/include/ffmpeg/
+ 		   	 $ENV{FFMPEGDIR}/include/
+ 		   	 $ENV{FFMPEGDIR}/include/ffmpeg/ )
+ 
+-FIND_LIBRARY( AVRESAMPLE_LIBRARY avresample avresample-2 avresample-3
++FIND_LIBRARY( SWRESAMPLE_LIBRARY swresample
+ 		   PATHS /usr/lib/
+ 		   	 /usr/lib/ffmpeg/
+ 		   	 $ENV{FFMPEGDIR}/lib/
+@@ -113,31 +113,31 @@ IF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
+     SET ( SWSCALE_FOUND TRUE )
+ ENDIF ( SWSCALE_INCLUDE_DIR AND SWSCALE_LIBRARY )
+ 
+-IF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
+-    SET ( AVRESAMPLE_FOUND TRUE )
+-ENDIF ( AVRESAMPLE_INCLUDE_DIR AND AVRESAMPLE_LIBRARY )
++IF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
++    SET ( SWRESAMPLE_FOUND TRUE )
++ENDIF ( SWRESAMPLE_INCLUDE_DIR AND SWRESAMPLE_LIBRARY )
+ 
+-IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
++IF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
+ 
+ 	SET ( FFMPEG_FOUND TRUE )
+ 
+ 	SET ( FFMPEG_INCLUDE_DIR
+-		  ${AVFORMAT_INCLUDE_DIR}
+-		  ${AVCODEC_INCLUDE_DIR}
+-		  ${AVUTIL_INCLUDE_DIR}
+-		  ${AVDEVICE_INCLUDE_DIR}
+-		  ${SWSCALE_INCLUDE_DIR}
+-		  ${AVRESAMPLE_INCLUDE_DIR} )
+-	
++			${AVFORMAT_INCLUDE_DIR}
++			${AVCODEC_INCLUDE_DIR}
++			${AVUTIL_INCLUDE_DIR}
++			${AVDEVICE_INCLUDE_DIR}
++			${SWSCALE_INCLUDE_DIR}
++			${SWRESAMPLE_INCLUDE_DIR} )
++
+ 	SET ( FFMPEG_LIBRARIES 
+-		  ${AVFORMAT_LIBRARY}
+-		  ${AVCODEC_LIBRARY}
+-		  ${AVUTIL_LIBRARY}
+-		  ${AVDEVICE_LIBRARY}
+-		  ${SWSCALE_LIBRARY}
+-		  ${AVRESAMPLE_LIBRARY} )
+-
+-ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR AVRESAMPLE_FOUND )
++			${AVFORMAT_LIBRARY}
++			${AVCODEC_LIBRARY}
++			${AVUTIL_LIBRARY}
++			${AVDEVICE_LIBRARY}
++			${SWSCALE_LIBRARY}
++			${SWRESAMPLE_LIBRARY} )
++
++ENDIF ( AVFORMAT_INCLUDE_DIR OR AVCODEC_INCLUDE_DIR OR AVUTIL_INCLUDE_DIR OR AVDEVICE_FOUND OR SWSCALE_FOUND OR SWRESAMPLE_FOUND )
+ 
+ MARK_AS_ADVANCED(
+   FFMPEG_LIBRARY_DIR
+diff --git a/include/CrashHandler.h b/include/CrashHandler.h
+index e3a4bbe..12c79a8 100644
+--- a/include/CrashHandler.h
++++ b/include/CrashHandler.h
+@@ -53,13 +53,15 @@ namespace openshot {
+ 	class CrashHandler {
+ 	private:
+ 		/// Default constructor
+-		CrashHandler(){}; 						 // Don't allow user to create an instance of this singleton
++		CrashHandler(){return;}; 						 // Don't allow user to create an instance of this singleton
+ 
+ 		/// Default copy method
+-		CrashHandler(CrashHandler const&){};             // Don't allow the user to copy this instance
++		//CrashHandler(CrashHandler const&){};             // Don't allow the user to copy this instance
++		CrashHandler(CrashHandler const&) = delete;             // Don't allow the user to copy this instance
+ 
+ 		/// Default assignment operator
+-		CrashHandler & operator=(CrashHandler const&){};  // Don't allow the user to assign this instance
++		//CrashHandler & operator=(CrashHandler const&){};  // Don't allow the user to assign this instance
++		CrashHandler & operator=(CrashHandler const&) = delete;  // Don't allow the user to assign this instance
+ 
+ 		/// Private variable to keep track of singleton instance
+ 		static CrashHandler *m_pInstance;
+diff --git a/include/FFmpegReader.h b/include/FFmpegReader.h
+index 6072756..e2c4863 100644
+--- a/include/FFmpegReader.h
++++ b/include/FFmpegReader.h
+@@ -105,6 +105,7 @@ namespace openshot
+ 		bool check_interlace;
+ 		bool check_fps;
+ 		bool has_missing_frames;
++		bool use_omp_threads;
+ 
+ 		CacheMemory working_cache;
+ 		CacheMemory missing_frames;
+diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h
+index 578c658..346da54 100644
+--- a/include/FFmpegUtilities.h
++++ b/include/FFmpegUtilities.h
+@@ -43,7 +43,15 @@
+ 		#include <libavcodec/avcodec.h>
+ 		#include <libavformat/avformat.h>
+ 		#include <libswscale/swscale.h>
++		// Change this to the first version swrescale works
++	#if (LIBAVFORMAT_VERSION_MAJOR >= 57)
++			#define USE_SW
++	#endif
++	#ifdef USE_SW
++		#include <libswresample/swresample.h>
++	#else
+ 		#include <libavresample/avresample.h>
++	#endif
+ 		#include <libavutil/mathematics.h>
+ 		#include <libavutil/pixfmt.h>
+ 		#include <libavutil/pixdesc.h>
+@@ -106,7 +114,65 @@
+ 		#define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
+ 	#endif
+ 
+-	#if IS_FFMPEG_3_2
++	#ifdef USE_SW
++		#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
++			swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count)
++		#define SWR_ALLOC() swr_alloc()
++		#define SWR_CLOSE(ctx) {}
++		#define SWR_FREE(ctx) swr_free(ctx)
++		#define SWR_INIT(ctx)  swr_init(ctx)
++		#define SWRCONTEXT SwrContext
++	#else
++		#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
++			avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count)
++		#define SWR_ALLOC() avresample_alloc_context()
++		#define SWR_CLOSE(ctx) avresample_close(ctx)
++		#define SWR_FREE(ctx) avresample_free(ctx)
++		#define SWR_INIT(ctx)  avresample_open(ctx)
++		#define SWRCONTEXT AVAudioResampleContext
++	#endif
++
++
++	#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
++		#define AV_REGISTER_ALL
++		#define AVCODEC_REGISTER_ALL
++		#define AV_FILENAME url
++		#define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
++		#define AV_ALLOCATE_FRAME() av_frame_alloc()
++		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
++		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
++    	#define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
++		#define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
++		#define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
++		#define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
++		#define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
++		auto AV_GET_CODEC_CONTEXT = [](AVStream* av_stream, AVCodec* av_codec) { \
++			AVCodecContext *context = avcodec_alloc_context3(av_codec); \
++			avcodec_parameters_to_context(context, av_stream->codecpar); \
++			return context; \
++		};
++		#define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
++		#define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
++		#define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
++		#define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
++		#define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
++		#define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
++		#define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
++		#define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
++		#define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
++		#define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) 	av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
++		#define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) 	av_st = avformat_new_stream(oc, NULL);\
++			if (!av_st) \
++				throw OutOfMemory("Could not allocate memory for the video stream.", path); \
++			c = avcodec_alloc_context3(av_codec); \
++			st_codec = c; \
++			av_st->codecpar->codec_id = av_codec->id;
++		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
++	#elif IS_FFMPEG_3_2
++		#define AV_REGISTER_ALL av_register_all();
++		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
++		#define AV_FILENAME filename
++		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ 		#define AV_ALLOCATE_FRAME() av_frame_alloc()
+ 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
+ 		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
+@@ -138,6 +204,10 @@
+ 			av_st->codecpar->codec_id = av_codec->id;
+ 		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
+ 	#elif LIBAVFORMAT_VERSION_MAJOR >= 55
++		#define AV_REGISTER_ALL av_register_all();
++		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
++		#define AV_FILENAME filename
++		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ 		#define AV_ALLOCATE_FRAME() av_frame_alloc()
+ 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
+ 		#define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
+@@ -164,6 +234,10 @@
+ 			c = av_st->codec;
+ 		#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
+ 	#else
++		#define AV_REGISTER_ALL av_register_all();
++		#define AVCODEC_REGISTER_ALL	avcodec_register_all();
++		#define AV_FILENAME filename
++		#define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ 		#define AV_ALLOCATE_FRAME() avcodec_alloc_frame()
+ 		#define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
+ 		#define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame)
+diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
+index 8343002..7eefacb 100644
+--- a/include/FFmpegWriter.h
++++ b/include/FFmpegWriter.h
+@@ -174,8 +174,8 @@ namespace openshot
+ 	    int initial_audio_input_frame_size;
+ 	    int audio_input_position;
+ 	    int audio_encoder_buffer_size;
+-	    AVAudioResampleContext *avr;
+-	    AVAudioResampleContext *avr_planar;
++	    SWRCONTEXT *avr;
++	    SWRCONTEXT *avr_planar;
+ 
+ 	    /* Resample options */
+ 	    int original_sample_rate;
+diff --git a/include/Frame.h b/include/Frame.h
+index a7ad509..eba7f8b 100644
+--- a/include/Frame.h
++++ b/include/Frame.h
+@@ -62,7 +62,7 @@
+ #include "AudioResampler.h"
+ #include "Fraction.h"
+ 
+-
++#pragma SWIG nowarn=362
+ using namespace std;
+ 
+ namespace openshot
+diff --git a/include/FrameMapper.h b/include/FrameMapper.h
+index e70fdbc..043b5e4 100644
+--- a/include/FrameMapper.h
++++ b/include/FrameMapper.h
+@@ -146,7 +146,7 @@ namespace openshot
+ 		ReaderBase *reader;		// The source video reader
+ 		CacheMemory final_cache; 		// Cache of actual Frame objects
+ 		bool is_dirty; 			// When this is true, the next call to GetFrame will re-init the mapping
+-		AVAudioResampleContext *avr;	// Audio resampling context object
++		SWRCONTEXT *avr;	// Audio resampling context object
+ 
+ 		// Internal methods used by init
+ 		void AddField(int64_t frame);
+diff --git a/include/OpenMPUtilities.h b/include/OpenMPUtilities.h
+index 8a95a95..c0f5597 100644
+--- a/include/OpenMPUtilities.h
++++ b/include/OpenMPUtilities.h
+@@ -29,8 +29,26 @@
+ #define OPENSHOT_OPENMP_UTILITIES_H
+ 
+ #include <omp.h>
++#include <stdlib.h>
++#include <string.h>
+ 
+-	// Calculate the # of OpenMP Threads to allow
+-	#define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
++// Calculate the # of OpenMP Threads to allow
++#define OPEN_MP_NUM_PROCESSORS omp_get_num_procs()
++
++using namespace std;
++
++namespace openshot {
++
++	// Check if OS2_OMP_THREADS environment variable is present, and return
++	// if multiple threads should be used with OMP
++	static bool IsOMPEnabled() {
++		char* OS2_OMP_THREADS = getenv("OS2_OMP_THREADS");
++		if (OS2_OMP_THREADS != NULL && strcmp(OS2_OMP_THREADS, "0") == 0)
++			return false;
++		else
++			return true;
++	}
++
++}
+ 
+ #endif
+diff --git a/include/ZmqLogger.h b/include/ZmqLogger.h
+index c134f2c..e825ed0 100644
+--- a/include/ZmqLogger.h
++++ b/include/ZmqLogger.h
+@@ -72,11 +72,19 @@ namespace openshot {
+ 		/// Default constructor
+ 		ZmqLogger(){}; 						 // Don't allow user to create an instance of this singleton
+ 
++#if __GNUC__ >=7
+ 		/// Default copy method
+-		ZmqLogger(ZmqLogger const&){};             // Don't allow the user to copy this instance
++		ZmqLogger(ZmqLogger const&) = delete; // Don't allow the user to assign this instance
+ 
+ 		/// Default assignment operator
+-		ZmqLogger & operator=(ZmqLogger const&){};  // Don't allow the user to assign this instance
++		ZmqLogger & operator=(ZmqLogger const&) = delete;  // Don't allow the user to assign this instance
++#else
++		/// Default copy method
++		ZmqLogger(ZmqLogger const&) {}; // Don't allow the user to assign this instance
++
++		/// Default assignment operator
++		ZmqLogger & operator=(ZmqLogger const&);  // Don't allow the user to assign this instance
++#endif
+ 
+ 		/// Private variable to keep track of singleton instance
+ 		static ZmqLogger * m_pInstance;
+diff --git a/src/Clip.cpp b/src/Clip.cpp
+index 913fd71..63e7741 100644
+--- a/src/Clip.cpp
++++ b/src/Clip.cpp
+@@ -925,13 +925,14 @@ void Clip::SetJsonValue(Json::Value root) {
+ 
+ 			if (!existing_effect["type"].isNull()) {
+ 				// Create instance of effect
+-				e = EffectInfo().CreateEffect(existing_effect["type"].asString());
++				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
+ 
+-				// Load Json into Effect
+-				e->SetJsonValue(existing_effect);
++					// Load Json into Effect
++					e->SetJsonValue(existing_effect);
+ 
+-				// Add Effect to Timeline
+-				AddEffect(e);
++					// Add Effect to Timeline
++					AddEffect(e);
++				}
+ 			}
+ 		}
+ 	}
+diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp
+index 23bc9d0..f9e4c40 100644
+--- a/src/EffectInfo.cpp
++++ b/src/EffectInfo.cpp
+@@ -82,6 +82,7 @@ EffectBase* EffectInfo::CreateEffect(string effect_type) {
+ 
+ 	else if (effect_type == "Wave")
+ 		return new Wave();
++	return NULL;
+ }
+ 
+ // Generate Json::JsonValue for this object
+diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
+index 5a4c936..3e29cf7 100644
+--- a/src/FFmpegReader.cpp
++++ b/src/FFmpegReader.cpp
+@@ -37,11 +37,12 @@ FFmpegReader::FFmpegReader(string path)
+ 	  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
+ 	  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
+ 	  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
+-	  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
++	  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
++	  packet(NULL), use_omp_threads(true) {
+ 
+ 	// Initialize FFMpeg, and register all formats and codecs
+-	av_register_all();
+-	avcodec_register_all();
++	AV_REGISTER_ALL
++	AVCODEC_REGISTER_ALL
+ 
+ 	// Init cache
+ 	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
+@@ -58,11 +59,12 @@ FFmpegReader::FFmpegReader(string path, bool inspect_reader)
+ 		  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
+ 		  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
+ 		  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
+-		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL) {
++		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
++		  packet(NULL), use_omp_threads(true) {
+ 
+ 	// Initialize FFMpeg, and register all formats and codecs
+-	av_register_all();
+-	avcodec_register_all();
++	AV_REGISTER_ALL
++	AVCODEC_REGISTER_ALL
+ 
+ 	// Init cache
+ 	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
+@@ -227,6 +229,9 @@ void FFmpegReader::Open()
+ 		missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
+ 		final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
+ 
++		// Initialize OMP threading support
++		use_omp_threads = openshot::IsOMPEnabled();
++
+ 		// Mark as "open"
+ 		is_open = true;
+ 	}
+@@ -606,6 +611,12 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
+ 
+ 						// Process Video Packet
+ 						ProcessVideoPacket(requested_frame);
++
++						if (!use_omp_threads) {
++							// Wait on each OMP task to complete before moving on to the next one. This slows
++							// down processing considerably, but might be more stable on some systems.
++							#pragma omp taskwait
++						}
+ 					}
+ 
+ 				}
+@@ -638,7 +649,6 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
+ 				}
+ 
+ 				// Check if working frames are 'finished'
+-				bool is_cache_found = false;
+ 				if (!is_seeking) {
+ 					// Check for any missing frames
+ 					CheckMissingFrame(requested_frame);
+@@ -648,7 +658,7 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
+ 				}
+ 
+ 				// Check if requested 'final' frame is available
+-				is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
++				bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
+ 
+ 				// Increment frames processed
+ 				packets_processed++;
+@@ -978,7 +988,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
+ 	int data_size = 0;
+ 
+ 	// re-initialize buffer size (it gets changed in the avcodec_decode_audio2 method call)
+-	int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
++	int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE;
+ 	#pragma omp critical (ProcessAudioPacket)
+ 	{
+ 	#if IS_FFMPEG_3_2
+@@ -1083,7 +1093,7 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
+ 
+ 
+ 	// Allocate audio buffer
+-	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
++	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];
+ 
+ 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16, "", -1);
+ 
+@@ -1093,11 +1103,11 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
+ 	audio_converted->nb_samples = audio_frame->nb_samples;
+ 	av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
+ 
+-	AVAudioResampleContext *avr = NULL;
++	SWRCONTEXT *avr = NULL;
+ 	int nb_samples = 0;
+ 
+ 	// setup resample context
+-	avr = avresample_alloc_context();
++	avr = SWR_ALLOC();
+ 	av_opt_set_int(avr,  "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
+ 	av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
+ 	av_opt_set_int(avr,  "in_sample_fmt",     AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
+@@ -1106,10 +1116,10 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
+ 	av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
+ 	av_opt_set_int(avr,  "in_channels",       info.channels,    0);
+ 	av_opt_set_int(avr, "out_channels",       info.channels,    0);
+-	int r = avresample_open(avr);
++	int r = SWR_INIT(avr);
+ 
+ 	// Convert audio samples
+-	nb_samples = avresample_convert(avr, 	// audio resample context
++	nb_samples = SWR_CONVERT(avr, 	// audio resample context
+ 			audio_converted->data, 			// output data pointers
+ 			audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
+ 			audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
+@@ -1121,8 +1131,8 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
+ 	memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);
+ 
+ 	// Deallocate resample buffer
+-	avresample_close(avr);
+-	avresample_free(&avr);
++	SWR_CLOSE(avr);
++	SWR_FREE(&avr);
+ 	avr = NULL;
+ 
+ 	// Free AVFrames
+@@ -1348,7 +1358,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
+ 		{
+ 			seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
+ 			if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
+-				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->filename);
++				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
+ 			} else
+ 			{
+ 				// VIDEO SEEK
+@@ -1362,7 +1372,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
+ 		{
+ 			seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
+ 			if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
+-				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->filename);
++				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
+ 			} else
+ 			{
+ 				// AUDIO SEEK
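
The FFmpegReader changes route every version-sensitive call through compatibility
macros (AV_REGISTER_ALL, AVCODEC_REGISTER_ALL, MY_INPUT_BUFFER_PADDING_SIZE,
AV_FILENAME, SWRCONTEXT and the SWR_* family). Their real definitions live in the
project's FFmpegUtilities.h, which is not part of this excerpt; for FFmpeg 4.0,
where libavresample is gone and registration is no longer required, they are
assumed to expand roughly as follows (illustrative, not the verbatim header):

    // Assumed expansions for FFmpeg >= 4.0:
    #define AV_REGISTER_ALL                       // av_register_all() no longer needed
    #define AVCODEC_REGISTER_ALL                  // avcodec_register_all() no longer needed
    #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
    #define AV_FILENAME url                       // AVFormatContext::filename became ::url
    #define SWRCONTEXT SwrContext
    #define SWR_ALLOC()  swr_alloc()
    #define SWR_INIT(c)  swr_init(c)
    #define SWR_CLOSE(c) swr_close(c)
    #define SWR_FREE(c)  swr_free(c)
    #define SWR_CONVERT(c, out, linesize, out_count, in, linesize2, in_count) \
            swr_convert(c, out, out_count, (const uint8_t **)in, in_count)
    // On FFmpeg < 4.0 the same names would map back to av_register_all();,
    // FF_INPUT_BUFFER_PADDING_SIZE, ->filename and the avresample_* API.

The new use_omp_threads member, initialised from openshot::IsOMPEnabled() in Open(),
adds a #pragma omp taskwait after each video packet when OS2_OMP_THREADS=0, trading
throughput for stability on systems where the fully parallel path misbehaves.
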
+diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
+index 4416040..d106901 100644
+--- a/src/FFmpegWriter.cpp
++++ b/src/FFmpegWriter.cpp
+@@ -46,7 +46,7 @@ FFmpegWriter::FFmpegWriter(string path) :
+ 	info.has_video = false;
+ 
+ 	// Initialize FFMpeg, and register all formats and codecs
+-	av_register_all();
++	AV_REGISTER_ALL
+ 
+ 	// auto detect format
+ 	auto_detect_format();
+@@ -299,7 +299,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value)
+ /// Determine if codec name is valid
+ bool FFmpegWriter::IsValidCodec(string codec_name) {
+ 	// Initialize FFMpeg, and register all formats and codecs
+-	av_register_all();
++	AV_REGISTER_ALL
+ 
+ 	// Find the codec (if any)
+ 	if (avcodec_find_encoder_by_name(codec_name.c_str()) == NULL)
+@@ -342,7 +342,7 @@ void FFmpegWriter::WriteHeader()
+ 	}
+ 
+     // Force the output filename (which doesn't always happen for some reason)
+-    snprintf(oc->filename, sizeof(oc->filename), "%s", path.c_str());
++    snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", path.c_str());
+ 
+ 	// Write the stream header, if any
+ 	// TODO: add avoptions / parameters instead of NULL
+@@ -559,8 +559,10 @@ void FFmpegWriter::flush_encoders()
+ {
+ 	if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1)
+ 		return;
++#if (LIBAVFORMAT_VERSION_MAJOR < 58)
+ 	if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
+ 		return;
++#endif
+ 
+     int error_code = 0;
+     int stop_encoding = 1;
+@@ -734,14 +736,14 @@ void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st)
+ 
+ 	// Deallocate resample buffer
+ 	if (avr) {
+-		avresample_close(avr);
+-		avresample_free(&avr);
++		SWR_CLOSE(avr);
++		SWR_FREE(&avr);
+ 		avr = NULL;
+ 	}
+ 
+ 	if (avr_planar) {
+-		avresample_close(avr_planar);
+-		avresample_free(&avr_planar);
++		SWR_CLOSE(avr_planar);
++		SWR_FREE(&avr_planar);
+ 		avr_planar = NULL;
+ 	}
+ }
+@@ -881,7 +883,11 @@ AVStream* FFmpegWriter::add_audio_stream()
+ 
+ 	// some formats want stream headers to be separate
+ 	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
++#if (LIBAVCODEC_VERSION_MAJOR >= 57)
++		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
++#else
+ 		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
++#endif
+ 
+ 	AV_COPY_PARAMS_FROM_CONTEXT(st, c);
+ 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_audio_stream", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->channels", c->channels, "c->sample_fmt", c->sample_fmt, "c->channel_layout", c->channel_layout, "c->sample_rate", c->sample_rate);
+@@ -953,7 +959,11 @@ AVStream* FFmpegWriter::add_video_stream()
+ 		c->mb_decision = 2;
+ 	// some formats want stream headers to be separate
+ 	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
++#if (LIBAVCODEC_VERSION_MAJOR >= 57)
++		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
++#else
+ 		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
++#endif
+ 
+ 	// Find all supported pixel formats for this codec
+     const PixelFormat* supported_pixel_formats = codec->pix_fmts;
+@@ -970,10 +980,12 @@ AVStream* FFmpegWriter::add_video_stream()
+             // Raw video should use RGB24
+         	c->pix_fmt = PIX_FMT_RGB24;
+ 
++#if (LIBAVFORMAT_VERSION_MAJOR < 58)
+         if (strcmp(fmt->name, "gif") != 0)
+ 			// If not GIF format, skip the encoding process
+ 			// Set raw picture flag (so we don't encode this video)
+ 			oc->oformat->flags |= AVFMT_RAWPICTURE;
++#endif
+         } else {
+         	// Set the default codec
+         	c->pix_fmt = PIX_FMT_YUV420P;
+@@ -981,7 +993,11 @@ AVStream* FFmpegWriter::add_video_stream()
+     }
+ 
+ 	AV_COPY_PARAMS_FROM_CONTEXT(st, c);
++#if (LIBAVFORMAT_VERSION_MAJOR < 58)
+ 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "AVFMT_RAWPICTURE", AVFMT_RAWPICTURE, "", -1);
++#else
++	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1);
++#endif
+ 
+ 	return st;
+ }
+@@ -1056,7 +1072,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st)
+ 		av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
+ 	}
+ 
+-	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
++	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
+ 
+ }
+ 
+@@ -1222,7 +1238,7 @@ void FFmpegWriter::write_audio_packets(bool final)
+ 
+ 			// setup resample context
+ 			if (!avr) {
+-				avr = avresample_alloc_context();
++				avr = SWR_ALLOC();
+ 				av_opt_set_int(avr,  "in_channel_layout", channel_layout_in_frame, 0);
+ 				av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
+ 				av_opt_set_int(avr,  "in_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
+@@ -1231,12 +1247,12 @@ void FFmpegWriter::write_audio_packets(bool final)
+ 				av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
+ 				av_opt_set_int(avr,  "in_channels",       channels_in_frame,    0);
+ 				av_opt_set_int(avr, "out_channels",       info.channels,    0);
+-				avresample_open(avr);
++				SWR_INIT(avr);
+ 			}
+ 			int nb_samples = 0;
+ 
+ 			// Convert audio samples
+-			nb_samples = avresample_convert(avr, 	// audio resample context
++			nb_samples = SWR_CONVERT(avr, 	// audio resample context
+ 					audio_converted->data, 			// output data pointers
+ 					audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
+ 					audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
+@@ -1297,7 +1313,7 @@ void FFmpegWriter::write_audio_packets(bool final)
+ 
+ 				// setup resample context
+ 				if (!avr_planar) {
+-					avr_planar = avresample_alloc_context();
++					avr_planar = SWR_ALLOC();
+ 					av_opt_set_int(avr_planar,  "in_channel_layout", info.channel_layout, 0);
+ 					av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
+ 					av_opt_set_int(avr_planar,  "in_sample_fmt",     output_sample_fmt,     0);
+@@ -1306,7 +1322,7 @@ void FFmpegWriter::write_audio_packets(bool final)
+ 					av_opt_set_int(avr_planar, "out_sample_rate",    info.sample_rate,    0);
+ 					av_opt_set_int(avr_planar,  "in_channels",       info.channels,    0);
+ 					av_opt_set_int(avr_planar, "out_channels",       info.channels,    0);
+-					avresample_open(avr_planar);
++					SWR_INIT(avr_planar);
+ 				}
+ 
+ 				// Create input frame (and allocate arrays)
+@@ -1329,7 +1345,7 @@ void FFmpegWriter::write_audio_packets(bool final)
+ 				av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);
+ 
+ 				// Convert audio samples
+-				int nb_samples = avresample_convert(avr_planar, 	// audio resample context
++				int nb_samples = SWR_CONVERT(avr_planar, 	// audio resample context
+ 						frame_final->data, 			// output data pointers
+ 						frame_final->linesize[0], 	// output plane size, in bytes. (0 if unknown)
+ 						frame_final->nb_samples,	// maximum number of samples that the output buffer can hold
+@@ -1560,6 +1576,9 @@ void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame)
+ // write video frame
+ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* frame_final)
+ {
++#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
++	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1, "", -1, "", -1);
++#else
+ 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags & AVFMT_RAWPICTURE", oc->oformat->flags & AVFMT_RAWPICTURE, "", -1, "", -1, "", -1, "", -1);
+ 
+ 	if (oc->oformat->flags & AVFMT_RAWPICTURE) {
+@@ -1587,7 +1606,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* fra
+ 		// Deallocate packet
+ 		AV_FREE_PACKET(&pkt);
+ 
+-	} else {
++	} else
++#endif
++	{
+ 
+ 		AVPacket pkt;
+ 		av_init_packet(&pkt);
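
FFmpegWriter.cpp follows the same pattern: CODEC_FLAG_GLOBAL_HEADER became
AV_CODEC_FLAG_GLOBAL_HEADER in libavcodec 57, and AVFMT_RAWPICTURE (together with
its raw-picture shortcut in flush_encoders() and write_video_packet()) was removed
in libavformat 58, so both are wrapped in version checks above. An alternative to
the inline #if guards would be a one-time shim, sketched here purely for illustration:

    // Hypothetical compatibility shim (the patch uses inline #if guards instead):
    #if (LIBAVCODEC_VERSION_MAJOR < 57) && !defined(AV_CODEC_FLAG_GLOBAL_HEADER)
      #define AV_CODEC_FLAG_GLOBAL_HEADER CODEC_FLAG_GLOBAL_HEADER
    #endif
    // AVFMT_RAWPICTURE has no libavformat 58 replacement, so the raw-picture
    // fast path simply disappears behind LIBAVFORMAT_VERSION_MAJOR < 58.
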
+diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
+index f49cbc4..c4c68f5 100644
+--- a/src/FrameMapper.cpp
++++ b/src/FrameMapper.cpp
+@@ -650,8 +650,8 @@ void FrameMapper::Close()
+ 
+ 		// Deallocate resample buffer
+ 		if (avr) {
+-			avresample_close(avr);
+-			avresample_free(&avr);
++			SWR_CLOSE(avr);
++			SWR_FREE(&avr);
+ 			avr = NULL;
+ 		}
+ 	}
+@@ -741,8 +741,8 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow
+ 
+ 	// Deallocate resample buffer
+ 	if (avr) {
+-		avresample_close(avr);
+-		avresample_free(&avr);
++		SWR_CLOSE(avr);
++		SWR_FREE(&avr);
+ 		avr = NULL;
+ 	}
+ 
+@@ -817,7 +817,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
+ 
+     // setup resample context
+     if (!avr) {
+-        avr = avresample_alloc_context();
++        avr = SWR_ALLOC();
+         av_opt_set_int(avr,  "in_channel_layout", channel_layout_in_frame, 0);
+         av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
+         av_opt_set_int(avr,  "in_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
+@@ -826,11 +826,11 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
+         av_opt_set_int(avr, "out_sample_rate",    info.sample_rate,    0);
+         av_opt_set_int(avr,  "in_channels",       channels_in_frame,    0);
+         av_opt_set_int(avr, "out_channels",       info.channels,    0);
+-        avresample_open(avr);
++        SWR_INIT(avr);
+     }
+ 
+     // Convert audio samples
+-    nb_samples = avresample_convert(avr, 	// audio resample context
++    nb_samples = SWR_CONVERT(avr, 	// audio resample context
+             audio_converted->data, 			// output data pointers
+             audio_converted->linesize[0], 	// output plane size, in bytes. (0 if unknown)
+             audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
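
FrameMapper::ResampleMappedAudio() uses the same allocate / configure / init /
convert / close / free sequence through the SWR_* macros. Written directly against
libswresample (a sketch under the assumed macro mapping above; formats, rates and
the helper name are placeholders, not code from this patch):

    extern "C" {
    #include <libavutil/opt.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/samplefmt.h>
    #include <libswresample/swresample.h>
    }

    // Convert interleaved S16 stereo to planar float at the same sample rate.
    static int resample_block(uint8_t **in_data, int in_count,
                              uint8_t **out_data, int out_count) {
        SwrContext *ctx = swr_alloc();
        av_opt_set_int(ctx, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(ctx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16,   0);
        av_opt_set_int(ctx, "out_sample_fmt",     AV_SAMPLE_FMT_FLTP,  0);
        av_opt_set_int(ctx, "in_sample_rate",     44100, 0);
        av_opt_set_int(ctx, "out_sample_rate",    44100, 0);
        swr_init(ctx);
        int got = swr_convert(ctx, out_data, out_count,
                              (const uint8_t **)in_data, in_count);
        swr_close(ctx);
        swr_free(&ctx);
        return got;   // samples written per channel, or a negative AVERROR
    }
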
+diff --git a/src/Timeline.cpp b/src/Timeline.cpp
+index 1b4f475..ed3c3df 100644
+--- a/src/Timeline.cpp
++++ b/src/Timeline.cpp
+@@ -1000,13 +1000,14 @@ void Timeline::SetJsonValue(Json::Value root) {
+ 
+ 			if (!existing_effect["type"].isNull()) {
+ 				// Create instance of effect
+-				e = EffectInfo().CreateEffect(existing_effect["type"].asString());
++				if ((e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
+ 
+-				// Load Json into Effect
+-				e->SetJsonValue(existing_effect);
++					// Load Json into Effect
++					e->SetJsonValue(existing_effect);
+ 
+-				// Add Effect to Timeline
+-				AddEffect(e);
++					// Add Effect to Timeline
++					AddEffect(e);
++				}
+ 			}
+ 		}
+ 	}
+@@ -1270,13 +1271,14 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
+ 		EffectBase *e = NULL;
+ 
+ 		// Init the matching effect object
+-		e = EffectInfo().CreateEffect(effect_type);
++		if ((e = EffectInfo().CreateEffect(effect_type))) {
+ 
+-		// Load Json into Effect
+-		e->SetJsonValue(change["value"]);
++			// Load Json into Effect
++			e->SetJsonValue(change["value"]);
+ 
+-		// Add Effect to Timeline
+-		AddEffect(e);
++			// Add Effect to Timeline
++			AddEffect(e);
++		}
+ 
+ 	} else if (change_type == "update") {
+ 
+diff --git a/tests/ReaderBase_Tests.cpp b/tests/ReaderBase_Tests.cpp
+index 9d43530..70ca90d 100644
+--- a/tests/ReaderBase_Tests.cpp
++++ b/tests/ReaderBase_Tests.cpp
+@@ -44,9 +44,9 @@ TEST(ReaderBase_Derived_Class)
+ 		std::shared_ptr<Frame> GetFrame(int64_t number) { std::shared_ptr<Frame> f(new Frame()); return f; }
+ 		void Close() { };
+ 		void Open() { };
+-		string Json() { };
++		string Json() { return ""; };
+ 		void SetJson(string value) { };
+-		Json::Value JsonValue() { };
++		Json::Value JsonValue() { return Json::Value(); };
+ 		void SetJsonValue(Json::Value root) { };
+ 		bool IsOpen() { return true; };
+ 		string Name() { return "TestReader"; };


