This changes the previous vmaf filter patch to use libvmaf, keeping it separate
from a possible future native implementation of VMAF inside FFmpeg.
Signed-off-by: Ashish Singh <ashk43712@gmail.com>
Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
... | ... |
@@ -256,6 +256,7 @@ External library support: |
256 | 256 |
--enable-libtwolame enable MP2 encoding via libtwolame [no] |
257 | 257 |
--enable-libv4l2 enable libv4l2/v4l-utils [no] |
258 | 258 |
--enable-libvidstab enable video stabilization using vid.stab [no] |
259 |
+ --enable-libvmaf enable vmaf filter via libvmaf [no] |
|
259 | 260 |
--enable-libvo-amrwbenc enable AMR-WB encoding via libvo-amrwbenc [no] |
260 | 261 |
--enable-libvorbis enable Vorbis en/decoding via libvorbis, |
261 | 262 |
native implementation exists [no] |
... | ... |
@@ -1569,6 +1570,7 @@ EXTERNAL_LIBRARY_LIST=" |
1569 | 1569 |
libtheora |
1570 | 1570 |
libtwolame |
1571 | 1571 |
libv4l2 |
1572 |
+ libvmaf |
|
1572 | 1573 |
libvorbis |
1573 | 1574 |
libvpx |
1574 | 1575 |
libwavpack |
... | ... |
@@ -3186,6 +3188,7 @@ uspp_filter_deps="gpl avcodec" |
3186 | 3186 |
vaguedenoiser_filter_deps="gpl" |
3187 | 3187 |
vidstabdetect_filter_deps="libvidstab" |
3188 | 3188 |
vidstabtransform_filter_deps="libvidstab" |
3189 |
+vmaf_filter_deps="libvmaf" |
|
3189 | 3190 |
zmq_filter_deps="libzmq" |
3190 | 3191 |
zoompan_filter_deps="swscale" |
3191 | 3192 |
zscale_filter_deps="libzimg" |
... | ... |
@@ -5902,6 +5905,8 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame |
5902 | 5902 |
die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; } |
5903 | 5903 |
enabled libv4l2 && require_pkg_config libv4l2 libv4l2.h v4l2_ioctl |
5904 | 5904 |
enabled libvidstab && require_pkg_config "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit |
5905 |
+enabled libvmaf && { check_lib libvmaf "libvmaf.h" "compute_vmaf" -lvmaf -lstdc++ -lpthread -lm || |
|
5906 |
+ die "ERROR: libvmaf must be installed"; } |
|
5905 | 5907 |
enabled libvo_amrwbenc && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc |
5906 | 5908 |
enabled libvorbis && require_pkg_config vorbis vorbis/codec.h vorbis_info_init && |
5907 | 5909 |
require_pkg_config vorbisenc vorbis/vorbisenc.h vorbis_encode_init |
... | ... |
@@ -9639,6 +9639,70 @@ The formula that generates the correction is: |
9639 | 9639 |
where @var{r_0} is halve of the image diagonal and @var{r_src} and @var{r_tgt} are the |
9640 | 9640 |
distances from the focal point in the source and target images, respectively. |
9641 | 9641 |
|
9642 |
+@section libvmaf |
|
9643 |
+ |
|
9644 |
+Obtain the average VMAF (Video Multi-Method Assessment Fusion) |
|
9645 |
+score between two input videos. |
|
9646 |
+ |
|
9647 |
+This filter takes two input videos. |
|
9648 |
+ |
|
9649 |
+Both video inputs must have the same resolution and pixel format for |
|
9650 |
+this filter to work correctly. Also it assumes that both inputs |
|
9651 |
+have the same number of frames, which are compared one by one. |
|
9652 |
+ |
|
9653 |
+The obtained average VMAF score is printed through the logging system. |
|
9654 |
+ |
|
9655 |
+It requires Netflix's vmaf library (libvmaf) as a prerequisite. |
|
9656 |
+After installing the library it can be enabled using: |
|
9657 |
+@code{./configure --enable-libvmaf}. |
|
9658 |
+If no model path is specified it uses the default model: @code{/usr/local/share/model/vmaf_v0.6.1.pkl}. |
|
9659 |
+ |
|
9660 |
+On the below examples the input file @file{main.mpg} being processed is |
|
9661 |
+compared with the reference file @file{ref.mpg}. |
|
9662 |
+ |
|
9663 |
+The filter has following options: |
|
9664 |
+ |
|
9665 |
+@table @option |
|
9666 |
+@item model_path |
|
9667 |
+Set the model path which is to be used for SVM. |
|
9668 |
+Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"} |
|
9669 |
+ |
|
9670 |
+@item log_path |
|
9671 |
+Set the file path to be used to store logs. |
|
9672 |
+ |
|
9673 |
+@item log_fmt |
|
9674 |
+Set the format of the log file (xml or json). |
|
9675 |
+ |
|
9676 |
+@item enable_transform |
|
9677 |
+Enables transform for computing vmaf. |
|
9678 |
+ |
|
9679 |
+@item phone_model |
|
9680 |
+Invokes the phone model which will generate VMAF scores higher than in the |
|
9681 |
+regular model, which is more suitable for laptop, TV, etc. viewing conditions. |
|
9682 |
+ |
|
9683 |
+@item psnr |
|
9684 |
+Enables computing psnr along with vmaf. |
|
9685 |
+ |
|
9686 |
+@item ssim |
|
9687 |
+Enables computing ssim along with vmaf. |
|
9688 |
+ |
|
9689 |
+@item ms_ssim |
|
9690 |
+Enables computing ms_ssim along with vmaf. |
|
9691 |
+ |
|
9692 |
+@item pool |
|
9693 |
+Set the pool method to be used for computing vmaf. |
|
9694 |
+@end table |
|
9695 |
+ |
|
9696 |
+For example: |
|
9697 |
+@example |
|
9698 |
+ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null - |
|
9699 |
+@end example |
|
9700 |
+ |
|
9701 |
+Example with options: |
|
9702 |
+@example |
|
9703 |
+ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:enable_transform=1" -f null - |
|
9704 |
+@end example |
|
9705 |
+ |
|
9642 | 9706 |
@section limiter |
9643 | 9707 |
|
9644 | 9708 |
Limits the pixel components values to the specified range [min, max]. |
... | ... |
@@ -216,6 +216,7 @@ OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o |
216 | 216 |
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o |
217 | 217 |
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o |
218 | 218 |
OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o |
219 |
+OBJS-$(CONFIG_LIBVMAF_FILTER) += vf_libvmaf.o dualinput.o framesync.o |
|
219 | 220 |
OBJS-$(CONFIG_LIMITER_FILTER) += vf_limiter.o |
220 | 221 |
OBJS-$(CONFIG_LOOP_FILTER) += f_loop.o |
221 | 222 |
OBJS-$(CONFIG_LUMAKEY_FILTER) += vf_lumakey.o |
... | ... |
@@ -228,6 +228,7 @@ static void register_all(void) |
228 | 228 |
REGISTER_FILTER(INTERLEAVE, interleave, vf); |
229 | 229 |
REGISTER_FILTER(KERNDEINT, kerndeint, vf); |
230 | 230 |
REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf); |
231 |
+ REGISTER_FILTER(LIBVMAF, libvmaf, vf); |
|
231 | 232 |
REGISTER_FILTER(LIMITER, limiter, vf); |
232 | 233 |
REGISTER_FILTER(LOOP, loop, vf); |
233 | 234 |
REGISTER_FILTER(LUMAKEY, lumakey, vf); |
234 | 235 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,339 @@ |
0 |
+/* |
|
1 |
+ * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com> |
|
2 |
+ * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com> |
|
3 |
+ * |
|
4 |
+ * This file is part of FFmpeg. |
|
5 |
+ * |
|
6 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
7 |
+ * modify it under the terms of the GNU Lesser General Public |
|
8 |
+ * License as published by the Free Software Foundation; either |
|
9 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
10 |
+ * |
|
11 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
12 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
14 |
+ * Lesser General Public License for more details. |
|
15 |
+ * |
|
16 |
+ * You should have received a copy of the GNU Lesser General Public |
|
17 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
18 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
19 |
+ */ |
|
20 |
+ |
|
21 |
+/** |
|
22 |
+ * @file |
|
23 |
+ * Calculate the VMAF between two input videos. |
|
24 |
+ */ |
|
25 |
+ |
|
26 |
+#include <inttypes.h> |
|
27 |
+#include <pthread.h> |
|
28 |
+#include <libvmaf.h> |
|
29 |
+#include "libavutil/avstring.h" |
|
30 |
+#include "libavutil/opt.h" |
|
31 |
+#include "libavutil/pixdesc.h" |
|
32 |
+#include "avfilter.h" |
|
33 |
+#include "dualinput.h" |
|
34 |
+#include "drawutils.h" |
|
35 |
+#include "formats.h" |
|
36 |
+#include "internal.h" |
|
37 |
+#include "video.h" |
|
38 |
+ |
|
39 |
+typedef struct LIBVMAFContext { |
|
40 |
+ const AVClass *class; |
|
41 |
+ FFDualInputContext dinput; |
|
42 |
+ const AVPixFmtDescriptor *desc; |
|
43 |
+ char *format; |
|
44 |
+ int width; |
|
45 |
+ int height; |
|
46 |
+ double vmaf_score; |
|
47 |
+ pthread_t vmaf_thread; |
|
48 |
+ pthread_mutex_t lock; |
|
49 |
+ pthread_cond_t cond; |
|
50 |
+ int eof; |
|
51 |
+ AVFrame *gmain; |
|
52 |
+ AVFrame *gref; |
|
53 |
+ int frame_set; |
|
54 |
+ char *model_path; |
|
55 |
+ char *log_path; |
|
56 |
+ char *log_fmt; |
|
57 |
+ int disable_clip; |
|
58 |
+ int disable_avx; |
|
59 |
+ int enable_transform; |
|
60 |
+ int phone_model; |
|
61 |
+ int psnr; |
|
62 |
+ int ssim; |
|
63 |
+ int ms_ssim; |
|
64 |
+ char *pool; |
|
65 |
+} LIBVMAFContext; |
|
66 |
+ |
|
67 |
+#define OFFSET(x) offsetof(LIBVMAFContext, x) |
|
68 |
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
|
69 |
+ |
|
70 |
+static const AVOption libvmaf_options[] = { |
|
71 |
+ {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS}, |
|
72 |
+ {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, |
|
73 |
+ {"log_fmt", "Set the format of the log (xml or json).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, |
|
74 |
+ {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, |
|
75 |
+ {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, |
|
76 |
+ {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, |
|
77 |
+ {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, |
|
78 |
+ {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS}, |
|
79 |
+ {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS}, |
|
80 |
+ { NULL } |
|
81 |
+}; |
|
82 |
+ |
|
83 |
+AVFILTER_DEFINE_CLASS(libvmaf); |
|
84 |
+ |
|
85 |
+#define read_frame_fn(type, bits) \ |
|
86 |
+ static int read_frame_##bits##bit(float *ref_data, float *main_data, \ |
|
87 |
+ float *temp_data, int stride, \ |
|
88 |
+ double *score, void *ctx) \ |
|
89 |
+{ \ |
|
90 |
+ LIBVMAFContext *s = (LIBVMAFContext *) ctx; \ |
|
91 |
+ int ret; \ |
|
92 |
+ \ |
|
93 |
+ pthread_mutex_lock(&s->lock); \ |
|
94 |
+ \ |
|
95 |
+ while (!s->frame_set && !s->eof) { \ |
|
96 |
+ pthread_cond_wait(&s->cond, &s->lock); \ |
|
97 |
+ } \ |
|
98 |
+ \ |
|
99 |
+ if (s->frame_set) { \ |
|
100 |
+ int ref_stride = s->gref->linesize[0]; \ |
|
101 |
+ int main_stride = s->gmain->linesize[0]; \ |
|
102 |
+ \ |
|
103 |
+ const type *ref_ptr = (const type *) s->gref->data[0]; \ |
|
104 |
+ const type *main_ptr = (const type *) s->gmain->data[0]; \ |
|
105 |
+ \ |
|
106 |
+ float *ptr = ref_data; \ |
|
107 |
+ \ |
|
108 |
+ int h = s->height; \ |
|
109 |
+ int w = s->width; \ |
|
110 |
+ \ |
|
111 |
+ int i,j; \ |
|
112 |
+ \ |
|
113 |
+ for (i = 0; i < h; i++) { \ |
|
114 |
+ for ( j = 0; j < w; j++) { \ |
|
115 |
+ ptr[j] = (float)ref_ptr[j]; \ |
|
116 |
+ } \ |
|
117 |
+ ref_ptr += ref_stride / sizeof(*ref_ptr); \ |
|
118 |
+ ptr += stride / sizeof(*ptr); \ |
|
119 |
+ } \ |
|
120 |
+ \ |
|
121 |
+ ptr = main_data; \ |
|
122 |
+ \ |
|
123 |
+ for (i = 0; i < h; i++) { \ |
|
124 |
+ for (j = 0; j < w; j++) { \ |
|
125 |
+ ptr[j] = (float)main_ptr[j]; \ |
|
126 |
+ } \ |
|
127 |
+ main_ptr += main_stride / sizeof(*main_ptr); \ |
|
128 |
+ ptr += stride / sizeof(*ptr); \ |
|
129 |
+ } \ |
|
130 |
+ } \ |
|
131 |
+ \ |
|
132 |
+ ret = !s->frame_set; \ |
|
133 |
+ \ |
|
134 |
+ s->frame_set = 0; \ |
|
135 |
+ \ |
|
136 |
+ pthread_cond_signal(&s->cond); \ |
|
137 |
+ pthread_mutex_unlock(&s->lock); \ |
|
138 |
+ \ |
|
139 |
+ if (ret) { \ |
|
140 |
+ return 2; \ |
|
141 |
+ } \ |
|
142 |
+ \ |
|
143 |
+ return 0; \ |
|
144 |
+} |
|
145 |
+ |
|
146 |
+read_frame_fn(uint8_t, 8); |
|
147 |
+read_frame_fn(uint16_t, 10); |
|
148 |
+ |
|
149 |
+static void compute_vmaf_score(LIBVMAFContext *s) |
|
150 |
+{ |
|
151 |
+ int (*read_frame)(float *ref_data, float *main_data, float *temp_data, |
|
152 |
+ int stride, double *score, void *ctx); |
|
153 |
+ |
|
154 |
+ if (s->desc->comp[0].depth <= 8) { |
|
155 |
+ read_frame = read_frame_8bit; |
|
156 |
+ } else { |
|
157 |
+ read_frame = read_frame_10bit; |
|
158 |
+ } |
|
159 |
+ |
|
160 |
+ s->vmaf_score = compute_vmaf(s->format, s->width, s->height, read_frame, s, |
|
161 |
+ s->model_path, s->log_path, s->log_fmt, 0, 0, |
|
162 |
+ s->enable_transform, s->phone_model, s->psnr, |
|
163 |
+ s->ssim, s->ms_ssim, s->pool); |
|
164 |
+} |
|
165 |
+ |
|
166 |
+static void *call_vmaf(void *ctx) |
|
167 |
+{ |
|
168 |
+ LIBVMAFContext *s = (LIBVMAFContext *) ctx; |
|
169 |
+ compute_vmaf_score(s); |
|
170 |
+ av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score); |
|
171 |
+ pthread_exit(NULL); |
|
172 |
+} |
|
173 |
+ |
|
174 |
+static AVFrame *do_vmaf(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref) |
|
175 |
+{ |
|
176 |
+ LIBVMAFContext *s = ctx->priv; |
|
177 |
+ |
|
178 |
+ pthread_mutex_lock(&s->lock); |
|
179 |
+ |
|
180 |
+ while (s->frame_set != 0) { |
|
181 |
+ pthread_cond_wait(&s->cond, &s->lock); |
|
182 |
+ } |
|
183 |
+ |
|
184 |
+ av_frame_ref(s->gref, ref); |
|
185 |
+ av_frame_ref(s->gmain, main); |
|
186 |
+ |
|
187 |
+ s->frame_set = 1; |
|
188 |
+ |
|
189 |
+ pthread_cond_signal(&s->cond); |
|
190 |
+ pthread_mutex_unlock(&s->lock); |
|
191 |
+ |
|
192 |
+ return main; |
|
193 |
+} |
|
194 |
+ |
|
195 |
+static av_cold int init(AVFilterContext *ctx) |
|
196 |
+{ |
|
197 |
+ LIBVMAFContext *s = ctx->priv; |
|
198 |
+ |
|
199 |
+ s->gref = av_frame_alloc(); |
|
200 |
+ s->gmain = av_frame_alloc(); |
|
201 |
+ |
|
202 |
+ pthread_mutex_init(&s->lock, NULL); |
|
203 |
+ pthread_cond_init (&s->cond, NULL); |
|
204 |
+ |
|
205 |
+ s->dinput.process = do_vmaf; |
|
206 |
+ return 0; |
|
207 |
+} |
|
208 |
+ |
|
209 |
+static int query_formats(AVFilterContext *ctx) |
|
210 |
+{ |
|
211 |
+ static const enum AVPixelFormat pix_fmts[] = { |
|
212 |
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, |
|
213 |
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, |
|
214 |
+ AV_PIX_FMT_NONE |
|
215 |
+ }; |
|
216 |
+ |
|
217 |
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts); |
|
218 |
+ if (!fmts_list) |
|
219 |
+ return AVERROR(ENOMEM); |
|
220 |
+ return ff_set_common_formats(ctx, fmts_list); |
|
221 |
+} |
|
222 |
+ |
|
223 |
+ |
|
224 |
+static int config_input_ref(AVFilterLink *inlink) |
|
225 |
+{ |
|
226 |
+ AVFilterContext *ctx = inlink->dst; |
|
227 |
+ LIBVMAFContext *s = ctx->priv; |
|
228 |
+ int th; |
|
229 |
+ |
|
230 |
+ if (ctx->inputs[0]->w != ctx->inputs[1]->w || |
|
231 |
+ ctx->inputs[0]->h != ctx->inputs[1]->h) { |
|
232 |
+ av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n"); |
|
233 |
+ return AVERROR(EINVAL); |
|
234 |
+ } |
|
235 |
+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) { |
|
236 |
+ av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n"); |
|
237 |
+ return AVERROR(EINVAL); |
|
238 |
+ } |
|
239 |
+ |
|
240 |
+ s->desc = av_pix_fmt_desc_get(inlink->format); |
|
241 |
+ s->width = ctx->inputs[0]->w; |
|
242 |
+ s->height = ctx->inputs[0]->h; |
|
243 |
+ |
|
244 |
+ th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s); |
|
245 |
+ if (th) { |
|
246 |
+ av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n"); |
|
247 |
+ return AVERROR(EINVAL); |
|
248 |
+ } |
|
249 |
+ |
|
250 |
+ return 0; |
|
251 |
+} |
|
252 |
+ |
|
253 |
+ |
|
254 |
+static int config_output(AVFilterLink *outlink) |
|
255 |
+{ |
|
256 |
+ AVFilterContext *ctx = outlink->src; |
|
257 |
+ LIBVMAFContext *s = ctx->priv; |
|
258 |
+ AVFilterLink *mainlink = ctx->inputs[0]; |
|
259 |
+ int ret; |
|
260 |
+ |
|
261 |
+ outlink->w = mainlink->w; |
|
262 |
+ outlink->h = mainlink->h; |
|
263 |
+ outlink->time_base = mainlink->time_base; |
|
264 |
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; |
|
265 |
+ outlink->frame_rate = mainlink->frame_rate; |
|
266 |
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) |
|
267 |
+ return ret; |
|
268 |
+ |
|
269 |
+ return 0; |
|
270 |
+} |
|
271 |
+ |
|
272 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
273 |
+{ |
|
274 |
+ LIBVMAFContext *s = inlink->dst->priv; |
|
275 |
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref); |
|
276 |
+} |
|
277 |
+ |
|
278 |
+static int request_frame(AVFilterLink *outlink) |
|
279 |
+{ |
|
280 |
+ LIBVMAFContext *s = outlink->src->priv; |
|
281 |
+ return ff_dualinput_request_frame(&s->dinput, outlink); |
|
282 |
+} |
|
283 |
+ |
|
284 |
+static av_cold void uninit(AVFilterContext *ctx) |
|
285 |
+{ |
|
286 |
+ LIBVMAFContext *s = ctx->priv; |
|
287 |
+ |
|
288 |
+ ff_dualinput_uninit(&s->dinput); |
|
289 |
+ |
|
290 |
+ pthread_mutex_lock(&s->lock); |
|
291 |
+ s->eof = 1; |
|
292 |
+ pthread_cond_signal(&s->cond); |
|
293 |
+ pthread_mutex_unlock(&s->lock); |
|
294 |
+ |
|
295 |
+ pthread_join(s->vmaf_thread, NULL); |
|
296 |
+ |
|
297 |
+ av_frame_free(&s->gref); |
|
298 |
+ av_frame_free(&s->gmain); |
|
299 |
+ |
|
300 |
+ pthread_mutex_destroy(&s->lock); |
|
301 |
+ pthread_cond_destroy(&s->cond); |
|
302 |
+} |
|
303 |
+ |
|
304 |
+static const AVFilterPad libvmaf_inputs[] = { |
|
305 |
+ { |
|
306 |
+ .name = "main", |
|
307 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
308 |
+ .filter_frame = filter_frame, |
|
309 |
+ },{ |
|
310 |
+ .name = "reference", |
|
311 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
312 |
+ .filter_frame = filter_frame, |
|
313 |
+ .config_props = config_input_ref, |
|
314 |
+ }, |
|
315 |
+ { NULL } |
|
316 |
+}; |
|
317 |
+ |
|
318 |
+static const AVFilterPad libvmaf_outputs[] = { |
|
319 |
+ { |
|
320 |
+ .name = "default", |
|
321 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
322 |
+ .config_props = config_output, |
|
323 |
+ .request_frame = request_frame, |
|
324 |
+ }, |
|
325 |
+ { NULL } |
|
326 |
+}; |
|
327 |
+ |
|
328 |
+AVFilter ff_vf_libvmaf = { |
|
329 |
+ .name = "libvmaf", |
|
330 |
+ .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."), |
|
331 |
+ .init = init, |
|
332 |
+ .uninit = uninit, |
|
333 |
+ .query_formats = query_formats, |
|
334 |
+ .priv_size = sizeof(LIBVMAFContext), |
|
335 |
+ .priv_class = &libvmaf_class, |
|
336 |
+ .inputs = libvmaf_inputs, |
|
337 |
+ .outputs = libvmaf_outputs, |
|
338 |
+}; |