Browse code

avfilter: add framerate video filter

Signed-off-by: Paul B Mahol <onemda@gmail.com>

Paul B Mahol authored on 2015/07/24 16:09:16
Showing 6 changed files
... ...
@@ -37,6 +37,7 @@ version <next>:
37 37
 - hstack and vstack filter
38 38
 - Support DNx100 (1440x1080@8)
39 39
 - VAAPI hevc hwaccel
40
+- framerate filter
40 41
 
41 42
 
42 43
 version 2.7:
... ...
@@ -6015,6 +6015,51 @@ ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
6015 6015
 ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
6016 6016
 @end example
6017 6017
 
6018
+@section framerate
6019
+
6020
+Change the frame rate by interpolating new video output frames from the source
6021
+frames.
6022
+
6023
+This filter is not designed to function correctly with interlaced media. If
6024
+you wish to change the frame rate of interlaced media then you are required
6025
+to deinterlace before this filter and re-interlace after this filter.
6026
+
6027
+A description of the accepted options follows.
6028
+
6029
+@table @option
6030
+@item fps
6031
+Specify the output frames per second. This option can also be specified
6032
+as a value alone. The default is @code{50}.
6033
+
6034
+@item interp_start
6035
+Specify the start of a range where the output frame will be created as a
6036
+linear interpolation of two frames. The range is [@code{0}-@code{255}],
6037
+the default is @code{15}.
6038
+
6039
+@item interp_end
6040
+Specify the end of a range where the output frame will be created as a
6041
+linear interpolation of two frames. The range is [@code{0}-@code{255}],
6042
+the default is @code{240}.
6043
+
6044
+@item scene
6045
+Specify the level at which a scene change is detected as a value between
6046
+0 and 100 to indicate a new scene; a low value reflects a low
6047
+probability for the current frame to introduce a new scene, while a higher
6048
+value means the current frame is more likely to introduce one.
6049
+The default is @code{7}.
6050
+
6051
+@item flags
6052
+Specify flags influencing the filter process.
6053
+
6054
+The following value is accepted for @var{flags}:
6055
+
6056
+@table @option
6057
+@item scene_change_detect, scd
6058
+Enable scene change detection using the value of the option @var{scene}.
6059
+This flag is enabled by default.
6060
+@end table
6061
+@end table
6062
+
6018 6063
 @section framestep
6019 6064
 
6020 6065
 Select one frame every N-th frame.
... ...
@@ -142,6 +142,7 @@ OBJS-$(CONFIG_FIND_RECT_FILTER)              += vf_find_rect.o lavfutils.o
142 142
 OBJS-$(CONFIG_FORMAT_FILTER)                 += vf_format.o
143 143
 OBJS-$(CONFIG_FPS_FILTER)                    += vf_fps.o
144 144
 OBJS-$(CONFIG_FRAMEPACK_FILTER)              += vf_framepack.o
145
+OBJS-$(CONFIG_FRAMERATE_FILTER)              += vf_framerate.o
145 146
 OBJS-$(CONFIG_FRAMESTEP_FILTER)              += vf_framestep.o
146 147
 OBJS-$(CONFIG_FREI0R_FILTER)                 += vf_frei0r.o
147 148
 OBJS-$(CONFIG_FSPP_FILTER)                   += vf_fspp.o
... ...
@@ -158,6 +158,7 @@ void avfilter_register_all(void)
158 158
     REGISTER_FILTER(FORMAT,         format,         vf);
159 159
     REGISTER_FILTER(FPS,            fps,            vf);
160 160
     REGISTER_FILTER(FRAMEPACK,      framepack,      vf);
161
+    REGISTER_FILTER(FRAMERATE,      framerate,      vf);
161 162
     REGISTER_FILTER(FRAMESTEP,      framestep,      vf);
162 163
     REGISTER_FILTER(FREI0R,         frei0r,         vf);
163 164
     REGISTER_FILTER(FSPP,           fspp,           vf);
... ...
@@ -30,7 +30,7 @@
30 30
 #include "libavutil/version.h"
31 31
 
32 32
 #define LIBAVFILTER_VERSION_MAJOR  5
33
-#define LIBAVFILTER_VERSION_MINOR  38
33
+#define LIBAVFILTER_VERSION_MINOR  39
34 34
 #define LIBAVFILTER_VERSION_MICRO 100
35 35
 
36 36
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
37 37
new file mode 100644
... ...
@@ -0,0 +1,592 @@
0
+/*
1
+ * Copyright (C) 2012 Mark Himsley
2
+ *
3
+ * get_scene_score() Copyright (c) 2011 Stefano Sabatini
4
+ * taken from libavfilter/vf_select.c
5
+ *
6
+ * This file is part of FFmpeg.
7
+ *
8
+ * FFmpeg is free software; you can redistribute it and/or
9
+ * modify it under the terms of the GNU Lesser General Public
10
+ * License as published by the Free Software Foundation; either
11
+ * version 2.1 of the License, or (at your option) any later version.
12
+ *
13
+ * FFmpeg is distributed in the hope that it will be useful,
14
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16
+ * Lesser General Public License for more details.
17
+ *
18
+ * You should have received a copy of the GNU Lesser General Public
19
+ * License along with FFmpeg; if not, write to the Free Software
20
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21
+ */
22
+
23
+/**
24
+ * @file
25
+ * filter for upsampling or downsampling a progressive source
26
+ */
27
+
28
+#define DEBUG
29
+
30
+#include "libavutil/avassert.h"
31
+#include "libavutil/imgutils.h"
32
+#include "libavutil/internal.h"
33
+#include "libavutil/opt.h"
34
+#include "libavutil/pixdesc.h"
35
+#include "libavutil/pixelutils.h"
36
+
37
+#include "avfilter.h"
38
+#include "internal.h"
39
+#include "video.h"
40
+
41
+#define N_SRCE 3
42
+
43
typedef struct FrameRateContext {
    const AVClass *class;
    // parameters
    AVRational dest_frame_rate;         ///< output frames per second
    int flags;                          ///< flags affecting frame rate conversion algorithm
    double scene_score;                 ///< score that denotes a scene change has happened
    int interp_start;                   ///< start of range to apply linear interpolation
    int interp_end;                     ///< end of range to apply linear interpolation

    int line_size[4];                   ///< bytes of pixel data per line for each plane
    int vsub;                           ///< chroma vertical subsampling shift (log2), from the pixel format descriptor

    int frst, next, prev, crnt, last;   ///< fixed indices into srce[]: frst holds the newest frame, last the oldest
    int pending_srce_frames;            ///< how many input frames are still waiting to be processed
    int flush;                          ///< are we flushing final frames
    int pending_end_frame;              ///< flag indicating we are waiting to call filter_frame()

    AVRational srce_time_base;          ///< timebase of source

    AVRational dest_time_base;          ///< timebase of destination
    int32_t dest_frame_num;             ///< number of output frames produced so far (0 selects the initial-pts path)
    int64_t last_dest_frame_pts;        ///< pts of the last frame output
    int64_t average_srce_pts_dest_delta;///< average input pts delta converted from input rate to output rate
    int64_t average_dest_pts_delta;     ///< calculated average output pts delta

    av_pixelutils_sad_fn sad;           ///< Sum of the absolute difference function (scene detect only)
    double prev_mafd;                   ///< previous MAFD                           (scene detect only)

    AVFrame *srce[N_SRCE];              ///< buffered source frames
    int64_t srce_pts_dest[N_SRCE];      ///< pts for source frames scaled to output timebase
    int64_t pts;                        ///< pts of frame we are working on
} FrameRateContext;
75
+
76
#define OFFSET(x) offsetof(FrameRateContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define FRAMERATE_FLAG_SCD 01 /* octal literal, value 1 — enables scene change detection */

// AVOption table: name, help, field offset, type, default, min, max, flags[, unit]
static const AVOption framerate_options[] = {
    {"fps",                 "required output frames per second rate", OFFSET(dest_frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="50"},             0,       INT_MAX, V|F },

    {"interp_start",        "point to start linear interpolation",    OFFSET(interp_start),    AV_OPT_TYPE_INT,      {.i64=15},                 0,       255,     V|F },
    {"interp_end",          "point to end linear interpolation",      OFFSET(interp_end),      AV_OPT_TYPE_INT,      {.i64=240},                0,       255,     V|F },
    {"scene",               "scene change level",                     OFFSET(scene_score),     AV_OPT_TYPE_DOUBLE,   {.dbl=7.0},                0,       INT_MAX, V|F },

    // "flags" defaults to 1, i.e. FRAMERATE_FLAG_SCD set
    {"flags",               "set flags",                              OFFSET(flags),           AV_OPT_TYPE_FLAGS,    {.i64=1},                  0,       INT_MAX, V|F, "flags" },
    {"scene_change_detect", "enable scene change detection",          0,                       AV_OPT_TYPE_CONST,    {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },
    {"scd",                 "enable scene change detection",          0,                       AV_OPT_TYPE_CONST,    {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },

    {NULL}
};

AVFILTER_DEFINE_CLASS(framerate);
96
+
97
+static void next_source(AVFilterContext *ctx)
98
+{
99
+    FrameRateContext *s = ctx->priv;
100
+    int i;
101
+
102
+    ff_dlog(ctx,  "next_source()\n");
103
+
104
+    if (s->srce[s->last] && s->srce[s->last] != s->srce[s->last-1]) {
105
+        ff_dlog(ctx, "next_source() unlink %d\n", s->last);
106
+        av_frame_free(&s->srce[s->last]);
107
+    }
108
+    for (i = s->last; i > s->frst; i--) {
109
+        ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
110
+        s->srce[i] = s->srce[i - 1];
111
+    }
112
+    ff_dlog(ctx, "next_source() make %d null\n", s->frst);
113
+    s->srce[s->frst] = NULL;
114
+}
115
+
116
+static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
117
+{
118
+    FrameRateContext *s = ctx->priv;
119
+    double ret = 0;
120
+
121
+    ff_dlog(ctx, "get_scene_score()\n");
122
+
123
+    if (crnt &&
124
+        crnt->height == next->height &&
125
+        crnt->width  == next->width) {
126
+        int x, y;
127
+        int64_t sad;
128
+        double mafd, diff;
129
+        uint8_t *p1 = crnt->data[0];
130
+        uint8_t *p2 = next->data[0];
131
+        const int p1_linesize = crnt->linesize[0];
132
+        const int p2_linesize = next->linesize[0];
133
+
134
+        ff_dlog(ctx, "get_scene_score() process\n");
135
+
136
+        for (sad = y = 0; y < crnt->height; y += 8) {
137
+            for (x = 0; x < p1_linesize; x += 8) {
138
+                sad += s->sad(p1 + y * p1_linesize + x,
139
+                              p1_linesize,
140
+                              p2 + y * p2_linesize + x,
141
+                              p2_linesize);
142
+            }
143
+        }
144
+        emms_c();
145
+        mafd = sad / (crnt->height * crnt->width * 3);
146
+        diff = fabs(mafd - s->prev_mafd);
147
+        ret  = av_clipf(FFMIN(mafd, diff), 0, 100.0);
148
+        s->prev_mafd = mafd;
149
+    }
150
+        ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
151
+    return ret;
152
+}
153
+
154
/**
 * Produce (at most) one output frame for the current working pts.
 *
 * Depending on where s->pts falls between the buffered source frames this
 * either skips an input frame, copies the nearest source frame, or blends
 * crnt with next/prev using an 8-bit linear interpolation factor.
 *
 * @param ctx  filter context
 * @param stop non-zero when called from filter_frame(); clears
 *             pending_end_frame once a frame has been emitted
 * @return 0, or a negative AVERROR code (ENOMEM, or error from ff_filter_frame)
 */
static int process_work_frame(AVFilterContext *ctx, int stop)
{
    FrameRateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t work_next_pts;
    AVFrame *copy_src1, *copy_src2, *work;
    int interpolate;

    ff_dlog(ctx, "process_work_frame()\n");

    ff_dlog(ctx, "process_work_frame() pending_input_frames %d\n", s->pending_srce_frames);

    if (s->srce[s->prev]) ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
    if (s->srce[s->crnt]) ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
    if (s->srce[s->next]) ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);

    if (!s->srce[s->crnt]) {
        // the filter cannot do anything
        ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
        next_source(ctx);
        return 0;
    }

    // pts the frame after this one would get in the output timebase
    work_next_pts = s->pts + s->average_dest_pts_delta;

    ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
    ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
    if (s->srce[s->prev])
        ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->prev], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->crnt])
        ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->crnt], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->next])
        ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->next], s->dest_time_base.num, s->dest_time_base.den);

    av_assert0(s->srce[s->next]);

    // should filter be skipping input frame (output frame rate is lower than input frame rate)
    if (!s->flush && s->pts >= s->srce_pts_dest[s->next]) {
        ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
        next_source(ctx);
        s->pending_srce_frames--;
        return 0;
    }

    // calculate interpolation
    // NOTE(review): average_srce_pts_dest_delta could still be 0 here if the
    // buffered source frames carried identical pts — that would divide by
    // zero; confirm upstream guarantees monotonically increasing pts.
    interpolate = (int) ((s->pts - s->srce_pts_dest[s->crnt]) * 256.0 / s->average_srce_pts_dest_delta);
    ff_dlog(ctx, "process_work_frame() interpolate:%d/256\n", interpolate);
    copy_src1 = s->srce[s->crnt];
    if (interpolate > s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
        copy_src1 = s->srce[s->next];
    }
    if (s->srce[s->prev] && interpolate < -s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:PREV\n");
        copy_src1 = s->srce[s->prev];
    }

    // decide whether to blend two frames
    if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
        double interpolate_scene_score = 0;

        if (interpolate > 0) {
            ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
            copy_src2 = s->srce[s->next];
        } else {
            ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
            copy_src2 = s->srce[s->prev];
        }
        // copy_src2 may be NULL (no prev at stream start) — both uses below check it
        if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
            interpolate_scene_score = get_scene_score(ctx, copy_src1, copy_src2);
            ff_dlog(ctx, "process_work_frame() interpolate scene score:%f\n", interpolate_scene_score);
        }
        // decide if the shot-change detection allows us to blend two frames
        if (interpolate_scene_score < s->scene_score && copy_src2) {
            // |interpolate| <= interp_end <= 255 in this branch, so it fits uint16_t
            uint16_t src2_factor = abs(interpolate);
            uint16_t src1_factor = 256 - src2_factor;
            int plane, line, pixel;

            // get work-space for output frame
            work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!work)
                return AVERROR(ENOMEM);

            av_frame_copy_props(work, s->srce[s->crnt]);

            ff_dlog(ctx, "process_work_frame() INTERPOLATE to create work frame\n");
            for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
                int cpy_line_width = s->line_size[plane];
                uint8_t *cpy_src1_data = copy_src1->data[plane];
                int cpy_src1_line_size = copy_src1->linesize[plane];
                uint8_t *cpy_src2_data = copy_src2->data[plane];
                int cpy_src2_line_size = copy_src2->linesize[plane];
                // planes 1 and 2 are chroma: vertically subsampled by vsub
                int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
                uint8_t *cpy_dst_data = work->data[plane];
                int cpy_dst_line_size = work->linesize[plane];
                if (plane <1 || plane >2) {
                    // luma or alpha
                    for (line = 0; line < cpy_src_h; line++) {
                        for (pixel = 0; pixel < cpy_line_width; pixel++) {
                            // integer version of (src1 * src1_factor) + (src2 * src2_factor) + 0.5
                            // 0.5 is for rounding
                            // 128 is the integer representation of 0.5 << 8
                            cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + 128) >> 8;
                        }
                        cpy_src1_data += cpy_src1_line_size;
                        cpy_src2_data += cpy_src2_line_size;
                        cpy_dst_data += cpy_dst_line_size;
                    }
                } else {
                    // chroma
                    for (line = 0; line < cpy_src_h; line++) {
                        for (pixel = 0; pixel < cpy_line_width; pixel++) {
                            // as above
                            // because U and V are based around 128 we have to subtract 128 from the components.
                            // 32896 is the integer representation of 128.5 << 8
                            cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - 128) * src1_factor) + ((cpy_src2_data[pixel] - 128) * src2_factor) + 32896) >> 8;
                        }
                        cpy_src1_data += cpy_src1_line_size;
                        cpy_src2_data += cpy_src2_line_size;
                        cpy_dst_data += cpy_dst_line_size;
                    }
                }
            }
            goto copy_done;
        }
        else {
            ff_dlog(ctx, "process_work_frame() CUT - DON'T INTERPOLATE\n");
        }
    }

    ff_dlog(ctx, "process_work_frame() COPY to the work frame\n");
    // copy the frame we decided is our base source
    work = av_frame_clone(copy_src1);
    if (!work)
        return AVERROR(ENOMEM);

copy_done:
    work->pts = s->pts;

    // should filter be re-using input frame (output frame rate is higher than input frame rate)
    if (!s->flush && (work_next_pts + s->average_dest_pts_delta) < (s->srce_pts_dest[s->crnt] + s->average_srce_pts_dest_delta)) {
        ff_dlog(ctx, "process_work_frame() REPEAT FRAME\n");
    } else {
        ff_dlog(ctx, "process_work_frame() CONSUME FRAME, move to next frame\n");
        s->pending_srce_frames--;
        next_source(ctx);
    }
    ff_dlog(ctx, "process_work_frame() output a frame\n");
    s->dest_frame_num++;
    if (stop)
        s->pending_end_frame = 0;
    s->last_dest_frame_pts = work->pts;

    // ownership of 'work' passes to the next filter
    return ff_filter_frame(ctx->outputs[0], work);
}
312
+
313
+static void set_srce_frame_dest_pts(AVFilterContext *ctx)
314
+{
315
+    FrameRateContext *s = ctx->priv;
316
+
317
+    ff_dlog(ctx, "set_srce_frame_output_pts()\n");
318
+
319
+    // scale the input pts from the timebase difference between input and output
320
+    if (s->srce[s->prev])
321
+        s->srce_pts_dest[s->prev] = av_rescale_q(s->srce[s->prev]->pts, s->srce_time_base, s->dest_time_base);
322
+    if (s->srce[s->crnt])
323
+        s->srce_pts_dest[s->crnt] = av_rescale_q(s->srce[s->crnt]->pts, s->srce_time_base, s->dest_time_base);
324
+    if (s->srce[s->next])
325
+        s->srce_pts_dest[s->next] = av_rescale_q(s->srce[s->next]->pts, s->srce_time_base, s->dest_time_base);
326
+}
327
+
328
/**
 * Decide the pts of the next output ("work") frame.
 *
 * Updates the running average of the source pts delta (rescaled into the
 * destination timebase), refreshes the cached per-slot destination pts,
 * lazily derives the average output pts delta from the configured frame
 * rate, and finally sets s->pts for the frame to be produced.
 * Requires crnt and next source frames to be present (asserted).
 */
static void set_work_frame_pts(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int64_t pts, average_srce_pts_delta = 0;

    ff_dlog(ctx, "set_work_frame_pts()\n");

    av_assert0(s->srce[s->next]);
    av_assert0(s->srce[s->crnt]);

    ff_dlog(ctx, "set_work_frame_pts() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
    ff_dlog(ctx, "set_work_frame_pts() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
    if (s->srce[s->prev])
        ff_dlog(ctx, "set_work_frame_pts() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);

    average_srce_pts_delta = s->average_srce_pts_dest_delta;
    ff_dlog(ctx, "set_work_frame_pts() initial average srce pts:%"PRId64"\n", average_srce_pts_delta);

    // calculate the PTS delta
    // running average: halve the sum of the previous average and the new delta;
    // fall back to the crnt-prev delta when next and crnt share a pts
    if ((pts = (s->srce[s->next]->pts - s->srce[s->crnt]->pts))) {
        average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
    } else if (s->srce[s->prev] && (pts = (s->srce[s->crnt]->pts - s->srce[s->prev]->pts))) {
        average_srce_pts_delta = average_srce_pts_delta?((average_srce_pts_delta+pts)>>1):pts;
    }

    s->average_srce_pts_dest_delta = av_rescale_q(average_srce_pts_delta, s->srce_time_base, s->dest_time_base);
    ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64"\n", average_srce_pts_delta);
    ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64" at dest time base:%u/%u\n",
            s->average_srce_pts_dest_delta, s->dest_time_base.num, s->dest_time_base.den);

    set_srce_frame_dest_pts(ctx);

    // derive the output pts step once, from timebase x frame rate
    if (ctx->inputs[0] && !s->average_dest_pts_delta) {
        // NOTE(review): av_q2d() returns a double that is truncated into an
        // int64_t here; d becomes 0 whenever the product is < 1 — that is the
        // "buggy path" handled below. Confirm whether rounding was intended.
        int64_t d = av_q2d(av_inv_q(av_mul_q(s->srce_time_base, s->dest_frame_rate)));
        if (d == 0) { // FIXME
            av_log(ctx, AV_LOG_WARNING, "Buggy path reached, use settb filter before this filter!\n");
            d = av_q2d(av_mul_q(ctx->inputs[0]->time_base, s->dest_frame_rate));
        }
        s->average_dest_pts_delta = av_rescale_q(d, s->srce_time_base, s->dest_time_base);
        ff_dlog(ctx, "set_frame_pts() average output pts from input timebase\n");
        ff_dlog(ctx, "set_work_frame_pts() average dest pts delta:%"PRId64"\n", s->average_dest_pts_delta);
    }

    // first output frame copies the current source pts; later ones step forward
    if (!s->dest_frame_num) {
        s->pts = s->last_dest_frame_pts = s->srce_pts_dest[s->crnt];
    } else {
        s->pts = s->last_dest_frame_pts + s->average_dest_pts_delta;
    }

    ff_dlog(ctx, "set_work_frame_pts() calculated pts:%"PRId64" at dest time base:%u/%u\n",
            s->pts, s->dest_time_base.num, s->dest_time_base.den);
}
380
+
381
+static av_cold int init(AVFilterContext *ctx)
382
+{
383
+    FrameRateContext *s = ctx->priv;
384
+
385
+    s->dest_frame_num = 0;
386
+
387
+    s->crnt = (N_SRCE)>>1;
388
+    s->last = N_SRCE - 1;
389
+
390
+    s->next = s->crnt - 1;
391
+    s->prev = s->crnt + 1;
392
+
393
+    return 0;
394
+}
395
+
396
+static av_cold void uninit(AVFilterContext *ctx)
397
+{
398
+    FrameRateContext *s = ctx->priv;
399
+    int i;
400
+
401
+    for (i = s->frst + 1; i > s->last; i++) {
402
+        if (s->srce[i] && (s->srce[i] != s->srce[i + 1]))
403
+            av_frame_free(&s->srce[i]);
404
+    }
405
+    av_frame_free(&s->srce[s->last]);
406
+}
407
+
408
+static int query_formats(AVFilterContext *ctx)
409
+{
410
+    static const enum AVPixelFormat pix_fmts[] = {
411
+        AV_PIX_FMT_YUV410P,
412
+        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVJ411P,
413
+        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
414
+        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
415
+        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
416
+        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
417
+        AV_PIX_FMT_NONE
418
+    };
419
+
420
+    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
421
+    if (!fmts_list)
422
+        return AVERROR(ENOMEM);
423
+    return ff_set_common_formats(ctx, fmts_list);
424
+}
425
+
426
+static int config_input(AVFilterLink *inlink)
427
+{
428
+    AVFilterContext *ctx = inlink->dst;
429
+    FrameRateContext *s = ctx->priv;
430
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
431
+    int plane;
432
+
433
+    for (plane = 0; plane < 4; plane++) {
434
+        s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
435
+                                                    plane);
436
+    }
437
+
438
+    s->vsub = pix_desc->log2_chroma_h;
439
+
440
+    s->sad = av_pixelutils_get_sad_fn(3, 3, 2, s); // 8x8 both sources aligned
441
+    if (!s->sad)
442
+        return AVERROR(EINVAL);
443
+
444
+    s->srce_time_base = inlink->time_base;
445
+
446
+    return 0;
447
+}
448
+
449
/**
 * Accept one input frame: store it in the newest window slot (taking
 * ownership of inpicref), update the pts bookkeeping, and attempt to
 * produce an output frame.
 *
 * @return 0 or a negative AVERROR code from process_work_frame()
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    FrameRateContext *s = ctx->priv;

    // we have one new frame
    s->pending_srce_frames++;

    if (inpicref->interlaced_frame)
        av_log(ctx, AV_LOG_WARNING, "Interlaced frame found - the output will not be correct.\n");

    // store the pointer to the new frame
    // (slot frst should already be NULL after next_source(); the free is defensive)
    av_frame_free(&s->srce[s->frst]);
    s->srce[s->frst] = inpicref;

    // once a current frame exists, compute the next output pts exactly once
    // per output frame; otherwise just refresh the rescaled source pts cache
    if (!s->pending_end_frame && s->srce[s->crnt]) {
        set_work_frame_pts(ctx);
        s->pending_end_frame = 1;
    } else {
        set_srce_frame_dest_pts(ctx);
    }

    return process_work_frame(ctx, 1);
}
473
+
474
/**
 * Configure the output link: derive a destination timebase fine enough to
 * represent the requested frame rate exactly (when possible), and publish
 * the configured frame rate and timebase on the outlink.
 *
 * @return 0 (always succeeds; inexact timebase conversion only warns)
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FrameRateContext *s = ctx->priv;
    int exact;

    ff_dlog(ctx, "config_output()\n");

    ff_dlog(ctx,
           "config_output() input time base:%u/%u (%f)\n",
           ctx->inputs[0]->time_base.num,ctx->inputs[0]->time_base.den,
           av_q2d(ctx->inputs[0]->time_base));

    // make sure timebase is small enough to hold the framerate

    // reduce gcd(tb.num*fps.num, tb.den*fps.den) / (tb.den*fps.num);
    // exact == 0 means the ratio had to be approximated to fit INT_MAX
    exact = av_reduce(&s->dest_time_base.num, &s->dest_time_base.den,
                      av_gcd((int64_t)s->srce_time_base.num * s->dest_frame_rate.num,
                             (int64_t)s->srce_time_base.den * s->dest_frame_rate.den ),
                      (int64_t)s->srce_time_base.den * s->dest_frame_rate.num, INT_MAX);

    av_log(ctx, AV_LOG_INFO,
           "time base:%u/%u -> %u/%u exact:%d\n",
           s->srce_time_base.num, s->srce_time_base.den,
           s->dest_time_base.num, s->dest_time_base.den, exact);
    if (!exact) {
        av_log(ctx, AV_LOG_WARNING, "Timebase conversion is not exact\n");
    }

    outlink->frame_rate = s->dest_frame_rate;
    outlink->time_base = s->dest_time_base;
    // keep request_frame() looping so this filter can emit several output
    // frames per input frame
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;

    ff_dlog(ctx,
           "config_output() output time base:%u/%u (%f) w:%d h:%d\n",
           outlink->time_base.num, outlink->time_base.den,
           av_q2d(outlink->time_base),
           outlink->w, outlink->h);


    av_log(ctx, AV_LOG_INFO, "fps -> fps:%u/%u scene score:%f interpolate start:%d end:%d\n",
            s->dest_frame_rate.num, s->dest_frame_rate.den,
            s->scene_score, s->interp_start, s->interp_end);

    return 0;
}
519
+
520
/**
 * Output-side frame request: pull a new input frame when the newest slot is
 * empty, otherwise repeat/flush — back-fill empty slots with aliases of the
 * remaining frames and emit another output frame until none are pending.
 *
 * @return 0, AVERROR_EOF when drained, or an error from upstream/processing
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FrameRateContext *s = ctx->priv;
    int val, i;

    ff_dlog(ctx, "request_frame()\n");

    // if there is no "next" frame AND we are not in flush then get one from our input filter
    // NOTE(review): s->flush is read here and in process_work_frame() but is
    // never assigned anywhere in this file — confirm whether the flush state
    // was meant to be set on upstream EOF.
    if (!s->srce[s->frst] && !s->flush) {
        ff_dlog(ctx, "request_frame() call source's request_frame()\n");
        if ((val = ff_request_frame(outlink->src->inputs[0])) < 0) {
            ff_dlog(ctx, "request_frame() source's request_frame() returned error:%d\n", val);
            return val;
        }
        ff_dlog(ctx, "request_frame() source's request_frame() returned:%d\n", val);
        return 0;
    }

    ff_dlog(ctx, "request_frame() REPEAT or FLUSH\n");

    if (s->pending_srce_frames <= 0) {
        ff_dlog(ctx, "request_frame() nothing else to do, return:EOF\n");
        return AVERROR_EOF;
    }

    // otherwise, make brand-new frame and pass to our output filter
    ff_dlog(ctx, "request_frame() FLUSH\n");

    // back fill at end of file when source has no more frames
    // (slots share pointers here; next_source()/uninit() must not double-free)
    for (i = s->last; i > s->frst; i--) {
        if (!s->srce[i - 1] && s->srce[i]) {
            ff_dlog(ctx, "request_frame() copy:%d to:%d\n", i, i - 1);
            s->srce[i - 1] = s->srce[i];
        }
    }

    set_work_frame_pts(ctx);
    return process_work_frame(ctx, 0);
}
560
+
561
// Single video input: configures per-link state and receives frames.
static const AVFilterPad framerate_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

// Single video output: drives the filter via request_frame.
static const AVFilterPad framerate_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

// Filter definition registered by allfilters.c as "framerate".
AVFilter ff_vf_framerate = {
    .name          = "framerate",
    .description   = NULL_IF_CONFIG_SMALL("Upsamples or downsamples progressive source between specified frame rates."),
    .priv_size     = sizeof(FrameRateContext),
    .priv_class    = &framerate_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = framerate_inputs,
    .outputs       = framerate_outputs,
};