Signed-off-by: Paul B Mahol <onemda@gmail.com>
Paul B Mahol authored on 2017/11/21 03:58:44... | ... |
@@ -10569,6 +10569,34 @@ Default method is @samp{fdiff}. |
10569 | 10569 |
Scene change detection threshold. Default is @code{5.0}. |
10570 | 10570 |
@end table |
10571 | 10571 |
|
10572 |
+@section mix |
|
10573 |
+ |
|
10574 |
+Mix several video input streams into one video stream. |
|
10575 |
+ |
|
10576 |
+A description of the accepted options follows. |
|
10577 |
+ |
|
10578 |
+@table @option |
|
10579 |
+@item inputs |
|
10580 |
+The number of inputs. If unspecified, it defaults to 2. |
|
10581 |
+ |
|
10582 |
+@item weights |
|
10583 |
+Specify the weight of each input video stream as a sequence. |
|
10584 |
+Each weight is separated by space. |
|
10585 |
+ |
|
10586 |
+@item duration |
|
10587 |
+Specify how end of stream is determined. |
|
10588 |
+@table @samp |
|
10589 |
+@item longest |
|
10590 |
+The duration of the longest input. (default) |
|
10591 |
+ |
|
10592 |
+@item shortest |
|
10593 |
+The duration of the shortest input. |
|
10594 |
+ |
|
10595 |
+@item first |
|
10596 |
+The duration of the first input. |
|
10597 |
+@end table |
|
10598 |
+@end table |
|
10599 |
+ |
|
10572 | 10600 |
@section mpdecimate |
10573 | 10601 |
|
10574 | 10602 |
Drop frames that do not differ greatly from the previous frame in |
... | ... |
@@ -240,6 +240,7 @@ OBJS-$(CONFIG_MESTIMATE_FILTER) += vf_mestimate.o motion_estimation |
240 | 240 |
OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o |
241 | 241 |
OBJS-$(CONFIG_MIDEQUALIZER_FILTER) += vf_midequalizer.o framesync.o |
242 | 242 |
OBJS-$(CONFIG_MINTERPOLATE_FILTER) += vf_minterpolate.o motion_estimation.o |
243 |
+OBJS-$(CONFIG_MIX_FILTER) += vf_mix.o |
|
243 | 244 |
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o |
244 | 245 |
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o |
245 | 246 |
OBJS-$(CONFIG_NLMEANS_FILTER) += vf_nlmeans.o |
... | ... |
@@ -250,6 +250,7 @@ static void register_all(void) |
250 | 250 |
REGISTER_FILTER(METADATA, metadata, vf); |
251 | 251 |
REGISTER_FILTER(MIDEQUALIZER, midequalizer, vf); |
252 | 252 |
REGISTER_FILTER(MINTERPOLATE, minterpolate, vf); |
253 |
+ REGISTER_FILTER(MIX, mix, vf); |
|
253 | 254 |
REGISTER_FILTER(MPDECIMATE, mpdecimate, vf); |
254 | 255 |
REGISTER_FILTER(NEGATE, negate, vf); |
255 | 256 |
REGISTER_FILTER(NLMEANS, nlmeans, vf); |
... | ... |
@@ -30,8 +30,8 @@ |
30 | 30 |
#include "libavutil/version.h" |
31 | 31 |
|
32 | 32 |
#define LIBAVFILTER_VERSION_MAJOR 7 |
33 |
-#define LIBAVFILTER_VERSION_MINOR 2 |
|
34 |
-#define LIBAVFILTER_VERSION_MICRO 102 |
|
33 |
+#define LIBAVFILTER_VERSION_MINOR 3 |
|
34 |
+#define LIBAVFILTER_VERSION_MICRO 100 |
|
35 | 35 |
|
36 | 36 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |
37 | 37 |
LIBAVFILTER_VERSION_MINOR, \ |
38 | 38 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,283 @@ |
0 |
+/* |
|
1 |
+ * Copyright (c) 2017 Paul B Mahol |
|
2 |
+ * |
|
3 |
+ * This file is part of FFmpeg. |
|
4 |
+ * |
|
5 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
6 |
+ * modify it under the terms of the GNU Lesser General Public |
|
7 |
+ * License as published by the Free Software Foundation; either |
|
8 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
9 |
+ * |
|
10 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
11 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 |
+ * Lesser General Public License for more details. |
|
14 |
+ * |
|
15 |
+ * You should have received a copy of the GNU Lesser General Public |
|
16 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
17 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
18 |
+ */ |
|
19 |
+ |
|
20 |
+#include "libavutil/avstring.h" |
|
21 |
+#include "libavutil/imgutils.h" |
|
22 |
+#include "libavutil/intreadwrite.h" |
|
23 |
+#include "libavutil/opt.h" |
|
24 |
+#include "libavutil/pixdesc.h" |
|
25 |
+ |
|
26 |
+#include "avfilter.h" |
|
27 |
+#include "formats.h" |
|
28 |
+#include "internal.h" |
|
29 |
+#include "framesync.h" |
|
30 |
+#include "video.h" |
|
31 |
+ |
|
32 |
+typedef struct MixContext { |
|
33 |
+ const AVClass *class; |
|
34 |
+ const AVPixFmtDescriptor *desc; |
|
35 |
+ char *weights_str; |
|
36 |
+ int nb_inputs; |
|
37 |
+ int duration; |
|
38 |
+ float *weights; |
|
39 |
+ float wfactor; |
|
40 |
+ |
|
41 |
+ int depth; |
|
42 |
+ int nb_planes; |
|
43 |
+ int linesize[4]; |
|
44 |
+ int height[4]; |
|
45 |
+ |
|
46 |
+ AVFrame **frames; |
|
47 |
+ FFFrameSync fs; |
|
48 |
+} MixContext; |
|
49 |
+ |
|
50 |
+static int query_formats(AVFilterContext *ctx) |
|
51 |
+{ |
|
52 |
+ AVFilterFormats *pix_fmts = NULL; |
|
53 |
+ int fmt, ret; |
|
54 |
+ |
|
55 |
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) { |
|
56 |
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); |
|
57 |
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || |
|
58 |
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL || |
|
59 |
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) && |
|
60 |
+ (ret = ff_add_format(&pix_fmts, fmt)) < 0) |
|
61 |
+ return ret; |
|
62 |
+ } |
|
63 |
+ |
|
64 |
+ return ff_set_common_formats(ctx, pix_fmts); |
|
65 |
+} |
|
66 |
+ |
|
67 |
+static av_cold int init(AVFilterContext *ctx) |
|
68 |
+{ |
|
69 |
+ MixContext *s = ctx->priv; |
|
70 |
+ char *p, *arg, *saveptr = NULL; |
|
71 |
+ int i, ret; |
|
72 |
+ |
|
73 |
+ s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames)); |
|
74 |
+ if (!s->frames) |
|
75 |
+ return AVERROR(ENOMEM); |
|
76 |
+ |
|
77 |
+ s->weights = av_calloc(s->nb_inputs, sizeof(*s->weights)); |
|
78 |
+ if (!s->weights) |
|
79 |
+ return AVERROR(ENOMEM); |
|
80 |
+ |
|
81 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
82 |
+ AVFilterPad pad = { 0 }; |
|
83 |
+ |
|
84 |
+ pad.type = AVMEDIA_TYPE_VIDEO; |
|
85 |
+ pad.name = av_asprintf("input%d", i); |
|
86 |
+ if (!pad.name) |
|
87 |
+ return AVERROR(ENOMEM); |
|
88 |
+ |
|
89 |
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) { |
|
90 |
+ av_freep(&pad.name); |
|
91 |
+ return ret; |
|
92 |
+ } |
|
93 |
+ } |
|
94 |
+ |
|
95 |
+ p = s->weights_str; |
|
96 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
97 |
+ if (!(arg = av_strtok(p, " ", &saveptr))) |
|
98 |
+ break; |
|
99 |
+ |
|
100 |
+ p = NULL; |
|
101 |
+ sscanf(arg, "%f", &s->weights[i]); |
|
102 |
+ s->wfactor += s->weights[i]; |
|
103 |
+ } |
|
104 |
+ s->wfactor = 1 / s->wfactor; |
|
105 |
+ |
|
106 |
+ return 0; |
|
107 |
+} |
|
108 |
+ |
|
109 |
+static int process_frame(FFFrameSync *fs) |
|
110 |
+{ |
|
111 |
+ AVFilterContext *ctx = fs->parent; |
|
112 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
113 |
+ MixContext *s = fs->opaque; |
|
114 |
+ AVFrame **in = s->frames; |
|
115 |
+ AVFrame *out; |
|
116 |
+ int i, p, ret, x, y; |
|
117 |
+ |
|
118 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
119 |
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0) |
|
120 |
+ return ret; |
|
121 |
+ } |
|
122 |
+ |
|
123 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
124 |
+ if (!out) |
|
125 |
+ return AVERROR(ENOMEM); |
|
126 |
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base); |
|
127 |
+ |
|
128 |
+ if (s->depth <= 8) { |
|
129 |
+ for (p = 0; p < s->nb_planes; p++) { |
|
130 |
+ uint8_t *dst = out->data[p]; |
|
131 |
+ |
|
132 |
+ for (y = 0; y < s->height[p]; y++) { |
|
133 |
+ for (x = 0; x < s->linesize[p]; x++) { |
|
134 |
+ int val = 0; |
|
135 |
+ |
|
136 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
137 |
+ uint8_t src = in[i]->data[p][y * s->linesize[p] + x]; |
|
138 |
+ |
|
139 |
+ val += src * s->weights[i]; |
|
140 |
+ } |
|
141 |
+ |
|
142 |
+ dst[x] = val * s->wfactor; |
|
143 |
+ } |
|
144 |
+ |
|
145 |
+ dst += out->linesize[p]; |
|
146 |
+ } |
|
147 |
+ } |
|
148 |
+ } else { |
|
149 |
+ for (p = 0; p < s->nb_planes; p++) { |
|
150 |
+ uint16_t *dst = (uint16_t *)out->data[p]; |
|
151 |
+ |
|
152 |
+ for (y = 0; y < s->height[p]; y++) { |
|
153 |
+ for (x = 0; x < s->linesize[p]; x++) { |
|
154 |
+ int val = 0; |
|
155 |
+ |
|
156 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
157 |
+ uint16_t src = AV_RN16(in[i]->data[p] + y * s->linesize[p] + x * 2); |
|
158 |
+ |
|
159 |
+ val += src * s->weights[i]; |
|
160 |
+ } |
|
161 |
+ |
|
162 |
+ dst[x] = val * s->wfactor; |
|
163 |
+ } |
|
164 |
+ |
|
165 |
+ dst += out->linesize[p] / 2; |
|
166 |
+ } |
|
167 |
+ } |
|
168 |
+ } |
|
169 |
+ |
|
170 |
+ return ff_filter_frame(outlink, out); |
|
171 |
+} |
|
172 |
+ |
|
173 |
+static int config_output(AVFilterLink *outlink) |
|
174 |
+{ |
|
175 |
+ AVFilterContext *ctx = outlink->src; |
|
176 |
+ MixContext *s = ctx->priv; |
|
177 |
+ AVRational time_base = ctx->inputs[0]->time_base; |
|
178 |
+ AVRational frame_rate = ctx->inputs[0]->frame_rate; |
|
179 |
+ AVFilterLink *inlink = ctx->inputs[0]; |
|
180 |
+ int height = ctx->inputs[0]->h; |
|
181 |
+ int width = ctx->inputs[0]->w; |
|
182 |
+ FFFrameSyncIn *in; |
|
183 |
+ int i, ret; |
|
184 |
+ |
|
185 |
+ for (i = 1; i < s->nb_inputs; i++) { |
|
186 |
+ if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) { |
|
187 |
+ av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height); |
|
188 |
+ return AVERROR(EINVAL); |
|
189 |
+ } |
|
190 |
+ } |
|
191 |
+ |
|
192 |
+ s->desc = av_pix_fmt_desc_get(outlink->format); |
|
193 |
+ if (!s->desc) |
|
194 |
+ return AVERROR_BUG; |
|
195 |
+ s->nb_planes = av_pix_fmt_count_planes(outlink->format); |
|
196 |
+ s->depth = s->desc->comp[0].depth; |
|
197 |
+ |
|
198 |
+ outlink->w = width; |
|
199 |
+ outlink->h = height; |
|
200 |
+ outlink->time_base = time_base; |
|
201 |
+ outlink->frame_rate = frame_rate; |
|
202 |
+ |
|
203 |
+ if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0) |
|
204 |
+ return ret; |
|
205 |
+ |
|
206 |
+ in = s->fs.in; |
|
207 |
+ s->fs.opaque = s; |
|
208 |
+ s->fs.on_event = process_frame; |
|
209 |
+ |
|
210 |
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0) |
|
211 |
+ return ret; |
|
212 |
+ |
|
213 |
+ s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h); |
|
214 |
+ s->height[0] = s->height[3] = inlink->h; |
|
215 |
+ |
|
216 |
+ for (i = 0; i < s->nb_inputs; i++) { |
|
217 |
+ AVFilterLink *inlink = ctx->inputs[i]; |
|
218 |
+ |
|
219 |
+ in[i].time_base = inlink->time_base; |
|
220 |
+ in[i].sync = 1; |
|
221 |
+ in[i].before = EXT_STOP; |
|
222 |
+ in[i].after = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY; |
|
223 |
+ } |
|
224 |
+ |
|
225 |
+ return ff_framesync_configure(&s->fs); |
|
226 |
+} |
|
227 |
+ |
|
228 |
+static av_cold void uninit(AVFilterContext *ctx) |
|
229 |
+{ |
|
230 |
+ MixContext *s = ctx->priv; |
|
231 |
+ int i; |
|
232 |
+ |
|
233 |
+ ff_framesync_uninit(&s->fs); |
|
234 |
+ av_freep(&s->frames); |
|
235 |
+ av_freep(&s->weights); |
|
236 |
+ |
|
237 |
+ for (i = 0; i < ctx->nb_inputs; i++) |
|
238 |
+ av_freep(&ctx->input_pads[i].name); |
|
239 |
+} |
|
240 |
+ |
|
241 |
+static int activate(AVFilterContext *ctx) |
|
242 |
+{ |
|
243 |
+ MixContext *s = ctx->priv; |
|
244 |
+ return ff_framesync_activate(&s->fs); |
|
245 |
+} |
|
246 |
+ |
|
247 |
+#define OFFSET(x) offsetof(MixContext, x) |
|
248 |
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM |
|
249 |
+ |
|
250 |
+static const AVOption mix_options[] = { |
|
251 |
+ { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS }, |
|
252 |
+ { "weights", "set weight for each input", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, .flags = FLAGS }, |
|
253 |
+ { "duration", "how to determine end of stream", OFFSET(duration), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, .flags = FLAGS, "duration" }, |
|
254 |
+ { "longest", "Duration of longest input", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "duration" }, |
|
255 |
+ { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "duration" }, |
|
256 |
+ { "first", "Duration of first input", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "duration" }, |
|
257 |
+ { NULL }, |
|
258 |
+}; |
|
259 |
+ |
|
260 |
+static const AVFilterPad outputs[] = { |
|
261 |
+ { |
|
262 |
+ .name = "default", |
|
263 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
264 |
+ .config_props = config_output, |
|
265 |
+ }, |
|
266 |
+ { NULL } |
|
267 |
+}; |
|
268 |
+ |
|
269 |
+AVFILTER_DEFINE_CLASS(mix); |
|
270 |
+ |
|
271 |
+AVFilter ff_vf_mix = { |
|
272 |
+ .name = "mix", |
|
273 |
+ .description = NULL_IF_CONFIG_SMALL("Mix video inputs."), |
|
274 |
+ .priv_size = sizeof(MixContext), |
|
275 |
+ .priv_class = &mix_class, |
|
276 |
+ .query_formats = query_formats, |
|
277 |
+ .outputs = outputs, |
|
278 |
+ .init = init, |
|
279 |
+ .uninit = uninit, |
|
280 |
+ .activate = activate, |
|
281 |
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS, |
|
282 |
+}; |