... | ... |
@@ -7475,6 +7475,54 @@ do not have exactly the same duration in the first file. |
7475 | 7475 |
|
7476 | 7476 |
@end itemize |
7477 | 7477 |
|
7478 |
+@section interleave, ainterleave |
|
7479 |
+ |
|
7480 |
+Temporally interleave frames from several inputs. |
|
7481 |
+ |
|
7482 |
+@code{interleave} works with video inputs, @code{ainterleave} with audio. |
|
7483 |
+ |
|
7484 |
+These filters read frames from several inputs and send the oldest |
|
7485 |
+queued frame to the output. |
|
7486 |
+ |
|
7487 |
+Input streams must have well defined, monotonically increasing frame |
|
7488 |
+timestamp values. |
|
7489 |
+ |
|
7490 |
+In order to submit one frame to output, these filters need to enqueue |
|
7491 |
+at least one frame for each input, so they cannot work in case one |
|
7492 |
+input has not yet terminated and is not receiving incoming frames. |
|
7493 |
+ |
|
7494 |
+For example consider the case when one input is a @code{select} filter |
|
7495 |
+which always drops input frames. The @code{interleave} filter will keep |
|
7496 |
+reading from that input, but it will never be able to send new frames |
|
7497 |
+to output until the input sends an end-of-stream signal. |
|
7498 |
+ |
|
7499 |
+Also, depending on inputs synchronization, the filters will drop |
|
7500 |
+frames in case one input receives more frames than the others, and |
|
7501 |
+the queue is already filled. |
|
7502 |
+ |
|
7503 |
+These filters accept the following options: |
|
7504 |
+ |
|
7505 |
+@table @option |
|
7506 |
+@item nb_inputs, n |
|
7507 |
+Set the number of different inputs, it is 2 by default. |
|
7508 |
+@end table |
|
7509 |
+ |
|
7510 |
+@subsection Examples |
|
7511 |
+ |
|
7512 |
+@itemize |
|
7513 |
+@item |
|
7514 |
+Interleave frames belonging to different streams using @command{ffmpeg}: |
|
7515 |
+@example |
|
7516 |
+ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi |
|
7517 |
+@end example |
|
7518 |
+ |
|
7519 |
+@item |
|
7520 |
+Add flickering blur effect: |
|
7521 |
+@example |
|
7522 |
+select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave |
|
7523 |
+@end example |
|
7524 |
+@end itemize |
|
7525 |
+ |
|
7478 | 7526 |
@section showspectrum |
7479 | 7527 |
|
7480 | 7528 |
Convert input audio to a video output, representing the audio frequency |
... | ... |
@@ -52,6 +52,7 @@ OBJS-$(CONFIG_SWSCALE) += lswsutils.o |
52 | 52 |
OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o |
53 | 53 |
OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o |
54 | 54 |
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o |
55 |
+OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o |
|
55 | 56 |
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o |
56 | 57 |
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o |
57 | 58 |
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o |
... | ... |
@@ -136,6 +137,7 @@ OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o |
136 | 136 |
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o |
137 | 137 |
OBJS-$(CONFIG_IL_FILTER) += vf_il.o |
138 | 138 |
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o |
139 |
+OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o |
|
139 | 140 |
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o |
140 | 141 |
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o |
141 | 142 |
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o |
... | ... |
@@ -50,6 +50,7 @@ void avfilter_register_all(void) |
50 | 50 |
#endif |
51 | 51 |
REGISTER_FILTER(AFADE, afade, af); |
52 | 52 |
REGISTER_FILTER(AFORMAT, aformat, af); |
53 |
+ REGISTER_FILTER(AINTERLEAVE, ainterleave, af); |
|
53 | 54 |
REGISTER_FILTER(ALLPASS, allpass, af); |
54 | 55 |
REGISTER_FILTER(AMERGE, amerge, af); |
55 | 56 |
REGISTER_FILTER(AMIX, amix, af); |
... | ... |
@@ -134,6 +135,7 @@ void avfilter_register_all(void) |
134 | 134 |
REGISTER_FILTER(IDET, idet, vf); |
135 | 135 |
REGISTER_FILTER(IL, il, vf); |
136 | 136 |
REGISTER_FILTER(INTERLACE, interlace, vf); |
137 |
+ REGISTER_FILTER(INTERLEAVE, interleave, vf); |
|
137 | 138 |
REGISTER_FILTER(KERNDEINT, kerndeint, vf); |
138 | 139 |
REGISTER_FILTER(LUT, lut, vf); |
139 | 140 |
REGISTER_FILTER(LUTRGB, lutrgb, vf); |
140 | 141 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,259 @@ |
0 |
+/* |
|
1 |
+ * Copyright (c) 2013 Stefano Sabatini |
|
2 |
+ * |
|
3 |
+ * This file is part of FFmpeg. |
|
4 |
+ * |
|
5 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
6 |
+ * modify it under the terms of the GNU Lesser General Public |
|
7 |
+ * License as published by the Free Software Foundation; either |
|
8 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
9 |
+ * |
|
10 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
11 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 |
+ * Lesser General Public License for more details. |
|
14 |
+ * |
|
15 |
+ * You should have received a copy of the GNU Lesser General Public |
|
16 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
17 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
18 |
+ */ |
|
19 |
+ |
|
20 |
+/** |
|
21 |
+ * @file |
|
22 |
+ * audio and video interleaver |
|
23 |
+ */ |
|
24 |
+ |
|
25 |
+#include "libavutil/avassert.h" |
|
26 |
+#include "libavutil/avstring.h" |
|
27 |
+#include "libavutil/opt.h" |
|
28 |
+#include "avfilter.h" |
|
29 |
+#include "bufferqueue.h" |
|
30 |
+#include "formats.h" |
|
31 |
+#include "internal.h" |
|
32 |
+#include "audio.h" |
|
33 |
+#include "video.h" |
|
34 |
+ |
|
35 |
+typedef struct { |
|
36 |
+ const AVClass *class; |
|
37 |
+ int nb_inputs; |
|
38 |
+ struct FFBufQueue *queues; |
|
39 |
+} InterleaveContext; |
|
40 |
+ |
|
41 |
+#define OFFSET(x) offsetof(InterleaveContext, x) |
|
42 |
+ |
|
43 |
+#define DEFINE_OPTIONS(filt_name, flags_) \ |
|
44 |
+static const AVOption filt_name##_options[] = { \ |
|
45 |
+ { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \ |
|
46 |
+ { "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \ |
|
47 |
+ { NULL }, \ |
|
48 |
+} |
|
49 |
+ |
|
50 |
+inline static int push_frame(AVFilterContext *ctx) |
|
51 |
+{ |
|
52 |
+ InterleaveContext *interleave = ctx->priv; |
|
53 |
+ AVFrame *frame; |
|
54 |
+ int i, queue_idx = -1; |
|
55 |
+ int64_t pts_min = INT64_MAX; |
|
56 |
+ |
|
57 |
+ /* look for oldest frame */ |
|
58 |
+ for (i = 0; i < ctx->nb_inputs; i++) { |
|
59 |
+ struct FFBufQueue *q = &interleave->queues[i]; |
|
60 |
+ |
|
61 |
+ if (!q->available && !ctx->inputs[i]->closed) |
|
62 |
+ return 0; |
|
63 |
+ if (q->available) { |
|
64 |
+ frame = ff_bufqueue_peek(q, 0); |
|
65 |
+ if (frame->pts < pts_min) { |
|
66 |
+ pts_min = frame->pts; |
|
67 |
+ queue_idx = i; |
|
68 |
+ } |
|
69 |
+ } |
|
70 |
+ } |
|
71 |
+ |
|
72 |
+ /* all inputs are closed */ |
|
73 |
+ if (queue_idx < 0) |
|
74 |
+ return AVERROR_EOF; |
|
75 |
+ |
|
76 |
+ frame = ff_bufqueue_get(&interleave->queues[queue_idx]); |
|
77 |
+ av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n", |
|
78 |
+ queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q)); |
|
79 |
+ return ff_filter_frame(ctx->outputs[0], frame); |
|
80 |
+} |
|
81 |
+ |
|
82 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
83 |
+{ |
|
84 |
+ AVFilterContext *ctx = inlink->dst; |
|
85 |
+ InterleaveContext *interleave = ctx->priv; |
|
86 |
+ unsigned in_no = FF_INLINK_IDX(inlink); |
|
87 |
+ |
|
88 |
+ if (frame->pts == AV_NOPTS_VALUE) { |
|
89 |
+ av_log(ctx, AV_LOG_WARNING, |
|
90 |
+ "NOPTS value for input frame cannot be accepted, frame discarded\n"); |
|
91 |
+ av_frame_free(&frame); |
|
92 |
+ return AVERROR_INVALIDDATA; |
|
93 |
+ } |
|
94 |
+ |
|
95 |
+ /* queue frame */ |
|
96 |
+ frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q); |
|
97 |
+ av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n", |
|
98 |
+ frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, interleave->queues[in_no].available); |
|
99 |
+ ff_bufqueue_add(ctx, &interleave->queues[in_no], frame); |
|
100 |
+ |
|
101 |
+ return push_frame(ctx); |
|
102 |
+} |
|
103 |
+ |
|
104 |
+static int init(AVFilterContext *ctx) |
|
105 |
+{ |
|
106 |
+ InterleaveContext *interleave = ctx->priv; |
|
107 |
+ const AVFilterPad *outpad = &ctx->filter->outputs[0]; |
|
108 |
+ int i; |
|
109 |
+ |
|
110 |
+ interleave->queues = av_calloc(interleave->nb_inputs, sizeof(interleave->queues[0])); |
|
111 |
+ if (!interleave->queues) |
|
112 |
+ return AVERROR(ENOMEM); |
|
113 |
+ |
|
114 |
+ for (i = 0; i < interleave->nb_inputs; i++) { |
|
115 |
+ AVFilterPad inpad = { 0 }; |
|
116 |
+ |
|
117 |
+ inpad.name = av_asprintf("input%d", i); |
|
118 |
+ if (!inpad.name) |
|
119 |
+ return AVERROR(ENOMEM); |
|
120 |
+ inpad.type = outpad->type; |
|
121 |
+ inpad.filter_frame = filter_frame; |
|
122 |
+ |
|
123 |
+ switch (outpad->type) { |
|
124 |
+ case AVMEDIA_TYPE_VIDEO: |
|
125 |
+ inpad.get_video_buffer = ff_null_get_video_buffer; break; |
|
126 |
+ case AVMEDIA_TYPE_AUDIO: |
|
127 |
+ inpad.get_audio_buffer = ff_null_get_audio_buffer; break; |
|
128 |
+ default: |
|
129 |
+ av_assert0(0); |
|
130 |
+ } |
|
131 |
+ ff_insert_inpad(ctx, i, &inpad); |
|
132 |
+ } |
|
133 |
+ |
|
134 |
+ return 0; |
|
135 |
+} |
|
136 |
+ |
|
137 |
+static void uninit(AVFilterContext *ctx) |
|
138 |
+{ |
|
139 |
+ InterleaveContext *interleave = ctx->priv; |
|
140 |
+ int i; |
|
141 |
+ |
|
142 |
+ for (i = 0; i < ctx->nb_inputs; i++) { |
|
143 |
+ ff_bufqueue_discard_all(&interleave->queues[i]); |
|
144 |
+ av_freep(&interleave->queues[i]); |
|
145 |
+ av_freep(&ctx->input_pads[i].name); |
|
146 |
+ } |
|
147 |
+} |
|
148 |
+ |
|
149 |
+static int config_output(AVFilterLink *outlink) |
|
150 |
+{ |
|
151 |
+ AVFilterContext *ctx = outlink->src; |
|
152 |
+ AVFilterLink *inlink0 = ctx->inputs[0]; |
|
153 |
+ int i; |
|
154 |
+ |
|
155 |
+ if (outlink->type == AVMEDIA_TYPE_VIDEO) { |
|
156 |
+ outlink->time_base = AV_TIME_BASE_Q; |
|
157 |
+ outlink->w = inlink0->w; |
|
158 |
+ outlink->h = inlink0->h; |
|
159 |
+ outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio; |
|
160 |
+ outlink->format = inlink0->format; |
|
161 |
+ outlink->frame_rate = (AVRational) {1, 0}; |
|
162 |
+ for (i = 1; i < ctx->nb_inputs; i++) { |
|
163 |
+ AVFilterLink *inlink = ctx->inputs[i]; |
|
164 |
+ |
|
165 |
+ if (outlink->w != inlink->w || |
|
166 |
+ outlink->h != inlink->h || |
|
167 |
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num || |
|
168 |
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) { |
|
169 |
+ av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s " |
|
170 |
+ "(size %dx%d, SAR %d:%d) do not match the corresponding " |
|
171 |
+ "output link parameters (%dx%d, SAR %d:%d)\n", |
|
172 |
+ ctx->input_pads[i].name, inlink->w, inlink->h, |
|
173 |
+ inlink->sample_aspect_ratio.num, |
|
174 |
+ inlink->sample_aspect_ratio.den, |
|
175 |
+ outlink->w, outlink->h, |
|
176 |
+ outlink->sample_aspect_ratio.num, |
|
177 |
+ outlink->sample_aspect_ratio.den); |
|
178 |
+ return AVERROR(EINVAL); |
|
179 |
+ } |
|
180 |
+ } |
|
181 |
+ } |
|
182 |
+ |
|
183 |
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP; |
|
184 |
+ return 0; |
|
185 |
+} |
|
186 |
+ |
|
187 |
+static int request_frame(AVFilterLink *outlink) |
|
188 |
+{ |
|
189 |
+ AVFilterContext *ctx = outlink->src; |
|
190 |
+ InterleaveContext *interleave = ctx->priv; |
|
191 |
+ int i, ret; |
|
192 |
+ |
|
193 |
+ for (i = 0; i < ctx->nb_inputs; i++) { |
|
194 |
+ if (!interleave->queues[i].available && !ctx->inputs[i]->closed) { |
|
195 |
+ ret = ff_request_frame(ctx->inputs[i]); |
|
196 |
+ if (ret != AVERROR_EOF) |
|
197 |
+ return ret; |
|
198 |
+ } |
|
199 |
+ } |
|
200 |
+ |
|
201 |
+ return push_frame(ctx); |
|
202 |
+} |
|
203 |
+ |
|
204 |
+#if CONFIG_INTERLEAVE_FILTER |
|
205 |
+ |
|
206 |
+DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM); |
|
207 |
+AVFILTER_DEFINE_CLASS(interleave); |
|
208 |
+ |
|
209 |
+static const AVFilterPad interleave_outputs[] = { |
|
210 |
+ { |
|
211 |
+ .name = "default", |
|
212 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
213 |
+ .config_props = config_output, |
|
214 |
+ .request_frame = request_frame, |
|
215 |
+ }, |
|
216 |
+ { NULL } |
|
217 |
+}; |
|
218 |
+ |
|
219 |
+AVFilter avfilter_vf_interleave = { |
|
220 |
+ .name = "interleave", |
|
221 |
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."), |
|
222 |
+ .priv_size = sizeof(InterleaveContext), |
|
223 |
+ .init = init, |
|
224 |
+ .uninit = uninit, |
|
225 |
+ .outputs = interleave_outputs, |
|
226 |
+ .priv_class = &interleave_class, |
|
227 |
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS, |
|
228 |
+}; |
|
229 |
+ |
|
230 |
+#endif |
|
231 |
+ |
|
232 |
+#if CONFIG_AINTERLEAVE_FILTER |
|
233 |
+ |
|
234 |
+DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM); |
|
235 |
+AVFILTER_DEFINE_CLASS(ainterleave); |
|
236 |
+ |
|
237 |
+static const AVFilterPad ainterleave_outputs[] = { |
|
238 |
+ { |
|
239 |
+ .name = "default", |
|
240 |
+ .type = AVMEDIA_TYPE_AUDIO, |
|
241 |
+ .config_props = config_output, |
|
242 |
+ .request_frame = request_frame, |
|
243 |
+ }, |
|
244 |
+ { NULL } |
|
245 |
+}; |
|
246 |
+ |
|
247 |
+AVFilter avfilter_af_ainterleave = { |
|
248 |
+ .name = "ainterleave", |
|
249 |
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."), |
|
250 |
+ .priv_size = sizeof(InterleaveContext), |
|
251 |
+ .init = init, |
|
252 |
+ .uninit = uninit, |
|
253 |
+ .outputs = ainterleave_outputs, |
|
254 |
+ .priv_class = &ainterleave_class, |
|
255 |
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS, |
|
256 |
+}; |
|
257 |
+ |
|
258 |
+#endif |
... | ... |
@@ -29,7 +29,7 @@ |
29 | 29 |
#include "libavutil/avutil.h" |
30 | 30 |
|
31 | 31 |
#define LIBAVFILTER_VERSION_MAJOR 3 |
32 |
-#define LIBAVFILTER_VERSION_MINOR 59 |
|
32 |
+#define LIBAVFILTER_VERSION_MINOR 60 |
|
33 | 33 |
#define LIBAVFILTER_VERSION_MICRO 100 |
34 | 34 |
|
35 | 35 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |