... | ... |
@@ -237,6 +237,35 @@ For example: |
237 | 237 |
will create two separate outputs from the same input, one cropped and |
238 | 238 |
one padded. |
239 | 239 |
|
240 |
+@section astreamsync |
|
241 |
+ |
|
242 |
+Forward two audio streams and control the order the buffers are forwarded. |
|
243 |
+ |
|
244 |
+The argument to the filter is an expression deciding which stream should be |
|
245 |
+forwarded next: if the result is negative, the first stream is forwarded; if |
|
246 |
+the result is positive or zero, the second stream is forwarded. It can use |
|
247 |
+the following variables: |
|
248 |
+ |
|
249 |
+@table @var |
|
250 |
+@item b1 b2 |
|
251 |
+number of buffers forwarded so far on each stream |
|
252 |
+@item s1 s2 |
|
253 |
+number of samples forwarded so far on each stream |
|
254 |
+@item t1 t2 |
|
255 |
+current timestamp of each stream |
|
256 |
+@end table |
|
257 |
+ |
|
258 |
+The default value is @code{t1-t2}, which means to always forward the stream |
|
259 |
+that has a smaller timestamp. |
|
260 |
+ |
|
261 |
+Example: stress-test @code{amerge} by randomly sending buffers on the wrong |
|
262 |
+input, while avoiding too much of a desynchronization: |
|
263 |
+@example |
|
264 |
+amovie=file.ogg [a] ; amovie=file.mp3 [b] ; |
|
265 |
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ; |
|
266 |
+[a2] [b2] amerge |
|
267 |
+@end example |
|
268 |
+ |
|
240 | 269 |
@section earwax |
241 | 270 |
|
242 | 271 |
Make audio easier to listen to on headphones. |
... | ... |
@@ -30,6 +30,7 @@ OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o |
30 | 30 |
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o |
31 | 31 |
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o |
32 | 32 |
OBJS-$(CONFIG_ASPLIT_FILTER) += af_asplit.o |
33 |
+OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o |
|
33 | 34 |
OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o |
34 | 35 |
OBJS-$(CONFIG_PAN_FILTER) += af_pan.o |
35 | 36 |
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o |
36 | 37 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,209 @@ |
0 |
+/* |
|
1 |
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org> |
|
2 |
+ * |
|
3 |
+ * This file is part of FFmpeg. |
|
4 |
+ * |
|
5 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
6 |
+ * modify it under the terms of the GNU Lesser General Public |
|
7 |
+ * License as published by the Free Software Foundation; either |
|
8 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
9 |
+ * |
|
10 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
11 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
13 |
+ * GNU Lesser General Public License for more details. |
|
14 |
+ * |
|
15 |
+ * You should have received a copy of the GNU Lesser General Public |
|
16 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
17 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
18 |
+ */ |
|
19 |
+ |
|
20 |
+/** |
|
21 |
+ * @file |
|
22 |
+ * Stream (de)synchronization filter |
|
23 |
+ */ |
|
24 |
+ |
|
25 |
+#include "libavutil/eval.h" |
|
26 |
+#include "avfilter.h" |
|
27 |
+#include "internal.h" |
|
28 |
+ |
|
29 |
/* Maximum number of buffers queued per input stream. */
#define QUEUE_SIZE 16

/* Names of the variables usable in the scheduling expression,
   in the same order as the var_name enum below. */
static const char * const var_names[] = {
    "b1", "b2",   /* number of buffers forwarded so far on each stream */
    "s1", "s2",   /* number of samples forwarded so far on each stream */
    "t1", "t2",   /* current timestamp of each stream, in seconds */
    NULL
};

enum var_name {
    VAR_B1, VAR_B2,
    VAR_S1, VAR_S2,
    VAR_T1, VAR_T2,
    VAR_NB
};

typedef struct {
    AVExpr *expr;               /* parsed scheduling expression */
    double var_values[VAR_NB];  /* current variable values fed to the expression */
    struct buf_queue {          /* FIFO of pending buffers, one per input */
        AVFilterBufferRef *buf[QUEUE_SIZE];
        unsigned tail, nb;
        /* buf[tail] is the oldest,
           buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
    } queue[2];
    int req[2];                 /* pending frame requests on each output */
    int next_out;               /* index of the stream to forward next */
    int eof;                    /* bitmask, one bit for each stream */
} AStreamSyncContext;

/* Default: always forward the stream with the smaller timestamp. */
static const char *default_expr = "t1-t2";
|
60 |
+ |
|
61 |
/**
 * Initialize the filter: parse the scheduling expression.
 *
 * @param args0  user-supplied expression string; when NULL, the default
 *               expression "t1-t2" is used
 * @return 0 on success, or the negative error code from av_expr_parse()
 */
static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
{
    AStreamSyncContext *as = ctx->priv;
    const char *expr = args0 ? args0 : default_expr;
    int r, i;

    r = av_expr_parse(&as->expr, expr, var_names,
                      NULL, NULL, NULL, NULL, 0, ctx);
    if (r < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr);
        return r;
    }
    /* Evaluate the expression a number of times up front, presumably to
       warm up the PRNG behind random() so early results are not
       correlated with the seed — TODO confirm; 42 looks arbitrary. */
    for (i = 0; i < 42; i++)
        av_expr_eval(as->expr, as->var_values, NULL); /* exercise prng */
    return 0;
}
|
77 |
+ |
|
78 |
+static int query_formats(AVFilterContext *ctx) |
|
79 |
+{ |
|
80 |
+ int i; |
|
81 |
+ AVFilterFormats *formats; |
|
82 |
+ |
|
83 |
+ for (i = 0; i < 2; i++) { |
|
84 |
+ formats = ctx->inputs[i]->in_formats; |
|
85 |
+ avfilter_formats_ref(formats, &ctx->inputs[i]->out_formats); |
|
86 |
+ avfilter_formats_ref(formats, &ctx->outputs[i]->in_formats); |
|
87 |
+ formats = ctx->inputs[i]->in_packing; |
|
88 |
+ avfilter_formats_ref(formats, &ctx->inputs[i]->out_packing); |
|
89 |
+ avfilter_formats_ref(formats, &ctx->outputs[i]->in_packing); |
|
90 |
+ formats = ctx->inputs[i]->in_chlayouts; |
|
91 |
+ avfilter_formats_ref(formats, &ctx->inputs[i]->out_chlayouts); |
|
92 |
+ avfilter_formats_ref(formats, &ctx->outputs[i]->in_chlayouts); |
|
93 |
+ } |
|
94 |
+ return 0; |
|
95 |
+} |
|
96 |
+ |
|
97 |
+static int config_output(AVFilterLink *outlink) |
|
98 |
+{ |
|
99 |
+ AVFilterContext *ctx = outlink->src; |
|
100 |
+ int id = outlink == ctx->outputs[1]; |
|
101 |
+ int i; |
|
102 |
+ |
|
103 |
+ outlink->sample_rate = ctx->inputs[id]->sample_rate; |
|
104 |
+ outlink->time_base = ctx->inputs[id]->time_base; |
|
105 |
+ return 0; |
|
106 |
+} |
|
107 |
+ |
|
108 |
/**
 * Pop the oldest queued buffer of stream out_id, update the expression
 * variables and forward the buffer on the corresponding output.
 *
 * Note: the variables (bN, sN, tN) are updated before the buffer is
 * handed to avfilter_filter_samples(), so the expression sees the state
 * after this buffer has been counted.
 */
static void send_out(AVFilterContext *ctx, int out_id)
{
    AStreamSyncContext *as = ctx->priv;
    struct buf_queue *queue = &as->queue[out_id];
    AVFilterBufferRef *buf = queue->buf[queue->tail];

    queue->buf[queue->tail] = NULL;
    as->var_values[VAR_B1 + out_id]++;
    as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples;
    /* tN tracks the end of the forwarded data: start from this buffer's
       pts when it is valid (keep the previous running value otherwise)
       and add the buffer's duration in seconds. */
    if (buf->pts != AV_NOPTS_VALUE)
        as->var_values[VAR_T1 + out_id] =
            av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
    as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
                                       (double)ctx->inputs[out_id]->sample_rate;
    avfilter_filter_samples(ctx->outputs[out_id], buf);
    queue->nb--;
    queue->tail = (queue->tail + 1) % QUEUE_SIZE;
    /* One pending request on this output has now been satisfied. */
    if (as->req[out_id])
        as->req[out_id]--;
}
|
128 |
+ |
|
129 |
/**
 * Forward as many queued buffers as the scheduling decision allows.
 *
 * While the stream currently selected as next_out has queued data, send
 * it; as long as no stream is at EOF, re-evaluate the expression to
 * select the next stream (negative result -> stream 0, positive or
 * zero -> stream 1).  Afterwards, flush any queue that is completely
 * full so its input can keep making progress.
 */
static void send_next(AVFilterContext *ctx)
{
    AStreamSyncContext *as = ctx->priv;
    int i;

    while (1) {
        if (!as->queue[as->next_out].nb)
            break;
        send_out(ctx, as->next_out);
        /* Once any stream is at EOF, stop consulting the expression:
           next_out is then steered by request_frame(). */
        if (!as->eof)
            as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
    }
    for (i = 0; i < 2; i++)
        if (as->queue[i].nb == QUEUE_SIZE)
            send_out(ctx, i);
}
|
145 |
+ |
|
146 |
/**
 * Request-frame handler for either output: pull from the inputs until
 * the pending request on this output is satisfied or its feeding
 * stream reaches EOF.
 *
 * EOF detection trick: the eof bit of next_out is set speculatively
 * before requesting a frame; filter_samples() clears it when data
 * actually arrives.  If the bit is still set after the request, the
 * input is really at EOF and we switch next_out to the other stream
 * so the remaining data can drain.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AStreamSyncContext *as = ctx->priv;
    int id = outlink == ctx->outputs[1]; /* 0 for out1, 1 for out2 */

    as->req[id]++;
    while (as->req[id] && !(as->eof & (1 << id))) {
        if (as->queue[as->next_out].nb) {
            send_next(ctx);
        } else {
            as->eof |= 1 << as->next_out;
            avfilter_request_frame(ctx->inputs[as->next_out]);
            if (as->eof & (1 << as->next_out))
                as->next_out = !as->next_out;
        }
    }
    return 0;
}
|
165 |
+ |
|
166 |
+static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
167 |
+{ |
|
168 |
+ AVFilterContext *ctx = inlink->dst; |
|
169 |
+ AStreamSyncContext *as = ctx->priv; |
|
170 |
+ int id = inlink == ctx->inputs[1]; |
|
171 |
+ |
|
172 |
+ as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] = |
|
173 |
+ insamples; |
|
174 |
+ as->eof &= ~(1 << id); |
|
175 |
+ send_next(ctx); |
|
176 |
+} |
|
177 |
+ |
|
178 |
/* Filter definition: two audio inputs, two audio outputs; buffers from
   in1/in2 are forwarded to out1/out2 in the order decided by the
   scheduling expression parsed in init(). */
AVFilter avfilter_af_astreamsync = {
    .name          = "astreamsync",
    .description   = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
                                          "in a configurable order."),
    .priv_size     = sizeof(AStreamSyncContext),
    .init          = init,
    .query_formats = query_formats,

    .inputs    = (const AVFilterPad[]) {
        { .name             = "in1",
          .type             = AVMEDIA_TYPE_AUDIO,
          .filter_samples   = filter_samples,
          .min_perms        = AV_PERM_READ, },
        { .name             = "in2",
          .type             = AVMEDIA_TYPE_AUDIO,
          .filter_samples   = filter_samples,
          .min_perms        = AV_PERM_READ, },
        { .name = NULL }
    },
    .outputs   = (const AVFilterPad[]) {
        { .name             = "out1",
          .type             = AVMEDIA_TYPE_AUDIO,
          .config_props     = config_output,
          .request_frame    = request_frame, },
        { .name             = "out2",
          .type             = AVMEDIA_TYPE_AUDIO,
          .config_props     = config_output,
          .request_frame    = request_frame, },
        { .name = NULL }
    },
};
... | ... |
@@ -40,6 +40,7 @@ void avfilter_register_all(void) |
40 | 40 |
REGISTER_FILTER (ARESAMPLE, aresample, af); |
41 | 41 |
REGISTER_FILTER (ASHOWINFO, ashowinfo, af); |
42 | 42 |
REGISTER_FILTER (ASPLIT, asplit, af); |
43 |
+ REGISTER_FILTER (ASTREAMSYNC, astreamsync, af); |
|
43 | 44 |
REGISTER_FILTER (EARWAX, earwax, af); |
44 | 45 |
REGISTER_FILTER (PAN, pan, af); |
45 | 46 |
REGISTER_FILTER (VOLUME, volume, af); |