This is a variant of showwaves. It is implemented as a different filter
so that the user is not allowed to use meaningless options which belong
to showwaves (such as rate).
Major edits done by Stefano Sabatini, from a patch by ubitux.
See thread:
From: Clément Bœsch <u@pkh.me>
To: ffmpeg-devel@ffmpeg.org
Date: Wed, 24 Dec 2014 15:03:26 +0100
Subject: [FFmpeg-devel] [PATCH] avfilter/showwaves: add single_pic option
... | ... |
@@ -11741,6 +11741,33 @@ aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r= |
11741 | 11741 |
@end example |
11742 | 11742 |
@end itemize |
11743 | 11743 |
|
11744 |
+@section showwavespic |
|
11745 |
+ |
|
11746 |
+Convert input audio to a single video frame, representing the sample waveforms. |
|
11747 |
+ |
|
11748 |
+The filter accepts the following options: |
|
11749 |
+ |
|
11750 |
+@table @option |
|
11751 |
+@item size, s |
|
11752 |
+Specify the video size for the output. For the syntax of this option, check the |
|
11753 |
+@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}. |
|
11754 |
+Default value is @code{600x240}. |
|
11755 |
+ |
|
11756 |
+@item split_channels |
|
11757 |
+Set if channels should be drawn separately or overlapped. Default value is 0. |
|
11758 |
+@end table |
|
11759 |
+ |
|
11760 |
+@subsection Examples |
|
11761 |
+ |
|
11762 |
+@itemize |
|
11763 |
+@item |
|
11764 |
+Extract a channel split representation of the wave form of a whole audio track |
|
11765 |
+in a 1024x800 picture using @command{ffmpeg}: |
|
11766 |
+@example |
|
11767 |
+ffmpeg -i audio.flac -lavfi showwavespic=split_channels=1:s=1024x800 waveform.png |
|
11768 |
+@end example |
|
11769 |
+@end itemize |
|
11770 |
+ |
|
11744 | 11771 |
@section split, asplit |
11745 | 11772 |
|
11746 | 11773 |
Split input into several identical outputs. |
... | ... |
@@ -236,6 +236,7 @@ OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o |
236 | 236 |
OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o |
237 | 237 |
OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o |
238 | 238 |
OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o |
239 |
+OBJS-$(CONFIG_SHOWWAVESPIC_FILTER) += avf_showwaves.o |
|
239 | 240 |
|
240 | 241 |
# multimedia sources |
241 | 242 |
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o |
... | ... |
@@ -251,6 +251,7 @@ void avfilter_register_all(void) |
251 | 251 |
REGISTER_FILTER(SHOWCQT, showcqt, avf); |
252 | 252 |
REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf); |
253 | 253 |
REGISTER_FILTER(SHOWWAVES, showwaves, avf); |
254 |
+ REGISTER_FILTER(SHOWWAVESPIC, showwavespic, avf); |
|
254 | 255 |
|
255 | 256 |
/* multimedia sources */ |
256 | 257 |
REGISTER_FILTER(AMOVIE, amovie, avsrc); |
... | ... |
@@ -23,6 +23,7 @@ |
23 | 23 |
* audio to video multimedia filter |
24 | 24 |
*/ |
25 | 25 |
|
26 |
+#include "libavutil/avassert.h" |
|
26 | 27 |
#include "libavutil/channel_layout.h" |
27 | 28 |
#include "libavutil/opt.h" |
28 | 29 |
#include "libavutil/parseutils.h" |
... | ... |
@@ -40,6 +41,11 @@ enum ShowWavesMode { |
40 | 40 |
MODE_NB, |
41 | 41 |
}; |
42 | 42 |
|
43 |
/* Singly-linked list node used to queue incoming audio frames until EOF,
 * when the single output picture is rendered from the whole stream. */
struct frame_node {
    AVFrame *frame;            /* queued audio frame; owned by the list, freed in uninit() */
    struct frame_node *next;   /* next queued frame, or NULL for the list tail */
};
|
47 |
+ |
|
43 | 48 |
typedef struct { |
44 | 49 |
const AVClass *class; |
45 | 50 |
int w, h; |
... | ... |
@@ -54,6 +60,13 @@ typedef struct { |
54 | 54 |
int split_channels; |
55 | 55 |
void (*draw_sample)(uint8_t *buf, int height, int linesize, |
56 | 56 |
int16_t sample, int16_t *prev_y, int intensity); |
57 |
+ |
|
58 |
+ /* single picture */ |
|
59 |
+ int single_pic; |
|
60 |
+ struct frame_node *audio_frames; |
|
61 |
+ struct frame_node *last_frame; |
|
62 |
+ int64_t total_samples; |
|
63 |
+ int64_t *sum; /* abs sum of the samples per channel */ |
|
57 | 64 |
} ShowWavesContext; |
58 | 65 |
|
59 | 66 |
#define OFFSET(x) offsetof(ShowWavesContext, x) |
... | ... |
@@ -82,6 +95,19 @@ static av_cold void uninit(AVFilterContext *ctx) |
82 | 82 |
|
83 | 83 |
av_frame_free(&showwaves->outpicref); |
84 | 84 |
av_freep(&showwaves->buf_idy); |
85 |
+ |
|
86 |
+ if (showwaves->single_pic) { |
|
87 |
+ struct frame_node *node = showwaves->audio_frames; |
|
88 |
+ while (node) { |
|
89 |
+ struct frame_node *tmp = node; |
|
90 |
+ |
|
91 |
+ node = node->next; |
|
92 |
+ av_frame_free(&tmp->frame); |
|
93 |
+ av_freep(&tmp); |
|
94 |
+ } |
|
95 |
+ av_freep(&showwaves->sum); |
|
96 |
+ showwaves->last_frame = NULL; |
|
97 |
+ } |
|
85 | 98 |
} |
86 | 99 |
|
87 | 100 |
static int query_formats(AVFilterContext *ctx) |
... | ... |
@@ -162,6 +188,55 @@ inline static int push_frame(AVFilterLink *outlink) |
162 | 162 |
return ret; |
163 | 163 |
} |
164 | 164 |
|
165 |
+static int push_single_pic(AVFilterLink *outlink) |
|
166 |
+{ |
|
167 |
+ AVFilterContext *ctx = outlink->src; |
|
168 |
+ AVFilterLink *inlink = ctx->inputs[0]; |
|
169 |
+ ShowWavesContext *showwaves = ctx->priv; |
|
170 |
+ int64_t n = 0, max_samples = showwaves->total_samples / outlink->w; |
|
171 |
+ AVFrame *out = showwaves->outpicref; |
|
172 |
+ struct frame_node *node; |
|
173 |
+ const int nb_channels = inlink->channels; |
|
174 |
+ const int x = 255 / (showwaves->split_channels ? 1 : nb_channels); |
|
175 |
+ const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h; |
|
176 |
+ const int linesize = out->linesize[0]; |
|
177 |
+ int col = 0; |
|
178 |
+ int64_t *sum = showwaves->sum; |
|
179 |
+ |
|
180 |
+ av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples); |
|
181 |
+ |
|
182 |
+ memset(sum, 0, nb_channels); |
|
183 |
+ |
|
184 |
+ for (node = showwaves->audio_frames; node; node = node->next) { |
|
185 |
+ int i; |
|
186 |
+ const AVFrame *frame = node->frame; |
|
187 |
+ const int16_t *p = (const int16_t *)frame->data[0]; |
|
188 |
+ |
|
189 |
+ for (i = 0; i < frame->nb_samples; i++) { |
|
190 |
+ int ch; |
|
191 |
+ |
|
192 |
+ for (ch = 0; ch < nb_channels; ch++) |
|
193 |
+ sum[ch] += abs(p[ch + i*nb_channels]) << 1; |
|
194 |
+ if (n++ == max_samples) { |
|
195 |
+ for (ch = 0; ch < nb_channels; ch++) { |
|
196 |
+ int16_t sample = sum[ch] / max_samples; |
|
197 |
+ uint8_t *buf = out->data[0] + col; |
|
198 |
+ if (showwaves->split_channels) |
|
199 |
+ buf += ch*ch_height*linesize; |
|
200 |
+ av_assert0(col < outlink->w); |
|
201 |
+ showwaves->draw_sample(buf, ch_height, linesize, sample, &showwaves->buf_idy[ch], x); |
|
202 |
+ sum[ch] = 0; |
|
203 |
+ } |
|
204 |
+ col++; |
|
205 |
+ n = 0; |
|
206 |
+ } |
|
207 |
+ } |
|
208 |
+ } |
|
209 |
+ |
|
210 |
+ return push_frame(outlink); |
|
211 |
+} |
|
212 |
+ |
|
213 |
+ |
|
165 | 214 |
static int request_frame(AVFilterLink *outlink) |
166 | 215 |
{ |
167 | 216 |
ShowWavesContext *showwaves = outlink->src->priv; |
... | ... |
@@ -173,8 +248,13 @@ static int request_frame(AVFilterLink *outlink) |
173 | 173 |
ret = ff_request_frame(inlink); |
174 | 174 |
} while (!showwaves->req_fullfilled && ret >= 0); |
175 | 175 |
|
176 |
- if (ret == AVERROR_EOF && showwaves->outpicref) |
|
177 |
- push_frame(outlink); |
|
176 |
+ if (ret == AVERROR_EOF && showwaves->outpicref) { |
|
177 |
+ if (showwaves->single_pic) |
|
178 |
+ push_single_pic(outlink); |
|
179 |
+ else |
|
180 |
+ push_frame(outlink); |
|
181 |
+ } |
|
182 |
+ |
|
178 | 183 |
return ret; |
179 | 184 |
} |
180 | 185 |
|
... | ... |
@@ -229,14 +309,56 @@ static void draw_sample_cline(uint8_t *buf, int height, int linesize, |
229 | 229 |
buf[k * linesize] += intensity; |
230 | 230 |
} |
231 | 231 |
|
232 |
-static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
232 |
+static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p, |
|
233 |
+ const AVFilterLink *inlink, AVFilterLink *outlink, |
|
234 |
+ const AVFrame *in) |
|
235 |
+{ |
|
236 |
+ if (!showwaves->outpicref) { |
|
237 |
+ int j; |
|
238 |
+ AVFrame *out = showwaves->outpicref = |
|
239 |
+ ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
240 |
+ if (!out) |
|
241 |
+ return AVERROR(ENOMEM); |
|
242 |
+ out->width = outlink->w; |
|
243 |
+ out->height = outlink->h; |
|
244 |
+ out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels, |
|
245 |
+ av_make_q(1, inlink->sample_rate), |
|
246 |
+ outlink->time_base); |
|
247 |
+ for (j = 0; j < outlink->h; j++) |
|
248 |
+ memset(out->data[0] + j*out->linesize[0], 0, outlink->w); |
|
249 |
+ } |
|
250 |
+ return 0; |
|
251 |
+} |
|
252 |
+ |
|
253 |
+static av_cold int init(AVFilterContext *ctx) |
|
254 |
+{ |
|
255 |
+ ShowWavesContext *showwaves = ctx->priv; |
|
256 |
+ |
|
257 |
+ if (!strcmp(ctx->filter->name, "showwavespic")) { |
|
258 |
+ showwaves->single_pic = 1; |
|
259 |
+ showwaves->mode = MODE_CENTERED_LINE; |
|
260 |
+ } |
|
261 |
+ |
|
262 |
+ switch (showwaves->mode) { |
|
263 |
+ case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; |
|
264 |
+ case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; |
|
265 |
+ case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; |
|
266 |
+ case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; |
|
267 |
+ default: |
|
268 |
+ return AVERROR_BUG; |
|
269 |
+ } |
|
270 |
+ return 0; |
|
271 |
+} |
|
272 |
+ |
|
273 |
+#if CONFIG_SHOWWAVES_FILTER |
|
274 |
+ |
|
275 |
+static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
233 | 276 |
{ |
234 | 277 |
AVFilterContext *ctx = inlink->dst; |
235 | 278 |
AVFilterLink *outlink = ctx->outputs[0]; |
236 | 279 |
ShowWavesContext *showwaves = ctx->priv; |
237 | 280 |
const int nb_samples = insamples->nb_samples; |
238 | 281 |
AVFrame *outpicref = showwaves->outpicref; |
239 |
- int linesize = outpicref ? outpicref->linesize[0] : 0; |
|
240 | 282 |
int16_t *p = (int16_t *)insamples->data[0]; |
241 | 283 |
int nb_channels = inlink->channels; |
242 | 284 |
int i, j, ret = 0; |
... | ... |
@@ -246,23 +368,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
246 | 246 |
|
247 | 247 |
/* draw data in the buffer */ |
248 | 248 |
for (i = 0; i < nb_samples; i++) { |
249 |
- if (!showwaves->outpicref) { |
|
250 |
- showwaves->outpicref = outpicref = |
|
251 |
- ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
252 |
- if (!outpicref) |
|
253 |
- return AVERROR(ENOMEM); |
|
254 |
- outpicref->width = outlink->w; |
|
255 |
- outpicref->height = outlink->h; |
|
256 |
- outpicref->pts = insamples->pts + |
|
257 |
- av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, |
|
258 |
- (AVRational){ 1, inlink->sample_rate }, |
|
259 |
- outlink->time_base); |
|
260 |
- linesize = outpicref->linesize[0]; |
|
261 |
- for (j = 0; j < outlink->h; j++) |
|
262 |
- memset(outpicref->data[0] + j * linesize, 0, outlink->w); |
|
263 |
- } |
|
249 |
+ |
|
250 |
+ ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples); |
|
251 |
+ if (ret < 0) |
|
252 |
+ goto end; |
|
253 |
+ outpicref = showwaves->outpicref; |
|
254 |
+ |
|
264 | 255 |
for (j = 0; j < nb_channels; j++) { |
265 | 256 |
uint8_t *buf = outpicref->data[0] + showwaves->buf_idx; |
257 |
+ const int linesize = outpicref->linesize[0]; |
|
266 | 258 |
if (showwaves->split_channels) |
267 | 259 |
buf += j*ch_height*linesize; |
268 | 260 |
showwaves->draw_sample(buf, ch_height, linesize, *p++, |
... | ... |
@@ -280,30 +394,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
280 | 280 |
outpicref = showwaves->outpicref; |
281 | 281 |
} |
282 | 282 |
|
283 |
+end: |
|
283 | 284 |
av_frame_free(&insamples); |
284 | 285 |
return ret; |
285 | 286 |
} |
286 | 287 |
|
287 |
-static av_cold int init(AVFilterContext *ctx) |
|
288 |
-{ |
|
289 |
- ShowWavesContext *showwaves = ctx->priv; |
|
290 |
- |
|
291 |
- switch (showwaves->mode) { |
|
292 |
- case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; |
|
293 |
- case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; |
|
294 |
- case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; |
|
295 |
- case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; |
|
296 |
- default: |
|
297 |
- return AVERROR_BUG; |
|
298 |
- } |
|
299 |
- return 0; |
|
300 |
-} |
|
301 |
- |
|
302 | 288 |
/* Input pads of the showwaves filter: a single audio pad whose frames are
 * consumed by showwaves_filter_frame(). */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }   /* pad list terminator */
};
... | ... |
@@ -329,3 +429,107 @@ AVFilter ff_avf_showwaves = { |
329 | 329 |
.outputs = showwaves_outputs, |
330 | 330 |
.priv_class = &showwaves_class, |
331 | 331 |
}; |
332 |
+ |
|
333 |
+#endif // CONFIG_SHOWWAVES_FILTER |
|
334 |
+ |
|
335 |
+#if CONFIG_SHOWWAVESPIC_FILTER |
|
336 |
+ |
|
337 |
/* Option table for showwavespic; fields live in ShowWavesContext. */
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwavespic_options[] = {
    /* "size"/"s": output picture dimensions, parsed from e.g. "600x240"
     * (stored starting at OFFSET(w)) */
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    /* 0 = draw all channels overlapped, 1 = one horizontal band per channel */
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);
|
348 |
+ |
|
349 |
+static int showwavespic_config_input(AVFilterLink *inlink) |
|
350 |
+{ |
|
351 |
+ AVFilterContext *ctx = inlink->dst; |
|
352 |
+ ShowWavesContext *showwaves = ctx->priv; |
|
353 |
+ |
|
354 |
+ if (showwaves->single_pic) { |
|
355 |
+ showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum)); |
|
356 |
+ if (!showwaves->sum) |
|
357 |
+ return AVERROR(ENOMEM); |
|
358 |
+ } |
|
359 |
+ |
|
360 |
+ return 0; |
|
361 |
+} |
|
362 |
+ |
|
363 |
+static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
364 |
+{ |
|
365 |
+ AVFilterContext *ctx = inlink->dst; |
|
366 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
367 |
+ ShowWavesContext *showwaves = ctx->priv; |
|
368 |
+ int16_t *p = (int16_t *)insamples->data[0]; |
|
369 |
+ int ret = 0; |
|
370 |
+ |
|
371 |
+ if (showwaves->single_pic) { |
|
372 |
+ struct frame_node *f; |
|
373 |
+ |
|
374 |
+ ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples); |
|
375 |
+ if (ret < 0) |
|
376 |
+ goto end; |
|
377 |
+ |
|
378 |
+ /* queue the audio frame */ |
|
379 |
+ f = av_malloc(sizeof(*f)); |
|
380 |
+ if (!f) { |
|
381 |
+ ret = AVERROR(ENOMEM); |
|
382 |
+ goto end; |
|
383 |
+ } |
|
384 |
+ f->frame = insamples; |
|
385 |
+ f->next = NULL; |
|
386 |
+ if (!showwaves->last_frame) { |
|
387 |
+ showwaves->audio_frames = |
|
388 |
+ showwaves->last_frame = f; |
|
389 |
+ } else { |
|
390 |
+ showwaves->last_frame->next = f; |
|
391 |
+ showwaves->last_frame = f; |
|
392 |
+ } |
|
393 |
+ showwaves->total_samples += insamples->nb_samples; |
|
394 |
+ |
|
395 |
+ return 0; |
|
396 |
+ } |
|
397 |
+ |
|
398 |
+end: |
|
399 |
+ av_frame_free(&insamples); |
|
400 |
+ return ret; |
|
401 |
+} |
|
402 |
+ |
|
403 |
/* Input pads of showwavespic: a single audio pad; config_props allocates the
 * per-channel accumulators, filter_frame queues the audio until EOF. */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }   /* pad list terminator */
};
|
412 |
+ |
|
413 |
/* Output pads of showwavespic: a single video pad; request_frame pulls audio
 * until EOF and then emits the one rendered picture. */
static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }   /* pad list terminator */
};
|
422 |
+ |
|
423 |
/* Filter definition for showwavespic. It shares init/uninit/query_formats and
 * the private context with showwaves; init() detects the filter name and
 * switches on single-picture mode. */
AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};
|
434 |
+ |
|
435 |
+#endif // CONFIG_SHOWWAVESPIC_FILTER |
... | ... |
@@ -30,7 +30,7 @@ |
30 | 30 |
#include "libavutil/version.h" |
31 | 31 |
|
32 | 32 |
#define LIBAVFILTER_VERSION_MAJOR 5 |
33 |
-#define LIBAVFILTER_VERSION_MINOR 12 |
|
33 |
+#define LIBAVFILTER_VERSION_MINOR 13 |
|
34 | 34 |
#define LIBAVFILTER_VERSION_MICRO 100 |
35 | 35 |
|
36 | 36 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |