/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
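
/**
 * @file
 * Sync audio data to timestamps, padding/trimming the data or (optionally)
 * stretching it to match (the "asyncts" filter).
 */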
#include "libavresample/avresample.h" |
093804a9 |
#include "libavutil/attributes.h" |
9f26421b |
#include "libavutil/audio_fifo.h" |
1d9c2dc8 |
#include "libavutil/common.h" |
9f26421b |
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h" |
803391f7 |
#include "internal.h" |
9f26421b |

typedef struct ASyncContext {
    const AVClass *class;

    AVAudioResampleContext *avr;
    int64_t pts;            ///< timestamp in samples of the first sample in fifo
    int min_delta;          ///< pad/trim min threshold in samples
    int first_frame;        ///< 1 until filter_frame() has processed at least 1 frame with a pts != AV_NOPTS_VALUE
    int64_t first_pts;      ///< user-specified first expected pts, in samples
    int comp;               ///< current resample compensation

    /* options */
    int resample;
    float min_delta_sec;
    int max_comp;

    /* set by filter_frame() to signal an output frame to request_frame() */
    int got_output;
} ASyncContext;

#define OFFSET(x) offsetof(ASyncContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asyncts_options[] = {
    { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A|F },
    { "min_delta", "Minimum difference between timestamps and audio data "
                   "(in seconds) to trigger padding/trimming the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
    { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
    { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
    { NULL },
};
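
/*
 * Example (assuming the usual libavfilter key=value option syntax, e.g. via
 * avconv's -af option; the option names are the ones defined above):
 *
 *     -af asyncts=compensate=1:max_comp=1000
 *
 * stretches/squeezes the audio, compensating at most 1000 samples per second.
 */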

AVFILTER_DEFINE_CLASS(asyncts);

static av_cold int init(AVFilterContext *ctx)
{
    ASyncContext *s = ctx->priv;

    s->pts         = AV_NOPTS_VALUE;
    s->first_frame = 1;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ASyncContext *s = ctx->priv;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }
}

static int config_props(AVFilterLink *link)
{
    ASyncContext *s = link->src->priv;
    int ret;

    s->min_delta    = s->min_delta_sec * link->sample_rate;
    link->time_base = (AVRational){ 1, link->sample_rate };

    s->avr = avresample_alloc_context();
    if (!s->avr)
        return AVERROR(ENOMEM);

    av_opt_set_int(s->avr, "in_channel_layout",  link->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", link->channel_layout, 0);
    av_opt_set_int(s->avr, "in_sample_fmt",      link->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     link->format,         0);
    av_opt_set_int(s->avr, "in_sample_rate",     link->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    link->sample_rate,    0);

    if (s->resample)
        av_opt_set_int(s->avr, "force_resampling", 1, 0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    return 0;
}

/* get amount of data currently buffered, in samples */
static int64_t get_delay(ASyncContext *s)
{
    return avresample_available(s->avr) + avresample_get_delay(s->avr);
}
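
/**
 * Drop buffered samples that precede the user-supplied first_pts so the
 * first output sample lands on it; on the very first frame, snap s->pts
 * to first_pts instead (any gap is padded with silence later).
 */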
static void handle_trimming(AVFilterContext *ctx)
{
    ASyncContext *s = ctx->priv;

    if (s->pts < s->first_pts) {
        int delta = FFMIN(s->first_pts - s->pts, avresample_available(s->avr));
        av_log(ctx, AV_LOG_VERBOSE, "Trimming %d samples from start\n",
               delta);
        avresample_read(s->avr, NULL, delta);
        s->pts += delta;
    } else if (s->first_frame)
        s->pts = s->first_pts;
}
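
/**
 * Keep requesting frames from the input until filter_frame() has pushed
 * one downstream; on EOF, flush whatever is still buffered in the
 * resampler as one final frame.
 */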
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ASyncContext *s      = ctx->priv;
    int ret = 0;
    int nb_samples;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the fifo */
    if (ret == AVERROR_EOF) {
        if (s->first_pts != AV_NOPTS_VALUE)
            handle_trimming(ctx);

        if ((nb_samples = get_delay(s))) {
            AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
            if (!buf)
                return AVERROR(ENOMEM);
            ret = avresample_convert(s->avr, buf->extended_data,
                                     buf->linesize[0], nb_samples, NULL, 0, 0);
            if (ret <= 0) {
                av_frame_free(&buf);
                return (ret < 0) ? ret : AVERROR_EOF;
            }

            buf->pts = s->pts;
            return ff_filter_frame(link, buf);
        }
    }

    return ret;
}
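
/* feed the frame's samples into the resampler's FIFO and free the frame */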
static int write_to_fifo(ASyncContext *s, AVFrame *buf)
{
    int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
                                 buf->linesize[0], buf->nb_samples);
    av_frame_free(&buf);
    return ret;
}
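
/**
 * Compare the incoming frame's pts against the end of the data already
 * buffered. Within min_delta the difference is ignored (or handed to the
 * resampler as compensation when compensate=1); beyond it, the output is
 * padded with silence or trimmed to re-establish sync.
 */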
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx  = inlink->dst;
    ASyncContext *s       = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
    int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
                  av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    int out_size, ret;
    int64_t delta;
    int64_t new_pts;

    /* buffer data until we get the next timestamp */
    if (s->pts == AV_NOPTS_VALUE || pts == AV_NOPTS_VALUE) {
        if (pts != AV_NOPTS_VALUE) {
            s->pts = pts - get_delay(s);
        }
        return write_to_fifo(s, buf);
    }

    if (s->first_pts != AV_NOPTS_VALUE) {
        handle_trimming(ctx);

        if (!avresample_available(s->avr))
            return write_to_fifo(s, buf);
    }

    /* when we have two timestamps, compute how many samples we would have
     * to add/remove to get proper sync between data and timestamps */
    delta    = pts - s->pts - get_delay(s);
    out_size = avresample_available(s->avr);

    if (llabs(delta) > s->min_delta ||
        (s->first_frame && delta && s->first_pts != AV_NOPTS_VALUE)) {
        av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
        out_size = av_clipl_int32((int64_t)out_size + delta);
    } else {
        if (s->resample) {
            // adjust the compensation if delta is non-zero
            int delay = get_delay(s);
            // avoid a division by zero if nothing is buffered yet
            if (delay > 0) {
                int comp = s->comp + av_clip(delta * inlink->sample_rate / delay,
                                             -s->max_comp, s->max_comp);
                if (comp != s->comp) {
                    av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
                    if (avresample_set_compensation(s->avr, comp, inlink->sample_rate) == 0) {
                        s->comp = comp;
                    }
                }
            }
        }
        // adjust PTS to avoid monotonicity errors with input PTS jitter
        pts  -= delta;
        delta = 0;
    }

    if (out_size > 0) {
        AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        if (s->first_frame && delta > 0) {
            int planar     = av_sample_fmt_is_planar(buf_out->format);
            int planes     = planar ? nb_channels : 1;
            int block_size = av_get_bytes_per_sample(buf_out->format) *
                             (planar ? 1 : nb_channels);

            int ch;

            /* out_size was grown by delta above, so the frame starts with
             * delta samples of silence followed by the buffered data */
            av_samples_set_silence(buf_out->extended_data, 0, delta,
                                   nb_channels, buf->format);

            /* temporarily advance the plane pointers past the silence */
            for (ch = 0; ch < planes; ch++)
                buf_out->extended_data[ch] += delta * block_size;

            avresample_read(s->avr, buf_out->extended_data, out_size);

            for (ch = 0; ch < planes; ch++)
                buf_out->extended_data[ch] -= delta * block_size;
        } else {
            avresample_read(s->avr, buf_out->extended_data, out_size);

            /* pad the end of the frame with silence */
            if (delta > 0) {
                av_samples_set_silence(buf_out->extended_data, out_size - delta,
                                       delta, nb_channels, buf->format);
            }
        }

        buf_out->pts = s->pts;
        ret = ff_filter_frame(outlink, buf_out);
        if (ret < 0)
            goto fail;
        s->got_output = 1;
    } else if (avresample_available(s->avr)) {
        av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
               "whole buffer.\n");
    }

    /* drain any remaining buffered data */
    avresample_read(s->avr, NULL, avresample_available(s->avr));

    new_pts = pts - avresample_get_delay(s->avr);
    /* check for s->pts monotonicity */
    if (new_pts > s->pts) {
        s->pts = new_pts;
        ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
                                 buf->linesize[0], buf->nb_samples);
    } else {
        av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
               "whole buffer.\n");
        ret = 0;
    }

    s->first_frame = 0;
fail:
    av_frame_free(&buf);
    return ret;
}

static const AVFilterPad avfilter_af_asyncts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_asyncts_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_asyncts = {
    .name        = "asyncts",
    .description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),

    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(ASyncContext),
    .priv_class  = &asyncts_class,

    .inputs      = avfilter_af_asyncts_inputs,
    .outputs     = avfilter_af_asyncts_outputs,
};