/*
 * Copyright (c) 2015 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
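
/**
 * @file
 * reverse video filter and areverse audio filter: buffer all input frames
 * and their timestamps until EOF, then output them in reverse order.
 */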

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

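/* Number of frame pointers and timestamps pre-allocated in init(); both
 * arrays grow by doubling in filter_frame() once they fill up. */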
#define DEFAULT_LENGTH 300

typedef struct ReverseContext {
    int nb_frames;             ///< number of frames currently buffered
    AVFrame **frames;          ///< buffered input frames, in input order
    unsigned int frames_size;  ///< allocated size of the frames array, in bytes
    unsigned int pts_size;     ///< allocated size of the pts array, in bytes
    int64_t *pts;              ///< timestamps of the buffered frames, in input order
    int flush_idx;             ///< next entry of pts to reuse while flushing
} ReverseContext;

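/* Pre-allocate room for DEFAULT_LENGTH frame pointers and timestamps so the
 * buffers do not need to grow for short clips. */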
static av_cold int init(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    s->pts = av_fast_realloc(NULL, &s->pts_size,
                             DEFAULT_LENGTH * sizeof(*(s->pts)));
    if (!s->pts)
        return AVERROR(ENOMEM);

    s->frames = av_fast_realloc(NULL, &s->frames_size,
                                DEFAULT_LENGTH * sizeof(*(s->frames)));
    if (!s->frames) {
        av_freep(&s->pts);
        return AVERROR(ENOMEM);
    }

    return 0;
}

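/* Release the buffers, including any frames still queued when the graph is
 * torn down before all of them have been flushed. */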
static av_cold void uninit(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    while (s->nb_frames > 0) {
        av_frame_free(&s->frames[s->nb_frames - 1]);
        s->nb_frames--;
    }

    av_freep(&s->pts);
    av_freep(&s->frames);
}

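/* A single request on the input usually produces no output until EOF, so
 * mark the output link so the framework keeps looping frame requests. */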
static int config_output(AVFilterLink *outlink)
{
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}

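/* Shared by reverse and areverse: queue the incoming frame and its pts,
 * doubling the backing arrays when they fill up. Output only happens from
 * the request_frame callbacks once the input hits EOF. */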
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ReverseContext *s    = ctx->priv;
    void *ptr;

    if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
        ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
        if (!ptr) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        s->pts = ptr;
    }

    if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
        ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
        if (!ptr) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        s->frames = ptr;
    }

    s->frames[s->nb_frames] = in;
    s->pts[s->nb_frames]    = in->pts;
    s->nb_frames++;

    return 0;
}

#if CONFIG_REVERSE_FILTER

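/* Emit the buffered frames from last to first once the input reports EOF.
 * The stored timestamps are reused in their original, increasing order so
 * the reversed stream still has monotonically increasing pts. */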
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];
        ret          = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

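/* The whole clip is kept in memory, so long inputs should be trimmed before
 * being reversed. Example: ffmpeg -i in.mp4 -vf trim=end=5,reverse out.mp4 */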
AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};

#endif /* CONFIG_REVERSE_FILTER */

#if CONFIG_AREVERSE_FILTER

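/* Samples are reversed in place per channel plane, so only planar sample
 * formats are supported; all channel layouts and sample rates are accepted. */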
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats(ctx, ff_planar_sample_fmts());
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

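/* Audio counterpart of request_frame(): on EOF, take the most recently
 * queued frame, reverse the sample order within each channel plane, assign
 * it the next stored (forward-order) timestamp and pass it on. */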
static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret, p, i, j;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];

        for (p = 0; p < outlink->channels; p++) {
            switch (outlink->format) {
            case AV_SAMPLE_FMT_U8P: {
                uint8_t *dst = (uint8_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(uint8_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_S16P: {
                int16_t *dst = (int16_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int16_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_S32P: {
                int32_t *dst = (int32_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int32_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_FLTP: {
                float *dst = (float *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(float, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_DBLP: {
                double *dst = (double *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(double, dst[i], dst[j]);
            }
                break;
            }
        }

        ret = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

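/* The samples are swapped in place, so input frames must be writable. */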
static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

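/* Example: ffmpeg -i in.wav -af atrim=end=5,areverse out.wav */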
AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};

#endif /* CONFIG_AREVERSE_FILTER */