Browse code

Merge commit 'a5a6ac1a123a927e5bed984ed757a29b7ff87dab'

* commit 'a5a6ac1a123a927e5bed984ed757a29b7ff87dab':
libavfilter/overlay_qsv: Add QSV overlay vpp filter
libavfilter/vf_vpp: Add common filters of the qsv vpp

Merged-by: James Almer <jamrial@gmail.com>

James Almer authored on 2017/11/12 02:46:24
Showing 8 changed files
... ...
@@ -14,6 +14,7 @@ version <next>:
14 14
 - native aptX encoder and decoder
15 15
 - Raw aptX muxer and demuxer
16 16
 - NVIDIA NVDEC-accelerated H.264 and HEVC hwaccel decoding
17
+- Intel QSV-accelerated overlay filter
17 18
 
18 19
 
19 20
 version 3.4:
... ...
@@ -2196,6 +2196,7 @@ CONFIG_EXTRA="
2196 2196
     qsv
2197 2197
     qsvdec
2198 2198
     qsvenc
2199
+    qsvvpp
2199 2200
     rangecoder
2200 2201
     riffdec
2201 2202
     riffenc
... ...
@@ -2783,6 +2784,7 @@ omx_rpi_select="omx"
2783 2783
 qsv_deps="libmfx"
2784 2784
 qsvdec_select="qsv"
2785 2785
 qsvenc_select="qsv"
2786
+qsvvpp_select="qsv"
2786 2787
 vaapi_encode_deps="vaapi"
2787 2788
 v4l2_m2m_deps_any="linux_videodev2_h"
2788 2789
 
... ...
@@ -3228,6 +3230,8 @@ negate_filter_deps="lut_filter"
3228 3228
 nnedi_filter_deps="gpl"
3229 3229
 ocr_filter_deps="libtesseract"
3230 3230
 ocv_filter_deps="libopencv"
3231
+overlay_qsv_filter_deps="libmfx"
3232
+overlay_qsv_filter_select="qsvvpp"
3231 3233
 owdenoise_filter_deps="gpl"
3232 3234
 pan_filter_deps="swresample"
3233 3235
 perspective_filter_deps="gpl"
... ...
@@ -3279,6 +3283,8 @@ zmq_filter_deps="libzmq"
3279 3279
 zoompan_filter_deps="swscale"
3280 3280
 zscale_filter_deps="libzimg const_nan"
3281 3281
 scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer"
3282
+vpp_qsv_filter_deps="libmfx"
3283
+vpp_qsv_filter_select="qsvvpp"
3282 3284
 
3283 3285
 # examples
3284 3286
 avio_dir_cmd_deps="avformat avutil"
... ...
@@ -25,6 +25,9 @@ OBJS = allfilters.o                                                     \
25 25
 
26 26
 OBJS-$(HAVE_THREADS)                         += pthread.o
27 27
 
28
+# subsystems
29
+OBJS-$(CONFIG_QSVVPP)                        += qsvvpp.o
30
+
28 31
 # audio filters
29 32
 OBJS-$(CONFIG_ABENCH_FILTER)                 += f_bench.o
30 33
 OBJS-$(CONFIG_ACOMPRESSOR_FILTER)            += af_sidechaincompress.o
... ...
@@ -248,6 +251,7 @@ OBJS-$(CONFIG_OCV_FILTER)                    += vf_libopencv.o
248 248
 OBJS-$(CONFIG_OPENCL)                        += deshake_opencl.o unsharp_opencl.o
249 249
 OBJS-$(CONFIG_OSCILLOSCOPE_FILTER)           += vf_datascope.o
250 250
 OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync.o
251
+OBJS-$(CONFIG_OVERLAY_QSV_FILTER)            += vf_overlay_qsv.o
251 252
 OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
252 253
 OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
253 254
 OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
... ...
@@ -331,6 +335,7 @@ OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.
331 331
 OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
332 332
 OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
333 333
 OBJS-$(CONFIG_VMAFMOTION_FILTER)             += vf_vmafmotion.o framesync.o
334
+OBJS-$(CONFIG_VPP_QSV_FILTER)                += vf_vpp_qsv.o
334 335
 OBJS-$(CONFIG_VSTACK_FILTER)                 += vf_stack.o framesync.o
335 336
 OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
336 337
 OBJS-$(CONFIG_WAVEFORM_FILTER)               += vf_waveform.o
... ...
@@ -389,6 +394,8 @@ SKIPHEADERS-$(CONFIG_OPENCL)                 += opencl_internal.h deshake_opencl
389 389
 
390 390
 OBJS-$(CONFIG_SHARED)                        += log2_tab.o
391 391
 
392
+SKIPHEADERS-$(CONFIG_QSVVPP)                 += qsvvpp.h
393
+
392 394
 TOOLS     = graph2dot
393 395
 TESTPROGS = drawutils filtfmts formats integral
394 396
 
... ...
@@ -260,6 +260,7 @@ static void register_all(void)
260 260
     REGISTER_FILTER(OCV,            ocv,            vf);
261 261
     REGISTER_FILTER(OSCILLOSCOPE,   oscilloscope,   vf);
262 262
     REGISTER_FILTER(OVERLAY,        overlay,        vf);
263
+    REGISTER_FILTER(OVERLAY_QSV,    overlay_qsv,    vf);
263 264
     REGISTER_FILTER(OWDENOISE,      owdenoise,      vf);
264 265
     REGISTER_FILTER(PAD,            pad,            vf);
265 266
     REGISTER_FILTER(PALETTEGEN,     palettegen,     vf);
... ...
@@ -343,6 +344,7 @@ static void register_all(void)
343 343
     REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
344 344
     REGISTER_FILTER(VIGNETTE,       vignette,       vf);
345 345
     REGISTER_FILTER(VMAFMOTION,     vmafmotion,     vf);
346
+    REGISTER_FILTER(VPP_QSV,        vpp_qsv,        vf);
346 347
     REGISTER_FILTER(VSTACK,         vstack,         vf);
347 348
     REGISTER_FILTER(W3FDIF,         w3fdif,         vf);
348 349
     REGISTER_FILTER(WAVEFORM,       waveform,       vf);
349 350
new file mode 100644
... ...
@@ -0,0 +1,727 @@
0
+/*
1
+ * This file is part of FFmpeg.
2
+ *
3
+ * FFmpeg is free software; you can redistribute it and/or
4
+ * modify it under the terms of the GNU Lesser General Public
5
+ * License as published by the Free Software Foundation; either
6
+ * version 2.1 of the License, or (at your option) any later version.
7
+ *
8
+ * FFmpeg is distributed in the hope that it will be useful,
9
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11
+ * Lesser General Public License for more details.
12
+ *
13
+ * You should have received a copy of the GNU Lesser General Public
14
+ * License along with FFmpeg; if not, write to the Free Software
15
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16
+ */
17
+
18
+/**
19
+ * @file
20
+ * Intel Quick Sync Video VPP base function
21
+ */
22
+
23
+#include "libavutil/common.h"
24
+#include "libavutil/mathematics.h"
25
+#include "libavutil/hwcontext.h"
26
+#include "libavutil/hwcontext_qsv.h"
27
+#include "libavutil/time.h"
28
+#include "libavutil/pixdesc.h"
29
+
30
+#include "internal.h"
31
+#include "qsvvpp.h"
32
+#include "video.h"
33
+
34
+#define IS_VIDEO_MEMORY(mode)  (mode & (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET | \
35
+                                        MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET))
36
+#define IS_OPAQUE_MEMORY(mode) (mode & MFX_MEMTYPE_OPAQUE_FRAME)
37
+#define IS_SYSTEM_MEMORY(mode) (mode & MFX_MEMTYPE_SYSTEM_MEMORY)
38
+
39
/* One node of the in/out frame pools: couples an AVFrame with the
 * mfxFrameSurface1 libmfx operates on.  surface == NULL marks the node
 * as free (see get_free_frame/clear_unused_frames). */
typedef struct QSVFrame {
    AVFrame          *frame;             /* owning AVFrame reference */
    mfxFrameSurface1 *surface;           /* surface in use, NULL when node is free */
    mfxFrameSurface1  surface_internal;  /* for system memory */
    struct QSVFrame  *next;              /* singly-linked pool list */
} QSVFrame;
45
+
46
/* abstract struct for all QSV filters */
struct QSVVPPContext {
    mfxSession          session;                                /* "slave" MFX session owned by this VPP */
    int (*filter_frame) (AVFilterLink *outlink, AVFrame *frame);/* callback */
    enum AVPixelFormat  out_sw_format;   /* Real output format */
    mfxVideoParam       vpp_param;
    mfxFrameInfo       *frame_infos;     /* frame info for each input */

    /* members related to the input/output surface */
    int                 in_mem_mode;     /* MFX_MEMTYPE_* flags of the input pool */
    int                 out_mem_mode;    /* MFX_MEMTYPE_* flags of the output pool */
    QSVFrame           *in_frame_list;   /* pool of submitted input frames */
    QSVFrame           *out_frame_list;  /* pool of pending output frames */
    int                 nb_surface_ptrs_in;
    int                 nb_surface_ptrs_out;
    mfxFrameSurface1  **surface_ptrs_in;
    mfxFrameSurface1  **surface_ptrs_out;

    /* MFXVPP extern parameters */
    mfxExtOpaqueSurfaceAlloc opaque_alloc;   /* used only in opaque-memory mode */
    mfxExtBuffer      **ext_buffers;         /* opaque_alloc + caller-provided ext bufs */
    int                 nb_ext_buffers;
};
69
+
70
/* Device handle types probed on the master session, tried in this order. */
static const mfxHandleType handle_types[] = {
    MFX_HANDLE_VA_DISPLAY,
    MFX_HANDLE_D3D9_DEVICE_MANAGER,
    MFX_HANDLE_D3D11_DEVICE,
};

/* libmfx timestamps use a fixed 90 kHz time base. */
static const AVRational default_tb = { 1, 90000 };
77
+
78
+/* functions for frameAlloc */
79
+static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
80
+                             mfxFrameAllocResponse *resp)
81
+{
82
+    QSVVPPContext *s = pthis;
83
+    int i;
84
+
85
+    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
86
+        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
87
+        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
88
+        return MFX_ERR_UNSUPPORTED;
89
+
90
+    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
91
+        resp->mids = av_mallocz(s->nb_surface_ptrs_in * sizeof(*resp->mids));
92
+        if (!resp->mids)
93
+            return AVERROR(ENOMEM);
94
+
95
+        for (i = 0; i < s->nb_surface_ptrs_in; i++)
96
+            resp->mids[i] = s->surface_ptrs_in[i]->Data.MemId;
97
+
98
+        resp->NumFrameActual = s->nb_surface_ptrs_in;
99
+    } else {
100
+        resp->mids = av_mallocz(s->nb_surface_ptrs_out * sizeof(*resp->mids));
101
+        if (!resp->mids)
102
+            return AVERROR(ENOMEM);
103
+
104
+        for (i = 0; i < s->nb_surface_ptrs_out; i++)
105
+            resp->mids[i] = s->surface_ptrs_out[i]->Data.MemId;
106
+
107
+        resp->NumFrameActual = s->nb_surface_ptrs_out;
108
+    }
109
+
110
+    return MFX_ERR_NONE;
111
+}
112
+
113
+static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
114
+{
115
+    av_freep(&resp->mids);
116
+    return MFX_ERR_NONE;
117
+}
118
+
119
+static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
120
+{
121
+    return MFX_ERR_UNSUPPORTED;
122
+}
123
+
124
+static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
125
+{
126
+    return MFX_ERR_UNSUPPORTED;
127
+}
128
+
129
+static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
130
+{
131
+    *hdl = mid;
132
+    return MFX_ERR_NONE;
133
+}
134
+
135
+static int pix_fmt_to_mfx_fourcc(int format)
136
+{
137
+    switch (format) {
138
+    case AV_PIX_FMT_YUV420P:
139
+        return MFX_FOURCC_YV12;
140
+    case AV_PIX_FMT_NV12:
141
+        return MFX_FOURCC_NV12;
142
+    case AV_PIX_FMT_YUYV422:
143
+        return MFX_FOURCC_YUY2;
144
+    case AV_PIX_FMT_RGB32:
145
+        return MFX_FOURCC_RGB4;
146
+    }
147
+
148
+    return MFX_FOURCC_NV12;
149
+}
150
+
151
/* Point an mfxFrameSurface1's plane/component pointers at the data buffers
 * of a system-memory AVFrame.  No pixels are copied.
 * Returns 0 on success, MFX_ERR_UNSUPPORTED for formats not handled here. */
static int map_frame_to_surface(AVFrame *frame, mfxFrameSurface1 *surface)
{
    switch (frame->format) {
    case AV_PIX_FMT_NV12:
        surface->Data.Y  = frame->data[0];
        surface->Data.UV = frame->data[1];
        break;
    case AV_PIX_FMT_YUV420P:
        surface->Data.Y = frame->data[0];
        surface->Data.U = frame->data[1];
        surface->Data.V = frame->data[2];
        break;
    case AV_PIX_FMT_YUYV422:
        /* packed single-plane layout: Y at offset 0, U at 1, V at 3 */
        surface->Data.Y = frame->data[0];
        surface->Data.U = frame->data[0] + 1;
        surface->Data.V = frame->data[0] + 3;
        break;
    case AV_PIX_FMT_RGB32:
        /* packed single-plane layout, B/G/R/A byte offsets 0..3 */
        surface->Data.B = frame->data[0];
        surface->Data.G = frame->data[0] + 1;
        surface->Data.R = frame->data[0] + 2;
        surface->Data.A = frame->data[0] + 3;
        break;
    default:
        return MFX_ERR_UNSUPPORTED;
    }
    /* Pitch of the first plane; NOTE(review): assumes all planes share a
     * compatible stride as libmfx expects — confirm for YUV420P inputs. */
    surface->Data.Pitch = frame->linesize[0];

    return 0;
}
181
+
182
+/* fill the surface info */
183
+static int fill_frameinfo_by_link(mfxFrameInfo *frameinfo, AVFilterLink *link)
184
+{
185
+    enum AVPixelFormat        pix_fmt;
186
+    AVHWFramesContext        *frames_ctx;
187
+    AVQSVFramesContext       *frames_hwctx;
188
+    const AVPixFmtDescriptor *desc;
189
+
190
+    if (link->format == AV_PIX_FMT_QSV) {
191
+        if (!link->hw_frames_ctx)
192
+            return AVERROR(EINVAL);
193
+
194
+        frames_ctx   = (AVHWFramesContext *)link->hw_frames_ctx->data;
195
+        frames_hwctx = frames_ctx->hwctx;
196
+        *frameinfo   = frames_hwctx->surfaces[0].Info;
197
+    } else {
198
+        pix_fmt = link->format;
199
+        desc = av_pix_fmt_desc_get(pix_fmt);
200
+        if (!desc)
201
+            return AVERROR_BUG;
202
+
203
+        frameinfo->CropX          = 0;
204
+        frameinfo->CropY          = 0;
205
+        frameinfo->Width          = FFALIGN(link->w, 32);
206
+        frameinfo->Height         = FFALIGN(link->h, 32);
207
+        frameinfo->PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
208
+        frameinfo->FourCC         = pix_fmt_to_mfx_fourcc(pix_fmt);
209
+        frameinfo->BitDepthLuma   = desc->comp[0].depth;
210
+        frameinfo->BitDepthChroma = desc->comp[0].depth;
211
+        frameinfo->Shift          = desc->comp[0].depth > 8;
212
+        if (desc->log2_chroma_w && desc->log2_chroma_h)
213
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV420;
214
+        else if (desc->log2_chroma_w)
215
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV422;
216
+        else
217
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV444;
218
+    }
219
+
220
+    frameinfo->CropW          = link->w;
221
+    frameinfo->CropH          = link->h;
222
+    frameinfo->FrameRateExtN  = link->frame_rate.num;
223
+    frameinfo->FrameRateExtD  = link->frame_rate.den;
224
+    frameinfo->AspectRatioW   = link->sample_aspect_ratio.num ? link->sample_aspect_ratio.num : 1;
225
+    frameinfo->AspectRatioH   = link->sample_aspect_ratio.den ? link->sample_aspect_ratio.den : 1;
226
+
227
+    return 0;
228
+}
229
+
230
+static void clear_unused_frames(QSVFrame *list)
231
+{
232
+    while (list) {
233
+        if (list->surface && !list->surface->Data.Locked) {
234
+            list->surface = NULL;
235
+            av_frame_free(&list->frame);
236
+        }
237
+        list = list->next;
238
+    }
239
+}
240
+
241
+static void clear_frame_list(QSVFrame **list)
242
+{
243
+    while (*list) {
244
+        QSVFrame *frame;
245
+
246
+        frame = *list;
247
+        *list = (*list)->next;
248
+        av_frame_free(&frame->frame);
249
+        av_freep(&frame);
250
+    }
251
+}
252
+
253
+static QSVFrame *get_free_frame(QSVFrame **list)
254
+{
255
+    QSVFrame *out = *list;
256
+
257
+    for (; out; out = out->next) {
258
+        if (!out->surface)
259
+            break;
260
+    }
261
+
262
+    if (!out) {
263
+        out = av_mallocz(sizeof(*out));
264
+        if (!out) {
265
+            av_log(NULL, AV_LOG_ERROR, "Can't alloc new output frame.\n");
266
+            return NULL;
267
+        }
268
+        out->next  = *list;
269
+        *list      = out;
270
+    }
271
+
272
+    return out;
273
+}
274
+
275
/* get the input surface */
/* Wrap the incoming AVFrame picref in a QSVFrame from the input pool and
 * fill its mfxFrameSurface1 (info, timestamp, interlacing flags).
 * Ownership: picref is taken over by the returned node; on the system-memory
 * re-padding path it is even freed here after being copied.
 * Returns NULL on any failure. */
static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
{
    QSVFrame        *qsv_frame;
    AVFilterContext *ctx = inlink->dst;

    clear_unused_frames(s->in_frame_list);

    qsv_frame = get_free_frame(&s->in_frame_list);
    if (!qsv_frame)
        return NULL;

    /* Turn AVFrame into mfxFrameSurface1.
     * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
     * mfxFrameSurface1 is stored in AVFrame->data[3];
     * for system memory mode, raw video data is stored in
     * AVFrame, we should map it into mfxFrameSurface1.
     */
    if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
        if (picref->format != AV_PIX_FMT_QSV) {
            av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
            return NULL;
        }
        qsv_frame->frame   = picref;
        qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
    } else {
        /* make a copy if the input is not padded as libmfx requires */
        if (picref->height & 31 || picref->linesize[0] & 31) {
            qsv_frame->frame = ff_get_video_buffer(inlink,
                                                   FFALIGN(inlink->w, 32),
                                                   FFALIGN(inlink->h, 32));
            if (!qsv_frame->frame)
                return NULL;

            /* keep the caller-visible dimensions, only padding is enlarged */
            qsv_frame->frame->width   = picref->width;
            qsv_frame->frame->height  = picref->height;

            if (av_frame_copy(qsv_frame->frame, picref) < 0) {
                av_frame_free(&qsv_frame->frame);
                return NULL;
            }

            av_frame_copy_props(qsv_frame->frame, picref);
            /* NOTE(review): picref is consumed here — the caller must not
             * touch it after this function returns on this path. */
            av_frame_free(&picref);
        } else
            qsv_frame->frame = picref;

        if (map_frame_to_surface(qsv_frame->frame,
                                &qsv_frame->surface_internal) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
            return NULL;
        }
        qsv_frame->surface = &qsv_frame->surface_internal;
    }

    /* per-input frame info, and pts rescaled to the 90 kHz MFX time base */
    qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
    qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
                                                      inlink->time_base, default_tb);

    /* derive the MFX picture structure from the frame's interlacing fields */
    qsv_frame->surface->Info.PicStruct =
            !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
            (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
                                                 MFX_PICSTRUCT_FIELD_BFF);
    if (qsv_frame->frame->repeat_pict == 1)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (qsv_frame->frame->repeat_pict == 2)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (qsv_frame->frame->repeat_pict == 4)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    return qsv_frame;
}
347
+
348
/* get the output surface */
/* Obtain a QSVFrame from the output pool and attach a destination buffer:
 * a hwframe from outlink->hw_frames_ctx in video/opaque mode, or an aligned
 * system-memory AVFrame mapped into surface_internal otherwise.
 * Returns NULL on any failure. */
static QSVFrame *query_frame(QSVVPPContext *s, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    QSVFrame        *out_frame;
    int              ret;

    clear_unused_frames(s->out_frame_list);

    out_frame = get_free_frame(&s->out_frame_list);
    if (!out_frame)
        return NULL;

    /* For video memory, get a hw frame;
     * For system memory, get a sw frame and map it into a mfx_surface. */
    if (!IS_SYSTEM_MEMORY(s->out_mem_mode)) {
        out_frame->frame = av_frame_alloc();
        if (!out_frame->frame)
            return NULL;

        ret = av_hwframe_get_buffer(outlink->hw_frames_ctx, out_frame->frame, 0);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Can't allocate a surface.\n");
            /* NOTE(review): out_frame->frame stays allocated in the pool node
             * here; it is only reclaimed by clear_frame_list() at uninit. */
            return NULL;
        }

        out_frame->surface = (mfxFrameSurface1 *)out_frame->frame->data[3];
    } else {
        /* Get a frame with aligned dimensions.
         * Libmfx need system memory being 128x64 aligned */
        out_frame->frame = ff_get_video_buffer(outlink,
                                               FFALIGN(outlink->w, 128),
                                               FFALIGN(outlink->h, 64));
        if (!out_frame->frame)
            return NULL;

        /* report the true output size, padding stays hidden */
        out_frame->frame->width  = outlink->w;
        out_frame->frame->height = outlink->h;

        ret = map_frame_to_surface(out_frame->frame,
                                  &out_frame->surface_internal);
        if (ret < 0)
            return NULL;

        out_frame->surface = &out_frame->surface_internal;
    }

    /* the surface must advertise the negotiated VPP output format */
    out_frame->surface->Info = s->vpp_param.vpp.Out;

    return out_frame;
}
399
+
400
+/* create the QSV session */
401
+static int init_vpp_session(AVFilterContext *avctx, QSVVPPContext *s)
402
+{
403
+    AVFilterLink                 *inlink = avctx->inputs[0];
404
+    AVFilterLink                *outlink = avctx->outputs[0];
405
+    AVQSVFramesContext  *in_frames_hwctx = NULL;
406
+    AVQSVFramesContext *out_frames_hwctx = NULL;
407
+
408
+    AVBufferRef *device_ref;
409
+    AVHWDeviceContext *device_ctx;
410
+    AVQSVDeviceContext *device_hwctx;
411
+    mfxHDL handle;
412
+    mfxHandleType handle_type;
413
+    mfxVersion ver;
414
+    mfxIMPL impl;
415
+    int ret, i;
416
+
417
+    if (inlink->hw_frames_ctx) {
418
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
419
+
420
+        device_ref      = frames_ctx->device_ref;
421
+        in_frames_hwctx = frames_ctx->hwctx;
422
+
423
+        s->in_mem_mode = in_frames_hwctx->frame_type;
424
+
425
+        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
426
+                                              sizeof(*s->surface_ptrs_in));
427
+        if (!s->surface_ptrs_in)
428
+            return AVERROR(ENOMEM);
429
+
430
+        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
431
+            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
432
+
433
+        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
434
+    } else if (avctx->hw_device_ctx) {
435
+        device_ref     = avctx->hw_device_ctx;
436
+        s->in_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
437
+    } else {
438
+        av_log(avctx, AV_LOG_ERROR, "No hw context provided.\n");
439
+        return AVERROR(EINVAL);
440
+    }
441
+
442
+    device_ctx   = (AVHWDeviceContext *)device_ref->data;
443
+    device_hwctx = device_ctx->hwctx;
444
+
445
+    if (outlink->format == AV_PIX_FMT_QSV) {
446
+        AVHWFramesContext *out_frames_ctx;
447
+        AVBufferRef *out_frames_ref = av_hwframe_ctx_alloc(device_ref);
448
+        if (!out_frames_ref)
449
+            return AVERROR(ENOMEM);
450
+
451
+        s->out_mem_mode = IS_OPAQUE_MEMORY(s->in_mem_mode) ?
452
+                          MFX_MEMTYPE_OPAQUE_FRAME :
453
+                          MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
454
+
455
+        out_frames_ctx   = (AVHWFramesContext *)out_frames_ref->data;
456
+        out_frames_hwctx = out_frames_ctx->hwctx;
457
+
458
+        out_frames_ctx->format            = AV_PIX_FMT_QSV;
459
+        out_frames_ctx->width             = FFALIGN(outlink->w, 32);
460
+        out_frames_ctx->height            = FFALIGN(outlink->h, 32);
461
+        out_frames_ctx->sw_format         = s->out_sw_format;
462
+        out_frames_ctx->initial_pool_size = 64;
463
+        out_frames_hwctx->frame_type      = s->out_mem_mode;
464
+
465
+        ret = av_hwframe_ctx_init(out_frames_ref);
466
+        if (ret < 0) {
467
+            av_buffer_unref(&out_frames_ref);
468
+            av_log(avctx, AV_LOG_ERROR, "Error creating frames_ctx for output pad.\n");
469
+            return ret;
470
+        }
471
+
472
+        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
473
+                                               sizeof(*s->surface_ptrs_out));
474
+        if (!s->surface_ptrs_out) {
475
+            av_buffer_unref(&out_frames_ref);
476
+            return AVERROR(ENOMEM);
477
+        }
478
+
479
+        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
480
+            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
481
+        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;
482
+
483
+        av_buffer_unref(&outlink->hw_frames_ctx);
484
+        outlink->hw_frames_ctx = out_frames_ref;
485
+    } else
486
+        s->out_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
487
+
488
+    /* extract the properties of the "master" session given to us */
489
+    ret = MFXQueryIMPL(device_hwctx->session, &impl);
490
+    if (ret == MFX_ERR_NONE)
491
+        ret = MFXQueryVersion(device_hwctx->session, &ver);
492
+    if (ret != MFX_ERR_NONE) {
493
+        av_log(avctx, AV_LOG_ERROR, "Error querying the session attributes\n");
494
+        return AVERROR_UNKNOWN;
495
+    }
496
+
497
+    for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
498
+        ret = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
499
+        if (ret == MFX_ERR_NONE) {
500
+            handle_type = handle_types[i];
501
+            break;
502
+        }
503
+    }
504
+
505
+    /* create a "slave" session with those same properties, to be used for vpp */
506
+    ret = MFXInit(impl, &ver, &s->session);
507
+    if (ret != MFX_ERR_NONE) {
508
+        av_log(avctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
509
+        return AVERROR_UNKNOWN;
510
+    }
511
+
512
+    if (handle) {
513
+        ret = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
514
+        if (ret != MFX_ERR_NONE)
515
+            return AVERROR_UNKNOWN;
516
+    }
517
+
518
+    if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
519
+        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
520
+        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
521
+        s->opaque_alloc.In.Type       = s->in_mem_mode;
522
+
523
+        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
524
+        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
525
+        s->opaque_alloc.Out.Type       = s->out_mem_mode;
526
+
527
+        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
528
+        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
529
+    } else if (IS_VIDEO_MEMORY(s->in_mem_mode) || IS_VIDEO_MEMORY(s->out_mem_mode)) {
530
+        mfxFrameAllocator frame_allocator = {
531
+            .pthis  = s,
532
+            .Alloc  = frame_alloc,
533
+            .Lock   = frame_lock,
534
+            .Unlock = frame_unlock,
535
+            .GetHDL = frame_get_hdl,
536
+            .Free   = frame_free,
537
+        };
538
+
539
+        ret = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
540
+        if (ret != MFX_ERR_NONE)
541
+            return AVERROR_UNKNOWN;
542
+    }
543
+
544
+    return 0;
545
+}
546
+
547
+int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param)
548
+{
549
+    int i;
550
+    int ret;
551
+    QSVVPPContext *s;
552
+
553
+    s = av_mallocz(sizeof(*s));
554
+    if (!s)
555
+        return AVERROR(ENOMEM);
556
+
557
+    s->filter_frame  = param->filter_frame;
558
+    if (!s->filter_frame)
559
+        s->filter_frame = ff_filter_frame;
560
+    s->out_sw_format = param->out_sw_format;
561
+
562
+    /* create the vpp session */
563
+    ret = init_vpp_session(avctx, s);
564
+    if (ret < 0)
565
+        goto failed;
566
+
567
+    s->frame_infos = av_mallocz_array(avctx->nb_inputs, sizeof(*s->frame_infos));
568
+    if (!s->frame_infos) {
569
+        ret = AVERROR(ENOMEM);
570
+        goto failed;
571
+    }
572
+
573
+    /* Init each input's information */
574
+    for (i = 0; i < avctx->nb_inputs; i++) {
575
+        ret = fill_frameinfo_by_link(&s->frame_infos[i], avctx->inputs[i]);
576
+        if (ret < 0)
577
+            goto failed;
578
+    }
579
+
580
+    /* Update input's frame info according to crop */
581
+    for (i = 0; i < param->num_crop; i++) {
582
+        QSVVPPCrop *crop = param->crop + i;
583
+        if (crop->in_idx > avctx->nb_inputs) {
584
+            ret = AVERROR(EINVAL);
585
+            goto failed;
586
+        }
587
+        s->frame_infos[crop->in_idx].CropX = crop->x;
588
+        s->frame_infos[crop->in_idx].CropY = crop->y;
589
+        s->frame_infos[crop->in_idx].CropW = crop->w;
590
+        s->frame_infos[crop->in_idx].CropH = crop->h;
591
+    }
592
+
593
+    s->vpp_param.vpp.In = s->frame_infos[0];
594
+
595
+    ret = fill_frameinfo_by_link(&s->vpp_param.vpp.Out, avctx->outputs[0]);
596
+    if (ret < 0) {
597
+        av_log(avctx, AV_LOG_ERROR, "Fail to get frame info from link.\n");
598
+        goto failed;
599
+    }
600
+
601
+    if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
602
+        s->nb_ext_buffers = param->num_ext_buf + 1;
603
+        s->ext_buffers = av_mallocz_array(s->nb_ext_buffers, sizeof(*s->ext_buffers));
604
+        if (!s->ext_buffers) {
605
+            ret = AVERROR(ENOMEM);
606
+            goto failed;
607
+        }
608
+
609
+        s->ext_buffers[0] = (mfxExtBuffer *)&s->opaque_alloc;
610
+        for (i = 1; i < param->num_ext_buf; i++)
611
+            s->ext_buffers[i]    = param->ext_buf[i - 1];
612
+        s->vpp_param.ExtParam    = s->ext_buffers;
613
+        s->vpp_param.NumExtParam = s->nb_ext_buffers;
614
+    } else {
615
+        s->vpp_param.NumExtParam = param->num_ext_buf;
616
+        s->vpp_param.ExtParam    = param->ext_buf;
617
+    }
618
+
619
+    s->vpp_param.AsyncDepth = 1;
620
+
621
+    if (IS_SYSTEM_MEMORY(s->in_mem_mode))
622
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_SYSTEM_MEMORY;
623
+    else if (IS_VIDEO_MEMORY(s->in_mem_mode))
624
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_VIDEO_MEMORY;
625
+    else if (IS_OPAQUE_MEMORY(s->in_mem_mode))
626
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_OPAQUE_MEMORY;
627
+
628
+    if (IS_SYSTEM_MEMORY(s->out_mem_mode))
629
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
630
+    else if (IS_VIDEO_MEMORY(s->out_mem_mode))
631
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_VIDEO_MEMORY;
632
+    else if (IS_OPAQUE_MEMORY(s->out_mem_mode))
633
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
634
+
635
+    ret = MFXVideoVPP_Init(s->session, &s->vpp_param);
636
+    if (ret < 0) {
637
+        av_log(avctx, AV_LOG_ERROR, "Failed to create a qsvvpp, ret = %d.\n", ret);
638
+        goto failed;
639
+    }
640
+
641
+    *vpp = s;
642
+    return 0;
643
+
644
+failed:
645
+    ff_qsvvpp_free(&s);
646
+
647
+    return ret;
648
+}
649
+
650
+int ff_qsvvpp_free(QSVVPPContext **vpp)
651
+{
652
+    QSVVPPContext *s = *vpp;
653
+
654
+    if (!s)
655
+        return 0;
656
+
657
+    if (s->session) {
658
+        MFXVideoVPP_Close(s->session);
659
+        MFXClose(s->session);
660
+    }
661
+
662
+    /* release all the resources */
663
+    clear_frame_list(&s->in_frame_list);
664
+    clear_frame_list(&s->out_frame_list);
665
+    av_freep(&s->surface_ptrs_in);
666
+    av_freep(&s->surface_ptrs_out);
667
+    av_freep(&s->ext_buffers);
668
+    av_freep(&s->frame_infos);
669
+    av_freep(vpp);
670
+
671
+    return 0;
672
+}
673
+
674
/* Run one input frame through the VPP: submit it, then loop pulling output
 * surfaces while libmfx reports MFX_ERR_MORE_SURFACE (one input may yield
 * several outputs, e.g. frame-rate conversion).  Each completed output is
 * handed to s->filter_frame.
 * NOTE(review): on full success the returned value is the last mfx status
 * (MFX_ERR_NONE == 0 or a positive warning), not an AVERROR code — verify
 * callers only test for < 0. */
int ff_qsvvpp_filter_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext  *ctx     = inlink->dst;
    AVFilterLink     *outlink = ctx->outputs[0];
    mfxSyncPoint      sync;
    QSVFrame         *in_frame, *out_frame;
    int               ret, filter_ret;

    /* submit_frame takes ownership of picref */
    in_frame = submit_frame(s, inlink, picref);
    if (!in_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input[%d]\n",
               FF_INLINK_IDX(inlink));
        return AVERROR(ENOMEM);
    }

    do {
        out_frame = query_frame(s, outlink);
        if (!out_frame) {
            av_log(ctx, AV_LOG_ERROR, "Failed to query an output frame.\n");
            return AVERROR(ENOMEM);
        }

        /* busy-wait (with short sleeps) while the device is occupied */
        do {
            ret = MFXVideoVPP_RunFrameVPPAsync(s->session, in_frame->surface,
                                               out_frame->surface, NULL, &sync);
            if (ret == MFX_WRN_DEVICE_BUSY)
                av_usleep(500);
        } while (ret == MFX_WRN_DEVICE_BUSY);

        if (ret < 0 && ret != MFX_ERR_MORE_SURFACE) {
            /* Ignore more_data error */
            if (ret == MFX_ERR_MORE_DATA)
                ret = AVERROR(EAGAIN);
            break;
        }

        /* wait (up to 1000 ms) for the async operation to finish */
        if (MFXVideoCORE_SyncOperation(s->session, sync, 1000) < 0)
            av_log(ctx, AV_LOG_WARNING, "Sync failed.\n");

        /* convert the 90 kHz MFX timestamp back to the output time base */
        out_frame->frame->pts = av_rescale_q(out_frame->surface->Data.TimeStamp,
                                             default_tb, outlink->time_base);

        /* hand the frame downstream; on success ownership moves with it */
        filter_ret = s->filter_frame(outlink, out_frame->frame);
        if (filter_ret < 0) {
            av_frame_free(&out_frame->frame);
            ret = filter_ret;
            break;
        }
        out_frame->frame = NULL;
    } while(ret == MFX_ERR_MORE_SURFACE);

    return ret;
}
0 727
new file mode 100644
... ...
@@ -0,0 +1,66 @@
0
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Intel Quick Sync Video VPP base function
 */

#ifndef AVFILTER_QSVVPP_H
#define AVFILTER_QSVVPP_H

#include <mfx/mfxvideo.h>

#include "avfilter.h"

/* Index of a link within its owning filter's pad array, derived from the
 * pad pointer (pads are stored contiguously). */
#define FF_INLINK_IDX(link)  ((int)((link)->dstpad - (link)->dst->input_pads))
#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))

/* Opaque per-session VPP context; the definition lives in qsvvpp.c. */
typedef struct QSVVPPContext QSVVPPContext;

typedef struct QSVVPPCrop {
    int in_idx;        ///< Input index
    int x, y, w, h;    ///< Crop rectangle
} QSVVPPCrop;

/* Caller-supplied configuration for ff_qsvvpp_create(). */
typedef struct QSVVPPParam {
    /* default is ff_filter_frame */
    int (*filter_frame)(AVFilterLink *outlink, AVFrame *frame);

    /* To fill with MFX enhanced filter configurations */
    int num_ext_buf;
    mfxExtBuffer **ext_buf;

    /* Real output format */
    enum AVPixelFormat out_sw_format;

    /* Crop information for each input, if needed */
    int num_crop;
    QSVVPPCrop *crop;
} QSVVPPParam;

/* create and initialize the QSV session */
int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param);

/* release the resources (eg.surfaces) */
int ff_qsvvpp_free(QSVVPPContext **vpp);

/* vpp filter frame and call the cb if needed */
int ff_qsvvpp_filter_frame(QSVVPPContext *vpp, AVFilterLink *inlink, AVFrame *frame);

#endif /* AVFILTER_QSVVPP_H */
0 66
new file mode 100644
... ...
@@ -0,0 +1,487 @@
0
+/*
1
+ * This file is part of FFmpeg.
2
+ *
3
+ * FFmpeg is free software; you can redistribute it and/or
4
+ * modify it under the terms of the GNU Lesser General Public
5
+ * License as published by the Free Software Foundation; either
6
+ * version 2.1 of the License, or (at your option) any later version.
7
+ *
8
+ * FFmpeg is distributed in the hope that it will be useful,
9
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11
+ * Lesser General Public License for more details.
12
+ *
13
+ * You should have received a copy of the GNU Lesser General Public
14
+ * License along with FFmpeg; if not, write to the Free Software
15
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16
+ */
17
+
18
+/**
19
+ * @file
20
+ * A hardware accelerated overlay filter based on Intel Quick Sync Video VPP
21
+ */
22
+
23
+#include "libavutil/opt.h"
24
+#include "libavutil/common.h"
25
+#include "libavutil/pixdesc.h"
26
+#include "libavutil/eval.h"
27
+#include "libavutil/hwcontext.h"
28
+#include "libavutil/avstring.h"
29
+#include "libavutil/avassert.h"
30
+#include "libavutil/imgutils.h"
31
+#include "libavutil/mathematics.h"
32
+
33
+#include "internal.h"
34
+#include "avfilter.h"
35
+#include "formats.h"
36
+#include "video.h"
37
+
38
+#include "qsvvpp.h"
39
+
40
/* Input pad indices. */
#define MAIN    0
#define OVERLAY 1

#define OFFSET(x) offsetof(QSVOverlayContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM

/* Indices into QSVOverlayContext.var_values; paired entries are the long
 * and short aliases of the same expression variable (see var_names below). */
enum var_name {
    VAR_MAIN_iW,     VAR_MW,
    VAR_MAIN_iH,     VAR_MH,
    VAR_OVERLAY_iW,
    VAR_OVERLAY_iH,
    VAR_OVERLAY_X,  VAR_OX,
    VAR_OVERLAY_Y,  VAR_OY,
    VAR_OVERLAY_W,  VAR_OW,
    VAR_OVERLAY_H,  VAR_OH,
    VAR_VARS_NB
};

/* What to do when the overlay input reaches EOF before the main input. */
enum EOFAction {
    EOF_ACTION_REPEAT,
    EOF_ACTION_ENDALL
};

typedef struct QSVOverlayContext {
    const AVClass      *class;

    QSVVPPContext      *qsv;        /* shared QSV VPP session */
    QSVVPPParam        qsv_param;   /* parameters passed to ff_qsvvpp_create() */
    mfxExtVPPComposite comp_conf;   /* MFX composition config (two input streams) */
    double             var_values[VAR_VARS_NB];

    /* x/y/w/h option strings, evaluated in eval_expr() */
    char     *overlay_ox, *overlay_oy, *overlay_ow, *overlay_oh;
    uint16_t  overlay_alpha, overlay_pixel_alpha;

    enum EOFAction eof_action;  /* action to take on EOF from source */

    AVFrame *main;                     /* pending frame from the main input */
    AVFrame *over_prev, *over_next;    /* last consumed / next pending overlay frame */
} QSVOverlayContext;

static const char *const var_names[] = {
    "main_w",     "W",   /* input width of the main layer */
    "main_h",     "H",   /* input height of the main layer */
    "overlay_iw",        /* input width of the overlay layer */
    "overlay_ih",        /* input height of the overlay layer */
    "overlay_x",  "x",   /* x position of the overlay layer inside of main */
    "overlay_y",  "y",   /* y position of the overlay layer inside of main */
    "overlay_w",  "w",   /* output width of overlay layer */
    "overlay_h",  "h",   /* output height of overlay layer */
    NULL
};

static const AVOption options[] = {
    { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
    { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
    { "w", "Overlay width",      OFFSET(overlay_ow), AV_OPT_TYPE_STRING, { .str="overlay_iw"}, 0, 255, .flags = FLAGS},
    { "h", "Overlay height",     OFFSET(overlay_oh), AV_OPT_TYPE_STRING, { .str="overlay_ih*w/overlay_iw"}, 0, 255, .flags = FLAGS},
    { "alpha", "Overlay global alpha", OFFSET(overlay_alpha), AV_OPT_TYPE_INT, { .i64 = 255}, 0, 255, .flags = FLAGS},
    { "eof_action", "Action to take when encountering EOF from secondary input ",
        OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
        EOF_ACTION_REPEAT, EOF_ACTION_ENDALL, .flags = FLAGS, "eof_action" },
        { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
        { "endall", "End both streams.",          0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
    { NULL }
};
105
+
106
+static int eval_expr(AVFilterContext *ctx)
107
+{
108
+    QSVOverlayContext *vpp = ctx->priv;
109
+    double     *var_values = vpp->var_values;
110
+    int                ret = 0;
111
+    AVExpr *ox_expr = NULL, *oy_expr = NULL;
112
+    AVExpr *ow_expr = NULL, *oh_expr = NULL;
113
+
114
+#define PASS_EXPR(e, s) {\
115
+    ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
116
+    if (ret < 0) {\
117
+        av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
118
+        goto release;\
119
+    }\
120
+}
121
+    PASS_EXPR(ox_expr, vpp->overlay_ox);
122
+    PASS_EXPR(oy_expr, vpp->overlay_oy);
123
+    PASS_EXPR(ow_expr, vpp->overlay_ow);
124
+    PASS_EXPR(oh_expr, vpp->overlay_oh);
125
+#undef PASS_EXPR
126
+
127
+    var_values[VAR_OVERLAY_W] =
128
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
129
+    var_values[VAR_OVERLAY_H] =
130
+    var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
131
+
132
+    /* calc again in case ow is relative to oh */
133
+    var_values[VAR_OVERLAY_W] =
134
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
135
+
136
+    var_values[VAR_OVERLAY_X] =
137
+    var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
138
+    var_values[VAR_OVERLAY_Y] =
139
+    var_values[VAR_OY]        = av_expr_eval(oy_expr, var_values, NULL);
140
+
141
+    /* calc again in case ox is relative to oy */
142
+    var_values[VAR_OVERLAY_X] =
143
+    var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
144
+
145
+    /* calc overlay_w and overlay_h again incase relative to ox,oy */
146
+    var_values[VAR_OVERLAY_W] =
147
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
148
+    var_values[VAR_OVERLAY_H] =
149
+    var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
150
+    var_values[VAR_OVERLAY_W] =
151
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
152
+
153
+release:
154
+    av_expr_free(ox_expr);
155
+    av_expr_free(oy_expr);
156
+    av_expr_free(ow_expr);
157
+    av_expr_free(oh_expr);
158
+
159
+    return ret;
160
+}
161
+
162
+static int have_alpha_planar(AVFilterLink *link)
163
+{
164
+    enum AVPixelFormat pix_fmt;
165
+    const AVPixFmtDescriptor *desc;
166
+    AVHWFramesContext *fctx;
167
+
168
+    if (link->format == AV_PIX_FMT_QSV) {
169
+        fctx    = (AVHWFramesContext *)link->hw_frames_ctx->data;
170
+        pix_fmt = fctx->sw_format;
171
+    }
172
+
173
+    desc = av_pix_fmt_desc_get(pix_fmt);
174
+    if (!desc)
175
+        return 0;
176
+
177
+    return !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
178
+}
179
+
180
+static int config_main_input(AVFilterLink *inlink)
181
+{
182
+    AVFilterContext      *ctx = inlink->dst;
183
+    QSVOverlayContext    *vpp = ctx->priv;
184
+    mfxVPPCompInputStream *st = &vpp->comp_conf.InputStream[0];
185
+
186
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
187
+           av_get_pix_fmt_name(inlink->format));
188
+
189
+    vpp->var_values[VAR_MAIN_iW] =
190
+    vpp->var_values[VAR_MW]      = inlink->w;
191
+    vpp->var_values[VAR_MAIN_iH] =
192
+    vpp->var_values[VAR_MH]      = inlink->h;
193
+
194
+    st->DstX              = 0;
195
+    st->DstY              = 0;
196
+    st->DstW              = inlink->w;
197
+    st->DstH              = inlink->h;
198
+    st->GlobalAlphaEnable = 0;
199
+    st->PixelAlphaEnable  = 0;
200
+
201
+    return 0;
202
+}
203
+
204
+static int config_overlay_input(AVFilterLink *inlink)
205
+{
206
+    AVFilterContext       *ctx = inlink->dst;
207
+    QSVOverlayContext     *vpp = ctx->priv;
208
+    mfxVPPCompInputStream *st  = &vpp->comp_conf.InputStream[1];
209
+    int                    ret = 0;
210
+
211
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
212
+           av_get_pix_fmt_name(inlink->format));
213
+
214
+    vpp->var_values[VAR_OVERLAY_iW] = inlink->w;
215
+    vpp->var_values[VAR_OVERLAY_iH] = inlink->h;
216
+
217
+    ret = eval_expr(ctx);
218
+    if (ret < 0)
219
+        return ret;
220
+
221
+    st->DstX              = vpp->var_values[VAR_OX];
222
+    st->DstY              = vpp->var_values[VAR_OY];
223
+    st->DstW              = vpp->var_values[VAR_OW];
224
+    st->DstH              = vpp->var_values[VAR_OH];
225
+    st->GlobalAlpha       = vpp->overlay_alpha;
226
+    st->GlobalAlphaEnable = (st->GlobalAlpha < 255);
227
+    st->PixelAlphaEnable  = have_alpha_planar(inlink);
228
+
229
+    return 0;
230
+}
231
+
232
+static int config_output(AVFilterLink *outlink)
233
+{
234
+    AVFilterContext   *ctx = outlink->src;
235
+    QSVOverlayContext *vpp = ctx->priv;
236
+    AVFilterLink      *in0 = ctx->inputs[0];
237
+    AVFilterLink      *in1 = ctx->inputs[1];
238
+
239
+    av_log(ctx, AV_LOG_DEBUG, "Output is of %s.\n", av_get_pix_fmt_name(outlink->format));
240
+    if ((in0->format == AV_PIX_FMT_QSV && in1->format != AV_PIX_FMT_QSV) ||
241
+        (in0->format != AV_PIX_FMT_QSV && in1->format == AV_PIX_FMT_QSV)) {
242
+        av_log(ctx, AV_LOG_ERROR, "Mixing hardware and software pixel formats is not supported.\n");
243
+        return AVERROR(EINVAL);
244
+    } else if (in0->format == AV_PIX_FMT_QSV) {
245
+        AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data;
246
+        AVHWFramesContext *hw_frame1 = (AVHWFramesContext *)in1->hw_frames_ctx->data;
247
+
248
+        if (hw_frame0->device_ctx != hw_frame1->device_ctx) {
249
+            av_log(ctx, AV_LOG_ERROR, "Inputs with different underlying QSV devices are forbidden.\n");
250
+            return AVERROR(EINVAL);
251
+        }
252
+    }
253
+
254
+    outlink->w          = vpp->var_values[VAR_MW];
255
+    outlink->h          = vpp->var_values[VAR_MH];
256
+    outlink->frame_rate = in0->frame_rate;
257
+    outlink->time_base  = av_inv_q(outlink->frame_rate);
258
+
259
+    return ff_qsvvpp_create(ctx, &vpp->qsv, &vpp->qsv_param);
260
+}
261
+
262
+static int blend_frame(AVFilterContext *ctx, AVFrame *mpic, AVFrame *opic)
263
+{
264
+    int                ret = 0;
265
+    QSVOverlayContext *vpp = ctx->priv;
266
+    AVFrame     *opic_copy = NULL;
267
+
268
+    ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[0], mpic);
269
+    if (ret == 0 || ret == AVERROR(EAGAIN)) {
270
+        /* Reference the overlay frame. Because:
271
+         * 1. ff_qsvvpp_filter_frame will take control of the given frame
272
+         * 2. We need to repeat the overlay frame when 2nd input goes into EOF
273
+         */
274
+        opic_copy = av_frame_clone(opic);
275
+        if (!opic_copy)
276
+            return AVERROR(ENOMEM);
277
+
278
+        ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[1], opic_copy);
279
+    }
280
+
281
+    return ret;
282
+}
283
+
284
+static int handle_overlay_eof(AVFilterContext *ctx)
285
+{
286
+    int              ret = 0;
287
+    QSVOverlayContext *s = ctx->priv;
288
+    /* Repeat previous frame on secondary input */
289
+    if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
290
+        ret = blend_frame(ctx, s->main, s->over_prev);
291
+    /* End both streams */
292
+    else if (s->eof_action == EOF_ACTION_ENDALL)
293
+        return AVERROR_EOF;
294
+
295
+    s->main = NULL;
296
+
297
+    return ret;
298
+}
299
+
300
/**
 * Drive both inputs and blend the best-matching pair of frames.
 *
 * Pulls one frame from the main input (stored by filter_frame_main into
 * s->main) and advances the overlay input until its next frame's pts is
 * no longer behind the main frame's pts, then blends either the current
 * or the previous overlay frame depending on the timestamp comparison.
 * Overlay EOF is delegated to handle_overlay_eof().
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    QSVOverlayContext *s = ctx->priv;
    AVRational   tb_main = ctx->inputs[MAIN]->time_base;
    AVRational   tb_over = ctx->inputs[OVERLAY]->time_base;
    int              ret = 0;

    /* get a frame on the main input */
    if (!s->main) {
        ret = ff_request_frame(ctx->inputs[MAIN]);
        if (ret < 0)
            return ret;
        /* NOTE(review): s->main is assumed non-NULL after a successful
         * request (filter_frame_main stores it) — confirm the fifo pads
         * guarantee a frame is delivered synchronously here. */
    }

    /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
    if (!s->over_next) {
        ret = ff_request_frame(ctx->inputs[OVERLAY]);
        if (ret == AVERROR_EOF)
            return handle_overlay_eof(ctx);
        else if (ret < 0)
            return ret;
    }

    /* Drop overlay frames that are strictly older than the main frame,
     * keeping the most recent one in over_prev for possible reuse. */
    while (s->main->pts != AV_NOPTS_VALUE &&
           s->over_next->pts != AV_NOPTS_VALUE &&
           av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
        av_frame_free(&s->over_prev);
        FFSWAP(AVFrame*, s->over_prev, s->over_next);

        ret = ff_request_frame(ctx->inputs[OVERLAY]);
        if (ret == AVERROR_EOF)
            return handle_overlay_eof(ctx);
        else if (ret < 0)
            return ret;
    }

    if (s->main->pts == AV_NOPTS_VALUE ||
        s->over_next->pts == AV_NOPTS_VALUE ||
        !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
        /* Timestamps match (or are unknown): blend the upcoming overlay
         * frame and rotate it into over_prev. */
        ret = blend_frame(ctx, s->main, s->over_next);
        av_frame_free(&s->over_prev);
        FFSWAP(AVFrame*, s->over_prev, s->over_next);
    } else if (s->over_prev) {
        /* Overlay frame is in the future: reuse the previous one. */
        ret = blend_frame(ctx, s->main, s->over_prev);
    } else {
        /* No overlay frame available yet: drop the main frame and retry. */
        av_frame_free(&s->main);
        ret = AVERROR(EAGAIN);
    }

    /* Ownership of the main frame has passed to blend_frame (or it was
     * freed above); clear the reference either way. */
    s->main = NULL;

    return ret;
}
354
+
355
+static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
356
+{
357
+    QSVOverlayContext *s = inlink->dst->priv;
358
+
359
+    av_assert0(!s->main);
360
+    s->main = frame;
361
+
362
+    return 0;
363
+}
364
+
365
+static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
366
+{
367
+    QSVOverlayContext *s = inlink->dst->priv;
368
+
369
+    av_assert0(!s->over_next);
370
+    s->over_next = frame;
371
+
372
+    return 0;
373
+}
374
+
375
+static int overlay_qsv_init(AVFilterContext *ctx)
376
+{
377
+    QSVOverlayContext *vpp = ctx->priv;
378
+
379
+    /* fill composite config */
380
+    vpp->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
381
+    vpp->comp_conf.Header.BufferSz = sizeof(vpp->comp_conf);
382
+    vpp->comp_conf.NumInputStream  = ctx->nb_inputs;
383
+    vpp->comp_conf.InputStream     = av_mallocz_array(ctx->nb_inputs,
384
+                                                      sizeof(*vpp->comp_conf.InputStream));
385
+    if (!vpp->comp_conf.InputStream)
386
+        return AVERROR(ENOMEM);
387
+
388
+    /* initialize QSVVPP params */
389
+    vpp->qsv_param.filter_frame = NULL;
390
+    vpp->qsv_param.ext_buf      = av_mallocz(sizeof(*vpp->qsv_param.ext_buf));
391
+    if (!vpp->qsv_param.ext_buf)
392
+        return AVERROR(ENOMEM);
393
+
394
+    vpp->qsv_param.ext_buf[0]    = (mfxExtBuffer *)&vpp->comp_conf;
395
+    vpp->qsv_param.num_ext_buf   = 1;
396
+    vpp->qsv_param.out_sw_format = AV_PIX_FMT_NV12;
397
+    vpp->qsv_param.num_crop      = 0;
398
+
399
+    return 0;
400
+}
401
+
402
/**
 * Free all filter-owned resources: pending frames, the VPP session, and
 * the buffers allocated in overlay_qsv_init().  The session is released
 * before the ext-buffer storage it was configured with.
 */
static void overlay_qsv_uninit(AVFilterContext *ctx)
{
    QSVOverlayContext *vpp = ctx->priv;

    av_frame_free(&vpp->main);
    av_frame_free(&vpp->over_prev);
    av_frame_free(&vpp->over_next);
    ff_qsvvpp_free(&vpp->qsv);
    av_freep(&vpp->comp_conf.InputStream);
    av_freep(&vpp->qsv_param.ext_buf);
}
413
+
414
+static int overlay_qsv_query_formats(AVFilterContext *ctx)
415
+{
416
+    int i;
417
+
418
+    static const enum AVPixelFormat main_in_fmts[] = {
419
+        AV_PIX_FMT_YUV420P,
420
+        AV_PIX_FMT_NV12,
421
+        AV_PIX_FMT_YUYV422,
422
+        AV_PIX_FMT_RGB32,
423
+        AV_PIX_FMT_QSV,
424
+        AV_PIX_FMT_NONE
425
+    };
426
+    static const enum AVPixelFormat out_pix_fmts[] = {
427
+        AV_PIX_FMT_NV12,
428
+        AV_PIX_FMT_QSV,
429
+        AV_PIX_FMT_NONE
430
+    };
431
+
432
+    for (i = 0; i < ctx->nb_inputs; i++)
433
+        ff_formats_ref(ff_make_format_list(main_in_fmts), &ctx->inputs[i]->out_formats);
434
+
435
+    ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats);
436
+
437
+    return 0;
438
+}
439
+
440
/* AVOptions class for the filter's private context. */
static const AVClass overlay_qsv_class = {
    .class_name = "overlay_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Two inputs: the background ("main") and the layer composited on top
 * ("overlay").  Both use fifos so request_frame() can pull frames in a
 * controlled order. */
static const AVFilterPad overlay_qsv_inputs[] = {
    {
        .name          = "main",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame_main,
        .config_props  = config_main_input,
        .needs_fifo    = 1,
    },
    {
        .name          = "overlay",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame_overlay,
        .config_props  = config_overlay_input,
        .needs_fifo    = 1,
    },
    { NULL }
};

static const AVFilterPad overlay_qsv_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

/* Filter definition; hwframe-aware so the framework propagates
 * hw_frames_ctx across the links. */
AVFilter ff_vf_overlay_qsv = {
    .name           = "overlay_qsv",
    .description    = NULL_IF_CONFIG_SMALL("Quick Sync Video overlay."),
    .priv_size      = sizeof(QSVOverlayContext),
    .query_formats  = overlay_qsv_query_formats,
    .init           = overlay_qsv_init,
    .uninit         = overlay_qsv_uninit,
    .inputs         = overlay_qsv_inputs,
    .outputs        = overlay_qsv_outputs,
    .priv_class     = &overlay_qsv_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
0 487
new file mode 100644
... ...
@@ -0,0 +1,401 @@
0
+/*
1
+ * This file is part of FFmpeg.
2
+ *
3
+ * FFmpeg is free software; you can redistribute it and/or
4
+ * modify it under the terms of the GNU Lesser General Public
5
+ * License as published by the Free Software Foundation; either
6
+ * version 2.1 of the License, or (at your option) any later version.
7
+ *
8
+ * FFmpeg is distributed in the hope that it will be useful,
9
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11
+ * Lesser General Public License for more details.
12
+ *
13
+ * You should have received a copy of the GNU Lesser General Public
14
+ * License along with FFmpeg; if not, write to the Free Software
15
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16
+ */
17
+
18
+/**
19
+ ** @file
20
+ ** Hardware accelerated common filters based on Intel Quick Sync Video VPP
21
+ **/
22
+
23
+#include <float.h>
24
+
25
+#include "libavutil/opt.h"
26
+#include "libavutil/eval.h"
27
+#include "libavutil/avassert.h"
28
+#include "libavutil/pixdesc.h"
29
+
30
+#include "formats.h"
31
+#include "internal.h"
32
+#include "avfilter.h"
33
+#include "libavcodec/avcodec.h"
34
+#include "libavformat/avformat.h"
35
+
36
+#include "qsvvpp.h"
37
+
38
#define OFFSET(x) offsetof(VPPContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM

/* number of video enhancement filters */
#define ENH_FILTERS_COUNT (5)

/* Private context of the "vpp_qsv" filter. */
typedef struct VPPContext{
    const AVClass *class;

    QSVVPPContext *qsv;    /* shared QSV VPP session */

    /* Video Enhancement Algorithms */
    mfxExtVPPDeinterlacing  deinterlace_conf;
    mfxExtVPPFrameRateConversion frc_conf;
    mfxExtVPPDenoise denoise_conf;
    mfxExtVPPDetail detail_conf;
    mfxExtVPPProcAmp procamp_conf;

    /* Output dimensions resolved from the w/h expressions. */
    int out_width;
    int out_height;

    AVRational framerate;       /* target framerate */
    int use_frc;                /* use framerate conversion */
    int deinterlace;            /* deinterlace mode : 0=off, 1=bob, 2=advanced */
    int denoise;                /* Enable Denoise algorithm. Value [0, 100] */
    int detail;                 /* Enable Detail Enhancement algorithm. */
                                /* Level is the optional, value [0, 100] */
    int use_crop;               /* 1 = use crop; 0=none */
    int crop_w;
    int crop_h;
    int crop_x;
    int crop_y;

    /* param for the procamp */
    int    procamp;            /* enable procamp */
    float  hue;
    float  saturation;
    float  contrast;
    float  brightness;

    /* crop (cx/cy/cw/ch) and output-size (ow/oh) expression strings */
    char *cx, *cy, *cw, *ch;
    char *ow, *oh;
} VPPContext;
81
+
82
static const AVOption options[] = {
    { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
    { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
    { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },

    { "denoise",     "denoise level [0, 100]",       OFFSET(denoise),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
    { "detail",      "enhancement level [0, 100]",   OFFSET(detail),      AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
    { "framerate",   "output framerate",             OFFSET(framerate),   AV_OPT_TYPE_RATIONAL, { .dbl = 0.0 },0, DBL_MAX, .flags = FLAGS },
    { "procamp",     "Enable ProcAmp",               OFFSET(procamp),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 1, .flags = FLAGS},
    { "hue",         "ProcAmp hue",                  OFFSET(hue),         AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -180.0, 180.0, .flags = FLAGS},
    { "saturation",  "ProcAmp saturation",           OFFSET(saturation),  AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
    { "contrast",    "ProcAmp contrast",             OFFSET(contrast),    AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
    { "brightness",  "ProcAmp brightness",           OFFSET(brightness),  AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -100.0, 100.0, .flags = FLAGS},

    /* crop rectangle expressions; defaults center the crop */
    { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },

    /* output size expressions; "w"/"width" and "h"/"height" are aliases */
    { "w",      "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
    { "width",  "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
    { "h",      "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
    { "height", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
    { NULL }
};

/* Names usable inside the cw/ch/cx/cy/w/h expressions; order must match
 * enum var_name below. */
static const char *const var_names[] = {
    "iw", "in_w",
    "ih", "in_h",
    "ow", "out_w", "w",
    "oh", "out_h", "h",
    "cw",
    "ch",
    "cx",
    "cy",
    NULL
};

enum var_name {
    VAR_iW, VAR_IN_W,
    VAR_iH, VAR_IN_H,
    VAR_oW, VAR_OUT_W, VAR_W,
    VAR_oH, VAR_OUT_H, VAR_H,
    CW,
    CH,
    CX,
    CY,
    VAR_VARS_NB
};
131
+
132
+static int eval_expr(AVFilterContext *ctx)
133
+{
134
+#define PASS_EXPR(e, s) {\
135
+    ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
136
+    if (ret < 0) {\
137
+        av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
138
+        goto release;\
139
+    }\
140
+}
141
+#define CALC_EXPR(e, v, i) {\
142
+    i = v = av_expr_eval(e, var_values, NULL); \
143
+}
144
+    VPPContext *vpp = ctx->priv;
145
+    double  var_values[VAR_VARS_NB] = { NAN };
146
+    AVExpr *w_expr  = NULL, *h_expr  = NULL;
147
+    AVExpr *cw_expr = NULL, *ch_expr = NULL;
148
+    AVExpr *cx_expr = NULL, *cy_expr = NULL;
149
+    int     ret = 0;
150
+
151
+    PASS_EXPR(cw_expr, vpp->cw);
152
+    PASS_EXPR(ch_expr, vpp->ch);
153
+
154
+    PASS_EXPR(w_expr, vpp->ow);
155
+    PASS_EXPR(h_expr, vpp->oh);
156
+
157
+    PASS_EXPR(cx_expr, vpp->cx);
158
+    PASS_EXPR(cy_expr, vpp->cy);
159
+
160
+    var_values[VAR_iW] =
161
+    var_values[VAR_IN_W] = ctx->inputs[0]->w;
162
+
163
+    var_values[VAR_iH] =
164
+    var_values[VAR_IN_H] = ctx->inputs[0]->h;
165
+
166
+    /* crop params */
167
+    CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
168
+    CALC_EXPR(ch_expr, var_values[CH], vpp->crop_h);
169
+
170
+    /* calc again in case cw is relative to ch */
171
+    CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
172
+
173
+    CALC_EXPR(w_expr,
174
+            var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
175
+            vpp->out_width);
176
+    CALC_EXPR(h_expr,
177
+            var_values[VAR_OUT_H] = var_values[VAR_oH] = var_values[VAR_H],
178
+            vpp->out_height);
179
+
180
+    /* calc again in case ow is relative to oh */
181
+    CALC_EXPR(w_expr,
182
+            var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
183
+            vpp->out_width);
184
+
185
+
186
+    CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
187
+    CALC_EXPR(cy_expr, var_values[CY], vpp->crop_y);
188
+
189
+    /* calc again in case cx is relative to cy */
190
+    CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
191
+
192
+    if ((vpp->crop_w != var_values[VAR_iW]) || (vpp->crop_h != var_values[VAR_iH]))
193
+        vpp->use_crop = 1;
194
+
195
+release:
196
+    av_expr_free(w_expr);
197
+    av_expr_free(h_expr);
198
+    av_expr_free(cw_expr);
199
+    av_expr_free(ch_expr);
200
+    av_expr_free(cx_expr);
201
+    av_expr_free(cy_expr);
202
+#undef PASS_EXPR
203
+#undef CALC_EXPR
204
+
205
+    return ret;
206
+}
207
+
208
+static int config_input(AVFilterLink *inlink)
209
+{
210
+    AVFilterContext *ctx = inlink->dst;
211
+    VPPContext      *vpp = ctx->priv;
212
+    int              ret;
213
+
214
+    if (vpp->framerate.den == 0 || vpp->framerate.num == 0)
215
+        vpp->framerate = inlink->frame_rate;
216
+
217
+    if (av_cmp_q(vpp->framerate, inlink->frame_rate))
218
+        vpp->use_frc = 1;
219
+
220
+    ret = eval_expr(ctx);
221
+    if (ret != 0) {
222
+        av_log(ctx, AV_LOG_ERROR, "Fail to eval expr.\n");
223
+        return ret;
224
+    }
225
+
226
+    if (vpp->out_height == 0 || vpp->out_width == 0) {
227
+        vpp->out_width  = inlink->w;
228
+        vpp->out_height = inlink->h;
229
+    }
230
+
231
+    if (vpp->use_crop) {
232
+        vpp->crop_x = FFMAX(vpp->crop_x, 0);
233
+        vpp->crop_y = FFMAX(vpp->crop_y, 0);
234
+
235
+        if(vpp->crop_w + vpp->crop_x > inlink->w)
236
+           vpp->crop_x = inlink->w - vpp->crop_w;
237
+        if(vpp->crop_h + vpp->crop_y > inlink->h)
238
+           vpp->crop_y = inlink->h - vpp->crop_h;
239
+    }
240
+
241
+    return 0;
242
+}
243
+
244
/**
 * Configure the output link and create the QSV VPP session.
 *
 * Builds a QSVVPPParam on the stack, appending one mfxExtBuffer per
 * enabled enhancement (deinterlace, FRC, denoise, detail, ProcAmp) plus
 * optional crop info, then hands it to ff_qsvvpp_create().
 *
 * NOTE(review): param, crop and ext_buf are stack-allocated here while
 * the mfxExt*_conf structs they point to live in VPPContext — assumes
 * ff_qsvvpp_create() copies what it needs from param/crop during the
 * call; confirm against qsvvpp.c.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    VPPContext      *vpp = ctx->priv;
    QSVVPPParam     param = { NULL };
    QSVVPPCrop      crop  = { 0 };
    /* Room for every possible enhancement buffer (ENH_FILTERS_COUNT). */
    mfxExtBuffer    *ext_buf[ENH_FILTERS_COUNT];

    outlink->w          = vpp->out_width;
    outlink->h          = vpp->out_height;
    outlink->frame_rate = vpp->framerate;
    outlink->time_base  = av_inv_q(vpp->framerate);

    param.filter_frame  = NULL;
    param.out_sw_format = AV_PIX_FMT_NV12;
    param.num_ext_buf   = 0;
    param.ext_buf       = ext_buf;

    if (vpp->use_crop) {
        crop.in_idx = 0;
        crop.x = vpp->crop_x;
        crop.y = vpp->crop_y;
        crop.w = vpp->crop_w;
        crop.h = vpp->crop_h;

        param.num_crop = 1;
        param.crop     = &crop;
    }

    /* deinterlace == 1 selects BOB, any other non-zero value ADVANCED */
    if (vpp->deinterlace) {
        memset(&vpp->deinterlace_conf, 0, sizeof(mfxExtVPPDeinterlacing));
        vpp->deinterlace_conf.Header.BufferId = MFX_EXTBUFF_VPP_DEINTERLACING;
        vpp->deinterlace_conf.Header.BufferSz = sizeof(mfxExtVPPDeinterlacing);
        vpp->deinterlace_conf.Mode = vpp->deinterlace == 1 ?
                                     MFX_DEINTERLACING_BOB : MFX_DEINTERLACING_ADVANCED;

        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->deinterlace_conf;
    }

    if (vpp->use_frc) {
        memset(&vpp->frc_conf, 0, sizeof(mfxExtVPPFrameRateConversion));
        vpp->frc_conf.Header.BufferId = MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION;
        vpp->frc_conf.Header.BufferSz = sizeof(mfxExtVPPFrameRateConversion);
        vpp->frc_conf.Algorithm = MFX_FRCALGM_DISTRIBUTED_TIMESTAMP;

        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->frc_conf;
    }

    if (vpp->denoise) {
        memset(&vpp->denoise_conf, 0, sizeof(mfxExtVPPDenoise));
        vpp->denoise_conf.Header.BufferId = MFX_EXTBUFF_VPP_DENOISE;
        vpp->denoise_conf.Header.BufferSz = sizeof(mfxExtVPPDenoise);
        vpp->denoise_conf.DenoiseFactor   = vpp->denoise;

        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->denoise_conf;
    }

    if (vpp->detail) {
        memset(&vpp->detail_conf, 0, sizeof(mfxExtVPPDetail));
        vpp->detail_conf.Header.BufferId  = MFX_EXTBUFF_VPP_DETAIL;
        vpp->detail_conf.Header.BufferSz  = sizeof(mfxExtVPPDetail);
        vpp->detail_conf.DetailFactor = vpp->detail;

        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->detail_conf;
    }

    if (vpp->procamp) {
        memset(&vpp->procamp_conf, 0, sizeof(mfxExtVPPProcAmp));
        vpp->procamp_conf.Header.BufferId  = MFX_EXTBUFF_VPP_PROCAMP;
        vpp->procamp_conf.Header.BufferSz  = sizeof(mfxExtVPPProcAmp);
        vpp->procamp_conf.Hue              = vpp->hue;
        vpp->procamp_conf.Saturation       = vpp->saturation;
        vpp->procamp_conf.Contrast         = vpp->contrast;
        vpp->procamp_conf.Brightness       = vpp->brightness;

        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->procamp_conf;
    }

    return ff_qsvvpp_create(ctx, &vpp->qsv, &param);
}
324
+
325
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
326
+{
327
+    VPPContext *vpp = inlink->dst->priv;
328
+
329
+    return ff_qsvvpp_filter_frame(vpp->qsv, inlink, picref);
330
+}
331
+
332
+static int query_formats(AVFilterContext *ctx)
333
+{
334
+    AVFilterFormats *in_fmts, *out_fmts;
335
+    static const enum AVPixelFormat in_pix_fmts[] = {
336
+        AV_PIX_FMT_YUV420P,
337
+        AV_PIX_FMT_NV12,
338
+        AV_PIX_FMT_YUYV422,
339
+        AV_PIX_FMT_RGB32,
340
+        AV_PIX_FMT_QSV,
341
+        AV_PIX_FMT_NONE
342
+    };
343
+    static const enum AVPixelFormat out_pix_fmts[] = {
344
+        AV_PIX_FMT_NV12,
345
+        AV_PIX_FMT_QSV,
346
+        AV_PIX_FMT_NONE
347
+    };
348
+
349
+    in_fmts  = ff_make_format_list(in_pix_fmts);
350
+    out_fmts = ff_make_format_list(out_pix_fmts);
351
+    ff_formats_ref(in_fmts, &ctx->inputs[0]->out_formats);
352
+    ff_formats_ref(out_fmts, &ctx->outputs[0]->in_formats);
353
+
354
+    return 0;
355
+}
356
+
357
+static av_cold void vpp_uninit(AVFilterContext *ctx)
358
+{
359
+    VPPContext *vpp = ctx->priv;
360
+
361
+    ff_qsvvpp_free(&vpp->qsv);
362
+}
363
+
364
+static const AVClass vpp_class = {
365
+    .class_name = "vpp_qsv",
366
+    .item_name  = av_default_item_name,
367
+    .option     = options,
368
+    .version    = LIBAVUTIL_VERSION_INT,
369
+};
370
+
371
+static const AVFilterPad vpp_inputs[] = {
372
+    {
373
+        .name          = "default",
374
+        .type          = AVMEDIA_TYPE_VIDEO,
375
+        .config_props  = config_input,
376
+        .filter_frame  = filter_frame,
377
+    },
378
+    { NULL }
379
+};
380
+
381
+static const AVFilterPad vpp_outputs[] = {
382
+    {
383
+        .name          = "default",
384
+        .type          = AVMEDIA_TYPE_VIDEO,
385
+        .config_props  = config_output,
386
+    },
387
+    { NULL }
388
+};
389
+
390
+AVFilter ff_vf_vpp_qsv = {
391
+    .name          = "vpp_qsv",
392
+    .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
393
+    .priv_size     = sizeof(VPPContext),
394
+    .query_formats = query_formats,
395
+    .uninit        = vpp_uninit,
396
+    .inputs        = vpp_inputs,
397
+    .outputs       = vpp_outputs,
398
+    .priv_class    = &vpp_class,
399
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
400
+};