Signed-off-by: Paul B Mahol <onemda@gmail.com>
Paul B Mahol authored on 2013/09/04 21:59:18... | ... |
@@ -7830,6 +7830,45 @@ vignette='PI/4+random(1)*PI/50':eval=frame |
7830 | 7830 |
|
7831 | 7831 |
@end itemize |
7832 | 7832 |
|
7833 |
+@section w3fdif |
|
7834 |
+ |
|
7835 |
+Deinterlace the input video ("w3fdif" stands for "Weston 3 Field |
|
7836 |
+Deinterlacing Filter"). |
|
7837 |
+ |
|
7838 |
+Based on the process described by Martin Weston for BBC R&D, and |
|
7839 |
+implemented based on the de-interlace algorithm written by Jim |
|
7840 |
+Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter |
|
7841 |
+uses filter coefficients calculated by BBC R&D. |
|
7842 |
+ |
|
7843 |
+There are two sets of filter coefficients, so-called "simple" |
|
7844 |
+and "complex". Which set of filter coefficients is used can |
|
7845 |
+be set by passing an optional parameter: |
|
7846 |
+ |
|
7847 |
+@table @option |
|
7848 |
+@item filter |
|
7849 |
+Set the interlacing filter coefficients. Accepts one of the following values: |
|
7850 |
+ |
|
7851 |
+@table @samp |
|
7852 |
+@item simple |
|
7853 |
+Simple filter coefficient set. |
|
7854 |
+@item complex |
|
7855 |
+More-complex filter coefficient set. |
|
7856 |
+@end table |
|
7857 |
+Default value is @samp{complex}. |
|
7858 |
+ |
|
7859 |
+@item deint |
|
7860 |
+Specify which frames to deinterlace. Accepts one of the following values: |
|
7861 |
+ |
|
7862 |
+@table @samp |
|
7863 |
+@item all |
|
7864 |
+Deinterlace all frames. |
|
7865 |
+@item interlaced |
|
7866 |
+Only deinterlace frames marked as interlaced. |
|
7867 |
+@end table |
|
7868 |
+ |
|
7869 |
+Default value is @samp{all}. |
|
7870 |
+@end table |
|
7871 |
+ |
|
7833 | 7872 |
@anchor{yadif} |
7834 | 7873 |
@section yadif |
7835 | 7874 |
|
... | ... |
@@ -203,6 +203,7 @@ OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o |
203 | 203 |
OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o |
204 | 204 |
OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o |
205 | 205 |
OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o |
206 |
+OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o |
|
206 | 207 |
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o |
207 | 208 |
OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o |
208 | 209 |
|
... | ... |
@@ -198,6 +198,7 @@ void avfilter_register_all(void) |
198 | 198 |
REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf); |
199 | 199 |
REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf); |
200 | 200 |
REGISTER_FILTER(VIGNETTE, vignette, vf); |
201 |
+ REGISTER_FILTER(W3FDIF, w3fdif, vf); |
|
201 | 202 |
REGISTER_FILTER(YADIF, yadif, vf); |
202 | 203 |
REGISTER_FILTER(ZMQ, zmq, vf); |
203 | 204 |
|
... | ... |
@@ -30,8 +30,8 @@ |
30 | 30 |
#include "libavutil/avutil.h" |
31 | 31 |
|
32 | 32 |
#define LIBAVFILTER_VERSION_MAJOR 3 |
33 |
-#define LIBAVFILTER_VERSION_MINOR 83 |
|
34 |
-#define LIBAVFILTER_VERSION_MICRO 104 |
|
33 |
+#define LIBAVFILTER_VERSION_MINOR 84 |
|
34 |
+#define LIBAVFILTER_VERSION_MICRO 100 |
|
35 | 35 |
|
36 | 36 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |
37 | 37 |
LIBAVFILTER_VERSION_MINOR, \ |
38 | 38 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,382 @@ |
0 |
+/* |
|
1 |
+ * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved |
|
2 |
+ * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D |
|
3 |
+ * Based on the process described by Martin Weston for BBC R&D |
|
4 |
+ * Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development |
|
5 |
+ * |
|
6 |
+ * This file is part of FFmpeg. |
|
7 |
+ * |
|
8 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
9 |
+ * modify it under the terms of the GNU Lesser General Public |
|
10 |
+ * License as published by the Free Software Foundation; either |
|
11 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
12 |
+ * |
|
13 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
14 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
15 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
16 |
+ * Lesser General Public License for more details. |
|
17 |
+ * |
|
18 |
+ * You should have received a copy of the GNU Lesser General Public |
|
19 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
20 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
21 |
+ */ |
|
22 |
+ |
|
23 |
+#include "libavutil/common.h" |
|
24 |
+#include "libavutil/imgutils.h" |
|
25 |
+#include "libavutil/opt.h" |
|
26 |
+#include "libavutil/pixdesc.h" |
|
27 |
+#include "avfilter.h" |
|
28 |
+#include "formats.h" |
|
29 |
+#include "internal.h" |
|
30 |
+#include "video.h" |
|
31 |
+ |
|
32 |
/** Per-instance state for the w3fdif (Weston 3 Field) deinterlacer. */
typedef struct W3FDIFContext {
    const AVClass *class;
    int filter;           ///< 0 is simple, 1 is more complex
    int deint;            ///< which frames to deinterlace
    int linesize[4];      ///< bytes of pixel data per line for each plane
    int planeheight[4];   ///< height of each plane
    int field;            ///< which field are we on, 0 or 1
    int eof;              ///< set once the final input frame has been flushed
    int nb_planes;        ///< number of planes in the negotiated pixel format
    double ts_unit;       ///< duration of one output frame in output time-base units
    AVFrame *prev, *cur, *next;  ///< previous, current, next frames
    int32_t *work_line;   ///< line we are calculating (32-bit accumulators, one per byte of the widest line)
} W3FDIFContext;
|
45 |
+ |
|
46 |
#define OFFSET(x) offsetof(W3FDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Helper to declare a named constant value for an AVOption "unit". */
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }

/* User options: filter coefficient set (default: complex) and which
 * frames to deinterlace (default: all). */
static const AVOption w3fdif_options[] = {
    { "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "filter" },
    CONST("simple",  NULL, 0, "filter"),
    CONST("complex", NULL, 1, "filter"),
    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       0, "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
    { NULL }
};

AVFILTER_DEFINE_CLASS(w3fdif);
|
61 |
+ |
|
62 |
+static int query_formats(AVFilterContext *ctx) |
|
63 |
+{ |
|
64 |
+ static const enum AVPixelFormat pix_fmts[] = { |
|
65 |
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, |
|
66 |
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, |
|
67 |
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, |
|
68 |
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, |
|
69 |
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, |
|
70 |
+ AV_PIX_FMT_YUVJ411P, |
|
71 |
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, |
|
72 |
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, |
|
73 |
+ AV_PIX_FMT_GRAY8, |
|
74 |
+ AV_PIX_FMT_NONE |
|
75 |
+ }; |
|
76 |
+ |
|
77 |
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); |
|
78 |
+ |
|
79 |
+ return 0; |
|
80 |
+} |
|
81 |
+ |
|
82 |
+static int config_input(AVFilterLink *inlink) |
|
83 |
+{ |
|
84 |
+ W3FDIFContext *s = inlink->dst->priv; |
|
85 |
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
|
86 |
+ int ret; |
|
87 |
+ |
|
88 |
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0) |
|
89 |
+ return ret; |
|
90 |
+ |
|
91 |
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); |
|
92 |
+ s->planeheight[0] = s->planeheight[3] = inlink->h; |
|
93 |
+ |
|
94 |
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format); |
|
95 |
+ s->work_line = av_calloc(s->linesize[0], sizeof(*s->work_line)); |
|
96 |
+ if (!s->work_line) |
|
97 |
+ return AVERROR(ENOMEM); |
|
98 |
+ |
|
99 |
+ return 0; |
|
100 |
+} |
|
101 |
+ |
|
102 |
+static int config_output(AVFilterLink *outlink) |
|
103 |
+{ |
|
104 |
+ AVFilterLink *inlink = outlink->src->inputs[0]; |
|
105 |
+ W3FDIFContext *s = outlink->src->priv; |
|
106 |
+ |
|
107 |
+ outlink->time_base.num = inlink->time_base.num; |
|
108 |
+ outlink->time_base.den = inlink->time_base.den * 2; |
|
109 |
+ outlink->frame_rate.num = inlink->frame_rate.num * 2; |
|
110 |
+ outlink->frame_rate.den = inlink->frame_rate.den; |
|
111 |
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP; |
|
112 |
+ s->ts_unit = av_q2d(av_inv_q(av_mul_q(outlink->frame_rate, outlink->time_base))); |
|
113 |
+ |
|
114 |
+ return 0; |
|
115 |
+} |
|
116 |
+ |
|
117 |
/*
 * Filter coefficients from PH-2071, scaled by 256 * 256.
 * Each set of coefficients has a set for low-frequencies and high-frequencies.
 * n_coef_lf[] and n_coef_hf[] are the number of coefs for simple and more-complex.
 * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
 * coef_lf[][] and coef_hf[][] are the coefficients for low-frequencies
 * and high-frequencies for simple and more-complex mode.
 * Index [0] selects the "simple" set, index [1] the "complex" set,
 * matching the 'filter' option values.
 */
static const int8_t   n_coef_lf[2] = { 2, 4 };
static const int32_t  coef_lf[2][4] = {{ 32768, 32768,     0,     0},
                                       { -1704, 34472, 34472, -1704}};
static const int8_t   n_coef_hf[2] = { 3, 5 };
static const int32_t  coef_hf[2][5] = {{ -4096,  8192, -4096,     0,    0},
                                       {  2032, -7602, 11140, -7602, 2032}};
|
131 |
+ |
|
132 |
/*
 * Deinterlace one plane of the current frame into 'out'.
 *
 * Lines belonging to the current field (selected by s->field and the
 * frame's field order) are copied through unchanged. The missing lines
 * are synthesized by combining low vertical frequencies from the current
 * field with high vertical frequencies from the current and the adjacent
 * ('adj') field, using the BBC R&D coefficient set selected by 'filter'
 * (0 = simple, 1 = complex). Accumulation is done in 32 bits, scaled by
 * 256*256, then clipped and shifted back down to 8 bits.
 */
static void deinterlace_plane(AVFilterContext *ctx, AVFrame *out,
                              const AVFrame *cur, const AVFrame *adj,
                              const int filter, const int plane)
{
    W3FDIFContext *s = ctx->priv;
    uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
    uint8_t *out_line, *out_pixel;
    int32_t *work_line, *work_pixel;
    uint8_t *cur_data = cur->data[plane];
    uint8_t *adj_data = adj->data[plane];
    uint8_t *dst_data = out->data[plane];
    const int linesize = s->linesize[plane];          /* bytes of pixel data per line */
    const int height = s->planeheight[plane];
    const int cur_line_stride = cur->linesize[plane];
    const int adj_line_stride = adj->linesize[plane];
    const int dst_line_stride = out->linesize[plane];
    int i, j, y_in, y_out;

    /* copy unchanged the lines of the field */
    y_out = s->field == cur->top_field_first;

    in_line  = cur_data + (y_out * cur_line_stride);
    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < height) {
        memcpy(out_line, in_line, linesize);
        y_out += 2;
        in_line  += cur_line_stride * 2;
        out_line += dst_line_stride * 2;
    }

    /* interpolate other lines of the field */
    y_out = s->field != cur->top_field_first;

    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < height) {
        /* clear workspace */
        memset(s->work_line, 0, sizeof(*s->work_line) * linesize);

        /* get low vertical frequencies from current field */
        for (j = 0; j < n_coef_lf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];

            /* mirror tap positions back inside the plane at the
             * top/bottom edges, staying on the same field parity */
            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
        }

        /* accumulate the low-frequency taps; the loops are unrolled per
         * tap count (n_coef_lf[] is always 2 or 4) */
        work_line = s->work_line;
        switch (n_coef_lf[filter]) {
        case 2:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_lf[filter][0];
                *work_line++ += *in_lines_cur[1]++ * coef_lf[filter][1];
            }
            break;
        case 4:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_lf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_lf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_lf[filter][2];
                *work_line++ += *in_lines_cur[3]++ * coef_lf[filter][3];
            }
        }

        /* get high vertical frequencies from adjacent fields */
        for (j = 0; j < n_coef_hf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];

            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
            in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
        }

        /* accumulate the high-frequency taps from both fields
         * (n_coef_hf[] is always 3 or 5) */
        work_line = s->work_line;
        switch (n_coef_hf[filter]) {
        case 3:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_adj[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_adj[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_hf[filter][2];
                *work_line++ += *in_lines_adj[2]++ * coef_hf[filter][2];
            }
            break;
        case 5:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_adj[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_adj[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_hf[filter][2];
                *work_line   += *in_lines_adj[2]++ * coef_hf[filter][2];
                *work_line   += *in_lines_cur[3]++ * coef_hf[filter][3];
                *work_line   += *in_lines_adj[3]++ * coef_hf[filter][3];
                *work_line   += *in_lines_cur[4]++ * coef_hf[filter][4];
                *work_line++ += *in_lines_adj[4]++ * coef_hf[filter][4];
            }
        }

        /* save scaled result to the output frame, scaling down by 256 * 256 */
        work_pixel = s->work_line;
        out_pixel = out_line;

        for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
            *out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 256) >> 16;

        /* move on to next line */
        y_out += 2;
        out_line += dst_line_stride * 2;
    }
}
|
253 |
+ |
|
254 |
+static int filter(AVFilterContext *ctx) |
|
255 |
+{ |
|
256 |
+ W3FDIFContext *s = ctx->priv; |
|
257 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
258 |
+ AVFrame *out, *adj; |
|
259 |
+ int plane; |
|
260 |
+ |
|
261 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
262 |
+ if (!out) |
|
263 |
+ return AVERROR(ENOMEM); |
|
264 |
+ av_frame_copy_props(out, s->cur); |
|
265 |
+ out->interlaced_frame = 0; |
|
266 |
+ out->pts = outlink->frame_count * s->ts_unit; |
|
267 |
+ |
|
268 |
+ adj = s->field ? s->next : s->prev; |
|
269 |
+ for (plane = 0; plane < s->nb_planes; plane++) |
|
270 |
+ deinterlace_plane(ctx, out, s->cur, adj, s->filter, plane); |
|
271 |
+ |
|
272 |
+ s->field = !s->field; |
|
273 |
+ |
|
274 |
+ return ff_filter_frame(outlink, out); |
|
275 |
+} |
|
276 |
+ |
|
277 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
278 |
+{ |
|
279 |
+ AVFilterContext *ctx = inlink->dst; |
|
280 |
+ W3FDIFContext *s = ctx->priv; |
|
281 |
+ int ret; |
|
282 |
+ |
|
283 |
+ av_frame_free(&s->prev); |
|
284 |
+ s->prev = s->cur; |
|
285 |
+ s->cur = s->next; |
|
286 |
+ s->next = frame; |
|
287 |
+ |
|
288 |
+ if (!s->cur) { |
|
289 |
+ s->cur = av_frame_clone(s->next); |
|
290 |
+ if (!s->cur) |
|
291 |
+ return AVERROR(ENOMEM); |
|
292 |
+ } |
|
293 |
+ |
|
294 |
+ if ((s->deint && !s->cur->interlaced_frame) || ctx->is_disabled) { |
|
295 |
+ AVFrame *out = av_frame_clone(s->cur); |
|
296 |
+ if (!out) |
|
297 |
+ return AVERROR(ENOMEM); |
|
298 |
+ |
|
299 |
+ av_frame_free(&s->prev); |
|
300 |
+ out->pts = ctx->outputs[0]->frame_count * s->ts_unit; |
|
301 |
+ return ff_filter_frame(ctx->outputs[0], out); |
|
302 |
+ } |
|
303 |
+ |
|
304 |
+ if (!s->prev) |
|
305 |
+ return 0; |
|
306 |
+ |
|
307 |
+ ret = filter(ctx); |
|
308 |
+ if (ret < 0) |
|
309 |
+ return ret; |
|
310 |
+ |
|
311 |
+ return filter(ctx); |
|
312 |
+} |
|
313 |
+ |
|
314 |
+static int request_frame(AVFilterLink *outlink) |
|
315 |
+{ |
|
316 |
+ AVFilterContext *ctx = outlink->src; |
|
317 |
+ W3FDIFContext *s = ctx->priv; |
|
318 |
+ |
|
319 |
+ do { |
|
320 |
+ int ret; |
|
321 |
+ |
|
322 |
+ if (s->eof) |
|
323 |
+ return AVERROR_EOF; |
|
324 |
+ |
|
325 |
+ ret = ff_request_frame(ctx->inputs[0]); |
|
326 |
+ |
|
327 |
+ if (ret == AVERROR_EOF && s->cur) { |
|
328 |
+ AVFrame *next = av_frame_clone(s->next); |
|
329 |
+ if (!next) |
|
330 |
+ return AVERROR(ENOMEM); |
|
331 |
+ filter_frame(ctx->inputs[0], next); |
|
332 |
+ s->eof = 1; |
|
333 |
+ } else if (ret < 0) { |
|
334 |
+ return ret; |
|
335 |
+ } |
|
336 |
+ } while (!s->cur); |
|
337 |
+ |
|
338 |
+ return 0; |
|
339 |
+} |
|
340 |
+ |
|
341 |
+static av_cold void uninit(AVFilterContext *ctx) |
|
342 |
+{ |
|
343 |
+ W3FDIFContext *s = ctx->priv; |
|
344 |
+ |
|
345 |
+ av_frame_free(&s->prev); |
|
346 |
+ av_frame_free(&s->cur ); |
|
347 |
+ av_frame_free(&s->next); |
|
348 |
+ av_freep(&s->work_line); |
|
349 |
+} |
|
350 |
+ |
|
351 |
/* Single video input: frames arrive via filter_frame, geometry is
 * cached in config_input. */
static const AVFilterPad w3fdif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

/* Single video output: doubled frame rate set up in config_output,
 * pulled via request_frame. */
static const AVFilterPad w3fdif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_w3fdif = {
    .name          = "w3fdif",
    .description   = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
    .priv_size     = sizeof(W3FDIFContext),
    .priv_class    = &w3fdif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = w3fdif_inputs,
    .outputs       = w3fdif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};