Any alleged performance benefits gained from the split are purely
mythological and do not justify added code complexity.
... | ... |
@@ -314,7 +314,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples) |
314 | 314 |
if (s->next_pts != AV_NOPTS_VALUE) |
315 | 315 |
s->next_pts += nb_samples; |
316 | 316 |
|
317 |
- return ff_filter_samples(outlink, out_buf); |
|
317 |
+ return ff_filter_frame(outlink, out_buf); |
|
318 | 318 |
} |
319 | 319 |
|
320 | 320 |
/** |
... | ... |
@@ -455,7 +455,7 @@ static int request_frame(AVFilterLink *outlink) |
455 | 455 |
return output_frame(outlink, available_samples); |
456 | 456 |
} |
457 | 457 |
|
458 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
458 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
459 | 459 |
{ |
460 | 460 |
AVFilterContext *ctx = inlink->dst; |
461 | 461 |
MixContext *s = ctx->priv; |
... | ... |
@@ -509,7 +509,7 @@ static int init(AVFilterContext *ctx, const char *args) |
509 | 509 |
snprintf(name, sizeof(name), "input%d", i); |
510 | 510 |
pad.type = AVMEDIA_TYPE_AUDIO; |
511 | 511 |
pad.name = av_strdup(name); |
512 |
- pad.filter_samples = filter_samples; |
|
512 |
+ pad.filter_frame = filter_frame; |
|
513 | 513 |
|
514 | 514 |
ff_insert_inpad(ctx, i, &pad); |
515 | 515 |
} |
... | ... |
@@ -34,6 +34,7 @@ |
34 | 34 |
|
35 | 35 |
#include "audio.h" |
36 | 36 |
#include "avfilter.h" |
37 |
+#include "internal.h" |
|
37 | 38 |
|
38 | 39 |
typedef struct AShowInfoContext { |
39 | 40 |
/** |
... | ... |
@@ -64,7 +65,7 @@ static void uninit(AVFilterContext *ctx) |
64 | 64 |
av_freep(&s->plane_checksums); |
65 | 65 |
} |
66 | 66 |
|
67 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
67 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
68 | 68 |
{ |
69 | 69 |
AVFilterContext *ctx = inlink->dst; |
70 | 70 |
AShowInfoContext *s = ctx->priv; |
... | ... |
@@ -103,7 +104,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
103 | 103 |
av_log(ctx, AV_LOG_INFO, "]\n"); |
104 | 104 |
|
105 | 105 |
s->frame++; |
106 |
- return ff_filter_samples(inlink->dst->outputs[0], buf); |
|
106 |
+ return ff_filter_frame(inlink->dst->outputs[0], buf); |
|
107 | 107 |
} |
108 | 108 |
|
109 | 109 |
static const AVFilterPad inputs[] = { |
... | ... |
@@ -112,7 +113,7 @@ static const AVFilterPad inputs[] = { |
112 | 112 |
.type = AVMEDIA_TYPE_AUDIO, |
113 | 113 |
.get_audio_buffer = ff_null_get_audio_buffer, |
114 | 114 |
.config_props = config_input, |
115 |
- .filter_samples = filter_samples, |
|
115 |
+ .filter_frame = filter_frame, |
|
116 | 116 |
.min_perms = AV_PERM_READ, |
117 | 117 |
}, |
118 | 118 |
{ NULL }, |
... | ... |
@@ -39,7 +39,7 @@ typedef struct ASyncContext { |
39 | 39 |
float min_delta_sec; |
40 | 40 |
int max_comp; |
41 | 41 |
|
42 |
- /* set by filter_samples() to signal an output frame to request_frame() */ |
|
42 |
+ /* set by filter_frame() to signal an output frame to request_frame() */ |
|
43 | 43 |
int got_output; |
44 | 44 |
} ASyncContext; |
45 | 45 |
|
... | ... |
@@ -141,7 +141,7 @@ static int request_frame(AVFilterLink *link) |
141 | 141 |
} |
142 | 142 |
|
143 | 143 |
buf->pts = s->pts; |
144 |
- return ff_filter_samples(link, buf); |
|
144 |
+ return ff_filter_frame(link, buf); |
|
145 | 145 |
} |
146 | 146 |
|
147 | 147 |
return ret; |
... | ... |
@@ -161,7 +161,7 @@ static int64_t get_delay(ASyncContext *s) |
161 | 161 |
return avresample_available(s->avr) + avresample_get_delay(s->avr); |
162 | 162 |
} |
163 | 163 |
|
164 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
164 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
165 | 165 |
{ |
166 | 166 |
AVFilterContext *ctx = inlink->dst; |
167 | 167 |
ASyncContext *s = ctx->priv; |
... | ... |
@@ -217,7 +217,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
217 | 217 |
av_samples_set_silence(buf_out->extended_data, out_size - delta, |
218 | 218 |
delta, nb_channels, buf->format); |
219 | 219 |
} |
220 |
- ret = ff_filter_samples(outlink, buf_out); |
|
220 |
+ ret = ff_filter_frame(outlink, buf_out); |
|
221 | 221 |
if (ret < 0) |
222 | 222 |
goto fail; |
223 | 223 |
s->got_output = 1; |
... | ... |
@@ -243,7 +243,7 @@ static const AVFilterPad avfilter_af_asyncts_inputs[] = { |
243 | 243 |
{ |
244 | 244 |
.name = "default", |
245 | 245 |
.type = AVMEDIA_TYPE_AUDIO, |
246 |
- .filter_samples = filter_samples |
|
246 |
+ .filter_frame = filter_frame, |
|
247 | 247 |
}, |
248 | 248 |
{ NULL } |
249 | 249 |
}; |
... | ... |
@@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx) |
313 | 313 |
return 0; |
314 | 314 |
} |
315 | 315 |
|
316 |
-static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
316 |
+static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
317 | 317 |
{ |
318 | 318 |
AVFilterContext *ctx = inlink->dst; |
319 | 319 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -355,7 +355,7 @@ static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *bu |
355 | 355 |
memcpy(buf->data, buf->extended_data, |
356 | 356 |
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0])); |
357 | 357 |
|
358 |
- return ff_filter_samples(outlink, buf); |
|
358 |
+ return ff_filter_frame(outlink, buf); |
|
359 | 359 |
} |
360 | 360 |
|
361 | 361 |
static int channelmap_config_input(AVFilterLink *inlink) |
... | ... |
@@ -389,7 +389,7 @@ static const AVFilterPad avfilter_af_channelmap_inputs[] = { |
389 | 389 |
{ |
390 | 390 |
.name = "default", |
391 | 391 |
.type = AVMEDIA_TYPE_AUDIO, |
392 |
- .filter_samples = channelmap_filter_samples, |
|
392 |
+ .filter_frame = channelmap_filter_frame, |
|
393 | 393 |
.config_props = channelmap_config_input |
394 | 394 |
}, |
395 | 395 |
{ NULL } |
... | ... |
@@ -111,7 +111,7 @@ static int query_formats(AVFilterContext *ctx) |
111 | 111 |
return 0; |
112 | 112 |
} |
113 | 113 |
|
114 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
114 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
115 | 115 |
{ |
116 | 116 |
AVFilterContext *ctx = inlink->dst; |
117 | 117 |
int i, ret = 0; |
... | ... |
@@ -128,7 +128,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
128 | 128 |
buf_out->audio->channel_layout = |
129 | 129 |
av_channel_layout_extract_channel(buf->audio->channel_layout, i); |
130 | 130 |
|
131 |
- ret = ff_filter_samples(ctx->outputs[i], buf_out); |
|
131 |
+ ret = ff_filter_frame(ctx->outputs[i], buf_out); |
|
132 | 132 |
if (ret < 0) |
133 | 133 |
break; |
134 | 134 |
} |
... | ... |
@@ -140,7 +140,7 @@ static const AVFilterPad avfilter_af_channelsplit_inputs[] = { |
140 | 140 |
{ |
141 | 141 |
.name = "default", |
142 | 142 |
.type = AVMEDIA_TYPE_AUDIO, |
143 |
- .filter_samples = filter_samples, |
|
143 |
+ .filter_frame = filter_frame, |
|
144 | 144 |
}, |
145 | 145 |
{ NULL } |
146 | 146 |
}; |
... | ... |
@@ -93,7 +93,7 @@ static const AVClass join_class = { |
93 | 93 |
.version = LIBAVUTIL_VERSION_INT, |
94 | 94 |
}; |
95 | 95 |
|
96 |
-static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf) |
|
96 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
97 | 97 |
{ |
98 | 98 |
AVFilterContext *ctx = link->dst; |
99 | 99 |
JoinContext *s = ctx->priv; |
... | ... |
@@ -230,7 +230,7 @@ static int join_init(AVFilterContext *ctx, const char *args) |
230 | 230 |
snprintf(name, sizeof(name), "input%d", i); |
231 | 231 |
pad.type = AVMEDIA_TYPE_AUDIO; |
232 | 232 |
pad.name = av_strdup(name); |
233 |
- pad.filter_samples = filter_samples; |
|
233 |
+ pad.filter_frame = filter_frame; |
|
234 | 234 |
|
235 | 235 |
pad.needs_fifo = 1; |
236 | 236 |
|
... | ... |
@@ -471,7 +471,7 @@ static int join_request_frame(AVFilterLink *outlink) |
471 | 471 |
priv->nb_in_buffers = ctx->nb_inputs; |
472 | 472 |
buf->buf->priv = priv; |
473 | 473 |
|
474 |
- ret = ff_filter_samples(outlink, buf); |
|
474 |
+ ret = ff_filter_frame(outlink, buf); |
|
475 | 475 |
|
476 | 476 |
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs); |
477 | 477 |
|
... | ... |
@@ -40,7 +40,7 @@ typedef struct ResampleContext { |
40 | 40 |
|
41 | 41 |
int64_t next_pts; |
42 | 42 |
|
43 |
- /* set by filter_samples() to signal an output frame to request_frame() */ |
|
43 |
+ /* set by filter_frame() to signal an output frame to request_frame() */ |
|
44 | 44 |
int got_output; |
45 | 45 |
} ResampleContext; |
46 | 46 |
|
... | ... |
@@ -162,12 +162,12 @@ static int request_frame(AVFilterLink *outlink) |
162 | 162 |
} |
163 | 163 |
|
164 | 164 |
buf->pts = s->next_pts; |
165 |
- return ff_filter_samples(outlink, buf); |
|
165 |
+ return ff_filter_frame(outlink, buf); |
|
166 | 166 |
} |
167 | 167 |
return ret; |
168 | 168 |
} |
169 | 169 |
|
170 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
170 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
171 | 171 |
{ |
172 | 172 |
AVFilterContext *ctx = inlink->dst; |
173 | 173 |
ResampleContext *s = ctx->priv; |
... | ... |
@@ -224,7 +224,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) |
224 | 224 |
|
225 | 225 |
s->next_pts = buf_out->pts + buf_out->audio->nb_samples; |
226 | 226 |
|
227 |
- ret = ff_filter_samples(outlink, buf_out); |
|
227 |
+ ret = ff_filter_frame(outlink, buf_out); |
|
228 | 228 |
s->got_output = 1; |
229 | 229 |
} |
230 | 230 |
|
... | ... |
@@ -232,7 +232,7 @@ fail: |
232 | 232 |
avfilter_unref_buffer(buf); |
233 | 233 |
} else { |
234 | 234 |
buf->format = outlink->format; |
235 |
- ret = ff_filter_samples(outlink, buf); |
|
235 |
+ ret = ff_filter_frame(outlink, buf); |
|
236 | 236 |
s->got_output = 1; |
237 | 237 |
} |
238 | 238 |
|
... | ... |
@@ -243,7 +243,7 @@ static const AVFilterPad avfilter_af_resample_inputs[] = { |
243 | 243 |
{ |
244 | 244 |
.name = "default", |
245 | 245 |
.type = AVMEDIA_TYPE_AUDIO, |
246 |
- .filter_samples = filter_samples, |
|
246 |
+ .filter_frame = filter_frame, |
|
247 | 247 |
.min_perms = AV_PERM_READ |
248 | 248 |
}, |
249 | 249 |
{ NULL } |
... | ... |
@@ -20,7 +20,7 @@ |
20 | 20 |
#include "avfilter.h" |
21 | 21 |
#include "internal.h" |
22 | 22 |
|
23 |
-static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) |
|
23 |
+static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref) |
|
24 | 24 |
{ |
25 | 25 |
avfilter_unref_bufferp(&samplesref); |
26 | 26 |
return 0; |
... | ... |
@@ -30,7 +30,7 @@ static const AVFilterPad avfilter_asink_anullsink_inputs[] = { |
30 | 30 |
{ |
31 | 31 |
.name = "default", |
32 | 32 |
.type = AVMEDIA_TYPE_AUDIO, |
33 |
- .filter_samples = null_filter_samples, |
|
33 |
+ .filter_frame = null_filter_frame, |
|
34 | 34 |
}, |
35 | 35 |
{ NULL }, |
36 | 36 |
}; |
... | ... |
@@ -146,50 +146,3 @@ fail: |
146 | 146 |
av_freep(&samples); |
147 | 147 |
return NULL; |
148 | 148 |
} |
149 |
- |
|
150 |
-static int default_filter_samples(AVFilterLink *link, |
|
151 |
- AVFilterBufferRef *samplesref) |
|
152 |
-{ |
|
153 |
- return ff_filter_samples(link->dst->outputs[0], samplesref); |
|
154 |
-} |
|
155 |
- |
|
156 |
-int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) |
|
157 |
-{ |
|
158 |
- int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *); |
|
159 |
- AVFilterPad *dst = link->dstpad; |
|
160 |
- AVFilterBufferRef *buf_out; |
|
161 |
- |
|
162 |
- FF_DPRINTF_START(NULL, filter_samples); ff_dlog_link(NULL, link, 1); |
|
163 |
- |
|
164 |
- if (!(filter_samples = dst->filter_samples)) |
|
165 |
- filter_samples = default_filter_samples; |
|
166 |
- |
|
167 |
- /* prepare to copy the samples if the buffer has insufficient permissions */ |
|
168 |
- if ((dst->min_perms & samplesref->perms) != dst->min_perms || |
|
169 |
- dst->rej_perms & samplesref->perms) { |
|
170 |
- av_log(link->dst, AV_LOG_DEBUG, |
|
171 |
- "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n", |
|
172 |
- samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms); |
|
173 |
- |
|
174 |
- buf_out = ff_default_get_audio_buffer(link, dst->min_perms, |
|
175 |
- samplesref->audio->nb_samples); |
|
176 |
- if (!buf_out) { |
|
177 |
- avfilter_unref_buffer(samplesref); |
|
178 |
- return AVERROR(ENOMEM); |
|
179 |
- } |
|
180 |
- buf_out->pts = samplesref->pts; |
|
181 |
- buf_out->audio->sample_rate = samplesref->audio->sample_rate; |
|
182 |
- |
|
183 |
- /* Copy actual data into new samples buffer */ |
|
184 |
- av_samples_copy(buf_out->extended_data, samplesref->extended_data, |
|
185 |
- 0, 0, samplesref->audio->nb_samples, |
|
186 |
- av_get_channel_layout_nb_channels(link->channel_layout), |
|
187 |
- link->format); |
|
188 |
- |
|
189 |
- avfilter_unref_buffer(samplesref); |
|
190 |
- } else |
|
191 |
- buf_out = samplesref; |
|
192 |
- |
|
193 |
- return filter_samples(link, buf_out); |
|
194 |
-} |
|
195 |
- |
... | ... |
@@ -42,17 +42,4 @@ AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms, |
42 | 42 |
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms, |
43 | 43 |
int nb_samples); |
44 | 44 |
|
45 |
-/** |
|
46 |
- * Send a buffer of audio samples to the next filter. |
|
47 |
- * |
|
48 |
- * @param link the output link over which the audio samples are being sent |
|
49 |
- * @param samplesref a reference to the buffer of audio samples being sent. The |
|
50 |
- * receiving filter will free this reference when it no longer |
|
51 |
- * needs it or pass it on to the next filter. |
|
52 |
- * |
|
53 |
- * @return >= 0 on success, a negative AVERROR on error. The receiving filter |
|
54 |
- * is responsible for unreferencing samplesref in case of error. |
|
55 |
- */ |
|
56 |
-int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref); |
|
57 |
- |
|
58 | 45 |
#endif /* AVFILTER_AUDIO_H */ |
... | ... |
@@ -23,12 +23,16 @@ |
23 | 23 |
|
24 | 24 |
#include "libavutil/channel_layout.h" |
25 | 25 |
#include "libavutil/common.h" |
26 |
+#include "libavutil/imgutils.h" |
|
26 | 27 |
#include "libavutil/pixdesc.h" |
27 | 28 |
#include "libavutil/rational.h" |
29 |
+#include "libavutil/samplefmt.h" |
|
28 | 30 |
|
31 |
+#include "audio.h" |
|
29 | 32 |
#include "avfilter.h" |
30 | 33 |
#include "formats.h" |
31 | 34 |
#include "internal.h" |
35 |
+#include "video.h" |
|
32 | 36 |
|
33 | 37 |
unsigned avfilter_version(void) { |
34 | 38 |
return LIBAVFILTER_VERSION_INT; |
... | ... |
@@ -446,3 +450,68 @@ enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx) |
446 | 446 |
{ |
447 | 447 |
return pads[pad_idx].type; |
448 | 448 |
} |
449 |
+ |
|
450 |
+static int default_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
451 |
+{ |
|
452 |
+ return ff_filter_frame(link->dst->outputs[0], frame); |
|
453 |
+} |
|
454 |
+ |
|
455 |
+int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
456 |
+{ |
|
457 |
+ int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *); |
|
458 |
+ AVFilterPad *dst = link->dstpad; |
|
459 |
+ AVFilterBufferRef *out; |
|
460 |
+ int perms = frame->perms; |
|
461 |
+ |
|
462 |
+ FF_DPRINTF_START(NULL, filter_frame); |
|
463 |
+ ff_dlog_link(NULL, link, 1); |
|
464 |
+ |
|
465 |
+ if (!(filter_frame = dst->filter_frame)) |
|
466 |
+ filter_frame = default_filter_frame; |
|
467 |
+ |
|
468 |
+ if (frame->linesize[0] < 0) |
|
469 |
+ perms |= AV_PERM_NEG_LINESIZES; |
|
470 |
+ /* prepare to copy the frame if the buffer has insufficient permissions */ |
|
471 |
+ if ((dst->min_perms & perms) != dst->min_perms || |
|
472 |
+ dst->rej_perms & perms) { |
|
473 |
+ av_log(link->dst, AV_LOG_DEBUG, |
|
474 |
+ "Copying data in avfilter (have perms %x, need %x, reject %x)\n", |
|
475 |
+ perms, link->dstpad->min_perms, link->dstpad->rej_perms); |
|
476 |
+ |
|
477 |
+ switch (link->type) { |
|
478 |
+ case AVMEDIA_TYPE_VIDEO: |
|
479 |
+ out = ff_get_video_buffer(link, dst->min_perms, |
|
480 |
+ link->w, link->h); |
|
481 |
+ break; |
|
482 |
+ case AVMEDIA_TYPE_AUDIO: |
|
483 |
+ out = ff_get_audio_buffer(link, dst->min_perms, |
|
484 |
+ frame->audio->nb_samples); |
|
485 |
+ break; |
|
486 |
+ default: return AVERROR(EINVAL); |
|
487 |
+ } |
|
488 |
+ if (!out) { |
|
489 |
+ avfilter_unref_buffer(frame); |
|
490 |
+ return AVERROR(ENOMEM); |
|
491 |
+ } |
|
492 |
+ avfilter_copy_buffer_ref_props(out, frame); |
|
493 |
+ |
|
494 |
+ switch (link->type) { |
|
495 |
+ case AVMEDIA_TYPE_VIDEO: |
|
496 |
+ av_image_copy(out->data, out->linesize, frame->data, frame->linesize, |
|
497 |
+ frame->format, frame->video->w, frame->video->h); |
|
498 |
+ break; |
|
499 |
+ case AVMEDIA_TYPE_AUDIO: |
|
500 |
+ av_samples_copy(out->extended_data, frame->extended_data, |
|
501 |
+ 0, 0, frame->audio->nb_samples, |
|
502 |
+ av_get_channel_layout_nb_channels(frame->audio->channel_layout), |
|
503 |
+ frame->format); |
|
504 |
+ break; |
|
505 |
+ default: return AVERROR(EINVAL); |
|
506 |
+ } |
|
507 |
+ |
|
508 |
+ avfilter_unref_buffer(frame); |
|
509 |
+ } else |
|
510 |
+ out = frame; |
|
511 |
+ |
|
512 |
+ return filter_frame(link, out); |
|
513 |
+} |
... | ... |
@@ -253,14 +253,7 @@ struct AVFilterPad { |
253 | 253 |
int rej_perms; |
254 | 254 |
|
255 | 255 |
/** |
256 |
- * Callback called before passing the first slice of a new frame. If |
|
257 |
- * NULL, the filter layer will default to storing a reference to the |
|
258 |
- * picture inside the link structure. |
|
259 |
- * |
|
260 |
- * Input video pads only. |
|
261 |
- * |
|
262 |
- * @return >= 0 on success, a negative AVERROR on error. picref will be |
|
263 |
- * unreferenced by the caller in case of error. |
|
256 |
+ * @deprecated unused |
|
264 | 257 |
*/ |
265 | 258 |
int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); |
266 | 259 |
|
... | ... |
@@ -282,37 +275,26 @@ struct AVFilterPad { |
282 | 282 |
int nb_samples); |
283 | 283 |
|
284 | 284 |
/** |
285 |
- * Callback called after the slices of a frame are completely sent. If |
|
286 |
- * NULL, the filter layer will default to releasing the reference stored |
|
287 |
- * in the link structure during start_frame(). |
|
288 |
- * |
|
289 |
- * Input video pads only. |
|
290 |
- * |
|
291 |
- * @return >= 0 on success, a negative AVERROR on error. |
|
285 |
+ * @deprecated unused |
|
292 | 286 |
*/ |
293 | 287 |
int (*end_frame)(AVFilterLink *link); |
294 | 288 |
|
295 | 289 |
/** |
296 |
- * Slice drawing callback. This is where a filter receives video data |
|
297 |
- * and should do its processing. |
|
298 |
- * |
|
299 |
- * Input video pads only. |
|
300 |
- * |
|
301 |
- * @return >= 0 on success, a negative AVERROR on error. |
|
290 |
+ * @deprecated unused |
|
302 | 291 |
*/ |
303 | 292 |
int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); |
304 | 293 |
|
305 | 294 |
/** |
306 |
- * Samples filtering callback. This is where a filter receives audio data |
|
307 |
- * and should do its processing. |
|
295 |
+ * Filtering callback. This is where a filter receives a frame with |
|
296 |
+ * audio/video data and should do its processing. |
|
308 | 297 |
* |
309 |
- * Input audio pads only. |
|
298 |
+ * Input pads only. |
|
310 | 299 |
* |
311 | 300 |
* @return >= 0 on success, a negative AVERROR on error. This function |
312 | 301 |
* must ensure that frame is properly unreferenced on error if it |
313 | 302 |
* hasn't been passed on to another filter. |
314 | 303 |
*/ |
315 |
- int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref); |
|
304 |
+ int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame); |
|
316 | 305 |
|
317 | 306 |
/** |
318 | 307 |
* Frame poll callback. This returns the number of immediately available |
... | ... |
@@ -531,18 +513,6 @@ struct AVFilterLink { |
531 | 531 |
AVLINK_STARTINIT, ///< started, but incomplete |
532 | 532 |
AVLINK_INIT ///< complete |
533 | 533 |
} init_state; |
534 |
- |
|
535 |
- /** |
|
536 |
- * The buffer reference currently being sent across the link by the source |
|
537 |
- * filter. This is used internally by the filter system to allow |
|
538 |
- * automatic copying of buffers which do not have sufficient permissions |
|
539 |
- * for the destination. This should not be accessed directly by the |
|
540 |
- * filters. |
|
541 |
- */ |
|
542 |
- AVFilterBufferRef *src_buf; |
|
543 |
- |
|
544 |
- AVFilterBufferRef *cur_buf; |
|
545 |
- AVFilterBufferRef *out_buf; |
|
546 | 534 |
}; |
547 | 535 |
|
548 | 536 |
/** |
... | ... |
@@ -48,13 +48,12 @@ static av_cold void uninit(AVFilterContext *ctx) |
48 | 48 |
av_audio_fifo_free(sink->audio_fifo); |
49 | 49 |
} |
50 | 50 |
|
51 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
51 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
52 | 52 |
{ |
53 | 53 |
BufferSinkContext *s = link->dst->priv; |
54 | 54 |
|
55 | 55 |
av_assert0(!s->cur_buf); |
56 | 56 |
s->cur_buf = buf; |
57 |
- link->cur_buf = NULL; |
|
58 | 57 |
|
59 | 58 |
return 0; |
60 | 59 |
} |
... | ... |
@@ -144,7 +143,7 @@ static const AVFilterPad avfilter_vsink_buffer_inputs[] = { |
144 | 144 |
{ |
145 | 145 |
.name = "default", |
146 | 146 |
.type = AVMEDIA_TYPE_VIDEO, |
147 |
- .start_frame = start_frame, |
|
147 |
+ .filter_frame = filter_frame, |
|
148 | 148 |
.min_perms = AV_PERM_READ, |
149 | 149 |
.needs_fifo = 1 |
150 | 150 |
}, |
... | ... |
@@ -165,7 +164,7 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = { |
165 | 165 |
{ |
166 | 166 |
.name = "default", |
167 | 167 |
.type = AVMEDIA_TYPE_AUDIO, |
168 |
- .filter_samples = start_frame, |
|
168 |
+ .filter_frame = filter_frame, |
|
169 | 169 |
.min_perms = AV_PERM_READ, |
170 | 170 |
.needs_fifo = 1 |
171 | 171 |
}, |
... | ... |
@@ -327,20 +327,7 @@ static int request_frame(AVFilterLink *link) |
327 | 327 |
} |
328 | 328 |
av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL); |
329 | 329 |
|
330 |
- switch (link->type) { |
|
331 |
- case AVMEDIA_TYPE_VIDEO: |
|
332 |
- if ((ret = ff_start_frame(link, buf)) < 0 || |
|
333 |
- (ret = ff_draw_slice(link, 0, link->h, 1)) < 0 || |
|
334 |
- (ret = ff_end_frame(link)) < 0) |
|
335 |
- return ret; |
|
336 |
- break; |
|
337 |
- case AVMEDIA_TYPE_AUDIO: |
|
338 |
- ret = ff_filter_samples(link, buf); |
|
339 |
- break; |
|
340 |
- default: |
|
341 |
- avfilter_unref_bufferp(&buf); |
|
342 |
- return AVERROR(EINVAL); |
|
343 |
- } |
|
330 |
+ ff_filter_frame(link, buf); |
|
344 | 331 |
|
345 | 332 |
return ret; |
346 | 333 |
} |
... | ... |
@@ -77,7 +77,6 @@ static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf) |
77 | 77 |
{ |
78 | 78 |
FifoContext *fifo = inlink->dst->priv; |
79 | 79 |
|
80 |
- inlink->cur_buf = NULL; |
|
81 | 80 |
fifo->last->next = av_mallocz(sizeof(Buf)); |
82 | 81 |
if (!fifo->last->next) { |
83 | 82 |
avfilter_unref_buffer(buf); |
... | ... |
@@ -99,16 +98,6 @@ static void queue_pop(FifoContext *s) |
99 | 99 |
s->root.next = tmp; |
100 | 100 |
} |
101 | 101 |
|
102 |
-static int end_frame(AVFilterLink *inlink) |
|
103 |
-{ |
|
104 |
- return 0; |
|
105 |
-} |
|
106 |
- |
|
107 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
108 |
-{ |
|
109 |
- return 0; |
|
110 |
-} |
|
111 |
- |
|
112 | 102 |
/** |
113 | 103 |
* Move data pointers and pts offset samples forward. |
114 | 104 |
*/ |
... | ... |
@@ -228,7 +217,7 @@ static int return_audio_frame(AVFilterContext *ctx) |
228 | 228 |
buf_out = s->buf_out; |
229 | 229 |
s->buf_out = NULL; |
230 | 230 |
} |
231 |
- return ff_filter_samples(link, buf_out); |
|
231 |
+ return ff_filter_frame(link, buf_out); |
|
232 | 232 |
} |
233 | 233 |
|
234 | 234 |
static int request_frame(AVFilterLink *outlink) |
... | ... |
@@ -241,27 +230,11 @@ static int request_frame(AVFilterLink *outlink) |
241 | 241 |
return ret; |
242 | 242 |
} |
243 | 243 |
|
244 |
- /* by doing this, we give ownership of the reference to the next filter, |
|
245 |
- * so we don't have to worry about dereferencing it ourselves. */ |
|
246 |
- switch (outlink->type) { |
|
247 |
- case AVMEDIA_TYPE_VIDEO: |
|
248 |
- if ((ret = ff_start_frame(outlink, fifo->root.next->buf)) < 0 || |
|
249 |
- (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || |
|
250 |
- (ret = ff_end_frame(outlink)) < 0) |
|
251 |
- return ret; |
|
252 |
- |
|
244 |
+ if (outlink->request_samples) { |
|
245 |
+ return return_audio_frame(outlink->src); |
|
246 |
+ } else { |
|
247 |
+ ret = ff_filter_frame(outlink, fifo->root.next->buf); |
|
253 | 248 |
queue_pop(fifo); |
254 |
- break; |
|
255 |
- case AVMEDIA_TYPE_AUDIO: |
|
256 |
- if (outlink->request_samples) { |
|
257 |
- return return_audio_frame(outlink->src); |
|
258 |
- } else { |
|
259 |
- ret = ff_filter_samples(outlink, fifo->root.next->buf); |
|
260 |
- queue_pop(fifo); |
|
261 |
- } |
|
262 |
- break; |
|
263 |
- default: |
|
264 |
- return AVERROR(EINVAL); |
|
265 | 249 |
} |
266 | 250 |
|
267 | 251 |
return ret; |
... | ... |
@@ -272,9 +245,7 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = { |
272 | 272 |
.name = "default", |
273 | 273 |
.type = AVMEDIA_TYPE_VIDEO, |
274 | 274 |
.get_video_buffer = ff_null_get_video_buffer, |
275 |
- .start_frame = add_to_queue, |
|
276 |
- .draw_slice = draw_slice, |
|
277 |
- .end_frame = end_frame, |
|
275 |
+ .filter_frame = add_to_queue, |
|
278 | 276 |
.rej_perms = AV_PERM_REUSE2, |
279 | 277 |
}, |
280 | 278 |
{ NULL } |
... | ... |
@@ -307,7 +278,7 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = { |
307 | 307 |
.name = "default", |
308 | 308 |
.type = AVMEDIA_TYPE_AUDIO, |
309 | 309 |
.get_audio_buffer = ff_null_get_audio_buffer, |
310 |
- .filter_samples = add_to_queue, |
|
310 |
+ .filter_frame = add_to_queue, |
|
311 | 311 |
.rej_perms = AV_PERM_REUSE2, |
312 | 312 |
}, |
313 | 313 |
{ NULL } |
... | ... |
@@ -64,18 +64,6 @@ struct AVFilterPad { |
64 | 64 |
int rej_perms; |
65 | 65 |
|
66 | 66 |
/** |
67 |
- * Callback called before passing the first slice of a new frame. If |
|
68 |
- * NULL, the filter layer will default to storing a reference to the |
|
69 |
- * picture inside the link structure. |
|
70 |
- * |
|
71 |
- * Input video pads only. |
|
72 |
- * |
|
73 |
- * @return >= 0 on success, a negative AVERROR on error. picref will be |
|
74 |
- * unreferenced by the caller in case of error. |
|
75 |
- */ |
|
76 |
- void (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); |
|
77 |
- |
|
78 |
- /** |
|
79 | 67 |
* Callback function to get a video buffer. If NULL, the filter system will |
80 | 68 |
* use avfilter_default_get_video_buffer(). |
81 | 69 |
* |
... | ... |
@@ -93,37 +81,16 @@ struct AVFilterPad { |
93 | 93 |
int nb_samples); |
94 | 94 |
|
95 | 95 |
/** |
96 |
- * Callback called after the slices of a frame are completely sent. If |
|
97 |
- * NULL, the filter layer will default to releasing the reference stored |
|
98 |
- * in the link structure during start_frame(). |
|
96 |
+ * Filtering callback. This is where a filter receives a frame with |
|
97 |
+ * audio/video data and should do its processing. |
|
99 | 98 |
* |
100 |
- * Input video pads only. |
|
101 |
- * |
|
102 |
- * @return >= 0 on success, a negative AVERROR on error. |
|
103 |
- */ |
|
104 |
- int (*end_frame)(AVFilterLink *link); |
|
105 |
- |
|
106 |
- /** |
|
107 |
- * Slice drawing callback. This is where a filter receives video data |
|
108 |
- * and should do its processing. |
|
109 |
- * |
|
110 |
- * Input video pads only. |
|
111 |
- * |
|
112 |
- * @return >= 0 on success, a negative AVERROR on error. |
|
113 |
- */ |
|
114 |
- int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); |
|
115 |
- |
|
116 |
- /** |
|
117 |
- * Samples filtering callback. This is where a filter receives audio data |
|
118 |
- * and should do its processing. |
|
119 |
- * |
|
120 |
- * Input audio pads only. |
|
99 |
+ * Input pads only. |
|
121 | 100 |
* |
122 | 101 |
* @return >= 0 on success, a negative AVERROR on error. This function |
123 | 102 |
* must ensure that frame is properly unreferenced on error if it |
124 | 103 |
* hasn't been passed on to another filter. |
125 | 104 |
*/ |
126 |
- int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref); |
|
105 |
+ int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame); |
|
127 | 106 |
|
128 | 107 |
/** |
129 | 108 |
* Frame poll callback. This returns the number of immediately available |
... | ... |
@@ -237,4 +204,17 @@ int ff_poll_frame(AVFilterLink *link); |
237 | 237 |
*/ |
238 | 238 |
int ff_request_frame(AVFilterLink *link); |
239 | 239 |
|
240 |
+/** |
|
241 |
+ * Send a frame of data to the next filter. |
|
242 |
+ * |
|
243 |
+ * @param link the output link over which the data is being sent |
|
244 |
+ * @param frame a reference to the buffer of data being sent. The |
|
245 |
+ * receiving filter will free this reference when it no longer |
|
246 |
+ * needs it or pass it on to the next filter. |
|
247 |
+ * |
|
248 |
+ * @return >= 0 on success, a negative AVERROR on error. The receiving filter |
|
249 |
+ * is responsible for unreferencing frame in case of error. |
|
250 |
+ */ |
|
251 |
+int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame); |
|
252 |
+ |
|
240 | 253 |
#endif /* AVFILTER_INTERNAL_H */ |
... | ... |
@@ -67,46 +67,23 @@ static void split_uninit(AVFilterContext *ctx) |
67 | 67 |
av_freep(&ctx->output_pads[i].name); |
68 | 68 |
} |
69 | 69 |
|
70 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
70 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
71 | 71 |
{ |
72 | 72 |
AVFilterContext *ctx = inlink->dst; |
73 | 73 |
int i, ret = 0; |
74 | 74 |
|
75 | 75 |
for (i = 0; i < ctx->nb_outputs; i++) { |
76 |
- AVFilterBufferRef *buf_out = avfilter_ref_buffer(picref, ~AV_PERM_WRITE); |
|
77 |
- if (!buf_out) |
|
78 |
- return AVERROR(ENOMEM); |
|
79 |
- |
|
80 |
- ret = ff_start_frame(ctx->outputs[i], buf_out); |
|
81 |
- if (ret < 0) |
|
82 |
- break; |
|
83 |
- } |
|
84 |
- return ret; |
|
85 |
-} |
|
86 |
- |
|
87 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
88 |
-{ |
|
89 |
- AVFilterContext *ctx = inlink->dst; |
|
90 |
- int i, ret = 0; |
|
91 |
- |
|
92 |
- for (i = 0; i < ctx->nb_outputs; i++) { |
|
93 |
- ret = ff_draw_slice(ctx->outputs[i], y, h, slice_dir); |
|
94 |
- if (ret < 0) |
|
76 |
+ AVFilterBufferRef *buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE); |
|
77 |
+ if (!buf_out) { |
|
78 |
+ ret = AVERROR(ENOMEM); |
|
95 | 79 |
break; |
96 |
- } |
|
97 |
- return ret; |
|
98 |
-} |
|
99 |
- |
|
100 |
-static int end_frame(AVFilterLink *inlink) |
|
101 |
-{ |
|
102 |
- AVFilterContext *ctx = inlink->dst; |
|
103 |
- int i, ret = 0; |
|
80 |
+ } |
|
104 | 81 |
|
105 |
- for (i = 0; i < ctx->nb_outputs; i++) { |
|
106 |
- ret = ff_end_frame(ctx->outputs[i]); |
|
82 |
+ ret = ff_filter_frame(ctx->outputs[i], buf_out); |
|
107 | 83 |
if (ret < 0) |
108 | 84 |
break; |
109 | 85 |
} |
86 |
+ avfilter_unref_bufferp(&frame); |
|
110 | 87 |
return ret; |
111 | 88 |
} |
112 | 89 |
|
... | ... |
@@ -115,9 +92,7 @@ static const AVFilterPad avfilter_vf_split_inputs[] = { |
115 | 115 |
.name = "default", |
116 | 116 |
.type = AVMEDIA_TYPE_VIDEO, |
117 | 117 |
.get_video_buffer = ff_null_get_video_buffer, |
118 |
- .start_frame = start_frame, |
|
119 |
- .draw_slice = draw_slice, |
|
120 |
- .end_frame = end_frame, |
|
118 |
+ .filter_frame = filter_frame, |
|
121 | 119 |
}, |
122 | 120 |
{ NULL } |
123 | 121 |
}; |
... | ... |
@@ -133,33 +108,12 @@ AVFilter avfilter_vf_split = { |
133 | 133 |
.outputs = NULL, |
134 | 134 |
}; |
135 | 135 |
|
136 |
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref) |
|
137 |
-{ |
|
138 |
- AVFilterContext *ctx = inlink->dst; |
|
139 |
- int i, ret = 0; |
|
140 |
- |
|
141 |
- for (i = 0; i < ctx->nb_outputs; i++) { |
|
142 |
- AVFilterBufferRef *buf_out = avfilter_ref_buffer(samplesref, |
|
143 |
- ~AV_PERM_WRITE); |
|
144 |
- if (!buf_out) { |
|
145 |
- ret = AVERROR(ENOMEM); |
|
146 |
- break; |
|
147 |
- } |
|
148 |
- |
|
149 |
- ret = ff_filter_samples(inlink->dst->outputs[i], buf_out); |
|
150 |
- if (ret < 0) |
|
151 |
- break; |
|
152 |
- } |
|
153 |
- avfilter_unref_buffer(samplesref); |
|
154 |
- return ret; |
|
155 |
-} |
|
156 |
- |
|
157 | 136 |
static const AVFilterPad avfilter_af_asplit_inputs[] = { |
158 | 137 |
{ |
159 | 138 |
.name = "default", |
160 | 139 |
.type = AVMEDIA_TYPE_AUDIO, |
161 | 140 |
.get_audio_buffer = ff_null_get_audio_buffer, |
162 |
- .filter_samples = filter_samples |
|
141 |
+ .filter_frame = filter_frame, |
|
163 | 142 |
}, |
164 | 143 |
{ NULL } |
165 | 144 |
}; |
... | ... |
@@ -65,13 +65,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
65 | 65 |
return 0; |
66 | 66 |
} |
67 | 67 |
|
68 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
68 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
69 | 69 |
{ |
70 | 70 |
AspectContext *aspect = link->dst->priv; |
71 | 71 |
|
72 |
- picref->video->pixel_aspect = aspect->aspect; |
|
73 |
- link->cur_buf = NULL; |
|
74 |
- return ff_start_frame(link->dst->outputs[0], picref); |
|
72 |
+ frame->video->pixel_aspect = aspect->aspect; |
|
73 |
+ return ff_filter_frame(link->dst->outputs[0], frame); |
|
75 | 74 |
} |
76 | 75 |
|
77 | 76 |
#if CONFIG_SETDAR_FILTER |
... | ... |
@@ -99,8 +98,7 @@ static const AVFilterPad avfilter_vf_setdar_inputs[] = { |
99 | 99 |
.type = AVMEDIA_TYPE_VIDEO, |
100 | 100 |
.config_props = setdar_config_props, |
101 | 101 |
.get_video_buffer = ff_null_get_video_buffer, |
102 |
- .start_frame = start_frame, |
|
103 |
- .end_frame = ff_null_end_frame |
|
102 |
+ .filter_frame = filter_frame, |
|
104 | 103 |
}, |
105 | 104 |
{ NULL } |
106 | 105 |
}; |
... | ... |
@@ -144,8 +142,7 @@ static const AVFilterPad avfilter_vf_setsar_inputs[] = { |
144 | 144 |
.type = AVMEDIA_TYPE_VIDEO, |
145 | 145 |
.config_props = setsar_config_props, |
146 | 146 |
.get_video_buffer = ff_null_get_video_buffer, |
147 |
- .start_frame = start_frame, |
|
148 |
- .end_frame = ff_null_end_frame |
|
147 |
+ .filter_frame = filter_frame, |
|
149 | 148 |
}, |
150 | 149 |
{ NULL } |
151 | 150 |
}; |
... | ... |
@@ -78,49 +78,37 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
78 | 78 |
return 0; |
79 | 79 |
} |
80 | 80 |
|
81 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
81 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
82 | 82 |
{ |
83 | 83 |
AVFilterContext *ctx = inlink->dst; |
84 | 84 |
BlackFrameContext *blackframe = ctx->priv; |
85 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
86 | 85 |
int x, i; |
87 |
- uint8_t *p = picref->data[0] + y * picref->linesize[0]; |
|
86 |
+ int pblack = 0; |
|
87 |
+ uint8_t *p = frame->data[0]; |
|
88 | 88 |
|
89 |
- for (i = 0; i < h; i++) { |
|
89 |
+ for (i = 0; i < frame->video->h; i++) { |
|
90 | 90 |
for (x = 0; x < inlink->w; x++) |
91 | 91 |
blackframe->nblack += p[x] < blackframe->bthresh; |
92 |
- p += picref->linesize[0]; |
|
92 |
+ p += frame->linesize[0]; |
|
93 | 93 |
} |
94 | 94 |
|
95 |
- return ff_draw_slice(ctx->outputs[0], y, h, slice_dir); |
|
96 |
-} |
|
97 |
- |
|
98 |
-static int end_frame(AVFilterLink *inlink) |
|
99 |
-{ |
|
100 |
- AVFilterContext *ctx = inlink->dst; |
|
101 |
- BlackFrameContext *blackframe = ctx->priv; |
|
102 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
103 |
- int pblack = 0; |
|
104 |
- |
|
105 | 95 |
pblack = blackframe->nblack * 100 / (inlink->w * inlink->h); |
106 | 96 |
if (pblack >= blackframe->bamount) |
107 | 97 |
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f\n", |
108 |
- blackframe->frame, pblack, picref->pos, picref->pts, |
|
109 |
- picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base)); |
|
98 |
+ blackframe->frame, pblack, frame->pos, frame->pts, |
|
99 |
+ frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base)); |
|
110 | 100 |
|
111 | 101 |
blackframe->frame++; |
112 | 102 |
blackframe->nblack = 0; |
113 |
- return ff_end_frame(inlink->dst->outputs[0]); |
|
103 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
114 | 104 |
} |
115 | 105 |
|
116 | 106 |
static const AVFilterPad avfilter_vf_blackframe_inputs[] = { |
117 | 107 |
{ |
118 | 108 |
.name = "default", |
119 | 109 |
.type = AVMEDIA_TYPE_VIDEO, |
120 |
- .draw_slice = draw_slice, |
|
121 | 110 |
.get_video_buffer = ff_null_get_video_buffer, |
122 |
- .start_frame = ff_null_start_frame, |
|
123 |
- .end_frame = end_frame, |
|
111 |
+ .filter_frame = filter_frame, |
|
124 | 112 |
}, |
125 | 113 |
{ NULL } |
126 | 114 |
}; |
... | ... |
@@ -307,31 +307,39 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li |
307 | 307 |
h, radius, power, temp); |
308 | 308 |
} |
309 | 309 |
|
310 |
-static int draw_slice(AVFilterLink *inlink, int y0, int h0, int slice_dir) |
|
310 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
311 | 311 |
{ |
312 | 312 |
AVFilterContext *ctx = inlink->dst; |
313 | 313 |
BoxBlurContext *boxblur = ctx->priv; |
314 | 314 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
315 |
- AVFilterBufferRef *inpicref = inlink ->cur_buf; |
|
316 |
- AVFilterBufferRef *outpicref = outlink->out_buf; |
|
315 |
+ AVFilterBufferRef *out; |
|
317 | 316 |
int plane; |
318 |
- int cw = inlink->w >> boxblur->hsub, ch = h0 >> boxblur->vsub; |
|
317 |
+ int cw = inlink->w >> boxblur->hsub, ch = in->video->h >> boxblur->vsub; |
|
319 | 318 |
int w[4] = { inlink->w, cw, cw, inlink->w }; |
320 |
- int h[4] = { h0, ch, ch, h0 }; |
|
319 |
+ int h[4] = { in->video->h, ch, ch, in->video->h }; |
|
321 | 320 |
|
322 |
- for (plane = 0; inpicref->data[plane] && plane < 4; plane++) |
|
323 |
- hblur(outpicref->data[plane], outpicref->linesize[plane], |
|
324 |
- inpicref ->data[plane], inpicref ->linesize[plane], |
|
321 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
322 |
+ if (!out) { |
|
323 |
+ avfilter_unref_bufferp(&in); |
|
324 |
+ return AVERROR(ENOMEM); |
|
325 |
+ } |
|
326 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
327 |
+ |
|
328 |
+ for (plane = 0; in->data[plane] && plane < 4; plane++) |
|
329 |
+ hblur(out->data[plane], out->linesize[plane], |
|
330 |
+ in ->data[plane], in ->linesize[plane], |
|
325 | 331 |
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], |
326 | 332 |
boxblur->temp); |
327 | 333 |
|
328 |
- for (plane = 0; inpicref->data[plane] && plane < 4; plane++) |
|
329 |
- vblur(outpicref->data[plane], outpicref->linesize[plane], |
|
330 |
- outpicref->data[plane], outpicref->linesize[plane], |
|
334 |
+ for (plane = 0; in->data[plane] && plane < 4; plane++) |
|
335 |
+ vblur(out->data[plane], out->linesize[plane], |
|
336 |
+ out->data[plane], out->linesize[plane], |
|
331 | 337 |
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], |
332 | 338 |
boxblur->temp); |
333 | 339 |
|
334 |
- return ff_draw_slice(outlink, y0, h0, slice_dir); |
|
340 |
+ avfilter_unref_bufferp(&in); |
|
341 |
+ |
|
342 |
+ return ff_filter_frame(outlink, out); |
|
335 | 343 |
} |
336 | 344 |
|
337 | 345 |
static const AVFilterPad avfilter_vf_boxblur_inputs[] = { |
... | ... |
@@ -339,7 +347,7 @@ static const AVFilterPad avfilter_vf_boxblur_inputs[] = { |
339 | 339 |
.name = "default", |
340 | 340 |
.type = AVMEDIA_TYPE_VIDEO, |
341 | 341 |
.config_props = config_input, |
342 |
- .draw_slice = draw_slice, |
|
342 |
+ .filter_frame = filter_frame, |
|
343 | 343 |
.min_perms = AV_PERM_READ |
344 | 344 |
}, |
345 | 345 |
{ NULL } |
... | ... |
@@ -31,8 +31,6 @@ static const AVFilterPad avfilter_vf_copy_inputs[] = { |
31 | 31 |
.name = "default", |
32 | 32 |
.type = AVMEDIA_TYPE_VIDEO, |
33 | 33 |
.get_video_buffer = ff_null_get_video_buffer, |
34 |
- .start_frame = ff_null_start_frame, |
|
35 |
- .end_frame = ff_null_end_frame, |
|
36 | 34 |
.rej_perms = ~0 |
37 | 35 |
}, |
38 | 36 |
{ NULL } |
... | ... |
@@ -243,24 +243,19 @@ static int config_output(AVFilterLink *link) |
243 | 243 |
return 0; |
244 | 244 |
} |
245 | 245 |
|
246 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
246 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
247 | 247 |
{ |
248 | 248 |
AVFilterContext *ctx = link->dst; |
249 | 249 |
CropContext *crop = ctx->priv; |
250 |
- AVFilterBufferRef *ref2; |
|
251 | 250 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); |
252 | 251 |
int i; |
253 | 252 |
|
254 |
- ref2 = avfilter_ref_buffer(picref, ~0); |
|
255 |
- if (!ref2) |
|
256 |
- return AVERROR(ENOMEM); |
|
253 |
+ frame->video->w = crop->w; |
|
254 |
+ frame->video->h = crop->h; |
|
257 | 255 |
|
258 |
- ref2->video->w = crop->w; |
|
259 |
- ref2->video->h = crop->h; |
|
260 |
- |
|
261 |
- crop->var_values[VAR_T] = picref->pts == AV_NOPTS_VALUE ? |
|
262 |
- NAN : picref->pts * av_q2d(link->time_base); |
|
263 |
- crop->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos; |
|
256 |
+ crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ? |
|
257 |
+ NAN : frame->pts * av_q2d(link->time_base); |
|
258 |
+ crop->var_values[VAR_POS] = frame->pos == -1 ? NAN : frame->pos; |
|
264 | 259 |
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL); |
265 | 260 |
crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL); |
266 | 261 |
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL); |
... | ... |
@@ -279,60 +274,34 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
279 | 279 |
(int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x, |
280 | 280 |
crop->y, crop->x+crop->w, crop->y+crop->h); |
281 | 281 |
|
282 |
- ref2->data[0] += crop->y * ref2->linesize[0]; |
|
283 |
- ref2->data[0] += crop->x * crop->max_step[0]; |
|
282 |
+ frame->data[0] += crop->y * frame->linesize[0]; |
|
283 |
+ frame->data[0] += crop->x * crop->max_step[0]; |
|
284 | 284 |
|
285 | 285 |
if (!(desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL)) { |
286 | 286 |
for (i = 1; i < 3; i ++) { |
287 |
- if (ref2->data[i]) { |
|
288 |
- ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i]; |
|
289 |
- ref2->data[i] += (crop->x * crop->max_step[i]) >> crop->hsub; |
|
287 |
+ if (frame->data[i]) { |
|
288 |
+ frame->data[i] += (crop->y >> crop->vsub) * frame->linesize[i]; |
|
289 |
+ frame->data[i] += (crop->x * crop->max_step[i]) >> crop->hsub; |
|
290 | 290 |
} |
291 | 291 |
} |
292 | 292 |
} |
293 | 293 |
|
294 | 294 |
/* alpha plane */ |
295 |
- if (ref2->data[3]) { |
|
296 |
- ref2->data[3] += crop->y * ref2->linesize[3]; |
|
297 |
- ref2->data[3] += crop->x * crop->max_step[3]; |
|
295 |
+ if (frame->data[3]) { |
|
296 |
+ frame->data[3] += crop->y * frame->linesize[3]; |
|
297 |
+ frame->data[3] += crop->x * crop->max_step[3]; |
|
298 | 298 |
} |
299 | 299 |
|
300 |
- return ff_start_frame(link->dst->outputs[0], ref2); |
|
301 |
-} |
|
302 |
- |
|
303 |
-static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
304 |
-{ |
|
305 |
- AVFilterContext *ctx = link->dst; |
|
306 |
- CropContext *crop = ctx->priv; |
|
307 |
- |
|
308 |
- if (y >= crop->y + crop->h || y + h <= crop->y) |
|
309 |
- return 0; |
|
310 |
- |
|
311 |
- if (y < crop->y) { |
|
312 |
- h -= crop->y - y; |
|
313 |
- y = crop->y; |
|
314 |
- } |
|
315 |
- if (y + h > crop->y + crop->h) |
|
316 |
- h = crop->y + crop->h - y; |
|
317 |
- |
|
318 |
- return ff_draw_slice(ctx->outputs[0], y - crop->y, h, slice_dir); |
|
319 |
-} |
|
320 |
- |
|
321 |
-static int end_frame(AVFilterLink *link) |
|
322 |
-{ |
|
323 |
- CropContext *crop = link->dst->priv; |
|
324 |
- |
|
325 | 300 |
crop->var_values[VAR_N] += 1.0; |
326 |
- return ff_end_frame(link->dst->outputs[0]); |
|
301 |
+ |
|
302 |
+ return ff_filter_frame(link->dst->outputs[0], frame); |
|
327 | 303 |
} |
328 | 304 |
|
329 | 305 |
static const AVFilterPad avfilter_vf_crop_inputs[] = { |
330 | 306 |
{ |
331 | 307 |
.name = "default", |
332 | 308 |
.type = AVMEDIA_TYPE_VIDEO, |
333 |
- .start_frame = start_frame, |
|
334 |
- .draw_slice = draw_slice, |
|
335 |
- .end_frame = end_frame, |
|
309 |
+ .filter_frame = filter_frame, |
|
336 | 310 |
.get_video_buffer = ff_null_get_video_buffer, |
337 | 311 |
.config_props = config_input, |
338 | 312 |
}, |
... | ... |
@@ -117,11 +117,10 @@ static int config_input(AVFilterLink *inlink) |
117 | 117 |
return 0; |
118 | 118 |
} |
119 | 119 |
|
120 |
-static int end_frame(AVFilterLink *inlink) |
|
120 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
121 | 121 |
{ |
122 | 122 |
AVFilterContext *ctx = inlink->dst; |
123 | 123 |
CropDetectContext *cd = ctx->priv; |
124 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
125 | 124 |
int bpp = cd->max_pixsteps[0]; |
126 | 125 |
int w, h, x, y, shrink_by; |
127 | 126 |
|
... | ... |
@@ -129,36 +128,36 @@ static int end_frame(AVFilterLink *inlink) |
129 | 129 |
if (++cd->frame_nb > 0) { |
130 | 130 |
// Reset the crop area every reset_count frames, if reset_count is > 0 |
131 | 131 |
if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) { |
132 |
- cd->x1 = picref->video->w-1; |
|
133 |
- cd->y1 = picref->video->h-1; |
|
132 |
+ cd->x1 = frame->video->w-1; |
|
133 |
+ cd->y1 = frame->video->h-1; |
|
134 | 134 |
cd->x2 = 0; |
135 | 135 |
cd->y2 = 0; |
136 | 136 |
cd->frame_nb = 1; |
137 | 137 |
} |
138 | 138 |
|
139 | 139 |
for (y = 0; y < cd->y1; y++) { |
140 |
- if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) { |
|
140 |
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) { |
|
141 | 141 |
cd->y1 = y; |
142 | 142 |
break; |
143 | 143 |
} |
144 | 144 |
} |
145 | 145 |
|
146 |
- for (y = picref->video->h-1; y > cd->y2; y--) { |
|
147 |
- if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) { |
|
146 |
+ for (y = frame->video->h-1; y > cd->y2; y--) { |
|
147 |
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) { |
|
148 | 148 |
cd->y2 = y; |
149 | 149 |
break; |
150 | 150 |
} |
151 | 151 |
} |
152 | 152 |
|
153 | 153 |
for (y = 0; y < cd->x1; y++) { |
154 |
- if (checkline(ctx, picref->data[0] + bpp*y, picref->linesize[0], picref->video->h, bpp) > cd->limit) { |
|
154 |
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) { |
|
155 | 155 |
cd->x1 = y; |
156 | 156 |
break; |
157 | 157 |
} |
158 | 158 |
} |
159 | 159 |
|
160 |
- for (y = picref->video->w-1; y > cd->x2; y--) { |
|
161 |
- if (checkline(ctx, picref->data[0] + bpp*y, picref->linesize[0], picref->video->h, bpp) > cd->limit) { |
|
160 |
+ for (y = frame->video->w-1; y > cd->x2; y--) { |
|
161 |
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) { |
|
162 | 162 |
cd->x2 = y; |
163 | 163 |
break; |
164 | 164 |
} |
... | ... |
@@ -189,12 +188,12 @@ static int end_frame(AVFilterLink *inlink) |
189 | 189 |
|
190 | 190 |
av_log(ctx, AV_LOG_INFO, |
191 | 191 |
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n", |
192 |
- cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, picref->pos, picref->pts, |
|
193 |
- picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base), |
|
192 |
+ cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pos, frame->pts, |
|
193 |
+ frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base), |
|
194 | 194 |
w, h, x, y); |
195 | 195 |
} |
196 | 196 |
|
197 |
- return ff_end_frame(inlink->dst->outputs[0]); |
|
197 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
198 | 198 |
} |
199 | 199 |
|
200 | 200 |
static const AVFilterPad avfilter_vf_cropdetect_inputs[] = { |
... | ... |
@@ -203,8 +202,7 @@ static const AVFilterPad avfilter_vf_cropdetect_inputs[] = { |
203 | 203 |
.type = AVMEDIA_TYPE_VIDEO, |
204 | 204 |
.config_props = config_input, |
205 | 205 |
.get_video_buffer = ff_null_get_video_buffer, |
206 |
- .start_frame = ff_null_start_frame, |
|
207 |
- .end_frame = end_frame, |
|
206 |
+ .filter_frame = filter_frame, |
|
208 | 207 |
}, |
209 | 208 |
{ NULL } |
210 | 209 |
}; |
... | ... |
@@ -215,30 +215,38 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
215 | 215 |
return 0; |
216 | 216 |
} |
217 | 217 |
|
218 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
219 |
-{ |
|
220 |
- return 0; |
|
221 |
-} |
|
222 |
- |
|
223 |
-static int end_frame(AVFilterLink *inlink) |
|
218 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
224 | 219 |
{ |
225 | 220 |
DelogoContext *delogo = inlink->dst->priv; |
226 | 221 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
227 |
- AVFilterBufferRef *inpicref = inlink ->cur_buf; |
|
228 |
- AVFilterBufferRef *outpicref = outlink->out_buf; |
|
229 | 222 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
230 |
- int direct = inpicref->buf == outpicref->buf; |
|
223 |
+ AVFilterBufferRef *out; |
|
231 | 224 |
int hsub0 = desc->log2_chroma_w; |
232 | 225 |
int vsub0 = desc->log2_chroma_h; |
226 |
+ int direct; |
|
233 | 227 |
int plane; |
234 |
- int ret; |
|
235 | 228 |
|
236 |
- for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) { |
|
229 |
+ if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { |
|
230 |
+ direct = 1; |
|
231 |
+ out = in; |
|
232 |
+ } else { |
|
233 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
234 |
+ if (!out) { |
|
235 |
+ avfilter_unref_bufferp(&in); |
|
236 |
+ return AVERROR(ENOMEM); |
|
237 |
+ } |
|
238 |
+ |
|
239 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
240 |
+ out->video->w = outlink->w; |
|
241 |
+ out->video->h = outlink->h; |
|
242 |
+ } |
|
243 |
+ |
|
244 |
+ for (plane = 0; plane < 4 && in->data[plane]; plane++) { |
|
237 | 245 |
int hsub = plane == 1 || plane == 2 ? hsub0 : 0; |
238 | 246 |
int vsub = plane == 1 || plane == 2 ? vsub0 : 0; |
239 | 247 |
|
240 |
- apply_delogo(outpicref->data[plane], outpicref->linesize[plane], |
|
241 |
- inpicref ->data[plane], inpicref ->linesize[plane], |
|
248 |
+ apply_delogo(out->data[plane], out->linesize[plane], |
|
249 |
+ in ->data[plane], in ->linesize[plane], |
|
242 | 250 |
inlink->w>>hsub, inlink->h>>vsub, |
243 | 251 |
delogo->x>>hsub, delogo->y>>vsub, |
244 | 252 |
delogo->w>>hsub, delogo->h>>vsub, |
... | ... |
@@ -246,10 +254,10 @@ static int end_frame(AVFilterLink *inlink) |
246 | 246 |
delogo->show, direct); |
247 | 247 |
} |
248 | 248 |
|
249 |
- if ((ret = ff_draw_slice(outlink, 0, inlink->h, 1)) < 0 || |
|
250 |
- (ret = ff_end_frame(outlink)) < 0) |
|
251 |
- return ret; |
|
252 |
- return 0; |
|
249 |
+ if (!direct) |
|
250 |
+ avfilter_unref_bufferp(&in); |
|
251 |
+ |
|
252 |
+ return ff_filter_frame(outlink, out); |
|
253 | 253 |
} |
254 | 254 |
|
255 | 255 |
static const AVFilterPad avfilter_vf_delogo_inputs[] = { |
... | ... |
@@ -257,9 +265,7 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = { |
257 | 257 |
.name = "default", |
258 | 258 |
.type = AVMEDIA_TYPE_VIDEO, |
259 | 259 |
.get_video_buffer = ff_null_get_video_buffer, |
260 |
- .start_frame = ff_inplace_start_frame, |
|
261 |
- .draw_slice = null_draw_slice, |
|
262 |
- .end_frame = end_frame, |
|
260 |
+ .filter_frame = filter_frame, |
|
263 | 261 |
.min_perms = AV_PERM_WRITE | AV_PERM_READ, |
264 | 262 |
.rej_perms = AV_PERM_PRESERVE |
265 | 263 |
}, |
... | ... |
@@ -96,21 +96,20 @@ static int config_input(AVFilterLink *inlink) |
96 | 96 |
return 0; |
97 | 97 |
} |
98 | 98 |
|
99 |
-static int draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir) |
|
99 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
100 | 100 |
{ |
101 | 101 |
DrawBoxContext *drawbox = inlink->dst->priv; |
102 | 102 |
int plane, x, y, xb = drawbox->x, yb = drawbox->y; |
103 | 103 |
unsigned char *row[4]; |
104 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
105 | 104 |
|
106 |
- for (y = FFMAX(yb, y0); y < (y0 + h) && y < (yb + drawbox->h); y++) { |
|
107 |
- row[0] = picref->data[0] + y * picref->linesize[0]; |
|
105 |
+ for (y = FFMAX(yb, 0); y < frame->video->h && y < (yb + drawbox->h); y++) { |
|
106 |
+ row[0] = frame->data[0] + y * frame->linesize[0]; |
|
108 | 107 |
|
109 | 108 |
for (plane = 1; plane < 3; plane++) |
110 |
- row[plane] = picref->data[plane] + |
|
111 |
- picref->linesize[plane] * (y >> drawbox->vsub); |
|
109 |
+ row[plane] = frame->data[plane] + |
|
110 |
+ frame->linesize[plane] * (y >> drawbox->vsub); |
|
112 | 111 |
|
113 |
- for (x = FFMAX(xb, 0); x < (xb + drawbox->w) && x < picref->video->w; x++) { |
|
112 |
+ for (x = FFMAX(xb, 0); x < (xb + drawbox->w) && x < frame->video->w; x++) { |
|
114 | 113 |
double alpha = (double)drawbox->yuv_color[A] / 255; |
115 | 114 |
|
116 | 115 |
if ((y - yb < 3) || (yb + drawbox->h - y < 4) || |
... | ... |
@@ -122,7 +121,7 @@ static int draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir) |
122 | 122 |
} |
123 | 123 |
} |
124 | 124 |
|
125 |
- return ff_draw_slice(inlink->dst->outputs[0], y0, h, 1); |
|
125 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
126 | 126 |
} |
127 | 127 |
|
128 | 128 |
static const AVFilterPad avfilter_vf_drawbox_inputs[] = { |
... | ... |
@@ -131,9 +130,7 @@ static const AVFilterPad avfilter_vf_drawbox_inputs[] = { |
131 | 131 |
.type = AVMEDIA_TYPE_VIDEO, |
132 | 132 |
.config_props = config_input, |
133 | 133 |
.get_video_buffer = ff_null_get_video_buffer, |
134 |
- .start_frame = ff_null_start_frame, |
|
135 |
- .draw_slice = draw_slice, |
|
136 |
- .end_frame = ff_null_end_frame, |
|
134 |
+ .filter_frame = filter_frame, |
|
137 | 135 |
.min_perms = AV_PERM_WRITE | AV_PERM_READ, |
138 | 136 |
.rej_perms = AV_PERM_PRESERVE |
139 | 137 |
}, |
... | ... |
@@ -792,11 +792,6 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref, |
792 | 792 |
return 0; |
793 | 793 |
} |
794 | 794 |
|
795 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
796 |
-{ |
|
797 |
- return 0; |
|
798 |
-} |
|
799 |
- |
|
800 | 795 |
static inline int normalize_double(int *n, double d) |
801 | 796 |
{ |
802 | 797 |
int ret = 0; |
... | ... |
@@ -812,20 +807,20 @@ static inline int normalize_double(int *n, double d) |
812 | 812 |
return ret; |
813 | 813 |
} |
814 | 814 |
|
815 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
815 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
816 | 816 |
{ |
817 | 817 |
AVFilterContext *ctx = inlink->dst; |
818 | 818 |
DrawTextContext *dtext = ctx->priv; |
819 |
- AVFilterBufferRef *buf_out; |
|
820 | 819 |
int ret = 0; |
821 | 820 |
|
822 | 821 |
if ((ret = dtext_prepare_text(ctx)) < 0) { |
823 | 822 |
av_log(ctx, AV_LOG_ERROR, "Can't draw text\n"); |
823 |
+ avfilter_unref_bufferp(&frame); |
|
824 | 824 |
return ret; |
825 | 825 |
} |
826 | 826 |
|
827 |
- dtext->var_values[VAR_T] = inpicref->pts == AV_NOPTS_VALUE ? |
|
828 |
- NAN : inpicref->pts * av_q2d(inlink->time_base); |
|
827 |
+ dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ? |
|
828 |
+ NAN : frame->pts * av_q2d(inlink->time_base); |
|
829 | 829 |
dtext->var_values[VAR_X] = |
830 | 830 |
av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng); |
831 | 831 |
dtext->var_values[VAR_Y] = |
... | ... |
@@ -854,29 +849,12 @@ static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
854 | 854 |
(int)dtext->var_values[VAR_N], dtext->var_values[VAR_T], |
855 | 855 |
dtext->x, dtext->y, dtext->x+dtext->w, dtext->y+dtext->h); |
856 | 856 |
|
857 |
- buf_out = avfilter_ref_buffer(inpicref, ~0); |
|
858 |
- if (!buf_out) |
|
859 |
- return AVERROR(ENOMEM); |
|
860 |
- |
|
861 |
- return ff_start_frame(inlink->dst->outputs[0], buf_out); |
|
862 |
-} |
|
863 |
- |
|
864 |
-static int end_frame(AVFilterLink *inlink) |
|
865 |
-{ |
|
866 |
- AVFilterLink *outlink = inlink->dst->outputs[0]; |
|
867 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
868 |
- DrawTextContext *dtext = inlink->dst->priv; |
|
869 |
- int ret; |
|
870 |
- |
|
871 | 857 |
if (dtext->draw) |
872 |
- draw_text(inlink->dst, picref, picref->video->w, picref->video->h); |
|
858 |
+ draw_text(inlink->dst, frame, frame->video->w, frame->video->h); |
|
873 | 859 |
|
874 | 860 |
dtext->var_values[VAR_N] += 1.0; |
875 | 861 |
|
876 |
- if ((ret = ff_draw_slice(outlink, 0, picref->video->h, 1)) < 0 || |
|
877 |
- (ret = ff_end_frame(outlink)) < 0) |
|
878 |
- return ret; |
|
879 |
- return 0; |
|
862 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
880 | 863 |
} |
881 | 864 |
|
882 | 865 |
static const AVFilterPad avfilter_vf_drawtext_inputs[] = { |
... | ... |
@@ -884,9 +862,7 @@ static const AVFilterPad avfilter_vf_drawtext_inputs[] = { |
884 | 884 |
.name = "default", |
885 | 885 |
.type = AVMEDIA_TYPE_VIDEO, |
886 | 886 |
.get_video_buffer = ff_null_get_video_buffer, |
887 |
- .start_frame = start_frame, |
|
888 |
- .draw_slice = null_draw_slice, |
|
889 |
- .end_frame = end_frame, |
|
887 |
+ .filter_frame = filter_frame, |
|
890 | 888 |
.config_props = config_input, |
891 | 889 |
.min_perms = AV_PERM_WRITE | |
892 | 890 |
AV_PERM_READ, |
... | ... |
@@ -98,17 +98,16 @@ static int config_props(AVFilterLink *inlink) |
98 | 98 |
return 0; |
99 | 99 |
} |
100 | 100 |
|
101 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
101 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
102 | 102 |
{ |
103 | 103 |
FadeContext *fade = inlink->dst->priv; |
104 |
- AVFilterBufferRef *outpic = inlink->cur_buf; |
|
105 | 104 |
uint8_t *p; |
106 | 105 |
int i, j, plane; |
107 | 106 |
|
108 | 107 |
if (fade->factor < UINT16_MAX) { |
109 | 108 |
/* luma or rgb plane */ |
110 |
- for (i = 0; i < h; i++) { |
|
111 |
- p = outpic->data[0] + (y+i) * outpic->linesize[0]; |
|
109 |
+ for (i = 0; i < frame->video->h; i++) { |
|
110 |
+ p = frame->data[0] + i * frame->linesize[0]; |
|
112 | 111 |
for (j = 0; j < inlink->w * fade->bpp; j++) { |
113 | 112 |
/* fade->factor is using 16 lower-order bits for decimal |
114 | 113 |
* places. 32768 = 1 << 15, it is an integer representation |
... | ... |
@@ -118,11 +117,11 @@ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
118 | 118 |
} |
119 | 119 |
} |
120 | 120 |
|
121 |
- if (outpic->data[1] && outpic->data[2]) { |
|
121 |
+ if (frame->data[1] && frame->data[2]) { |
|
122 | 122 |
/* chroma planes */ |
123 | 123 |
for (plane = 1; plane < 3; plane++) { |
124 |
- for (i = 0; i < h; i++) { |
|
125 |
- p = outpic->data[plane] + ((y+i) >> fade->vsub) * outpic->linesize[plane]; |
|
124 |
+ for (i = 0; i < frame->video->h; i++) { |
|
125 |
+ p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane]; |
|
126 | 126 |
for (j = 0; j < inlink->w >> fade->hsub; j++) { |
127 | 127 |
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer |
128 | 128 |
* representation of 128.5. The .5 is for rounding |
... | ... |
@@ -135,23 +134,13 @@ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
135 | 135 |
} |
136 | 136 |
} |
137 | 137 |
|
138 |
- return ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); |
|
139 |
-} |
|
140 |
- |
|
141 |
-static int end_frame(AVFilterLink *inlink) |
|
142 |
-{ |
|
143 |
- FadeContext *fade = inlink->dst->priv; |
|
144 |
- int ret; |
|
145 |
- |
|
146 |
- ret = ff_end_frame(inlink->dst->outputs[0]); |
|
147 |
- |
|
148 | 138 |
if (fade->frame_index >= fade->start_frame && |
149 | 139 |
fade->frame_index <= fade->stop_frame) |
150 | 140 |
fade->factor += fade->fade_per_frame; |
151 | 141 |
fade->factor = av_clip_uint16(fade->factor); |
152 | 142 |
fade->frame_index++; |
153 | 143 |
|
154 |
- return ret; |
|
144 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
155 | 145 |
} |
156 | 146 |
|
157 | 147 |
static const AVFilterPad avfilter_vf_fade_inputs[] = { |
... | ... |
@@ -160,9 +149,7 @@ static const AVFilterPad avfilter_vf_fade_inputs[] = { |
160 | 160 |
.type = AVMEDIA_TYPE_VIDEO, |
161 | 161 |
.config_props = config_props, |
162 | 162 |
.get_video_buffer = ff_null_get_video_buffer, |
163 |
- .start_frame = ff_null_start_frame, |
|
164 |
- .draw_slice = draw_slice, |
|
165 |
- .end_frame = end_frame, |
|
163 |
+ .filter_frame = filter_frame, |
|
166 | 164 |
.min_perms = AV_PERM_READ | AV_PERM_WRITE, |
167 | 165 |
.rej_perms = AV_PERM_PRESERVE, |
168 | 166 |
}, |
... | ... |
@@ -121,90 +121,39 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int |
121 | 121 |
return ff_get_video_buffer(outlink, perms, w, h); |
122 | 122 |
} |
123 | 123 |
|
124 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
124 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
125 | 125 |
{ |
126 |
- AVFilterContext *ctx = inlink->dst; |
|
127 |
- AVFilterLink *outlink = ctx->outputs[0]; |
|
128 |
- |
|
129 |
- AVFilterBufferRef *outpicref, *for_next_filter; |
|
130 |
- int ret = 0; |
|
131 |
- |
|
132 |
- outpicref = avfilter_ref_buffer(inpicref, ~0); |
|
133 |
- if (!outpicref) |
|
134 |
- return AVERROR(ENOMEM); |
|
135 |
- |
|
136 |
- for_next_filter = avfilter_ref_buffer(outpicref, ~0); |
|
137 |
- if (!for_next_filter) { |
|
138 |
- avfilter_unref_bufferp(&outpicref); |
|
139 |
- return AVERROR(ENOMEM); |
|
140 |
- } |
|
141 |
- |
|
142 |
- ret = ff_start_frame(outlink, for_next_filter); |
|
143 |
- if (ret < 0) { |
|
144 |
- avfilter_unref_bufferp(&outpicref); |
|
145 |
- return ret; |
|
146 |
- } |
|
147 |
- |
|
148 |
- outlink->out_buf = outpicref; |
|
149 |
- return 0; |
|
150 |
-} |
|
151 |
- |
|
152 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
153 |
-{ |
|
154 |
- AVFilterContext *ctx = inlink->dst; |
|
155 |
- FieldOrderContext *fieldorder = ctx->priv; |
|
156 |
- AVFilterLink *outlink = ctx->outputs[0]; |
|
157 |
- |
|
158 |
- AVFilterBufferRef *inpicref = inlink->cur_buf; |
|
126 |
+ AVFilterContext *ctx = inlink->dst; |
|
127 |
+ FieldOrderContext *s = ctx->priv; |
|
128 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
129 |
+ int h, plane, line_step, line_size, line; |
|
130 |
+ uint8_t *data; |
|
159 | 131 |
|
160 |
- /** can only currently do slices if this filter is doing nothing |
|
161 |
- * because this filter is moving picture content, the output |
|
162 |
- * slice will contain different video lines than the input slice |
|
163 |
- * and that complexity will be added later */ |
|
164 |
- if ( !inpicref->video->interlaced |
|
165 |
- || inpicref->video->top_field_first == fieldorder->dst_tff) { |
|
166 |
- return ff_draw_slice(outlink, y, h, slice_dir); |
|
167 |
- } |
|
168 |
- return 0; |
|
169 |
-} |
|
132 |
+ if (!frame->video->interlaced || |
|
133 |
+ frame->video->top_field_first == s->dst_tff) |
|
134 |
+ return ff_filter_frame(outlink, frame); |
|
170 | 135 |
|
171 |
-static int end_frame(AVFilterLink *inlink) |
|
172 |
-{ |
|
173 |
- AVFilterContext *ctx = inlink->dst; |
|
174 |
- FieldOrderContext *fieldorder = ctx->priv; |
|
175 |
- AVFilterLink *outlink = ctx->outputs[0]; |
|
176 |
- |
|
177 |
- AVFilterBufferRef *inpicref = inlink->cur_buf; |
|
178 |
- AVFilterBufferRef *outpicref = outlink->out_buf; |
|
179 |
- |
|
180 |
- int h, plane, line_step, line_size, line; |
|
181 |
- uint8_t *cpy_src, *cpy_dst; |
|
182 |
- |
|
183 |
- if ( inpicref->video->interlaced |
|
184 |
- && inpicref->video->top_field_first != fieldorder->dst_tff) { |
|
185 | 136 |
av_dlog(ctx, |
186 | 137 |
"picture will move %s one line\n", |
187 |
- fieldorder->dst_tff ? "up" : "down"); |
|
188 |
- h = inpicref->video->h; |
|
189 |
- for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) { |
|
190 |
- line_step = inpicref->linesize[plane]; |
|
191 |
- line_size = fieldorder->line_size[plane]; |
|
192 |
- cpy_src = inpicref->data[plane]; |
|
193 |
- cpy_dst = outpicref->data[plane]; |
|
194 |
- if (fieldorder->dst_tff) { |
|
138 |
+ s->dst_tff ? "up" : "down"); |
|
139 |
+ h = frame->video->h; |
|
140 |
+ for (plane = 0; plane < 4 && frame->data[plane]; plane++) { |
|
141 |
+ line_step = frame->linesize[plane]; |
|
142 |
+ line_size = s->line_size[plane]; |
|
143 |
+ data = frame->data[plane]; |
|
144 |
+ if (s->dst_tff) { |
|
195 | 145 |
/** Move every line up one line, working from |
196 | 146 |
* the top to the bottom of the frame. |
197 | 147 |
* The original top line is lost. |
198 | 148 |
* The new last line is created as a copy of the |
199 | 149 |
* penultimate line from that field. */ |
200 | 150 |
for (line = 0; line < h; line++) { |
201 |
- if (1 + line < outpicref->video->h) { |
|
202 |
- memcpy(cpy_dst, cpy_src + line_step, line_size); |
|
151 |
+ if (1 + line < frame->video->h) { |
|
152 |
+ memcpy(data, data + line_step, line_size); |
|
203 | 153 |
} else { |
204 |
- memcpy(cpy_dst, cpy_src - line_step - line_step, line_size); |
|
154 |
+ memcpy(data, data - line_step - line_step, line_size); |
|
205 | 155 |
} |
206 |
- cpy_src += line_step; |
|
207 |
- cpy_dst += line_step; |
|
156 |
+ data += line_step; |
|
208 | 157 |
} |
209 | 158 |
} else { |
210 | 159 |
/** Move every line down one line, working from |
... | ... |
@@ -212,27 +161,20 @@ static int end_frame(AVFilterLink *inlink) |
212 | 212 |
* The original bottom line is lost. |
213 | 213 |
* The new first line is created as a copy of the |
214 | 214 |
* second line from that field. */ |
215 |
- cpy_src += (h - 1) * line_step; |
|
216 |
- cpy_dst += (h - 1) * line_step; |
|
215 |
+ data += (h - 1) * line_step; |
|
217 | 216 |
for (line = h - 1; line >= 0 ; line--) { |
218 | 217 |
if (line > 0) { |
219 |
- memcpy(cpy_dst, cpy_src - line_step, line_size); |
|
218 |
+ memcpy(data, data - line_step, line_size); |
|
220 | 219 |
} else { |
221 |
- memcpy(cpy_dst, cpy_src + line_step + line_step, line_size); |
|
220 |
+ memcpy(data, data + line_step + line_step, line_size); |
|
222 | 221 |
} |
223 |
- cpy_src -= line_step; |
|
224 |
- cpy_dst -= line_step; |
|
222 |
+ data -= line_step; |
|
225 | 223 |
} |
226 | 224 |
} |
227 | 225 |
} |
228 |
- outpicref->video->top_field_first = fieldorder->dst_tff; |
|
229 |
- ff_draw_slice(outlink, 0, h, 1); |
|
230 |
- } else { |
|
231 |
- av_dlog(ctx, |
|
232 |
- "not interlaced or field order already correct\n"); |
|
233 |
- } |
|
226 |
+ frame->video->top_field_first = s->dst_tff; |
|
234 | 227 |
|
235 |
- return ff_end_frame(outlink); |
|
228 |
+ return ff_filter_frame(outlink, frame); |
|
236 | 229 |
} |
237 | 230 |
|
238 | 231 |
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = { |
... | ... |
@@ -240,10 +182,8 @@ static const AVFilterPad avfilter_vf_fieldorder_inputs[] = { |
240 | 240 |
.name = "default", |
241 | 241 |
.type = AVMEDIA_TYPE_VIDEO, |
242 | 242 |
.config_props = config_input, |
243 |
- .start_frame = start_frame, |
|
244 | 243 |
.get_video_buffer = get_video_buffer, |
245 |
- .draw_slice = draw_slice, |
|
246 |
- .end_frame = end_frame, |
|
244 |
+ .filter_frame = filter_frame, |
|
247 | 245 |
.min_perms = AV_PERM_READ, |
248 | 246 |
.rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE, |
249 | 247 |
}, |
... | ... |
@@ -104,9 +104,6 @@ static const AVFilterPad avfilter_vf_format_inputs[] = { |
104 | 104 |
.name = "default", |
105 | 105 |
.type = AVMEDIA_TYPE_VIDEO, |
106 | 106 |
.get_video_buffer = ff_null_get_video_buffer, |
107 |
- .start_frame = ff_null_start_frame, |
|
108 |
- .draw_slice = ff_null_draw_slice, |
|
109 |
- .end_frame = ff_null_end_frame, |
|
110 | 107 |
}, |
111 | 108 |
{ NULL } |
112 | 109 |
}; |
... | ... |
@@ -146,9 +143,6 @@ static const AVFilterPad avfilter_vf_noformat_inputs[] = { |
146 | 146 |
.name = "default", |
147 | 147 |
.type = AVMEDIA_TYPE_VIDEO, |
148 | 148 |
.get_video_buffer = ff_null_get_video_buffer, |
149 |
- .start_frame = ff_null_start_frame, |
|
150 |
- .draw_slice = ff_null_draw_slice, |
|
151 |
- .end_frame = ff_null_end_frame, |
|
152 | 149 |
}, |
153 | 150 |
{ NULL } |
154 | 151 |
}; |
... | ... |
@@ -144,9 +144,7 @@ static int request_frame(AVFilterLink *outlink) |
144 | 144 |
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, |
145 | 145 |
outlink->time_base) + s->frames_out; |
146 | 146 |
|
147 |
- if ((ret = ff_start_frame(outlink, buf)) < 0 || |
|
148 |
- (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || |
|
149 |
- (ret = ff_end_frame(outlink)) < 0) |
|
147 |
+ if ((ret = ff_filter_frame(outlink, buf)) < 0) |
|
150 | 148 |
return ret; |
151 | 149 |
|
152 | 150 |
s->frames_out++; |
... | ... |
@@ -171,16 +169,14 @@ static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf) |
171 | 171 |
return 0; |
172 | 172 |
} |
173 | 173 |
|
174 |
-static int end_frame(AVFilterLink *inlink) |
|
174 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
175 | 175 |
{ |
176 | 176 |
AVFilterContext *ctx = inlink->dst; |
177 | 177 |
FPSContext *s = ctx->priv; |
178 | 178 |
AVFilterLink *outlink = ctx->outputs[0]; |
179 |
- AVFilterBufferRef *buf = inlink->cur_buf; |
|
180 | 179 |
int64_t delta; |
181 | 180 |
int i, ret; |
182 | 181 |
|
183 |
- inlink->cur_buf = NULL; |
|
184 | 182 |
s->frames_in++; |
185 | 183 |
/* discard frames until we get the first timestamp */ |
186 | 184 |
if (s->pts == AV_NOPTS_VALUE) { |
... | ... |
@@ -251,9 +247,7 @@ static int end_frame(AVFilterLink *inlink) |
251 | 251 |
buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base, |
252 | 252 |
outlink->time_base) + s->frames_out; |
253 | 253 |
|
254 |
- if ((ret = ff_start_frame(outlink, buf_out)) < 0 || |
|
255 |
- (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || |
|
256 |
- (ret = ff_end_frame(outlink)) < 0) { |
|
254 |
+ if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { |
|
257 | 255 |
avfilter_unref_bufferp(&buf); |
258 | 256 |
return ret; |
259 | 257 |
} |
... | ... |
@@ -268,23 +262,11 @@ static int end_frame(AVFilterLink *inlink) |
268 | 268 |
return ret; |
269 | 269 |
} |
270 | 270 |
|
271 |
-static int null_start_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
272 |
-{ |
|
273 |
- return 0; |
|
274 |
-} |
|
275 |
- |
|
276 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
277 |
-{ |
|
278 |
- return 0; |
|
279 |
-} |
|
280 |
- |
|
281 | 271 |
static const AVFilterPad avfilter_vf_fps_inputs[] = { |
282 | 272 |
{ |
283 | 273 |
.name = "default", |
284 | 274 |
.type = AVMEDIA_TYPE_VIDEO, |
285 |
- .start_frame = null_start_frame, |
|
286 |
- .draw_slice = null_draw_slice, |
|
287 |
- .end_frame = end_frame, |
|
275 |
+ .filter_frame = filter_frame, |
|
288 | 276 |
}, |
289 | 277 |
{ NULL } |
290 | 278 |
}; |
... | ... |
@@ -346,35 +346,34 @@ static int query_formats(AVFilterContext *ctx) |
346 | 346 |
return 0; |
347 | 347 |
} |
348 | 348 |
|
349 |
-static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
350 |
-{ |
|
351 |
- return 0; |
|
352 |
-} |
|
353 |
- |
|
354 |
-static int end_frame(AVFilterLink *inlink) |
|
349 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
355 | 350 |
{ |
356 | 351 |
Frei0rContext *frei0r = inlink->dst->priv; |
357 | 352 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
358 |
- AVFilterBufferRef *inpicref = inlink->cur_buf; |
|
359 |
- AVFilterBufferRef *outpicref = outlink->out_buf; |
|
360 |
- int ret; |
|
361 |
- |
|
362 |
- frei0r->update(frei0r->instance, inpicref->pts * av_q2d(inlink->time_base) * 1000, |
|
363 |
- (const uint32_t *)inpicref->data[0], |
|
364 |
- (uint32_t *)outpicref->data[0]); |
|
365 |
- if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) || |
|
366 |
- (ret = ff_end_frame(outlink)) < 0) |
|
367 |
- return ret; |
|
368 |
- return 0; |
|
353 |
+ AVFilterBufferRef *out; |
|
354 |
+ |
|
355 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
356 |
+ if (!out) { |
|
357 |
+ avfilter_unref_bufferp(&in); |
|
358 |
+ return AVERROR(ENOMEM); |
|
359 |
+ } |
|
360 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
361 |
+ |
|
362 |
+ frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000, |
|
363 |
+ (const uint32_t *)in->data[0], |
|
364 |
+ (uint32_t *)out->data[0]); |
|
365 |
+ |
|
366 |
+ avfilter_unref_bufferp(&in); |
|
367 |
+ |
|
368 |
+ return ff_filter_frame(outlink, out); |
|
369 | 369 |
} |
370 | 370 |
|
371 | 371 |
static const AVFilterPad avfilter_vf_frei0r_inputs[] = { |
372 | 372 |
{ |
373 | 373 |
.name = "default", |
374 | 374 |
.type = AVMEDIA_TYPE_VIDEO, |
375 |
- .draw_slice = null_draw_slice, |
|
376 | 375 |
.config_props = config_input_props, |
377 |
- .end_frame = end_frame, |
|
376 |
+ .filter_frame = filter_frame, |
|
378 | 377 |
.min_perms = AV_PERM_READ |
379 | 378 |
}, |
380 | 379 |
{ NULL } |
... | ... |
@@ -456,8 +455,6 @@ static int source_request_frame(AVFilterLink *outlink) |
456 | 456 |
{ |
457 | 457 |
Frei0rContext *frei0r = outlink->src->priv; |
458 | 458 |
AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
459 |
- AVFilterBufferRef *buf_out; |
|
460 |
- int ret; |
|
461 | 459 |
|
462 | 460 |
if (!picref) |
463 | 461 |
return AVERROR(ENOMEM); |
... | ... |
@@ -466,28 +463,10 @@ static int source_request_frame(AVFilterLink *outlink) |
466 | 466 |
picref->pts = frei0r->pts++; |
467 | 467 |
picref->pos = -1; |
468 | 468 |
|
469 |
- buf_out = avfilter_ref_buffer(picref, ~0); |
|
470 |
- if (!buf_out) { |
|
471 |
- ret = AVERROR(ENOMEM); |
|
472 |
- goto fail; |
|
473 |
- } |
|
474 |
- |
|
475 |
- ret = ff_start_frame(outlink, buf_out); |
|
476 |
- if (ret < 0) |
|
477 |
- goto fail; |
|
478 |
- |
|
479 | 469 |
frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}), |
480 | 470 |
NULL, (uint32_t *)picref->data[0]); |
481 |
- ret = ff_draw_slice(outlink, 0, outlink->h, 1); |
|
482 |
- if (ret < 0) |
|
483 |
- goto fail; |
|
484 |
- |
|
485 |
- ret = ff_end_frame(outlink); |
|
486 |
- |
|
487 |
-fail: |
|
488 |
- avfilter_unref_buffer(picref); |
|
489 | 471 |
|
490 |
- return ret; |
|
472 |
+ return ff_filter_frame(outlink, picref); |
|
491 | 473 |
} |
492 | 474 |
|
493 | 475 |
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = { |
... | ... |
@@ -182,20 +182,29 @@ static int config_input(AVFilterLink *inlink) |
182 | 182 |
return 0; |
183 | 183 |
} |
184 | 184 |
|
185 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
186 |
-{ |
|
187 |
- return 0; |
|
188 |
-} |
|
189 |
- |
|
190 |
-static int end_frame(AVFilterLink *inlink) |
|
185 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
191 | 186 |
{ |
192 | 187 |
GradFunContext *gf = inlink->dst->priv; |
193 |
- AVFilterBufferRef *inpic = inlink->cur_buf; |
|
194 | 188 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
195 |
- AVFilterBufferRef *outpic = outlink->out_buf; |
|
196 |
- int p, ret; |
|
189 |
+ AVFilterBufferRef *out; |
|
190 |
+ int p, direct; |
|
191 |
+ |
|
192 |
+ if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { |
|
193 |
+ direct = 1; |
|
194 |
+ out = in; |
|
195 |
+ } else { |
|
196 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
197 |
+ if (!out) { |
|
198 |
+ avfilter_unref_bufferp(&in); |
|
199 |
+ return AVERROR(ENOMEM); |
|
200 |
+ } |
|
201 |
+ |
|
202 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
203 |
+ out->video->w = outlink->w; |
|
204 |
+ out->video->h = outlink->h; |
|
205 |
+ } |
|
197 | 206 |
|
198 |
- for (p = 0; p < 4 && inpic->data[p]; p++) { |
|
207 |
+ for (p = 0; p < 4 && in->data[p]; p++) { |
|
199 | 208 |
int w = inlink->w; |
200 | 209 |
int h = inlink->h; |
201 | 210 |
int r = gf->radius; |
... | ... |
@@ -206,15 +215,15 @@ static int end_frame(AVFilterLink *inlink) |
206 | 206 |
} |
207 | 207 |
|
208 | 208 |
if (FFMIN(w, h) > 2 * r) |
209 |
- filter(gf, outpic->data[p], inpic->data[p], w, h, outpic->linesize[p], inpic->linesize[p], r); |
|
210 |
- else if (outpic->data[p] != inpic->data[p]) |
|
211 |
- av_image_copy_plane(outpic->data[p], outpic->linesize[p], inpic->data[p], inpic->linesize[p], w, h); |
|
209 |
+ filter(gf, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r); |
|
210 |
+ else if (out->data[p] != in->data[p]) |
|
211 |
+ av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h); |
|
212 | 212 |
} |
213 | 213 |
|
214 |
- if ((ret = ff_draw_slice(outlink, 0, inlink->h, 1)) < 0 || |
|
215 |
- (ret = ff_end_frame(outlink)) < 0) |
|
216 |
- return ret; |
|
217 |
- return 0; |
|
214 |
+ if (!direct) |
|
215 |
+ avfilter_unref_bufferp(&in); |
|
216 |
+ |
|
217 |
+ return ff_filter_frame(outlink, out); |
|
218 | 218 |
} |
219 | 219 |
|
220 | 220 |
static const AVFilterPad avfilter_vf_gradfun_inputs[] = { |
... | ... |
@@ -222,9 +231,7 @@ static const AVFilterPad avfilter_vf_gradfun_inputs[] = { |
222 | 222 |
.name = "default", |
223 | 223 |
.type = AVMEDIA_TYPE_VIDEO, |
224 | 224 |
.config_props = config_input, |
225 |
- .start_frame = ff_inplace_start_frame, |
|
226 |
- .draw_slice = null_draw_slice, |
|
227 |
- .end_frame = end_frame, |
|
225 |
+ .filter_frame = filter_frame, |
|
228 | 226 |
.min_perms = AV_PERM_READ, |
229 | 227 |
}, |
230 | 228 |
{ NULL } |
... | ... |
@@ -84,22 +84,30 @@ static int config_props(AVFilterLink *inlink) |
84 | 84 |
return 0; |
85 | 85 |
} |
86 | 86 |
|
87 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
87 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
88 | 88 |
{ |
89 |
- FlipContext *flip = inlink->dst->priv; |
|
90 |
- AVFilterBufferRef *inpic = inlink->cur_buf; |
|
91 |
- AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf; |
|
89 |
+ AVFilterContext *ctx = inlink->dst; |
|
90 |
+ FlipContext *flip = ctx->priv; |
|
91 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
92 |
+ AVFilterBufferRef *out; |
|
92 | 93 |
uint8_t *inrow, *outrow; |
93 | 94 |
int i, j, plane, step, hsub, vsub; |
94 | 95 |
|
95 |
- for (plane = 0; plane < 4 && inpic->data[plane]; plane++) { |
|
96 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
97 |
+ if (!out) { |
|
98 |
+ avfilter_unref_bufferp(&in); |
|
99 |
+ return AVERROR(ENOMEM); |
|
100 |
+ } |
|
101 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
102 |
+ |
|
103 |
+ for (plane = 0; plane < 4 && in->data[plane]; plane++) { |
|
96 | 104 |
step = flip->max_step[plane]; |
97 | 105 |
hsub = (plane == 1 || plane == 2) ? flip->hsub : 0; |
98 | 106 |
vsub = (plane == 1 || plane == 2) ? flip->vsub : 0; |
99 | 107 |
|
100 |
- outrow = outpic->data[plane] + (y>>vsub) * outpic->linesize[plane]; |
|
101 |
- inrow = inpic ->data[plane] + (y>>vsub) * inpic ->linesize[plane] + ((inlink->w >> hsub) - 1) * step; |
|
102 |
- for (i = 0; i < h>>vsub; i++) { |
|
108 |
+ outrow = out->data[plane]; |
|
109 |
+ inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step; |
|
110 |
+ for (i = 0; i < in->video->h >> vsub; i++) { |
|
103 | 111 |
switch (step) { |
104 | 112 |
case 1: |
105 | 113 |
for (j = 0; j < (inlink->w >> hsub); j++) |
... | ... |
@@ -140,19 +148,20 @@ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
140 | 140 |
memcpy(outrow + j*step, inrow - j*step, step); |
141 | 141 |
} |
142 | 142 |
|
143 |
- inrow += inpic ->linesize[plane]; |
|
144 |
- outrow += outpic->linesize[plane]; |
|
143 |
+ inrow += in ->linesize[plane]; |
|
144 |
+ outrow += out->linesize[plane]; |
|
145 | 145 |
} |
146 | 146 |
} |
147 | 147 |
|
148 |
- return ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); |
|
148 |
+ avfilter_unref_bufferp(&in); |
|
149 |
+ return ff_filter_frame(outlink, out); |
|
149 | 150 |
} |
150 | 151 |
|
151 | 152 |
static const AVFilterPad avfilter_vf_hflip_inputs[] = { |
152 | 153 |
{ |
153 | 154 |
.name = "default", |
154 | 155 |
.type = AVMEDIA_TYPE_VIDEO, |
155 |
- .draw_slice = draw_slice, |
|
156 |
+ .filter_frame = filter_frame, |
|
156 | 157 |
.config_props = config_props, |
157 | 158 |
.min_perms = AV_PERM_READ, |
158 | 159 |
}, |
... | ... |
@@ -322,42 +322,49 @@ static int config_input(AVFilterLink *inlink) |
322 | 322 |
return 0; |
323 | 323 |
} |
324 | 324 |
|
325 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
326 |
-{ |
|
327 |
- return 0; |
|
328 |
-} |
|
329 |
- |
|
330 |
-static int end_frame(AVFilterLink *inlink) |
|
325 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
331 | 326 |
{ |
332 | 327 |
HQDN3DContext *hqdn3d = inlink->dst->priv; |
333 | 328 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
334 |
- AVFilterBufferRef *inpic = inlink ->cur_buf; |
|
335 |
- AVFilterBufferRef *outpic = outlink->out_buf; |
|
336 |
- int ret, c; |
|
329 |
+ AVFilterBufferRef *out; |
|
330 |
+ int direct, c; |
|
331 |
+ |
|
332 |
+ if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { |
|
333 |
+ direct = 1; |
|
334 |
+ out = in; |
|
335 |
+ } else { |
|
336 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
337 |
+ if (!out) { |
|
338 |
+ avfilter_unref_bufferp(&in); |
|
339 |
+ return AVERROR(ENOMEM); |
|
340 |
+ } |
|
341 |
+ |
|
342 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
343 |
+ out->video->w = outlink->w; |
|
344 |
+ out->video->h = outlink->h; |
|
345 |
+ } |
|
337 | 346 |
|
338 | 347 |
for (c = 0; c < 3; c++) { |
339 |
- denoise(hqdn3d, inpic->data[c], outpic->data[c], |
|
348 |
+ denoise(hqdn3d, in->data[c], out->data[c], |
|
340 | 349 |
hqdn3d->line, &hqdn3d->frame_prev[c], |
341 |
- inpic->video->w >> (!!c * hqdn3d->hsub), |
|
342 |
- inpic->video->h >> (!!c * hqdn3d->vsub), |
|
343 |
- inpic->linesize[c], outpic->linesize[c], |
|
350 |
+ in->video->w >> (!!c * hqdn3d->hsub), |
|
351 |
+ in->video->h >> (!!c * hqdn3d->vsub), |
|
352 |
+ in->linesize[c], out->linesize[c], |
|
344 | 353 |
hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]); |
345 | 354 |
} |
346 | 355 |
|
347 |
- if ((ret = ff_draw_slice(outlink, 0, inpic->video->h, 1)) < 0 || |
|
348 |
- (ret = ff_end_frame(outlink)) < 0) |
|
349 |
- return ret; |
|
350 |
- return 0; |
|
356 |
+ if (!direct) |
|
357 |
+ avfilter_unref_bufferp(&in); |
|
358 |
+ |
|
359 |
+ return ff_filter_frame(outlink, out); |
|
351 | 360 |
} |
352 | 361 |
|
353 | 362 |
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = { |
354 | 363 |
{ |
355 | 364 |
.name = "default", |
356 | 365 |
.type = AVMEDIA_TYPE_VIDEO, |
357 |
- .start_frame = ff_inplace_start_frame, |
|
358 |
- .draw_slice = null_draw_slice, |
|
359 | 366 |
.config_props = config_input, |
360 |
- .end_frame = end_frame |
|
367 |
+ .filter_frame = filter_frame, |
|
361 | 368 |
}, |
362 | 369 |
{ NULL } |
363 | 370 |
}; |
... | ... |
@@ -32,6 +32,7 @@ |
32 | 32 |
#include "libavutil/file.h" |
33 | 33 |
#include "avfilter.h" |
34 | 34 |
#include "formats.h" |
35 |
+#include "internal.h" |
|
35 | 36 |
#include "video.h" |
36 | 37 |
|
37 | 38 |
static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *picref, enum AVPixelFormat pixfmt) |
... | ... |
@@ -68,11 +69,6 @@ static int query_formats(AVFilterContext *ctx) |
68 | 68 |
return 0; |
69 | 69 |
} |
70 | 70 |
|
71 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
72 |
-{ |
|
73 |
- return 0; |
|
74 |
-} |
|
75 |
- |
|
76 | 71 |
typedef struct { |
77 | 72 |
const char *name; |
78 | 73 |
int (*init)(AVFilterContext *ctx, const char *args); |
... | ... |
@@ -355,33 +351,36 @@ static av_cold void uninit(AVFilterContext *ctx) |
355 | 355 |
memset(ocv, 0, sizeof(*ocv)); |
356 | 356 |
} |
357 | 357 |
|
358 |
-static int end_frame(AVFilterLink *inlink) |
|
358 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
359 | 359 |
{ |
360 | 360 |
AVFilterContext *ctx = inlink->dst; |
361 | 361 |
OCVContext *ocv = ctx->priv; |
362 | 362 |
AVFilterLink *outlink= inlink->dst->outputs[0]; |
363 |
- AVFilterBufferRef *inpicref = inlink ->cur_buf; |
|
364 |
- AVFilterBufferRef *outpicref = outlink->out_buf; |
|
363 |
+ AVFilterBufferRef *out; |
|
365 | 364 |
IplImage inimg, outimg; |
366 |
- int ret; |
|
367 | 365 |
|
368 |
- fill_iplimage_from_picref(&inimg , inpicref , inlink->format); |
|
369 |
- fill_iplimage_from_picref(&outimg, outpicref, inlink->format); |
|
366 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
367 |
+ if (!out) { |
|
368 |
+ avfilter_unref_bufferp(&in); |
|
369 |
+ return AVERROR(ENOMEM); |
|
370 |
+ } |
|
371 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
372 |
+ |
|
373 |
+ fill_iplimage_from_picref(&inimg , in , inlink->format); |
|
374 |
+ fill_iplimage_from_picref(&outimg, out, inlink->format); |
|
370 | 375 |
ocv->end_frame_filter(ctx, &inimg, &outimg); |
371 |
- fill_picref_from_iplimage(outpicref, &outimg, inlink->format); |
|
376 |
+ fill_picref_from_iplimage(out, &outimg, inlink->format); |
|
372 | 377 |
|
373 |
- if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || |
|
374 |
- (ret = ff_end_frame(outlink)) < 0) |
|
375 |
- return ret; |
|
376 |
- return 0; |
|
378 |
+ avfilter_unref_bufferp(&in); |
|
379 |
+ |
|
380 |
+ return ff_filter_frame(outlink, out); |
|
377 | 381 |
} |
378 | 382 |
|
379 | 383 |
static const AVFilterPad avfilter_vf_ocv_inputs[] = { |
380 | 384 |
{ |
381 | 385 |
.name = "default", |
382 | 386 |
.type = AVMEDIA_TYPE_VIDEO, |
383 |
- .draw_slice = null_draw_slice, |
|
384 |
- .end_frame = end_frame, |
|
387 |
+ .filter_frame = filter_frame, |
|
385 | 388 |
.min_perms = AV_PERM_READ |
386 | 389 |
}, |
387 | 390 |
{ NULL } |
... | ... |
@@ -295,22 +295,28 @@ static int config_props(AVFilterLink *inlink) |
295 | 295 |
return 0; |
296 | 296 |
} |
297 | 297 |
|
298 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
298 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
299 | 299 |
{ |
300 | 300 |
AVFilterContext *ctx = inlink->dst; |
301 | 301 |
LutContext *lut = ctx->priv; |
302 | 302 |
AVFilterLink *outlink = ctx->outputs[0]; |
303 |
- AVFilterBufferRef *inpic = inlink ->cur_buf; |
|
304 |
- AVFilterBufferRef *outpic = outlink->out_buf; |
|
303 |
+ AVFilterBufferRef *out; |
|
305 | 304 |
uint8_t *inrow, *outrow, *inrow0, *outrow0; |
306 | 305 |
int i, j, k, plane; |
307 | 306 |
|
307 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
308 |
+ if (!out) { |
|
309 |
+ avfilter_unref_bufferp(&in); |
|
310 |
+ return AVERROR(ENOMEM); |
|
311 |
+ } |
|
312 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
313 |
+ |
|
308 | 314 |
if (lut->is_rgb) { |
309 | 315 |
/* packed */ |
310 |
- inrow0 = inpic ->data[0] + y * inpic ->linesize[0]; |
|
311 |
- outrow0 = outpic->data[0] + y * outpic->linesize[0]; |
|
316 |
+ inrow0 = in ->data[0]; |
|
317 |
+ outrow0 = out->data[0]; |
|
312 | 318 |
|
313 |
- for (i = 0; i < h; i ++) { |
|
319 |
+ for (i = 0; i < in->video->h; i ++) { |
|
314 | 320 |
inrow = inrow0; |
315 | 321 |
outrow = outrow0; |
316 | 322 |
for (j = 0; j < inlink->w; j++) { |
... | ... |
@@ -319,34 +325,35 @@ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
319 | 319 |
outrow += lut->step; |
320 | 320 |
inrow += lut->step; |
321 | 321 |
} |
322 |
- inrow0 += inpic ->linesize[0]; |
|
323 |
- outrow0 += outpic->linesize[0]; |
|
322 |
+ inrow0 += in ->linesize[0]; |
|
323 |
+ outrow0 += out->linesize[0]; |
|
324 | 324 |
} |
325 | 325 |
} else { |
326 | 326 |
/* planar */ |
327 |
- for (plane = 0; plane < 4 && inpic->data[plane]; plane++) { |
|
327 |
+ for (plane = 0; plane < 4 && in->data[plane]; plane++) { |
|
328 | 328 |
int vsub = plane == 1 || plane == 2 ? lut->vsub : 0; |
329 | 329 |
int hsub = plane == 1 || plane == 2 ? lut->hsub : 0; |
330 | 330 |
|
331 |
- inrow = inpic ->data[plane] + (y>>vsub) * inpic ->linesize[plane]; |
|
332 |
- outrow = outpic->data[plane] + (y>>vsub) * outpic->linesize[plane]; |
|
331 |
+ inrow = in ->data[plane]; |
|
332 |
+ outrow = out->data[plane]; |
|
333 | 333 |
|
334 |
- for (i = 0; i < h>>vsub; i ++) { |
|
334 |
+ for (i = 0; i < in->video->h >> vsub; i ++) { |
|
335 | 335 |
for (j = 0; j < inlink->w>>hsub; j++) |
336 | 336 |
outrow[j] = lut->lut[plane][inrow[j]]; |
337 |
- inrow += inpic ->linesize[plane]; |
|
338 |
- outrow += outpic->linesize[plane]; |
|
337 |
+ inrow += in ->linesize[plane]; |
|
338 |
+ outrow += out->linesize[plane]; |
|
339 | 339 |
} |
340 | 340 |
} |
341 | 341 |
} |
342 | 342 |
|
343 |
- return ff_draw_slice(outlink, y, h, slice_dir); |
|
343 |
+ avfilter_unref_bufferp(&in); |
|
344 |
+ return ff_filter_frame(outlink, out); |
|
344 | 345 |
} |
345 | 346 |
|
346 | 347 |
static const AVFilterPad inputs[] = { |
347 | 348 |
{ .name = "default", |
348 | 349 |
.type = AVMEDIA_TYPE_VIDEO, |
349 |
- .draw_slice = draw_slice, |
|
350 |
+ .filter_frame = filter_frame, |
|
350 | 351 |
.config_props = config_props, |
351 | 352 |
.min_perms = AV_PERM_READ, }, |
352 | 353 |
{ .name = NULL} |
... | ... |
@@ -31,8 +31,6 @@ static const AVFilterPad avfilter_vf_null_inputs[] = { |
31 | 31 |
.name = "default", |
32 | 32 |
.type = AVMEDIA_TYPE_VIDEO, |
33 | 33 |
.get_video_buffer = ff_null_get_video_buffer, |
34 |
- .start_frame = ff_null_start_frame, |
|
35 |
- .end_frame = ff_null_end_frame |
|
36 | 34 |
}, |
37 | 35 |
{ NULL } |
38 | 36 |
}; |
... | ... |
@@ -269,34 +269,22 @@ static void blend_frame(AVFilterContext *ctx, |
269 | 269 |
} |
270 | 270 |
} |
271 | 271 |
|
272 |
-static int null_start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
273 |
-{ |
|
274 |
- return 0; |
|
275 |
-} |
|
276 |
- |
|
277 |
-static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
278 |
-{ |
|
279 |
- return 0; |
|
280 |
-} |
|
281 |
- |
|
282 |
-static int end_frame_main(AVFilterLink *inlink) |
|
272 |
+static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
283 | 273 |
{ |
284 | 274 |
OverlayContext *s = inlink->dst->priv; |
285 | 275 |
|
286 | 276 |
av_assert0(!s->main); |
287 |
- s->main = inlink->cur_buf; |
|
288 |
- inlink->cur_buf = NULL; |
|
277 |
+ s->main = frame; |
|
289 | 278 |
|
290 | 279 |
return 0; |
291 | 280 |
} |
292 | 281 |
|
293 |
-static int end_frame_overlay(AVFilterLink *inlink) |
|
282 |
+static int filter_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
294 | 283 |
{ |
295 | 284 |
OverlayContext *s = inlink->dst->priv; |
296 | 285 |
|
297 | 286 |
av_assert0(!s->over_next); |
298 |
- s->over_next = inlink->cur_buf; |
|
299 |
- inlink->cur_buf = NULL; |
|
287 |
+ s->over_next = frame; |
|
300 | 288 |
|
301 | 289 |
return 0; |
302 | 290 |
} |
... | ... |
@@ -305,11 +293,7 @@ static int output_frame(AVFilterContext *ctx) |
305 | 305 |
{ |
306 | 306 |
OverlayContext *s = ctx->priv; |
307 | 307 |
AVFilterLink *outlink = ctx->outputs[0]; |
308 |
- int ret = ff_start_frame(outlink, s->main); |
|
309 |
- if (ret >= 0) |
|
310 |
- ret = ff_draw_slice(outlink, 0, outlink->h, 1); |
|
311 |
- if (ret >= 0) |
|
312 |
- ret = ff_end_frame(outlink); |
|
308 |
+ int ret = ff_filter_frame(outlink, s->main); |
|
313 | 309 |
s->main = NULL; |
314 | 310 |
|
315 | 311 |
return ret; |
... | ... |
@@ -378,10 +362,8 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = { |
378 | 378 |
{ |
379 | 379 |
.name = "main", |
380 | 380 |
.type = AVMEDIA_TYPE_VIDEO, |
381 |
- .start_frame = null_start_frame, |
|
382 | 381 |
.config_props = config_input_main, |
383 |
- .draw_slice = null_draw_slice, |
|
384 |
- .end_frame = end_frame_main, |
|
382 |
+ .filter_frame = filter_frame_main, |
|
385 | 383 |
.min_perms = AV_PERM_READ, |
386 | 384 |
.rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE, |
387 | 385 |
.needs_fifo = 1, |
... | ... |
@@ -389,10 +371,8 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = { |
389 | 389 |
{ |
390 | 390 |
.name = "overlay", |
391 | 391 |
.type = AVMEDIA_TYPE_VIDEO, |
392 |
- .start_frame = null_start_frame, |
|
393 | 392 |
.config_props = config_input_overlay, |
394 |
- .draw_slice = null_draw_slice, |
|
395 |
- .end_frame = end_frame_overlay, |
|
393 |
+ .filter_frame = filter_frame_overlay, |
|
396 | 394 |
.min_perms = AV_PERM_READ, |
397 | 395 |
.rej_perms = AV_PERM_REUSE2, |
398 | 396 |
.needs_fifo = 1, |
... | ... |
@@ -106,7 +106,6 @@ typedef struct { |
106 | 106 |
uint8_t *line[4]; |
107 | 107 |
int line_step[4]; |
108 | 108 |
int hsub, vsub; ///< chroma subsampling values |
109 |
- int needs_copy; |
|
110 | 109 |
} PadContext; |
111 | 110 |
|
112 | 111 |
static av_cold int init(AVFilterContext *ctx, const char *args) |
... | ... |
@@ -303,135 +302,85 @@ static int does_clip(PadContext *pad, AVFilterBufferRef *outpicref, int plane, i |
303 | 303 |
return 0; |
304 | 304 |
} |
305 | 305 |
|
306 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
306 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
307 | 307 |
{ |
308 | 308 |
PadContext *pad = inlink->dst->priv; |
309 |
- AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0); |
|
310 |
- AVFilterBufferRef *for_next_filter; |
|
311 |
- int plane, ret = 0; |
|
309 |
+ AVFilterBufferRef *out = avfilter_ref_buffer(in, ~0); |
|
310 |
+ int plane, needs_copy; |
|
312 | 311 |
|
313 |
- if (!outpicref) |
|
312 |
+ if (!out) { |
|
313 |
+ avfilter_unref_bufferp(&in); |
|
314 | 314 |
return AVERROR(ENOMEM); |
315 |
+ } |
|
315 | 316 |
|
316 |
- for (plane = 0; plane < 4 && outpicref->data[plane]; plane++) { |
|
317 |
+ for (plane = 0; plane < 4 && out->data[plane]; plane++) { |
|
317 | 318 |
int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0; |
318 | 319 |
int vsub = (plane == 1 || plane == 2) ? pad->vsub : 0; |
319 | 320 |
|
320 |
- av_assert0(outpicref->buf->w>0 && outpicref->buf->h>0); |
|
321 |
+ av_assert0(out->buf->w > 0 && out->buf->h > 0); |
|
321 | 322 |
|
322 |
- if(outpicref->format != outpicref->buf->format) //unsupported currently |
|
323 |
+ if (out->format != out->buf->format) //unsupported currently |
|
323 | 324 |
break; |
324 | 325 |
|
325 |
- outpicref->data[plane] -= (pad->x >> hsub) * pad ->line_step[plane] |
|
326 |
- + (pad->y >> vsub) * outpicref->linesize [plane]; |
|
326 |
+ out->data[plane] -= (pad->x >> hsub) * pad->line_step[plane] + |
|
327 |
+ (pad->y >> vsub) * out->linesize [plane]; |
|
327 | 328 |
|
328 |
- if( does_clip(pad, outpicref, plane, hsub, vsub, 0, 0) |
|
329 |
- || does_clip(pad, outpicref, plane, hsub, vsub, 0, pad->h-1) |
|
330 |
- || does_clip(pad, outpicref, plane, hsub, vsub, pad->w-1, 0) |
|
331 |
- || does_clip(pad, outpicref, plane, hsub, vsub, pad->w-1, pad->h-1) |
|
332 |
- ) |
|
329 |
+ if (does_clip(pad, out, plane, hsub, vsub, 0, 0) || |
|
330 |
+ does_clip(pad, out, plane, hsub, vsub, 0, pad->h - 1) || |
|
331 |
+ does_clip(pad, out, plane, hsub, vsub, pad->w - 1, 0) || |
|
332 |
+ does_clip(pad, out, plane, hsub, vsub, pad->w - 1, pad->h - 1)) |
|
333 | 333 |
break; |
334 | 334 |
} |
335 |
- pad->needs_copy= plane < 4 && outpicref->data[plane]; |
|
336 |
- if(pad->needs_copy){ |
|
335 |
+ needs_copy = plane < 4 && out->data[plane]; |
|
336 |
+ if (needs_copy) { |
|
337 | 337 |
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n"); |
338 |
- avfilter_unref_buffer(outpicref); |
|
339 |
- outpicref = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES, |
|
340 |
- FFMAX(inlink->w, pad->w), |
|
341 |
- FFMAX(inlink->h, pad->h)); |
|
342 |
- if (!outpicref) |
|
338 |
+ avfilter_unref_buffer(out); |
|
339 |
+ out = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES, |
|
340 |
+ FFMAX(inlink->w, pad->w), |
|
341 |
+ FFMAX(inlink->h, pad->h)); |
|
342 |
+ if (!out) { |
|
343 |
+ avfilter_unref_bufferp(&in); |
|
343 | 344 |
return AVERROR(ENOMEM); |
345 |
+ } |
|
344 | 346 |
|
345 |
- avfilter_copy_buffer_ref_props(outpicref, inpicref); |
|
346 |
- } |
|
347 |
- |
|
348 |
- outpicref->video->w = pad->w; |
|
349 |
- outpicref->video->h = pad->h; |
|
350 |
- |
|
351 |
- for_next_filter = avfilter_ref_buffer(outpicref, ~0); |
|
352 |
- if (!for_next_filter) { |
|
353 |
- ret = AVERROR(ENOMEM); |
|
354 |
- goto fail; |
|
347 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
355 | 348 |
} |
356 | 349 |
|
357 |
- ret = ff_start_frame(inlink->dst->outputs[0], for_next_filter); |
|
358 |
- if (ret < 0) |
|
359 |
- goto fail; |
|
360 |
- |
|
361 |
- inlink->dst->outputs[0]->out_buf = outpicref; |
|
362 |
- return 0; |
|
363 |
- |
|
364 |
-fail: |
|
365 |
- avfilter_unref_bufferp(&outpicref); |
|
366 |
- return ret; |
|
367 |
-} |
|
368 |
- |
|
369 |
-static int end_frame(AVFilterLink *link) |
|
370 |
-{ |
|
371 |
- return ff_end_frame(link->dst->outputs[0]); |
|
372 |
-} |
|
350 |
+ out->video->w = pad->w; |
|
351 |
+ out->video->h = pad->h; |
|
373 | 352 |
|
374 |
-static int draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir, int before_slice) |
|
375 |
-{ |
|
376 |
- PadContext *pad = link->dst->priv; |
|
377 |
- int bar_y, bar_h = 0, ret = 0; |
|
378 |
- |
|
379 |
- if (slice_dir * before_slice == 1 && y == pad->y) { |
|
380 |
- /* top bar */ |
|
381 |
- bar_y = 0; |
|
382 |
- bar_h = pad->y; |
|
383 |
- } else if (slice_dir * before_slice == -1 && (y + h) == (pad->y + pad->in_h)) { |
|
384 |
- /* bottom bar */ |
|
385 |
- bar_y = pad->y + pad->in_h; |
|
386 |
- bar_h = pad->h - pad->in_h - pad->y; |
|
353 |
+ /* top bar */ |
|
354 |
+ if (pad->y) { |
|
355 |
+ ff_draw_rectangle(out->data, out->linesize, |
|
356 |
+ pad->line, pad->line_step, pad->hsub, pad->vsub, |
|
357 |
+ 0, 0, pad->w, pad->y); |
|
387 | 358 |
} |
388 | 359 |
|
389 |
- if (bar_h) { |
|
390 |
- ff_draw_rectangle(link->dst->outputs[0]->out_buf->data, |
|
391 |
- link->dst->outputs[0]->out_buf->linesize, |
|
360 |
+ /* bottom bar */ |
|
361 |
+ if (pad->h > pad->y + pad->in_h) { |
|
362 |
+ ff_draw_rectangle(out->data, out->linesize, |
|
392 | 363 |
pad->line, pad->line_step, pad->hsub, pad->vsub, |
393 |
- 0, bar_y, pad->w, bar_h); |
|
394 |
- ret = ff_draw_slice(link->dst->outputs[0], bar_y, bar_h, slice_dir); |
|
364 |
+ 0, pad->y + pad->in_h, pad->w, pad->h - pad->y - pad->in_h); |
|
395 | 365 |
} |
396 |
- return ret; |
|
397 |
-} |
|
398 |
- |
|
399 |
-static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
400 |
-{ |
|
401 |
- PadContext *pad = link->dst->priv; |
|
402 |
- AVFilterBufferRef *outpic = link->dst->outputs[0]->out_buf; |
|
403 |
- AVFilterBufferRef *inpic = link->cur_buf; |
|
404 |
- int ret; |
|
405 |
- |
|
406 |
- y += pad->y; |
|
407 |
- |
|
408 |
- y &= ~((1 << pad->vsub) - 1); |
|
409 |
- h &= ~((1 << pad->vsub) - 1); |
|
410 |
- |
|
411 |
- if (!h) |
|
412 |
- return 0; |
|
413 |
- draw_send_bar_slice(link, y, h, slice_dir, 1); |
|
414 | 366 |
|
415 | 367 |
/* left border */ |
416 |
- ff_draw_rectangle(outpic->data, outpic->linesize, pad->line, pad->line_step, |
|
417 |
- pad->hsub, pad->vsub, 0, y, pad->x, h); |
|
418 |
- |
|
419 |
- if(pad->needs_copy){ |
|
420 |
- ff_copy_rectangle(outpic->data, outpic->linesize, |
|
421 |
- inpic->data, inpic->linesize, pad->line_step, |
|
422 |
- pad->hsub, pad->vsub, |
|
423 |
- pad->x, y, y-pad->y, inpic->video->w, h); |
|
368 |
+ ff_draw_rectangle(out->data, out->linesize, pad->line, pad->line_step, |
|
369 |
+ pad->hsub, pad->vsub, 0, pad->y, pad->x, in->video->h); |
|
370 |
+ |
|
371 |
+ if (needs_copy) { |
|
372 |
+ ff_copy_rectangle(out->data, out->linesize, in->data, in->linesize, |
|
373 |
+ pad->line_step, pad->hsub, pad->vsub, |
|
374 |
+ pad->x, pad->y, 0, in->video->w, in->video->h); |
|
424 | 375 |
} |
425 | 376 |
|
426 | 377 |
/* right border */ |
427 |
- ff_draw_rectangle(outpic->data, outpic->linesize, |
|
378 |
+ ff_draw_rectangle(out->data, out->linesize, |
|
428 | 379 |
pad->line, pad->line_step, pad->hsub, pad->vsub, |
429 |
- pad->x + pad->in_w, y, pad->w - pad->x - pad->in_w, h); |
|
430 |
- ret = ff_draw_slice(link->dst->outputs[0], y, h, slice_dir); |
|
431 |
- if (ret < 0) |
|
432 |
- return ret; |
|
380 |
+ pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w, |
|
381 |
+ in->video->h); |
|
433 | 382 |
|
434 |
- return draw_send_bar_slice(link, y, h, slice_dir, -1); |
|
383 |
+ avfilter_unref_bufferp(&in); |
|
384 |
+ return ff_filter_frame(inlink->dst->outputs[0], out); |
|
435 | 385 |
} |
436 | 386 |
|
437 | 387 |
static const AVFilterPad avfilter_vf_pad_inputs[] = { |
... | ... |
@@ -440,9 +389,7 @@ static const AVFilterPad avfilter_vf_pad_inputs[] = { |
440 | 440 |
.type = AVMEDIA_TYPE_VIDEO, |
441 | 441 |
.config_props = config_input, |
442 | 442 |
.get_video_buffer = get_video_buffer, |
443 |
- .start_frame = start_frame, |
|
444 |
- .draw_slice = draw_slice, |
|
445 |
- .end_frame = end_frame, |
|
443 |
+ .filter_frame = filter_frame, |
|
446 | 444 |
}, |
447 | 445 |
{ NULL } |
448 | 446 |
}; |
... | ... |
@@ -52,86 +52,65 @@ static int config_props(AVFilterLink *inlink) |
52 | 52 |
return 0; |
53 | 53 |
} |
54 | 54 |
|
55 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
55 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
56 | 56 |
{ |
57 | 57 |
PixdescTestContext *priv = inlink->dst->priv; |
58 | 58 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
59 |
- AVFilterBufferRef *outpicref, *for_next_filter; |
|
60 |
- int i, ret = 0; |
|
59 |
+ AVFilterBufferRef *out; |
|
60 |
+ int i, c, w = inlink->w, h = inlink->h; |
|
61 | 61 |
|
62 |
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
63 |
- outlink->w, outlink->h); |
|
64 |
- if (!outpicref) |
|
62 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
63 |
+ outlink->w, outlink->h); |
|
64 |
+ if (!out) { |
|
65 |
+ avfilter_unref_bufferp(&in); |
|
65 | 66 |
return AVERROR(ENOMEM); |
67 |
+ } |
|
66 | 68 |
|
67 |
- avfilter_copy_buffer_ref_props(outpicref, picref); |
|
69 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
68 | 70 |
|
69 | 71 |
for (i = 0; i < 4; i++) { |
70 | 72 |
int h = outlink->h; |
71 | 73 |
h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h; |
72 |
- if (outpicref->data[i]) { |
|
73 |
- uint8_t *data = outpicref->data[i] + |
|
74 |
- (outpicref->linesize[i] > 0 ? 0 : outpicref->linesize[i] * (h-1)); |
|
75 |
- memset(data, 0, FFABS(outpicref->linesize[i]) * h); |
|
74 |
+ if (out->data[i]) { |
|
75 |
+ uint8_t *data = out->data[i] + |
|
76 |
+ (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h-1)); |
|
77 |
+ memset(data, 0, FFABS(out->linesize[i]) * h); |
|
76 | 78 |
} |
77 | 79 |
} |
78 | 80 |
|
79 | 81 |
/* copy palette */ |
80 | 82 |
if (priv->pix_desc->flags & PIX_FMT_PAL || |
81 | 83 |
priv->pix_desc->flags & PIX_FMT_PSEUDOPAL) |
82 |
- memcpy(outpicref->data[1], outpicref->data[1], 256*4); |
|
83 |
- |
|
84 |
- for_next_filter = avfilter_ref_buffer(outpicref, ~0); |
|
85 |
- if (for_next_filter) |
|
86 |
- ret = ff_start_frame(outlink, for_next_filter); |
|
87 |
- else |
|
88 |
- ret = AVERROR(ENOMEM); |
|
89 |
- |
|
90 |
- if (ret < 0) { |
|
91 |
- avfilter_unref_bufferp(&outpicref); |
|
92 |
- return ret; |
|
93 |
- } |
|
94 |
- |
|
95 |
- outlink->out_buf = outpicref; |
|
96 |
- return 0; |
|
97 |
-} |
|
98 |
- |
|
99 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
100 |
-{ |
|
101 |
- PixdescTestContext *priv = inlink->dst->priv; |
|
102 |
- AVFilterBufferRef *inpic = inlink->cur_buf; |
|
103 |
- AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf; |
|
104 |
- int i, c, w = inlink->w; |
|
84 |
+ memcpy(out->data[1], in->data[1], 256*4); |
|
105 | 85 |
|
106 | 86 |
for (c = 0; c < priv->pix_desc->nb_components; c++) { |
107 | 87 |
int w1 = c == 1 || c == 2 ? w>>priv->pix_desc->log2_chroma_w : w; |
108 | 88 |
int h1 = c == 1 || c == 2 ? h>>priv->pix_desc->log2_chroma_h : h; |
109 |
- int y1 = c == 1 || c == 2 ? y>>priv->pix_desc->log2_chroma_h : y; |
|
110 | 89 |
|
111 |
- for (i = y1; i < y1 + h1; i++) { |
|
90 |
+ for (i = 0; i < h1; i++) { |
|
112 | 91 |
av_read_image_line(priv->line, |
113 |
- inpic->data, |
|
114 |
- inpic->linesize, |
|
92 |
+ in->data, |
|
93 |
+ in->linesize, |
|
115 | 94 |
priv->pix_desc, |
116 | 95 |
0, i, c, w1, 0); |
117 | 96 |
|
118 | 97 |
av_write_image_line(priv->line, |
119 |
- outpic->data, |
|
120 |
- outpic->linesize, |
|
98 |
+ out->data, |
|
99 |
+ out->linesize, |
|
121 | 100 |
priv->pix_desc, |
122 | 101 |
0, i, c, w1); |
123 | 102 |
} |
124 | 103 |
} |
125 | 104 |
|
126 |
- return ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); |
|
105 |
+ avfilter_unref_bufferp(&in); |
|
106 |
+ return ff_filter_frame(outlink, out); |
|
127 | 107 |
} |
128 | 108 |
|
129 | 109 |
static const AVFilterPad avfilter_vf_pixdesctest_inputs[] = { |
130 | 110 |
{ |
131 | 111 |
.name = "default", |
132 | 112 |
.type = AVMEDIA_TYPE_VIDEO, |
133 |
- .start_frame = start_frame, |
|
134 |
- .draw_slice = draw_slice, |
|
113 |
+ .filter_frame = filter_frame, |
|
135 | 114 |
.config_props = config_props, |
136 | 115 |
.min_perms = AV_PERM_READ, |
137 | 116 |
}, |
... | ... |
@@ -257,93 +257,46 @@ fail: |
257 | 257 |
return ret; |
258 | 258 |
} |
259 | 259 |
|
260 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
260 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
261 | 261 |
{ |
262 | 262 |
ScaleContext *scale = link->dst->priv; |
263 | 263 |
AVFilterLink *outlink = link->dst->outputs[0]; |
264 |
- AVFilterBufferRef *outpicref, *for_next_filter; |
|
264 |
+ AVFilterBufferRef *out; |
|
265 | 265 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); |
266 |
- int ret = 0; |
|
267 | 266 |
|
268 |
- if (!scale->sws) { |
|
269 |
- outpicref = avfilter_ref_buffer(picref, ~0); |
|
270 |
- if (!outpicref) |
|
271 |
- return AVERROR(ENOMEM); |
|
272 |
- return ff_start_frame(outlink, outpicref); |
|
273 |
- } |
|
267 |
+ if (!scale->sws) |
|
268 |
+ return ff_filter_frame(outlink, in); |
|
274 | 269 |
|
275 | 270 |
scale->hsub = desc->log2_chroma_w; |
276 | 271 |
scale->vsub = desc->log2_chroma_h; |
277 | 272 |
|
278 |
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
279 |
- if (!outpicref) |
|
273 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
274 |
+ if (!out) { |
|
275 |
+ avfilter_unref_bufferp(&in); |
|
280 | 276 |
return AVERROR(ENOMEM); |
281 |
- |
|
282 |
- avfilter_copy_buffer_ref_props(outpicref, picref); |
|
283 |
- outpicref->video->w = outlink->w; |
|
284 |
- outpicref->video->h = outlink->h; |
|
285 |
- |
|
286 |
- |
|
287 |
- av_reduce(&outpicref->video->pixel_aspect.num, &outpicref->video->pixel_aspect.den, |
|
288 |
- (int64_t)picref->video->pixel_aspect.num * outlink->h * link->w, |
|
289 |
- (int64_t)picref->video->pixel_aspect.den * outlink->w * link->h, |
|
290 |
- INT_MAX); |
|
291 |
- |
|
292 |
- scale->slice_y = 0; |
|
293 |
- for_next_filter = avfilter_ref_buffer(outpicref, ~0); |
|
294 |
- if (for_next_filter) |
|
295 |
- ret = ff_start_frame(outlink, for_next_filter); |
|
296 |
- else |
|
297 |
- ret = AVERROR(ENOMEM); |
|
298 |
- |
|
299 |
- if (ret < 0) { |
|
300 |
- avfilter_unref_bufferp(&outpicref); |
|
301 |
- return ret; |
|
302 | 277 |
} |
303 | 278 |
|
304 |
- outlink->out_buf = outpicref; |
|
305 |
- return 0; |
|
306 |
-} |
|
279 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
280 |
+ out->video->w = outlink->w; |
|
281 |
+ out->video->h = outlink->h; |
|
307 | 282 |
|
308 |
-static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
309 |
-{ |
|
310 |
- ScaleContext *scale = link->dst->priv; |
|
311 |
- int out_h, ret; |
|
312 |
- AVFilterBufferRef *cur_pic = link->cur_buf; |
|
313 |
- const uint8_t *data[4]; |
|
283 |
+ av_reduce(&out->video->pixel_aspect.num, &out->video->pixel_aspect.den, |
|
284 |
+ (int64_t)in->video->pixel_aspect.num * outlink->h * link->w, |
|
285 |
+ (int64_t)in->video->pixel_aspect.den * outlink->w * link->h, |
|
286 |
+ INT_MAX); |
|
314 | 287 |
|
315 |
- if (!scale->sws) { |
|
316 |
- return ff_draw_slice(link->dst->outputs[0], y, h, slice_dir); |
|
317 |
- } |
|
288 |
+ sws_scale(scale->sws, in->data, in->linesize, 0, in->video->h, |
|
289 |
+ out->data, out->linesize); |
|
318 | 290 |
|
319 |
- if (scale->slice_y == 0 && slice_dir == -1) |
|
320 |
- scale->slice_y = link->dst->outputs[0]->h; |
|
321 |
- |
|
322 |
- data[0] = cur_pic->data[0] + y * cur_pic->linesize[0]; |
|
323 |
- data[1] = scale->input_is_pal ? |
|
324 |
- cur_pic->data[1] : |
|
325 |
- cur_pic->data[1] + (y>>scale->vsub) * cur_pic->linesize[1]; |
|
326 |
- data[2] = cur_pic->data[2] + (y>>scale->vsub) * cur_pic->linesize[2]; |
|
327 |
- data[3] = cur_pic->data[3] + y * cur_pic->linesize[3]; |
|
328 |
- |
|
329 |
- out_h = sws_scale(scale->sws, data, cur_pic->linesize, y, h, |
|
330 |
- link->dst->outputs[0]->out_buf->data, |
|
331 |
- link->dst->outputs[0]->out_buf->linesize); |
|
332 |
- |
|
333 |
- if (slice_dir == -1) |
|
334 |
- scale->slice_y -= out_h; |
|
335 |
- ret = ff_draw_slice(link->dst->outputs[0], scale->slice_y, out_h, slice_dir); |
|
336 |
- if (slice_dir == 1) |
|
337 |
- scale->slice_y += out_h; |
|
338 |
- return ret; |
|
291 |
+ avfilter_unref_bufferp(&in); |
|
292 |
+ return ff_filter_frame(outlink, out); |
|
339 | 293 |
} |
340 | 294 |
|
341 | 295 |
static const AVFilterPad avfilter_vf_scale_inputs[] = { |
342 | 296 |
{ |
343 | 297 |
.name = "default", |
344 | 298 |
.type = AVMEDIA_TYPE_VIDEO, |
345 |
- .start_frame = start_frame, |
|
346 |
- .draw_slice = draw_slice, |
|
299 |
+ .filter_frame = filter_frame, |
|
347 | 300 |
.min_perms = AV_PERM_READ, |
348 | 301 |
}, |
349 | 302 |
{ NULL } |
... | ... |
@@ -228,50 +228,27 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref) |
228 | 228 |
return res; |
229 | 229 |
} |
230 | 230 |
|
231 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
231 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
232 | 232 |
{ |
233 | 233 |
SelectContext *select = inlink->dst->priv; |
234 | 234 |
|
235 |
- select->select = select_frame(inlink->dst, picref); |
|
235 |
+ select->select = select_frame(inlink->dst, frame); |
|
236 | 236 |
if (select->select) { |
237 |
- AVFilterBufferRef *buf_out; |
|
238 | 237 |
/* frame was requested through poll_frame */ |
239 | 238 |
if (select->cache_frames) { |
240 |
- if (!av_fifo_space(select->pending_frames)) |
|
239 |
+ if (!av_fifo_space(select->pending_frames)) { |
|
241 | 240 |
av_log(inlink->dst, AV_LOG_ERROR, |
242 | 241 |
"Buffering limit reached, cannot cache more frames\n"); |
243 |
- else |
|
244 |
- av_fifo_generic_write(select->pending_frames, &picref, |
|
245 |
- sizeof(picref), NULL); |
|
242 |
+ avfilter_unref_bufferp(&frame); |
|
243 |
+ } else |
|
244 |
+ av_fifo_generic_write(select->pending_frames, &frame, |
|
245 |
+ sizeof(frame), NULL); |
|
246 | 246 |
return 0; |
247 | 247 |
} |
248 |
- buf_out = avfilter_ref_buffer(picref, ~0); |
|
249 |
- if (!buf_out) |
|
250 |
- return AVERROR(ENOMEM); |
|
251 |
- return ff_start_frame(inlink->dst->outputs[0], buf_out); |
|
248 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
252 | 249 |
} |
253 | 250 |
|
254 |
- return 0; |
|
255 |
-} |
|
256 |
- |
|
257 |
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
258 |
-{ |
|
259 |
- SelectContext *select = inlink->dst->priv; |
|
260 |
- |
|
261 |
- if (select->select && !select->cache_frames) |
|
262 |
- return ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); |
|
263 |
- return 0; |
|
264 |
-} |
|
265 |
- |
|
266 |
-static int end_frame(AVFilterLink *inlink) |
|
267 |
-{ |
|
268 |
- SelectContext *select = inlink->dst->priv; |
|
269 |
- |
|
270 |
- if (select->select) { |
|
271 |
- if (select->cache_frames) |
|
272 |
- return 0; |
|
273 |
- return ff_end_frame(inlink->dst->outputs[0]); |
|
274 |
- } |
|
251 |
+ avfilter_unref_bufferp(&frame); |
|
275 | 252 |
return 0; |
276 | 253 |
} |
277 | 254 |
|
... | ... |
@@ -284,14 +261,9 @@ static int request_frame(AVFilterLink *outlink) |
284 | 284 |
|
285 | 285 |
if (av_fifo_size(select->pending_frames)) { |
286 | 286 |
AVFilterBufferRef *picref; |
287 |
- int ret; |
|
288 | 287 |
|
289 | 288 |
av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL); |
290 |
- if ((ret = ff_start_frame(outlink, picref)) < 0 || |
|
291 |
- (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || |
|
292 |
- (ret = ff_end_frame(outlink)) < 0); |
|
293 |
- |
|
294 |
- return ret; |
|
289 |
+ return ff_filter_frame(outlink, picref); |
|
295 | 290 |
} |
296 | 291 |
|
297 | 292 |
while (!select->select) { |
... | ... |
@@ -346,9 +318,7 @@ static const AVFilterPad avfilter_vf_select_inputs[] = { |
346 | 346 |
.type = AVMEDIA_TYPE_VIDEO, |
347 | 347 |
.get_video_buffer = ff_null_get_video_buffer, |
348 | 348 |
.config_props = config_input, |
349 |
- .start_frame = start_frame, |
|
350 |
- .draw_slice = draw_slice, |
|
351 |
- .end_frame = end_frame |
|
349 |
+ .filter_frame = filter_frame, |
|
352 | 350 |
}, |
353 | 351 |
{ NULL } |
354 | 352 |
}; |
... | ... |
@@ -102,39 +102,36 @@ static int config_input(AVFilterLink *inlink) |
102 | 102 |
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) |
103 | 103 |
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) |
104 | 104 |
|
105 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
105 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
106 | 106 |
{ |
107 | 107 |
SetPTSContext *setpts = inlink->dst->priv; |
108 |
+ int64_t in_pts = frame->pts; |
|
108 | 109 |
double d; |
109 |
- AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0); |
|
110 |
- |
|
111 |
- if (!outpicref) |
|
112 |
- return AVERROR(ENOMEM); |
|
113 | 110 |
|
114 | 111 |
if (isnan(setpts->var_values[VAR_STARTPTS])) |
115 |
- setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts); |
|
112 |
+ setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts); |
|
116 | 113 |
|
117 |
- setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced; |
|
118 |
- setpts->var_values[VAR_PTS ] = TS2D(inpicref->pts); |
|
119 |
- setpts->var_values[VAR_POS ] = inpicref->pos == -1 ? NAN : inpicref->pos; |
|
114 |
+ setpts->var_values[VAR_INTERLACED] = frame->video->interlaced; |
|
115 |
+ setpts->var_values[VAR_PTS ] = TS2D(frame->pts); |
|
116 |
+ setpts->var_values[VAR_POS ] = frame->pos == -1 ? NAN : frame->pos; |
|
120 | 117 |
|
121 | 118 |
d = av_expr_eval(setpts->expr, setpts->var_values, NULL); |
122 |
- outpicref->pts = D2TS(d); |
|
119 |
+ frame->pts = D2TS(d); |
|
123 | 120 |
|
124 | 121 |
#ifdef DEBUG |
125 | 122 |
av_log(inlink->dst, AV_LOG_DEBUG, |
126 | 123 |
"n:%"PRId64" interlaced:%d pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n", |
127 | 124 |
(int64_t)setpts->var_values[VAR_N], |
128 | 125 |
(int)setpts->var_values[VAR_INTERLACED], |
129 |
- inpicref ->pos, |
|
130 |
- inpicref ->pts, inpicref ->pts * av_q2d(inlink->time_base), |
|
131 |
- outpicref->pts, outpicref->pts * av_q2d(inlink->time_base)); |
|
126 |
+ frame->pos, in_pts, in_pts * av_q2d(inlink->time_base), |
|
127 |
+ frame->pts, frame->pts * av_q2d(inlink->time_base)); |
|
132 | 128 |
#endif |
133 | 129 |
|
130 |
+ |
|
134 | 131 |
setpts->var_values[VAR_N] += 1.0; |
135 |
- setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts); |
|
136 |
- setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts); |
|
137 |
- return ff_start_frame(inlink->dst->outputs[0], outpicref); |
|
132 |
+ setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts); |
|
133 |
+ setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts); |
|
134 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
138 | 135 |
} |
139 | 136 |
|
140 | 137 |
static av_cold void uninit(AVFilterContext *ctx) |
... | ... |
@@ -150,7 +147,7 @@ static const AVFilterPad avfilter_vf_setpts_inputs[] = { |
150 | 150 |
.type = AVMEDIA_TYPE_VIDEO, |
151 | 151 |
.get_video_buffer = ff_null_get_video_buffer, |
152 | 152 |
.config_props = config_input, |
153 |
- .start_frame = start_frame, |
|
153 |
+ .filter_frame = filter_frame, |
|
154 | 154 |
}, |
155 | 155 |
{ NULL } |
156 | 156 |
}; |
... | ... |
@@ -108,21 +108,20 @@ static int config_output_props(AVFilterLink *outlink) |
108 | 108 |
return 0; |
109 | 109 |
} |
110 | 110 |
|
111 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
111 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
112 | 112 |
{ |
113 | 113 |
AVFilterContext *ctx = inlink->dst; |
114 | 114 |
AVFilterLink *outlink = ctx->outputs[0]; |
115 | 115 |
|
116 | 116 |
if (av_cmp_q(inlink->time_base, outlink->time_base)) { |
117 |
- int64_t orig_pts = picref->pts; |
|
118 |
- picref->pts = av_rescale_q(picref->pts, inlink->time_base, outlink->time_base); |
|
117 |
+ int64_t orig_pts = frame->pts; |
|
118 |
+ frame->pts = av_rescale_q(frame->pts, inlink->time_base, outlink->time_base); |
|
119 | 119 |
av_log(ctx, AV_LOG_DEBUG, "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n", |
120 | 120 |
inlink ->time_base.num, inlink ->time_base.den, orig_pts, |
121 |
- outlink->time_base.num, outlink->time_base.den, picref->pts); |
|
121 |
+ outlink->time_base.num, outlink->time_base.den, frame->pts); |
|
122 | 122 |
} |
123 |
- inlink->cur_buf = NULL; |
|
124 | 123 |
|
125 |
- return ff_start_frame(outlink, picref); |
|
124 |
+ return ff_filter_frame(outlink, frame); |
|
126 | 125 |
} |
127 | 126 |
|
128 | 127 |
static const AVFilterPad avfilter_vf_settb_inputs[] = { |
... | ... |
@@ -130,8 +129,7 @@ static const AVFilterPad avfilter_vf_settb_inputs[] = { |
130 | 130 |
.name = "default", |
131 | 131 |
.type = AVMEDIA_TYPE_VIDEO, |
132 | 132 |
.get_video_buffer = ff_null_get_video_buffer, |
133 |
- .start_frame = start_frame, |
|
134 |
- .end_frame = ff_null_end_frame |
|
133 |
+ .filter_frame = filter_frame, |
|
135 | 134 |
}, |
136 | 135 |
{ NULL } |
137 | 136 |
}; |
... | ... |
@@ -41,24 +41,23 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
41 | 41 |
return 0; |
42 | 42 |
} |
43 | 43 |
|
44 |
-static int end_frame(AVFilterLink *inlink) |
|
44 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
45 | 45 |
{ |
46 | 46 |
AVFilterContext *ctx = inlink->dst; |
47 | 47 |
ShowInfoContext *showinfo = ctx->priv; |
48 |
- AVFilterBufferRef *picref = inlink->cur_buf; |
|
49 | 48 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
50 | 49 |
uint32_t plane_checksum[4] = {0}, checksum = 0; |
51 | 50 |
int i, plane, vsub = desc->log2_chroma_h; |
52 | 51 |
|
53 |
- for (plane = 0; picref->data[plane] && plane < 4; plane++) { |
|
54 |
- size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane); |
|
55 |
- uint8_t *data = picref->data[plane]; |
|
52 |
+ for (plane = 0; frame->data[plane] && plane < 4; plane++) { |
|
53 |
+ size_t linesize = av_image_get_linesize(frame->format, frame->video->w, plane); |
|
54 |
+ uint8_t *data = frame->data[plane]; |
|
56 | 55 |
int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h; |
57 | 56 |
|
58 | 57 |
for (i = 0; i < h; i++) { |
59 | 58 |
plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize); |
60 | 59 |
checksum = av_adler32_update(checksum, data, linesize); |
61 |
- data += picref->linesize[plane]; |
|
60 |
+ data += frame->linesize[plane]; |
|
62 | 61 |
} |
63 | 62 |
} |
64 | 63 |
|
... | ... |
@@ -67,18 +66,18 @@ static int end_frame(AVFilterLink *inlink) |
67 | 67 |
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c " |
68 | 68 |
"checksum:%u plane_checksum:[%u %u %u %u]\n", |
69 | 69 |
showinfo->frame, |
70 |
- picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos, |
|
70 |
+ frame->pts, frame->pts * av_q2d(inlink->time_base), frame->pos, |
|
71 | 71 |
desc->name, |
72 |
- picref->video->pixel_aspect.num, picref->video->pixel_aspect.den, |
|
73 |
- picref->video->w, picref->video->h, |
|
74 |
- !picref->video->interlaced ? 'P' : /* Progressive */ |
|
75 |
- picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
76 |
- picref->video->key_frame, |
|
77 |
- av_get_picture_type_char(picref->video->pict_type), |
|
72 |
+ frame->video->pixel_aspect.num, frame->video->pixel_aspect.den, |
|
73 |
+ frame->video->w, frame->video->h, |
|
74 |
+ !frame->video->interlaced ? 'P' : /* Progressive */ |
|
75 |
+ frame->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
76 |
+ frame->video->key_frame, |
|
77 |
+ av_get_picture_type_char(frame->video->pict_type), |
|
78 | 78 |
checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]); |
79 | 79 |
|
80 | 80 |
showinfo->frame++; |
81 |
- return ff_end_frame(inlink->dst->outputs[0]); |
|
81 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
82 | 82 |
} |
83 | 83 |
|
84 | 84 |
static const AVFilterPad avfilter_vf_showinfo_inputs[] = { |
... | ... |
@@ -86,8 +85,7 @@ static const AVFilterPad avfilter_vf_showinfo_inputs[] = { |
86 | 86 |
.name = "default", |
87 | 87 |
.type = AVMEDIA_TYPE_VIDEO, |
88 | 88 |
.get_video_buffer = ff_null_get_video_buffer, |
89 |
- .start_frame = ff_null_start_frame, |
|
90 |
- .end_frame = end_frame, |
|
89 |
+ .filter_frame = filter_frame, |
|
91 | 90 |
.min_perms = AV_PERM_READ, |
92 | 91 |
}, |
93 | 92 |
{ NULL } |
... | ... |
@@ -121,100 +121,88 @@ static int config_props_output(AVFilterLink *outlink) |
121 | 121 |
return 0; |
122 | 122 |
} |
123 | 123 |
|
124 |
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
124 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
125 | 125 |
{ |
126 | 126 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
127 |
- AVFilterBufferRef *buf_out; |
|
127 |
+ TransContext *trans = inlink->dst->priv; |
|
128 |
+ AVFilterBufferRef *out; |
|
129 |
+ int plane; |
|
128 | 130 |
|
129 |
- outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
130 |
- outlink->w, outlink->h); |
|
131 |
- if (!outlink->out_buf) |
|
131 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
132 |
+ if (!out) { |
|
133 |
+ avfilter_unref_bufferp(&in); |
|
132 | 134 |
return AVERROR(ENOMEM); |
135 |
+ } |
|
133 | 136 |
|
134 |
- outlink->out_buf->pts = picref->pts; |
|
137 |
+ out->pts = in->pts; |
|
135 | 138 |
|
136 |
- if (picref->video->pixel_aspect.num == 0) { |
|
137 |
- outlink->out_buf->video->pixel_aspect = picref->video->pixel_aspect; |
|
139 |
+ if (in->video->pixel_aspect.num == 0) { |
|
140 |
+ out->video->pixel_aspect = in->video->pixel_aspect; |
|
138 | 141 |
} else { |
139 |
- outlink->out_buf->video->pixel_aspect.num = picref->video->pixel_aspect.den; |
|
140 |
- outlink->out_buf->video->pixel_aspect.den = picref->video->pixel_aspect.num; |
|
142 |
+ out->video->pixel_aspect.num = in->video->pixel_aspect.den; |
|
143 |
+ out->video->pixel_aspect.den = in->video->pixel_aspect.num; |
|
141 | 144 |
} |
142 | 145 |
|
143 |
- buf_out = avfilter_ref_buffer(outlink->out_buf, ~0); |
|
144 |
- if (!buf_out) |
|
145 |
- return AVERROR(ENOMEM); |
|
146 |
- return ff_start_frame(outlink, buf_out); |
|
147 |
-} |
|
148 |
- |
|
149 |
-static int end_frame(AVFilterLink *inlink) |
|
150 |
-{ |
|
151 |
- TransContext *trans = inlink->dst->priv; |
|
152 |
- AVFilterBufferRef *inpic = inlink->cur_buf; |
|
153 |
- AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf; |
|
154 |
- AVFilterLink *outlink = inlink->dst->outputs[0]; |
|
155 |
- int plane, ret; |
|
156 |
- |
|
157 |
- for (plane = 0; outpic->data[plane]; plane++) { |
|
146 |
+ for (plane = 0; out->data[plane]; plane++) { |
|
158 | 147 |
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0; |
159 | 148 |
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0; |
160 | 149 |
int pixstep = trans->pixsteps[plane]; |
161 |
- int inh = inpic->video->h>>vsub; |
|
162 |
- int outw = outpic->video->w>>hsub; |
|
163 |
- int outh = outpic->video->h>>vsub; |
|
164 |
- uint8_t *out, *in; |
|
165 |
- int outlinesize, inlinesize; |
|
150 |
+ int inh = in->video->h>>vsub; |
|
151 |
+ int outw = out->video->w>>hsub; |
|
152 |
+ int outh = out->video->h>>vsub; |
|
153 |
+ uint8_t *dst, *src; |
|
154 |
+ int dstlinesize, srclinesize; |
|
166 | 155 |
int x, y; |
167 | 156 |
|
168 |
- out = outpic->data[plane]; outlinesize = outpic->linesize[plane]; |
|
169 |
- in = inpic ->data[plane]; inlinesize = inpic ->linesize[plane]; |
|
157 |
+ dst = out->data[plane]; |
|
158 |
+ dstlinesize = out->linesize[plane]; |
|
159 |
+ src = in->data[plane]; |
|
160 |
+ srclinesize = in->linesize[plane]; |
|
170 | 161 |
|
171 | 162 |
if (trans->dir&1) { |
172 |
- in += inpic->linesize[plane] * (inh-1); |
|
173 |
- inlinesize *= -1; |
|
163 |
+ src += in->linesize[plane] * (inh-1); |
|
164 |
+ srclinesize *= -1; |
|
174 | 165 |
} |
175 | 166 |
|
176 | 167 |
if (trans->dir&2) { |
177 |
- out += outpic->linesize[plane] * (outh-1); |
|
178 |
- outlinesize *= -1; |
|
168 |
+ dst += out->linesize[plane] * (outh-1); |
|
169 |
+ dstlinesize *= -1; |
|
179 | 170 |
} |
180 | 171 |
|
181 | 172 |
for (y = 0; y < outh; y++) { |
182 | 173 |
switch (pixstep) { |
183 | 174 |
case 1: |
184 | 175 |
for (x = 0; x < outw; x++) |
185 |
- out[x] = in[x*inlinesize + y]; |
|
176 |
+ dst[x] = src[x*srclinesize + y]; |
|
186 | 177 |
break; |
187 | 178 |
case 2: |
188 | 179 |
for (x = 0; x < outw; x++) |
189 |
- *((uint16_t *)(out + 2*x)) = *((uint16_t *)(in + x*inlinesize + y*2)); |
|
180 |
+ *((uint16_t *)(dst + 2*x)) = *((uint16_t *)(src + x*srclinesize + y*2)); |
|
190 | 181 |
break; |
191 | 182 |
case 3: |
192 | 183 |
for (x = 0; x < outw; x++) { |
193 |
- int32_t v = AV_RB24(in + x*inlinesize + y*3); |
|
194 |
- AV_WB24(out + 3*x, v); |
|
184 |
+ int32_t v = AV_RB24(src + x*srclinesize + y*3); |
|
185 |
+ AV_WB24(dst + 3*x, v); |
|
195 | 186 |
} |
196 | 187 |
break; |
197 | 188 |
case 4: |
198 | 189 |
for (x = 0; x < outw; x++) |
199 |
- *((uint32_t *)(out + 4*x)) = *((uint32_t *)(in + x*inlinesize + y*4)); |
|
190 |
+ *((uint32_t *)(dst + 4*x)) = *((uint32_t *)(src + x*srclinesize + y*4)); |
|
200 | 191 |
break; |
201 | 192 |
} |
202 |
- out += outlinesize; |
|
193 |
+ dst += dstlinesize; |
|
203 | 194 |
} |
204 | 195 |
} |
205 | 196 |
|
206 |
- if ((ret = ff_draw_slice(outlink, 0, outpic->video->h, 1)) < 0 || |
|
207 |
- (ret = ff_end_frame(outlink)) < 0) |
|
208 |
- return ret; |
|
209 |
- return 0; |
|
197 |
+ avfilter_unref_bufferp(&in); |
|
198 |
+ return ff_filter_frame(outlink, out); |
|
210 | 199 |
} |
211 | 200 |
|
212 | 201 |
static const AVFilterPad avfilter_vf_transpose_inputs[] = { |
213 | 202 |
{ |
214 | 203 |
.name = "default", |
215 | 204 |
.type = AVMEDIA_TYPE_VIDEO, |
216 |
- .start_frame = start_frame, |
|
217 |
- .end_frame = end_frame, |
|
205 |
+ .filter_frame = filter_frame, |
|
218 | 206 |
.min_perms = AV_PERM_READ, |
219 | 207 |
}, |
220 | 208 |
{ NULL } |
... | ... |
@@ -214,36 +214,34 @@ static av_cold void uninit(AVFilterContext *ctx) |
214 | 214 |
free_filter_param(&unsharp->chroma); |
215 | 215 |
} |
216 | 216 |
|
217 |
-static int end_frame(AVFilterLink *link) |
|
217 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
218 | 218 |
{ |
219 | 219 |
UnsharpContext *unsharp = link->dst->priv; |
220 |
- AVFilterBufferRef *in = link->cur_buf; |
|
221 |
- AVFilterBufferRef *out = link->dst->outputs[0]->out_buf; |
|
220 |
+ AVFilterLink *outlink = link->dst->outputs[0]; |
|
221 |
+ AVFilterBufferRef *out; |
|
222 | 222 |
int cw = SHIFTUP(link->w, unsharp->hsub); |
223 | 223 |
int ch = SHIFTUP(link->h, unsharp->vsub); |
224 |
- int ret; |
|
224 |
+ |
|
225 |
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
226 |
+ if (!out) { |
|
227 |
+ avfilter_unref_bufferp(&in); |
|
228 |
+ return AVERROR(ENOMEM); |
|
229 |
+ } |
|
230 |
+ avfilter_copy_buffer_ref_props(out, in); |
|
225 | 231 |
|
226 | 232 |
apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma); |
227 | 233 |
apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma); |
228 | 234 |
apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma); |
229 | 235 |
|
230 |
- if ((ret = ff_draw_slice(link->dst->outputs[0], 0, link->h, 1)) < 0 || |
|
231 |
- (ret = ff_end_frame(link->dst->outputs[0])) < 0) |
|
232 |
- return ret; |
|
233 |
- return 0; |
|
234 |
-} |
|
235 |
- |
|
236 |
-static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
237 |
-{ |
|
238 |
- return 0; |
|
236 |
+ avfilter_unref_bufferp(&in); |
|
237 |
+ return ff_filter_frame(outlink, out); |
|
239 | 238 |
} |
240 | 239 |
|
241 | 240 |
static const AVFilterPad avfilter_vf_unsharp_inputs[] = { |
242 | 241 |
{ |
243 | 242 |
.name = "default", |
244 | 243 |
.type = AVMEDIA_TYPE_VIDEO, |
245 |
- .draw_slice = draw_slice, |
|
246 |
- .end_frame = end_frame, |
|
244 |
+ .filter_frame = filter_frame, |
|
247 | 245 |
.config_props = config_props, |
248 | 246 |
.min_perms = AV_PERM_READ, |
249 | 247 |
}, |
... | ... |
@@ -69,41 +69,28 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, |
69 | 69 |
return picref; |
70 | 70 |
} |
71 | 71 |
|
72 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *inpicref) |
|
72 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
73 | 73 |
{ |
74 | 74 |
FlipContext *flip = link->dst->priv; |
75 |
- AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0); |
|
76 | 75 |
int i; |
77 | 76 |
|
78 |
- if (!outpicref) |
|
79 |
- return AVERROR(ENOMEM); |
|
80 |
- |
|
81 | 77 |
for (i = 0; i < 4; i ++) { |
82 | 78 |
int vsub = i == 1 || i == 2 ? flip->vsub : 0; |
83 | 79 |
|
84 |
- if (outpicref->data[i]) { |
|
85 |
- outpicref->data[i] += ((link->h >> vsub)-1) * outpicref->linesize[i]; |
|
86 |
- outpicref->linesize[i] = -outpicref->linesize[i]; |
|
80 |
+ if (frame->data[i]) { |
|
81 |
+ frame->data[i] += ((link->h >> vsub)-1) * frame->linesize[i]; |
|
82 |
+ frame->linesize[i] = -frame->linesize[i]; |
|
87 | 83 |
} |
88 | 84 |
} |
89 | 85 |
|
90 |
- return ff_start_frame(link->dst->outputs[0], outpicref); |
|
91 |
-} |
|
92 |
- |
|
93 |
-static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
94 |
-{ |
|
95 |
- AVFilterContext *ctx = link->dst; |
|
96 |
- |
|
97 |
- return ff_draw_slice(ctx->outputs[0], link->h - (y+h), h, -1 * slice_dir); |
|
86 |
+ return ff_filter_frame(link->dst->outputs[0], frame); |
|
98 | 87 |
} |
99 |
- |
|
100 | 88 |
static const AVFilterPad avfilter_vf_vflip_inputs[] = { |
101 | 89 |
{ |
102 | 90 |
.name = "default", |
103 | 91 |
.type = AVMEDIA_TYPE_VIDEO, |
104 | 92 |
.get_video_buffer = get_video_buffer, |
105 |
- .start_frame = start_frame, |
|
106 |
- .draw_slice = draw_slice, |
|
93 |
+ .filter_frame = filter_frame, |
|
107 | 94 |
.config_props = config_input, |
108 | 95 |
}, |
109 | 96 |
{ NULL } |
... | ... |
@@ -202,19 +202,14 @@ static int return_frame(AVFilterContext *ctx, int is_second) |
202 | 202 |
} else { |
203 | 203 |
yadif->out->pts = AV_NOPTS_VALUE; |
204 | 204 |
} |
205 |
- ret = ff_start_frame(ctx->outputs[0], yadif->out); |
|
206 |
- if (ret < 0) |
|
207 |
- return ret; |
|
208 | 205 |
} |
209 |
- if ((ret = ff_draw_slice(ctx->outputs[0], 0, link->h, 1)) < 0 || |
|
210 |
- (ret = ff_end_frame(ctx->outputs[0])) < 0) |
|
211 |
- return ret; |
|
206 |
+ ret = ff_filter_frame(ctx->outputs[0], yadif->out); |
|
212 | 207 |
|
213 | 208 |
yadif->frame_pending = (yadif->mode&1) && !is_second; |
214 |
- return 0; |
|
209 |
+ return ret; |
|
215 | 210 |
} |
216 | 211 |
|
217 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
212 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
218 | 213 |
{ |
219 | 214 |
AVFilterContext *ctx = link->dst; |
220 | 215 |
YADIFContext *yadif = ctx->priv; |
... | ... |
@@ -227,7 +222,6 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
227 | 227 |
yadif->prev = yadif->cur; |
228 | 228 |
yadif->cur = yadif->next; |
229 | 229 |
yadif->next = picref; |
230 |
- link->cur_buf = NULL; |
|
231 | 230 |
|
232 | 231 |
if (!yadif->cur) |
233 | 232 |
return 0; |
... | ... |
@@ -240,7 +234,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
240 | 240 |
avfilter_unref_bufferp(&yadif->prev); |
241 | 241 |
if (yadif->out->pts != AV_NOPTS_VALUE) |
242 | 242 |
yadif->out->pts *= 2; |
243 |
- return ff_start_frame(ctx->outputs[0], yadif->out); |
|
243 |
+ return ff_filter_frame(ctx->outputs[0], yadif->out); |
|
244 | 244 |
} |
245 | 245 |
|
246 | 246 |
if (!yadif->prev && |
... | ... |
@@ -258,26 +252,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
258 | 258 |
if (yadif->out->pts != AV_NOPTS_VALUE) |
259 | 259 |
yadif->out->pts *= 2; |
260 | 260 |
|
261 |
- return ff_start_frame(ctx->outputs[0], yadif->out); |
|
262 |
-} |
|
263 |
- |
|
264 |
-static int end_frame(AVFilterLink *link) |
|
265 |
-{ |
|
266 |
- AVFilterContext *ctx = link->dst; |
|
267 |
- YADIFContext *yadif = ctx->priv; |
|
268 |
- |
|
269 |
- if (!yadif->out) |
|
270 |
- return 0; |
|
271 |
- |
|
272 |
- if (yadif->auto_enable && !yadif->cur->video->interlaced) { |
|
273 |
- int ret = ff_draw_slice(ctx->outputs[0], 0, link->h, 1); |
|
274 |
- if (ret >= 0) |
|
275 |
- ret = ff_end_frame(ctx->outputs[0]); |
|
276 |
- return ret; |
|
277 |
- } |
|
278 |
- |
|
279 |
- return_frame(ctx, 0); |
|
280 |
- return 0; |
|
261 |
+ return return_frame(ctx, 0); |
|
281 | 262 |
} |
282 | 263 |
|
283 | 264 |
static int request_frame(AVFilterLink *link) |
... | ... |
@@ -307,8 +282,7 @@ static int request_frame(AVFilterLink *link) |
307 | 307 |
|
308 | 308 |
next->pts = yadif->next->pts * 2 - yadif->cur->pts; |
309 | 309 |
|
310 |
- start_frame(link->src->inputs[0], next); |
|
311 |
- end_frame(link->src->inputs[0]); |
|
310 |
+ filter_frame(link->src->inputs[0], next); |
|
312 | 311 |
yadif->eof = 1; |
313 | 312 |
} else if (ret < 0) { |
314 | 313 |
return ret; |
... | ... |
@@ -409,11 +383,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
409 | 409 |
return 0; |
410 | 410 |
} |
411 | 411 |
|
412 |
-static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
413 |
-{ |
|
414 |
- return 0; |
|
415 |
-} |
|
416 |
- |
|
417 | 412 |
static int config_props(AVFilterLink *link) |
418 | 413 |
{ |
419 | 414 |
link->time_base.num = link->src->inputs[0]->time_base.num; |
... | ... |
@@ -428,10 +397,8 @@ static const AVFilterPad avfilter_vf_yadif_inputs[] = { |
428 | 428 |
{ |
429 | 429 |
.name = "default", |
430 | 430 |
.type = AVMEDIA_TYPE_VIDEO, |
431 |
- .start_frame = start_frame, |
|
432 | 431 |
.get_video_buffer = get_video_buffer, |
433 |
- .draw_slice = null_draw_slice, |
|
434 |
- .end_frame = end_frame, |
|
432 |
+ .filter_frame = filter_frame, |
|
435 | 433 |
}, |
436 | 434 |
{ NULL } |
437 | 435 |
}; |
... | ... |
@@ -163,213 +163,3 @@ AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int |
163 | 163 |
|
164 | 164 |
return ret; |
165 | 165 |
} |
166 |
- |
|
167 |
-int ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
168 |
-{ |
|
169 |
- AVFilterBufferRef *buf_out = avfilter_ref_buffer(picref, ~0); |
|
170 |
- if (!buf_out) |
|
171 |
- return AVERROR(ENOMEM); |
|
172 |
- return ff_start_frame(link->dst->outputs[0], buf_out); |
|
173 |
-} |
|
174 |
- |
|
175 |
-// for filters that support (but don't require) outpic==inpic |
|
176 |
-int ff_inplace_start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
177 |
-{ |
|
178 |
- AVFilterLink *outlink = inlink->dst->outputs[0]; |
|
179 |
- AVFilterBufferRef *outpicref = NULL, *for_next_filter; |
|
180 |
- int ret = 0; |
|
181 |
- |
|
182 |
- if ((inpicref->perms & AV_PERM_WRITE) && !(inpicref->perms & AV_PERM_PRESERVE)) { |
|
183 |
- outpicref = avfilter_ref_buffer(inpicref, ~0); |
|
184 |
- if (!outpicref) |
|
185 |
- return AVERROR(ENOMEM); |
|
186 |
- } else { |
|
187 |
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
188 |
- if (!outpicref) |
|
189 |
- return AVERROR(ENOMEM); |
|
190 |
- |
|
191 |
- avfilter_copy_buffer_ref_props(outpicref, inpicref); |
|
192 |
- outpicref->video->w = outlink->w; |
|
193 |
- outpicref->video->h = outlink->h; |
|
194 |
- } |
|
195 |
- |
|
196 |
- for_next_filter = avfilter_ref_buffer(outpicref, ~0); |
|
197 |
- if (for_next_filter) |
|
198 |
- ret = ff_start_frame(outlink, for_next_filter); |
|
199 |
- else |
|
200 |
- ret = AVERROR(ENOMEM); |
|
201 |
- |
|
202 |
- if (ret < 0) { |
|
203 |
- avfilter_unref_bufferp(&outpicref); |
|
204 |
- return ret; |
|
205 |
- } |
|
206 |
- |
|
207 |
- outlink->out_buf = outpicref; |
|
208 |
- return 0; |
|
209 |
-} |
|
210 |
- |
|
211 |
-static int default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
212 |
-{ |
|
213 |
- AVFilterLink *outlink = NULL; |
|
214 |
- |
|
215 |
- if (inlink->dst->nb_outputs) |
|
216 |
- outlink = inlink->dst->outputs[0]; |
|
217 |
- |
|
218 |
- if (outlink) { |
|
219 |
- AVFilterBufferRef *buf_out; |
|
220 |
- outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
221 |
- if (!outlink->out_buf) |
|
222 |
- return AVERROR(ENOMEM); |
|
223 |
- |
|
224 |
- avfilter_copy_buffer_ref_props(outlink->out_buf, picref); |
|
225 |
- buf_out = avfilter_ref_buffer(outlink->out_buf, ~0); |
|
226 |
- if (!buf_out) |
|
227 |
- return AVERROR(ENOMEM); |
|
228 |
- |
|
229 |
- return ff_start_frame(outlink, buf_out); |
|
230 |
- } |
|
231 |
- return 0; |
|
232 |
-} |
|
233 |
- |
|
234 |
-static void clear_link(AVFilterLink *link) |
|
235 |
-{ |
|
236 |
- avfilter_unref_bufferp(&link->cur_buf); |
|
237 |
- avfilter_unref_bufferp(&link->src_buf); |
|
238 |
- avfilter_unref_bufferp(&link->out_buf); |
|
239 |
-} |
|
240 |
- |
|
241 |
-/* XXX: should we do the duplicating of the picture ref here, instead of |
|
242 |
- * forcing the source filter to do it? */ |
|
243 |
-int ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
244 |
-{ |
|
245 |
- int (*start_frame)(AVFilterLink *, AVFilterBufferRef *); |
|
246 |
- AVFilterPad *dst = link->dstpad; |
|
247 |
- int ret, perms = picref->perms; |
|
248 |
- |
|
249 |
- FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1); |
|
250 |
- |
|
251 |
- if (!(start_frame = dst->start_frame)) |
|
252 |
- start_frame = default_start_frame; |
|
253 |
- |
|
254 |
- if (picref->linesize[0] < 0) |
|
255 |
- perms |= AV_PERM_NEG_LINESIZES; |
|
256 |
- /* prepare to copy the picture if it has insufficient permissions */ |
|
257 |
- if ((dst->min_perms & perms) != dst->min_perms || dst->rej_perms & perms) { |
|
258 |
- av_log(link->dst, AV_LOG_DEBUG, |
|
259 |
- "frame copy needed (have perms %x, need %x, reject %x)\n", |
|
260 |
- picref->perms, |
|
261 |
- link->dstpad->min_perms, link->dstpad->rej_perms); |
|
262 |
- |
|
263 |
- link->cur_buf = ff_get_video_buffer(link, dst->min_perms, link->w, link->h); |
|
264 |
- if (!link->cur_buf) { |
|
265 |
- avfilter_unref_bufferp(&picref); |
|
266 |
- return AVERROR(ENOMEM); |
|
267 |
- } |
|
268 |
- |
|
269 |
- link->src_buf = picref; |
|
270 |
- avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf); |
|
271 |
- } |
|
272 |
- else |
|
273 |
- link->cur_buf = picref; |
|
274 |
- |
|
275 |
- ret = start_frame(link, link->cur_buf); |
|
276 |
- if (ret < 0) |
|
277 |
- clear_link(link); |
|
278 |
- |
|
279 |
- return ret; |
|
280 |
-} |
|
281 |
- |
|
282 |
-int ff_null_end_frame(AVFilterLink *link) |
|
283 |
-{ |
|
284 |
- return ff_end_frame(link->dst->outputs[0]); |
|
285 |
-} |
|
286 |
- |
|
287 |
-static int default_end_frame(AVFilterLink *inlink) |
|
288 |
-{ |
|
289 |
- AVFilterLink *outlink = NULL; |
|
290 |
- |
|
291 |
- if (inlink->dst->nb_outputs) |
|
292 |
- outlink = inlink->dst->outputs[0]; |
|
293 |
- |
|
294 |
- if (outlink) { |
|
295 |
- return ff_end_frame(outlink); |
|
296 |
- } |
|
297 |
- return 0; |
|
298 |
-} |
|
299 |
- |
|
300 |
-int ff_end_frame(AVFilterLink *link) |
|
301 |
-{ |
|
302 |
- int (*end_frame)(AVFilterLink *); |
|
303 |
- int ret; |
|
304 |
- |
|
305 |
- if (!(end_frame = link->dstpad->end_frame)) |
|
306 |
- end_frame = default_end_frame; |
|
307 |
- |
|
308 |
- ret = end_frame(link); |
|
309 |
- |
|
310 |
- clear_link(link); |
|
311 |
- |
|
312 |
- return ret; |
|
313 |
-} |
|
314 |
- |
|
315 |
-int ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
316 |
-{ |
|
317 |
- return ff_draw_slice(link->dst->outputs[0], y, h, slice_dir); |
|
318 |
-} |
|
319 |
- |
|
320 |
-static int default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) |
|
321 |
-{ |
|
322 |
- AVFilterLink *outlink = NULL; |
|
323 |
- |
|
324 |
- if (inlink->dst->nb_outputs) |
|
325 |
- outlink = inlink->dst->outputs[0]; |
|
326 |
- |
|
327 |
- if (outlink) |
|
328 |
- return ff_draw_slice(outlink, y, h, slice_dir); |
|
329 |
- return 0; |
|
330 |
-} |
|
331 |
- |
|
332 |
-int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) |
|
333 |
-{ |
|
334 |
- uint8_t *src[4], *dst[4]; |
|
335 |
- int i, j, vsub, ret; |
|
336 |
- int (*draw_slice)(AVFilterLink *, int, int, int); |
|
337 |
- |
|
338 |
- FF_DPRINTF_START(NULL, draw_slice); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir); |
|
339 |
- |
|
340 |
- /* copy the slice if needed for permission reasons */ |
|
341 |
- if (link->src_buf) { |
|
342 |
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); |
|
343 |
- vsub = desc->log2_chroma_h; |
|
344 |
- |
|
345 |
- for (i = 0; i < 4; i++) { |
|
346 |
- if (link->src_buf->data[i]) { |
|
347 |
- src[i] = link->src_buf-> data[i] + |
|
348 |
- (y >> (i==1 || i==2 ? vsub : 0)) * link->src_buf-> linesize[i]; |
|
349 |
- dst[i] = link->cur_buf->data[i] + |
|
350 |
- (y >> (i==1 || i==2 ? vsub : 0)) * link->cur_buf->linesize[i]; |
|
351 |
- } else |
|
352 |
- src[i] = dst[i] = NULL; |
|
353 |
- } |
|
354 |
- |
|
355 |
- for (i = 0; i < 4; i++) { |
|
356 |
- int planew = |
|
357 |
- av_image_get_linesize(link->format, link->cur_buf->video->w, i); |
|
358 |
- |
|
359 |
- if (!src[i]) continue; |
|
360 |
- |
|
361 |
- for (j = 0; j < h >> (i==1 || i==2 ? vsub : 0); j++) { |
|
362 |
- memcpy(dst[i], src[i], planew); |
|
363 |
- src[i] += link->src_buf->linesize[i]; |
|
364 |
- dst[i] += link->cur_buf->linesize[i]; |
|
365 |
- } |
|
366 |
- } |
|
367 |
- } |
|
368 |
- |
|
369 |
- if (!(draw_slice = link->dstpad->draw_slice)) |
|
370 |
- draw_slice = default_draw_slice; |
|
371 |
- ret = draw_slice(link, y, h, slice_dir); |
|
372 |
- if (ret < 0) |
|
373 |
- clear_link(link); |
|
374 |
- return ret; |
|
375 |
-} |
... | ... |
@@ -39,51 +39,4 @@ AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w |
39 | 39 |
AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, |
40 | 40 |
int w, int h); |
41 | 41 |
|
42 |
-int ff_inplace_start_frame(AVFilterLink *link, AVFilterBufferRef *picref); |
|
43 |
-int ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref); |
|
44 |
-int ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir); |
|
45 |
-int ff_null_end_frame(AVFilterLink *link); |
|
46 |
- |
|
47 |
-/** |
|
48 |
- * Notify the next filter of the start of a frame. |
|
49 |
- * |
|
50 |
- * @param link the output link the frame will be sent over |
|
51 |
- * @param picref A reference to the frame about to be sent. The data for this |
|
52 |
- * frame need only be valid once draw_slice() is called for that |
|
53 |
- * portion. The receiving filter will free this reference when |
|
54 |
- * it no longer needs it. |
|
55 |
- * |
|
56 |
- * @return >= 0 on success, a negative AVERROR on error. This function will |
|
57 |
- * unreference picref in case of error. |
|
58 |
- */ |
|
59 |
-int ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref); |
|
60 |
- |
|
61 |
-/** |
|
62 |
- * Notify the next filter that the current frame has finished. |
|
63 |
- * |
|
64 |
- * @param link the output link the frame was sent over |
|
65 |
- * |
|
66 |
- * @return >= 0 on success, a negative AVERROR on error |
|
67 |
- */ |
|
68 |
-int ff_end_frame(AVFilterLink *link); |
|
69 |
- |
|
70 |
-/** |
|
71 |
- * Send a slice to the next filter. |
|
72 |
- * |
|
73 |
- * Slices have to be provided in sequential order, either in |
|
74 |
- * top-bottom or bottom-top order. If slices are provided in |
|
75 |
- * non-sequential order the behavior of the function is undefined. |
|
76 |
- * |
|
77 |
- * @param link the output link over which the frame is being sent |
|
78 |
- * @param y offset in pixels from the top of the image for this slice |
|
79 |
- * @param h height of this slice in pixels |
|
80 |
- * @param slice_dir the assumed direction for sending slices, |
|
81 |
- * from the top slice to the bottom slice if the value is 1, |
|
82 |
- * from the bottom slice to the top slice if the value is -1, |
|
83 |
- * for other values the behavior of the function is undefined. |
|
84 |
- * |
|
85 |
- * @return >= 0 on success, a negative AVERROR on error. |
|
86 |
- */ |
|
87 |
-int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir); |
|
88 |
- |
|
89 | 42 |
#endif /* AVFILTER_VIDEO_H */ |
... | ... |
@@ -20,13 +20,9 @@ |
20 | 20 |
#include "internal.h" |
21 | 21 |
#include "libavutil/internal.h" |
22 | 22 |
|
23 |
-static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
24 |
-{ |
|
25 |
- return 0; |
|
26 |
-} |
|
27 |
- |
|
28 |
-static int end_frame(AVFilterLink *link) |
|
23 |
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
29 | 24 |
{ |
25 |
+ avfilter_unref_bufferp(&frame); |
|
30 | 26 |
return 0; |
31 | 27 |
} |
32 | 28 |
|
... | ... |
@@ -34,8 +30,7 @@ static const AVFilterPad avfilter_vsink_nullsink_inputs[] = { |
34 | 34 |
{ |
35 | 35 |
.name = "default", |
36 | 36 |
.type = AVMEDIA_TYPE_VIDEO, |
37 |
- .start_frame = start_frame, |
|
38 |
- .end_frame = end_frame, |
|
37 |
+ .filter_frame = filter_frame, |
|
39 | 38 |
}, |
40 | 39 |
{ NULL }, |
41 | 40 |
}; |
... | ... |
@@ -147,8 +147,6 @@ static int color_request_frame(AVFilterLink *link) |
147 | 147 |
{ |
148 | 148 |
ColorContext *color = link->src->priv; |
149 | 149 |
AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h); |
150 |
- AVFilterBufferRef *buf_out; |
|
151 |
- int ret; |
|
152 | 150 |
|
153 | 151 |
if (!picref) |
154 | 152 |
return AVERROR(ENOMEM); |
... | ... |
@@ -157,29 +155,10 @@ static int color_request_frame(AVFilterLink *link) |
157 | 157 |
picref->pts = color->pts++; |
158 | 158 |
picref->pos = -1; |
159 | 159 |
|
160 |
- buf_out = avfilter_ref_buffer(picref, ~0); |
|
161 |
- if (!buf_out) { |
|
162 |
- ret = AVERROR(ENOMEM); |
|
163 |
- goto fail; |
|
164 |
- } |
|
165 |
- |
|
166 |
- ret = ff_start_frame(link, buf_out); |
|
167 |
- if (ret < 0) |
|
168 |
- goto fail; |
|
169 |
- |
|
170 | 160 |
ff_draw_rectangle(picref->data, picref->linesize, |
171 | 161 |
color->line, color->line_step, color->hsub, color->vsub, |
172 | 162 |
0, 0, color->w, color->h); |
173 |
- ret = ff_draw_slice(link, 0, color->h, 1); |
|
174 |
- if (ret < 0) |
|
175 |
- goto fail; |
|
176 |
- |
|
177 |
- ret = ff_end_frame(link); |
|
178 |
- |
|
179 |
-fail: |
|
180 |
- avfilter_unref_buffer(picref); |
|
181 |
- |
|
182 |
- return ret; |
|
163 |
+ return ff_filter_frame(link, picref); |
|
183 | 164 |
} |
184 | 165 |
|
185 | 166 |
static const AVFilterPad avfilter_vsrc_color_outputs[] = { |
... | ... |
@@ -279,7 +279,6 @@ static int movie_get_frame(AVFilterLink *outlink) |
279 | 279 |
|
280 | 280 |
static int request_frame(AVFilterLink *outlink) |
281 | 281 |
{ |
282 |
- AVFilterBufferRef *outpicref; |
|
283 | 282 |
MovieContext *movie = outlink->src->priv; |
284 | 283 |
int ret; |
285 | 284 |
|
... | ... |
@@ -288,23 +287,8 @@ static int request_frame(AVFilterLink *outlink) |
288 | 288 |
if ((ret = movie_get_frame(outlink)) < 0) |
289 | 289 |
return ret; |
290 | 290 |
|
291 |
- outpicref = avfilter_ref_buffer(movie->picref, ~0); |
|
292 |
- if (!outpicref) { |
|
293 |
- ret = AVERROR(ENOMEM); |
|
294 |
- goto fail; |
|
295 |
- } |
|
296 |
- |
|
297 |
- ret = ff_start_frame(outlink, outpicref); |
|
298 |
- if (ret < 0) |
|
299 |
- goto fail; |
|
300 |
- |
|
301 |
- ret = ff_draw_slice(outlink, 0, outlink->h, 1); |
|
302 |
- if (ret < 0) |
|
303 |
- goto fail; |
|
304 |
- |
|
305 |
- ret = ff_end_frame(outlink); |
|
306 |
-fail: |
|
307 |
- avfilter_unref_bufferp(&movie->picref); |
|
291 |
+ ret = ff_filter_frame(outlink, movie->picref); |
|
292 |
+ movie->picref = NULL; |
|
308 | 293 |
|
309 | 294 |
return ret; |
310 | 295 |
} |
... | ... |
@@ -131,7 +131,6 @@ static int request_frame(AVFilterLink *outlink) |
131 | 131 |
{ |
132 | 132 |
TestSourceContext *test = outlink->src->priv; |
133 | 133 |
AVFilterBufferRef *picref; |
134 |
- int ret; |
|
135 | 134 |
|
136 | 135 |
if (test->max_pts >= 0 && test->pts > test->max_pts) |
137 | 136 |
return AVERROR_EOF; |
... | ... |
@@ -148,12 +147,7 @@ static int request_frame(AVFilterLink *outlink) |
148 | 148 |
test->nb_frame++; |
149 | 149 |
test->fill_picture_fn(outlink->src, picref); |
150 | 150 |
|
151 |
- if ((ret = ff_start_frame(outlink, picref)) < 0 || |
|
152 |
- (ret = ff_draw_slice(outlink, 0, test->h, 1)) < 0 || |
|
153 |
- (ret = ff_end_frame(outlink)) < 0) |
|
154 |
- return ret; |
|
155 |
- |
|
156 |
- return 0; |
|
151 |
+ return ff_filter_frame(outlink, picref); |
|
157 | 152 |
} |
158 | 153 |
|
159 | 154 |
#if CONFIG_TESTSRC_FILTER |