* commit '7e350379f87e7f74420b4813170fe808e2313911':
lavfi: switch to AVFrame.
Conflicts:
doc/filters.texi
libavfilter/af_ashowinfo.c
libavfilter/audio.c
libavfilter/avfilter.c
libavfilter/avfilter.h
libavfilter/buffersink.c
libavfilter/buffersrc.c
libavfilter/buffersrc.h
libavfilter/f_select.c
libavfilter/f_setpts.c
libavfilter/fifo.c
libavfilter/split.c
libavfilter/src_movie.c
libavfilter/version.h
libavfilter/vf_aspect.c
libavfilter/vf_bbox.c
libavfilter/vf_blackframe.c
libavfilter/vf_delogo.c
libavfilter/vf_drawbox.c
libavfilter/vf_drawtext.c
libavfilter/vf_fade.c
libavfilter/vf_fieldorder.c
libavfilter/vf_fps.c
libavfilter/vf_frei0r.c
libavfilter/vf_gradfun.c
libavfilter/vf_hqdn3d.c
libavfilter/vf_lut.c
libavfilter/vf_overlay.c
libavfilter/vf_pad.c
libavfilter/vf_scale.c
libavfilter/vf_showinfo.c
libavfilter/vf_transpose.c
libavfilter/vf_vflip.c
libavfilter/vf_yadif.c
libavfilter/video.c
libavfilter/vsrc_testsrc.c
libavfilter/yadif.h
Following are notes about the merge authorship and various technical details.
Michael Niedermayer:
* Main merge operation, notably avfilter.c and video.c
* Switch to AVFrame:
- afade
- anullsrc
- apad
- aresample
- blackframe
- deshake
- idet
- il
- mandelbrot
- mptestsrc
- noise
- setfield
- smartblur
- tinterlace
* various merge changes and fixes in:
- ashowinfo
- blackdetect
- field
- fps
- select
- testsrc
- yadif
Nicolas George:
* Switch to AVFrame:
- make rawdec work with refcounted frames. Adapted from commit
759001c534287a96dc96d1e274665feb7059145d by Anton Khirnov.
Also, fix the use of || instead of | in a flags check.
- make buffer sink and src, audio and video work all together
Clément Bœsch:
* Switch to AVFrame:
- aevalsrc
- alphaextract
- blend
- cellauto
- colormatrix
- concat
- earwax
- ebur128
- edgedetect
- geq
- histeq
- histogram
- hue
- kerndeint
- life
- movie
- mp (with the help of Michael)
- overlay
- pad
- pan
- pp
- pp
- removelogo
- sendcmd
- showspectrum
- showwaves
- silencedetect
- stereo3d
- subtitles
- super2xsai
- swapuv
- thumbnail
- tile
Hendrik Leppkes:
* Switch to AVFrame:
- aconvert
- amerge
- asetnsamples
- atempo
- biquads
Matthieu Bouron:
* Switch to AVFrame
- alphamerge
- decimate
- volumedetect
Stefano Sabatini:
* Switch to AVFrame:
- astreamsync
- flite
- framestep
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Signed-off-by: Nicolas George <nicolas.george@normalesup.org>
Signed-off-by: Clément Bœsch <ubitux@gmail.com>
Signed-off-by: Hendrik Leppkes <h.leppkes@gmail.com>
Signed-off-by: Matthieu Bouron <matthieu.bouron@gmail.com>
Signed-off-by: Stefano Sabatini <stefasab@gmail.com>
Merged-by: Michael Niedermayer <michaelni@gmx.at>
... | ... |
@@ -2086,9 +2086,6 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. |
2086 | 2086 |
@item n |
2087 | 2087 |
the number of input frame, starting from 0 |
2088 | 2088 |
|
2089 |
-@item pos |
|
2090 |
-the position in the file of the input frame, NAN if unknown |
|
2091 |
- |
|
2092 | 2089 |
@item t |
2093 | 2090 |
timestamp expressed in seconds, NAN if the input timestamp is unknown |
2094 | 2091 |
|
... | ... |
@@ -1627,8 +1627,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output) |
1627 | 1627 |
(AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last, |
1628 | 1628 |
(AVRational){1, ist->st->codec->sample_rate}); |
1629 | 1629 |
for (i = 0; i < ist->nb_filters; i++) |
1630 |
- av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, |
|
1631 |
- AV_BUFFERSRC_FLAG_PUSH); |
|
1630 |
+ av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame); |
|
1631 |
+ /* TODO re-add AV_BUFFERSRC_FLAG_PUSH */ |
|
1632 | 1632 |
|
1633 | 1633 |
decoded_frame->pts = AV_NOPTS_VALUE; |
1634 | 1634 |
|
... | ... |
@@ -1737,7 +1737,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output) |
1737 | 1737 |
AV_BUFFERSRC_FLAG_NO_COPY | |
1738 | 1738 |
AV_BUFFERSRC_FLAG_PUSH); |
1739 | 1739 |
} else |
1740 |
- if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) { |
|
1740 |
+ if(av_buffersrc_add_frame_flags(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) { |
|
1741 | 1741 |
av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n"); |
1742 | 1742 |
exit(1); |
1743 | 1743 |
} |
... | ... |
@@ -33,7 +33,6 @@ OBJS = allfilters.o \ |
33 | 33 |
avfilter.o \ |
34 | 34 |
avfiltergraph.o \ |
35 | 35 |
buffer.o \ |
36 |
- buffersink.o \ |
|
37 | 36 |
buffersrc.o \ |
38 | 37 |
drawutils.o \ |
39 | 38 |
fifo.o \ |
... | ... |
@@ -41,7 +40,6 @@ OBJS = allfilters.o \ |
41 | 41 |
graphdump.o \ |
42 | 42 |
graphparser.o \ |
43 | 43 |
sink_buffer.o \ |
44 |
- src_buffer.o \ |
|
45 | 44 |
transform.o \ |
46 | 45 |
video.o \ |
47 | 46 |
|
... | ... |
@@ -135,23 +135,23 @@ static int config_output(AVFilterLink *outlink) |
135 | 135 |
return 0; |
136 | 136 |
} |
137 | 137 |
|
138 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) |
|
138 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref) |
|
139 | 139 |
{ |
140 | 140 |
AConvertContext *aconvert = inlink->dst->priv; |
141 |
- const int n = insamplesref->audio->nb_samples; |
|
141 |
+ const int n = insamplesref->nb_samples; |
|
142 | 142 |
AVFilterLink *const outlink = inlink->dst->outputs[0]; |
143 |
- AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n); |
|
143 |
+ AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n); |
|
144 | 144 |
int ret; |
145 | 145 |
|
146 |
- swr_convert(aconvert->swr, outsamplesref->data, n, |
|
147 |
- (void *)insamplesref->data, n); |
|
146 |
+ swr_convert(aconvert->swr, outsamplesref->extended_data, n, |
|
147 |
+ (void *)insamplesref->extended_data, n); |
|
148 | 148 |
|
149 |
- avfilter_copy_buffer_ref_props(outsamplesref, insamplesref); |
|
150 |
- outsamplesref->audio->channels = outlink->channels; |
|
151 |
- outsamplesref->audio->channel_layout = outlink->channel_layout; |
|
149 |
+ av_frame_copy_props(outsamplesref, insamplesref); |
|
150 |
+ outsamplesref->channels = outlink->channels; |
|
151 |
+ outsamplesref->channel_layout = outlink->channel_layout; |
|
152 | 152 |
|
153 | 153 |
ret = ff_filter_frame(outlink, outsamplesref); |
154 |
- avfilter_unref_buffer(insamplesref); |
|
154 |
+ av_frame_free(&insamplesref); |
|
155 | 155 |
return ret; |
156 | 156 |
} |
157 | 157 |
|
... | ... |
@@ -160,7 +160,6 @@ static const AVFilterPad aconvert_inputs[] = { |
160 | 160 |
.name = "default", |
161 | 161 |
.type = AVMEDIA_TYPE_AUDIO, |
162 | 162 |
.filter_frame = filter_frame, |
163 |
- .min_perms = AV_PERM_READ, |
|
164 | 163 |
}, |
165 | 164 |
{ NULL } |
166 | 165 |
}; |
... | ... |
@@ -232,22 +232,22 @@ static int config_output(AVFilterLink *outlink) |
232 | 232 |
return 0; |
233 | 233 |
} |
234 | 234 |
|
235 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
235 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
236 | 236 |
{ |
237 | 237 |
AudioFadeContext *afade = inlink->dst->priv; |
238 | 238 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
239 |
- int nb_samples = buf->audio->nb_samples; |
|
240 |
- AVFilterBufferRef *out_buf; |
|
239 |
+ int nb_samples = buf->nb_samples; |
|
240 |
+ AVFrame *out_buf; |
|
241 | 241 |
int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base); |
242 | 242 |
|
243 | 243 |
if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) || |
244 | 244 |
( afade->type && (cur_sample + afade->nb_samples < afade->start_sample))) |
245 | 245 |
return ff_filter_frame(outlink, buf); |
246 | 246 |
|
247 |
- if (buf->perms & AV_PERM_WRITE) { |
|
247 |
+ if (av_frame_is_writable(buf)) { |
|
248 | 248 |
out_buf = buf; |
249 | 249 |
} else { |
250 |
- out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples); |
|
250 |
+ out_buf = ff_get_audio_buffer(inlink, nb_samples); |
|
251 | 251 |
if (!out_buf) |
252 | 252 |
return AVERROR(ENOMEM); |
253 | 253 |
out_buf->pts = buf->pts; |
... | ... |
@@ -256,7 +256,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
256 | 256 |
if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) || |
257 | 257 |
( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) { |
258 | 258 |
av_samples_set_silence(out_buf->extended_data, 0, nb_samples, |
259 |
- out_buf->audio->channels, out_buf->format); |
|
259 |
+ out_buf->channels, out_buf->format); |
|
260 | 260 |
} else { |
261 | 261 |
int64_t start; |
262 | 262 |
|
... | ... |
@@ -266,13 +266,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
266 | 266 |
start = afade->start_sample + afade->nb_samples - cur_sample; |
267 | 267 |
|
268 | 268 |
afade->fade_samples(out_buf->extended_data, buf->extended_data, |
269 |
- nb_samples, buf->audio->channels, |
|
269 |
+ nb_samples, buf->channels, |
|
270 | 270 |
afade->type ? -1 : 1, start, |
271 | 271 |
afade->nb_samples, afade->curve); |
272 | 272 |
} |
273 | 273 |
|
274 | 274 |
if (buf != out_buf) |
275 |
- avfilter_unref_buffer(buf); |
|
275 |
+ av_frame_free(&buf); |
|
276 | 276 |
|
277 | 277 |
return ff_filter_frame(outlink, out_buf); |
278 | 278 |
} |
... | ... |
@@ -219,14 +219,14 @@ static inline void copy_samples(int nb_inputs, struct amerge_input in[], |
219 | 219 |
} |
220 | 220 |
} |
221 | 221 |
|
222 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
222 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
223 | 223 |
{ |
224 | 224 |
AVFilterContext *ctx = inlink->dst; |
225 | 225 |
AMergeContext *am = ctx->priv; |
226 | 226 |
AVFilterLink *const outlink = ctx->outputs[0]; |
227 | 227 |
int input_number; |
228 | 228 |
int nb_samples, ns, i; |
229 |
- AVFilterBufferRef *outbuf, *inbuf[SWR_CH_MAX]; |
|
229 |
+ AVFrame *outbuf, *inbuf[SWR_CH_MAX]; |
|
230 | 230 |
uint8_t *ins[SWR_CH_MAX], *outs; |
231 | 231 |
|
232 | 232 |
for (input_number = 0; input_number < am->nb_inputs; input_number++) |
... | ... |
@@ -235,39 +235,40 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
235 | 235 |
av_assert1(input_number < am->nb_inputs); |
236 | 236 |
if (ff_bufqueue_is_full(&am->in[input_number].queue)) { |
237 | 237 |
av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n"); |
238 |
- avfilter_unref_buffer(insamples); |
|
238 |
+ av_frame_free(&insamples); |
|
239 | 239 |
return AVERROR(ENOMEM); |
240 | 240 |
} |
241 |
- ff_bufqueue_add(ctx, &am->in[input_number].queue, insamples); |
|
242 |
- am->in[input_number].nb_samples += insamples->audio->nb_samples; |
|
241 |
+ ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples)); |
|
242 |
+ am->in[input_number].nb_samples += insamples->nb_samples; |
|
243 |
+ av_frame_free(&insamples); |
|
243 | 244 |
nb_samples = am->in[0].nb_samples; |
244 | 245 |
for (i = 1; i < am->nb_inputs; i++) |
245 | 246 |
nb_samples = FFMIN(nb_samples, am->in[i].nb_samples); |
246 | 247 |
if (!nb_samples) |
247 | 248 |
return 0; |
248 | 249 |
|
249 |
- outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples); |
|
250 |
+ outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples); |
|
250 | 251 |
outs = outbuf->data[0]; |
251 | 252 |
for (i = 0; i < am->nb_inputs; i++) { |
252 | 253 |
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); |
253 | 254 |
ins[i] = inbuf[i]->data[0] + |
254 | 255 |
am->in[i].pos * am->in[i].nb_ch * am->bps; |
255 | 256 |
} |
256 |
- avfilter_copy_buffer_ref_props(outbuf, inbuf[0]); |
|
257 |
+ av_frame_copy_props(outbuf, inbuf[0]); |
|
257 | 258 |
outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE : |
258 | 259 |
inbuf[0]->pts + |
259 | 260 |
av_rescale_q(am->in[0].pos, |
260 | 261 |
(AVRational){ 1, ctx->inputs[0]->sample_rate }, |
261 | 262 |
ctx->outputs[0]->time_base); |
262 | 263 |
|
263 |
- outbuf->audio->nb_samples = nb_samples; |
|
264 |
- outbuf->audio->channel_layout = outlink->channel_layout; |
|
265 |
- outbuf->audio->channels = outlink->channels; |
|
264 |
+ outbuf->nb_samples = nb_samples; |
|
265 |
+ outbuf->channel_layout = outlink->channel_layout; |
|
266 |
+ outbuf->channels = outlink->channels; |
|
266 | 267 |
|
267 | 268 |
while (nb_samples) { |
268 | 269 |
ns = nb_samples; |
269 | 270 |
for (i = 0; i < am->nb_inputs; i++) |
270 |
- ns = FFMIN(ns, inbuf[i]->audio->nb_samples - am->in[i].pos); |
|
271 |
+ ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos); |
|
271 | 272 |
/* Unroll the most common sample formats: speed +~350% for the loop, |
272 | 273 |
+~13% overall (including two common decoders) */ |
273 | 274 |
switch (am->bps) { |
... | ... |
@@ -289,9 +290,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
289 | 289 |
for (i = 0; i < am->nb_inputs; i++) { |
290 | 290 |
am->in[i].nb_samples -= ns; |
291 | 291 |
am->in[i].pos += ns; |
292 |
- if (am->in[i].pos == inbuf[i]->audio->nb_samples) { |
|
292 |
+ if (am->in[i].pos == inbuf[i]->nb_samples) { |
|
293 | 293 |
am->in[i].pos = 0; |
294 |
- avfilter_unref_buffer(inbuf[i]); |
|
294 |
+ av_frame_free(&inbuf[i]); |
|
295 | 295 |
ff_bufqueue_get(&am->in[i].queue); |
296 | 296 |
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); |
297 | 297 |
ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL; |
... | ... |
@@ -322,7 +323,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
322 | 322 |
.name = name, |
323 | 323 |
.type = AVMEDIA_TYPE_AUDIO, |
324 | 324 |
.filter_frame = filter_frame, |
325 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
326 | 325 |
}; |
327 | 326 |
if (!name) |
328 | 327 |
return AVERROR(ENOMEM); |
... | ... |
@@ -270,18 +270,18 @@ static int output_frame(AVFilterLink *outlink, int nb_samples) |
270 | 270 |
{ |
271 | 271 |
AVFilterContext *ctx = outlink->src; |
272 | 272 |
MixContext *s = ctx->priv; |
273 |
- AVFilterBufferRef *out_buf, *in_buf; |
|
273 |
+ AVFrame *out_buf, *in_buf; |
|
274 | 274 |
int i; |
275 | 275 |
|
276 | 276 |
calculate_scales(s, nb_samples); |
277 | 277 |
|
278 |
- out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); |
|
278 |
+ out_buf = ff_get_audio_buffer(outlink, nb_samples); |
|
279 | 279 |
if (!out_buf) |
280 | 280 |
return AVERROR(ENOMEM); |
281 | 281 |
|
282 |
- in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); |
|
282 |
+ in_buf = ff_get_audio_buffer(outlink, nb_samples); |
|
283 | 283 |
if (!in_buf) { |
284 |
- avfilter_unref_buffer(out_buf); |
|
284 |
+ av_frame_free(&out_buf); |
|
285 | 285 |
return AVERROR(ENOMEM); |
286 | 286 |
} |
287 | 287 |
|
... | ... |
@@ -303,7 +303,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples) |
303 | 303 |
} |
304 | 304 |
} |
305 | 305 |
} |
306 |
- avfilter_unref_buffer(in_buf); |
|
306 |
+ av_frame_free(&in_buf); |
|
307 | 307 |
|
308 | 308 |
out_buf->pts = s->next_pts; |
309 | 309 |
if (s->next_pts != AV_NOPTS_VALUE) |
... | ... |
@@ -450,7 +450,7 @@ static int request_frame(AVFilterLink *outlink) |
450 | 450 |
return output_frame(outlink, available_samples); |
451 | 451 |
} |
452 | 452 |
|
453 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
453 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
454 | 454 |
{ |
455 | 455 |
AVFilterContext *ctx = inlink->dst; |
456 | 456 |
MixContext *s = ctx->priv; |
... | ... |
@@ -469,16 +469,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
469 | 469 |
if (i == 0) { |
470 | 470 |
int64_t pts = av_rescale_q(buf->pts, inlink->time_base, |
471 | 471 |
outlink->time_base); |
472 |
- ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts); |
|
472 |
+ ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts); |
|
473 | 473 |
if (ret < 0) |
474 | 474 |
goto fail; |
475 | 475 |
} |
476 | 476 |
|
477 | 477 |
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data, |
478 |
- buf->audio->nb_samples); |
|
478 |
+ buf->nb_samples); |
|
479 | 479 |
|
480 | 480 |
fail: |
481 |
- avfilter_unref_buffer(buf); |
|
481 |
+ av_frame_free(&buf); |
|
482 | 482 |
|
483 | 483 |
return ret; |
484 | 484 |
} |
... | ... |
@@ -77,15 +77,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
77 | 77 |
return 0; |
78 | 78 |
} |
79 | 79 |
|
80 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
80 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
81 | 81 |
{ |
82 | 82 |
AVFilterContext *ctx = inlink->dst; |
83 | 83 |
APadContext *apad = ctx->priv; |
84 | 84 |
|
85 | 85 |
if (apad->whole_len) |
86 |
- apad->whole_len -= frame->audio->nb_samples; |
|
86 |
+ apad->whole_len -= frame->nb_samples; |
|
87 | 87 |
|
88 |
- apad->next_pts = frame->pts + av_rescale_q(frame->audio->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); |
|
88 |
+ apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); |
|
89 | 89 |
return ff_filter_frame(ctx->outputs[0], frame); |
90 | 90 |
} |
91 | 91 |
|
... | ... |
@@ -99,7 +99,7 @@ static int request_frame(AVFilterLink *outlink) |
99 | 99 |
|
100 | 100 |
if (ret == AVERROR_EOF) { |
101 | 101 |
int n_out = apad->packet_size; |
102 |
- AVFilterBufferRef *outsamplesref; |
|
102 |
+ AVFrame *outsamplesref; |
|
103 | 103 |
|
104 | 104 |
if (apad->whole_len > 0) { |
105 | 105 |
apad->pad_len = apad->whole_len; |
... | ... |
@@ -113,16 +113,16 @@ static int request_frame(AVFilterLink *outlink) |
113 | 113 |
if(!n_out) |
114 | 114 |
return AVERROR_EOF; |
115 | 115 |
|
116 |
- outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); |
|
116 |
+ outsamplesref = ff_get_audio_buffer(outlink, n_out); |
|
117 | 117 |
if (!outsamplesref) |
118 | 118 |
return AVERROR(ENOMEM); |
119 | 119 |
|
120 |
- av_assert0(outsamplesref->audio->sample_rate == outlink->sample_rate); |
|
121 |
- av_assert0(outsamplesref->audio->nb_samples == n_out); |
|
120 |
+ av_assert0(outsamplesref->sample_rate == outlink->sample_rate); |
|
121 |
+ av_assert0(outsamplesref->nb_samples == n_out); |
|
122 | 122 |
|
123 | 123 |
av_samples_set_silence(outsamplesref->extended_data, 0, |
124 | 124 |
n_out, |
125 |
- outsamplesref->audio->channels, |
|
125 |
+ outsamplesref->channels, |
|
126 | 126 |
outsamplesref->format); |
127 | 127 |
|
128 | 128 |
outsamplesref->pts = apad->next_pts; |
... | ... |
@@ -174,23 +174,23 @@ static int config_output(AVFilterLink *outlink) |
174 | 174 |
return 0; |
175 | 175 |
} |
176 | 176 |
|
177 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) |
|
177 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref) |
|
178 | 178 |
{ |
179 | 179 |
AResampleContext *aresample = inlink->dst->priv; |
180 |
- const int n_in = insamplesref->audio->nb_samples; |
|
180 |
+ const int n_in = insamplesref->nb_samples; |
|
181 | 181 |
int n_out = n_in * aresample->ratio * 2 + 256; |
182 | 182 |
AVFilterLink *const outlink = inlink->dst->outputs[0]; |
183 |
- AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); |
|
183 |
+ AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out); |
|
184 | 184 |
int ret; |
185 | 185 |
|
186 | 186 |
if(!outsamplesref) |
187 | 187 |
return AVERROR(ENOMEM); |
188 | 188 |
|
189 |
- avfilter_copy_buffer_ref_props(outsamplesref, insamplesref); |
|
189 |
+ av_frame_copy_props(outsamplesref, insamplesref); |
|
190 | 190 |
outsamplesref->format = outlink->format; |
191 |
- outsamplesref->audio->channels = outlink->channels; |
|
192 |
- outsamplesref->audio->channel_layout = outlink->channel_layout; |
|
193 |
- outsamplesref->audio->sample_rate = outlink->sample_rate; |
|
191 |
+ outsamplesref->channels = outlink->channels; |
|
192 |
+ outsamplesref->channel_layout = outlink->channel_layout; |
|
193 |
+ outsamplesref->sample_rate = outlink->sample_rate; |
|
194 | 194 |
|
195 | 195 |
if(insamplesref->pts != AV_NOPTS_VALUE) { |
196 | 196 |
int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den); |
... | ... |
@@ -203,16 +203,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) |
203 | 203 |
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, |
204 | 204 |
(void *)insamplesref->extended_data, n_in); |
205 | 205 |
if (n_out <= 0) { |
206 |
- avfilter_unref_buffer(outsamplesref); |
|
207 |
- avfilter_unref_buffer(insamplesref); |
|
206 |
+ av_frame_free(&outsamplesref); |
|
207 |
+ av_frame_free(&insamplesref); |
|
208 | 208 |
return 0; |
209 | 209 |
} |
210 | 210 |
|
211 |
- outsamplesref->audio->nb_samples = n_out; |
|
211 |
+ outsamplesref->nb_samples = n_out; |
|
212 | 212 |
|
213 | 213 |
ret = ff_filter_frame(outlink, outsamplesref); |
214 | 214 |
aresample->req_fullfilled= 1; |
215 |
- avfilter_unref_buffer(insamplesref); |
|
215 |
+ av_frame_free(&insamplesref); |
|
216 | 216 |
return ret; |
217 | 217 |
} |
218 | 218 |
|
... | ... |
@@ -229,20 +229,20 @@ static int request_frame(AVFilterLink *outlink) |
229 | 229 |
}while(!aresample->req_fullfilled && ret>=0); |
230 | 230 |
|
231 | 231 |
if (ret == AVERROR_EOF) { |
232 |
- AVFilterBufferRef *outsamplesref; |
|
232 |
+ AVFrame *outsamplesref; |
|
233 | 233 |
int n_out = 4096; |
234 | 234 |
|
235 |
- outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); |
|
235 |
+ outsamplesref = ff_get_audio_buffer(outlink, n_out); |
|
236 | 236 |
if (!outsamplesref) |
237 | 237 |
return AVERROR(ENOMEM); |
238 | 238 |
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0); |
239 | 239 |
if (n_out <= 0) { |
240 |
- avfilter_unref_buffer(outsamplesref); |
|
240 |
+ av_frame_free(&outsamplesref); |
|
241 | 241 |
return (n_out == 0) ? AVERROR_EOF : n_out; |
242 | 242 |
} |
243 | 243 |
|
244 |
- outsamplesref->audio->sample_rate = outlink->sample_rate; |
|
245 |
- outsamplesref->audio->nb_samples = n_out; |
|
244 |
+ outsamplesref->sample_rate = outlink->sample_rate; |
|
245 |
+ outsamplesref->nb_samples = n_out; |
|
246 | 246 |
#if 0 |
247 | 247 |
outsamplesref->pts = aresample->next_pts; |
248 | 248 |
if(aresample->next_pts != AV_NOPTS_VALUE) |
... | ... |
@@ -263,7 +263,6 @@ static const AVFilterPad aresample_inputs[] = { |
263 | 263 |
.name = "default", |
264 | 264 |
.type = AVMEDIA_TYPE_AUDIO, |
265 | 265 |
.filter_frame = filter_frame, |
266 |
- .min_perms = AV_PERM_READ, |
|
267 | 266 |
}, |
268 | 267 |
{ NULL }, |
269 | 268 |
}; |
... | ... |
@@ -93,7 +93,7 @@ static int config_props_output(AVFilterLink *outlink) |
93 | 93 |
static int push_samples(AVFilterLink *outlink) |
94 | 94 |
{ |
95 | 95 |
ASNSContext *asns = outlink->src->priv; |
96 |
- AVFilterBufferRef *outsamples = NULL; |
|
96 |
+ AVFrame *outsamples = NULL; |
|
97 | 97 |
int nb_out_samples, nb_pad_samples; |
98 | 98 |
|
99 | 99 |
if (asns->pad) { |
... | ... |
@@ -107,7 +107,7 @@ static int push_samples(AVFilterLink *outlink) |
107 | 107 |
if (!nb_out_samples) |
108 | 108 |
return 0; |
109 | 109 |
|
110 |
- outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_out_samples); |
|
110 |
+ outsamples = ff_get_audio_buffer(outlink, nb_out_samples); |
|
111 | 111 |
av_assert0(outsamples); |
112 | 112 |
|
113 | 113 |
av_audio_fifo_read(asns->fifo, |
... | ... |
@@ -117,9 +117,9 @@ static int push_samples(AVFilterLink *outlink) |
117 | 117 |
av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples, |
118 | 118 |
nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout), |
119 | 119 |
outlink->format); |
120 |
- outsamples->audio->nb_samples = nb_out_samples; |
|
121 |
- outsamples->audio->channel_layout = outlink->channel_layout; |
|
122 |
- outsamples->audio->sample_rate = outlink->sample_rate; |
|
120 |
+ outsamples->nb_samples = nb_out_samples; |
|
121 |
+ outsamples->channel_layout = outlink->channel_layout; |
|
122 |
+ outsamples->sample_rate = outlink->sample_rate; |
|
123 | 123 |
outsamples->pts = asns->next_out_pts; |
124 | 124 |
|
125 | 125 |
if (asns->next_out_pts != AV_NOPTS_VALUE) |
... | ... |
@@ -130,13 +130,13 @@ static int push_samples(AVFilterLink *outlink) |
130 | 130 |
return nb_out_samples; |
131 | 131 |
} |
132 | 132 |
|
133 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
133 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
134 | 134 |
{ |
135 | 135 |
AVFilterContext *ctx = inlink->dst; |
136 | 136 |
ASNSContext *asns = ctx->priv; |
137 | 137 |
AVFilterLink *outlink = ctx->outputs[0]; |
138 | 138 |
int ret; |
139 |
- int nb_samples = insamples->audio->nb_samples; |
|
139 |
+ int nb_samples = insamples->nb_samples; |
|
140 | 140 |
|
141 | 141 |
if (av_audio_fifo_space(asns->fifo) < nb_samples) { |
142 | 142 |
av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples); |
... | ... |
@@ -150,7 +150,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
150 | 150 |
av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples); |
151 | 151 |
if (asns->next_out_pts == AV_NOPTS_VALUE) |
152 | 152 |
asns->next_out_pts = insamples->pts; |
153 |
- avfilter_unref_buffer(insamples); |
|
153 |
+ av_frame_free(&insamples); |
|
154 | 154 |
|
155 | 155 |
while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples) |
156 | 156 |
push_samples(outlink); |
... | ... |
@@ -177,10 +177,10 @@ static int request_frame(AVFilterLink *outlink) |
177 | 177 |
|
178 | 178 |
static const AVFilterPad asetnsamples_inputs[] = { |
179 | 179 |
{ |
180 |
- .name = "default", |
|
181 |
- .type = AVMEDIA_TYPE_AUDIO, |
|
182 |
- .filter_frame = filter_frame, |
|
183 |
- .min_perms = AV_PERM_READ | AV_PERM_WRITE, |
|
180 |
+ .name = "default", |
|
181 |
+ .type = AVMEDIA_TYPE_AUDIO, |
|
182 |
+ .filter_frame = filter_frame, |
|
183 |
+ .needs_writable = 1, |
|
184 | 184 |
}, |
185 | 185 |
{ NULL } |
186 | 186 |
}; |
... | ... |
@@ -55,16 +55,16 @@ static void uninit(AVFilterContext *ctx) |
55 | 55 |
av_freep(&s->plane_checksums); |
56 | 56 |
} |
57 | 57 |
|
58 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
58 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
59 | 59 |
{ |
60 | 60 |
AVFilterContext *ctx = inlink->dst; |
61 | 61 |
AShowInfoContext *s = ctx->priv; |
62 | 62 |
char chlayout_str[128]; |
63 | 63 |
uint32_t checksum = 0; |
64 |
- int channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout); |
|
64 |
+ int channels = av_get_channel_layout_nb_channels(buf->channel_layout); |
|
65 | 65 |
int planar = av_sample_fmt_is_planar(buf->format); |
66 | 66 |
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels); |
67 |
- int data_size = buf->audio->nb_samples * block_align; |
|
67 |
+ int data_size = buf->nb_samples * block_align; |
|
68 | 68 |
int planes = planar ? channels : 1; |
69 | 69 |
int i; |
70 | 70 |
void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums)); |
... | ... |
@@ -82,7 +82,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
82 | 82 |
} |
83 | 83 |
|
84 | 84 |
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1, |
85 |
- buf->audio->channel_layout); |
|
85 |
+ buf->channel_layout); |
|
86 | 86 |
|
87 | 87 |
av_log(ctx, AV_LOG_INFO, |
88 | 88 |
"n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" " |
... | ... |
@@ -90,9 +90,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
90 | 90 |
"checksum:%08X ", |
91 | 91 |
s->frame, |
92 | 92 |
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base), |
93 |
- buf->pos, |
|
94 |
- av_get_sample_fmt_name(buf->format), buf->audio->channels, chlayout_str, |
|
95 |
- buf->audio->sample_rate, buf->audio->nb_samples, |
|
93 |
+ av_frame_get_pkt_pos(buf), |
|
94 |
+ av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str, |
|
95 |
+ buf->sample_rate, buf->nb_samples, |
|
96 | 96 |
checksum); |
97 | 97 |
|
98 | 98 |
av_log(ctx, AV_LOG_INFO, "plane_checksums: [ "); |
... | ... |
@@ -110,7 +110,6 @@ static const AVFilterPad inputs[] = { |
110 | 110 |
.type = AVMEDIA_TYPE_AUDIO, |
111 | 111 |
.get_audio_buffer = ff_null_get_audio_buffer, |
112 | 112 |
.filter_frame = filter_frame, |
113 |
- .min_perms = AV_PERM_READ, |
|
114 | 113 |
}, |
115 | 114 |
{ NULL }, |
116 | 115 |
}; |
... | ... |
@@ -48,7 +48,7 @@ typedef struct { |
48 | 48 |
AVExpr *expr; |
49 | 49 |
double var_values[VAR_NB]; |
50 | 50 |
struct buf_queue { |
51 |
- AVFilterBufferRef *buf[QUEUE_SIZE]; |
|
51 |
+ AVFrame *buf[QUEUE_SIZE]; |
|
52 | 52 |
unsigned tail, nb; |
53 | 53 |
/* buf[tail] is the oldest, |
54 | 54 |
buf[(tail + nb) % QUEUE_SIZE] is where the next is added */ |
... | ... |
@@ -111,16 +111,16 @@ static int send_out(AVFilterContext *ctx, int out_id) |
111 | 111 |
{ |
112 | 112 |
AStreamSyncContext *as = ctx->priv; |
113 | 113 |
struct buf_queue *queue = &as->queue[out_id]; |
114 |
- AVFilterBufferRef *buf = queue->buf[queue->tail]; |
|
114 |
+ AVFrame *buf = queue->buf[queue->tail]; |
|
115 | 115 |
int ret; |
116 | 116 |
|
117 | 117 |
queue->buf[queue->tail] = NULL; |
118 | 118 |
as->var_values[VAR_B1 + out_id]++; |
119 |
- as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples; |
|
119 |
+ as->var_values[VAR_S1 + out_id] += buf->nb_samples; |
|
120 | 120 |
if (buf->pts != AV_NOPTS_VALUE) |
121 | 121 |
as->var_values[VAR_T1 + out_id] = |
122 | 122 |
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts; |
123 |
- as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples / |
|
123 |
+ as->var_values[VAR_T1 + out_id] += buf->nb_samples / |
|
124 | 124 |
(double)ctx->inputs[out_id]->sample_rate; |
125 | 125 |
ret = ff_filter_frame(ctx->outputs[out_id], buf); |
126 | 126 |
queue->nb--; |
... | ... |
@@ -167,7 +167,7 @@ static int request_frame(AVFilterLink *outlink) |
167 | 167 |
return 0; |
168 | 168 |
} |
169 | 169 |
|
170 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
170 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
171 | 171 |
{ |
172 | 172 |
AVFilterContext *ctx = inlink->dst; |
173 | 173 |
AStreamSyncContext *as = ctx->priv; |
... | ... |
@@ -185,12 +185,10 @@ static const AVFilterPad astreamsync_inputs[] = { |
185 | 185 |
.name = "in1", |
186 | 186 |
.type = AVMEDIA_TYPE_AUDIO, |
187 | 187 |
.filter_frame = filter_frame, |
188 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
189 | 188 |
},{ |
190 | 189 |
.name = "in2", |
191 | 190 |
.type = AVMEDIA_TYPE_AUDIO, |
192 | 191 |
.filter_frame = filter_frame, |
193 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
194 | 192 |
}, |
195 | 193 |
{ NULL } |
196 | 194 |
}; |
... | ... |
@@ -152,14 +152,13 @@ static int request_frame(AVFilterLink *link) |
152 | 152 |
handle_trimming(ctx); |
153 | 153 |
|
154 | 154 |
if (nb_samples = get_delay(s)) { |
155 |
- AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE, |
|
156 |
- nb_samples); |
|
155 |
+ AVFrame *buf = ff_get_audio_buffer(link, nb_samples); |
|
157 | 156 |
if (!buf) |
158 | 157 |
return AVERROR(ENOMEM); |
159 | 158 |
ret = avresample_convert(s->avr, buf->extended_data, |
160 | 159 |
buf->linesize[0], nb_samples, NULL, 0, 0); |
161 | 160 |
if (ret <= 0) { |
162 |
- avfilter_unref_bufferp(&buf); |
|
161 |
+ av_frame_free(&buf); |
|
163 | 162 |
return (ret < 0) ? ret : AVERROR_EOF; |
164 | 163 |
} |
165 | 164 |
|
... | ... |
@@ -171,20 +170,20 @@ static int request_frame(AVFilterLink *link) |
171 | 171 |
return ret; |
172 | 172 |
} |
173 | 173 |
|
174 |
-static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf) |
|
174 |
+static int write_to_fifo(ASyncContext *s, AVFrame *buf) |
|
175 | 175 |
{ |
176 | 176 |
int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data, |
177 |
- buf->linesize[0], buf->audio->nb_samples); |
|
178 |
- avfilter_unref_buffer(buf); |
|
177 |
+ buf->linesize[0], buf->nb_samples); |
|
178 |
+ av_frame_free(&buf); |
|
179 | 179 |
return ret; |
180 | 180 |
} |
181 | 181 |
|
182 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
182 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
183 | 183 |
{ |
184 | 184 |
AVFilterContext *ctx = inlink->dst; |
185 | 185 |
ASyncContext *s = ctx->priv; |
186 | 186 |
AVFilterLink *outlink = ctx->outputs[0]; |
187 |
- int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout); |
|
187 |
+ int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout); |
|
188 | 188 |
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts : |
189 | 189 |
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); |
190 | 190 |
int out_size, ret; |
... | ... |
@@ -223,8 +222,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
223 | 223 |
} |
224 | 224 |
|
225 | 225 |
if (out_size > 0) { |
226 |
- AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, |
|
227 |
- out_size); |
|
226 |
+ AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size); |
|
228 | 227 |
if (!buf_out) { |
229 | 228 |
ret = AVERROR(ENOMEM); |
230 | 229 |
goto fail; |
... | ... |
@@ -266,11 +264,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
266 | 266 |
|
267 | 267 |
s->pts = pts - avresample_get_delay(s->avr); |
268 | 268 |
ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data, |
269 |
- buf->linesize[0], buf->audio->nb_samples); |
|
269 |
+ buf->linesize[0], buf->nb_samples); |
|
270 | 270 |
|
271 | 271 |
s->first_frame = 0; |
272 | 272 |
fail: |
273 |
- avfilter_unref_buffer(buf); |
|
273 |
+ av_frame_free(&buf); |
|
274 | 274 |
|
275 | 275 |
return ret; |
276 | 276 |
} |
... | ... |
@@ -140,7 +140,7 @@ typedef struct { |
140 | 140 |
|
141 | 141 |
// for managing AVFilterPad.request_frame and AVFilterPad.filter_frame |
142 | 142 |
int request_fulfilled; |
143 |
- AVFilterBufferRef *dst_buffer; |
|
143 |
+ AVFrame *dst_buffer; |
|
144 | 144 |
uint8_t *dst; |
145 | 145 |
uint8_t *dst_end; |
146 | 146 |
uint64_t nsamples_in; |
... | ... |
@@ -177,7 +177,7 @@ static void yae_clear(ATempoContext *atempo) |
177 | 177 |
atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2); |
178 | 178 |
atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2); |
179 | 179 |
|
180 |
- avfilter_unref_bufferp(&atempo->dst_buffer); |
|
180 |
+ av_frame_free(&atempo->dst_buffer); |
|
181 | 181 |
atempo->dst = NULL; |
182 | 182 |
atempo->dst_end = NULL; |
183 | 183 |
|
... | ... |
@@ -1024,8 +1024,8 @@ static void push_samples(ATempoContext *atempo, |
1024 | 1024 |
AVFilterLink *outlink, |
1025 | 1025 |
int n_out) |
1026 | 1026 |
{ |
1027 |
- atempo->dst_buffer->audio->sample_rate = outlink->sample_rate; |
|
1028 |
- atempo->dst_buffer->audio->nb_samples = n_out; |
|
1027 |
+ atempo->dst_buffer->sample_rate = outlink->sample_rate; |
|
1028 |
+ atempo->dst_buffer->nb_samples = n_out; |
|
1029 | 1029 |
|
1030 | 1030 |
// adjust the PTS: |
1031 | 1031 |
atempo->dst_buffer->pts = |
... | ... |
@@ -1041,14 +1041,13 @@ static void push_samples(ATempoContext *atempo, |
1041 | 1041 |
atempo->nsamples_out += n_out; |
1042 | 1042 |
} |
1043 | 1043 |
|
1044 |
-static int filter_frame(AVFilterLink *inlink, |
|
1045 |
- AVFilterBufferRef *src_buffer) |
|
1044 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer) |
|
1046 | 1045 |
{ |
1047 | 1046 |
AVFilterContext *ctx = inlink->dst; |
1048 | 1047 |
ATempoContext *atempo = ctx->priv; |
1049 | 1048 |
AVFilterLink *outlink = ctx->outputs[0]; |
1050 | 1049 |
|
1051 |
- int n_in = src_buffer->audio->nb_samples; |
|
1050 |
+ int n_in = src_buffer->nb_samples; |
|
1052 | 1051 |
int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo); |
1053 | 1052 |
|
1054 | 1053 |
const uint8_t *src = src_buffer->data[0]; |
... | ... |
@@ -1056,10 +1055,8 @@ static int filter_frame(AVFilterLink *inlink, |
1056 | 1056 |
|
1057 | 1057 |
while (src < src_end) { |
1058 | 1058 |
if (!atempo->dst_buffer) { |
1059 |
- atempo->dst_buffer = ff_get_audio_buffer(outlink, |
|
1060 |
- AV_PERM_WRITE, |
|
1061 |
- n_out); |
|
1062 |
- avfilter_copy_buffer_ref_props(atempo->dst_buffer, src_buffer); |
|
1059 |
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out); |
|
1060 |
+ av_frame_copy_props(atempo->dst_buffer, src_buffer); |
|
1063 | 1061 |
|
1064 | 1062 |
atempo->dst = atempo->dst_buffer->data[0]; |
1065 | 1063 |
atempo->dst_end = atempo->dst + n_out * atempo->stride; |
... | ... |
@@ -1074,7 +1071,7 @@ static int filter_frame(AVFilterLink *inlink, |
1074 | 1074 |
} |
1075 | 1075 |
|
1076 | 1076 |
atempo->nsamples_in += n_in; |
1077 |
- avfilter_unref_bufferp(&src_buffer); |
|
1077 |
+ av_frame_free(&src_buffer); |
|
1078 | 1078 |
return 0; |
1079 | 1079 |
} |
1080 | 1080 |
|
... | ... |
@@ -1098,9 +1095,7 @@ static int request_frame(AVFilterLink *outlink) |
1098 | 1098 |
|
1099 | 1099 |
while (err == AVERROR(EAGAIN)) { |
1100 | 1100 |
if (!atempo->dst_buffer) { |
1101 |
- atempo->dst_buffer = ff_get_audio_buffer(outlink, |
|
1102 |
- AV_PERM_WRITE, |
|
1103 |
- n_max); |
|
1101 |
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max); |
|
1104 | 1102 |
|
1105 | 1103 |
atempo->dst = atempo->dst_buffer->data[0]; |
1106 | 1104 |
atempo->dst_end = atempo->dst + n_max * atempo->stride; |
... | ... |
@@ -1116,7 +1111,7 @@ static int request_frame(AVFilterLink *outlink) |
1116 | 1116 |
} |
1117 | 1117 |
} |
1118 | 1118 |
|
1119 |
- avfilter_unref_bufferp(&atempo->dst_buffer); |
|
1119 |
+ av_frame_free(&atempo->dst_buffer); |
|
1120 | 1120 |
atempo->dst = NULL; |
1121 | 1121 |
atempo->dst_end = NULL; |
1122 | 1122 |
|
... | ... |
@@ -1142,7 +1137,6 @@ static const AVFilterPad atempo_inputs[] = { |
1142 | 1142 |
.type = AVMEDIA_TYPE_AUDIO, |
1143 | 1143 |
.filter_frame = filter_frame, |
1144 | 1144 |
.config_props = config_props, |
1145 |
- .min_perms = AV_PERM_READ, |
|
1146 | 1145 |
}, |
1147 | 1146 |
{ NULL } |
1148 | 1147 |
}; |
... | ... |
@@ -392,24 +392,24 @@ static int config_output(AVFilterLink *outlink) |
392 | 392 |
return 0; |
393 | 393 |
} |
394 | 394 |
|
395 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
395 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
396 | 396 |
{ |
397 | 397 |
BiquadsContext *p = inlink->dst->priv; |
398 | 398 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
399 |
- AVFilterBufferRef *out_buf; |
|
400 |
- int nb_samples = buf->audio->nb_samples; |
|
399 |
+ AVFrame *out_buf; |
|
400 |
+ int nb_samples = buf->nb_samples; |
|
401 | 401 |
int ch; |
402 | 402 |
|
403 |
- if (buf->perms & AV_PERM_WRITE) { |
|
403 |
+ if (av_frame_is_writable(buf)) { |
|
404 | 404 |
out_buf = buf; |
405 | 405 |
} else { |
406 |
- out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples); |
|
406 |
+ out_buf = ff_get_audio_buffer(inlink, nb_samples); |
|
407 | 407 |
if (!out_buf) |
408 | 408 |
return AVERROR(ENOMEM); |
409 | 409 |
out_buf->pts = buf->pts; |
410 | 410 |
} |
411 | 411 |
|
412 |
- for (ch = 0; ch < buf->audio->channels; ch++) |
|
412 |
+ for (ch = 0; ch < buf->channels; ch++) |
|
413 | 413 |
p->filter(buf->extended_data[ch], |
414 | 414 |
out_buf->extended_data[ch], nb_samples, |
415 | 415 |
&p->cache[ch].i1, &p->cache[ch].i2, |
... | ... |
@@ -417,7 +417,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
417 | 417 |
p->b0, p->b1, p->b2, p->a1, p->a2); |
418 | 418 |
|
419 | 419 |
if (buf != out_buf) |
420 |
- avfilter_unref_buffer(buf); |
|
420 |
+ av_frame_free(&buf); |
|
421 | 421 |
|
422 | 422 |
return ff_filter_frame(outlink, out_buf); |
423 | 423 |
} |
... | ... |
@@ -312,7 +312,7 @@ static int channelmap_query_formats(AVFilterContext *ctx) |
312 | 312 |
return 0; |
313 | 313 |
} |
314 | 314 |
|
315 |
-static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
315 |
+static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
316 | 316 |
{ |
317 | 317 |
AVFilterContext *ctx = inlink->dst; |
318 | 318 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -330,7 +330,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
330 | 330 |
uint8_t **new_extended_data = |
331 | 331 |
av_mallocz(nch_out * sizeof(*buf->extended_data)); |
332 | 332 |
if (!new_extended_data) { |
333 |
- avfilter_unref_buffer(buf); |
|
333 |
+ av_frame_free(&buf); |
|
334 | 334 |
return AVERROR(ENOMEM); |
335 | 335 |
} |
336 | 336 |
if (buf->extended_data == buf->data) { |
... | ... |
@@ -105,13 +105,13 @@ static int query_formats(AVFilterContext *ctx) |
105 | 105 |
return 0; |
106 | 106 |
} |
107 | 107 |
|
108 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
108 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
109 | 109 |
{ |
110 | 110 |
AVFilterContext *ctx = inlink->dst; |
111 | 111 |
int i, ret = 0; |
112 | 112 |
|
113 | 113 |
for (i = 0; i < ctx->nb_outputs; i++) { |
114 |
- AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE); |
|
114 |
+ AVFrame *buf_out = av_frame_clone(buf); |
|
115 | 115 |
|
116 | 116 |
if (!buf_out) { |
117 | 117 |
ret = AVERROR(ENOMEM); |
... | ... |
@@ -119,14 +119,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
119 | 119 |
} |
120 | 120 |
|
121 | 121 |
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i]; |
122 |
- buf_out->audio->channel_layout = |
|
123 |
- av_channel_layout_extract_channel(buf->audio->channel_layout, i); |
|
122 |
+ buf_out->channel_layout = |
|
123 |
+ av_channel_layout_extract_channel(buf->channel_layout, i); |
|
124 | 124 |
|
125 | 125 |
ret = ff_filter_frame(ctx->outputs[i], buf_out); |
126 | 126 |
if (ret < 0) |
127 | 127 |
break; |
128 | 128 |
} |
129 |
- avfilter_unref_buffer(buf); |
|
129 |
+ av_frame_free(&buf); |
|
130 | 130 |
return ret; |
131 | 131 |
} |
132 | 132 |
|
... | ... |
@@ -109,18 +109,18 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in |
109 | 109 |
return out; |
110 | 110 |
} |
111 | 111 |
|
112 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
112 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
113 | 113 |
{ |
114 | 114 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
115 | 115 |
int16_t *taps, *endin, *in, *out; |
116 |
- AVFilterBufferRef *outsamples = |
|
117 |
- ff_get_audio_buffer(inlink, AV_PERM_WRITE, |
|
118 |
- insamples->audio->nb_samples); |
|
116 |
+ AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples); |
|
119 | 117 |
int ret; |
120 | 118 |
|
121 |
- if (!outsamples) |
|
119 |
+ if (!outsamples) { |
|
120 |
+ av_frame_free(&insamples); |
|
122 | 121 |
return AVERROR(ENOMEM); |
123 |
- avfilter_copy_buffer_ref_props(outsamples, insamples); |
|
122 |
+ } |
|
123 |
+ av_frame_copy_props(outsamples, insamples); |
|
124 | 124 |
|
125 | 125 |
taps = ((EarwaxContext *)inlink->dst->priv)->taps; |
126 | 126 |
out = (int16_t *)outsamples->data[0]; |
... | ... |
@@ -131,14 +131,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
131 | 131 |
out = scalarproduct(taps, taps + NUMTAPS, out); |
132 | 132 |
|
133 | 133 |
// process current input |
134 |
- endin = in + insamples->audio->nb_samples * 2 - NUMTAPS; |
|
134 |
+ endin = in + insamples->nb_samples * 2 - NUMTAPS; |
|
135 | 135 |
scalarproduct(in, endin, out); |
136 | 136 |
|
137 | 137 |
// save part of input for next round |
138 | 138 |
memcpy(taps, endin, NUMTAPS * sizeof(*taps)); |
139 | 139 |
|
140 | 140 |
ret = ff_filter_frame(outlink, outsamples); |
141 |
- avfilter_unref_buffer(insamples); |
|
141 |
+ av_frame_free(&insamples); |
|
142 | 142 |
return ret; |
143 | 143 |
} |
144 | 144 |
|
... | ... |
@@ -147,7 +147,6 @@ static const AVFilterPad earwax_inputs[] = { |
147 | 147 |
.name = "default", |
148 | 148 |
.type = AVMEDIA_TYPE_AUDIO, |
149 | 149 |
.filter_frame = filter_frame, |
150 |
- .min_perms = AV_PERM_READ, |
|
151 | 150 |
}, |
152 | 151 |
{ NULL } |
153 | 152 |
}; |
... | ... |
@@ -56,24 +56,14 @@ typedef struct JoinContext { |
56 | 56 |
/** |
57 | 57 |
* Temporary storage for input frames, until we get one on each input. |
58 | 58 |
*/ |
59 |
- AVFilterBufferRef **input_frames; |
|
59 |
+ AVFrame **input_frames; |
|
60 | 60 |
|
61 | 61 |
/** |
62 |
- * Temporary storage for data pointers, for assembling the output buffer. |
|
62 |
+ * Temporary storage for buffer references, for assembling the output frame. |
|
63 | 63 |
*/ |
64 |
- uint8_t **data; |
|
64 |
+ AVBufferRef **buffers; |
|
65 | 65 |
} JoinContext; |
66 | 66 |
|
67 |
-/** |
|
68 |
- * To avoid copying the data from input buffers, this filter creates |
|
69 |
- * a custom output buffer that stores references to all inputs and |
|
70 |
- * unrefs them on free. |
|
71 |
- */ |
|
72 |
-typedef struct JoinBufferPriv { |
|
73 |
- AVFilterBufferRef **in_buffers; |
|
74 |
- int nb_in_buffers; |
|
75 |
-} JoinBufferPriv; |
|
76 |
- |
|
77 | 67 |
#define OFFSET(x) offsetof(JoinContext, x) |
78 | 68 |
#define A AV_OPT_FLAG_AUDIO_PARAM |
79 | 69 |
#define F AV_OPT_FLAG_FILTERING_PARAM |
... | ... |
@@ -94,7 +84,7 @@ static const AVClass join_class = { |
94 | 94 |
.version = LIBAVUTIL_VERSION_INT, |
95 | 95 |
}; |
96 | 96 |
|
97 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
97 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
98 | 98 |
{ |
99 | 99 |
AVFilterContext *ctx = link->dst; |
100 | 100 |
JoinContext *s = ctx->priv; |
... | ... |
@@ -105,7 +95,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
105 | 105 |
break; |
106 | 106 |
av_assert0(i < ctx->nb_inputs); |
107 | 107 |
av_assert0(!s->input_frames[i]); |
108 |
- s->input_frames[i] = buf; |
|
108 |
+ s->input_frames[i] = frame; |
|
109 | 109 |
|
110 | 110 |
return 0; |
111 | 111 |
} |
... | ... |
@@ -207,9 +197,9 @@ static int join_init(AVFilterContext *ctx, const char *args) |
207 | 207 |
|
208 | 208 |
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); |
209 | 209 |
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels); |
210 |
- s->data = av_mallocz(sizeof(*s->data) * s->nb_channels); |
|
210 |
+ s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels); |
|
211 | 211 |
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs); |
212 |
- if (!s->channels || !s->data || !s->input_frames) { |
|
212 |
+ if (!s->channels || !s->buffers|| !s->input_frames) { |
|
213 | 213 |
ret = AVERROR(ENOMEM); |
214 | 214 |
goto fail; |
215 | 215 |
} |
... | ... |
@@ -248,11 +238,11 @@ static void join_uninit(AVFilterContext *ctx) |
248 | 248 |
|
249 | 249 |
for (i = 0; i < ctx->nb_inputs; i++) { |
250 | 250 |
av_freep(&ctx->input_pads[i].name); |
251 |
- avfilter_unref_bufferp(&s->input_frames[i]); |
|
251 |
+ av_frame_free(&s->input_frames[i]); |
|
252 | 252 |
} |
253 | 253 |
|
254 | 254 |
av_freep(&s->channels); |
255 |
- av_freep(&s->data); |
|
255 |
+ av_freep(&s->buffers); |
|
256 | 256 |
av_freep(&s->input_frames); |
257 | 257 |
} |
258 | 258 |
|
... | ... |
@@ -394,34 +384,14 @@ fail: |
394 | 394 |
return ret; |
395 | 395 |
} |
396 | 396 |
|
397 |
-static void join_free_buffer(AVFilterBuffer *buf) |
|
398 |
-{ |
|
399 |
- JoinBufferPriv *priv = buf->priv; |
|
400 |
- |
|
401 |
- if (priv) { |
|
402 |
- int i; |
|
403 |
- |
|
404 |
- for (i = 0; i < priv->nb_in_buffers; i++) |
|
405 |
- avfilter_unref_bufferp(&priv->in_buffers[i]); |
|
406 |
- |
|
407 |
- av_freep(&priv->in_buffers); |
|
408 |
- av_freep(&buf->priv); |
|
409 |
- } |
|
410 |
- |
|
411 |
- if (buf->extended_data != buf->data) |
|
412 |
- av_freep(&buf->extended_data); |
|
413 |
- av_freep(&buf); |
|
414 |
-} |
|
415 |
- |
|
416 | 397 |
static int join_request_frame(AVFilterLink *outlink) |
417 | 398 |
{ |
418 | 399 |
AVFilterContext *ctx = outlink->src; |
419 | 400 |
JoinContext *s = ctx->priv; |
420 |
- AVFilterBufferRef *buf; |
|
421 |
- JoinBufferPriv *priv; |
|
401 |
+ AVFrame *frame; |
|
422 | 402 |
int linesize = INT_MAX; |
423 |
- int perms = ~0; |
|
424 | 403 |
int nb_samples = 0; |
404 |
+ int nb_buffers = 0; |
|
425 | 405 |
int i, j, ret; |
426 | 406 |
|
427 | 407 |
/* get a frame on each input */ |
... | ... |
@@ -434,54 +404,95 @@ static int join_request_frame(AVFilterLink *outlink) |
434 | 434 |
|
435 | 435 |
/* request the same number of samples on all inputs */ |
436 | 436 |
if (i == 0) { |
437 |
- nb_samples = s->input_frames[0]->audio->nb_samples; |
|
437 |
+ nb_samples = s->input_frames[0]->nb_samples; |
|
438 | 438 |
|
439 | 439 |
for (j = 1; !i && j < ctx->nb_inputs; j++) |
440 | 440 |
ctx->inputs[j]->request_samples = nb_samples; |
441 | 441 |
} |
442 | 442 |
} |
443 | 443 |
|
444 |
+ /* setup the output frame */ |
|
445 |
+ frame = av_frame_alloc(); |
|
446 |
+ if (!frame) |
|
447 |
+ return AVERROR(ENOMEM); |
|
448 |
+ if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) { |
|
449 |
+ frame->extended_data = av_mallocz(s->nb_channels * |
|
450 |
+ sizeof(*frame->extended_data)); |
|
451 |
+ if (!frame->extended_data) { |
|
452 |
+ ret = AVERROR(ENOMEM); |
|
453 |
+ goto fail; |
|
454 |
+ } |
|
455 |
+ } |
|
456 |
+ |
|
457 |
+ /* copy the data pointers */ |
|
444 | 458 |
for (i = 0; i < s->nb_channels; i++) { |
445 | 459 |
ChannelMap *ch = &s->channels[i]; |
446 |
- AVFilterBufferRef *cur_buf = s->input_frames[ch->input]; |
|
447 |
- |
|
448 |
- s->data[i] = cur_buf->extended_data[ch->in_channel_idx]; |
|
449 |
- linesize = FFMIN(linesize, cur_buf->linesize[0]); |
|
450 |
- perms &= cur_buf->perms; |
|
451 |
- } |
|
460 |
+ AVFrame *cur = s->input_frames[ch->input]; |
|
461 |
+ AVBufferRef *buf; |
|
452 | 462 |
|
453 |
- av_assert0(nb_samples > 0); |
|
454 |
- buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms, |
|
455 |
- nb_samples, outlink->format, |
|
456 |
- outlink->channel_layout); |
|
457 |
- if (!buf) |
|
458 |
- return AVERROR(ENOMEM); |
|
463 |
+ frame->extended_data[i] = cur->extended_data[ch->in_channel_idx]; |
|
464 |
+ linesize = FFMIN(linesize, cur->linesize[0]); |
|
459 | 465 |
|
460 |
- buf->buf->free = join_free_buffer; |
|
461 |
- buf->pts = s->input_frames[0]->pts; |
|
466 |
+ /* add the buffer where this plan is stored to the list if it's |
|
467 |
+ * not already there */ |
|
468 |
+ buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx); |
|
469 |
+ if (!buf) { |
|
470 |
+ ret = AVERROR(EINVAL); |
|
471 |
+ goto fail; |
|
472 |
+ } |
|
473 |
+ for (j = 0; j < nb_buffers; j++) |
|
474 |
+ if (s->buffers[j]->buffer == buf->buffer) |
|
475 |
+ break; |
|
476 |
+ if (j == i) |
|
477 |
+ s->buffers[nb_buffers++] = buf; |
|
478 |
+ } |
|
462 | 479 |
|
463 |
- if (!(priv = av_mallocz(sizeof(*priv)))) |
|
464 |
- goto fail; |
|
465 |
- if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs))) |
|
466 |
- goto fail; |
|
480 |
+ /* create references to the buffers we copied to output */ |
|
481 |
+ if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) { |
|
482 |
+ frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf); |
|
483 |
+ frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) * |
|
484 |
+ frame->nb_extended_buf); |
|
485 |
+ if (!frame->extended_buf) { |
|
486 |
+ frame->nb_extended_buf = 0; |
|
487 |
+ ret = AVERROR(ENOMEM); |
|
488 |
+ goto fail; |
|
489 |
+ } |
|
490 |
+ } |
|
491 |
+ for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) { |
|
492 |
+ frame->buf[i] = av_buffer_ref(s->buffers[i]); |
|
493 |
+ if (!frame->buf[i]) { |
|
494 |
+ ret = AVERROR(ENOMEM); |
|
495 |
+ goto fail; |
|
496 |
+ } |
|
497 |
+ } |
|
498 |
+ for (i = 0; i < frame->nb_extended_buf; i++) { |
|
499 |
+ frame->extended_buf[i] = av_buffer_ref(s->buffers[i + |
|
500 |
+ FF_ARRAY_ELEMS(frame->buf)]); |
|
501 |
+ if (!frame->extended_buf[i]) { |
|
502 |
+ ret = AVERROR(ENOMEM); |
|
503 |
+ goto fail; |
|
504 |
+ } |
|
505 |
+ } |
|
467 | 506 |
|
468 |
- for (i = 0; i < ctx->nb_inputs; i++) |
|
469 |
- priv->in_buffers[i] = s->input_frames[i]; |
|
470 |
- priv->nb_in_buffers = ctx->nb_inputs; |
|
471 |
- buf->buf->priv = priv; |
|
507 |
+ frame->nb_samples = nb_samples; |
|
508 |
+ frame->channel_layout = outlink->channel_layout; |
|
509 |
+ frame->sample_rate = outlink->sample_rate; |
|
510 |
+ frame->pts = s->input_frames[0]->pts; |
|
511 |
+ frame->linesize[0] = linesize; |
|
512 |
+ if (frame->data != frame->extended_data) { |
|
513 |
+ memcpy(frame->data, frame->extended_data, sizeof(*frame->data) * |
|
514 |
+ FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels)); |
|
515 |
+ } |
|
472 | 516 |
|
473 |
- ret = ff_filter_frame(outlink, buf); |
|
517 |
+ ret = ff_filter_frame(outlink, frame); |
|
474 | 518 |
|
475 | 519 |
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs); |
476 | 520 |
|
477 | 521 |
return ret; |
478 | 522 |
|
479 | 523 |
fail: |
480 |
- avfilter_unref_buffer(buf); |
|
481 |
- if (priv) |
|
482 |
- av_freep(&priv->in_buffers); |
|
483 |
- av_freep(&priv); |
|
484 |
- return AVERROR(ENOMEM); |
|
524 |
+ av_frame_free(&frame); |
|
525 |
+ return ret; |
|
485 | 526 |
} |
486 | 527 |
|
487 | 528 |
static const AVFilterPad avfilter_af_join_outputs[] = { |
... | ... |
@@ -353,21 +353,21 @@ static int config_props(AVFilterLink *link) |
353 | 353 |
return 0; |
354 | 354 |
} |
355 | 355 |
|
356 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
356 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
357 | 357 |
{ |
358 | 358 |
int ret; |
359 |
- int n = insamples->audio->nb_samples; |
|
359 |
+ int n = insamples->nb_samples; |
|
360 | 360 |
AVFilterLink *const outlink = inlink->dst->outputs[0]; |
361 |
- AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n); |
|
361 |
+ AVFrame *outsamples = ff_get_audio_buffer(outlink, n); |
|
362 | 362 |
PanContext *pan = inlink->dst->priv; |
363 | 363 |
|
364 | 364 |
swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n); |
365 |
- avfilter_copy_buffer_ref_props(outsamples, insamples); |
|
366 |
- outsamples->audio->channel_layout = outlink->channel_layout; |
|
367 |
- outsamples->audio->channels = outlink->channels; |
|
365 |
+ av_frame_copy_props(outsamples, insamples); |
|
366 |
+ outsamples->channel_layout = outlink->channel_layout; |
|
367 |
+ outsamples->channels = outlink->channels; |
|
368 | 368 |
|
369 | 369 |
ret = ff_filter_frame(outlink, outsamples); |
370 |
- avfilter_unref_buffer(insamples); |
|
370 |
+ av_frame_free(&insamples); |
|
371 | 371 |
return ret; |
372 | 372 |
} |
373 | 373 |
|
... | ... |
@@ -383,7 +383,6 @@ static const AVFilterPad pan_inputs[] = { |
383 | 383 |
.type = AVMEDIA_TYPE_AUDIO, |
384 | 384 |
.config_props = config_props, |
385 | 385 |
.filter_frame = filter_frame, |
386 |
- .min_perms = AV_PERM_READ, |
|
387 | 386 |
}, |
388 | 387 |
{ NULL } |
389 | 388 |
}; |
... | ... |
@@ -174,7 +174,7 @@ static int request_frame(AVFilterLink *outlink) |
174 | 174 |
|
175 | 175 |
/* flush the lavr delay buffer */ |
176 | 176 |
if (ret == AVERROR_EOF && s->avr) { |
177 |
- AVFilterBufferRef *buf; |
|
177 |
+ AVFrame *frame; |
|
178 | 178 |
int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr), |
179 | 179 |
outlink->sample_rate, |
180 | 180 |
ctx->inputs[0]->sample_rate, |
... | ... |
@@ -183,25 +183,25 @@ static int request_frame(AVFilterLink *outlink) |
183 | 183 |
if (!nb_samples) |
184 | 184 |
return ret; |
185 | 185 |
|
186 |
- buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); |
|
187 |
- if (!buf) |
|
186 |
+ frame = ff_get_audio_buffer(outlink, nb_samples); |
|
187 |
+ if (!frame) |
|
188 | 188 |
return AVERROR(ENOMEM); |
189 | 189 |
|
190 |
- ret = avresample_convert(s->avr, buf->extended_data, |
|
191 |
- buf->linesize[0], nb_samples, |
|
190 |
+ ret = avresample_convert(s->avr, frame->extended_data, |
|
191 |
+ frame->linesize[0], nb_samples, |
|
192 | 192 |
NULL, 0, 0); |
193 | 193 |
if (ret <= 0) { |
194 |
- avfilter_unref_buffer(buf); |
|
194 |
+ av_frame_free(&frame); |
|
195 | 195 |
return (ret == 0) ? AVERROR_EOF : ret; |
196 | 196 |
} |
197 | 197 |
|
198 |
- buf->pts = s->next_pts; |
|
199 |
- return ff_filter_frame(outlink, buf); |
|
198 |
+ frame->pts = s->next_pts; |
|
199 |
+ return ff_filter_frame(outlink, frame); |
|
200 | 200 |
} |
201 | 201 |
return ret; |
202 | 202 |
} |
203 | 203 |
|
204 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
204 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
205 | 205 |
{ |
206 | 206 |
AVFilterContext *ctx = inlink->dst; |
207 | 207 |
ResampleContext *s = ctx->priv; |
... | ... |
@@ -209,27 +209,26 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
209 | 209 |
int ret; |
210 | 210 |
|
211 | 211 |
if (s->avr) { |
212 |
- AVFilterBufferRef *buf_out; |
|
212 |
+ AVFrame *out; |
|
213 | 213 |
int delay, nb_samples; |
214 | 214 |
|
215 | 215 |
/* maximum possible samples lavr can output */ |
216 | 216 |
delay = avresample_get_delay(s->avr); |
217 |
- nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay, |
|
217 |
+ nb_samples = av_rescale_rnd(in->nb_samples + delay, |
|
218 | 218 |
outlink->sample_rate, inlink->sample_rate, |
219 | 219 |
AV_ROUND_UP); |
220 | 220 |
|
221 |
- buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); |
|
222 |
- if (!buf_out) { |
|
221 |
+ out = ff_get_audio_buffer(outlink, nb_samples); |
|
222 |
+ if (!out) { |
|
223 | 223 |
ret = AVERROR(ENOMEM); |
224 | 224 |
goto fail; |
225 | 225 |
} |
226 | 226 |
|
227 |
- ret = avresample_convert(s->avr, buf_out->extended_data, |
|
228 |
- buf_out->linesize[0], nb_samples, |
|
229 |
- buf->extended_data, buf->linesize[0], |
|
230 |
- buf->audio->nb_samples); |
|
227 |
+ ret = avresample_convert(s->avr, out->extended_data, out->linesize[0], |
|
228 |
+ nb_samples, in->extended_data, in->linesize[0], |
|
229 |
+ in->nb_samples); |
|
231 | 230 |
if (ret <= 0) { |
232 |
- avfilter_unref_buffer(buf_out); |
|
231 |
+ av_frame_free(&out); |
|
233 | 232 |
if (ret < 0) |
234 | 233 |
goto fail; |
235 | 234 |
} |
... | ... |
@@ -237,36 +236,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
237 | 237 |
av_assert0(!avresample_available(s->avr)); |
238 | 238 |
|
239 | 239 |
if (s->next_pts == AV_NOPTS_VALUE) { |
240 |
- if (buf->pts == AV_NOPTS_VALUE) { |
|
240 |
+ if (in->pts == AV_NOPTS_VALUE) { |
|
241 | 241 |
av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, " |
242 | 242 |
"assuming 0.\n"); |
243 | 243 |
s->next_pts = 0; |
244 | 244 |
} else |
245 |
- s->next_pts = av_rescale_q(buf->pts, inlink->time_base, |
|
245 |
+ s->next_pts = av_rescale_q(in->pts, inlink->time_base, |
|
246 | 246 |
outlink->time_base); |
247 | 247 |
} |
248 | 248 |
|
249 | 249 |
if (ret > 0) { |
250 |
- buf_out->audio->nb_samples = ret; |
|
251 |
- if (buf->pts != AV_NOPTS_VALUE) { |
|
252 |
- buf_out->pts = av_rescale_q(buf->pts, inlink->time_base, |
|
250 |
+ out->nb_samples = ret; |
|
251 |
+ if (in->pts != AV_NOPTS_VALUE) { |
|
252 |
+ out->pts = av_rescale_q(in->pts, inlink->time_base, |
|
253 | 253 |
outlink->time_base) - |
254 | 254 |
av_rescale(delay, outlink->sample_rate, |
255 | 255 |
inlink->sample_rate); |
256 | 256 |
} else |
257 |
- buf_out->pts = s->next_pts; |
|
257 |
+ out->pts = s->next_pts; |
|
258 | 258 |
|
259 |
- s->next_pts = buf_out->pts + buf_out->audio->nb_samples; |
|
259 |
+ s->next_pts = out->pts + out->nb_samples; |
|
260 | 260 |
|
261 |
- ret = ff_filter_frame(outlink, buf_out); |
|
261 |
+ ret = ff_filter_frame(outlink, out); |
|
262 | 262 |
s->got_output = 1; |
263 | 263 |
} |
264 | 264 |
|
265 | 265 |
fail: |
266 |
- avfilter_unref_buffer(buf); |
|
266 |
+ av_frame_free(&in); |
|
267 | 267 |
} else { |
268 |
- buf->format = outlink->format; |
|
269 |
- ret = ff_filter_frame(outlink, buf); |
|
268 |
+ in->format = outlink->format; |
|
269 |
+ ret = ff_filter_frame(outlink, in); |
|
270 | 270 |
s->got_output = 1; |
271 | 271 |
} |
272 | 272 |
|
... | ... |
@@ -278,7 +277,6 @@ static const AVFilterPad avfilter_af_resample_inputs[] = { |
278 | 278 |
.name = "default", |
279 | 279 |
.type = AVMEDIA_TYPE_AUDIO, |
280 | 280 |
.filter_frame = filter_frame, |
281 |
- .min_perms = AV_PERM_READ |
|
282 | 281 |
}, |
283 | 282 |
{ NULL } |
284 | 283 |
}; |
... | ... |
@@ -70,20 +70,20 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
70 | 70 |
return 0; |
71 | 71 |
} |
72 | 72 |
|
73 |
-static char *get_metadata_val(AVFilterBufferRef *insamples, const char *key) |
|
73 |
+static char *get_metadata_val(AVFrame *insamples, const char *key) |
|
74 | 74 |
{ |
75 | 75 |
AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0); |
76 | 76 |
return e && e->value ? e->value : NULL; |
77 | 77 |
} |
78 | 78 |
|
79 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
79 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
80 | 80 |
{ |
81 | 81 |
int i; |
82 | 82 |
SilenceDetectContext *silence = inlink->dst->priv; |
83 | 83 |
const int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout); |
84 | 84 |
const int srate = inlink->sample_rate; |
85 |
- const int nb_samples = insamples->audio->nb_samples * nb_channels; |
|
86 |
- const int64_t nb_samples_notify = srate * silence->duration * nb_channels; |
|
85 |
+ const int nb_samples = insamples->nb_samples * nb_channels; |
|
86 |
+ const int64_t nb_samples_notify = srate * silence->duration * nb_channels; |
|
87 | 87 |
|
88 | 88 |
// scale number of null samples to the new sample rate |
89 | 89 |
if (silence->last_sample_rate && silence->last_sample_rate != srate) |
... | ... |
@@ -226,21 +226,21 @@ static int config_output(AVFilterLink *outlink) |
226 | 226 |
return 0; |
227 | 227 |
} |
228 | 228 |
|
229 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
229 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
230 | 230 |
{ |
231 | 231 |
VolumeContext *vol = inlink->dst->priv; |
232 | 232 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
233 |
- int nb_samples = buf->audio->nb_samples; |
|
234 |
- AVFilterBufferRef *out_buf; |
|
233 |
+ int nb_samples = buf->nb_samples; |
|
234 |
+ AVFrame *out_buf; |
|
235 | 235 |
|
236 | 236 |
if (vol->volume == 1.0 || vol->volume_i == 256) |
237 | 237 |
return ff_filter_frame(outlink, buf); |
238 | 238 |
|
239 | 239 |
/* do volume scaling in-place if input buffer is writable */ |
240 |
- if (buf->perms & AV_PERM_WRITE) { |
|
240 |
+ if (av_frame_is_writable(buf)) { |
|
241 | 241 |
out_buf = buf; |
242 | 242 |
} else { |
243 |
- out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples); |
|
243 |
+ out_buf = ff_get_audio_buffer(inlink, nb_samples); |
|
244 | 244 |
if (!out_buf) |
245 | 245 |
return AVERROR(ENOMEM); |
246 | 246 |
out_buf->pts = buf->pts; |
... | ... |
@@ -276,7 +276,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
276 | 276 |
} |
277 | 277 |
|
278 | 278 |
if (buf != out_buf) |
279 |
- avfilter_unref_buffer(buf); |
|
279 |
+ av_frame_free(&buf); |
|
280 | 280 |
|
281 | 281 |
return ff_filter_frame(outlink, out_buf); |
282 | 282 |
} |
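
In the volume hunk, the old `buf->perms & AV_PERM_WRITE` test becomes av_frame_is_writable(), which reports whether the frame's data buffers are exclusively owned. The resulting in-place-or-copy idiom, sketched here as a hypothetical filter with the actual sample processing elided:

    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out;

        if (av_frame_is_writable(in)) {
            out = in;                                   /* modify in place */
        } else {
            out = ff_get_audio_buffer(inlink, in->nb_samples);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
        }

        /* ... read from in->extended_data, write to out->extended_data ... */

        if (out != in)
            av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }
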
... | ... |
@@ -49,12 +49,12 @@ static int query_formats(AVFilterContext *ctx) |
49 | 49 |
return 0; |
50 | 50 |
} |
51 | 51 |
|
52 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samples) |
|
52 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *samples) |
|
53 | 53 |
{ |
54 | 54 |
AVFilterContext *ctx = inlink->dst; |
55 | 55 |
VolDetectContext *vd = ctx->priv; |
56 |
- int64_t layout = samples->audio->channel_layout; |
|
57 |
- int nb_samples = samples->audio->nb_samples; |
|
56 |
+ int64_t layout = samples->channel_layout; |
|
57 |
+ int nb_samples = samples->nb_samples; |
|
58 | 58 |
int nb_channels = av_get_channel_layout_nb_channels(layout); |
59 | 59 |
int nb_planes = nb_channels; |
60 | 60 |
int plane, i; |
... | ... |
@@ -137,7 +137,6 @@ static const AVFilterPad volumedetect_inputs[] = { |
137 | 137 |
.type = AVMEDIA_TYPE_AUDIO, |
138 | 138 |
.get_audio_buffer = ff_null_get_audio_buffer, |
139 | 139 |
.filter_frame = filter_frame, |
140 |
- .min_perms = AV_PERM_READ, |
|
141 | 140 |
}, |
142 | 141 |
{ NULL } |
143 | 142 |
}; |
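
Analysis filters such as volumedetect now read the channel layout and sample count straight off the AVFrame. A sketch of walking every sample of a frame, covering both packed and planar layouts; the helper name is hypothetical and signed 16-bit samples are assumed:

    static void walk_samples_s16(const AVFrame *frame)
    {
        int nb_channels = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar      = av_sample_fmt_is_planar(frame->format);
        int nb_planes   = planar ? nb_channels : 1;
        int per_plane   = frame->nb_samples * (planar ? 1 : nb_channels);
        int plane, i;

        for (plane = 0; plane < nb_planes; plane++) {
            const int16_t *p = (const int16_t *)frame->extended_data[plane];
            for (i = 0; i < per_plane; i++) {
                /* ... inspect p[i], e.g. accumulate a histogram ... */
            }
        }
    }
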
... | ... |
@@ -193,8 +193,8 @@ void avfilter_register_all(void) |
193 | 193 |
* unconditionally */ |
194 | 194 |
REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer); |
195 | 195 |
REGISTER_FILTER_UNCONDITIONAL(vsrc_buffer); |
196 |
- REGISTER_FILTER_UNCONDITIONAL(asink_abuffer); |
|
197 |
- REGISTER_FILTER_UNCONDITIONAL(vsink_buffer); |
|
196 |
+ //REGISTER_FILTER_UNCONDITIONAL(asink_abuffer); |
|
197 |
+ //REGISTER_FILTER_UNCONDITIONAL(vsink_buffer); |
|
198 | 198 |
REGISTER_FILTER_UNCONDITIONAL(af_afifo); |
199 | 199 |
REGISTER_FILTER_UNCONDITIONAL(vf_fifo); |
200 | 200 |
} |
... | ... |
@@ -22,9 +22,9 @@ |
22 | 22 |
#include "avfilter.h" |
23 | 23 |
#include "internal.h" |
24 | 24 |
|
25 |
-static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref) |
|
25 |
+static int null_filter_frame(AVFilterLink *link, AVFrame *frame) |
|
26 | 26 |
{ |
27 |
- avfilter_unref_bufferp(&samplesref); |
|
27 |
+ av_frame_free(&frame); |
|
28 | 28 |
return 0; |
29 | 29 |
} |
30 | 30 |
|
... | ... |
@@ -212,14 +212,14 @@ static int query_formats(AVFilterContext *ctx) |
212 | 212 |
static int request_frame(AVFilterLink *outlink) |
213 | 213 |
{ |
214 | 214 |
EvalContext *eval = outlink->src->priv; |
215 |
- AVFilterBufferRef *samplesref; |
|
215 |
+ AVFrame *samplesref; |
|
216 | 216 |
int i, j; |
217 | 217 |
double t = eval->n * (double)1/eval->sample_rate; |
218 | 218 |
|
219 | 219 |
if (eval->duration >= 0 && t >= eval->duration) |
220 | 220 |
return AVERROR_EOF; |
221 | 221 |
|
222 |
- samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples); |
|
222 |
+ samplesref = ff_get_audio_buffer(outlink, eval->nb_samples); |
|
223 | 223 |
|
224 | 224 |
/* evaluate expression for each single sample and for each channel */ |
225 | 225 |
for (i = 0; i < eval->nb_samples; i++, eval->n++) { |
... | ... |
@@ -233,8 +233,7 @@ static int request_frame(AVFilterLink *outlink) |
233 | 233 |
} |
234 | 234 |
|
235 | 235 |
samplesref->pts = eval->pts; |
236 |
- samplesref->pos = -1; |
|
237 |
- samplesref->audio->sample_rate = eval->sample_rate; |
|
236 |
+ samplesref->sample_rate = eval->sample_rate; |
|
238 | 237 |
eval->pts += eval->nb_samples; |
239 | 238 |
|
240 | 239 |
ff_filter_frame(outlink, samplesref); |
... | ... |
@@ -102,17 +102,15 @@ static int config_props(AVFilterLink *outlink) |
102 | 102 |
static int request_frame(AVFilterLink *outlink) |
103 | 103 |
{ |
104 | 104 |
ANullContext *null = outlink->src->priv; |
105 |
- AVFilterBufferRef *samplesref; |
|
105 |
+ AVFrame *samplesref; |
|
106 | 106 |
|
107 |
- samplesref = |
|
108 |
- ff_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples); |
|
107 |
+ samplesref = ff_get_audio_buffer(outlink, null->nb_samples); |
|
109 | 108 |
samplesref->pts = null->pts; |
110 |
- samplesref->pos = -1; |
|
111 |
- samplesref->audio->channel_layout = null->channel_layout; |
|
112 |
- samplesref->audio->sample_rate = outlink->sample_rate; |
|
109 |
+ samplesref->channel_layout = null->channel_layout; |
|
110 |
+ samplesref->sample_rate = outlink->sample_rate; |
|
113 | 111 |
|
114 |
- ff_filter_frame(outlink, avfilter_ref_buffer(samplesref, ~0)); |
|
115 |
- avfilter_unref_buffer(samplesref); |
|
112 |
+ ff_filter_frame(outlink, av_frame_clone(samplesref)); |
|
113 |
+ av_frame_free(&samplesref); |
|
116 | 114 |
|
117 | 115 |
null->pts += null->nb_samples; |
118 | 116 |
return 0; |
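
Audio sources follow the same pattern: ff_get_audio_buffer() already hands back a silence-cleared frame with format, channels, channel layout and sample rate taken from the link (see the new ff_default_get_audio_buffer() further below), so a request_frame() mostly just has to set the pts. A minimal sketch of a silence source, with a hypothetical context struct:

    typedef struct { int64_t pts; int nb_samples; } SilenceSrcContext;  /* hypothetical */

    static int request_frame(AVFilterLink *outlink)
    {
        SilenceSrcContext *s = outlink->src->priv;
        AVFrame *frame = ff_get_audio_buffer(outlink, s->nb_samples);

        if (!frame)
            return AVERROR(ENOMEM);
        frame->pts = s->pts;                     /* counted in samples, assuming a 1/sample_rate time base */
        s->pts    += s->nb_samples;
        return ff_filter_frame(outlink, frame);  /* ownership passes downstream */
    }
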
... | ... |
@@ -245,22 +245,22 @@ static int config_props(AVFilterLink *outlink) |
245 | 245 |
|
246 | 246 |
static int request_frame(AVFilterLink *outlink) |
247 | 247 |
{ |
248 |
- AVFilterBufferRef *samplesref; |
|
248 |
+ AVFrame *samplesref; |
|
249 | 249 |
FliteContext *flite = outlink->src->priv; |
250 | 250 |
int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples); |
251 | 251 |
|
252 | 252 |
if (!nb_samples) |
253 | 253 |
return AVERROR_EOF; |
254 | 254 |
|
255 |
- samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); |
|
255 |
+ samplesref = ff_get_audio_buffer(outlink, nb_samples); |
|
256 | 256 |
if (!samplesref) |
257 | 257 |
return AVERROR(ENOMEM); |
258 | 258 |
|
259 | 259 |
memcpy(samplesref->data[0], flite->wave_samples, |
260 | 260 |
nb_samples * flite->wave->num_channels * 2); |
261 | 261 |
samplesref->pts = flite->pts; |
262 |
- samplesref->pos = -1; |
|
263 |
- samplesref->audio->sample_rate = flite->wave->sample_rate; |
|
262 |
+ av_frame_set_pkt_pos(samplesref, -1); |
|
263 |
+ av_frame_set_sample_rate(samplesref, flite->wave->sample_rate); |
|
264 | 264 |
flite->pts += nb_samples; |
265 | 265 |
flite->wave_samples += nb_samples * flite->wave->num_channels; |
266 | 266 |
flite->wave_nb_samples -= nb_samples; |
... | ... |
@@ -22,6 +22,7 @@ |
22 | 22 |
#include "libavutil/avassert.h" |
23 | 23 |
#include "libavutil/channel_layout.h" |
24 | 24 |
#include "libavutil/common.h" |
25 |
+#include "libavcodec/avcodec.h" |
|
25 | 26 |
|
26 | 27 |
#include "audio.h" |
27 | 28 |
#include "avfilter.h" |
... | ... |
@@ -32,69 +33,70 @@ int avfilter_ref_get_channels(AVFilterBufferRef *ref) |
32 | 32 |
return ref->audio ? ref->audio->channels : 0; |
33 | 33 |
} |
34 | 34 |
|
35 |
-AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms, |
|
36 |
- int nb_samples) |
|
35 |
+AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples) |
|
37 | 36 |
{ |
38 |
- return ff_get_audio_buffer(link->dst->outputs[0], perms, nb_samples); |
|
37 |
+ return ff_get_audio_buffer(link->dst->outputs[0], nb_samples); |
|
39 | 38 |
} |
40 | 39 |
|
41 |
-AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms, |
|
42 |
- int nb_samples) |
|
40 |
+AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples) |
|
43 | 41 |
{ |
44 |
- AVFilterBufferRef *samplesref = NULL; |
|
45 |
- uint8_t **data; |
|
46 |
- int planar = av_sample_fmt_is_planar(link->format); |
|
47 |
- int nb_channels = link->channels; |
|
48 |
- int planes = planar ? nb_channels : 1; |
|
49 |
- int linesize; |
|
50 |
- int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE | |
|
51 |
- AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN; |
|
52 |
- |
|
53 |
- av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES))); |
|
54 |
- |
|
55 |
- if (!(data = av_mallocz(sizeof(*data) * planes))) |
|
42 |
+ AVFrame *frame = av_frame_alloc(); |
|
43 |
+ int channels = link->channels; |
|
44 |
+ int buf_size, ret; |
|
45 |
+ |
|
46 |
+ av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout)); |
|
47 |
+ |
|
48 |
+ if (!frame) |
|
49 |
+ return NULL; |
|
50 |
+ |
|
51 |
+ buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples, |
|
52 |
+ link->format, 0); |
|
53 |
+ if (buf_size < 0) |
|
56 | 54 |
goto fail; |
57 | 55 |
|
58 |
- if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0) |
|
56 |
+ frame->buf[0] = av_buffer_alloc(buf_size); |
|
57 |
+ if (!frame->buf[0]) |
|
59 | 58 |
goto fail; |
60 | 59 |
|
61 |
- samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels( |
|
62 |
- data, linesize, full_perms, nb_samples, link->format, |
|
63 |
- link->channels, link->channel_layout); |
|
64 |
- if (!samplesref) |
|
60 |
+ frame->nb_samples = nb_samples; |
|
61 |
+ ret = avcodec_fill_audio_frame(frame, channels, link->format, |
|
62 |
+ frame->buf[0]->data, buf_size, 0); |
|
63 |
+ if (ret < 0) |
|
65 | 64 |
goto fail; |
66 | 65 |
|
67 |
- samplesref->audio->sample_rate = link->sample_rate; |
|
66 |
+ av_samples_set_silence(frame->extended_data, 0, nb_samples, channels, |
|
67 |
+ link->format); |
|
68 |
+ |
|
69 |
+ frame->nb_samples = nb_samples; |
|
70 |
+ frame->format = link->format; |
|
71 |
+ frame->channels = link->channels; |
|
72 |
+ frame->channel_layout = link->channel_layout; |
|
73 |
+ frame->sample_rate = link->sample_rate; |
|
68 | 74 |
|
69 |
- av_freep(&data); |
|
75 |
+ return frame; |
|
70 | 76 |
|
71 | 77 |
fail: |
72 |
- if (data) |
|
73 |
- av_freep(&data[0]); |
|
74 |
- av_freep(&data); |
|
75 |
- return samplesref; |
|
78 |
+ av_buffer_unref(&frame->buf[0]); |
|
79 |
+ av_frame_free(&frame); |
|
80 |
+ return NULL; |
|
76 | 81 |
} |
77 | 82 |
|
78 |
-AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms, |
|
79 |
- int nb_samples) |
|
83 |
+AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples) |
|
80 | 84 |
{ |
81 |
- AVFilterBufferRef *ret = NULL; |
|
85 |
+ AVFrame *ret = NULL; |
|
82 | 86 |
|
83 | 87 |
if (link->dstpad->get_audio_buffer) |
84 |
- ret = link->dstpad->get_audio_buffer(link, perms, nb_samples); |
|
88 |
+ ret = link->dstpad->get_audio_buffer(link, nb_samples); |
|
85 | 89 |
|
86 | 90 |
if (!ret) |
87 |
- ret = ff_default_get_audio_buffer(link, perms, nb_samples); |
|
88 |
- |
|
89 |
- if (ret) |
|
90 |
- ret->type = AVMEDIA_TYPE_AUDIO; |
|
91 |
+ ret = ff_default_get_audio_buffer(link, nb_samples); |
|
91 | 92 |
|
92 | 93 |
return ret; |
93 | 94 |
} |
94 | 95 |
|
96 |
+#if FF_API_AVFILTERBUFFER |
|
95 | 97 |
AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, |
96 |
- int linesize, |
|
97 |
- int perms, |
|
98 |
+ int linesize,int perms, |
|
98 | 99 |
int nb_samples, |
99 | 100 |
enum AVSampleFormat sample_fmt, |
100 | 101 |
int channels, |
... | ... |
@@ -179,3 +181,4 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, |
179 | 179 |
nb_samples, sample_fmt, |
180 | 180 |
channels, channel_layout); |
181 | 181 |
} |
182 |
+#endif |
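
The rewritten ff_default_get_audio_buffer() above doubles as a recipe for building a refcounted audio AVFrame by hand: allocate the frame, attach an AVBufferRef, then let avcodec_fill_audio_frame() set up data/extended_data/linesize. A condensed sketch using the same calls (helper name hypothetical; needs libavcodec/avcodec.h and libavutil/channel_layout.h, alignment 0 as in the hunk):

    static AVFrame *alloc_audio_frame(enum AVSampleFormat fmt, uint64_t layout,
                                      int sample_rate, int nb_samples)
    {
        int channels = av_get_channel_layout_nb_channels(layout);
        int size     = av_samples_get_buffer_size(NULL, channels, nb_samples, fmt, 0);
        AVFrame *f   = av_frame_alloc();

        if (!f || size < 0 || !(f->buf[0] = av_buffer_alloc(size)))
            goto fail;
        f->nb_samples     = nb_samples;
        f->format         = fmt;
        f->channels       = channels;
        f->channel_layout = layout;
        f->sample_rate    = sample_rate;
        if (avcodec_fill_audio_frame(f, channels, fmt, f->buf[0]->data, size, 0) < 0)
            goto fail;
        return f;
    fail:
        av_frame_free(&f);      /* NULL-safe; also unrefs f->buf[0] if it was set */
        return NULL;
    }
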
... | ... |
@@ -44,25 +44,21 @@ static const enum AVSampleFormat ff_planar_sample_fmts_array[] = { |
44 | 44 |
}; |
45 | 45 |
|
46 | 46 |
/** default handler for get_audio_buffer() for audio inputs */ |
47 |
-AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms, |
|
48 |
- int nb_samples); |
|
47 |
+AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples); |
|
49 | 48 |
|
50 | 49 |
/** get_audio_buffer() handler for filters which simply pass audio along */ |
51 |
-AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms, |
|
52 |
- int nb_samples); |
|
50 |
+AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples); |
|
53 | 51 |
|
54 | 52 |
/** |
55 | 53 |
* Request an audio samples buffer with a specific set of permissions. |
56 | 54 |
* |
57 | 55 |
* @param link the output link to the filter from which the buffer will |
58 | 56 |
* be requested |
59 |
- * @param perms the required access permissions |
|
60 | 57 |
* @param nb_samples the number of samples per channel |
61 | 58 |
* @return A reference to the samples. This must be unreferenced with |
62 | 59 |
* avfilter_unref_buffer when you are finished with it. |
63 | 60 |
*/ |
64 |
-AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms, |
|
65 |
- int nb_samples); |
|
61 |
+AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples); |
|
66 | 62 |
|
67 | 63 |
/** |
68 | 64 |
* Send a buffer of audio samples to the next filter. |
... | ... |
@@ -27,52 +27,6 @@ |
27 | 27 |
#include "libavutil/avassert.h" |
28 | 28 |
#include "libavutil/opt.h" |
29 | 29 |
|
30 |
-int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) |
|
31 |
-{ |
|
32 |
- dst->pts = src->pts; |
|
33 |
- dst->pos = av_frame_get_pkt_pos(src); |
|
34 |
- dst->format = src->format; |
|
35 |
- |
|
36 |
- av_dict_free(&dst->metadata); |
|
37 |
- av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0); |
|
38 |
- |
|
39 |
- switch (dst->type) { |
|
40 |
- case AVMEDIA_TYPE_VIDEO: |
|
41 |
- dst->video->w = src->width; |
|
42 |
- dst->video->h = src->height; |
|
43 |
- dst->video->sample_aspect_ratio = src->sample_aspect_ratio; |
|
44 |
- dst->video->interlaced = src->interlaced_frame; |
|
45 |
- dst->video->top_field_first = src->top_field_first; |
|
46 |
- dst->video->key_frame = src->key_frame; |
|
47 |
- dst->video->pict_type = src->pict_type; |
|
48 |
- av_freep(&dst->video->qp_table); |
|
49 |
- dst->video->qp_table_linesize = 0; |
|
50 |
- if (src->qscale_table) { |
|
51 |
- int qsize = src->qstride ? src->qstride * ((src->height+15)/16) : (src->width+15)/16; |
|
52 |
- dst->video->qp_table = av_malloc(qsize); |
|
53 |
- if (!dst->video->qp_table) |
|
54 |
- return AVERROR(ENOMEM); |
|
55 |
- dst->video->qp_table_linesize = src->qstride; |
|
56 |
- dst->video->qp_table_size = qsize; |
|
57 |
- memcpy(dst->video->qp_table, src->qscale_table, qsize); |
|
58 |
- } |
|
59 |
- break; |
|
60 |
- case AVMEDIA_TYPE_AUDIO: |
|
61 |
- dst->audio->sample_rate = src->sample_rate; |
|
62 |
- dst->audio->channel_layout = src->channel_layout; |
|
63 |
- dst->audio->channels = src->channels; |
|
64 |
- if(src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) { |
|
65 |
- av_log(NULL, AV_LOG_ERROR, "libavfilter does not support this channel layout\n"); |
|
66 |
- return AVERROR(EINVAL); |
|
67 |
- } |
|
68 |
- break; |
|
69 |
- default: |
|
70 |
- return AVERROR(EINVAL); |
|
71 |
- } |
|
72 |
- |
|
73 |
- return 0; |
|
74 |
-} |
|
75 |
- |
|
76 | 30 |
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, |
77 | 31 |
int perms) |
78 | 32 |
{ |
... | ... |
@@ -32,22 +32,6 @@ |
32 | 32 |
#include "avfilter.h" |
33 | 33 |
|
34 | 34 |
/** |
35 |
- * Copy the frame properties of src to dst, without copying the actual |
|
36 |
- * image data. |
|
37 |
- * |
|
38 |
- * @return 0 on success, a negative number on error. |
|
39 |
- */ |
|
40 |
-int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); |
|
41 |
- |
|
42 |
-/** |
|
43 |
- * Copy the frame properties and data pointers of src to dst, without copying |
|
44 |
- * the actual data. |
|
45 |
- * |
|
46 |
- * @return 0 on success, a negative number on error. |
|
47 |
- */ |
|
48 |
-int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); |
|
49 |
- |
|
50 |
-/** |
|
51 | 35 |
* Create and return a picref reference from the data and properties |
52 | 36 |
* contained in frame. |
53 | 37 |
* |
... | ... |
@@ -116,16 +100,4 @@ int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, |
116 | 116 |
const AVFilterBufferRef *ref); |
117 | 117 |
#endif |
118 | 118 |
|
119 |
-/** |
|
120 |
- * Add frame data to buffer_src. |
|
121 |
- * |
|
122 |
- * @param buffer_src pointer to a buffer source context |
|
123 |
- * @param frame a frame, or NULL to mark EOF |
|
124 |
- * @param flags a combination of AV_BUFFERSRC_FLAG_* |
|
125 |
- * @return >= 0 in case of success, a negative AVERROR code |
|
126 |
- * in case of failure |
|
127 |
- */ |
|
128 |
-int av_buffersrc_add_frame(AVFilterContext *buffer_src, |
|
129 |
- const AVFrame *frame, int flags); |
|
130 |
- |
|
131 | 119 |
#endif /* AVFILTER_AVCODEC_H */ |
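
With the declaration dropped from avcodec.h, code that feeds decoded frames into a graph is expected to go through libavfilter/buffersrc.h instead. A hedged sketch, assuming the post-merge av_buffersrc_add_frame(ctx, frame) entry point, where a NULL frame marks EOF as in the removed doc comment:

    #include "libavfilter/buffersrc.h"

    /* push one decoded frame into the graph; frame == NULL signals EOF */
    static int feed_filtergraph(AVFilterContext *buffersrc_ctx, AVFrame *frame)
    {
        int ret = av_buffersrc_add_frame(buffersrc_ctx, frame);
        if (ret < 0)
            av_log(buffersrc_ctx, AV_LOG_ERROR, "Error feeding the filtergraph\n");
        return ret;
    }
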
... | ... |
@@ -157,7 +157,7 @@ static int config_output(AVFilterLink *outlink) |
157 | 157 |
} |
158 | 158 |
|
159 | 159 |
static void push_frame(AVFilterContext *ctx, unsigned in_no, |
160 |
- AVFilterBufferRef *buf) |
|
160 |
+ AVFrame *buf) |
|
161 | 161 |
{ |
162 | 162 |
ConcatContext *cat = ctx->priv; |
163 | 163 |
unsigned out_no = in_no % ctx->nb_outputs; |
... | ... |
@@ -171,7 +171,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no, |
171 | 171 |
/* add duration to input PTS */ |
172 | 172 |
if (inlink->sample_rate) |
173 | 173 |
/* use number of audio samples */ |
174 |
- in->pts += av_rescale_q(buf->audio->nb_samples, |
|
174 |
+ in->pts += av_rescale_q(buf->nb_samples, |
|
175 | 175 |
(AVRational){ 1, inlink->sample_rate }, |
176 | 176 |
outlink->time_base); |
177 | 177 |
else if (in->nb_frames >= 2) |
... | ... |
@@ -182,7 +182,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no, |
182 | 182 |
ff_filter_frame(outlink, buf); |
183 | 183 |
} |
184 | 184 |
|
185 |
-static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
185 |
+static void process_frame(AVFilterLink *inlink, AVFrame *buf) |
|
186 | 186 |
{ |
187 | 187 |
AVFilterContext *ctx = inlink->dst; |
188 | 188 |
ConcatContext *cat = ctx->priv; |
... | ... |
@@ -191,7 +191,7 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
191 | 191 |
if (in_no < cat->cur_idx) { |
192 | 192 |
av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n", |
193 | 193 |
ctx->input_pads[in_no].name); |
194 |
- avfilter_unref_buffer(buf); |
|
194 |
+ av_frame_free(&buf); |
|
195 | 195 |
} else if (in_no >= cat->cur_idx + ctx->nb_outputs) { |
196 | 196 |
ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf); |
197 | 197 |
} else { |
... | ... |
@@ -199,27 +199,25 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
199 | 199 |
} |
200 | 200 |
} |
201 | 201 |
|
202 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, |
|
203 |
- int w, int h) |
|
202 |
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) |
|
204 | 203 |
{ |
205 | 204 |
AVFilterContext *ctx = inlink->dst; |
206 | 205 |
unsigned in_no = FF_INLINK_IDX(inlink); |
207 | 206 |
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs]; |
208 | 207 |
|
209 |
- return ff_get_video_buffer(outlink, perms, w, h); |
|
208 |
+ return ff_get_video_buffer(outlink, w, h); |
|
210 | 209 |
} |
211 | 210 |
|
212 |
-static AVFilterBufferRef *get_audio_buffer(AVFilterLink *inlink, int perms, |
|
213 |
- int nb_samples) |
|
211 |
+static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples) |
|
214 | 212 |
{ |
215 | 213 |
AVFilterContext *ctx = inlink->dst; |
216 | 214 |
unsigned in_no = FF_INLINK_IDX(inlink); |
217 | 215 |
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs]; |
218 | 216 |
|
219 |
- return ff_get_audio_buffer(outlink, perms, nb_samples); |
|
217 |
+ return ff_get_audio_buffer(outlink, nb_samples); |
|
220 | 218 |
} |
221 | 219 |
|
222 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
220 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
223 | 221 |
{ |
224 | 222 |
process_frame(inlink, buf); |
225 | 223 |
return 0; /* enhancement: handle error return */ |
... | ... |
@@ -256,7 +254,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no) |
256 | 256 |
int64_t nb_samples, sent = 0; |
257 | 257 |
int frame_nb_samples; |
258 | 258 |
AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate }; |
259 |
- AVFilterBufferRef *buf; |
|
259 |
+ AVFrame *buf; |
|
260 | 260 |
int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); |
261 | 261 |
|
262 | 262 |
if (!rate_tb.den) |
... | ... |
@@ -266,7 +264,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no) |
266 | 266 |
frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */ |
267 | 267 |
while (nb_samples) { |
268 | 268 |
frame_nb_samples = FFMIN(frame_nb_samples, nb_samples); |
269 |
- buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, frame_nb_samples); |
|
269 |
+ buf = ff_get_audio_buffer(outlink, frame_nb_samples); |
|
270 | 270 |
if (!buf) |
271 | 271 |
return; |
272 | 272 |
av_samples_set_silence(buf->extended_data, 0, frame_nb_samples, |
... | ... |
@@ -360,7 +358,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
360 | 360 |
for (str = 0; str < cat->nb_streams[type]; str++) { |
361 | 361 |
AVFilterPad pad = { |
362 | 362 |
.type = type, |
363 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
364 | 363 |
.get_video_buffer = get_video_buffer, |
365 | 364 |
.get_audio_buffer = get_audio_buffer, |
366 | 365 |
.filter_frame = filter_frame, |
... | ... |
@@ -41,7 +41,7 @@ enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES }; |
41 | 41 |
typedef struct { |
42 | 42 |
const AVClass *class; |
43 | 43 |
int w, h; |
44 |
- AVFilterBufferRef *outpicref; |
|
44 |
+ AVFrame *outpicref; |
|
45 | 45 |
int req_fullfilled; |
46 | 46 |
int nb_display_channels; |
47 | 47 |
int channel_height; |
... | ... |
@@ -122,7 +122,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
122 | 122 |
av_freep(&showspectrum->rdft_data[i]); |
123 | 123 |
av_freep(&showspectrum->rdft_data); |
124 | 124 |
av_freep(&showspectrum->window_func_lut); |
125 |
- avfilter_unref_bufferp(&showspectrum->outpicref); |
|
125 |
+ av_frame_free(&showspectrum->outpicref); |
|
126 | 126 |
} |
127 | 127 |
|
128 | 128 |
static int query_formats(AVFilterContext *ctx) |
... | ... |
@@ -179,7 +179,7 @@ static int config_output(AVFilterLink *outlink) |
179 | 179 |
/* (re-)configuration if the video output changed (or first init) */ |
180 | 180 |
if (rdft_bits != showspectrum->rdft_bits) { |
181 | 181 |
size_t rdft_size, rdft_listsize; |
182 |
- AVFilterBufferRef *outpicref; |
|
182 |
+ AVFrame *outpicref; |
|
183 | 183 |
|
184 | 184 |
av_rdft_end(showspectrum->rdft); |
185 | 185 |
showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C); |
... | ... |
@@ -219,10 +219,9 @@ static int config_output(AVFilterLink *outlink) |
219 | 219 |
showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1))); |
220 | 220 |
|
221 | 221 |
/* prepare the initial picref buffer (black frame) */ |
222 |
- avfilter_unref_bufferp(&showspectrum->outpicref); |
|
222 |
+ av_frame_free(&showspectrum->outpicref); |
|
223 | 223 |
showspectrum->outpicref = outpicref = |
224 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2, |
|
225 |
- outlink->w, outlink->h); |
|
224 |
+ ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
226 | 225 |
if (!outpicref) |
227 | 226 |
return AVERROR(ENOMEM); |
228 | 227 |
outlink->sample_aspect_ratio = (AVRational){1,1}; |
... | ... |
@@ -253,7 +252,7 @@ inline static void push_frame(AVFilterLink *outlink) |
253 | 253 |
showspectrum->filled = 0; |
254 | 254 |
showspectrum->req_fullfilled = 1; |
255 | 255 |
|
256 |
- ff_filter_frame(outlink, avfilter_ref_buffer(showspectrum->outpicref, ~AV_PERM_WRITE)); |
|
256 |
+ ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref)); |
|
257 | 257 |
} |
258 | 258 |
|
259 | 259 |
static int request_frame(AVFilterLink *outlink) |
... | ... |
@@ -272,12 +271,12 @@ static int request_frame(AVFilterLink *outlink) |
272 | 272 |
return ret; |
273 | 273 |
} |
274 | 274 |
|
275 |
-static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insamples, int nb_samples) |
|
275 |
+static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples) |
|
276 | 276 |
{ |
277 | 277 |
AVFilterContext *ctx = inlink->dst; |
278 | 278 |
AVFilterLink *outlink = ctx->outputs[0]; |
279 | 279 |
ShowSpectrumContext *showspectrum = ctx->priv; |
280 |
- AVFilterBufferRef *outpicref = showspectrum->outpicref; |
|
280 |
+ AVFrame *outpicref = showspectrum->outpicref; |
|
281 | 281 |
|
282 | 282 |
/* nb_freq contains the power of two superior or equal to the output image |
283 | 283 |
* height (or half the RDFT window size) */ |
... | ... |
@@ -462,11 +461,11 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insampl |
462 | 462 |
return add_samples; |
463 | 463 |
} |
464 | 464 |
|
465 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
465 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
466 | 466 |
{ |
467 | 467 |
AVFilterContext *ctx = inlink->dst; |
468 | 468 |
ShowSpectrumContext *showspectrum = ctx->priv; |
469 |
- int left_samples = insamples->audio->nb_samples; |
|
469 |
+ int left_samples = insamples->nb_samples; |
|
470 | 470 |
|
471 | 471 |
showspectrum->consumed = 0; |
472 | 472 |
while (left_samples) { |
... | ... |
@@ -475,7 +474,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
475 | 475 |
left_samples -= added_samples; |
476 | 476 |
} |
477 | 477 |
|
478 |
- avfilter_unref_buffer(insamples); |
|
478 |
+ av_frame_free(&insamples); |
|
479 | 479 |
return 0; |
480 | 480 |
} |
481 | 481 |
|
... | ... |
@@ -484,7 +483,6 @@ static const AVFilterPad showspectrum_inputs[] = { |
484 | 484 |
.name = "default", |
485 | 485 |
.type = AVMEDIA_TYPE_AUDIO, |
486 | 486 |
.filter_frame = filter_frame, |
487 |
- .min_perms = AV_PERM_READ, |
|
488 | 487 |
}, |
489 | 488 |
{ NULL } |
490 | 489 |
}; |
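
showspectrum (and showwaves below) keep one long-lived output picture and push it repeatedly; the old avfilter_ref_buffer(outpicref, ~AV_PERM_WRITE) push becomes av_frame_clone(), and the kept frame is released with av_frame_free() in uninit(). A sketch of the push side (function name hypothetical):

    /* send the current state of a persistent output frame downstream while
     * keeping our own reference; the clone shares the underlying buffers */
    static int push_current_picture(AVFilterLink *outlink, AVFrame *kept)
    {
        AVFrame *clone = av_frame_clone(kept);
        if (!clone)
            return AVERROR(ENOMEM);
        return ff_filter_frame(outlink, clone);
    }

If the filter keeps drawing into the kept frame afterwards, av_frame_make_writable() can presumably be used on it first so later updates do not touch buffers the clone still references.
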
... | ... |
@@ -44,7 +44,7 @@ typedef struct { |
44 | 44 |
char *rate_str; |
45 | 45 |
AVRational rate; |
46 | 46 |
int buf_idx; |
47 |
- AVFilterBufferRef *outpicref; |
|
47 |
+ AVFrame *outpicref; |
|
48 | 48 |
int req_fullfilled; |
49 | 49 |
int n; |
50 | 50 |
int sample_count_mod; |
... | ... |
@@ -89,7 +89,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
89 | 89 |
ShowWavesContext *showwaves = ctx->priv; |
90 | 90 |
|
91 | 91 |
av_freep(&showwaves->rate_str); |
92 |
- avfilter_unref_bufferp(&showwaves->outpicref); |
|
92 |
+ av_frame_free(&showwaves->outpicref); |
|
93 | 93 |
} |
94 | 94 |
|
95 | 95 |
static int query_formats(AVFilterContext *ctx) |
... | ... |
@@ -190,16 +190,16 @@ static int request_frame(AVFilterLink *outlink) |
190 | 190 |
|
191 | 191 |
#define MAX_INT16 ((1<<15) -1) |
192 | 192 |
|
193 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
193 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
194 | 194 |
{ |
195 | 195 |
AVFilterContext *ctx = inlink->dst; |
196 | 196 |
AVFilterLink *outlink = ctx->outputs[0]; |
197 | 197 |
ShowWavesContext *showwaves = ctx->priv; |
198 |
- const int nb_samples = insamples->audio->nb_samples; |
|
199 |
- AVFilterBufferRef *outpicref = showwaves->outpicref; |
|
198 |
+ const int nb_samples = insamples->nb_samples; |
|
199 |
+ AVFrame *outpicref = showwaves->outpicref; |
|
200 | 200 |
int linesize = outpicref ? outpicref->linesize[0] : 0; |
201 | 201 |
int16_t *p = (int16_t *)insamples->data[0]; |
202 |
- int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout); |
|
202 |
+ int nb_channels = av_get_channel_layout_nb_channels(insamples->channel_layout); |
|
203 | 203 |
int i, j, k, h, ret = 0; |
204 | 204 |
const int n = showwaves->n; |
205 | 205 |
const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */ |
... | ... |
@@ -208,12 +208,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
208 | 208 |
for (i = 0; i < nb_samples; i++) { |
209 | 209 |
if (!showwaves->outpicref) { |
210 | 210 |
showwaves->outpicref = outpicref = |
211 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, |
|
212 |
- outlink->w, outlink->h); |
|
211 |
+ ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
213 | 212 |
if (!outpicref) |
214 | 213 |
return AVERROR(ENOMEM); |
215 |
- outpicref->video->w = outlink->w; |
|
216 |
- outpicref->video->h = outlink->h; |
|
214 |
+ outpicref->width = outlink->w; |
|
215 |
+ outpicref->height = outlink->h; |
|
217 | 216 |
outpicref->pts = insamples->pts + |
218 | 217 |
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, |
219 | 218 |
(AVRational){ 1, inlink->sample_rate }, |
... | ... |
@@ -251,7 +250,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
251 | 251 |
outpicref = showwaves->outpicref; |
252 | 252 |
} |
253 | 253 |
|
254 |
- avfilter_unref_buffer(insamples); |
|
254 |
+ av_frame_free(&insamples); |
|
255 | 255 |
return ret; |
256 | 256 |
} |
257 | 257 |
|
... | ... |
@@ -260,7 +259,6 @@ static const AVFilterPad showwaves_inputs[] = { |
260 | 260 |
.name = "default", |
261 | 261 |
.type = AVMEDIA_TYPE_AUDIO, |
262 | 262 |
.filter_frame = filter_frame, |
263 |
- .min_perms = AV_PERM_READ, |
|
264 | 263 |
}, |
265 | 264 |
{ NULL } |
266 | 265 |
}; |
... | ... |
@@ -34,43 +34,31 @@ |
34 | 34 |
#include "internal.h" |
35 | 35 |
#include "audio.h" |
36 | 36 |
|
37 |
-static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame); |
|
37 |
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame); |
|
38 | 38 |
|
39 |
-char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms) |
|
40 |
-{ |
|
41 |
- snprintf(buf, buf_size, "%s%s%s%s%s%s", |
|
42 |
- perms & AV_PERM_READ ? "r" : "", |
|
43 |
- perms & AV_PERM_WRITE ? "w" : "", |
|
44 |
- perms & AV_PERM_PRESERVE ? "p" : "", |
|
45 |
- perms & AV_PERM_REUSE ? "u" : "", |
|
46 |
- perms & AV_PERM_REUSE2 ? "U" : "", |
|
47 |
- perms & AV_PERM_NEG_LINESIZES ? "n" : ""); |
|
48 |
- return buf; |
|
49 |
-} |
|
50 |
- |
|
51 |
-void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end) |
|
39 |
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end) |
|
52 | 40 |
{ |
53 | 41 |
av_unused char buf[16]; |
54 | 42 |
ff_tlog(ctx, |
55 |
- "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, |
|
56 |
- ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0], |
|
43 |
+ "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, |
|
44 |
+ ref, ref->buf, ref->data[0], |
|
57 | 45 |
ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3], |
58 |
- ref->pts, ref->pos); |
|
46 |
+ ref->pts, av_frame_get_pkt_pos(ref)); |
|
59 | 47 |
|
60 |
- if (ref->video) { |
|
48 |
+ if (ref->width) { |
|
61 | 49 |
ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c", |
62 |
- ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den, |
|
63 |
- ref->video->w, ref->video->h, |
|
64 |
- !ref->video->interlaced ? 'P' : /* Progressive */ |
|
65 |
- ref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
66 |
- ref->video->key_frame, |
|
67 |
- av_get_picture_type_char(ref->video->pict_type)); |
|
50 |
+ ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den, |
|
51 |
+ ref->width, ref->height, |
|
52 |
+ !ref->interlaced_frame ? 'P' : /* Progressive */ |
|
53 |
+ ref->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
54 |
+ ref->key_frame, |
|
55 |
+ av_get_picture_type_char(ref->pict_type)); |
|
68 | 56 |
} |
69 |
- if (ref->audio) { |
|
57 |
+ if (ref->nb_samples) { |
|
70 | 58 |
ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d", |
71 |
- ref->audio->channel_layout, |
|
72 |
- ref->audio->nb_samples, |
|
73 |
- ref->audio->sample_rate); |
|
59 |
+ ref->channel_layout, |
|
60 |
+ ref->nb_samples, |
|
61 |
+ ref->sample_rate); |
|
74 | 62 |
} |
75 | 63 |
|
76 | 64 |
ff_tlog(ctx, "]%s", end ? "\n" : ""); |
... | ... |
@@ -158,10 +146,7 @@ void avfilter_link_free(AVFilterLink **link) |
158 | 158 |
if (!*link) |
159 | 159 |
return; |
160 | 160 |
|
161 |
- if ((*link)->pool) |
|
162 |
- ff_free_pool((*link)->pool); |
|
163 |
- |
|
164 |
- avfilter_unref_bufferp(&(*link)->partial_buf); |
|
161 |
+ av_frame_free(&(*link)->partial_buf); |
|
165 | 162 |
|
166 | 163 |
av_freep(link); |
167 | 164 |
} |
... | ... |
@@ -342,7 +327,7 @@ int ff_request_frame(AVFilterLink *link) |
342 | 342 |
else if (link->src->inputs[0]) |
343 | 343 |
ret = ff_request_frame(link->src->inputs[0]); |
344 | 344 |
if (ret == AVERROR_EOF && link->partial_buf) { |
345 |
- AVFilterBufferRef *pbuf = link->partial_buf; |
|
345 |
+ AVFrame *pbuf = link->partial_buf; |
|
346 | 346 |
link->partial_buf = NULL; |
347 | 347 |
ff_filter_frame_framed(link, pbuf); |
348 | 348 |
return 0; |
... | ... |
@@ -633,76 +618,64 @@ enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx) |
633 | 633 |
return pads[pad_idx].type; |
634 | 634 |
} |
635 | 635 |
|
636 |
-static int default_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
636 |
+static int default_filter_frame(AVFilterLink *link, AVFrame *frame) |
|
637 | 637 |
{ |
638 | 638 |
return ff_filter_frame(link->dst->outputs[0], frame); |
639 | 639 |
} |
640 | 640 |
|
641 |
-static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame) |
|
641 |
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame) |
|
642 | 642 |
{ |
643 |
- int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *); |
|
643 |
+ int (*filter_frame)(AVFilterLink *, AVFrame *); |
|
644 | 644 |
AVFilterPad *src = link->srcpad; |
645 | 645 |
AVFilterPad *dst = link->dstpad; |
646 |
- AVFilterBufferRef *out; |
|
647 |
- int perms, ret; |
|
646 |
+ AVFrame *out; |
|
647 |
+ int ret; |
|
648 | 648 |
AVFilterCommand *cmd= link->dst->command_queue; |
649 | 649 |
int64_t pts; |
650 | 650 |
|
651 | 651 |
if (link->closed) { |
652 |
- avfilter_unref_buffer(frame); |
|
652 |
+ av_frame_free(&frame); |
|
653 | 653 |
return AVERROR_EOF; |
654 | 654 |
} |
655 | 655 |
|
656 | 656 |
if (!(filter_frame = dst->filter_frame)) |
657 | 657 |
filter_frame = default_filter_frame; |
658 | 658 |
|
659 |
- av_assert1((frame->perms & src->min_perms) == src->min_perms); |
|
660 |
- frame->perms &= ~ src->rej_perms; |
|
661 |
- perms = frame->perms; |
|
662 |
- |
|
663 |
- if (frame->linesize[0] < 0) |
|
664 |
- perms |= AV_PERM_NEG_LINESIZES; |
|
665 |
- |
|
666 |
- /* prepare to copy the frame if the buffer has insufficient permissions */ |
|
667 |
- if ((dst->min_perms & perms) != dst->min_perms || |
|
668 |
- dst->rej_perms & perms) { |
|
669 |
- av_log(link->dst, AV_LOG_DEBUG, |
|
670 |
- "Copying data in avfilter (have perms %x, need %x, reject %x)\n", |
|
671 |
- perms, link->dstpad->min_perms, link->dstpad->rej_perms); |
|
659 |
+ /* copy the frame if needed */ |
|
660 |
+ if (dst->needs_writable && !av_frame_is_writable(frame)) { |
|
661 |
+ av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n"); |
|
672 | 662 |
|
673 | 663 |
/* Maybe use ff_copy_buffer_ref instead? */ |
674 | 664 |
switch (link->type) { |
675 | 665 |
case AVMEDIA_TYPE_VIDEO: |
676 |
- out = ff_get_video_buffer(link, dst->min_perms, |
|
677 |
- link->w, link->h); |
|
666 |
+ out = ff_get_video_buffer(link, link->w, link->h); |
|
678 | 667 |
break; |
679 | 668 |
case AVMEDIA_TYPE_AUDIO: |
680 |
- out = ff_get_audio_buffer(link, dst->min_perms, |
|
681 |
- frame->audio->nb_samples); |
|
669 |
+ out = ff_get_audio_buffer(link, frame->nb_samples); |
|
682 | 670 |
break; |
683 | 671 |
default: return AVERROR(EINVAL); |
684 | 672 |
} |
685 | 673 |
if (!out) { |
686 |
- avfilter_unref_buffer(frame); |
|
674 |
+ av_frame_free(&frame); |
|
687 | 675 |
return AVERROR(ENOMEM); |
688 | 676 |
} |
689 |
- avfilter_copy_buffer_ref_props(out, frame); |
|
677 |
+ av_frame_copy_props(out, frame); |
|
690 | 678 |
|
691 | 679 |
switch (link->type) { |
692 | 680 |
case AVMEDIA_TYPE_VIDEO: |
693 | 681 |
av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize, |
694 |
- frame->format, frame->video->w, frame->video->h); |
|
682 |
+ frame->format, frame->width, frame->height); |
|
695 | 683 |
break; |
696 | 684 |
case AVMEDIA_TYPE_AUDIO: |
697 | 685 |
av_samples_copy(out->extended_data, frame->extended_data, |
698 |
- 0, 0, frame->audio->nb_samples, |
|
699 |
- av_get_channel_layout_nb_channels(frame->audio->channel_layout), |
|
686 |
+ 0, 0, frame->nb_samples, |
|
687 |
+ av_get_channel_layout_nb_channels(frame->channel_layout), |
|
700 | 688 |
frame->format); |
701 | 689 |
break; |
702 | 690 |
default: return AVERROR(EINVAL); |
703 | 691 |
} |
704 | 692 |
|
705 |
- avfilter_unref_buffer(frame); |
|
693 |
+ av_frame_free(&frame); |
|
706 | 694 |
} else |
707 | 695 |
out = frame; |
708 | 696 |
|
... | ... |
@@ -721,48 +694,47 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame) |
721 | 721 |
return ret; |
722 | 722 |
} |
723 | 723 |
|
724 |
-static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFilterBufferRef *frame) |
|
724 |
+static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame) |
|
725 | 725 |
{ |
726 |
- int insamples = frame->audio->nb_samples, inpos = 0, nb_samples; |
|
727 |
- AVFilterBufferRef *pbuf = link->partial_buf; |
|
728 |
- int nb_channels = frame->audio->channels; |
|
726 |
+ int insamples = frame->nb_samples, inpos = 0, nb_samples; |
|
727 |
+ AVFrame *pbuf = link->partial_buf; |
|
728 |
+ int nb_channels = frame->channels; |
|
729 | 729 |
int ret = 0; |
730 | 730 |
|
731 | 731 |
/* Handle framing (min_samples, max_samples) */ |
732 | 732 |
while (insamples) { |
733 | 733 |
if (!pbuf) { |
734 | 734 |
AVRational samples_tb = { 1, link->sample_rate }; |
735 |
- int perms = link->dstpad->min_perms | AV_PERM_WRITE; |
|
736 |
- pbuf = ff_get_audio_buffer(link, perms, link->partial_buf_size); |
|
735 |
+ pbuf = ff_get_audio_buffer(link, link->partial_buf_size); |
|
737 | 736 |
if (!pbuf) { |
738 | 737 |
av_log(link->dst, AV_LOG_WARNING, |
739 | 738 |
"Samples dropped due to memory allocation failure.\n"); |
740 | 739 |
return 0; |
741 | 740 |
} |
742 |
- avfilter_copy_buffer_ref_props(pbuf, frame); |
|
741 |
+ av_frame_copy_props(pbuf, frame); |
|
743 | 742 |
pbuf->pts = frame->pts + |
744 | 743 |
av_rescale_q(inpos, samples_tb, link->time_base); |
745 |
- pbuf->audio->nb_samples = 0; |
|
744 |
+ pbuf->nb_samples = 0; |
|
746 | 745 |
} |
747 | 746 |
nb_samples = FFMIN(insamples, |
748 |
- link->partial_buf_size - pbuf->audio->nb_samples); |
|
747 |
+ link->partial_buf_size - pbuf->nb_samples); |
|
749 | 748 |
av_samples_copy(pbuf->extended_data, frame->extended_data, |
750 |
- pbuf->audio->nb_samples, inpos, |
|
749 |
+ pbuf->nb_samples, inpos, |
|
751 | 750 |
nb_samples, nb_channels, link->format); |
752 | 751 |
inpos += nb_samples; |
753 | 752 |
insamples -= nb_samples; |
754 |
- pbuf->audio->nb_samples += nb_samples; |
|
755 |
- if (pbuf->audio->nb_samples >= link->min_samples) { |
|
753 |
+ pbuf->nb_samples += nb_samples; |
|
754 |
+ if (pbuf->nb_samples >= link->min_samples) { |
|
756 | 755 |
ret = ff_filter_frame_framed(link, pbuf); |
757 | 756 |
pbuf = NULL; |
758 | 757 |
} |
759 | 758 |
} |
760 |
- avfilter_unref_buffer(frame); |
|
759 |
+ av_frame_free(&frame); |
|
761 | 760 |
link->partial_buf = pbuf; |
762 | 761 |
return ret; |
763 | 762 |
} |
764 | 763 |
|
765 |
-int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
764 |
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame) |
|
766 | 765 |
{ |
767 | 766 |
FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1); |
768 | 767 |
|
... | ... |
@@ -770,22 +742,22 @@ int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
770 | 770 |
if (link->type == AVMEDIA_TYPE_VIDEO) { |
771 | 771 |
if (strcmp(link->dst->filter->name, "scale")) { |
772 | 772 |
av_assert1(frame->format == link->format); |
773 |
- av_assert1(frame->video->w == link->w); |
|
774 |
- av_assert1(frame->video->h == link->h); |
|
773 |
+ av_assert1(frame->width == link->w); |
|
774 |
+ av_assert1(frame->height == link->h); |
|
775 | 775 |
} |
776 | 776 |
} else { |
777 | 777 |
av_assert1(frame->format == link->format); |
778 |
- av_assert1(frame->audio->channels == link->channels); |
|
779 |
- av_assert1(frame->audio->channel_layout == link->channel_layout); |
|
780 |
- av_assert1(frame->audio->sample_rate == link->sample_rate); |
|
778 |
+ av_assert1(frame->channels == link->channels); |
|
779 |
+ av_assert1(frame->channel_layout == link->channel_layout); |
|
780 |
+ av_assert1(frame->sample_rate == link->sample_rate); |
|
781 | 781 |
} |
782 | 782 |
|
783 | 783 |
/* Go directly to actual filtering if possible */ |
784 | 784 |
if (link->type == AVMEDIA_TYPE_AUDIO && |
785 | 785 |
link->min_samples && |
786 | 786 |
(link->partial_buf || |
787 |
- frame->audio->nb_samples < link->min_samples || |
|
788 |
- frame->audio->nb_samples > link->max_samples)) { |
|
787 |
+ frame->nb_samples < link->min_samples || |
|
788 |
+ frame->nb_samples > link->max_samples)) { |
|
789 | 789 |
return ff_filter_frame_needs_framing(link, frame); |
790 | 790 |
} else { |
791 | 791 |
return ff_filter_frame_framed(link, frame); |
... | ... |
@@ -37,6 +37,7 @@ |
37 | 37 |
|
38 | 38 |
#include "libavutil/avutil.h" |
39 | 39 |
#include "libavutil/dict.h" |
40 |
+#include "libavutil/frame.h" |
|
40 | 41 |
#include "libavutil/log.h" |
41 | 42 |
#include "libavutil/samplefmt.h" |
42 | 43 |
#include "libavutil/pixfmt.h" |
... | ... |
@@ -69,6 +70,7 @@ typedef struct AVFilterLink AVFilterLink; |
69 | 69 |
typedef struct AVFilterPad AVFilterPad; |
70 | 70 |
typedef struct AVFilterFormats AVFilterFormats; |
71 | 71 |
|
72 |
+#if FF_API_AVFILTERBUFFER |
|
72 | 73 |
/** |
73 | 74 |
* A reference-counted buffer data type used by the filter system. Filters |
74 | 75 |
* should not store pointers to this structure directly, but instead use the |
... | ... |
@@ -200,6 +202,7 @@ typedef struct AVFilterBufferRef { |
200 | 200 |
/** |
201 | 201 |
* Copy properties of src to dst, without copying the actual data |
202 | 202 |
*/ |
203 |
+attribute_deprecated |
|
203 | 204 |
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src); |
204 | 205 |
|
205 | 206 |
/** |
... | ... |
@@ -211,6 +214,7 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s |
211 | 211 |
* @return a new reference to the buffer with the same properties as the |
212 | 212 |
* old, excluding any permissions denied by pmask |
213 | 213 |
*/ |
214 |
+attribute_deprecated |
|
214 | 215 |
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); |
215 | 216 |
|
216 | 217 |
/** |
... | ... |
@@ -222,6 +226,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); |
222 | 222 |
* @note it is recommended to use avfilter_unref_bufferp() instead of this |
223 | 223 |
* function |
224 | 224 |
*/ |
225 |
+attribute_deprecated |
|
225 | 226 |
void avfilter_unref_buffer(AVFilterBufferRef *ref); |
226 | 227 |
|
227 | 228 |
/** |
... | ... |
@@ -231,11 +236,14 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref); |
231 | 231 |
* |
232 | 232 |
* @param ref pointer to the buffer reference |
233 | 233 |
*/ |
234 |
+attribute_deprecated |
|
234 | 235 |
void avfilter_unref_bufferp(AVFilterBufferRef **ref); |
236 |
+#endif |
|
235 | 237 |
|
236 | 238 |
/** |
237 | 239 |
* Get the number of channels of a buffer reference. |
238 | 240 |
*/ |
241 |
+attribute_deprecated |
|
239 | 242 |
int avfilter_ref_get_channels(AVFilterBufferRef *ref); |
240 | 243 |
|
241 | 244 |
#if FF_API_AVFILTERPAD_PUBLIC |
... | ... |
@@ -273,7 +281,7 @@ struct AVFilterPad { |
273 | 273 |
* link must have at least these permissions; this fact is checked by |
274 | 274 |
* asserts. It can be used to optimize buffer allocation. |
275 | 275 |
*/ |
276 |
- int min_perms; |
|
276 |
+ attribute_deprecated int min_perms; |
|
277 | 277 |
|
278 | 278 |
/** |
279 | 279 |
* Input pads: |
... | ... |
@@ -287,7 +295,7 @@ struct AVFilterPad { |
287 | 287 |
* Permissions which are automatically removed on outgoing buffers. It |
288 | 288 |
* can be used to optimize buffer allocation. |
289 | 289 |
*/ |
290 |
- int rej_perms; |
|
290 |
+ attribute_deprecated int rej_perms; |
|
291 | 291 |
|
292 | 292 |
/** |
293 | 293 |
* @deprecated unused |
... | ... |
@@ -300,7 +308,7 @@ struct AVFilterPad { |
300 | 300 |
* |
301 | 301 |
* Input video pads only. |
302 | 302 |
*/ |
303 |
- AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h); |
|
303 |
+ AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); |
|
304 | 304 |
|
305 | 305 |
/** |
306 | 306 |
* Callback function to get an audio buffer. If NULL, the filter system will |
... | ... |
@@ -308,8 +316,7 @@ struct AVFilterPad { |
308 | 308 |
* |
309 | 309 |
* Input audio pads only. |
310 | 310 |
*/ |
311 |
- AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms, |
|
312 |
- int nb_samples); |
|
311 |
+ AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); |
|
313 | 312 |
|
314 | 313 |
/** |
315 | 314 |
* @deprecated unused |
... | ... |
@@ -331,7 +338,7 @@ struct AVFilterPad { |
331 | 331 |
* must ensure that frame is properly unreferenced on error if it |
332 | 332 |
* hasn't been passed on to another filter. |
333 | 333 |
*/ |
334 |
- int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame); |
|
334 |
+ int (*filter_frame)(AVFilterLink *link, AVFrame *frame); |
|
335 | 335 |
|
336 | 336 |
/** |
337 | 337 |
* Frame poll callback. This returns the number of immediately available |
... | ... |
@@ -381,6 +388,8 @@ struct AVFilterPad { |
381 | 381 |
* input pads only. |
382 | 382 |
*/ |
383 | 383 |
int needs_fifo; |
384 |
+ |
|
385 |
+ int needs_writable; |
|
384 | 386 |
}; |
385 | 387 |
#endif |
386 | 388 |
|
... | ... |
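
Taken together, the pad changes mean a filter that used to request AV_PERM_WRITE through min_perms now sets the new needs_writable flag and lets ff_filter_frame_framed() (above) copy any frame that is not writable. A sketch of an input pad declaration under the new callback signatures (filter name hypothetical):

    static const AVFilterPad myfilter_inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_AUDIO,
            .filter_frame   = filter_frame,    /* int (*)(AVFilterLink *, AVFrame *) */
            .needs_writable = 1,               /* replaces .min_perms = AV_PERM_WRITE */
        },
        { NULL }
    };
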
@@ -616,7 +625,7 @@ struct AVFilterLink { |
616 | 616 |
/** |
617 | 617 |
* Buffer partially filled with samples to achieve a fixed/minimum size. |
618 | 618 |
*/ |
619 |
- AVFilterBufferRef *partial_buf; |
|
619 |
+ AVFrame *partial_buf; |
|
620 | 620 |
|
621 | 621 |
/** |
622 | 622 |
* Size of the partial buffer to allocate. |
... | ... |
@@ -701,6 +710,7 @@ void avfilter_link_set_closed(AVFilterLink *link, int closed); |
701 | 701 |
*/ |
702 | 702 |
int avfilter_config_links(AVFilterContext *filter); |
703 | 703 |
|
704 |
+#if FF_API_AVFILTERBUFFER |
|
704 | 705 |
/** |
705 | 706 |
* Create a buffer reference wrapped around an already allocated image |
706 | 707 |
* buffer. |
... | ... |
@@ -712,6 +722,7 @@ int avfilter_config_links(AVFilterContext *filter); |
712 | 712 |
* @param h the height of the image specified by the data and linesize arrays |
713 | 713 |
* @param format the pixel format of the image specified by the data and linesize arrays |
714 | 714 |
*/ |
715 |
+attribute_deprecated |
|
715 | 716 |
AVFilterBufferRef * |
716 | 717 |
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, |
717 | 718 |
int w, int h, enum AVPixelFormat format); |
... | ... |
@@ -730,6 +741,7 @@ avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int lin |
730 | 730 |
* @param sample_fmt the format of each sample in the buffer to allocate |
731 | 731 |
* @param channel_layout the channel layout of the buffer |
732 | 732 |
*/ |
733 |
+attribute_deprecated |
|
733 | 734 |
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, |
734 | 735 |
int linesize, |
735 | 736 |
int perms, |
... | ... |
@@ -749,6 +761,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, |
749 | 749 |
* @param channel_layout the channel layout of the buffer, |
750 | 750 |
* must be either 0 or consistent with channels |
751 | 751 |
*/ |
752 |
+attribute_deprecated |
|
752 | 753 |
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, |
753 | 754 |
int linesize, |
754 | 755 |
int perms, |
... | ... |
@@ -757,6 +770,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t ** |
757 | 757 |
int channels, |
758 | 758 |
uint64_t channel_layout); |
759 | 759 |
|
760 |
+#endif |
|
760 | 761 |
|
761 | 762 |
|
762 | 763 |
#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically |
... | ... |
@@ -845,6 +859,26 @@ void avfilter_free(AVFilterContext *filter); |
845 | 845 |
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, |
846 | 846 |
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); |
847 | 847 |
|
848 |
+#if FF_API_AVFILTERBUFFER |
|
849 |
+/** |
|
850 |
+ * Copy the frame properties of src to dst, without copying the actual |
|
851 |
+ * image data. |
|
852 |
+ * |
|
853 |
+ * @return 0 on success, a negative number on error. |
|
854 |
+ */ |
|
855 |
+attribute_deprecated |
|
856 |
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); |
|
857 |
+ |
|
858 |
+/** |
|
859 |
+ * Copy the frame properties and data pointers of src to dst, without copying |
|
860 |
+ * the actual data. |
|
861 |
+ * |
|
862 |
+ * @return 0 on success, a negative number on error. |
|
863 |
+ */ |
|
864 |
+attribute_deprecated |
|
865 |
+int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); |
|
866 |
+#endif |
|
867 |
+ |
|
848 | 868 |
/** |
849 | 869 |
* @} |
850 | 870 |
*/ |
... | ... |
@@ -92,84 +92,13 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask) |
92 | 92 |
return ret; |
93 | 93 |
} |
94 | 94 |
|
95 |
-void ff_free_pool(AVFilterPool *pool) |
|
96 |
-{ |
|
97 |
- int i; |
|
98 |
- |
|
99 |
- av_assert0(pool->refcount > 0); |
|
100 |
- |
|
101 |
- for (i = 0; i < POOL_SIZE; i++) { |
|
102 |
- if (pool->pic[i]) { |
|
103 |
- AVFilterBufferRef *picref = pool->pic[i]; |
|
104 |
- /* free buffer: picrefs stored in the pool are not |
|
105 |
- * supposed to contain a free callback */ |
|
106 |
- av_assert0(!picref->buf->refcount); |
|
107 |
- av_freep(&picref->buf->data[0]); |
|
108 |
- av_freep(&picref->buf); |
|
109 |
- |
|
110 |
- av_freep(&picref->audio); |
|
111 |
- av_assert0(!picref->video || !picref->video->qp_table); |
|
112 |
- av_freep(&picref->video); |
|
113 |
- av_freep(&pool->pic[i]); |
|
114 |
- pool->count--; |
|
115 |
- } |
|
116 |
- } |
|
117 |
- pool->draining = 1; |
|
118 |
- |
|
119 |
- if (!--pool->refcount) { |
|
120 |
- av_assert0(!pool->count); |
|
121 |
- av_free(pool); |
|
122 |
- } |
|
123 |
-} |
|
124 |
- |
|
125 |
-static void store_in_pool(AVFilterBufferRef *ref) |
|
126 |
-{ |
|
127 |
- int i; |
|
128 |
- AVFilterPool *pool= ref->buf->priv; |
|
129 |
- |
|
130 |
- av_assert0(ref->buf->data[0]); |
|
131 |
- av_assert0(pool->refcount>0); |
|
132 |
- |
|
133 |
- if (ref->video) |
|
134 |
- av_freep(&ref->video->qp_table); |
|
135 |
- |
|
136 |
- if (pool->count == POOL_SIZE) { |
|
137 |
- AVFilterBufferRef *ref1 = pool->pic[0]; |
|
138 |
- av_freep(&ref1->video); |
|
139 |
- av_freep(&ref1->audio); |
|
140 |
- av_freep(&ref1->buf->data[0]); |
|
141 |
- av_freep(&ref1->buf); |
|
142 |
- av_free(ref1); |
|
143 |
- memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1)); |
|
144 |
- pool->count--; |
|
145 |
- pool->pic[POOL_SIZE-1] = NULL; |
|
146 |
- } |
|
147 |
- |
|
148 |
- for (i = 0; i < POOL_SIZE; i++) { |
|
149 |
- if (!pool->pic[i]) { |
|
150 |
- pool->pic[i] = ref; |
|
151 |
- pool->count++; |
|
152 |
- break; |
|
153 |
- } |
|
154 |
- } |
|
155 |
- if (pool->draining) { |
|
156 |
- ff_free_pool(pool); |
|
157 |
- } else |
|
158 |
- --pool->refcount; |
|
159 |
-} |
|
160 |
- |
|
161 | 95 |
void avfilter_unref_buffer(AVFilterBufferRef *ref) |
162 | 96 |
{ |
163 | 97 |
if (!ref) |
164 | 98 |
return; |
165 | 99 |
av_assert0(ref->buf->refcount > 0); |
166 |
- if (!(--ref->buf->refcount)) { |
|
167 |
- if (!ref->buf->free) { |
|
168 |
- store_in_pool(ref); |
|
169 |
- return; |
|
170 |
- } |
|
100 |
+ if (!(--ref->buf->refcount)) |
|
171 | 101 |
ref->buf->free(ref->buf); |
172 |
- } |
|
173 | 102 |
if (ref->extended_data != ref->data) |
174 | 103 |
av_freep(&ref->extended_data); |
175 | 104 |
if (ref->video) |
... | ... |
@@ -186,6 +115,36 @@ void avfilter_unref_bufferp(AVFilterBufferRef **ref) |
186 | 186 |
*ref = NULL; |
187 | 187 |
} |
188 | 188 |
|
189 |
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) |
|
190 |
+{ |
|
191 |
+ dst->pts = src->pts; |
|
192 |
+ dst->pos = av_frame_get_pkt_pos(src); |
|
193 |
+ dst->format = src->format; |
|
194 |
+ |
|
195 |
+ av_dict_free(&dst->metadata); |
|
196 |
+ av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0); |
|
197 |
+ |
|
198 |
+ switch (dst->type) { |
|
199 |
+ case AVMEDIA_TYPE_VIDEO: |
|
200 |
+ dst->video->w = src->width; |
|
201 |
+ dst->video->h = src->height; |
|
202 |
+ dst->video->sample_aspect_ratio = src->sample_aspect_ratio; |
|
203 |
+ dst->video->interlaced = src->interlaced_frame; |
|
204 |
+ dst->video->top_field_first = src->top_field_first; |
|
205 |
+ dst->video->key_frame = src->key_frame; |
|
206 |
+ dst->video->pict_type = src->pict_type; |
|
207 |
+ break; |
|
208 |
+ case AVMEDIA_TYPE_AUDIO: |
|
209 |
+ dst->audio->sample_rate = src->sample_rate; |
|
210 |
+ dst->audio->channel_layout = src->channel_layout; |
|
211 |
+ break; |
|
212 |
+ default: |
|
213 |
+ return AVERROR(EINVAL); |
|
214 |
+ } |
|
215 |
+ |
|
216 |
+ return 0; |
|
217 |
+} |
|
218 |
+ |
|
189 | 219 |
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src) |
190 | 220 |
{ |
191 | 221 |
// copy common properties |
... | ... |
@@ -206,40 +165,3 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s |
206 | 206 |
av_dict_free(&dst->metadata); |
207 | 207 |
av_dict_copy(&dst->metadata, src->metadata, 0); |
208 | 208 |
} |
209 |
- |
|
210 |
-AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink, |
|
211 |
- AVFilterBufferRef *ref) |
|
212 |
-{ |
|
213 |
- AVFilterBufferRef *buf; |
|
214 |
- int channels; |
|
215 |
- |
|
216 |
- switch (outlink->type) { |
|
217 |
- |
|
218 |
- case AVMEDIA_TYPE_VIDEO: |
|
219 |
- buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
220 |
- ref->video->w, ref->video->h); |
|
221 |
- if(!buf) |
|
222 |
- return NULL; |
|
223 |
- av_image_copy(buf->data, buf->linesize, |
|
224 |
- (void*)ref->data, ref->linesize, |
|
225 |
- ref->format, ref->video->w, ref->video->h); |
|
226 |
- break; |
|
227 |
- |
|
228 |
- case AVMEDIA_TYPE_AUDIO: |
|
229 |
- buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, |
|
230 |
- ref->audio->nb_samples); |
|
231 |
- if(!buf) |
|
232 |
- return NULL; |
|
233 |
- channels = ref->audio->channels; |
|
234 |
- av_samples_copy(buf->extended_data, ref->buf->extended_data, |
|
235 |
- 0, 0, ref->audio->nb_samples, |
|
236 |
- channels, |
|
237 |
- ref->format); |
|
238 |
- break; |
|
239 |
- |
|
240 |
- default: |
|
241 |
- return NULL; |
|
242 |
- } |
|
243 |
- avfilter_copy_buffer_ref_props(buf, ref); |
|
244 |
- return buf; |
|
245 |
-} |
... | ... |
@@ -23,7 +23,7 @@ |
23 | 23 |
#define AVFILTER_BUFFERQUEUE_H |
24 | 24 |
|
25 | 25 |
/** |
26 |
- * FFBufQueue: simple AVFilterBufferRef queue API |
|
26 |
+ * FFBufQueue: simple AVFrame queue API |
|
27 | 27 |
* |
28 | 28 |
* Note: this API is not thread-safe. Concurrent access to the same queue |
29 | 29 |
* must be protected by a mutex or any synchronization mechanism. |
... | ... |
@@ -47,7 +47,7 @@ |
47 | 47 |
* Structure holding the queue |
48 | 48 |
*/ |
49 | 49 |
struct FFBufQueue { |
50 |
- AVFilterBufferRef *queue[FF_BUFQUEUE_SIZE]; |
|
50 |
+ AVFrame *queue[FF_BUFQUEUE_SIZE]; |
|
51 | 51 |
unsigned short head; |
52 | 52 |
unsigned short available; /**< number of available buffers */ |
53 | 53 |
}; |
... | ... |
@@ -69,11 +69,11 @@ static inline int ff_bufqueue_is_full(struct FFBufQueue *queue) |
69 | 69 |
* (and unrefed) with a warning before adding the new buffer. |
70 | 70 |
*/ |
71 | 71 |
static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue, |
72 |
- AVFilterBufferRef *buf) |
|
72 |
+ AVFrame *buf) |
|
73 | 73 |
{ |
74 | 74 |
if (ff_bufqueue_is_full(queue)) { |
75 | 75 |
av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n"); |
76 |
- avfilter_unref_buffer(BUCKET(--queue->available)); |
|
76 |
+ av_frame_free(&BUCKET(--queue->available)); |
|
77 | 77 |
} |
78 | 78 |
BUCKET(queue->available++) = buf; |
79 | 79 |
} |
... | ... |
@@ -84,8 +84,8 @@ static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue, |
84 | 84 |
* Buffer with index 0 is the first buffer in the queue. |
85 | 85 |
* Return NULL if the queue has not enough buffers. |
86 | 86 |
*/ |
87 |
-static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue, |
|
88 |
- unsigned index) |
|
87 |
+static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue, |
|
88 |
+ unsigned index) |
|
89 | 89 |
{ |
90 | 90 |
return index < queue->available ? BUCKET(index) : NULL; |
91 | 91 |
} |
... | ... |
@@ -95,9 +95,9 @@ static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue, |
95 | 95 |
* |
96 | 96 |
* Do not use on an empty queue. |
97 | 97 |
*/ |
98 |
-static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue) |
|
98 |
+static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue) |
|
99 | 99 |
{ |
100 |
- AVFilterBufferRef *ret = queue->queue[queue->head]; |
|
100 |
+ AVFrame *ret = queue->queue[queue->head]; |
|
101 | 101 |
av_assert0(queue->available); |
102 | 102 |
queue->available--; |
103 | 103 |
queue->queue[queue->head] = NULL; |
... | ... |
@@ -110,8 +110,10 @@ static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue) |
110 | 110 |
*/ |
111 | 111 |
static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue) |
112 | 112 |
{ |
113 |
- while (queue->available) |
|
114 |
- avfilter_unref_buffer(ff_bufqueue_get(queue)); |
|
113 |
+ while (queue->available) { |
|
114 |
+ AVFrame *buf = ff_bufqueue_get(queue); |
|
115 |
+ av_frame_free(&buf); |
|
116 |
+ } |
|
115 | 117 |
} |
116 | 118 |
|
117 | 119 |
#undef BUCKET |
... | ... |
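With the bufferqueue switched to AVFrame above, queued entries are owned by the queue and released with av_frame_free() instead of avfilter_unref_buffer(). A minimal usage sketch, assuming a hypothetical filter whose private context (MyContext, not part of this commit) holds a struct FFBufQueue named queue:

    /* filter_frame: hand the incoming frame to the queue, which then owns it */
    static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        MyContext *s = inlink->dst->priv;
        ff_bufqueue_add(inlink->dst, &s->queue, frame);
        return 0;
    }

    /* later: pop one frame, use it, then release it with the AVFrame API */
    static void drain_one(MyContext *s)
    {
        if (s->queue.available) {
            AVFrame *frame = ff_bufqueue_get(&s->queue);
            /* ... process frame ... */
            av_frame_free(&frame);
        }
    }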
@@ -35,7 +35,7 @@ |
35 | 35 |
#include "internal.h" |
36 | 36 |
|
37 | 37 |
typedef struct { |
38 |
- AVFilterBufferRef *cur_buf; ///< last buffer delivered on the sink |
|
38 |
+ AVFrame *cur_frame; ///< last frame delivered on the sink |
|
39 | 39 |
AVAudioFifo *audio_fifo; ///< FIFO for audio samples |
40 | 40 |
int64_t next_pts; ///< interpolating audio pts |
41 | 41 |
} BufferSinkContext; |
... | ... |
@@ -48,59 +48,71 @@ static av_cold void uninit(AVFilterContext *ctx) |
48 | 48 |
av_audio_fifo_free(sink->audio_fifo); |
49 | 49 |
} |
50 | 50 |
|
51 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) |
|
51 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
52 | 52 |
{ |
53 | 53 |
BufferSinkContext *s = link->dst->priv; |
54 | 54 |
|
55 |
-// av_assert0(!s->cur_buf); |
|
56 |
- s->cur_buf = buf; |
|
55 |
+// av_assert0(!s->cur_frame); |
|
56 |
+ s->cur_frame = frame; |
|
57 | 57 |
|
58 | 58 |
return 0; |
59 | 59 |
} |
60 | 60 |
|
61 |
+<<<<<<< HEAD |
|
61 | 62 |
int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf) |
63 |
+||||||| merged common ancestors |
|
64 |
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) |
|
65 |
+======= |
|
66 |
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame) |
|
67 |
+>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911 |
|
62 | 68 |
{ |
63 | 69 |
BufferSinkContext *s = ctx->priv; |
64 | 70 |
AVFilterLink *link = ctx->inputs[0]; |
65 | 71 |
int ret; |
66 | 72 |
|
67 |
- if (!buf) |
|
68 |
- return ff_poll_frame(ctx->inputs[0]); |
|
69 |
- |
|
70 | 73 |
if ((ret = ff_request_frame(link)) < 0) |
71 | 74 |
return ret; |
72 | 75 |
|
73 |
- if (!s->cur_buf) |
|
76 |
+ if (!s->cur_frame) |
|
74 | 77 |
return AVERROR(EINVAL); |
75 | 78 |
|
76 |
- *buf = s->cur_buf; |
|
77 |
- s->cur_buf = NULL; |
|
79 |
+ av_frame_move_ref(frame, s->cur_frame); |
|
80 |
+ av_frame_free(&s->cur_frame); |
|
78 | 81 |
|
79 | 82 |
return 0; |
80 | 83 |
} |
81 | 84 |
|
82 |
-static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf, |
|
85 |
+static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame, |
|
83 | 86 |
int nb_samples) |
84 | 87 |
{ |
85 | 88 |
BufferSinkContext *s = ctx->priv; |
86 | 89 |
AVFilterLink *link = ctx->inputs[0]; |
87 |
- AVFilterBufferRef *buf; |
|
90 |
+ AVFrame *tmp; |
|
88 | 91 |
|
89 |
- if (!(buf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples))) |
|
92 |
+ if (!(tmp = ff_get_audio_buffer(link, nb_samples))) |
|
90 | 93 |
return AVERROR(ENOMEM); |
91 |
- av_audio_fifo_read(s->audio_fifo, (void**)buf->extended_data, nb_samples); |
|
94 |
+ av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples); |
|
92 | 95 |
|
93 |
- buf->pts = s->next_pts; |
|
96 |
+ tmp->pts = s->next_pts; |
|
94 | 97 |
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate}, |
95 | 98 |
link->time_base); |
96 | 99 |
|
97 |
- *pbuf = buf; |
|
100 |
+ av_frame_move_ref(frame, tmp); |
|
101 |
+ av_frame_free(&tmp); |
|
102 |
+ |
|
98 | 103 |
return 0; |
99 | 104 |
|
100 | 105 |
} |
101 | 106 |
|
107 |
+<<<<<<< HEAD |
|
102 | 108 |
int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf, |
103 | 109 |
int nb_samples) |
110 |
+||||||| merged common ancestors |
|
111 |
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf, |
|
112 |
+ int nb_samples) |
|
113 |
+======= |
|
114 |
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples) |
|
115 |
+>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911 |
|
104 | 116 |
{ |
105 | 117 |
BufferSinkContext *s = ctx->priv; |
106 | 118 |
AVFilterLink *link = ctx->inputs[0]; |
... | ... |
@@ -113,38 +125,107 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef ** |
113 | 113 |
} |
114 | 114 |
|
115 | 115 |
while (ret >= 0) { |
116 |
- AVFilterBufferRef *buf; |
|
117 |
- |
|
118 | 116 |
if (av_audio_fifo_size(s->audio_fifo) >= nb_samples) |
119 |
- return read_from_fifo(ctx, pbuf, nb_samples); |
|
117 |
+ return read_from_fifo(ctx, frame, nb_samples); |
|
120 | 118 |
|
121 |
- ret = av_buffersink_read(ctx, &buf); |
|
119 |
+ ret = ff_request_frame(link); |
|
122 | 120 |
if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) |
123 |
- return read_from_fifo(ctx, pbuf, av_audio_fifo_size(s->audio_fifo)); |
|
121 |
+ return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo)); |
|
124 | 122 |
else if (ret < 0) |
125 | 123 |
return ret; |
126 | 124 |
|
127 |
- if (buf->pts != AV_NOPTS_VALUE) { |
|
128 |
- s->next_pts = buf->pts - |
|
125 |
+ if (s->cur_frame->pts != AV_NOPTS_VALUE) { |
|
126 |
+ s->next_pts = s->cur_frame->pts - |
|
129 | 127 |
av_rescale_q(av_audio_fifo_size(s->audio_fifo), |
130 | 128 |
(AVRational){ 1, link->sample_rate }, |
131 | 129 |
link->time_base); |
132 | 130 |
} |
133 | 131 |
|
134 |
- ret = av_audio_fifo_write(s->audio_fifo, (void**)buf->extended_data, |
|
135 |
- buf->audio->nb_samples); |
|
136 |
- avfilter_unref_buffer(buf); |
|
132 |
+ ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data, |
|
133 |
+ s->cur_frame->nb_samples); |
|
134 |
+ av_frame_free(&s->cur_frame); |
|
135 |
+ } |
|
136 |
+ |
|
137 |
+ return ret; |
|
138 |
+ |
|
139 |
+} |
|
140 |
+ |
|
141 |
+#if FF_API_AVFILTERBUFFER |
|
142 |
+static void compat_free_buffer(AVFilterBuffer *buf) |
|
143 |
+{ |
|
144 |
+ AVFrame *frame = buf->priv; |
|
145 |
+ av_frame_free(&frame); |
|
146 |
+ av_free(buf); |
|
147 |
+} |
|
148 |
+ |
|
149 |
+static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples) |
|
150 |
+{ |
|
151 |
+ AVFilterBufferRef *buf; |
|
152 |
+ AVFrame *frame; |
|
153 |
+ int ret; |
|
154 |
+ |
|
155 |
+ if (!pbuf) |
|
156 |
+ return ff_poll_frame(ctx->inputs[0]); |
|
157 |
+ |
|
158 |
+ frame = av_frame_alloc(); |
|
159 |
+ if (!frame) |
|
160 |
+ return AVERROR(ENOMEM); |
|
161 |
+ |
|
162 |
+ if (!nb_samples) |
|
163 |
+ ret = av_buffersink_get_frame(ctx, frame); |
|
164 |
+ else |
|
165 |
+ ret = av_buffersink_get_samples(ctx, frame, nb_samples); |
|
166 |
+ |
|
167 |
+ if (ret < 0) |
|
168 |
+ goto fail; |
|
169 |
+ |
|
170 |
+ if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) { |
|
171 |
+ buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, |
|
172 |
+ AV_PERM_READ, |
|
173 |
+ frame->width, frame->height, |
|
174 |
+ frame->format); |
|
175 |
+ } else { |
|
176 |
+ buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data, |
|
177 |
+ frame->linesize[0], AV_PERM_READ, |
|
178 |
+ frame->nb_samples, |
|
179 |
+ frame->format, |
|
180 |
+ frame->channel_layout); |
|
137 | 181 |
} |
182 |
+ if (!buf) { |
|
183 |
+ ret = AVERROR(ENOMEM); |
|
184 |
+ goto fail; |
|
185 |
+ } |
|
186 |
+ |
|
187 |
+ avfilter_copy_frame_props(buf, frame); |
|
188 |
+ |
|
189 |
+ buf->buf->priv = frame; |
|
190 |
+ buf->buf->free = compat_free_buffer; |
|
191 |
+ |
|
192 |
+ *pbuf = buf; |
|
138 | 193 |
|
194 |
+ return 0; |
|
195 |
+fail: |
|
196 |
+ av_frame_free(&frame); |
|
139 | 197 |
return ret; |
140 | 198 |
} |
141 | 199 |
|
200 |
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) |
|
201 |
+{ |
|
202 |
+ return compat_read(ctx, buf, 0); |
|
203 |
+} |
|
204 |
+ |
|
205 |
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, |
|
206 |
+ int nb_samples) |
|
207 |
+{ |
|
208 |
+ return compat_read(ctx, buf, nb_samples); |
|
209 |
+} |
|
210 |
+#endif |
|
211 |
+ |
|
142 | 212 |
static const AVFilterPad avfilter_vsink_buffer_inputs[] = { |
143 | 213 |
{ |
144 | 214 |
.name = "default", |
145 | 215 |
.type = AVMEDIA_TYPE_VIDEO, |
146 | 216 |
.filter_frame = filter_frame, |
147 |
- .min_perms = AV_PERM_READ, |
|
148 | 217 |
.needs_fifo = 1 |
149 | 218 |
}, |
150 | 219 |
{ NULL } |
... | ... |
@@ -169,7 +250,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = { |
169 | 169 |
.name = "default", |
170 | 170 |
.type = AVMEDIA_TYPE_AUDIO, |
171 | 171 |
.filter_frame = filter_frame, |
172 |
- .min_perms = AV_PERM_READ, |
|
173 | 172 |
.needs_fifo = 1 |
174 | 173 |
}, |
175 | 174 |
{ NULL } |
... | ... |
@@ -26,6 +26,7 @@ |
26 | 26 |
|
27 | 27 |
#include "avfilter.h" |
28 | 28 |
|
29 |
+#if FF_API_AVFILTERBUFFER |
|
29 | 30 |
/** |
30 | 31 |
* Struct to use for initializing a buffersink context. |
31 | 32 |
*/ |
... | ... |
@@ -94,6 +95,8 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); |
94 | 94 |
int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink, |
95 | 95 |
AVFilterBufferRef **bufref, int flags); |
96 | 96 |
|
97 |
+/* TODO */ |
|
98 |
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); |
|
97 | 99 |
|
98 | 100 |
/** |
99 | 101 |
* Get the number of immediately available frames. |
... | ... |
@@ -122,6 +125,7 @@ AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); |
122 | 122 |
* @return >= 0 in case of success, a negative AVERROR code in case of |
123 | 123 |
* failure. |
124 | 124 |
*/ |
125 |
+attribute_deprecated |
|
125 | 126 |
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); |
126 | 127 |
|
127 | 128 |
/** |
... | ... |
@@ -140,8 +144,38 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); |
140 | 140 |
* @warning do not mix this function with av_buffersink_read(). Use only one or |
141 | 141 |
* the other with a single sink, not both. |
142 | 142 |
*/ |
143 |
+attribute_deprecated |
|
143 | 144 |
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, |
144 | 145 |
int nb_samples); |
146 |
+#endif |
|
147 |
+ |
|
148 |
+/** |
|
149 |
+ * Get a frame with filtered data from sink and put it in frame. |
|
150 |
+ * |
|
151 |
+ * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. |
|
152 |
+ * @param frame pointer to an allocated frame that will be filled with data. |
|
153 |
+ * The data must be freed using av_frame_unref() / av_frame_free() |
|
154 |
+ * |
|
155 |
+ * @return >= 0 in case of success, a negative AVERROR code in case of |
|
156 |
+ * failure. |
|
157 |
+ */ |
|
158 |
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); |
|
159 |
+ |
|
160 |
+/** |
|
161 |
+ * Same as av_buffersink_get_frame(), but with the ability to specify the number |
|
162 |
+ * of samples read. This function is less efficient than |
|
163 |
+ * av_buffersink_get_frame(), because it copies the data around. |
|
164 |
+ * |
|
165 |
+ * @param ctx pointer to a context of the abuffersink AVFilter. |
|
166 |
+ * @param frame pointer to an allocated frame that will be filled with data. |
|
167 |
+ * The data must be freed using av_frame_unref() / av_frame_free() |
|
168 |
+ * frame will contain exactly nb_samples audio samples, except at |
|
169 |
+ * the end of stream, when it can contain less than nb_samples. |
|
170 |
+ * |
|
171 |
+ * @warning do not mix this function with av_buffersink_get_frame(). Use only one or |
|
172 |
+ * the other with a single sink, not both. |
|
173 |
+ */ |
|
174 |
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); |
|
145 | 175 |
|
146 | 176 |
/** |
147 | 177 |
* @} |
... | ... |
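The new pull API declared above replaces av_buffersink_read(): the caller supplies an allocated AVFrame and unrefs it between calls. A minimal drain-loop sketch (drain_sink is hypothetical; real code would distinguish AVERROR_EOF from genuine errors):

    static int drain_sink(AVFilterContext *sink_ctx)
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);
        while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
            /* ... consume the filtered frame ... */
            av_frame_unref(frame);   /* required before reusing the frame */
        }
        av_frame_free(&frame);
        return ret;                  /* AVERROR_EOF at end of stream, or an error */
    }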
@@ -26,6 +26,7 @@ |
26 | 26 |
#include "libavutil/channel_layout.h" |
27 | 27 |
#include "libavutil/common.h" |
28 | 28 |
#include "libavutil/fifo.h" |
29 |
+#include "libavutil/frame.h" |
|
29 | 30 |
#include "libavutil/imgutils.h" |
30 | 31 |
#include "libavutil/opt.h" |
31 | 32 |
#include "libavutil/samplefmt.h" |
... | ... |
@@ -74,99 +75,193 @@ typedef struct { |
74 | 74 |
return AVERROR(EINVAL);\ |
75 | 75 |
} |
76 | 76 |
|
77 |
-int av_buffersrc_add_frame(AVFilterContext *buffer_src, |
|
78 |
- const AVFrame *frame, int flags) |
|
77 |
+int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags) |
|
79 | 78 |
{ |
80 |
- AVFilterBufferRef *picref; |
|
81 |
- int ret; |
|
79 |
+ return av_buffersrc_add_frame(ctx, frame); |
|
80 |
+} |
|
81 |
+ |
|
82 |
+int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame) |
|
83 |
+{ |
|
84 |
+ AVFrame *copy; |
|
85 |
+ int ret = 0; |
|
86 |
+ int64_t layout = frame->channel_layout; |
|
82 | 87 |
|
83 |
- if (!frame) /* NULL for EOF */ |
|
84 |
- return av_buffersrc_add_ref(buffer_src, NULL, flags); |
|
88 |
+ if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) { |
|
89 |
+ av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); |
|
90 |
+ return AVERROR(EINVAL); |
|
91 |
+ } |
|
85 | 92 |
|
86 |
- picref = avfilter_get_buffer_ref_from_frame(buffer_src->outputs[0]->type, |
|
87 |
- frame, AV_PERM_WRITE); |
|
88 |
- if (!picref) |
|
93 |
+ if (!(copy = av_frame_alloc())) |
|
89 | 94 |
return AVERROR(ENOMEM); |
90 |
- ret = av_buffersrc_add_ref(buffer_src, picref, flags); |
|
91 |
- picref->buf->data[0] = NULL; |
|
92 |
- avfilter_unref_buffer(picref); |
|
93 |
- return ret; |
|
94 |
-} |
|
95 |
+ ret = av_frame_ref(copy, frame); |
|
96 |
+ if (ret >= 0) |
|
97 |
+ ret = av_buffersrc_add_frame(ctx, copy); |
|
95 | 98 |
|
96 |
-int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame) |
|
97 |
-{ |
|
98 |
- return av_buffersrc_add_frame(buffer_filter, frame, 0); |
|
99 |
+ av_frame_free(&copy); |
|
100 |
+ return ret; |
|
99 | 101 |
} |
100 | 102 |
|
101 |
-int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags) |
|
103 |
+int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame) |
|
102 | 104 |
{ |
103 |
- BufferSourceContext *c = s->priv; |
|
104 |
- AVFilterBufferRef *to_free = NULL; |
|
105 |
+ BufferSourceContext *s = ctx->priv; |
|
106 |
+ AVFrame *copy; |
|
105 | 107 |
int ret; |
108 |
+ int64_t layout; |
|
106 | 109 |
|
107 |
- if (!buf) { |
|
108 |
- c->eof = 1; |
|
110 |
+ if (!frame) { |
|
111 |
+ s->eof = 1; |
|
109 | 112 |
return 0; |
110 |
- } else if (c->eof) |
|
113 |
+ } else if (s->eof) |
|
111 | 114 |
return AVERROR(EINVAL); |
112 | 115 |
|
113 |
- if (!av_fifo_space(c->fifo) && |
|
114 |
- (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) + |
|
115 |
- sizeof(buf))) < 0) |
|
116 |
- return ret; |
|
116 |
+ switch (ctx->outputs[0]->type) { |
|
117 |
+ case AVMEDIA_TYPE_VIDEO: |
|
118 |
+ CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height, |
|
119 |
+ frame->format); |
|
120 |
+ break; |
|
121 |
+ case AVMEDIA_TYPE_AUDIO: |
|
122 |
+ CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout, |
|
123 |
+ frame->format); |
|
117 | 124 |
|
118 |
- if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) { |
|
119 |
- switch (s->outputs[0]->type) { |
|
120 |
- case AVMEDIA_TYPE_VIDEO: |
|
121 |
- CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format); |
|
122 |
- break; |
|
123 |
- case AVMEDIA_TYPE_AUDIO: |
|
124 |
- if (!buf->audio->channel_layout) |
|
125 |
- buf->audio->channel_layout = c->channel_layout; |
|
126 |
- CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout, |
|
127 |
- buf->format); |
|
128 |
- break; |
|
129 |
- default: |
|
125 |
+ layout = frame->channel_layout; |
|
126 |
+ if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) { |
|
127 |
+ av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); |
|
130 | 128 |
return AVERROR(EINVAL); |
131 | 129 |
} |
130 |
+ break; |
|
131 |
+ default: |
|
132 |
+ return AVERROR(EINVAL); |
|
132 | 133 |
} |
133 |
- if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY)) |
|
134 |
- to_free = buf = ff_copy_buffer_ref(s->outputs[0], buf); |
|
135 |
- if(!buf) |
|
136 |
- return -1; |
|
137 | 134 |
|
138 |
- if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) { |
|
139 |
- avfilter_unref_buffer(to_free); |
|
135 |
+ if (!av_fifo_space(s->fifo) && |
|
136 |
+ (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) + |
|
137 |
+ sizeof(copy))) < 0) |
|
140 | 138 |
return ret; |
141 |
- } |
|
142 |
- c->nb_failed_requests = 0; |
|
143 |
- if (c->warning_limit && |
|
144 |
- av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) { |
|
145 |
- av_log(s, AV_LOG_WARNING, |
|
146 |
- "%d buffers queued in %s, something may be wrong.\n", |
|
147 |
- c->warning_limit, |
|
148 |
- (char *)av_x_if_null(s->name, s->filter->name)); |
|
149 |
- c->warning_limit *= 10; |
|
150 |
- } |
|
151 | 139 |
|
152 |
- if ((flags & AV_BUFFERSRC_FLAG_PUSH)) |
|
153 |
- if ((ret = s->output_pads[0].request_frame(s->outputs[0])) < 0) |
|
154 |
- return ret; |
|
140 |
+ if (!(copy = av_frame_alloc())) |
|
141 |
+ return AVERROR(ENOMEM); |
|
142 |
+ av_frame_move_ref(copy, frame); |
|
143 |
+ |
|
144 |
+ if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) { |
|
145 |
+ av_frame_move_ref(frame, copy); |
|
146 |
+ av_frame_free(&copy); |
|
147 |
+ return ret; |
|
148 |
+ } |
|
155 | 149 |
|
156 | 150 |
return 0; |
157 | 151 |
} |
158 | 152 |
|
159 |
-#ifdef FF_API_BUFFERSRC_BUFFER |
|
160 |
-int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf) |
|
153 |
+#if FF_API_AVFILTERBUFFER |
|
154 |
+static void compat_free_buffer(void *opaque, uint8_t *data) |
|
161 | 155 |
{ |
162 |
- return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY); |
|
156 |
+ AVFilterBufferRef *buf = opaque; |
|
157 |
+ avfilter_unref_buffer(buf); |
|
163 | 158 |
} |
164 |
-#endif |
|
165 | 159 |
|
166 |
-unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src) |
|
160 |
+static void compat_unref_buffer(void *opaque, uint8_t *data) |
|
167 | 161 |
{ |
168 |
- return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests; |
|
162 |
+ AVBufferRef *buf = opaque; |
|
163 |
+ av_buffer_unref(&buf); |
|
164 |
+} |
|
165 |
+ |
|
166 |
+int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf, |
|
167 |
+ int flags) |
|
168 |
+{ |
|
169 |
+ BufferSourceContext *s = ctx->priv; |
|
170 |
+ AVFrame *frame = NULL; |
|
171 |
+ AVBufferRef *dummy_buf = NULL; |
|
172 |
+ int ret = 0, planes, i; |
|
173 |
+ |
|
174 |
+ if (!buf) { |
|
175 |
+ s->eof = 1; |
|
176 |
+ return 0; |
|
177 |
+ } else if (s->eof) |
|
178 |
+ return AVERROR(EINVAL); |
|
179 |
+ |
|
180 |
+ frame = av_frame_alloc(); |
|
181 |
+ if (!frame) |
|
182 |
+ return AVERROR(ENOMEM); |
|
183 |
+ |
|
184 |
+ dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, |
|
185 |
+ (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); |
|
186 |
+ if (!dummy_buf) { |
|
187 |
+ ret = AVERROR(ENOMEM); |
|
188 |
+ goto fail; |
|
189 |
+ } |
|
190 |
+ |
|
191 |
+ if ((ret = avfilter_copy_buf_props(frame, buf)) < 0) |
|
192 |
+ goto fail; |
|
193 |
+ |
|
194 |
+#define WRAP_PLANE(ref_out, data, data_size) \ |
|
195 |
+do { \ |
|
196 |
+ AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ |
|
197 |
+ if (!dummy_ref) { \ |
|
198 |
+ ret = AVERROR(ENOMEM); \ |
|
199 |
+ goto fail; \ |
|
200 |
+ } \ |
|
201 |
+ ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \ |
|
202 |
+ dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \ |
|
203 |
+ if (!ref_out) { \ |
|
204 |
+ av_frame_unref(frame); \ |
|
205 |
+ ret = AVERROR(ENOMEM); \ |
|
206 |
+ goto fail; \ |
|
207 |
+ } \ |
|
208 |
+} while (0) |
|
209 |
+ |
|
210 |
+ if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) { |
|
211 |
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); |
|
212 |
+ |
|
213 |
+ if (!desc) { |
|
214 |
+ ret = AVERROR(EINVAL); |
|
215 |
+ goto fail; |
|
216 |
+ } |
|
217 |
+ planes = (desc->flags & PIX_FMT_PLANAR) ? desc->nb_components : 1; |
|
218 |
+ |
|
219 |
+ for (i = 0; i < planes; i++) { |
|
220 |
+ int h_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; |
|
221 |
+ int plane_size = (frame->width >> h_shift) * frame->linesize[i]; |
|
222 |
+ |
|
223 |
+ WRAP_PLANE(frame->buf[i], frame->data[i], plane_size); |
|
224 |
+ } |
|
225 |
+ } else { |
|
226 |
+ int planar = av_sample_fmt_is_planar(frame->format); |
|
227 |
+ int channels = av_get_channel_layout_nb_channels(frame->channel_layout); |
|
228 |
+ |
|
229 |
+ planes = planar ? channels : 1; |
|
230 |
+ |
|
231 |
+ if (planes > FF_ARRAY_ELEMS(frame->buf)) { |
|
232 |
+ frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf); |
|
233 |
+ frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) * |
|
234 |
+ frame->nb_extended_buf); |
|
235 |
+ if (!frame->extended_buf) { |
|
236 |
+ ret = AVERROR(ENOMEM); |
|
237 |
+ goto fail; |
|
238 |
+ } |
|
239 |
+ } |
|
240 |
+ |
|
241 |
+ for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++) |
|
242 |
+ WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]); |
|
243 |
+ |
|
244 |
+ for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++) |
|
245 |
+ WRAP_PLANE(frame->extended_buf[i], |
|
246 |
+ frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)], |
|
247 |
+ frame->linesize[0]); |
|
248 |
+ } |
|
249 |
+ |
|
250 |
+ ret = av_buffersrc_add_frame_flags(ctx, frame, flags); |
|
251 |
+ |
|
252 |
+fail: |
|
253 |
+ av_buffer_unref(&dummy_buf); |
|
254 |
+ av_frame_free(&frame); |
|
255 |
+ |
|
256 |
+ return ret; |
|
257 |
+} |
|
258 |
+ |
|
259 |
+int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf) |
|
260 |
+{ |
|
261 |
+ return av_buffersrc_add_ref(ctx, buf, 0); |
|
169 | 262 |
} |
263 |
+#endif |
|
170 | 264 |
|
171 | 265 |
#define OFFSET(x) offsetof(BufferSourceContext, x) |
172 | 266 |
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
... | ... |
@@ -186,7 +281,7 @@ AVFILTER_DEFINE_CLASS(buffer); |
186 | 186 |
static av_cold int init_video(AVFilterContext *ctx, const char *args) |
187 | 187 |
{ |
188 | 188 |
BufferSourceContext *c = ctx->priv; |
189 |
- char pix_fmt_str[128], sws_param[256] = "", *colon, *equal; |
|
189 |
+ char pix_fmt_str[128], *colon, *equal; |
|
190 | 190 |
int ret, n = 0; |
191 | 191 |
|
192 | 192 |
c->class = &buffer_class; |
... | ... |
@@ -195,6 +290,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args) |
195 | 195 |
av_log(ctx, AV_LOG_ERROR, "Arguments required\n"); |
196 | 196 |
return AVERROR(EINVAL); |
197 | 197 |
} |
198 |
+ |
|
198 | 199 |
colon = strchr(args, ':'); |
199 | 200 |
equal = strchr(args, '='); |
200 | 201 |
if (equal && (!colon || equal < colon)) { |
... | ... |
@@ -203,28 +299,25 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args) |
203 | 203 |
if (ret < 0) |
204 | 204 |
goto fail; |
205 | 205 |
} else { |
206 |
- if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str, |
|
206 |
+ if (!args || |
|
207 |
+ (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str, |
|
207 | 208 |
&c->time_base.num, &c->time_base.den, |
208 |
- &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) { |
|
209 |
- av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args); |
|
210 |
- ret = AVERROR(EINVAL); |
|
211 |
- goto fail; |
|
209 |
+ &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) { |
|
210 |
+ av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args); |
|
211 |
+ return AVERROR(EINVAL); |
|
212 | 212 |
} |
213 |
- av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n"); |
|
214 |
- |
|
215 |
- if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0) |
|
216 |
- goto fail; |
|
217 |
- c->sws_param = av_strdup(sws_param); |
|
218 |
- if (!c->sws_param) { |
|
219 |
- ret = AVERROR(ENOMEM); |
|
220 |
- goto fail; |
|
213 |
+ if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == AV_PIX_FMT_NONE) { |
|
214 |
+ char *tail; |
|
215 |
+ c->pix_fmt = strtol(pix_fmt_str, &tail, 10); |
|
216 |
+ if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) { |
|
217 |
+ av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); |
|
218 |
+ return AVERROR(EINVAL); |
|
219 |
+ } |
|
221 | 220 |
} |
222 | 221 |
} |
223 | 222 |
|
224 |
- if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) { |
|
225 |
- ret = AVERROR(ENOMEM); |
|
226 |
- goto fail; |
|
227 |
- } |
|
223 |
+ if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*)))) |
|
224 |
+ return AVERROR(ENOMEM); |
|
228 | 225 |
|
229 | 226 |
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n", |
230 | 227 |
c->w, c->h, av_get_pix_fmt_name(c->pix_fmt), |
... | ... |
@@ -238,6 +331,11 @@ fail: |
238 | 238 |
return ret; |
239 | 239 |
} |
240 | 240 |
|
241 |
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src) |
|
242 |
+{ |
|
243 |
+ return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests; |
|
244 |
+} |
|
245 |
+ |
|
241 | 246 |
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM |
242 | 247 |
static const AVOption abuffer_options[] = { |
243 | 248 |
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, |
... | ... |
@@ -298,7 +396,7 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args) |
298 | 298 |
goto fail; |
299 | 299 |
} |
300 | 300 |
|
301 |
- if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) { |
|
301 |
+ if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) { |
|
302 | 302 |
ret = AVERROR(ENOMEM); |
303 | 303 |
goto fail; |
304 | 304 |
} |
... | ... |
@@ -321,9 +419,9 @@ static av_cold void uninit(AVFilterContext *ctx) |
321 | 321 |
{ |
322 | 322 |
BufferSourceContext *s = ctx->priv; |
323 | 323 |
while (s->fifo && av_fifo_size(s->fifo)) { |
324 |
- AVFilterBufferRef *buf; |
|
325 |
- av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); |
|
326 |
- avfilter_unref_buffer(buf); |
|
324 |
+ AVFrame *frame; |
|
325 |
+ av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL); |
|
326 |
+ av_frame_free(&frame); |
|
327 | 327 |
} |
328 | 328 |
av_fifo_free(s->fifo); |
329 | 329 |
s->fifo = NULL; |
... | ... |
@@ -387,7 +485,8 @@ static int config_props(AVFilterLink *link) |
387 | 387 |
static int request_frame(AVFilterLink *link) |
388 | 388 |
{ |
389 | 389 |
BufferSourceContext *c = link->src->priv; |
390 |
- AVFilterBufferRef *buf; |
|
390 |
+ AVFrame *frame; |
|
391 |
+ int ret = 0; |
|
391 | 392 |
|
392 | 393 |
if (!av_fifo_size(c->fifo)) { |
393 | 394 |
if (c->eof) |
... | ... |
@@ -395,9 +494,12 @@ static int request_frame(AVFilterLink *link) |
395 | 395 |
c->nb_failed_requests++; |
396 | 396 |
return AVERROR(EAGAIN); |
397 | 397 |
} |
398 |
- av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL); |
|
398 |
+ av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL); |
|
399 | 399 |
|
400 |
- return ff_filter_frame(link, buf); |
|
400 |
+ /* CIG TODO do not ignore error */ |
|
401 |
+ ff_filter_frame(link, frame); |
|
402 |
+ |
|
403 |
+ return ret; |
|
401 | 404 |
} |
402 | 405 |
|
403 | 406 |
static int poll_frame(AVFilterLink *link) |
... | ... |
@@ -406,7 +508,7 @@ static int poll_frame(AVFilterLink *link) |
406 | 406 |
int size = av_fifo_size(c->fifo); |
407 | 407 |
if (!size && c->eof) |
408 | 408 |
return AVERROR_EOF; |
409 |
- return size/sizeof(AVFilterBufferRef*); |
|
409 |
+ return size/sizeof(AVFrame*); |
|
410 | 410 |
} |
411 | 411 |
|
412 | 412 |
static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { |
... | ... |
@@ -68,14 +68,15 @@ int av_buffersrc_add_ref(AVFilterContext *buffer_src, |
68 | 68 |
*/ |
69 | 69 |
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); |
70 | 70 |
|
71 |
-#ifdef FF_API_BUFFERSRC_BUFFER |
|
71 |
+#if FF_API_AVFILTERBUFFER |
|
72 | 72 |
/** |
73 | 73 |
* Add a buffer to the filtergraph s. |
74 | 74 |
* |
75 | 75 |
* @param buf buffer containing frame data to be passed down the filtergraph. |
76 | 76 |
* This function will take ownership of buf, the user must not free it. |
77 | 77 |
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. |
78 |
- * @deprecated Use av_buffersrc_add_ref(s, picref, AV_BUFFERSRC_FLAG_NO_COPY) instead. |
|
78 |
+ * |
|
79 |
+ * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() |
|
79 | 80 |
*/ |
80 | 81 |
attribute_deprecated |
81 | 82 |
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); |
... | ... |
@@ -85,11 +86,42 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); |
85 | 85 |
* Add a frame to the buffer source. |
86 | 86 |
* |
87 | 87 |
* @param s an instance of the buffersrc filter. |
88 |
- * @param frame frame to be added. |
|
88 |
+ * @param frame frame to be added. If the frame is reference counted, this |
|
89 |
+ * function will make a new reference to it. Otherwise the frame data will be |
|
90 |
+ * copied. |
|
89 | 91 |
* |
90 |
- * @warning frame data will be memcpy()ed, which may be a big performance |
|
91 |
- * hit. Use av_buffersrc_buffer() to avoid copying the data. |
|
92 |
+ * @return 0 on success, a negative AVERROR on error |
|
92 | 93 |
*/ |
93 | 94 |
int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); |
94 | 95 |
|
96 |
+/** |
|
97 |
+ * Add a frame to the buffer source. |
|
98 |
+ * |
|
99 |
+ * @param s an instance of the buffersrc filter. |
|
100 |
+ * @param frame frame to be added. If the frame is reference counted, this |
|
101 |
+ * function will take ownership of the reference(s) and reset the frame. |
|
102 |
+ * Otherwise the frame data will be copied. If this function returns an error, |
|
103 |
+ * the input frame is not touched. |
|
104 |
+ * |
|
105 |
+ * @return 0 on success, a negative AVERROR on error. |
|
106 |
+ * |
|
107 |
+ * @note the difference between this function and av_buffersrc_write_frame() is |
|
108 |
+ * that av_buffersrc_write_frame() creates a new reference to the input frame, |
|
109 |
+ * while this function takes ownership of the reference passed to it. |
|
110 |
+ */ |
|
111 |
+int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); |
|
112 |
+ |
|
113 |
+/** |
|
114 |
+ * Add frame data to buffer_src. XXX |
|
115 |
+ * |
|
116 |
+ * @param buffer_src pointer to a buffer source context |
|
117 |
+ * @param frame a frame, or NULL to mark EOF |
|
118 |
+ * @param flags a combination of AV_BUFFERSRC_FLAG_* |
|
119 |
+ * @return >= 0 in case of success, a negative AVERROR code |
|
120 |
+ * in case of failure |
|
121 |
+ */ |
|
122 |
+int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, |
|
123 |
+ AVFrame *frame, int flags); |
|
124 |
+ |
|
125 |
+ |
|
95 | 126 |
#endif /* AVFILTER_BUFFERSRC_H */ |
... | ... |
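The ownership rules documented above are the practical difference between the two entry points: av_buffersrc_write_frame() leaves the caller's frame untouched, av_buffersrc_add_frame() takes the reference and resets the frame, and a NULL frame marks EOF. A minimal feeding sketch (feed_source and src_ctx are hypothetical, not part of this commit):

    static int feed_source(AVFilterContext *src_ctx, AVFrame *decoded)
    {
        /* NULL signals end of stream on this input */
        if (!decoded)
            return av_buffersrc_add_frame(src_ctx, NULL);

        /* write_frame keeps the caller's reference and gives the graph its own;
         * av_buffersrc_add_frame(src_ctx, decoded) would instead consume
         * 'decoded' and leave it reset */
        return av_buffersrc_write_frame(src_ctx, decoded);
    }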
@@ -97,7 +97,7 @@ typedef struct { |
97 | 97 |
struct rect text; ///< rectangle for the LU legend on the left |
98 | 98 |
struct rect graph; ///< rectangle for the main graph in the center |
99 | 99 |
struct rect gauge; ///< rectangle for the gauge on the right |
100 |
- AVFilterBufferRef *outpicref; ///< output picture reference, updated regularly |
|
100 |
+ AVFrame *outpicref; ///< output picture reference, updated regularly |
|
101 | 101 |
int meter; ///< select a EBU mode between +9 and +18 |
102 | 102 |
int scale_range; ///< the range of LU values according to the meter |
103 | 103 |
int y_zero_lu; ///< the y value (pixel position) for 0 LU |
... | ... |
@@ -174,7 +174,7 @@ static const uint8_t font_colors[] = { |
174 | 174 |
0x00, 0x96, 0x96, |
175 | 175 |
}; |
176 | 176 |
|
177 |
-static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...) |
|
177 |
+static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...) |
|
178 | 178 |
{ |
179 | 179 |
int i; |
180 | 180 |
char buf[128] = {0}; |
... | ... |
@@ -207,7 +207,7 @@ static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8 |
207 | 207 |
} |
208 | 208 |
} |
209 | 209 |
|
210 |
-static void drawline(AVFilterBufferRef *pic, int x, int y, int len, int step) |
|
210 |
+static void drawline(AVFrame *pic, int x, int y, int len, int step) |
|
211 | 211 |
{ |
212 | 212 |
int i; |
213 | 213 |
uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3; |
... | ... |
@@ -224,7 +224,7 @@ static int config_video_output(AVFilterLink *outlink) |
224 | 224 |
uint8_t *p; |
225 | 225 |
AVFilterContext *ctx = outlink->src; |
226 | 226 |
EBUR128Context *ebur128 = ctx->priv; |
227 |
- AVFilterBufferRef *outpicref; |
|
227 |
+ AVFrame *outpicref; |
|
228 | 228 |
|
229 | 229 |
/* check if there is enough space to represent everything decently */ |
230 | 230 |
if (ebur128->w < 640 || ebur128->h < 480) { |
... | ... |
@@ -259,10 +259,9 @@ static int config_video_output(AVFilterLink *outlink) |
259 | 259 |
av_assert0(ebur128->graph.h == ebur128->gauge.h); |
260 | 260 |
|
261 | 261 |
/* prepare the initial picref buffer */ |
262 |
- avfilter_unref_bufferp(&ebur128->outpicref); |
|
262 |
+ av_frame_free(&ebur128->outpicref); |
|
263 | 263 |
ebur128->outpicref = outpicref = |
264 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2, |
|
265 |
- outlink->w, outlink->h); |
|
264 |
+ ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
266 | 265 |
if (!outpicref) |
267 | 266 |
return AVERROR(ENOMEM); |
268 | 267 |
outlink->sample_aspect_ratio = (AVRational){1,1}; |
... | ... |
@@ -450,15 +449,15 @@ static int gate_update(struct integrator *integ, double power, |
450 | 450 |
return gate_hist_pos; |
451 | 451 |
} |
452 | 452 |
|
453 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
|
453 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
454 | 454 |
{ |
455 | 455 |
int i, ch, idx_insample; |
456 | 456 |
AVFilterContext *ctx = inlink->dst; |
457 | 457 |
EBUR128Context *ebur128 = ctx->priv; |
458 | 458 |
const int nb_channels = ebur128->nb_channels; |
459 |
- const int nb_samples = insamples->audio->nb_samples; |
|
459 |
+ const int nb_samples = insamples->nb_samples; |
|
460 | 460 |
const double *samples = (double *)insamples->data[0]; |
461 |
- AVFilterBufferRef *pic = ebur128->outpicref; |
|
461 |
+ AVFrame *pic = ebur128->outpicref; |
|
462 | 462 |
|
463 | 463 |
for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) { |
464 | 464 |
const int bin_id_400 = ebur128->i400.cache_pos; |
... | ... |
@@ -639,7 +638,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) |
639 | 639 |
|
640 | 640 |
/* set pts and push frame */ |
641 | 641 |
pic->pts = pts; |
642 |
- ret = ff_filter_frame(outlink, avfilter_ref_buffer(pic, ~AV_PERM_WRITE)); |
|
642 |
+ ret = ff_filter_frame(outlink, av_frame_clone(pic)); |
|
643 | 643 |
if (ret < 0) |
644 | 644 |
return ret; |
645 | 645 |
} |
... | ... |
@@ -738,7 +737,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
738 | 738 |
} |
739 | 739 |
for (i = 0; i < ctx->nb_outputs; i++) |
740 | 740 |
av_freep(&ctx->output_pads[i].name); |
741 |
- avfilter_unref_bufferp(&ebur128->outpicref); |
|
741 |
+ av_frame_free(&ebur128->outpicref); |
|
742 | 742 |
} |
743 | 743 |
|
744 | 744 |
static const AVFilterPad ebur128_inputs[] = { |
... | ... |
@@ -134,7 +134,7 @@ typedef struct { |
134 | 134 |
DSPContext c; ///< context providing optimized SAD methods (scene detect only) |
135 | 135 |
double prev_mafd; ///< previous MAFD (scene detect only) |
136 | 136 |
#endif |
137 |
- AVFilterBufferRef *prev_picref; ///< previous frame (scene detect only) |
|
137 |
+ AVFrame *prev_picref; ///< previous frame (scene detect only) |
|
138 | 138 |
double select; |
139 | 139 |
} SelectContext; |
140 | 140 |
|
... | ... |
@@ -219,25 +219,25 @@ static int config_input(AVFilterLink *inlink) |
219 | 219 |
} |
220 | 220 |
|
221 | 221 |
#if CONFIG_AVCODEC |
222 |
-static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
222 |
+static double get_scene_score(AVFilterContext *ctx, AVFrame *frame) |
|
223 | 223 |
{ |
224 | 224 |
double ret = 0; |
225 | 225 |
SelectContext *select = ctx->priv; |
226 |
- AVFilterBufferRef *prev_picref = select->prev_picref; |
|
226 |
+ AVFrame *prev_picref = select->prev_picref; |
|
227 | 227 |
|
228 | 228 |
if (prev_picref && |
229 |
- picref->video->h == prev_picref->video->h && |
|
230 |
- picref->video->w == prev_picref->video->w && |
|
231 |
- picref->linesize[0] == prev_picref->linesize[0]) { |
|
229 |
+ frame->height == prev_picref->height && |
|
230 |
+ frame->width == prev_picref->width && |
|
231 |
+ frame->linesize[0] == prev_picref->linesize[0]) { |
|
232 | 232 |
int x, y, nb_sad = 0; |
233 | 233 |
int64_t sad = 0; |
234 | 234 |
double mafd, diff; |
235 |
- uint8_t *p1 = picref->data[0]; |
|
235 |
+ uint8_t *p1 = frame->data[0]; |
|
236 | 236 |
uint8_t *p2 = prev_picref->data[0]; |
237 |
- const int linesize = picref->linesize[0]; |
|
237 |
+ const int linesize = frame->linesize[0]; |
|
238 | 238 |
|
239 |
- for (y = 0; y < picref->video->h - 8; y += 8) { |
|
240 |
- for (x = 0; x < picref->video->w*3 - 8; x += 8) { |
|
239 |
+ for (y = 0; y < frame->height - 8; y += 8) { |
|
240 |
+ for (x = 0; x < frame->width*3 - 8; x += 8) { |
|
241 | 241 |
sad += select->c.sad[1](select, p1 + x, p2 + x, |
242 | 242 |
linesize, 8); |
243 | 243 |
nb_sad += 8 * 8; |
... | ... |
@@ -250,9 +250,9 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref) |
250 | 250 |
diff = fabs(mafd - select->prev_mafd); |
251 | 251 |
ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1); |
252 | 252 |
select->prev_mafd = mafd; |
253 |
- avfilter_unref_buffer(prev_picref); |
|
253 |
+ av_frame_free(&prev_picref); |
|
254 | 254 |
} |
255 |
- select->prev_picref = avfilter_ref_buffer(picref, ~0); |
|
255 |
+ select->prev_picref = av_frame_clone(frame); |
|
256 | 256 |
return ret; |
257 | 257 |
} |
258 | 258 |
#endif |
... | ... |
@@ -260,38 +260,38 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref) |
260 | 260 |
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) |
261 | 261 |
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) |
262 | 262 |
|
263 |
-static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref) |
|
263 |
+static int select_frame(AVFilterContext *ctx, AVFrame *frame) |
|
264 | 264 |
{ |
265 | 265 |
SelectContext *select = ctx->priv; |
266 | 266 |
AVFilterLink *inlink = ctx->inputs[0]; |
267 | 267 |
double res; |
268 | 268 |
|
269 | 269 |
if (isnan(select->var_values[VAR_START_PTS])) |
270 |
- select->var_values[VAR_START_PTS] = TS2D(ref->pts); |
|
270 |
+ select->var_values[VAR_START_PTS] = TS2D(frame->pts); |
|
271 | 271 |
if (isnan(select->var_values[VAR_START_T])) |
272 |
- select->var_values[VAR_START_T] = TS2D(ref->pts) * av_q2d(inlink->time_base); |
|
272 |
+ select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base); |
|
273 | 273 |
|
274 |
- select->var_values[VAR_PTS] = TS2D(ref->pts); |
|
275 |
- select->var_values[VAR_T ] = TS2D(ref->pts) * av_q2d(inlink->time_base); |
|
276 |
- select->var_values[VAR_POS] = ref->pos == -1 ? NAN : ref->pos; |
|
274 |
+ select->var_values[VAR_PTS] = TS2D(frame->pts); |
|
275 |
+ select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base); |
|
276 |
+ select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame); |
|
277 | 277 |
|
278 | 278 |
switch (inlink->type) { |
279 | 279 |
case AVMEDIA_TYPE_AUDIO: |
280 |
- select->var_values[VAR_SAMPLES_N] = ref->audio->nb_samples; |
|
280 |
+ select->var_values[VAR_SAMPLES_N] = frame->nb_samples; |
|
281 | 281 |
break; |
282 | 282 |
|
283 | 283 |
case AVMEDIA_TYPE_VIDEO: |
284 | 284 |
select->var_values[VAR_INTERLACE_TYPE] = |
285 |
- !ref->video->interlaced ? INTERLACE_TYPE_P : |
|
286 |
- ref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B; |
|
287 |
- select->var_values[VAR_PICT_TYPE] = ref->video->pict_type; |
|
285 |
+ !frame->interlaced_frame ? INTERLACE_TYPE_P : |
|
286 |
+ frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B; |
|
287 |
+ select->var_values[VAR_PICT_TYPE] = frame->pict_type; |
|
288 | 288 |
#if CONFIG_AVCODEC |
289 | 289 |
if (select->do_scene_detect) { |
290 | 290 |
char buf[32]; |
291 |
- select->var_values[VAR_SCENE] = get_scene_score(ctx, ref); |
|
291 |
+ select->var_values[VAR_SCENE] = get_scene_score(ctx, frame); |
|
292 | 292 |
// TODO: document metadata |
293 | 293 |
snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]); |
294 |
- av_dict_set(&ref->metadata, "lavfi.scene_score", buf, 0); |
|
294 |
+ av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0); |
|
295 | 295 |
} |
296 | 296 |
#endif |
297 | 297 |
break; |
... | ... |
@@ -299,11 +299,10 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref) |
299 | 299 |
|
300 | 300 |
res = av_expr_eval(select->expr, select->var_values, NULL); |
301 | 301 |
av_log(inlink->dst, AV_LOG_DEBUG, |
302 |
- "n:%f pts:%f t:%f pos:%f key:%d", |
|
302 |
+ "n:%f pts:%f t:%f key:%d", |
|
303 | 303 |
select->var_values[VAR_N], |
304 | 304 |
select->var_values[VAR_PTS], |
305 | 305 |
select->var_values[VAR_T], |
306 |
- select->var_values[VAR_POS], |
|
307 | 306 |
(int)select->var_values[VAR_KEY]); |
308 | 307 |
|
309 | 308 |
switch (inlink->type) { |
... | ... |
@@ -330,7 +329,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref) |
330 | 330 |
select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T]; |
331 | 331 |
select->var_values[VAR_SELECTED_N] += 1.0; |
332 | 332 |
if (inlink->type == AVMEDIA_TYPE_AUDIO) |
333 |
- select->var_values[VAR_CONSUMED_SAMPLES_N] += ref->audio->nb_samples; |
|
333 |
+ select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples; |
|
334 | 334 |
} |
335 | 335 |
|
336 | 336 |
select->var_values[VAR_N] += 1.0; |
... | ... |
@@ -340,7 +339,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref) |
340 | 340 |
return res; |
341 | 341 |
} |
342 | 342 |
|
343 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
343 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
344 | 344 |
{ |
345 | 345 |
SelectContext *select = inlink->dst->priv; |
346 | 346 |
|
... | ... |
@@ -348,7 +347,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
348 | 348 |
if (select->select) |
349 | 349 |
return ff_filter_frame(inlink->dst->outputs[0], frame); |
350 | 350 |
|
351 |
- avfilter_unref_bufferp(&frame); |
|
351 |
+ av_frame_free(&frame); |
|
352 | 352 |
return 0; |
353 | 353 |
} |
354 | 354 |
|
... | ... |
@@ -378,7 +377,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
378 | 378 |
|
379 | 379 |
#if CONFIG_AVCODEC |
380 | 380 |
if (select->do_scene_detect) { |
381 |
- avfilter_unref_bufferp(&select->prev_picref); |
|
381 |
+ av_frame_free(&select->prev_picref); |
|
382 | 382 |
if (select->avctx) { |
383 | 383 |
avcodec_close(select->avctx); |
384 | 384 |
av_freep(&select->avctx); |
... | ... |
@@ -448,7 +448,7 @@ static void av_cold uninit(AVFilterContext *ctx) |
448 | 448 |
av_freep(&sendcmd->intervals); |
449 | 449 |
} |
450 | 450 |
|
451 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) |
|
451 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref) |
|
452 | 452 |
{ |
453 | 453 |
AVFilterContext *ctx = inlink->dst; |
454 | 454 |
SendCmdContext *sendcmd = ctx->priv; |
... | ... |
@@ -138,7 +138,7 @@ static inline char *double2int64str(char *buf, double v) |
138 | 138 |
|
139 | 139 |
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v) |
140 | 140 |
|
141 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
141 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
142 | 142 |
{ |
143 | 143 |
SetPTSContext *setpts = inlink->dst->priv; |
144 | 144 |
int64_t in_pts = frame->pts; |
... | ... |
@@ -150,16 +150,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
150 | 150 |
} |
151 | 151 |
setpts->var_values[VAR_PTS ] = TS2D(frame->pts); |
152 | 152 |
setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base); |
153 |
- setpts->var_values[VAR_POS ] = frame->pos == -1 ? NAN : frame->pos; |
|
153 |
+ setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame); |
|
154 | 154 |
setpts->var_values[VAR_RTCTIME ] = av_gettime(); |
155 | 155 |
|
156 | 156 |
switch (inlink->type) { |
157 | 157 |
case AVMEDIA_TYPE_VIDEO: |
158 |
- setpts->var_values[VAR_INTERLACED] = frame->video->interlaced; |
|
158 |
+ setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame; |
|
159 | 159 |
break; |
160 | 160 |
|
161 | 161 |
case AVMEDIA_TYPE_AUDIO: |
162 |
- setpts->var_values[VAR_NB_SAMPLES] = frame->audio->nb_samples; |
|
162 |
+ setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples; |
|
163 | 163 |
break; |
164 | 164 |
} |
165 | 165 |
|
... | ... |
@@ -192,7 +192,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
192 | 192 |
setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base); |
193 | 193 |
setpts->var_values[VAR_N] += 1.0; |
194 | 194 |
if (setpts->type == AVMEDIA_TYPE_AUDIO) { |
195 |
- setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->audio->nb_samples; |
|
195 |
+ setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples; |
|
196 | 196 |
} |
197 | 197 |
return ff_filter_frame(inlink->dst->outputs[0], frame); |
198 | 198 |
} |
... | ... |
@@ -103,7 +103,7 @@ static int config_output_props(AVFilterLink *outlink) |
103 | 103 |
return 0; |
104 | 104 |
} |
105 | 105 |
|
106 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
106 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
107 | 107 |
{ |
108 | 108 |
AVFilterContext *ctx = inlink->dst; |
109 | 109 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -35,7 +35,7 @@ |
35 | 35 |
#include "video.h" |
36 | 36 |
|
37 | 37 |
typedef struct Buf { |
38 |
- AVFilterBufferRef *buf; |
|
38 |
+ AVFrame *frame; |
|
39 | 39 |
struct Buf *next; |
40 | 40 |
} Buf; |
41 | 41 |
|
... | ... |
@@ -47,8 +47,8 @@ typedef struct { |
47 | 47 |
* When a specific number of output samples is requested, the partial |
48 | 48 |
* buffer is stored here |
49 | 49 |
*/ |
50 |
- AVFilterBufferRef *buf_out; |
|
51 |
- int allocated_samples; ///< number of samples buf_out was allocated for |
|
50 |
+ AVFrame *out; |
|
51 |
+ int allocated_samples; ///< number of samples out was allocated for |
|
52 | 52 |
} FifoContext; |
53 | 53 |
|
54 | 54 |
static av_cold int init(AVFilterContext *ctx, const char *args) |
... | ... |
@@ -66,25 +66,25 @@ static av_cold void uninit(AVFilterContext *ctx) |
66 | 66 |
|
67 | 67 |
for (buf = fifo->root.next; buf; buf = tmp) { |
68 | 68 |
tmp = buf->next; |
69 |
- avfilter_unref_bufferp(&buf->buf); |
|
69 |
+ av_frame_free(&buf->frame); |
|
70 | 70 |
av_free(buf); |
71 | 71 |
} |
72 | 72 |
|
73 |
- avfilter_unref_bufferp(&fifo->buf_out); |
|
73 |
+ av_frame_free(&fifo->out); |
|
74 | 74 |
} |
75 | 75 |
|
76 |
-static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
76 |
+static int add_to_queue(AVFilterLink *inlink, AVFrame *frame) |
|
77 | 77 |
{ |
78 | 78 |
FifoContext *fifo = inlink->dst->priv; |
79 | 79 |
|
80 | 80 |
fifo->last->next = av_mallocz(sizeof(Buf)); |
81 | 81 |
if (!fifo->last->next) { |
82 |
- avfilter_unref_buffer(buf); |
|
82 |
+ av_frame_free(&frame); |
|
83 | 83 |
return AVERROR(ENOMEM); |
84 | 84 |
} |
85 | 85 |
|
86 | 86 |
fifo->last = fifo->last->next; |
87 |
- fifo->last->buf = buf; |
|
87 |
+ fifo->last->frame = frame; |
|
88 | 88 |
|
89 | 89 |
return 0; |
90 | 90 |
} |
... | ... |
@@ -101,7 +101,7 @@ static void queue_pop(FifoContext *s) |
101 | 101 |
/** |
102 | 102 |
* Move data pointers and pts offset samples forward. |
103 | 103 |
*/ |
104 |
-static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf, |
|
104 |
+static void buffer_offset(AVFilterLink *link, AVFrame *frame, |
|
105 | 105 |
int offset) |
106 | 106 |
{ |
107 | 107 |
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); |
... | ... |
@@ -110,32 +110,32 @@ static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf, |
110 | 110 |
int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels); |
111 | 111 |
int i; |
112 | 112 |
|
113 |
- av_assert0(buf->audio->nb_samples > offset); |
|
113 |
+ av_assert0(frame->nb_samples > offset); |
|
114 | 114 |
|
115 | 115 |
for (i = 0; i < planes; i++) |
116 |
- buf->extended_data[i] += block_align*offset; |
|
117 |
- if (buf->data != buf->extended_data) |
|
118 |
- memcpy(buf->data, buf->extended_data, |
|
119 |
- FFMIN(planes, FF_ARRAY_ELEMS(buf->data)) * sizeof(*buf->data)); |
|
120 |
- buf->linesize[0] -= block_align*offset; |
|
121 |
- buf->audio->nb_samples -= offset; |
|
122 |
- |
|
123 |
- if (buf->pts != AV_NOPTS_VALUE) { |
|
124 |
- buf->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate}, |
|
125 |
- link->time_base); |
|
116 |
+ frame->extended_data[i] += block_align * offset; |
|
117 |
+ if (frame->data != frame->extended_data) |
|
118 |
+ memcpy(frame->data, frame->extended_data, |
|
119 |
+ FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data)); |
|
120 |
+ frame->linesize[0] -= block_align*offset; |
|
121 |
+ frame->nb_samples -= offset; |
|
122 |
+ |
|
123 |
+ if (frame->pts != AV_NOPTS_VALUE) { |
|
124 |
+ frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate}, |
|
125 |
+ link->time_base); |
|
126 | 126 |
} |
127 | 127 |
} |
128 | 128 |
|
129 |
-static int calc_ptr_alignment(AVFilterBufferRef *buf) |
|
129 |
+static int calc_ptr_alignment(AVFrame *frame) |
|
130 | 130 |
{ |
131 |
- int planes = av_sample_fmt_is_planar(buf->format) ? |
|
132 |
- av_get_channel_layout_nb_channels(buf->audio->channel_layout) : 1; |
|
131 |
+ int planes = av_sample_fmt_is_planar(frame->format) ? |
|
132 |
+ av_get_channel_layout_nb_channels(frame->channel_layout) : 1; |
|
133 | 133 |
int min_align = 128; |
134 | 134 |
int p; |
135 | 135 |
|
136 | 136 |
for (p = 0; p < planes; p++) { |
137 | 137 |
int cur_align = 128; |
138 |
- while ((intptr_t)buf->extended_data[p] % cur_align) |
|
138 |
+ while ((intptr_t)frame->extended_data[p] % cur_align) |
|
139 | 139 |
cur_align >>= 1; |
140 | 140 |
if (cur_align < min_align) |
141 | 141 |
min_align = cur_align; |
... | ... |
@@ -147,35 +147,34 @@ static int return_audio_frame(AVFilterContext *ctx) |
147 | 147 |
{ |
148 | 148 |
AVFilterLink *link = ctx->outputs[0]; |
149 | 149 |
FifoContext *s = ctx->priv; |
150 |
- AVFilterBufferRef *head = s->root.next->buf; |
|
151 |
- AVFilterBufferRef *buf_out; |
|
150 |
+ AVFrame *head = s->root.next->frame; |
|
151 |
+ AVFrame *out; |
|
152 | 152 |
int ret; |
153 | 153 |
|
154 |
- if (!s->buf_out && |
|
155 |
- head->audio->nb_samples >= link->request_samples && |
|
154 |
+ if (!s->out && |
|
155 |
+ head->nb_samples >= link->request_samples && |
|
156 | 156 |
calc_ptr_alignment(head) >= 32) { |
157 |
- if (head->audio->nb_samples == link->request_samples) { |
|
158 |
- buf_out = head; |
|
157 |
+ if (head->nb_samples == link->request_samples) { |
|
158 |
+ out = head; |
|
159 | 159 |
queue_pop(s); |
160 | 160 |
} else { |
161 |
- buf_out = avfilter_ref_buffer(head, AV_PERM_READ); |
|
162 |
- if (!buf_out) |
|
161 |
+ out = av_frame_clone(head); |
|
162 |
+ if (!out) |
|
163 | 163 |
return AVERROR(ENOMEM); |
164 | 164 |
|
165 |
- buf_out->audio->nb_samples = link->request_samples; |
|
165 |
+ out->nb_samples = link->request_samples; |
|
166 | 166 |
buffer_offset(link, head, link->request_samples); |
167 | 167 |
} |
168 | 168 |
} else { |
169 | 169 |
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); |
170 | 170 |
|
171 |
- if (!s->buf_out) { |
|
172 |
- s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE, |
|
173 |
- link->request_samples); |
|
174 |
- if (!s->buf_out) |
|
171 |
+ if (!s->out) { |
|
172 |
+ s->out = ff_get_audio_buffer(link, link->request_samples); |
|
173 |
+ if (!s->out) |
|
175 | 174 |
return AVERROR(ENOMEM); |
176 | 175 |
|
177 |
- s->buf_out->audio->nb_samples = 0; |
|
178 |
- s->buf_out->pts = head->pts; |
|
176 |
+ s->out->nb_samples = 0; |
|
177 |
+ s->out->pts = head->pts; |
|
179 | 178 |
s->allocated_samples = link->request_samples; |
180 | 179 |
} else if (link->request_samples != s->allocated_samples) { |
181 | 180 |
av_log(ctx, AV_LOG_ERROR, "request_samples changed before the " |
... | ... |
@@ -183,41 +182,41 @@ static int return_audio_frame(AVFilterContext *ctx) |
183 | 183 |
return AVERROR(EINVAL); |
184 | 184 |
} |
185 | 185 |
|
186 |
- while (s->buf_out->audio->nb_samples < s->allocated_samples) { |
|
187 |
- int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples, |
|
188 |
- head->audio->nb_samples); |
|
186 |
+ while (s->out->nb_samples < s->allocated_samples) { |
|
187 |
+ int len = FFMIN(s->allocated_samples - s->out->nb_samples, |
|
188 |
+ head->nb_samples); |
|
189 | 189 |
|
190 |
- av_samples_copy(s->buf_out->extended_data, head->extended_data, |
|
191 |
- s->buf_out->audio->nb_samples, 0, len, nb_channels, |
|
190 |
+ av_samples_copy(s->out->extended_data, head->extended_data, |
|
191 |
+ s->out->nb_samples, 0, len, nb_channels, |
|
192 | 192 |
link->format); |
193 |
- s->buf_out->audio->nb_samples += len; |
|
193 |
+ s->out->nb_samples += len; |
|
194 | 194 |
|
195 |
- if (len == head->audio->nb_samples) { |
|
196 |
- avfilter_unref_buffer(head); |
|
195 |
+ if (len == head->nb_samples) { |
|
196 |
+ av_frame_free(&head); |
|
197 | 197 |
queue_pop(s); |
198 | 198 |
|
199 | 199 |
if (!s->root.next && |
200 | 200 |
(ret = ff_request_frame(ctx->inputs[0])) < 0) { |
201 | 201 |
if (ret == AVERROR_EOF) { |
202 |
- av_samples_set_silence(s->buf_out->extended_data, |
|
203 |
- s->buf_out->audio->nb_samples, |
|
202 |
+ av_samples_set_silence(s->out->extended_data, |
|
203 |
+ s->out->nb_samples, |
|
204 | 204 |
s->allocated_samples - |
205 |
- s->buf_out->audio->nb_samples, |
|
205 |
+ s->out->nb_samples, |
|
206 | 206 |
nb_channels, link->format); |
207 |
- s->buf_out->audio->nb_samples = s->allocated_samples; |
|
207 |
+ s->out->nb_samples = s->allocated_samples; |
|
208 | 208 |
break; |
209 | 209 |
} |
210 | 210 |
return ret; |
211 | 211 |
} |
212 |
- head = s->root.next->buf; |
|
212 |
+ head = s->root.next->frame; |
|
213 | 213 |
} else { |
214 | 214 |
buffer_offset(link, head, len); |
215 | 215 |
} |
216 | 216 |
} |
217 |
- buf_out = s->buf_out; |
|
218 |
- s->buf_out = NULL; |
|
217 |
+ out = s->out; |
|
218 |
+ s->out = NULL; |
|
219 | 219 |
} |
220 |
- return ff_filter_frame(link, buf_out); |
|
220 |
+ return ff_filter_frame(link, out); |
|
221 | 221 |
} |
222 | 222 |
|
223 | 223 |
static int request_frame(AVFilterLink *outlink) |
... | ... |
@@ -234,7 +233,7 @@ static int request_frame(AVFilterLink *outlink) |
234 | 234 |
if (outlink->request_samples) { |
235 | 235 |
return return_audio_frame(outlink->src); |
236 | 236 |
} else { |
237 |
- ret = ff_filter_frame(outlink, fifo->root.next->buf); |
|
237 |
+ ret = ff_filter_frame(outlink, fifo->root.next->frame); |
|
238 | 238 |
queue_pop(fifo); |
239 | 239 |
} |
240 | 240 |
|
... | ... |
@@ -247,7 +246,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = { |
247 | 247 |
.type = AVMEDIA_TYPE_VIDEO, |
248 | 248 |
.get_video_buffer = ff_null_get_video_buffer, |
249 | 249 |
.filter_frame = add_to_queue, |
250 |
- .min_perms = AV_PERM_PRESERVE, |
|
251 | 250 |
}, |
252 | 251 |
{ NULL } |
253 | 252 |
}; |
... | ... |
@@ -280,7 +278,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = { |
280 | 280 |
.type = AVMEDIA_TYPE_AUDIO, |
281 | 281 |
.get_audio_buffer = ff_null_get_audio_buffer, |
282 | 282 |
.filter_frame = add_to_queue, |
283 |
- .min_perms = AV_PERM_PRESERVE, |
|
284 | 283 |
}, |
285 | 284 |
{ NULL } |
286 | 285 |
}; |
... | ... |
@@ -68,32 +68,12 @@ struct AVFilterPad { |
68 | 68 |
enum AVMediaType type; |
69 | 69 |
|
70 | 70 |
/** |
71 |
- * Minimum required permissions on incoming buffers. Any buffer with |
|
72 |
- * insufficient permissions will be automatically copied by the filter |
|
73 |
- * system to a new buffer which provides the needed access permissions. |
|
74 |
- * |
|
75 |
- * Input pads only. |
|
76 |
- */ |
|
77 |
- int min_perms; |
|
78 |
- |
|
79 |
- /** |
|
80 |
- * Permissions which are not accepted on incoming buffers. Any buffer |
|
81 |
- * which has any of these permissions set will be automatically copied |
|
82 |
- * by the filter system to a new buffer which does not have those |
|
83 |
- * permissions. This can be used to easily disallow buffers with |
|
84 |
- * AV_PERM_REUSE. |
|
85 |
- * |
|
86 |
- * Input pads only. |
|
87 |
- */ |
|
88 |
- int rej_perms; |
|
89 |
- |
|
90 |
- /** |
|
91 | 71 |
* Callback function to get a video buffer. If NULL, the filter system will |
92 | 72 |
* use ff_default_get_video_buffer(). |
93 | 73 |
* |
94 | 74 |
* Input video pads only. |
95 | 75 |
*/ |
96 |
- AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h); |
|
76 |
+ AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); |
|
97 | 77 |
|
98 | 78 |
/** |
99 | 79 |
* Callback function to get an audio buffer. If NULL, the filter system will |
... | ... |
@@ -101,8 +81,7 @@ struct AVFilterPad { |
101 | 101 |
* |
102 | 102 |
* Input audio pads only. |
103 | 103 |
*/ |
104 |
- AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms, |
|
105 |
- int nb_samples); |
|
104 |
+ AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); |
|
106 | 105 |
|
107 | 106 |
/** |
108 | 107 |
* Filtering callback. This is where a filter receives a frame with |
... | ... |
@@ -114,7 +93,7 @@ struct AVFilterPad { |
114 | 114 |
* must ensure that samplesref is properly unreferenced on error if it |
115 | 115 |
* hasn't been passed on to another filter. |
116 | 116 |
*/ |
117 |
- int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame); |
|
117 |
+ int (*filter_frame)(AVFilterLink *link, AVFrame *frame); |
|
118 | 118 |
|
119 | 119 |
/** |
120 | 120 |
* Frame poll callback. This returns the number of immediately available |
... | ... |
@@ -234,8 +213,6 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx); |
234 | 234 |
|
235 | 235 |
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts); |
236 | 236 |
|
237 |
-void ff_free_pool(AVFilterPool *pool); |
|
238 |
- |
|
239 | 237 |
void ff_command_queue_pop(AVFilterContext *filter); |
240 | 238 |
|
241 | 239 |
/* misc trace functions */ |
... | ... |
@@ -252,7 +229,7 @@ void ff_command_queue_pop(AVFilterContext *filter); |
252 | 252 |
|
253 | 253 |
char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms); |
254 | 254 |
|
255 |
-void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end); |
|
255 |
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end); |
|
256 | 256 |
|
257 | 257 |
void ff_tlog_link(void *ctx, AVFilterLink *link, int end); |
258 | 258 |
|
... | ... |
@@ -346,6 +323,6 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef ** |
346 | 346 |
* @return >= 0 on success, a negative AVERROR on error. The receiving filter |
347 | 347 |
* is responsible for unreferencing frame in case of error. |
348 | 348 |
*/ |
349 |
-int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame); |
|
349 |
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame); |
|
350 | 350 |
|
351 | 351 |
#endif /* AVFILTER_INTERNAL_H */ |
... | ... |
@@ -31,6 +31,8 @@ |
31 | 31 |
#include "audio.h" |
32 | 32 |
#include "internal.h" |
33 | 33 |
|
34 |
+#include "libavutil/audio_fifo.h" |
|
35 |
+ |
|
34 | 36 |
AVBufferSinkParams *av_buffersink_params_alloc(void) |
35 | 37 |
{ |
36 | 38 |
static const int pixel_fmts[] = { AV_PIX_FMT_NONE }; |
... | ... |
@@ -88,14 +90,14 @@ static av_cold void common_uninit(AVFilterContext *ctx) |
88 | 88 |
if (buf->fifo) { |
89 | 89 |
while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) { |
90 | 90 |
av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL); |
91 |
- avfilter_unref_buffer(picref); |
|
91 |
+ av_frame_unref(picref); |
|
92 | 92 |
} |
93 | 93 |
av_fifo_free(buf->fifo); |
94 | 94 |
buf->fifo = NULL; |
95 | 95 |
} |
96 | 96 |
} |
97 | 97 |
|
98 |
-static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref) |
|
98 |
+static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref) |
|
99 | 99 |
{ |
100 | 100 |
BufferSinkContext *buf = ctx->priv; |
101 | 101 |
|
... | ... |
@@ -114,7 +116,7 @@ static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref) |
114 | 114 |
return 0; |
115 | 115 |
} |
116 | 116 |
|
117 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) |
|
117 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref) |
|
118 | 118 |
{ |
119 | 119 |
AVFilterContext *ctx = inlink->dst; |
120 | 120 |
BufferSinkContext *buf = inlink->dst->priv; |
... | ... |
@@ -141,18 +143,12 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size) |
141 | 141 |
inlink->partial_buf_size = frame_size; |
142 | 142 |
} |
143 | 143 |
|
144 |
-int av_buffersink_get_buffer_ref(AVFilterContext *ctx, |
|
145 |
- AVFilterBufferRef **bufref, int flags) |
|
144 |
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags) |
|
146 | 145 |
{ |
147 | 146 |
BufferSinkContext *buf = ctx->priv; |
148 | 147 |
AVFilterLink *inlink = ctx->inputs[0]; |
149 | 148 |
int ret; |
150 |
- *bufref = NULL; |
|
151 |
- |
|
152 |
- av_assert0( !strcmp(ctx->filter->name, "buffersink") |
|
153 |
- || !strcmp(ctx->filter->name, "abuffersink") |
|
154 |
- || !strcmp(ctx->filter->name, "ffbuffersink") |
|
155 |
- || !strcmp(ctx->filter->name, "ffabuffersink")); |
|
149 |
+ AVFrame *cur_frame; |
|
156 | 150 |
|
157 | 151 |
/* no picref available, fetch it from the filterchain */ |
158 | 152 |
if (!av_fifo_size(buf->fifo)) { |
... | ... |
@@ -165,13 +161,114 @@ int av_buffersink_get_buffer_ref(AVFilterContext *ctx, |
165 | 165 |
if (!av_fifo_size(buf->fifo)) |
166 | 166 |
return AVERROR(EINVAL); |
167 | 167 |
|
168 |
- if (flags & AV_BUFFERSINK_FLAG_PEEK) |
|
169 |
- *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0)); |
|
168 |
+ if (flags & AV_BUFFERSINK_FLAG_PEEK) { |
|
169 |
+ cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0)); |
|
170 |
+ av_frame_ref(frame, cur_frame); /* TODO check failure */ |
|
171 |
+ } else { |
|
172 |
+ av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL); |
|
173 |
+ av_frame_move_ref(frame, cur_frame); |
|
174 |
+ av_frame_free(&cur_frame); |
|
175 |
+ } |
|
176 |
+ |
|
177 |
+ return 0; |
|
178 |
+} |
|
179 |
+ |
|
180 |
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame) |
|
181 |
+{ |
|
182 |
+ return av_buffersink_get_frame_flags(ctx, frame, 0); |
|
183 |
+} |
|
184 |
+ |
|
185 |
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples) |
|
186 |
+{ |
|
187 |
+ av_assert0(!"TODO"); |
|
188 |
+} |
|
189 |
+ |
|
190 |
+#if FF_API_AVFILTERBUFFER |
|
191 |
+static void compat_free_buffer(AVFilterBuffer *buf) |
|
192 |
+{ |
|
193 |
+ AVFrame *frame = buf->priv; |
|
194 |
+ av_frame_free(&frame); |
|
195 |
+ av_free(buf); |
|
196 |
+} |
|
197 |
+ |
|
198 |
+static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags) |
|
199 |
+{ |
|
200 |
+ AVFilterBufferRef *buf; |
|
201 |
+ AVFrame *frame; |
|
202 |
+ int ret; |
|
203 |
+ |
|
204 |
+ if (!pbuf) |
|
205 |
+ return ff_poll_frame(ctx->inputs[0]); |
|
206 |
+ |
|
207 |
+ frame = av_frame_alloc(); |
|
208 |
+ if (!frame) |
|
209 |
+ return AVERROR(ENOMEM); |
|
210 |
+ |
|
211 |
+ if (!nb_samples) |
|
212 |
+ ret = av_buffersink_get_frame_flags(ctx, frame, flags); |
|
170 | 213 |
else |
171 |
- av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL); |
|
214 |
+ ret = av_buffersink_get_samples(ctx, frame, nb_samples); |
|
215 |
+ |
|
216 |
+ if (ret < 0) |
|
217 |
+ goto fail; |
|
218 |
+ |
|
219 |
+ if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) { |
|
220 |
+ buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, |
|
221 |
+ AV_PERM_READ, |
|
222 |
+ frame->width, frame->height, |
|
223 |
+ frame->format); |
|
224 |
+ } else { |
|
225 |
+ buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data, |
|
226 |
+ frame->linesize[0], AV_PERM_READ, |
|
227 |
+ frame->nb_samples, |
|
228 |
+ frame->format, |
|
229 |
+ frame->channel_layout); |
|
230 |
+ } |
|
231 |
+ if (!buf) { |
|
232 |
+ ret = AVERROR(ENOMEM); |
|
233 |
+ goto fail; |
|
234 |
+ } |
|
235 |
+ |
|
236 |
+ avfilter_copy_frame_props(buf, frame); |
|
237 |
+ |
|
238 |
+ buf->buf->priv = frame; |
|
239 |
+ buf->buf->free = compat_free_buffer; |
|
240 |
+ |
|
241 |
+ *pbuf = buf; |
|
172 | 242 |
|
173 | 243 |
return 0; |
244 |
+fail: |
|
245 |
+ av_frame_free(&frame); |
|
246 |
+ return ret; |
|
247 |
+} |
|
248 |
+ |
|
249 |
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) |
|
250 |
+{ |
|
251 |
+ return compat_read(ctx, buf, 0, 0); |
|
252 |
+} |
|
253 |
+ |
|
254 |
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, |
|
255 |
+ int nb_samples) |
|
256 |
+{ |
|
257 |
+ return compat_read(ctx, buf, nb_samples, 0); |
|
258 |
+} |
|
259 |
+ |
|
260 |
+int av_buffersink_get_buffer_ref(AVFilterContext *ctx, |
|
261 |
+ AVFilterBufferRef **bufref, int flags) |
|
262 |
+{ |
|
263 |
+ BufferSinkContext *buf = ctx->priv; |
|
264 |
+ AVFilterLink *inlink = ctx->inputs[0]; |
|
265 |
+ int ret; |
|
266 |
+ *bufref = NULL; |
|
267 |
+ |
|
268 |
+ av_assert0( !strcmp(ctx->filter->name, "buffersink") |
|
269 |
+ || !strcmp(ctx->filter->name, "abuffersink") |
|
270 |
+ || !strcmp(ctx->filter->name, "ffbuffersink") |
|
271 |
+ || !strcmp(ctx->filter->name, "ffabuffersink")); |
|
272 |
+ |
|
273 |
+ return compat_read(ctx, bufref, 0, flags); |
|
174 | 274 |
} |
275 |
+#endif |
|
175 | 276 |
|
176 | 277 |
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx) |
177 | 278 |
{ |
... | ... |
@@ -406,94 +503,3 @@ AVFilter avfilter_asink_abuffersink = { |
406 | 406 |
.inputs = abuffersink_inputs, |
407 | 407 |
.outputs = NULL, |
408 | 408 |
}; |
409 |
- |
|
410 |
-/* Libav compatibility API */ |
|
411 |
- |
|
412 |
-extern AVFilter avfilter_vsink_buffer; |
|
413 |
-extern AVFilter avfilter_asink_abuffer; |
|
414 |
- |
|
415 |
-int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) |
|
416 |
-{ |
|
417 |
- AVFilterBufferRef *tbuf; |
|
418 |
- int ret; |
|
419 |
- |
|
420 |
- if (ctx->filter-> inputs[0].start_frame == |
|
421 |
- avfilter_vsink_buffer. inputs[0].start_frame || |
|
422 |
- ctx->filter-> inputs[0].filter_frame == |
|
423 |
- avfilter_asink_abuffer.inputs[0].filter_frame) |
|
424 |
- return ff_buffersink_read_compat(ctx, buf); |
|
425 |
- av_assert0(ctx->filter-> inputs[0].end_frame == |
|
426 |
- avfilter_vsink_ffbuffersink. inputs[0].end_frame || |
|
427 |
- ctx->filter-> inputs[0].filter_frame == |
|
428 |
- avfilter_asink_ffabuffersink.inputs[0].filter_frame); |
|
429 |
- |
|
430 |
- ret = av_buffersink_get_buffer_ref(ctx, &tbuf, |
|
431 |
- buf ? 0 : AV_BUFFERSINK_FLAG_PEEK); |
|
432 |
- if (!buf) |
|
433 |
- return ret >= 0; |
|
434 |
- if (ret < 0) |
|
435 |
- return ret; |
|
436 |
- *buf = tbuf; |
|
437 |
- return 0; |
|
438 |
-} |
|
439 |
- |
|
440 |
-int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, |
|
441 |
- int nb_samples) |
|
442 |
-{ |
|
443 |
- BufferSinkContext *sink = ctx->priv; |
|
444 |
- int ret = 0, have_samples = 0, need_samples; |
|
445 |
- AVFilterBufferRef *tbuf, *in_buf; |
|
446 |
- AVFilterLink *link = ctx->inputs[0]; |
|
447 |
- int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); |
|
448 |
- |
|
449 |
- if (ctx->filter-> inputs[0].filter_frame == |
|
450 |
- avfilter_asink_abuffer.inputs[0].filter_frame) |
|
451 |
- return ff_buffersink_read_samples_compat(ctx, buf, nb_samples); |
|
452 |
- av_assert0(ctx->filter-> inputs[0].filter_frame == |
|
453 |
- avfilter_asink_ffabuffersink.inputs[0].filter_frame); |
|
454 |
- |
|
455 |
- tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples); |
|
456 |
- if (!tbuf) |
|
457 |
- return AVERROR(ENOMEM); |
|
458 |
- |
|
459 |
- while (have_samples < nb_samples) { |
|
460 |
- ret = av_buffersink_get_buffer_ref(ctx, &in_buf, |
|
461 |
- AV_BUFFERSINK_FLAG_PEEK); |
|
462 |
- if (ret < 0) { |
|
463 |
- if (ret == AVERROR_EOF && have_samples) { |
|
464 |
- nb_samples = have_samples; |
|
465 |
- ret = 0; |
|
466 |
- } |
|
467 |
- break; |
|
468 |
- } |
|
469 |
- |
|
470 |
- need_samples = FFMIN(in_buf->audio->nb_samples, |
|
471 |
- nb_samples - have_samples); |
|
472 |
- av_samples_copy(tbuf->extended_data, in_buf->extended_data, |
|
473 |
- have_samples, 0, need_samples, |
|
474 |
- nb_channels, in_buf->format); |
|
475 |
- have_samples += need_samples; |
|
476 |
- if (need_samples < in_buf->audio->nb_samples) { |
|
477 |
- in_buf->audio->nb_samples -= need_samples; |
|
478 |
- av_samples_copy(in_buf->extended_data, in_buf->extended_data, |
|
479 |
- 0, need_samples, in_buf->audio->nb_samples, |
|
480 |
- nb_channels, in_buf->format); |
|
481 |
- } else { |
|
482 |
- av_buffersink_get_buffer_ref(ctx, &in_buf, 0); |
|
483 |
- avfilter_unref_buffer(in_buf); |
|
484 |
- } |
|
485 |
- } |
|
486 |
- tbuf->audio->nb_samples = have_samples; |
|
487 |
- |
|
488 |
- if (ret < 0) { |
|
489 |
- av_assert0(!av_fifo_size(sink->fifo)); |
|
490 |
- if (have_samples) |
|
491 |
- add_buffer_ref(ctx, tbuf); |
|
492 |
- else |
|
493 |
- avfilter_unref_buffer(tbuf); |
|
494 |
- return ret; |
|
495 |
- } |
|
496 |
- |
|
497 |
- *buf = tbuf; |
|
498 |
- return 0; |
|
499 |
-} |
... | ... |
@@ -68,17 +68,17 @@ static void split_uninit(AVFilterContext *ctx) |
68 | 68 |
av_freep(&ctx->output_pads[i].name); |
69 | 69 |
} |
70 | 70 |
|
71 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
71 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
72 | 72 |
{ |
73 | 73 |
AVFilterContext *ctx = inlink->dst; |
74 | 74 |
int i, ret = AVERROR_EOF; |
75 | 75 |
|
76 | 76 |
for (i = 0; i < ctx->nb_outputs; i++) { |
77 |
- AVFilterBufferRef *buf_out; |
|
77 |
+ AVFrame *buf_out; |
|
78 | 78 |
|
79 | 79 |
if (ctx->outputs[i]->closed) |
80 | 80 |
continue; |
81 |
- buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE); |
|
81 |
+ buf_out = av_frame_clone(frame); |
|
82 | 82 |
if (!buf_out) { |
83 | 83 |
ret = AVERROR(ENOMEM); |
84 | 84 |
break; |
... | ... |
@@ -88,7 +88,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
88 | 88 |
if (ret < 0) |
89 | 89 |
break; |
90 | 90 |
} |
91 |
- avfilter_unref_bufferp(&frame); |
|
91 |
+ av_frame_free(&frame); |
|
92 | 92 |
return ret; |
93 | 93 |
} |
94 | 94 |
|
95 | 95 |
deleted file mode 100644 |
... | ... |
@@ -1,123 +0,0 @@ |
1 |
-/* |
|
2 |
- * Copyright (c) 2008 Vitor Sessak |
|
3 |
- * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram |
|
4 |
- * Copyright (c) 2011 Mina Nagy Zaki |
|
5 |
- * |
|
6 |
- * This file is part of FFmpeg. |
|
7 |
- * |
|
8 |
- * FFmpeg is free software; you can redistribute it and/or |
|
9 |
- * modify it under the terms of the GNU Lesser General Public |
|
10 |
- * License as published by the Free Software Foundation; either |
|
11 |
- * version 2.1 of the License, or (at your option) any later version. |
|
12 |
- * |
|
13 |
- * FFmpeg is distributed in the hope that it will be useful, |
|
14 |
- * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
15 |
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
16 |
- * Lesser General Public License for more details. |
|
17 |
- * |
|
18 |
- * You should have received a copy of the GNU Lesser General Public |
|
19 |
- * License along with FFmpeg; if not, write to the Free Software |
|
20 |
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
21 |
- */ |
|
22 |
- |
|
23 |
-/** |
|
24 |
- * @file |
|
25 |
- * memory buffer source filter |
|
26 |
- */ |
|
27 |
- |
|
28 |
-#include "avfilter.h" |
|
29 |
-#include "internal.h" |
|
30 |
-#include "audio.h" |
|
31 |
-#include "avcodec.h" |
|
32 |
-#include "buffersrc.h" |
|
33 |
-#include "asrc_abuffer.h" |
|
34 |
-#include "libavutil/avstring.h" |
|
35 |
-#include "libavutil/channel_layout.h" |
|
36 |
-#include "libavutil/fifo.h" |
|
37 |
-#include "libavutil/imgutils.h" |
|
38 |
- |
|
39 |
-typedef struct { |
|
40 |
- AVFifoBuffer *fifo; |
|
41 |
- AVRational time_base; ///< time_base to set in the output link |
|
42 |
- int eof; |
|
43 |
- unsigned nb_failed_requests; |
|
44 |
- |
|
45 |
- /* Video only */ |
|
46 |
- AVFilterContext *scale; |
|
47 |
- int h, w; |
|
48 |
- enum AVPixelFormat pix_fmt; |
|
49 |
- AVRational sample_aspect_ratio; |
|
50 |
- char sws_param[256]; |
|
51 |
- |
|
52 |
- /* Audio only */ |
|
53 |
- // Audio format of incoming buffers |
|
54 |
- int sample_rate; |
|
55 |
- unsigned int sample_format; |
|
56 |
- int64_t channel_layout; |
|
57 |
- |
|
58 |
- // Normalization filters |
|
59 |
- AVFilterContext *aconvert; |
|
60 |
- AVFilterContext *aresample; |
|
61 |
-} BufferSourceContext; |
|
62 |
- |
|
63 |
-static void buf_free(AVFilterBuffer *ptr) |
|
64 |
-{ |
|
65 |
- av_free(ptr); |
|
66 |
- return; |
|
67 |
-} |
|
68 |
- |
|
69 |
-int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx, |
|
70 |
- AVFilterBufferRef *samplesref, |
|
71 |
- int av_unused flags) |
|
72 |
-{ |
|
73 |
- return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY); |
|
74 |
-} |
|
75 |
- |
|
76 |
-int av_asrc_buffer_add_samples(AVFilterContext *ctx, |
|
77 |
- uint8_t *data[8], int linesize[8], |
|
78 |
- int nb_samples, int sample_rate, |
|
79 |
- int sample_fmt, int64_t channel_layout, int planar, |
|
80 |
- int64_t pts, int av_unused flags) |
|
81 |
-{ |
|
82 |
- AVFilterBufferRef *samplesref; |
|
83 |
- |
|
84 |
- if (!channel_layout) |
|
85 |
- return AVERROR(EINVAL); |
|
86 |
- samplesref = avfilter_get_audio_buffer_ref_from_arrays( |
|
87 |
- data, linesize[0], AV_PERM_WRITE, |
|
88 |
- nb_samples, |
|
89 |
- sample_fmt, channel_layout); |
|
90 |
- if (!samplesref) |
|
91 |
- return AVERROR(ENOMEM); |
|
92 |
- |
|
93 |
- samplesref->buf->free = buf_free; |
|
94 |
- samplesref->pts = pts; |
|
95 |
- samplesref->audio->sample_rate = sample_rate; |
|
96 |
- |
|
97 |
- AV_NOWARN_DEPRECATED( |
|
98 |
- return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0); |
|
99 |
- ) |
|
100 |
-} |
|
101 |
- |
|
102 |
-int av_asrc_buffer_add_buffer(AVFilterContext *ctx, |
|
103 |
- uint8_t *buf, int buf_size, int sample_rate, |
|
104 |
- int sample_fmt, int64_t channel_layout, int planar, |
|
105 |
- int64_t pts, int av_unused flags) |
|
106 |
-{ |
|
107 |
- uint8_t *data[8] = {0}; |
|
108 |
- int linesize[8]; |
|
109 |
- int nb_channels = av_get_channel_layout_nb_channels(channel_layout), |
|
110 |
- nb_samples = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt); |
|
111 |
- |
|
112 |
- av_samples_fill_arrays(data, linesize, |
|
113 |
- buf, nb_channels, nb_samples, |
|
114 |
- sample_fmt, 16); |
|
115 |
- |
|
116 |
- AV_NOWARN_DEPRECATED( |
|
117 |
- return av_asrc_buffer_add_samples(ctx, |
|
118 |
- data, linesize, nb_samples, |
|
119 |
- sample_rate, |
|
120 |
- sample_fmt, channel_layout, planar, |
|
121 |
- pts, flags); |
|
122 |
- ) |
|
123 |
-} |
... | ... |
@@ -313,11 +313,6 @@ static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, con |
313 | 313 |
} |
314 | 314 |
} |
315 | 315 |
|
316 |
- if (!(movie->frame = avcodec_alloc_frame()) ) { |
|
317 |
- av_log(log, AV_LOG_ERROR, "Failed to alloc frame\n"); |
|
318 |
- return AVERROR(ENOMEM); |
|
319 |
- } |
|
320 |
- |
|
321 | 316 |
av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n", |
322 | 317 |
movie->seek_point, movie->format_name, movie->file_name, |
323 | 318 |
movie->stream_index); |
... | ... |
@@ -339,7 +334,7 @@ static av_cold void movie_uninit(AVFilterContext *ctx) |
339 | 339 |
av_freep(&movie->file_name); |
340 | 340 |
av_freep(&movie->st); |
341 | 341 |
av_freep(&movie->out_index); |
342 |
- avcodec_free_frame(&movie->frame); |
|
342 |
+ av_frame_free(&movie->frame); |
|
343 | 343 |
if (movie->format_ctx) |
344 | 344 |
avformat_close_input(&movie->format_ctx); |
345 | 345 |
} |
... | ... |
@@ -399,54 +394,34 @@ static int movie_config_output_props(AVFilterLink *outlink) |
399 | 399 |
return 0; |
400 | 400 |
} |
401 | 401 |
|
402 |
-static AVFilterBufferRef *frame_to_buf(enum AVMediaType type, AVFrame *frame, |
|
403 |
- AVFilterLink *outlink) |
|
404 |
-{ |
|
405 |
- AVFilterBufferRef *buf, *copy; |
|
406 |
- |
|
407 |
- buf = avfilter_get_buffer_ref_from_frame(type, frame, |
|
408 |
- AV_PERM_WRITE | |
|
409 |
- AV_PERM_PRESERVE | |
|
410 |
- AV_PERM_REUSE2); |
|
411 |
- if (!buf) |
|
412 |
- return NULL; |
|
413 |
- buf->pts = av_frame_get_best_effort_timestamp(frame); |
|
414 |
- copy = ff_copy_buffer_ref(outlink, buf); |
|
415 |
- if (!copy) |
|
416 |
- return NULL; |
|
417 |
- buf->buf->data[0] = NULL; /* it belongs to the frame */ |
|
418 |
- avfilter_unref_buffer(buf); |
|
419 |
- return copy; |
|
420 |
-} |
|
421 |
- |
|
422 |
-static char *describe_bufref_to_str(char *dst, size_t dst_size, |
|
423 |
- AVFilterBufferRef *buf, |
|
402 |
+static char *describe_frame_to_str(char *dst, size_t dst_size, |
|
403 |
+ AVFrame *frame, |
|
424 | 404 |
AVFilterLink *link) |
425 | 405 |
{ |
426 |
- switch (buf->type) { |
|
406 |
+ switch (frame->type) { |
|
427 | 407 |
case AVMEDIA_TYPE_VIDEO: |
428 | 408 |
snprintf(dst, dst_size, |
429 |
- "video pts:%s time:%s pos:%"PRId64" size:%dx%d aspect:%d/%d", |
|
430 |
- av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base), |
|
431 |
- buf->pos, buf->video->w, buf->video->h, |
|
432 |
- buf->video->sample_aspect_ratio.num, |
|
433 |
- buf->video->sample_aspect_ratio.den); |
|
409 |
+ "video pts:%s time:%s size:%dx%d aspect:%d/%d", |
|
410 |
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base), |
|
411 |
+ frame->width, frame->height, |
|
412 |
+ frame->sample_aspect_ratio.num, |
|
413 |
+ frame->sample_aspect_ratio.den); |
|
434 | 414 |
break; |
435 | 415 |
case AVMEDIA_TYPE_AUDIO: |
436 | 416 |
snprintf(dst, dst_size, |
437 |
- "audio pts:%s time:%s pos:%"PRId64" samples:%d", |
|
438 |
- av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base), |
|
439 |
- buf->pos, buf->audio->nb_samples); |
|
417 |
+ "audio pts:%s time:%s samples:%d", |
|
418 |
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base), |
|
419 |
+ frame->nb_samples); |
|
440 | 420 |
break; |
441 | 421 |
default: |
442 |
- snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(buf->type)); |
|
422 |
+ snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame->type)); |
|
443 | 423 |
break; |
444 | 424 |
} |
445 | 425 |
return dst; |
446 | 426 |
} |
447 | 427 |
|
448 |
-#define describe_bufref(buf, link) \ |
|
449 |
- describe_bufref_to_str((char[1024]){0}, 1024, buf, link) |
|
428 |
+#define describe_frameref(f, link) \ |
|
429 |
+ describe_frame_to_str((char[1024]){0}, 1024, f, link) |
|
450 | 430 |
|
451 | 431 |
static int rewind_file(AVFilterContext *ctx) |
452 | 432 |
{ |
... | ... |
@@ -489,7 +464,6 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id) |
489 | 489 |
MovieStream *st; |
490 | 490 |
int ret, got_frame = 0, pkt_out_id; |
491 | 491 |
AVFilterLink *outlink; |
492 |
- AVFilterBufferRef *buf; |
|
493 | 492 |
|
494 | 493 |
if (!pkt->size) { |
495 | 494 |
if (movie->eof) { |
... | ... |
@@ -532,6 +506,10 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id) |
532 | 532 |
st = &movie->st[pkt_out_id]; |
533 | 533 |
outlink = ctx->outputs[pkt_out_id]; |
534 | 534 |
|
535 |
+ movie->frame = av_frame_alloc(); |
|
536 |
+ if (!movie->frame) |
|
537 |
+ return AVERROR(ENOMEM); |
|
538 |
+ |
|
535 | 539 |
switch (st->st->codec->codec_type) { |
536 | 540 |
case AVMEDIA_TYPE_VIDEO: |
537 | 541 |
ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt); |
... | ... |
@@ -545,6 +523,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id) |
545 | 545 |
} |
546 | 546 |
if (ret < 0) { |
547 | 547 |
av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret)); |
548 |
+ av_frame_free(&movie->frame); |
|
548 | 549 |
return 0; |
549 | 550 |
} |
550 | 551 |
if (!ret) |
... | ... |
@@ -560,23 +539,16 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id) |
560 | 560 |
if (!got_frame) { |
561 | 561 |
if (!ret) |
562 | 562 |
st->done = 1; |
563 |
+ av_frame_free(&movie->frame); |
|
563 | 564 |
return 0; |
564 | 565 |
} |
565 | 566 |
|
566 |
- buf = frame_to_buf(st->st->codec->codec_type, movie->frame, outlink); |
|
567 |
- if (!buf) |
|
568 |
- return AVERROR(ENOMEM); |
|
569 | 567 |
av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name, |
570 |
- describe_bufref(buf, outlink)); |
|
571 |
- switch (st->st->codec->codec_type) { |
|
572 |
- case AVMEDIA_TYPE_VIDEO: |
|
573 |
- if (!movie->frame->sample_aspect_ratio.num) |
|
574 |
- buf->video->sample_aspect_ratio = st->st->sample_aspect_ratio; |
|
575 |
- /* Fall through */ |
|
576 |
- case AVMEDIA_TYPE_AUDIO: |
|
577 |
- ff_filter_frame(outlink, buf); |
|
578 |
- break; |
|
579 |
- } |
|
568 |
+ describe_frameref(movie->frame, outlink)); |
|
569 |
+ |
|
570 |
+ movie->frame->pts = av_frame_get_best_effort_timestamp(movie->frame); |
|
571 |
+ ff_filter_frame(outlink, movie->frame); // FIXME: raise error properly |
|
572 |
+ movie->frame = NULL; |
|
580 | 573 |
|
581 | 574 |
return pkt_out_id == out_id; |
582 | 575 |
} |
... | ... |
@@ -60,5 +60,8 @@ |
60 | 60 |
#ifndef FF_API_BUFFERSRC_BUFFER |
61 | 61 |
#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) |
62 | 62 |
#endif |
63 |
+#ifndef FF_API_AVFILTERBUFFER |
|
64 |
+#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4) |
|
65 |
+#endif |
|
63 | 66 |
|
64 | 67 |
#endif /* AVFILTER_VERSION_H */ |
... | ... |
@@ -60,19 +60,18 @@ static int config_input(AVFilterLink *inlink) |
60 | 60 |
return 0; |
61 | 61 |
} |
62 | 62 |
|
63 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf) |
|
63 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur_buf) |
|
64 | 64 |
{ |
65 | 65 |
AlphaExtractContext *extract = inlink->dst->priv; |
66 | 66 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
67 |
- AVFilterBufferRef *out_buf = |
|
68 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
67 |
+ AVFrame *out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
69 | 68 |
int ret; |
70 | 69 |
|
71 | 70 |
if (!out_buf) { |
72 | 71 |
ret = AVERROR(ENOMEM); |
73 | 72 |
goto end; |
74 | 73 |
} |
75 |
- avfilter_copy_buffer_ref_props(out_buf, cur_buf); |
|
74 |
+ av_frame_copy_props(out_buf, cur_buf); |
|
76 | 75 |
|
77 | 76 |
if (extract->is_packed_rgb) { |
78 | 77 |
int x, y; |
... | ... |
@@ -99,7 +98,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf) |
99 | 99 |
ret = ff_filter_frame(outlink, out_buf); |
100 | 100 |
|
101 | 101 |
end: |
102 |
- avfilter_unref_buffer(cur_buf); |
|
102 |
+ av_frame_unref(cur_buf); |
|
103 | 103 |
return ret; |
104 | 104 |
} |
105 | 105 |
|
... | ... |
@@ -109,7 +108,6 @@ static const AVFilterPad alphaextract_inputs[] = { |
109 | 109 |
.type = AVMEDIA_TYPE_VIDEO, |
110 | 110 |
.config_props = config_input, |
111 | 111 |
.filter_frame = filter_frame, |
112 |
- .min_perms = AV_PERM_READ, |
|
113 | 112 |
}, |
114 | 113 |
{ NULL } |
115 | 114 |
}; |
... | ... |
@@ -96,11 +96,11 @@ static int config_output(AVFilterLink *outlink) |
96 | 96 |
} |
97 | 97 |
|
98 | 98 |
static void draw_frame(AVFilterContext *ctx, |
99 |
- AVFilterBufferRef *main_buf, |
|
100 |
- AVFilterBufferRef *alpha_buf) |
|
99 |
+ AVFrame *main_buf, |
|
100 |
+ AVFrame *alpha_buf) |
|
101 | 101 |
{ |
102 | 102 |
AlphaMergeContext *merge = ctx->priv; |
103 |
- int h = main_buf->video->h; |
|
103 |
+ int h = main_buf->height; |
|
104 | 104 |
|
105 | 105 |
if (merge->is_packed_rgb) { |
106 | 106 |
int x, y; |
... | ... |
@@ -108,7 +108,7 @@ static void draw_frame(AVFilterContext *ctx, |
108 | 108 |
for (y = 0; y < h; y++) { |
109 | 109 |
pin = alpha_buf->data[0] + y * alpha_buf->linesize[0]; |
110 | 110 |
pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A]; |
111 |
- for (x = 0; x < main_buf->video->w; x++) { |
|
111 |
+ for (x = 0; x < main_buf->width; x++) { |
|
112 | 112 |
*pout = *pin; |
113 | 113 |
pin += 1; |
114 | 114 |
pout += 4; |
... | ... |
@@ -118,7 +118,7 @@ static void draw_frame(AVFilterContext *ctx, |
118 | 118 |
int y; |
119 | 119 |
const int main_linesize = main_buf->linesize[A]; |
120 | 120 |
const int alpha_linesize = alpha_buf->linesize[Y]; |
121 |
- for (y = 0; y < h && y < alpha_buf->video->h; y++) { |
|
121 |
+ for (y = 0; y < h && y < alpha_buf->height; y++) { |
|
122 | 122 |
memcpy(main_buf->data[A] + y * main_linesize, |
123 | 123 |
alpha_buf->data[Y] + y * alpha_linesize, |
124 | 124 |
FFMIN(main_linesize, alpha_linesize)); |
... | ... |
@@ -126,7 +126,7 @@ static void draw_frame(AVFilterContext *ctx, |
126 | 126 |
} |
127 | 127 |
} |
128 | 128 |
|
129 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
129 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
130 | 130 |
{ |
131 | 131 |
AVFilterContext *ctx = inlink->dst; |
132 | 132 |
AlphaMergeContext *merge = ctx->priv; |
... | ... |
@@ -137,7 +137,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
137 | 137 |
ff_bufqueue_add(ctx, queue, buf); |
138 | 138 |
|
139 | 139 |
while (1) { |
140 |
- AVFilterBufferRef *main_buf, *alpha_buf; |
|
140 |
+ AVFrame *main_buf, *alpha_buf; |
|
141 | 141 |
|
142 | 142 |
if (!ff_bufqueue_peek(&merge->queue_main, 0) || |
143 | 143 |
!ff_bufqueue_peek(&merge->queue_alpha, 0)) break; |
... | ... |
@@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
148 | 148 |
merge->frame_requested = 0; |
149 | 149 |
draw_frame(ctx, main_buf, alpha_buf); |
150 | 150 |
ff_filter_frame(ctx->outputs[0], main_buf); |
151 |
- avfilter_unref_buffer(alpha_buf); |
|
151 |
+ av_frame_free(&alpha_buf); |
|
152 | 152 |
} |
153 | 153 |
return 0; |
154 | 154 |
} |
... | ... |
@@ -80,11 +80,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *c |
80 | 80 |
return 0; |
81 | 81 |
} |
82 | 82 |
|
83 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
83 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
84 | 84 |
{ |
85 | 85 |
AspectContext *aspect = link->dst->priv; |
86 | 86 |
|
87 |
- frame->video->sample_aspect_ratio = aspect->ratio; |
|
87 |
+ frame->sample_aspect_ratio = aspect->ratio; |
|
88 | 88 |
return ff_filter_frame(link->dst->outputs[0], frame); |
89 | 89 |
} |
90 | 90 |
|
... | ... |
@@ -56,7 +56,7 @@ static int query_formats(AVFilterContext *ctx) |
56 | 56 |
return 0; |
57 | 57 |
} |
58 | 58 |
|
59 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
59 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
60 | 60 |
{ |
61 | 61 |
AVFilterContext *ctx = inlink->dst; |
62 | 62 |
BBoxContext *bbox = ctx->priv; |
... | ... |
@@ -65,14 +65,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
65 | 65 |
|
66 | 66 |
has_bbox = |
67 | 67 |
ff_calculate_bounding_box(&box, |
68 |
- picref->data[0], picref->linesize[0], |
|
68 |
+ frame->data[0], frame->linesize[0], |
|
69 | 69 |
inlink->w, inlink->h, 16); |
70 | 70 |
w = box.x2 - box.x1 + 1; |
71 | 71 |
h = box.y2 - box.y1 + 1; |
72 | 72 |
|
73 | 73 |
av_log(ctx, AV_LOG_INFO, |
74 | 74 |
"n:%d pts:%s pts_time:%s", bbox->frame, |
75 |
- av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base)); |
|
75 |
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base)); |
|
76 | 76 |
|
77 | 77 |
if (has_bbox) { |
78 | 78 |
av_log(ctx, AV_LOG_INFO, |
... | ... |
@@ -85,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
85 | 85 |
av_log(ctx, AV_LOG_INFO, "\n"); |
86 | 86 |
|
87 | 87 |
bbox->frame++; |
88 |
- return ff_filter_frame(inlink->dst->outputs[0], picref); |
|
88 |
+ return ff_filter_frame(inlink->dst->outputs[0], frame); |
|
89 | 89 |
} |
90 | 90 |
|
91 | 91 |
static const AVFilterPad bbox_inputs[] = { |
... | ... |
@@ -146,7 +146,7 @@ static int request_frame(AVFilterLink *outlink) |
146 | 146 |
return ret; |
147 | 147 |
} |
148 | 148 |
|
149 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
149 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref) |
|
150 | 150 |
{ |
151 | 151 |
AVFilterContext *ctx = inlink->dst; |
152 | 152 |
BlackDetectContext *blackdetect = ctx->priv; |
... | ... |
@@ -163,10 +163,10 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
163 | 163 |
picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h); |
164 | 164 |
|
165 | 165 |
av_log(ctx, AV_LOG_DEBUG, |
166 |
- "frame:%u picture_black_ratio:%f pos:%"PRId64" pts:%s t:%s type:%c\n", |
|
166 |
+ "frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n", |
|
167 | 167 |
blackdetect->frame_count, picture_black_ratio, |
168 |
- picref->pos, av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), |
|
169 |
- av_get_picture_type_char(picref->video->pict_type)); |
|
168 |
+ av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), |
|
169 |
+ av_get_picture_type_char(picref->pict_type)); |
|
170 | 170 |
|
171 | 171 |
if (picture_black_ratio >= blackdetect->picture_black_ratio_th) { |
172 | 172 |
if (!blackdetect->black_started) { |
... | ... |
@@ -81,7 +81,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
81 | 81 |
return 0; |
82 | 82 |
} |
83 | 83 |
|
84 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
84 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
85 | 85 |
{ |
86 | 86 |
AVFilterContext *ctx = inlink->dst; |
87 | 87 |
BlackFrameContext *blackframe = ctx->priv; |
... | ... |
@@ -89,22 +89,22 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
89 | 89 |
int pblack = 0; |
90 | 90 |
uint8_t *p = frame->data[0]; |
91 | 91 |
|
92 |
- for (i = 0; i < frame->video->h; i++) { |
|
92 |
+ for (i = 0; i < frame->height; i++) { |
|
93 | 93 |
for (x = 0; x < inlink->w; x++) |
94 | 94 |
blackframe->nblack += p[x] < blackframe->bthresh; |
95 | 95 |
p += frame->linesize[0]; |
96 | 96 |
} |
97 | 97 |
|
98 |
- if (frame->video->key_frame) |
|
98 |
+ if (frame->key_frame) |
|
99 | 99 |
blackframe->last_keyframe = blackframe->frame; |
100 | 100 |
|
101 | 101 |
pblack = blackframe->nblack * 100 / (inlink->w * inlink->h); |
102 | 102 |
if (pblack >= blackframe->bamount) |
103 |
- av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f " |
|
103 |
+ av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f " |
|
104 | 104 |
"type:%c last_keyframe:%d\n", |
105 |
- blackframe->frame, pblack, frame->pos, frame->pts, |
|
105 |
+ blackframe->frame, pblack, frame->pts, |
|
106 | 106 |
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base), |
107 |
- av_get_picture_type_char(frame->video->pict_type), blackframe->last_keyframe); |
|
107 |
+ av_get_picture_type_char(frame->pict_type), blackframe->last_keyframe); |
|
108 | 108 |
|
109 | 109 |
blackframe->frame++; |
110 | 110 |
blackframe->nblack = 0; |
... | ... |
@@ -368,9 +368,9 @@ static int request_frame(AVFilterLink *outlink) |
368 | 368 |
} |
369 | 369 |
|
370 | 370 |
static void blend_frame(AVFilterContext *ctx, |
371 |
- AVFilterBufferRef *top_buf, |
|
372 |
- AVFilterBufferRef *bottom_buf, |
|
373 |
- AVFilterBufferRef *dst_buf) |
|
371 |
+ AVFrame *top_buf, |
|
372 |
+ AVFrame *bottom_buf, |
|
373 |
+ AVFrame *dst_buf) |
|
374 | 374 |
{ |
375 | 375 |
BlendContext *b = ctx->priv; |
376 | 376 |
AVFilterLink *inlink = ctx->inputs[0]; |
... | ... |
@@ -380,8 +380,8 @@ static void blend_frame(AVFilterContext *ctx, |
380 | 380 |
for (plane = 0; dst_buf->data[plane]; plane++) { |
381 | 381 |
int hsub = plane == 1 || plane == 2 ? b->hsub : 0; |
382 | 382 |
int vsub = plane == 1 || plane == 2 ? b->vsub : 0; |
383 |
- int outw = dst_buf->video->w >> hsub; |
|
384 |
- int outh = dst_buf->video->h >> vsub; |
|
383 |
+ int outw = dst_buf->width >> hsub; |
|
384 |
+ int outh = dst_buf->height >> vsub; |
|
385 | 385 |
uint8_t *dst = dst_buf->data[plane]; |
386 | 386 |
uint8_t *top = top_buf->data[plane]; |
387 | 387 |
uint8_t *bottom = bottom_buf->data[plane]; |
... | ... |
@@ -390,15 +390,15 @@ static void blend_frame(AVFilterContext *ctx, |
390 | 390 |
param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base); |
391 | 391 |
param->values[VAR_W] = outw; |
392 | 392 |
param->values[VAR_H] = outh; |
393 |
- param->values[VAR_SW] = outw / dst_buf->video->w; |
|
394 |
- param->values[VAR_SH] = outh / dst_buf->video->h; |
|
393 |
+ param->values[VAR_SW] = outw / dst_buf->width; |
|
394 |
+ param->values[VAR_SH] = outh / dst_buf->height; |
|
395 | 395 |
param->blend(top, top_buf->linesize[plane], |
396 | 396 |
bottom, bottom_buf->linesize[plane], |
397 | 397 |
dst, dst_buf->linesize[plane], outw, outh, param); |
398 | 398 |
} |
399 | 399 |
} |
400 | 400 |
|
401 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
401 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
402 | 402 |
{ |
403 | 403 |
AVFilterContext *ctx = inlink->dst; |
404 | 404 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -411,7 +411,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
411 | 411 |
ff_bufqueue_add(ctx, queue, buf); |
412 | 412 |
|
413 | 413 |
while (1) { |
414 |
- AVFilterBufferRef *top_buf, *bottom_buf, *out_buf; |
|
414 |
+ AVFrame *top_buf, *bottom_buf, *out_buf; |
|
415 | 415 |
|
416 | 416 |
if (!ff_bufqueue_peek(&b->queue_top, TOP) || |
417 | 417 |
!ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break; |
... | ... |
@@ -419,18 +419,17 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
419 | 419 |
top_buf = ff_bufqueue_get(&b->queue_top); |
420 | 420 |
bottom_buf = ff_bufqueue_get(&b->queue_bottom); |
421 | 421 |
|
422 |
- out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
423 |
- outlink->w, outlink->h); |
|
422 |
+ out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
424 | 423 |
if (!out_buf) { |
425 | 424 |
return AVERROR(ENOMEM); |
426 | 425 |
} |
427 |
- avfilter_copy_buffer_ref_props(out_buf, top_buf); |
|
426 |
+ av_frame_copy_props(out_buf, top_buf); |
|
428 | 427 |
|
429 | 428 |
b->frame_requested = 0; |
430 | 429 |
blend_frame(ctx, top_buf, bottom_buf, out_buf); |
431 | 430 |
ret = ff_filter_frame(ctx->outputs[0], out_buf); |
432 |
- avfilter_unref_buffer(top_buf); |
|
433 |
- avfilter_unref_buffer(bottom_buf); |
|
431 |
+ av_frame_free(&top_buf); |
|
432 |
+ av_frame_free(&bottom_buf); |
|
434 | 433 |
} |
435 | 434 |
return ret; |
436 | 435 |
} |
... | ... |
@@ -441,12 +440,10 @@ static const AVFilterPad blend_inputs[] = { |
441 | 441 |
.type = AVMEDIA_TYPE_VIDEO, |
442 | 442 |
.config_props = config_input_top, |
443 | 443 |
.filter_frame = filter_frame, |
444 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
445 | 444 |
},{ |
446 | 445 |
.name = "bottom", |
447 | 446 |
.type = AVMEDIA_TYPE_VIDEO, |
448 | 447 |
.filter_frame = filter_frame, |
449 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
450 | 448 |
}, |
451 | 449 |
{ NULL } |
452 | 450 |
}; |
... | ... |
@@ -328,23 +328,23 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li |
328 | 328 |
h, radius, power, temp); |
329 | 329 |
} |
330 | 330 |
|
331 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
331 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
332 | 332 |
{ |
333 | 333 |
AVFilterContext *ctx = inlink->dst; |
334 | 334 |
BoxBlurContext *boxblur = ctx->priv; |
335 | 335 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
336 |
- AVFilterBufferRef *out; |
|
336 |
+ AVFrame *out; |
|
337 | 337 |
int plane; |
338 |
- int cw = inlink->w >> boxblur->hsub, ch = in->video->h >> boxblur->vsub; |
|
338 |
+ int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub; |
|
339 | 339 |
int w[4] = { inlink->w, cw, cw, inlink->w }; |
340 |
- int h[4] = { in->video->h, ch, ch, in->video->h }; |
|
340 |
+ int h[4] = { in->height, ch, ch, in->height }; |
|
341 | 341 |
|
342 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
342 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
343 | 343 |
if (!out) { |
344 |
- avfilter_unref_bufferp(&in); |
|
344 |
+ av_frame_free(&in); |
|
345 | 345 |
return AVERROR(ENOMEM); |
346 | 346 |
} |
347 |
- avfilter_copy_buffer_ref_props(out, in); |
|
347 |
+ av_frame_copy_props(out, in); |
|
348 | 348 |
|
349 | 349 |
for (plane = 0; in->data[plane] && plane < 4; plane++) |
350 | 350 |
hblur(out->data[plane], out->linesize[plane], |
... | ... |
@@ -358,7 +358,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
358 | 358 |
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], |
359 | 359 |
boxblur->temp); |
360 | 360 |
|
361 |
- avfilter_unref_bufferp(&in); |
|
361 |
+ av_frame_free(&in); |
|
362 | 362 |
|
363 | 363 |
return ff_filter_frame(outlink, out); |
364 | 364 |
} |
... | ... |
@@ -369,7 +369,6 @@ static const AVFilterPad avfilter_vf_boxblur_inputs[] = { |
369 | 369 |
.type = AVMEDIA_TYPE_VIDEO, |
370 | 370 |
.config_props = config_input, |
371 | 371 |
.filter_frame = filter_frame, |
372 |
- .min_perms = AV_PERM_READ |
|
373 | 372 |
}, |
374 | 373 |
{ NULL } |
375 | 374 |
}; |
... | ... |
@@ -183,12 +183,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
183 | 183 |
} |
184 | 184 |
|
185 | 185 |
static void process_frame_uyvy422(ColorMatrixContext *color, |
186 |
- AVFilterBufferRef *dst, AVFilterBufferRef *src) |
|
186 |
+ AVFrame *dst, AVFrame *src) |
|
187 | 187 |
{ |
188 | 188 |
const unsigned char *srcp = src->data[0]; |
189 | 189 |
const int src_pitch = src->linesize[0]; |
190 |
- const int height = src->video->h; |
|
191 |
- const int width = src->video->w*2; |
|
190 |
+ const int height = src->height; |
|
191 |
+ const int width = src->width*2; |
|
192 | 192 |
unsigned char *dstp = dst->data[0]; |
193 | 193 |
const int dst_pitch = dst->linesize[0]; |
194 | 194 |
const int c2 = color->yuv_convert[color->mode][0][1]; |
... | ... |
@@ -215,15 +215,15 @@ static void process_frame_uyvy422(ColorMatrixContext *color, |
215 | 215 |
} |
216 | 216 |
|
217 | 217 |
static void process_frame_yuv422p(ColorMatrixContext *color, |
218 |
- AVFilterBufferRef *dst, AVFilterBufferRef *src) |
|
218 |
+ AVFrame *dst, AVFrame *src) |
|
219 | 219 |
{ |
220 | 220 |
const unsigned char *srcpU = src->data[1]; |
221 | 221 |
const unsigned char *srcpV = src->data[2]; |
222 | 222 |
const unsigned char *srcpY = src->data[0]; |
223 | 223 |
const int src_pitchY = src->linesize[0]; |
224 | 224 |
const int src_pitchUV = src->linesize[1]; |
225 |
- const int height = src->video->h; |
|
226 |
- const int width = src->video->w; |
|
225 |
+ const int height = src->height; |
|
226 |
+ const int width = src->width; |
|
227 | 227 |
unsigned char *dstpU = dst->data[1]; |
228 | 228 |
unsigned char *dstpV = dst->data[2]; |
229 | 229 |
unsigned char *dstpY = dst->data[0]; |
... | ... |
@@ -257,7 +257,7 @@ static void process_frame_yuv422p(ColorMatrixContext *color, |
257 | 257 |
} |
258 | 258 |
|
259 | 259 |
static void process_frame_yuv420p(ColorMatrixContext *color, |
260 |
- AVFilterBufferRef *dst, AVFilterBufferRef *src) |
|
260 |
+ AVFrame *dst, AVFrame *src) |
|
261 | 261 |
{ |
262 | 262 |
const unsigned char *srcpU = src->data[1]; |
263 | 263 |
const unsigned char *srcpV = src->data[2]; |
... | ... |
@@ -265,8 +265,8 @@ static void process_frame_yuv420p(ColorMatrixContext *color, |
265 | 265 |
const unsigned char *srcpN = src->data[0] + src->linesize[0]; |
266 | 266 |
const int src_pitchY = src->linesize[0]; |
267 | 267 |
const int src_pitchUV = src->linesize[1]; |
268 |
- const int height = src->video->h; |
|
269 |
- const int width = src->video->w; |
|
268 |
+ const int height = src->height; |
|
269 |
+ const int width = src->width; |
|
270 | 270 |
unsigned char *dstpU = dst->data[1]; |
271 | 271 |
unsigned char *dstpV = dst->data[2]; |
272 | 272 |
unsigned char *dstpY = dst->data[0]; |
... | ... |
@@ -332,19 +332,19 @@ static int query_formats(AVFilterContext *ctx) |
332 | 332 |
return 0; |
333 | 333 |
} |
334 | 334 |
|
335 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
335 |
+static int filter_frame(AVFilterLink *link, AVFrame *in) |
|
336 | 336 |
{ |
337 | 337 |
AVFilterContext *ctx = link->dst; |
338 | 338 |
ColorMatrixContext *color = ctx->priv; |
339 | 339 |
AVFilterLink *outlink = ctx->outputs[0]; |
340 |
- AVFilterBufferRef *out; |
|
340 |
+ AVFrame *out; |
|
341 | 341 |
|
342 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
342 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
343 | 343 |
if (!out) { |
344 |
- avfilter_unref_bufferp(&in); |
|
344 |
+ av_frame_free(&in); |
|
345 | 345 |
return AVERROR(ENOMEM); |
346 | 346 |
} |
347 |
- avfilter_copy_buffer_ref_props(out, in); |
|
347 |
+ av_frame_copy_props(out, in); |
|
348 | 348 |
|
349 | 349 |
if (in->format == AV_PIX_FMT_YUV422P) |
350 | 350 |
process_frame_yuv422p(color, out, in); |
... | ... |
@@ -353,7 +353,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
353 | 353 |
else |
354 | 354 |
process_frame_uyvy422(color, out, in); |
355 | 355 |
|
356 |
- avfilter_unref_bufferp(&in); |
|
356 |
+ av_frame_free(&in); |
|
357 | 357 |
return ff_filter_frame(outlink, out); |
358 | 358 |
} |
359 | 359 |
|
... | ... |
@@ -362,7 +362,6 @@ static const AVFilterPad colormatrix_inputs[] = { |
362 | 362 |
.name = "default", |
363 | 363 |
.type = AVMEDIA_TYPE_VIDEO, |
364 | 364 |
.config_props = config_input, |
365 |
- .min_perms = AV_PERM_READ, |
|
366 | 365 |
.filter_frame = filter_frame, |
367 | 366 |
}, |
368 | 367 |
{ NULL } |
... | ... |
@@ -21,17 +21,35 @@ |
21 | 21 |
* copy video filter |
22 | 22 |
*/ |
23 | 23 |
|
24 |
+#include "libavutil/imgutils.h" |
|
24 | 25 |
#include "libavutil/internal.h" |
25 | 26 |
#include "avfilter.h" |
26 | 27 |
#include "internal.h" |
27 | 28 |
#include "video.h" |
28 | 29 |
|
30 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
31 |
+{ |
|
32 |
+ AVFilterLink *outlink = inlink->dst->outputs[0]; |
|
33 |
+ AVFrame *out = ff_get_video_buffer(outlink, in->width, in->height); |
|
34 |
+ |
|
35 |
+ if (!out) { |
|
36 |
+ av_frame_free(&in); |
|
37 |
+ return AVERROR(ENOMEM); |
|
38 |
+ } |
|
39 |
+ av_frame_copy_props(out, in); |
|
40 |
+ av_image_copy(out->data, out->linesize, in->data, in->linesize, |
|
41 |
+ in->format, in->width, in->height); |
|
42 |
+ |
|
43 |
+ av_frame_free(&in); |
|
44 |
+ return ff_filter_frame(outlink, out); |
|
45 |
+} |
|
46 |
+ |
|
29 | 47 |
static const AVFilterPad avfilter_vf_copy_inputs[] = { |
30 | 48 |
{ |
31 | 49 |
.name = "default", |
32 | 50 |
.type = AVMEDIA_TYPE_VIDEO, |
33 | 51 |
.get_video_buffer = ff_null_get_video_buffer, |
34 |
- .rej_perms = ~0 |
|
52 |
+ .filter_frame = filter_frame, |
|
35 | 53 |
}, |
36 | 54 |
{ NULL } |
37 | 55 |
}; |
... | ... |
@@ -70,7 +70,6 @@ enum var_name { |
70 | 70 |
VAR_X, |
71 | 71 |
VAR_Y, |
72 | 72 |
VAR_N, |
73 |
- VAR_POS, |
|
74 | 73 |
VAR_T, |
75 | 74 |
VAR_VARS_NB |
76 | 75 |
}; |
... | ... |
@@ -198,7 +197,6 @@ static int config_input(AVFilterLink *link) |
198 | 198 |
crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN; |
199 | 199 |
crop->var_values[VAR_N] = 0; |
200 | 200 |
crop->var_values[VAR_T] = NAN; |
201 |
- crop->var_values[VAR_POS] = NAN; |
|
202 | 201 |
|
203 | 202 |
av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc); |
204 | 203 |
crop->hsub = pix_desc->log2_chroma_w; |
... | ... |
@@ -277,19 +275,18 @@ static int config_output(AVFilterLink *link) |
277 | 277 |
return 0; |
278 | 278 |
} |
279 | 279 |
|
280 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
280 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
281 | 281 |
{ |
282 | 282 |
AVFilterContext *ctx = link->dst; |
283 | 283 |
CropContext *crop = ctx->priv; |
284 | 284 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); |
285 | 285 |
int i; |
286 | 286 |
|
287 |
- frame->video->w = crop->w; |
|
288 |
- frame->video->h = crop->h; |
|
287 |
+ frame->width = crop->w; |
|
288 |
+ frame->height = crop->h; |
|
289 | 289 |
|
290 | 290 |
crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ? |
291 | 291 |
NAN : frame->pts * av_q2d(link->time_base); |
292 |
- crop->var_values[VAR_POS] = frame->pos == -1 ? NAN : frame->pos; |
|
293 | 292 |
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL); |
294 | 293 |
crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL); |
295 | 294 |
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL); |
... | ... |
@@ -117,7 +117,7 @@ static int config_input(AVFilterLink *inlink) |
117 | 117 |
return 0; |
118 | 118 |
} |
119 | 119 |
|
120 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
120 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
121 | 121 |
{ |
122 | 122 |
AVFilterContext *ctx = inlink->dst; |
123 | 123 |
CropDetectContext *cd = ctx->priv; |
... | ... |
@@ -128,36 +128,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
128 | 128 |
if (++cd->frame_nb > 0) { |
129 | 129 |
// Reset the crop area every reset_count frames, if reset_count is > 0 |
130 | 130 |
if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) { |
131 |
- cd->x1 = frame->video->w-1; |
|
132 |
- cd->y1 = frame->video->h-1; |
|
131 |
+ cd->x1 = frame->width - 1; |
|
132 |
+ cd->y1 = frame->height - 1; |
|
133 | 133 |
cd->x2 = 0; |
134 | 134 |
cd->y2 = 0; |
135 | 135 |
cd->frame_nb = 1; |
136 | 136 |
} |
137 | 137 |
|
138 | 138 |
for (y = 0; y < cd->y1; y++) { |
139 |
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) { |
|
139 |
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) { |
|
140 | 140 |
cd->y1 = y; |
141 | 141 |
break; |
142 | 142 |
} |
143 | 143 |
} |
144 | 144 |
|
145 |
- for (y = frame->video->h-1; y > cd->y2; y--) { |
|
146 |
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) { |
|
145 |
+ for (y = frame->height - 1; y > cd->y2; y--) { |
|
146 |
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) { |
|
147 | 147 |
cd->y2 = y; |
148 | 148 |
break; |
149 | 149 |
} |
150 | 150 |
} |
151 | 151 |
|
152 | 152 |
for (y = 0; y < cd->x1; y++) { |
153 |
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) { |
|
153 |
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) { |
|
154 | 154 |
cd->x1 = y; |
155 | 155 |
break; |
156 | 156 |
} |
157 | 157 |
} |
158 | 158 |
|
159 |
- for (y = frame->video->w-1; y > cd->x2; y--) { |
|
160 |
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) { |
|
159 |
+ for (y = frame->width - 1; y > cd->x2; y--) { |
|
160 |
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) { |
|
161 | 161 |
cd->x2 = y; |
162 | 162 |
break; |
163 | 163 |
} |
... | ... |
@@ -187,8 +187,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
187 | 187 |
y += (shrink_by/2 + 1) & ~1; |
188 | 188 |
|
189 | 189 |
av_log(ctx, AV_LOG_INFO, |
190 |
- "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n", |
|
191 |
- cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pos, frame->pts, |
|
190 |
+ "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n", |
|
191 |
+ cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pts, |
|
192 | 192 |
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base), |
193 | 193 |
w, h, x, y); |
194 | 194 |
} |
... | ... |
@@ -47,7 +47,7 @@ typedef struct { |
47 | 47 |
///< if negative: number of sequential frames which were not dropped |
48 | 48 |
|
49 | 49 |
int hsub, vsub; ///< chroma subsampling values |
50 |
- AVFilterBufferRef *ref; ///< reference picture |
|
50 |
+ AVFrame *ref; ///< reference picture |
|
51 | 51 |
DSPContext dspctx; ///< context providing optimized diff routines |
52 | 52 |
AVCodecContext *avctx; ///< codec context required for the DSPContext |
53 | 53 |
} DecimateContext; |
... | ... |
@@ -105,7 +105,7 @@ static int diff_planes(AVFilterContext *ctx, |
105 | 105 |
* different with respect to the reference frame ref. |
106 | 106 |
*/ |
107 | 107 |
static int decimate_frame(AVFilterContext *ctx, |
108 |
- AVFilterBufferRef *cur, AVFilterBufferRef *ref) |
|
108 |
+ AVFrame *cur, AVFrame *ref) |
|
109 | 109 |
{ |
110 | 110 |
DecimateContext *decimate = ctx->priv; |
111 | 111 |
int plane; |
... | ... |
@@ -122,7 +122,7 @@ static int decimate_frame(AVFilterContext *ctx, |
122 | 122 |
int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0; |
123 | 123 |
if (diff_planes(ctx, |
124 | 124 |
cur->data[plane], ref->data[plane], ref->linesize[plane], |
125 |
- ref->video->w>>hsub, ref->video->h>>vsub)) |
|
125 |
+ ref->width>>hsub, ref->height>>vsub)) |
|
126 | 126 |
return 0; |
127 | 127 |
} |
128 | 128 |
|
... | ... |
@@ -155,7 +155,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
155 | 155 |
static av_cold void uninit(AVFilterContext *ctx) |
156 | 156 |
{ |
157 | 157 |
DecimateContext *decimate = ctx->priv; |
158 |
- avfilter_unref_bufferp(&decimate->ref); |
|
158 |
+ av_frame_free(&decimate->ref); |
|
159 | 159 |
avcodec_close(decimate->avctx); |
160 | 160 |
av_opt_free(decimate); |
161 | 161 |
av_freep(&decimate->avctx); |
... | ... |
@@ -189,7 +189,7 @@ static int config_input(AVFilterLink *inlink) |
189 | 189 |
return 0; |
190 | 190 |
} |
191 | 191 |
|
192 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur) |
|
192 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur) |
|
193 | 193 |
{ |
194 | 194 |
DecimateContext *decimate = inlink->dst->priv; |
195 | 195 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
... | ... |
@@ -198,11 +198,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur) |
198 | 198 |
if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) { |
199 | 199 |
decimate->drop_count = FFMAX(1, decimate->drop_count+1); |
200 | 200 |
} else { |
201 |
- avfilter_unref_buffer(decimate->ref); |
|
201 |
+ av_frame_free(&decimate->ref); |
|
202 | 202 |
decimate->ref = cur; |
203 | 203 |
decimate->drop_count = FFMIN(-1, decimate->drop_count-1); |
204 | 204 |
|
205 |
- if (ret = ff_filter_frame(outlink, avfilter_ref_buffer(cur, ~AV_PERM_WRITE)) < 0) |
|
205 |
+ if (ret = ff_filter_frame(outlink, av_frame_clone(cur)) < 0) |
|
206 | 206 |
return ret; |
207 | 207 |
} |
208 | 208 |
|
... | ... |
@@ -213,7 +213,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur) |
213 | 213 |
decimate->drop_count); |
214 | 214 |
|
215 | 215 |
if (decimate->drop_count > 0) |
216 |
- avfilter_unref_buffer(cur); |
|
216 |
+ av_frame_free(&cur); |
|
217 | 217 |
|
218 | 218 |
return 0; |
219 | 219 |
} |
... | ... |
@@ -238,7 +238,6 @@ static const AVFilterPad decimate_inputs[] = { |
238 | 238 |
.get_video_buffer = ff_null_get_video_buffer, |
239 | 239 |
.config_props = config_input, |
240 | 240 |
.filter_frame = filter_frame, |
241 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
242 | 241 |
}, |
243 | 242 |
{ NULL } |
244 | 243 |
}; |
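
A minimal sketch of the reference-keeping pattern the decimate hunks above switch to. It assumes libavfilter's internal headers and the DecimateContext declared earlier in this file; the helper name is illustrative and not part of the commit.

    static int keep_reference(DecimateContext *s, AVFrame *cur)
    {
        /* av_frame_clone() takes a new reference to the same refcounted data;
         * the old avfilter_ref_buffer(cur, ~AV_PERM_WRITE) permission mask is gone. */
        AVFrame *ref = av_frame_clone(cur);
        if (!ref)
            return AVERROR(ENOMEM);
        av_frame_free(&s->ref);   /* was: avfilter_unref_buffer(s->ref) */
        s->ref = ref;
        return 0;
    }
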
... | ... |
@@ -209,27 +209,28 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
209 | 209 |
return 0; |
210 | 210 |
} |
211 | 211 |
|
212 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
212 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
213 | 213 |
{ |
214 | 214 |
DelogoContext *delogo = inlink->dst->priv; |
215 | 215 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
216 | 216 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
217 |
- AVFilterBufferRef *out; |
|
217 |
+ AVFrame *out; |
|
218 | 218 |
int hsub0 = desc->log2_chroma_w; |
219 | 219 |
int vsub0 = desc->log2_chroma_h; |
220 | 220 |
int direct = 0; |
221 | 221 |
int plane; |
222 | 222 |
|
223 |
- if (in->perms & AV_PERM_WRITE) { |
|
223 |
+ if (av_frame_is_writable(in)) { |
|
224 | 224 |
direct = 1; |
225 | 225 |
out = in; |
226 | 226 |
} else { |
227 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
227 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
228 | 228 |
if (!out) { |
229 |
- avfilter_unref_bufferp(&in); |
|
229 |
+ av_frame_free(&in); |
|
230 | 230 |
return AVERROR(ENOMEM); |
231 | 231 |
} |
232 |
- avfilter_copy_buffer_ref_props(out, in); |
|
232 |
+ |
|
233 |
+ av_frame_copy_props(out, in); |
|
233 | 234 |
} |
234 | 235 |
|
235 | 236 |
for (plane = 0; plane < 4 && in->data[plane]; plane++) { |
... | ... |
@@ -246,7 +247,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
246 | 246 |
} |
247 | 247 |
|
248 | 248 |
if (!direct) |
249 |
- avfilter_unref_bufferp(&in); |
|
249 |
+ av_frame_free(&in); |
|
250 | 250 |
|
251 | 251 |
return ff_filter_frame(outlink, out); |
252 | 252 |
} |
... | ... |
@@ -257,7 +258,6 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = { |
257 | 257 |
.type = AVMEDIA_TYPE_VIDEO, |
258 | 258 |
.get_video_buffer = ff_null_get_video_buffer, |
259 | 259 |
.filter_frame = filter_frame, |
260 |
- .min_perms = AV_PERM_WRITE | AV_PERM_READ, |
|
261 | 260 |
}, |
262 | 261 |
{ NULL } |
263 | 262 |
}; |
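
For orientation, a minimal sketch of the writable-or-copy idiom that delogo (above) and, later in this diff, gradfun and hqdn3d now share. It assumes libavfilter's internal headers (avfilter.h, internal.h, video.h) and is not code from the commit.

    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out;
        /* av_frame_is_writable() replaces the old in->perms & AV_PERM_WRITE test */
        int direct = av_frame_is_writable(in);

        if (direct) {
            out = in;
        } else {
            /* the perms argument to ff_get_video_buffer() is gone */
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
        }

        /* ... per-plane processing from in to out goes here ... */

        if (!direct)
            av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }
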
... | ... |
@@ -88,7 +88,7 @@ typedef struct { |
88 | 88 |
|
89 | 89 |
typedef struct { |
90 | 90 |
const AVClass *class; |
91 |
- AVFilterBufferRef *ref; ///< Previous frame |
|
91 |
+ AVFrame *ref; ///< Previous frame |
|
92 | 92 |
int rx; ///< Maximum horizontal shift |
93 | 93 |
int ry; ///< Maximum vertical shift |
94 | 94 |
int edge; ///< Edge fill method |
... | ... |
@@ -434,7 +434,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
434 | 434 |
{ |
435 | 435 |
DeshakeContext *deshake = ctx->priv; |
436 | 436 |
|
437 |
- avfilter_unref_buffer(deshake->ref); |
|
437 |
+ av_frame_free(&deshake->ref); |
|
438 | 438 |
if (deshake->fp) |
439 | 439 |
fclose(deshake->fp); |
440 | 440 |
if (deshake->avctx) |
... | ... |
@@ -443,22 +443,22 @@ static av_cold void uninit(AVFilterContext *ctx) |
443 | 443 |
av_opt_free(deshake); |
444 | 444 |
} |
445 | 445 |
|
446 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
446 |
+static int filter_frame(AVFilterLink *link, AVFrame *in) |
|
447 | 447 |
{ |
448 | 448 |
DeshakeContext *deshake = link->dst->priv; |
449 | 449 |
AVFilterLink *outlink = link->dst->outputs[0]; |
450 |
- AVFilterBufferRef *out; |
|
450 |
+ AVFrame *out; |
|
451 | 451 |
Transform t = {{0},0}, orig = {{0},0}; |
452 | 452 |
float matrix[9]; |
453 | 453 |
float alpha = 2.0 / deshake->refcount; |
454 | 454 |
char tmp[256]; |
455 | 455 |
|
456 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
456 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
457 | 457 |
if (!out) { |
458 |
- avfilter_unref_bufferp(&in); |
|
458 |
+ av_frame_free(&in); |
|
459 | 459 |
return AVERROR(ENOMEM); |
460 | 460 |
} |
461 |
- avfilter_copy_buffer_ref_props(out, in); |
|
461 |
+ av_frame_copy_props(out, in); |
|
462 | 462 |
|
463 | 463 |
if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) { |
464 | 464 |
// Find the most likely global motion for the current frame |
... | ... |
@@ -545,7 +545,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
545 | 545 |
avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge); |
546 | 546 |
|
547 | 547 |
// Cleanup the old reference frame |
548 |
- avfilter_unref_buffer(deshake->ref); |
|
548 |
+ av_frame_free(&deshake->ref); |
|
549 | 549 |
|
550 | 550 |
// Store the current frame as the reference frame for calculating the |
551 | 551 |
// motion of the next frame |
... | ... |
@@ -130,13 +130,13 @@ static int config_input(AVFilterLink *inlink) |
130 | 130 |
return 0; |
131 | 131 |
} |
132 | 132 |
|
133 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
133 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
134 | 134 |
{ |
135 | 135 |
DrawBoxContext *drawbox = inlink->dst->priv; |
136 | 136 |
int plane, x, y, xb = drawbox->x, yb = drawbox->y; |
137 | 137 |
unsigned char *row[4]; |
138 | 138 |
|
139 |
- for (y = FFMAX(yb, 0); y < frame->video->h && y < (yb + drawbox->h); y++) { |
|
139 |
+ for (y = FFMAX(yb, 0); y < frame->height && y < (yb + drawbox->h); y++) { |
|
140 | 140 |
row[0] = frame->data[0] + y * frame->linesize[0]; |
141 | 141 |
|
142 | 142 |
for (plane = 1; plane < 3; plane++) |
... | ... |
@@ -144,12 +144,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
144 | 144 |
frame->linesize[plane] * (y >> drawbox->vsub); |
145 | 145 |
|
146 | 146 |
if (drawbox->invert_color) { |
147 |
- for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++) |
|
147 |
+ for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++) |
|
148 | 148 |
if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) || |
149 | 149 |
(x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness)) |
150 | 150 |
row[0][x] = 0xff - row[0][x]; |
151 | 151 |
} else { |
152 |
- for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++) { |
|
152 |
+ for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++) { |
|
153 | 153 |
double alpha = (double)drawbox->yuv_color[A] / 255; |
154 | 154 |
|
155 | 155 |
if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) || |
... | ... |
@@ -172,7 +172,7 @@ static const AVFilterPad avfilter_vf_drawbox_inputs[] = { |
172 | 172 |
.config_props = config_input, |
173 | 173 |
.get_video_buffer = ff_null_get_video_buffer, |
174 | 174 |
.filter_frame = filter_frame, |
175 |
- .min_perms = AV_PERM_WRITE | AV_PERM_READ, |
|
175 |
+ .needs_writable = 1, |
|
176 | 176 |
}, |
177 | 177 |
{ NULL } |
178 | 178 |
}; |
... | ... |
@@ -784,7 +784,7 @@ static int expand_text(AVFilterContext *ctx) |
784 | 784 |
return 0; |
785 | 785 |
} |
786 | 786 |
|
787 |
-static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref, |
|
787 |
+static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame, |
|
788 | 788 |
int width, int height, const uint8_t rgbcolor[4], FFDrawColor *color, int x, int y) |
789 | 789 |
{ |
790 | 790 |
char *text = dtext->expanded_text.str; |
... | ... |
@@ -812,7 +812,7 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref, |
812 | 812 |
y1 = dtext->positions[i].y+dtext->y+y; |
813 | 813 |
|
814 | 814 |
ff_blend_mask(&dtext->dc, color, |
815 |
- picref->data, picref->linesize, width, height, |
|
815 |
+ frame->data, frame->linesize, width, height, |
|
816 | 816 |
glyph->bitmap.buffer, glyph->bitmap.pitch, |
817 | 817 |
glyph->bitmap.width, glyph->bitmap.rows, |
818 | 818 |
glyph->bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 0 : 3, |
... | ... |
@@ -822,7 +822,7 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref, |
822 | 822 |
return 0; |
823 | 823 |
} |
824 | 824 |
|
825 |
-static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref, |
|
825 |
+static int draw_text(AVFilterContext *ctx, AVFrame *frame, |
|
826 | 826 |
int width, int height) |
827 | 827 |
{ |
828 | 828 |
DrawTextContext *dtext = ctx->priv; |
... | ... |
@@ -845,7 +845,7 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref, |
845 | 845 |
av_bprint_clear(bp); |
846 | 846 |
|
847 | 847 |
if(dtext->basetime != AV_NOPTS_VALUE) |
848 |
- now= picref->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000; |
|
848 |
+ now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000; |
|
849 | 849 |
|
850 | 850 |
switch (dtext->exp_mode) { |
851 | 851 |
case EXP_NONE: |
... | ... |
@@ -962,23 +962,23 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref, |
962 | 962 |
/* draw box */ |
963 | 963 |
if (dtext->draw_box) |
964 | 964 |
ff_blend_rectangle(&dtext->dc, &dtext->boxcolor, |
965 |
- picref->data, picref->linesize, width, height, |
|
965 |
+ frame->data, frame->linesize, width, height, |
|
966 | 966 |
dtext->x, dtext->y, box_w, box_h); |
967 | 967 |
|
968 | 968 |
if (dtext->shadowx || dtext->shadowy) { |
969 |
- if ((ret = draw_glyphs(dtext, picref, width, height, dtext->shadowcolor.rgba, |
|
969 |
+ if ((ret = draw_glyphs(dtext, frame, width, height, dtext->shadowcolor.rgba, |
|
970 | 970 |
&dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0) |
971 | 971 |
return ret; |
972 | 972 |
} |
973 | 973 |
|
974 |
- if ((ret = draw_glyphs(dtext, picref, width, height, dtext->fontcolor.rgba, |
|
974 |
+ if ((ret = draw_glyphs(dtext, frame, width, height, dtext->fontcolor.rgba, |
|
975 | 975 |
&dtext->fontcolor, 0, 0)) < 0) |
976 | 976 |
return ret; |
977 | 977 |
|
978 | 978 |
return 0; |
979 | 979 |
} |
980 | 980 |
|
981 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
981 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
982 | 982 |
{ |
983 | 983 |
AVFilterContext *ctx = inlink->dst; |
984 | 984 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -992,7 +992,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
992 | 992 |
dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ? |
993 | 993 |
NAN : frame->pts * av_q2d(inlink->time_base); |
994 | 994 |
|
995 |
- draw_text(ctx, frame, frame->video->w, frame->video->h); |
|
995 |
+ draw_text(ctx, frame, frame->width, frame->height); |
|
996 | 996 |
|
997 | 997 |
av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n", |
998 | 998 |
(int)dtext->var_values[VAR_N], dtext->var_values[VAR_T], |
... | ... |
@@ -1011,8 +1011,7 @@ static const AVFilterPad avfilter_vf_drawtext_inputs[] = { |
1011 | 1011 |
.get_video_buffer = ff_null_get_video_buffer, |
1012 | 1012 |
.filter_frame = filter_frame, |
1013 | 1013 |
.config_props = config_input, |
1014 |
- .min_perms = AV_PERM_WRITE | |
|
1015 |
- AV_PERM_READ, |
|
1014 |
+ .needs_writable = 1, |
|
1016 | 1015 |
}, |
1017 | 1016 |
{ NULL } |
1018 | 1017 |
}; |
... | ... |
@@ -249,21 +249,21 @@ static void double_threshold(AVFilterContext *ctx, int w, int h, |
249 | 249 |
} |
250 | 250 |
} |
251 | 251 |
|
252 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
252 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
253 | 253 |
{ |
254 | 254 |
AVFilterContext *ctx = inlink->dst; |
255 | 255 |
EdgeDetectContext *edgedetect = ctx->priv; |
256 | 256 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
257 | 257 |
uint8_t *tmpbuf = edgedetect->tmpbuf; |
258 | 258 |
uint16_t *gradients = edgedetect->gradients; |
259 |
- AVFilterBufferRef *out; |
|
259 |
+ AVFrame *out; |
|
260 | 260 |
|
261 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
261 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
262 | 262 |
if (!out) { |
263 |
- avfilter_unref_bufferp(&in); |
|
263 |
+ av_frame_free(&in); |
|
264 | 264 |
return AVERROR(ENOMEM); |
265 | 265 |
} |
266 |
- avfilter_copy_buffer_ref_props(out, in); |
|
266 |
+ av_frame_copy_props(out, in); |
|
267 | 267 |
|
268 | 268 |
/* gaussian filter to reduce noise */ |
269 | 269 |
gaussian_blur(ctx, inlink->w, inlink->h, |
... | ... |
@@ -287,7 +287,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
287 | 287 |
out->data[0], out->linesize[0], |
288 | 288 |
tmpbuf, inlink->w); |
289 | 289 |
|
290 |
- avfilter_unref_bufferp(&in); |
|
290 |
+ av_frame_free(&in); |
|
291 | 291 |
return ff_filter_frame(outlink, out); |
292 | 292 |
} |
293 | 293 |
|
... | ... |
@@ -305,7 +305,6 @@ static const AVFilterPad edgedetect_inputs[] = { |
305 | 305 |
.type = AVMEDIA_TYPE_VIDEO, |
306 | 306 |
.config_props = config_props, |
307 | 307 |
.filter_frame = filter_frame, |
308 |
- .min_perms = AV_PERM_READ, |
|
309 | 308 |
}, |
310 | 309 |
{ NULL } |
311 | 310 |
}; |
... | ... |
@@ -178,7 +178,7 @@ static void fade_plane(int y, int h, int w, |
178 | 178 |
} |
179 | 179 |
} |
180 | 180 |
|
181 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
181 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
182 | 182 |
{ |
183 | 183 |
FadeContext *fade = inlink->dst->priv; |
184 | 184 |
uint8_t *p; |
... | ... |
@@ -189,21 +189,21 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
189 | 189 |
// alpha only |
190 | 190 |
plane = fade->is_packed_rgb ? 0 : A; // alpha is on plane 0 for packed formats |
191 | 191 |
// or plane 3 for planar formats |
192 |
- fade_plane(0, frame->video->h, inlink->w, |
|
192 |
+ fade_plane(0, frame->height, inlink->w, |
|
193 | 193 |
fade->factor, fade->black_level, fade->black_level_scaled, |
194 | 194 |
fade->is_packed_rgb ? fade->rgba_map[A] : 0, // alpha offset for packed formats |
195 | 195 |
fade->is_packed_rgb ? 4 : 1, // pixstep for 8 bit packed formats |
196 | 196 |
1, frame->data[plane], frame->linesize[plane]); |
197 | 197 |
} else { |
198 | 198 |
/* luma or rgb plane */ |
199 |
- fade_plane(0, frame->video->h, inlink->w, |
|
199 |
+ fade_plane(0, frame->height, inlink->w, |
|
200 | 200 |
fade->factor, fade->black_level, fade->black_level_scaled, |
201 | 201 |
0, 1, // offset & pixstep for Y plane or RGB packed format |
202 | 202 |
fade->bpp, frame->data[0], frame->linesize[0]); |
203 | 203 |
if (frame->data[1] && frame->data[2]) { |
204 | 204 |
/* chroma planes */ |
205 | 205 |
for (plane = 1; plane < 3; plane++) { |
206 |
- for (i = 0; i < frame->video->h; i++) { |
|
206 |
+ for (i = 0; i < frame->height; i++) { |
|
207 | 207 |
p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane]; |
208 | 208 |
for (j = 0; j < inlink->w >> fade->hsub; j++) { |
209 | 209 |
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer |
... | ... |
@@ -234,7 +234,7 @@ static const AVFilterPad avfilter_vf_fade_inputs[] = { |
234 | 234 |
.config_props = config_props, |
235 | 235 |
.get_video_buffer = ff_null_get_video_buffer, |
236 | 236 |
.filter_frame = filter_frame, |
237 |
- .min_perms = AV_PERM_READ | AV_PERM_WRITE, |
|
237 |
+ .needs_writable = 1, |
|
238 | 238 |
}, |
239 | 239 |
{ NULL } |
240 | 240 |
}; |
... | ... |
@@ -82,14 +82,14 @@ static int config_props_output(AVFilterLink *outlink) |
82 | 82 |
return 0; |
83 | 83 |
} |
84 | 84 |
|
85 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
85 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
86 | 86 |
{ |
87 | 87 |
FieldContext *field = inlink->dst->priv; |
88 | 88 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
89 | 89 |
int i; |
90 | 90 |
|
91 |
- inpicref->video->h = outlink->h; |
|
92 |
- inpicref->video->interlaced = 0; |
|
91 |
+ inpicref->height = outlink->h; |
|
92 |
+ inpicref->interlaced_frame = 0; |
|
93 | 93 |
|
94 | 94 |
for (i = 0; i < field->nb_planes; i++) { |
95 | 95 |
if (field->type == FIELD_TYPE_BOTTOM) |
... | ... |
@@ -113,15 +113,15 @@ static int config_input(AVFilterLink *inlink) |
113 | 113 |
return 0; |
114 | 114 |
} |
115 | 115 |
|
116 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) |
|
116 |
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) |
|
117 | 117 |
{ |
118 | 118 |
AVFilterContext *ctx = inlink->dst; |
119 | 119 |
AVFilterLink *outlink = ctx->outputs[0]; |
120 | 120 |
|
121 |
- return ff_get_video_buffer(outlink, perms, w, h); |
|
121 |
+ return ff_get_video_buffer(outlink, w, h); |
|
122 | 122 |
} |
123 | 123 |
|
124 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
124 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
125 | 125 |
{ |
126 | 126 |
AVFilterContext *ctx = inlink->dst; |
127 | 127 |
FieldOrderContext *s = ctx->priv; |
... | ... |
@@ -129,14 +129,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
129 | 129 |
int h, plane, line_step, line_size, line; |
130 | 130 |
uint8_t *data; |
131 | 131 |
|
132 |
- if (!frame->video->interlaced || |
|
133 |
- frame->video->top_field_first == s->dst_tff) |
|
132 |
+ if (!frame->interlaced_frame || |
|
133 |
+ frame->top_field_first == s->dst_tff) |
|
134 | 134 |
return ff_filter_frame(outlink, frame); |
135 | 135 |
|
136 | 136 |
av_dlog(ctx, |
137 | 137 |
"picture will move %s one line\n", |
138 | 138 |
s->dst_tff ? "up" : "down"); |
139 |
- h = frame->video->h; |
|
139 |
+ h = frame->height; |
|
140 | 140 |
for (plane = 0; plane < 4 && frame->data[plane]; plane++) { |
141 | 141 |
line_step = frame->linesize[plane]; |
142 | 142 |
line_size = s->line_size[plane]; |
... | ... |
@@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
148 | 148 |
* The new last line is created as a copy of the |
149 | 149 |
* penultimate line from that field. */ |
150 | 150 |
for (line = 0; line < h; line++) { |
151 |
- if (1 + line < frame->video->h) { |
|
151 |
+ if (1 + line < frame->height) { |
|
152 | 152 |
memcpy(data, data + line_step, line_size); |
153 | 153 |
} else { |
154 | 154 |
memcpy(data, data - line_step - line_step, line_size); |
... | ... |
@@ -172,7 +172,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
172 | 172 |
} |
173 | 173 |
} |
174 | 174 |
} |
175 |
- frame->video->top_field_first = s->dst_tff; |
|
175 |
+ frame->top_field_first = s->dst_tff; |
|
176 | 176 |
|
177 | 177 |
return ff_filter_frame(outlink, frame); |
178 | 178 |
} |
... | ... |
@@ -184,7 +184,7 @@ static const AVFilterPad avfilter_vf_fieldorder_inputs[] = { |
184 | 184 |
.config_props = config_input, |
185 | 185 |
.get_video_buffer = get_video_buffer, |
186 | 186 |
.filter_frame = filter_frame, |
187 |
- .min_perms = AV_PERM_READ | AV_PERM_WRITE, |
|
187 |
+ .needs_writable = 1, |
|
188 | 188 |
}, |
189 | 189 |
{ NULL } |
190 | 190 |
}; |
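
A small sketch of the pad declaration style used in the hunks above: filters that modify frames in place now set .needs_writable instead of requesting AV_PERM_WRITE through .min_perms. The field names come from the hunks; the pad itself is illustrative only.

    static const AVFilterPad inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_VIDEO,
            .filter_frame   = filter_frame,
            .needs_writable = 1,   /* was: .min_perms = AV_PERM_READ | AV_PERM_WRITE */
        },
        { NULL }
    };
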
... | ... |
@@ -89,7 +89,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
89 | 89 |
} |
90 | 90 |
av_opt_free(s); |
91 | 91 |
|
92 |
- if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFilterBufferRef*)))) |
|
92 |
+ if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*)))) |
|
93 | 93 |
return AVERROR(ENOMEM); |
94 | 94 |
|
95 | 95 |
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den); |
... | ... |
@@ -99,9 +99,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
99 | 99 |
static void flush_fifo(AVFifoBuffer *fifo) |
100 | 100 |
{ |
101 | 101 |
while (av_fifo_size(fifo)) { |
102 |
- AVFilterBufferRef *tmp; |
|
102 |
+ AVFrame *tmp; |
|
103 | 103 |
av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL); |
104 |
- avfilter_unref_buffer(tmp); |
|
104 |
+ av_frame_free(&tmp); |
|
105 | 105 |
} |
106 | 106 |
} |
107 | 107 |
|
... | ... |
@@ -109,7 +109,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
109 | 109 |
{ |
110 | 110 |
FPSContext *s = ctx->priv; |
111 | 111 |
if (s->fifo) { |
112 |
- s->drop += av_fifo_size(s->fifo) / sizeof(AVFilterBufferRef*); |
|
112 |
+ s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*); |
|
113 | 113 |
flush_fifo(s->fifo); |
114 | 114 |
av_fifo_free(s->fifo); |
115 | 115 |
} |
... | ... |
@@ -145,7 +145,7 @@ static int request_frame(AVFilterLink *outlink) |
145 | 145 |
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { |
146 | 146 |
int i; |
147 | 147 |
for (i = 0; av_fifo_size(s->fifo); i++) { |
148 |
- AVFilterBufferRef *buf; |
|
148 |
+ AVFrame *buf; |
|
149 | 149 |
|
150 | 150 |
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); |
151 | 151 |
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, |
... | ... |
@@ -162,13 +162,13 @@ static int request_frame(AVFilterLink *outlink) |
162 | 162 |
return ret; |
163 | 163 |
} |
164 | 164 |
|
165 |
-static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf) |
|
165 |
+static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf) |
|
166 | 166 |
{ |
167 | 167 |
int ret; |
168 | 168 |
|
169 | 169 |
if (!av_fifo_space(fifo) && |
170 | 170 |
(ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) { |
171 |
- avfilter_unref_bufferp(&buf); |
|
171 |
+ av_frame_free(&buf); |
|
172 | 172 |
return ret; |
173 | 173 |
} |
174 | 174 |
|
... | ... |
@@ -176,7 +176,7 @@ static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf) |
176 | 176 |
return 0; |
177 | 177 |
} |
178 | 178 |
|
179 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
179 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf) |
|
180 | 180 |
{ |
181 | 181 |
AVFilterContext *ctx = inlink->dst; |
182 | 182 |
FPSContext *s = ctx->priv; |
... | ... |
@@ -196,7 +196,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
196 | 196 |
} else { |
197 | 197 |
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " |
198 | 198 |
"timestamp.\n"); |
199 |
- avfilter_unref_buffer(buf); |
|
199 |
+ av_frame_free(&buf); |
|
200 | 200 |
s->drop++; |
201 | 201 |
} |
202 | 202 |
return 0; |
... | ... |
@@ -213,8 +213,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
213 | 213 |
|
214 | 214 |
if (delta < 1) { |
215 | 215 |
/* drop the frame and everything buffered except the first */ |
216 |
- AVFilterBufferRef *tmp; |
|
217 |
- int drop = av_fifo_size(s->fifo)/sizeof(AVFilterBufferRef*); |
|
216 |
+ AVFrame *tmp; |
|
217 |
+ int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); |
|
218 | 218 |
|
219 | 219 |
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); |
220 | 220 |
s->drop += drop; |
... | ... |
@@ -223,18 +223,18 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
223 | 223 |
flush_fifo(s->fifo); |
224 | 224 |
ret = write_to_fifo(s->fifo, tmp); |
225 | 225 |
|
226 |
- avfilter_unref_buffer(buf); |
|
226 |
+ av_frame_free(&buf); |
|
227 | 227 |
return ret; |
228 | 228 |
} |
229 | 229 |
|
230 | 230 |
/* can output >= 1 frames */ |
231 | 231 |
for (i = 0; i < delta; i++) { |
232 |
- AVFilterBufferRef *buf_out; |
|
232 |
+ AVFrame *buf_out; |
|
233 | 233 |
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); |
234 | 234 |
|
235 | 235 |
/* duplicate the frame if needed */ |
236 | 236 |
if (!av_fifo_size(s->fifo) && i < delta - 1) { |
237 |
- AVFilterBufferRef *dup = avfilter_ref_buffer(buf_out, ~0); |
|
237 |
+ AVFrame *dup = av_frame_clone(buf_out); |
|
238 | 238 |
|
239 | 239 |
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); |
240 | 240 |
if (dup) |
... | ... |
@@ -243,8 +243,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
243 | 243 |
ret = AVERROR(ENOMEM); |
244 | 244 |
|
245 | 245 |
if (ret < 0) { |
246 |
- avfilter_unref_bufferp(&buf_out); |
|
247 |
- avfilter_unref_bufferp(&buf); |
|
246 |
+ av_frame_free(&buf_out); |
|
247 |
+ av_frame_free(&buf); |
|
248 | 248 |
return ret; |
249 | 249 |
} |
250 | 250 |
|
... | ... |
@@ -255,7 +255,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
255 | 255 |
outlink->time_base) + s->frames_out; |
256 | 256 |
|
257 | 257 |
if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { |
258 |
- avfilter_unref_bufferp(&buf); |
|
258 |
+ av_frame_free(&buf); |
|
259 | 259 |
return ret; |
260 | 260 |
} |
261 | 261 |
|
... | ... |
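
The fps hunks above now store plain AVFrame pointers in the AVFifoBuffer, so sizeof(AVFrame*) and av_frame_free() replace the buffer-ref equivalents. Below is a hedged sketch of the enqueue helper, mirroring write_to_fifo() above; the name is hypothetical and the code is not from the commit.

    static int queue_frame(AVFifoBuffer *fifo, AVFrame *frame)
    {
        int ret;

        /* grow the FIFO if needed; on failure drop our reference to the frame */
        if (!av_fifo_space(fifo) &&
            (ret = av_fifo_realloc2(fifo, 2 * av_fifo_size(fifo))) < 0) {
            av_frame_free(&frame);
            return ret;
        }
        av_fifo_generic_write(fifo, &frame, sizeof(frame), NULL);
        return 0;
    }
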
@@ -66,7 +66,7 @@ static int config_output_props(AVFilterLink *outlink) |
66 | 66 |
return 0; |
67 | 67 |
} |
68 | 68 |
|
69 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) |
|
69 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref) |
|
70 | 70 |
{ |
71 | 71 |
FrameStepContext *framestep = inlink->dst->priv; |
72 | 72 |
|
... | ... |
@@ -75,7 +75,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) |
75 | 75 |
return ff_filter_frame(inlink->dst->outputs[0], ref); |
76 | 76 |
} else { |
77 | 77 |
framestep->frame_selected = 0; |
78 |
- avfilter_unref_buffer(ref); |
|
78 |
+ av_frame_free(&ref); |
|
79 | 79 |
return 0; |
80 | 80 |
} |
81 | 81 |
} |
... | ... |
@@ -379,24 +379,24 @@ static int query_formats(AVFilterContext *ctx) |
379 | 379 |
return 0; |
380 | 380 |
} |
381 | 381 |
|
382 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
382 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
383 | 383 |
{ |
384 | 384 |
Frei0rContext *frei0r = inlink->dst->priv; |
385 | 385 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
386 |
- AVFilterBufferRef *out; |
|
386 |
+ AVFrame *out; |
|
387 | 387 |
|
388 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
388 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
389 | 389 |
if (!out) { |
390 |
- avfilter_unref_bufferp(&in); |
|
390 |
+ av_frame_free(&in); |
|
391 | 391 |
return AVERROR(ENOMEM); |
392 | 392 |
} |
393 |
- avfilter_copy_buffer_ref_props(out, in); |
|
393 |
+ av_frame_copy_props(out, in); |
|
394 | 394 |
|
395 | 395 |
frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000, |
396 | 396 |
(const uint32_t *)in->data[0], |
397 | 397 |
(uint32_t *)out->data[0]); |
398 | 398 |
|
399 |
- avfilter_unref_bufferp(&in); |
|
399 |
+ av_frame_free(&in); |
|
400 | 400 |
|
401 | 401 |
return ff_filter_frame(outlink, out); |
402 | 402 |
} |
... | ... |
@@ -407,7 +407,6 @@ static const AVFilterPad avfilter_vf_frei0r_inputs[] = { |
407 | 407 |
.type = AVMEDIA_TYPE_VIDEO, |
408 | 408 |
.config_props = config_input_props, |
409 | 409 |
.filter_frame = filter_frame, |
410 |
- .min_perms = AV_PERM_READ |
|
411 | 410 |
}, |
412 | 411 |
{ NULL } |
413 | 412 |
}; |
... | ... |
@@ -487,19 +486,18 @@ static int source_config_props(AVFilterLink *outlink) |
487 | 487 |
static int source_request_frame(AVFilterLink *outlink) |
488 | 488 |
{ |
489 | 489 |
Frei0rContext *frei0r = outlink->src->priv; |
490 |
- AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
490 |
+ AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
491 | 491 |
|
492 |
- if (!picref) |
|
492 |
+ if (!frame) |
|
493 | 493 |
return AVERROR(ENOMEM); |
494 | 494 |
|
495 |
- picref->video->sample_aspect_ratio = (AVRational) {1, 1}; |
|
496 |
- picref->pts = frei0r->pts++; |
|
497 |
- picref->pos = -1; |
|
495 |
+ frame->sample_aspect_ratio = (AVRational) {1, 1}; |
|
496 |
+ frame->pts = frei0r->pts++; |
|
498 | 497 |
|
499 |
- frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}), |
|
500 |
- NULL, (uint32_t *)picref->data[0]); |
|
498 |
+ frei0r->update(frei0r->instance, av_rescale_q(frame->pts, frei0r->time_base, (AVRational){1,1000}), |
|
499 |
+ NULL, (uint32_t *)frame->data[0]); |
|
501 | 500 |
|
502 |
- return ff_filter_frame(outlink, picref); |
|
501 |
+ return ff_filter_frame(outlink, frame); |
|
503 | 502 |
} |
504 | 503 |
|
505 | 504 |
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = { |
... | ... |
@@ -37,7 +37,7 @@ typedef struct { |
37 | 37 |
AVExpr *e[4]; ///< expressions for each plane |
38 | 38 |
char *expr_str[4]; ///< expression strings for each plane |
39 | 39 |
int framenum; ///< frame counter |
40 |
- AVFilterBufferRef *picref; ///< current input buffer |
|
40 |
+ AVFrame *picref; ///< current input buffer |
|
41 | 41 |
int hsub, vsub; ///< chroma subsampling |
42 | 42 |
int planes; ///< number of planes |
43 | 43 |
} GEQContext; |
... | ... |
@@ -59,11 +59,11 @@ static inline double getpix(void *priv, double x, double y, int plane) |
59 | 59 |
{ |
60 | 60 |
int xi, yi; |
61 | 61 |
GEQContext *geq = priv; |
62 |
- AVFilterBufferRef *picref = geq->picref; |
|
62 |
+ AVFrame *picref = geq->picref; |
|
63 | 63 |
const uint8_t *src = picref->data[plane]; |
64 | 64 |
const int linesize = picref->linesize[plane]; |
65 |
- const int w = picref->video->w >> ((plane == 1 || plane == 2) ? geq->hsub : 0); |
|
66 |
- const int h = picref->video->h >> ((plane == 1 || plane == 2) ? geq->vsub : 0); |
|
65 |
+ const int w = picref->width >> ((plane == 1 || plane == 2) ? geq->hsub : 0); |
|
66 |
+ const int h = picref->height >> ((plane == 1 || plane == 2) ? geq->vsub : 0); |
|
67 | 67 |
|
68 | 68 |
if (!src) |
69 | 69 |
return 0; |
... | ... |
@@ -163,24 +163,24 @@ static int geq_config_props(AVFilterLink *inlink) |
163 | 163 |
return 0; |
164 | 164 |
} |
165 | 165 |
|
166 |
-static int geq_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
166 |
+static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
167 | 167 |
{ |
168 | 168 |
int plane; |
169 | 169 |
GEQContext *geq = inlink->dst->priv; |
170 | 170 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
171 |
- AVFilterBufferRef *out; |
|
171 |
+ AVFrame *out; |
|
172 | 172 |
double values[VAR_VARS_NB] = { |
173 | 173 |
[VAR_N] = geq->framenum++, |
174 | 174 |
[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base), |
175 | 175 |
}; |
176 | 176 |
|
177 | 177 |
geq->picref = in; |
178 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
178 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
179 | 179 |
if (!out) { |
180 |
- avfilter_unref_bufferp(&in); |
|
180 |
+ av_frame_free(&in); |
|
181 | 181 |
return AVERROR(ENOMEM); |
182 | 182 |
} |
183 |
- avfilter_copy_buffer_ref_props(out, in); |
|
183 |
+ av_frame_copy_props(out, in); |
|
184 | 184 |
|
185 | 185 |
for (plane = 0; plane < geq->planes && out->data[plane]; plane++) { |
186 | 186 |
int x, y; |
... | ... |
@@ -204,7 +204,7 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
204 | 204 |
} |
205 | 205 |
} |
206 | 206 |
|
207 |
- avfilter_unref_bufferp(&geq->picref); |
|
207 |
+ av_frame_free(&geq->picref); |
|
208 | 208 |
return ff_filter_frame(outlink, out); |
209 | 209 |
} |
210 | 210 |
|
... | ... |
@@ -224,7 +224,6 @@ static const AVFilterPad geq_inputs[] = { |
224 | 224 |
.type = AVMEDIA_TYPE_VIDEO, |
225 | 225 |
.config_props = geq_config_props, |
226 | 226 |
.filter_frame = geq_filter_frame, |
227 |
- .min_perms = AV_PERM_READ, |
|
228 | 227 |
}, |
229 | 228 |
{ NULL } |
230 | 229 |
}; |
... | ... |
@@ -197,23 +197,23 @@ static int config_input(AVFilterLink *inlink) |
197 | 197 |
return 0; |
198 | 198 |
} |
199 | 199 |
|
200 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
200 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
201 | 201 |
{ |
202 | 202 |
GradFunContext *gf = inlink->dst->priv; |
203 | 203 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
204 |
- AVFilterBufferRef *out; |
|
204 |
+ AVFrame *out; |
|
205 | 205 |
int p, direct = 0; |
206 | 206 |
|
207 |
- if (in->perms & AV_PERM_WRITE) { |
|
207 |
+ if (av_frame_is_writable(in)) { |
|
208 | 208 |
direct = 1; |
209 | 209 |
out = in; |
210 | 210 |
} else { |
211 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
211 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
212 | 212 |
if (!out) { |
213 |
- avfilter_unref_bufferp(&in); |
|
213 |
+ av_frame_free(&in); |
|
214 | 214 |
return AVERROR(ENOMEM); |
215 | 215 |
} |
216 |
- avfilter_copy_buffer_ref_props(out, in); |
|
216 |
+ av_frame_copy_props(out, in); |
|
217 | 217 |
} |
218 | 218 |
|
219 | 219 |
for (p = 0; p < 4 && in->data[p]; p++) { |
... | ... |
@@ -233,7 +233,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
233 | 233 |
} |
234 | 234 |
|
235 | 235 |
if (!direct) |
236 |
- avfilter_unref_bufferp(&in); |
|
236 |
+ av_frame_free(&in); |
|
237 | 237 |
|
238 | 238 |
return ff_filter_frame(outlink, out); |
239 | 239 |
} |
... | ... |
@@ -244,7 +244,6 @@ static const AVFilterPad avfilter_vf_gradfun_inputs[] = { |
244 | 244 |
.type = AVMEDIA_TYPE_VIDEO, |
245 | 245 |
.config_props = config_input, |
246 | 246 |
.filter_frame = filter_frame, |
247 |
- .min_perms = AV_PERM_READ, |
|
248 | 247 |
}, |
249 | 248 |
{ NULL } |
250 | 249 |
}; |
... | ... |
@@ -70,21 +70,21 @@ static int config_props(AVFilterLink *inlink) |
70 | 70 |
return 0; |
71 | 71 |
} |
72 | 72 |
|
73 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
73 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
74 | 74 |
{ |
75 | 75 |
AVFilterContext *ctx = inlink->dst; |
76 | 76 |
FlipContext *flip = ctx->priv; |
77 | 77 |
AVFilterLink *outlink = ctx->outputs[0]; |
78 |
- AVFilterBufferRef *out; |
|
78 |
+ AVFrame *out; |
|
79 | 79 |
uint8_t *inrow, *outrow; |
80 | 80 |
int i, j, plane, step, hsub, vsub; |
81 | 81 |
|
82 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
82 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
83 | 83 |
if (!out) { |
84 |
- avfilter_unref_bufferp(&in); |
|
84 |
+ av_frame_free(&in); |
|
85 | 85 |
return AVERROR(ENOMEM); |
86 | 86 |
} |
87 |
- avfilter_copy_buffer_ref_props(out, in); |
|
87 |
+ av_frame_copy_props(out, in); |
|
88 | 88 |
|
89 | 89 |
/* copy palette if required */ |
90 | 90 |
if (av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_PAL) |
... | ... |
@@ -97,7 +97,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
97 | 97 |
|
98 | 98 |
outrow = out->data[plane]; |
99 | 99 |
inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step; |
100 |
- for (i = 0; i < in->video->h >> vsub; i++) { |
|
100 |
+ for (i = 0; i < in->height >> vsub; i++) { |
|
101 | 101 |
switch (step) { |
102 | 102 |
case 1: |
103 | 103 |
for (j = 0; j < (inlink->w >> hsub); j++) |
... | ... |
@@ -143,7 +143,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
143 | 143 |
} |
144 | 144 |
} |
145 | 145 |
|
146 |
- avfilter_unref_bufferp(&in); |
|
146 |
+ av_frame_free(&in); |
|
147 | 147 |
return ff_filter_frame(outlink, out); |
148 | 148 |
} |
149 | 149 |
|
... | ... |
@@ -153,7 +153,6 @@ static const AVFilterPad avfilter_vf_hflip_inputs[] = { |
153 | 153 |
.type = AVMEDIA_TYPE_VIDEO, |
154 | 154 |
.filter_frame = filter_frame, |
155 | 155 |
.config_props = config_props, |
156 |
- .min_perms = AV_PERM_READ, |
|
157 | 156 |
}, |
158 | 157 |
{ NULL } |
159 | 158 |
}; |
... | ... |
@@ -142,7 +142,7 @@ static int config_input(AVFilterLink *inlink) |
142 | 142 |
b = src[x + map[B]]; \ |
143 | 143 |
} while (0) |
144 | 144 |
|
145 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
|
145 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
|
146 | 146 |
{ |
147 | 147 |
AVFilterContext *ctx = inlink->dst; |
148 | 148 |
HisteqContext *histeq = ctx->priv; |
... | ... |
@@ -150,16 +150,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
150 | 150 |
int strength = histeq->strength * 1000; |
151 | 151 |
int intensity = histeq->intensity * 1000; |
152 | 152 |
int x, y, i, luthi, lutlo, lut, luma, oluma, m; |
153 |
- AVFilterBufferRef *outpic; |
|
153 |
+ AVFrame *outpic; |
|
154 | 154 |
unsigned int r, g, b, jran; |
155 | 155 |
uint8_t *src, *dst; |
156 | 156 |
|
157 |
- outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); |
|
157 |
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
158 | 158 |
if (!outpic) { |
159 |
- avfilter_unref_bufferp(&inpic); |
|
159 |
+ av_frame_free(&inpic); |
|
160 | 160 |
return AVERROR(ENOMEM); |
161 | 161 |
} |
162 |
- avfilter_copy_buffer_ref_props(outpic, inpic); |
|
162 |
+ av_frame_copy_props(outpic, inpic); |
|
163 | 163 |
|
164 | 164 |
/* Seed random generator for antibanding. */ |
165 | 165 |
jran = LCG_SEED; |
... | ... |
@@ -261,7 +261,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
261 | 261 |
av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]); |
262 | 262 |
#endif |
263 | 263 |
|
264 |
- avfilter_unref_bufferp(&inpic); |
|
264 |
+ av_frame_free(&inpic); |
|
265 | 265 |
return ff_filter_frame(outlink, outpic); |
266 | 266 |
} |
267 | 267 |
|
... | ... |
@@ -271,7 +271,6 @@ static const AVFilterPad histeq_inputs[] = { |
271 | 271 |
.type = AVMEDIA_TYPE_VIDEO, |
272 | 272 |
.config_props = config_input, |
273 | 273 |
.filter_frame = filter_frame, |
274 |
- .min_perms = AV_PERM_READ, |
|
275 | 274 |
}, |
276 | 275 |
{ NULL } |
277 | 276 |
}; |
... | ... |
@@ -174,24 +174,23 @@ static int config_output(AVFilterLink *outlink) |
174 | 174 |
return 0; |
175 | 175 |
} |
176 | 176 |
|
177 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
177 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
178 | 178 |
{ |
179 | 179 |
HistogramContext *h = inlink->dst->priv; |
180 | 180 |
AVFilterContext *ctx = inlink->dst; |
181 | 181 |
AVFilterLink *outlink = ctx->outputs[0]; |
182 |
- AVFilterBufferRef *out; |
|
182 |
+ AVFrame *out; |
|
183 | 183 |
const uint8_t *src; |
184 | 184 |
uint8_t *dst; |
185 | 185 |
int i, j, k, l, ret; |
186 | 186 |
|
187 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
187 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
188 | 188 |
if (!out) { |
189 |
- avfilter_unref_bufferp(&in); |
|
189 |
+ av_frame_free(&in); |
|
190 | 190 |
return AVERROR(ENOMEM); |
191 | 191 |
} |
192 | 192 |
|
193 | 193 |
out->pts = in->pts; |
194 |
- out->pos = in->pos; |
|
195 | 194 |
|
196 | 195 |
for (k = 0; k < h->ncomp; k++) |
197 | 196 |
for (i = 0; i < outlink->h; i++) |
... | ... |
@@ -202,9 +201,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
202 | 202 |
for (k = 0; k < h->ncomp; k++) { |
203 | 203 |
int start = k * (h->level_height + h->scale_height) * h->display_mode; |
204 | 204 |
|
205 |
- for (i = 0; i < in->video->h; i++) { |
|
205 |
+ for (i = 0; i < in->height; i++) { |
|
206 | 206 |
src = in->data[k] + i * in->linesize[k]; |
207 |
- for (j = 0; j < in->video->w; j++) |
|
207 |
+ for (j = 0; j < in->width; j++) |
|
208 | 208 |
h->histogram[src[j]]++; |
209 | 209 |
} |
210 | 210 |
|
... | ... |
@@ -301,7 +300,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
301 | 301 |
} |
302 | 302 |
|
303 | 303 |
ret = ff_filter_frame(outlink, out); |
304 |
- avfilter_unref_bufferp(&in); |
|
304 |
+ av_frame_free(&in); |
|
305 | 305 |
if (ret < 0) |
306 | 306 |
return ret; |
307 | 307 |
return 0; |
... | ... |
@@ -320,7 +319,6 @@ static const AVFilterPad inputs[] = { |
320 | 320 |
.type = AVMEDIA_TYPE_VIDEO, |
321 | 321 |
.filter_frame = filter_frame, |
322 | 322 |
.config_props = config_input, |
323 |
- .min_perms = AV_PERM_READ, |
|
324 | 323 |
}, |
325 | 324 |
{ NULL } |
326 | 325 |
}; |
... | ... |
@@ -304,37 +304,38 @@ static int config_input(AVFilterLink *inlink) |
304 | 304 |
return 0; |
305 | 305 |
} |
306 | 306 |
|
307 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
307 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
308 | 308 |
{ |
309 | 309 |
HQDN3DContext *hqdn3d = inlink->dst->priv; |
310 | 310 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
311 | 311 |
|
312 |
- AVFilterBufferRef *out; |
|
312 |
+ AVFrame *out; |
|
313 | 313 |
int direct = 0, c; |
314 | 314 |
|
315 |
- if (in->perms & AV_PERM_WRITE) { |
|
315 |
+ if (av_frame_is_writable(in)) { |
|
316 | 316 |
direct = 1; |
317 | 317 |
out = in; |
318 | 318 |
} else { |
319 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
319 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
320 | 320 |
if (!out) { |
321 |
- avfilter_unref_bufferp(&in); |
|
321 |
+ av_frame_free(&in); |
|
322 | 322 |
return AVERROR(ENOMEM); |
323 | 323 |
} |
324 |
- avfilter_copy_buffer_ref_props(out, in); |
|
324 |
+ |
|
325 |
+ av_frame_copy_props(out, in); |
|
325 | 326 |
} |
326 | 327 |
|
327 | 328 |
for (c = 0; c < 3; c++) { |
328 | 329 |
denoise(hqdn3d, in->data[c], out->data[c], |
329 | 330 |
hqdn3d->line, &hqdn3d->frame_prev[c], |
330 |
- in->video->w >> (!!c * hqdn3d->hsub), |
|
331 |
- in->video->h >> (!!c * hqdn3d->vsub), |
|
331 |
+ in->width >> (!!c * hqdn3d->hsub), |
|
332 |
+ in->height >> (!!c * hqdn3d->vsub), |
|
332 | 333 |
in->linesize[c], out->linesize[c], |
333 | 334 |
hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]); |
334 | 335 |
} |
335 | 336 |
|
336 | 337 |
if (!direct) |
337 |
- avfilter_unref_bufferp(&in); |
|
338 |
+ av_frame_free(&in); |
|
338 | 339 |
|
339 | 340 |
return ff_filter_frame(outlink, out); |
340 | 341 |
} |
... | ... |
@@ -276,18 +276,18 @@ static void process_chrominance(uint8_t *udst, uint8_t *vdst, const int dst_line |
276 | 276 |
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) |
277 | 277 |
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) |
278 | 278 |
|
279 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
|
279 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
|
280 | 280 |
{ |
281 | 281 |
HueContext *hue = inlink->dst->priv; |
282 | 282 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
283 |
- AVFilterBufferRef *outpic; |
|
283 |
+ AVFrame *outpic; |
|
284 | 284 |
|
285 |
- outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
285 |
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
286 | 286 |
if (!outpic) { |
287 |
- avfilter_unref_bufferp(&inpic); |
|
287 |
+ av_frame_free(&inpic); |
|
288 | 288 |
return AVERROR(ENOMEM); |
289 | 289 |
} |
290 |
- avfilter_copy_buffer_ref_props(outpic, inpic); |
|
290 |
+ av_frame_copy_props(outpic, inpic); |
|
291 | 291 |
|
292 | 292 |
if (!hue->flat_syntax) { |
293 | 293 |
hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base); |
... | ... |
@@ -330,7 +330,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
330 | 330 |
inlink->w >> hue->hsub, inlink->h >> hue->vsub, |
331 | 331 |
hue->hue_cos, hue->hue_sin); |
332 | 332 |
|
333 |
- avfilter_unref_bufferp(&inpic); |
|
333 |
+ av_frame_free(&inpic); |
|
334 | 334 |
return ff_filter_frame(outlink, outpic); |
335 | 335 |
} |
336 | 336 |
|
... | ... |
@@ -349,7 +349,6 @@ static const AVFilterPad hue_inputs[] = { |
349 | 349 |
.type = AVMEDIA_TYPE_VIDEO, |
350 | 350 |
.filter_frame = filter_frame, |
351 | 351 |
.config_props = config_props, |
352 |
- .min_perms = AV_PERM_READ, |
|
353 | 352 |
}, |
354 | 353 |
{ NULL } |
355 | 354 |
}; |
... | ... |
@@ -47,9 +47,9 @@ typedef struct { |
47 | 47 |
|
48 | 48 |
uint8_t history[HIST_SIZE]; |
49 | 49 |
|
50 |
- AVFilterBufferRef *cur; |
|
51 |
- AVFilterBufferRef *next; |
|
52 |
- AVFilterBufferRef *prev; |
|
50 |
+ AVFrame *cur; |
|
51 |
+ AVFrame *next; |
|
52 |
+ AVFrame *prev; |
|
53 | 53 |
int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w); |
54 | 54 |
|
55 | 55 |
const AVPixFmtDescriptor *csp; |
... | ... |
@@ -113,8 +113,8 @@ static void filter(AVFilterContext *ctx) |
113 | 113 |
int match = 0; |
114 | 114 |
|
115 | 115 |
for (i = 0; i < idet->csp->nb_components; i++) { |
116 |
- int w = idet->cur->video->w; |
|
117 |
- int h = idet->cur->video->h; |
|
116 |
+ int w = idet->cur->width; |
|
117 |
+ int h = idet->cur->height; |
|
118 | 118 |
int refs = idet->cur->linesize[i]; |
119 | 119 |
|
120 | 120 |
if (i && i<3) { |
... | ... |
@@ -165,13 +165,13 @@ static void filter(AVFilterContext *ctx) |
165 | 165 |
} |
166 | 166 |
|
167 | 167 |
if (idet->last_type == TFF){ |
168 |
- idet->cur->video->top_field_first = 1; |
|
169 |
- idet->cur->video->interlaced = 1; |
|
168 |
+ idet->cur->top_field_first = 1; |
|
169 |
+ idet->cur->interlaced_frame = 1; |
|
170 | 170 |
}else if(idet->last_type == BFF){ |
171 |
- idet->cur->video->top_field_first = 0; |
|
172 |
- idet->cur->video->interlaced = 1; |
|
171 |
+ idet->cur->top_field_first = 0; |
|
172 |
+ idet->cur->interlaced_frame = 1; |
|
173 | 173 |
}else if(idet->last_type == PROGRSSIVE){ |
174 |
- idet->cur->video->interlaced = 0; |
|
174 |
+ idet->cur->interlaced_frame = 0; |
|
175 | 175 |
} |
176 | 176 |
|
177 | 177 |
idet->prestat [ type] ++; |
... | ... |
@@ -179,13 +179,13 @@ static void filter(AVFilterContext *ctx) |
179 | 179 |
av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type)); |
180 | 180 |
} |
181 | 181 |
|
182 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
182 |
+static int filter_frame(AVFilterLink *link, AVFrame *picref) |
|
183 | 183 |
{ |
184 | 184 |
AVFilterContext *ctx = link->dst; |
185 | 185 |
IDETContext *idet = ctx->priv; |
186 | 186 |
|
187 | 187 |
if (idet->prev) |
188 |
- avfilter_unref_buffer(idet->prev); |
|
188 |
+ av_frame_free(&idet->prev); |
|
189 | 189 |
idet->prev = idet->cur; |
190 | 190 |
idet->cur = idet->next; |
191 | 191 |
idet->next = picref; |
... | ... |
@@ -194,7 +194,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
194 | 194 |
return 0; |
195 | 195 |
|
196 | 196 |
if (!idet->prev) |
197 |
- idet->prev = avfilter_ref_buffer(idet->cur, ~0); |
|
197 |
+ idet->prev = av_frame_clone(idet->cur); |
|
198 | 198 |
|
199 | 199 |
if (!idet->csp) |
200 | 200 |
idet->csp = av_pix_fmt_desc_get(link->format); |
... | ... |
@@ -203,7 +203,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
203 | 203 |
|
204 | 204 |
filter(ctx); |
205 | 205 |
|
206 |
- return ff_filter_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, ~0)); |
|
206 |
+ return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur)); |
|
207 | 207 |
} |
208 | 208 |
|
209 | 209 |
static int request_frame(AVFilterLink *link) |
... | ... |
@@ -238,9 +238,9 @@ static av_cold void uninit(AVFilterContext *ctx) |
238 | 238 |
idet->poststat[UNDETERMINED] |
239 | 239 |
); |
240 | 240 |
|
241 |
- avfilter_unref_bufferp(&idet->prev); |
|
242 |
- avfilter_unref_bufferp(&idet->cur ); |
|
243 |
- avfilter_unref_bufferp(&idet->next); |
|
241 |
+ av_frame_free(&idet->prev); |
|
242 |
+ av_frame_free(&idet->cur ); |
|
243 |
+ av_frame_free(&idet->next); |
|
244 | 244 |
} |
245 | 245 |
|
246 | 246 |
static int query_formats(AVFilterContext *ctx) |
... | ... |
@@ -160,19 +160,19 @@ static void interleave(uint8_t *dst, uint8_t *src, int w, int h, |
160 | 160 |
} |
161 | 161 |
} |
162 | 162 |
|
163 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
163 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
164 | 164 |
{ |
165 | 165 |
IlContext *il = inlink->dst->priv; |
166 | 166 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
167 |
- AVFilterBufferRef *out; |
|
167 |
+ AVFrame *out; |
|
168 | 168 |
int ret, comp; |
169 | 169 |
|
170 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
170 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
171 | 171 |
if (!out) { |
172 |
- avfilter_unref_bufferp(&inpicref); |
|
172 |
+ av_frame_free(&inpicref); |
|
173 | 173 |
return AVERROR(ENOMEM); |
174 | 174 |
} |
175 |
- avfilter_copy_buffer_ref_props(out, inpicref); |
|
175 |
+ av_frame_copy_props(out, inpicref); |
|
176 | 176 |
|
177 | 177 |
interleave(out->data[0], inpicref->data[0], |
178 | 178 |
il->linesize[0], inlink->h, |
... | ... |
@@ -195,7 +195,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
195 | 195 |
} |
196 | 196 |
|
197 | 197 |
ret = ff_filter_frame(outlink, out); |
198 |
- avfilter_unref_bufferp(&inpicref); |
|
198 |
+ av_frame_free(&inpicref); |
|
199 | 199 |
return ret; |
200 | 200 |
} |
201 | 201 |
|
... | ... |
@@ -116,11 +116,11 @@ static int config_props(AVFilterLink *inlink) |
116 | 116 |
return 0; |
117 | 117 |
} |
118 | 118 |
|
119 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
|
119 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
|
120 | 120 |
{ |
121 | 121 |
KerndeintContext *kerndeint = inlink->dst->priv; |
122 | 122 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
123 |
- AVFilterBufferRef *outpic; |
|
123 |
+ AVFrame *outpic; |
|
124 | 124 |
const uint8_t *prvp; ///< Previous field's pixel line number n |
125 | 125 |
const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1) |
126 | 126 |
const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1) |
... | ... |
@@ -154,13 +154,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
154 | 154 |
|
155 | 155 |
const int is_packed_rgb = kerndeint->is_packed_rgb; |
156 | 156 |
|
157 |
- outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); |
|
157 |
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
158 | 158 |
if (!outpic) { |
159 |
- avfilter_unref_bufferp(&inpic); |
|
159 |
+ av_frame_free(&inpic); |
|
160 | 160 |
return AVERROR(ENOMEM); |
161 | 161 |
} |
162 |
- avfilter_copy_buffer_ref_props(outpic, inpic); |
|
163 |
- outpic->video->interlaced = 0; |
|
162 |
+ av_frame_copy_props(outpic, inpic); |
|
163 |
+ outpic->interlaced_frame = 0; |
|
164 | 164 |
|
165 | 165 |
for (plane = 0; inpic->data[plane] && plane < 4; plane++) { |
166 | 166 |
h = plane == 0 ? inlink->h : inlink->h >> kerndeint->vsub; |
... | ... |
@@ -295,7 +295,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
295 | 295 |
av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h); |
296 | 296 |
} |
297 | 297 |
|
298 |
- avfilter_unref_buffer(inpic); |
|
298 |
+ av_frame_free(&inpic); |
|
299 | 299 |
return ff_filter_frame(outlink, outpic); |
300 | 300 |
} |
301 | 301 |
|
... | ... |
@@ -305,7 +305,6 @@ static const AVFilterPad kerndeint_inputs[] = { |
305 | 305 |
.type = AVMEDIA_TYPE_VIDEO, |
306 | 306 |
.filter_frame = filter_frame, |
307 | 307 |
.config_props = config_props, |
308 |
- .min_perms = AV_PERM_READ, |
|
309 | 308 |
}, |
310 | 309 |
{ NULL } |
311 | 310 |
}; |
... | ... |
@@ -35,7 +35,7 @@ |
35 | 35 |
#include "internal.h" |
36 | 36 |
#include "video.h" |
37 | 37 |
|
38 |
-static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *picref, enum AVPixelFormat pixfmt) |
|
38 |
+static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt) |
|
39 | 39 |
{ |
40 | 40 |
IplImage *tmpimg; |
41 | 41 |
int depth, channels_nb; |
... | ... |
@@ -45,18 +45,18 @@ static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *pi |
45 | 45 |
else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; } |
46 | 46 |
else return; |
47 | 47 |
|
48 |
- tmpimg = cvCreateImageHeader((CvSize){picref->video->w, picref->video->h}, depth, channels_nb); |
|
48 |
+ tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb); |
|
49 | 49 |
*img = *tmpimg; |
50 |
- img->imageData = img->imageDataOrigin = picref->data[0]; |
|
50 |
+ img->imageData = img->imageDataOrigin = frame->data[0]; |
|
51 | 51 |
img->dataOrder = IPL_DATA_ORDER_PIXEL; |
52 | 52 |
img->origin = IPL_ORIGIN_TL; |
53 |
- img->widthStep = picref->linesize[0]; |
|
53 |
+ img->widthStep = frame->linesize[0]; |
|
54 | 54 |
} |
55 | 55 |
|
56 |
-static void fill_picref_from_iplimage(AVFilterBufferRef *picref, const IplImage *img, enum AVPixelFormat pixfmt) |
|
56 |
+static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt) |
|
57 | 57 |
{ |
58 |
- picref->linesize[0] = img->widthStep; |
|
59 |
- picref->data[0] = img->imageData; |
|
58 |
+ frame->linesize[0] = img->widthStep; |
|
59 |
+ frame->data[0] = img->imageData; |
|
60 | 60 |
} |
61 | 61 |
|
62 | 62 |
static int query_formats(AVFilterContext *ctx) |
... | ... |
@@ -351,27 +351,27 @@ static av_cold void uninit(AVFilterContext *ctx) |
351 | 351 |
memset(ocv, 0, sizeof(*ocv)); |
352 | 352 |
} |
353 | 353 |
|
354 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
354 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
355 | 355 |
{ |
356 | 356 |
AVFilterContext *ctx = inlink->dst; |
357 | 357 |
OCVContext *ocv = ctx->priv; |
358 | 358 |
AVFilterLink *outlink= inlink->dst->outputs[0]; |
359 |
- AVFilterBufferRef *out; |
|
359 |
+ AVFrame *out; |
|
360 | 360 |
IplImage inimg, outimg; |
361 | 361 |
|
362 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
362 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
363 | 363 |
if (!out) { |
364 |
- avfilter_unref_bufferp(&in); |
|
364 |
+ av_frame_free(&in); |
|
365 | 365 |
return AVERROR(ENOMEM); |
366 | 366 |
} |
367 |
- avfilter_copy_buffer_ref_props(out, in); |
|
367 |
+ av_frame_copy_props(out, in); |
|
368 | 368 |
|
369 |
- fill_iplimage_from_picref(&inimg , in , inlink->format); |
|
370 |
- fill_iplimage_from_picref(&outimg, out, inlink->format); |
|
369 |
+ fill_iplimage_from_frame(&inimg , in , inlink->format); |
|
370 |
+ fill_iplimage_from_frame(&outimg, out, inlink->format); |
|
371 | 371 |
ocv->end_frame_filter(ctx, &inimg, &outimg); |
372 |
- fill_picref_from_iplimage(out, &outimg, inlink->format); |
|
372 |
+ fill_frame_from_iplimage(out, &outimg, inlink->format); |
|
373 | 373 |
|
374 |
- avfilter_unref_bufferp(&in); |
|
374 |
+ av_frame_free(&in); |
|
375 | 375 |
|
376 | 376 |
return ff_filter_frame(outlink, out); |
377 | 377 |
} |
... | ... |
@@ -381,7 +381,6 @@ static const AVFilterPad avfilter_vf_ocv_inputs[] = { |
381 | 381 |
.name = "default", |
382 | 382 |
.type = AVMEDIA_TYPE_VIDEO, |
383 | 383 |
.filter_frame = filter_frame, |
384 |
- .min_perms = AV_PERM_READ |
|
385 | 384 |
}, |
386 | 385 |
{ NULL } |
387 | 386 |
}; |
... | ... |
@@ -253,28 +253,28 @@ static int config_props(AVFilterLink *inlink) |
253 | 253 |
return 0; |
254 | 254 |
} |
255 | 255 |
|
256 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
256 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
257 | 257 |
{ |
258 | 258 |
AVFilterContext *ctx = inlink->dst; |
259 | 259 |
LutContext *lut = ctx->priv; |
260 | 260 |
AVFilterLink *outlink = ctx->outputs[0]; |
261 |
- AVFilterBufferRef *out; |
|
261 |
+ AVFrame *out; |
|
262 | 262 |
uint8_t *inrow, *outrow, *inrow0, *outrow0; |
263 | 263 |
int i, j, plane; |
264 | 264 |
|
265 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
265 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
266 | 266 |
if (!out) { |
267 |
- avfilter_unref_bufferp(&in); |
|
267 |
+ av_frame_free(&in); |
|
268 | 268 |
return AVERROR(ENOMEM); |
269 | 269 |
} |
270 |
- avfilter_copy_buffer_ref_props(out, in); |
|
270 |
+ av_frame_copy_props(out, in); |
|
271 | 271 |
|
272 | 272 |
if (lut->is_rgb) { |
273 | 273 |
/* packed */ |
274 | 274 |
inrow0 = in ->data[0]; |
275 | 275 |
outrow0 = out->data[0]; |
276 | 276 |
|
277 |
- for (i = 0; i < in->video->h; i ++) { |
|
277 |
+ for (i = 0; i < in->height; i ++) { |
|
278 | 278 |
int w = inlink->w; |
279 | 279 |
const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut; |
280 | 280 |
inrow = inrow0; |
... | ... |
@@ -305,7 +305,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
305 | 305 |
inrow = in ->data[plane]; |
306 | 306 |
outrow = out->data[plane]; |
307 | 307 |
|
308 |
- for (i = 0; i < (in->video->h + (1<<vsub) - 1)>>vsub; i ++) { |
|
308 |
+ for (i = 0; i < (in->height + (1<<vsub) - 1)>>vsub; i ++) { |
|
309 | 309 |
const uint8_t *tab = lut->lut[plane]; |
310 | 310 |
int w = (inlink->w + (1<<hsub) - 1)>>hsub; |
311 | 311 |
for (j = 0; j < w; j++) |
... | ... |
@@ -316,7 +316,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
316 | 316 |
} |
317 | 317 |
} |
318 | 318 |
|
319 |
- avfilter_unref_bufferp(&in); |
|
319 |
+ av_frame_free(&in); |
|
320 | 320 |
return ff_filter_frame(outlink, out); |
321 | 321 |
} |
322 | 322 |
|
... | ... |
@@ -325,7 +325,7 @@ static const AVFilterPad inputs[] = { |
325 | 325 |
.type = AVMEDIA_TYPE_VIDEO, |
326 | 326 |
.filter_frame = filter_frame, |
327 | 327 |
.config_props = config_props, |
328 |
- .min_perms = AV_PERM_READ, }, |
|
328 |
+ }, |
|
329 | 329 |
{ .name = NULL} |
330 | 330 |
}; |
331 | 331 |
static const AVFilterPad outputs[] = { |
... | ... |
@@ -536,45 +536,38 @@ mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgty |
536 | 536 |
return mpi; |
537 | 537 |
} |
538 | 538 |
|
539 |
+static void dummy_free(void *opaque, uint8_t *data){} |
|
539 | 540 |
|
540 | 541 |
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ |
541 | 542 |
MPContext *m= (void*)vf; |
542 | 543 |
AVFilterLink *outlink = m->avfctx->outputs[0]; |
543 |
- AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer)); |
|
544 |
- AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef)); |
|
544 |
+ AVFrame *picref = av_frame_alloc(); |
|
545 | 545 |
int i; |
546 | 546 |
|
547 | 547 |
av_assert0(vf->next); |
548 | 548 |
|
549 | 549 |
av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n"); |
550 | 550 |
|
551 |
- if (!pic || !picref) |
|
551 |
+ if (!picref) |
|
552 | 552 |
goto fail; |
553 | 553 |
|
554 |
- picref->buf = pic; |
|
555 |
- picref->buf->free= (void*)av_free; |
|
556 |
- if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps)))) |
|
557 |
- goto fail; |
|
558 |
- |
|
559 |
- pic->w = picref->video->w = mpi->w; |
|
560 |
- pic->h = picref->video->h = mpi->h; |
|
561 |
- |
|
562 |
- /* make sure the buffer gets read permission or it's useless for output */ |
|
563 |
- picref->perms = AV_PERM_READ | AV_PERM_REUSE2; |
|
564 |
-// av_assert0(mpi->flags&MP_IMGFLAG_READABLE); |
|
565 |
- if(!(mpi->flags&MP_IMGFLAG_PRESERVE)) |
|
566 |
- picref->perms |= AV_PERM_WRITE; |
|
554 |
+ picref->width = mpi->w; |
|
555 |
+ picref->height = mpi->h; |
|
567 | 556 |
|
568 |
- pic->refcount = 1; |
|
569 | 557 |
picref->type = AVMEDIA_TYPE_VIDEO; |
570 | 558 |
|
571 | 559 |
for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++); |
572 |
- pic->format = picref->format = conversion_map[i].pix_fmt; |
|
560 |
+ picref->format = conversion_map[i].pix_fmt; |
|
573 | 561 |
|
574 |
- memcpy(pic->data, mpi->planes, FFMIN(sizeof(pic->data) , sizeof(mpi->planes))); |
|
575 |
- memcpy(pic->linesize, mpi->stride, FFMIN(sizeof(pic->linesize), sizeof(mpi->stride))); |
|
576 |
- memcpy(picref->data, pic->data, sizeof(picref->data)); |
|
577 |
- memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize)); |
|
562 |
+ memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride))); |
|
563 |
+ |
|
564 |
+ for(i=0; i<4 && mpi->stride[i]; i++){ |
|
565 |
+ picref->buf[i] = av_buffer_create(mpi->planes[i], mpi->stride[i], dummy_free, NULL, |
|
566 |
+ (mpi->flags & MP_IMGFLAG_PRESERVE) ? AV_BUFFER_FLAG_READONLY : 0); |
|
567 |
+ if (!picref->buf[i]) |
|
568 |
+ goto fail; |
|
569 |
+ picref->data[i] = picref->buf[i]->data; |
|
570 |
+ } |
|
578 | 571 |
|
579 | 572 |
if(pts != MP_NOPTS_VALUE) |
580 | 573 |
picref->pts= pts * av_q2d(outlink->time_base); |
... | ... |
@@ -584,10 +577,7 @@ int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ |
584 | 584 |
|
585 | 585 |
return 1; |
586 | 586 |
fail: |
587 |
- if (picref && picref->video) |
|
588 |
- av_free(picref->video); |
|
589 |
- av_free(picref); |
|
590 |
- av_free(pic); |
|
587 |
+ av_frame_free(&picref); |
|
591 | 588 |
return 0; |
592 | 589 |
} |
593 | 590 |
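[Note: the vf_mp hunk above replaces the hand-built AVFilterBuffer with AVBufferRef wrappers around memory that lavfi does not own; the no-op free callback keeps the refcounting machinery from releasing MPlayer's planes. A sketch of the same idea as a standalone helper, with hypothetical names and the buffer size taken from the stride exactly as the hunk does:

    #include "libavutil/buffer.h"
    #include "libavutil/frame.h"

    static void borrowed_free(void *opaque, uint8_t *data)
    {
        /* the planes belong to the producer; nothing to release here */
    }

    static AVFrame *wrap_borrowed_planes(uint8_t *planes[4], int strides[4],
                                         int w, int h, enum AVPixelFormat fmt,
                                         int read_only)
    {
        AVFrame *f = av_frame_alloc();
        int i;

        if (!f)
            return NULL;
        f->width  = w;
        f->height = h;
        f->format = fmt;

        for (i = 0; i < 4 && strides[i]; i++) {
            f->linesize[i] = strides[i];
            /* the size is informational here, as in the hunk above,
             * since borrowed_free() never frees anything */
            f->buf[i] = av_buffer_create(planes[i], strides[i], borrowed_free, NULL,
                                         read_only ? AV_BUFFER_FLAG_READONLY : 0);
            if (!f->buf[i]) {
                av_frame_free(&f);
                return NULL;
            }
            f->data[i] = f->buf[i]->data;
        }
        return f;
    }
]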
|
... | ... |
@@ -793,12 +783,12 @@ static int request_frame(AVFilterLink *outlink) |
793 | 793 |
return ret; |
794 | 794 |
} |
795 | 795 |
|
796 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
|
796 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
|
797 | 797 |
{ |
798 | 798 |
MPContext *m = inlink->dst->priv; |
799 | 799 |
int i; |
800 | 800 |
double pts= MP_NOPTS_VALUE; |
801 |
- mp_image_t* mpi = ff_new_mp_image(inpic->video->w, inpic->video->h); |
|
801 |
+ mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height); |
|
802 | 802 |
|
803 | 803 |
if(inpic->pts != AV_NOPTS_VALUE) |
804 | 804 |
pts= inpic->pts / av_q2d(inlink->time_base); |
... | ... |
@@ -813,12 +803,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
813 | 813 |
|
814 | 814 |
// mpi->flags|=MP_IMGFLAG_ALLOCATED; ? |
815 | 815 |
mpi->flags |= MP_IMGFLAG_READABLE; |
816 |
- if(!(inpic->perms & AV_PERM_WRITE)) |
|
816 |
+ if(!av_frame_is_writable(inpic)) |
|
817 | 817 |
mpi->flags |= MP_IMGFLAG_PRESERVE; |
818 | 818 |
if(m->vf.put_image(&m->vf, mpi, pts) == 0){ |
819 | 819 |
av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n"); |
820 | 820 |
}else{ |
821 |
- avfilter_unref_buffer(inpic); |
|
821 |
+ av_frame_free(&inpic); |
|
822 | 822 |
} |
823 | 823 |
ff_free_mp_image(mpi); |
824 | 824 |
return 0; |
... | ... |
@@ -830,7 +820,6 @@ static const AVFilterPad mp_inputs[] = { |
830 | 830 |
.type = AVMEDIA_TYPE_VIDEO, |
831 | 831 |
.filter_frame = filter_frame, |
832 | 832 |
.config_props = config_inprops, |
833 |
- .min_perms = AV_PERM_READ, |
|
834 | 833 |
}, |
835 | 834 |
{ NULL } |
836 | 835 |
}; |
... | ... |
@@ -298,22 +298,22 @@ static void noise(uint8_t *dst, const uint8_t *src, |
298 | 298 |
n->param[comp].shiftptr = 0; |
299 | 299 |
} |
300 | 300 |
|
301 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
301 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
302 | 302 |
{ |
303 | 303 |
NoiseContext *n = inlink->dst->priv; |
304 | 304 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
305 |
- AVFilterBufferRef *out; |
|
305 |
+ AVFrame *out; |
|
306 | 306 |
int ret, i; |
307 | 307 |
|
308 |
- if (inpicref->perms & AV_PERM_WRITE) { |
|
308 |
+ if (av_frame_is_writable(inpicref)) { |
|
309 | 309 |
out = inpicref; |
310 | 310 |
} else { |
311 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
311 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
312 | 312 |
if (!out) { |
313 |
- avfilter_unref_bufferp(&inpicref); |
|
313 |
+ av_frame_free(&inpicref); |
|
314 | 314 |
return AVERROR(ENOMEM); |
315 | 315 |
} |
316 |
- avfilter_copy_buffer_ref_props(out, inpicref); |
|
316 |
+ av_frame_copy_props(out, inpicref); |
|
317 | 317 |
} |
318 | 318 |
|
319 | 319 |
for (i = 0; i < n->nb_planes; i++) |
... | ... |
@@ -322,7 +322,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
322 | 322 |
|
323 | 323 |
ret = ff_filter_frame(outlink, out); |
324 | 324 |
if (inpicref != out) |
325 |
- avfilter_unref_buffer(inpicref); |
|
325 |
+ av_frame_free(&inpicref); |
|
326 | 326 |
return ret; |
327 | 327 |
} |
328 | 328 |
|
... | ... |
@@ -85,7 +85,7 @@ typedef struct { |
85 | 85 |
uint8_t overlay_has_alpha; |
86 | 86 |
enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format; |
87 | 87 |
|
88 |
- AVFilterBufferRef *overpicref; |
|
88 |
+ AVFrame *overpicref; |
|
89 | 89 |
struct FFBufQueue queue_main; |
90 | 90 |
struct FFBufQueue queue_over; |
91 | 91 |
|
... | ... |
@@ -143,7 +143,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
143 | 143 |
|
144 | 144 |
av_opt_free(over); |
145 | 145 |
|
146 |
- avfilter_unref_bufferp(&over->overpicref); |
|
146 |
+ av_frame_free(&over->overpicref); |
|
147 | 147 |
ff_bufqueue_discard_all(&over->queue_main); |
148 | 148 |
ff_bufqueue_discard_all(&over->queue_over); |
149 | 149 |
} |
... | ... |
@@ -316,15 +316,15 @@ static int config_output(AVFilterLink *outlink) |
316 | 316 |
* Blend image in src to destination buffer dst at position (x, y). |
317 | 317 |
*/ |
318 | 318 |
static void blend_image(AVFilterContext *ctx, |
319 |
- AVFilterBufferRef *dst, AVFilterBufferRef *src, |
|
319 |
+ AVFrame *dst, AVFrame *src, |
|
320 | 320 |
int x, int y) |
321 | 321 |
{ |
322 | 322 |
OverlayContext *over = ctx->priv; |
323 | 323 |
int i, imax, j, jmax, k, kmax; |
324 |
- const int src_w = src->video->w; |
|
325 |
- const int src_h = src->video->h; |
|
326 |
- const int dst_w = dst->video->w; |
|
327 |
- const int dst_h = dst->video->h; |
|
324 |
+ const int src_w = src->width; |
|
325 |
+ const int src_h = src->height; |
|
326 |
+ const int dst_w = dst->width; |
|
327 |
+ const int dst_h = dst->height; |
|
328 | 328 |
|
329 | 329 |
if (x >= dst_w || x+dst_w < 0 || |
330 | 330 |
y >= dst_h || y+dst_h < 0) |
... | ... |
@@ -503,11 +503,11 @@ static void blend_image(AVFilterContext *ctx, |
503 | 503 |
} |
504 | 504 |
} |
505 | 505 |
|
506 |
-static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) |
|
506 |
+static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic) |
|
507 | 507 |
{ |
508 | 508 |
OverlayContext *over = ctx->priv; |
509 | 509 |
AVFilterLink *outlink = ctx->outputs[0]; |
510 |
- AVFilterBufferRef *next_overpic; |
|
510 |
+ AVFrame *next_overpic; |
|
511 | 511 |
int ret; |
512 | 512 |
|
513 | 513 |
/* Discard obsolete overlay frames: if there is a next overlay frame with pts |
... | ... |
@@ -518,7 +518,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) |
518 | 518 |
mainpic->pts , ctx->inputs[MAIN]->time_base) > 0) |
519 | 519 |
break; |
520 | 520 |
ff_bufqueue_get(&over->queue_over); |
521 |
- avfilter_unref_buffer(over->overpicref); |
|
521 |
+ av_frame_free(&over->overpicref); |
|
522 | 522 |
over->overpicref = next_overpic; |
523 | 523 |
} |
524 | 524 |
|
... | ... |
@@ -549,7 +549,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) |
549 | 549 |
static int try_filter_next_frame(AVFilterContext *ctx) |
550 | 550 |
{ |
551 | 551 |
OverlayContext *over = ctx->priv; |
552 |
- AVFilterBufferRef *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); |
|
552 |
+ AVFrame *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); |
|
553 | 553 |
int ret; |
554 | 554 |
|
555 | 555 |
if (!next_mainpic) |
... | ... |
@@ -568,7 +568,7 @@ static int flush_frames(AVFilterContext *ctx) |
568 | 568 |
return ret == AVERROR(EAGAIN) ? 0 : ret; |
569 | 569 |
} |
570 | 570 |
|
571 |
-static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
571 |
+static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref) |
|
572 | 572 |
{ |
573 | 573 |
AVFilterContext *ctx = inlink->dst; |
574 | 574 |
OverlayContext *over = ctx->priv; |
... | ... |
@@ -589,7 +589,7 @@ static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
589 | 589 |
return 0; |
590 | 590 |
} |
591 | 591 |
|
592 |
-static int filter_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
592 |
+static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref) |
|
593 | 593 |
{ |
594 | 594 |
AVFilterContext *ctx = inlink->dst; |
595 | 595 |
OverlayContext *over = ctx->priv; |
... | ... |
@@ -639,14 +639,13 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = { |
639 | 639 |
.get_video_buffer = ff_null_get_video_buffer, |
640 | 640 |
.config_props = config_input_main, |
641 | 641 |
.filter_frame = filter_frame_main, |
642 |
- .min_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE, |
|
642 |
+ .needs_writable = 1, |
|
643 | 643 |
}, |
644 | 644 |
{ |
645 | 645 |
.name = "overlay", |
646 | 646 |
.type = AVMEDIA_TYPE_VIDEO, |
647 | 647 |
.config_props = config_input_overlay, |
648 | 648 |
.filter_frame = filter_frame_over, |
649 |
- .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
650 | 649 |
}, |
651 | 650 |
{ NULL } |
652 | 651 |
}; |
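[Note: where a pad used to request AV_PERM_WRITE (plus PRESERVE) through .min_perms, it now sets .needs_writable and the framework hands filter_frame a frame the filter may modify. Sketch of the new declaration style for a hypothetical filter:

    static const AVFilterPad example_inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_VIDEO,
            .filter_frame   = filter_frame,
            /* replaces .min_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE */
            .needs_writable = 1,
        },
        { NULL }
    };
]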
... | ... |
@@ -238,98 +238,126 @@ static int config_output(AVFilterLink *outlink) |
238 | 238 |
return 0; |
239 | 239 |
} |
240 | 240 |
|
241 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) |
|
241 |
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) |
|
242 | 242 |
{ |
243 | 243 |
PadContext *pad = inlink->dst->priv; |
244 |
- int align = (perms&AV_PERM_ALIGN) ? AVFILTER_ALIGN : 1; |
|
245 | 244 |
|
246 |
- AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms, |
|
247 |
- w + (pad->w - pad->in_w) + 4*align, |
|
248 |
- h + (pad->h - pad->in_h)); |
|
245 |
+ AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0], |
|
246 |
+ w + (pad->w - pad->in_w), |
|
247 |
+ h + (pad->h - pad->in_h)); |
|
249 | 248 |
int plane; |
250 | 249 |
|
251 |
- if (!picref) |
|
250 |
+ if (!frame) |
|
252 | 251 |
return NULL; |
253 | 252 |
|
254 |
- picref->video->w = w; |
|
255 |
- picref->video->h = h; |
|
253 |
+ frame->width = w; |
|
254 |
+ frame->height = h; |
|
256 | 255 |
|
257 |
- for (plane = 0; plane < 4 && picref->data[plane]; plane++) |
|
258 |
- picref->data[plane] += FFALIGN(pad->x >> pad->draw.hsub[plane], align) * pad->draw.pixelstep[plane] + |
|
259 |
- (pad->y >> pad->draw.vsub[plane]) * picref->linesize[plane]; |
|
256 |
+ for (plane = 0; plane < 4 && frame->data[plane]; plane++) { |
|
257 |
+ int hsub = pad->draw.hsub[plane]; |
|
258 |
+ int vsub = pad->draw.vsub[plane]; |
|
259 |
+ frame->data[plane] += (pad->x >> hsub) * pad->draw.pixelstep[plane] + |
|
260 |
+ (pad->y >> vsub) * frame->linesize[plane]; |
|
261 |
+ } |
|
260 | 262 |
|
261 |
- return picref; |
|
263 |
+ return frame; |
|
262 | 264 |
} |
263 | 265 |
|
264 |
-static int does_clip(PadContext *pad, AVFilterBufferRef *outpicref, int plane, int hsub, int vsub, int x, int y) |
|
266 |
+/* check whether each plane in this buffer can be padded without copying */ |
|
267 |
+static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf) |
|
265 | 268 |
{ |
266 |
- int64_t x_in_buf, y_in_buf; |
|
269 |
+ int planes[4] = { -1, -1, -1, -1}, *p = planes; |
|
270 |
+ int i, j; |
|
267 | 271 |
|
268 |
- x_in_buf = outpicref->data[plane] - outpicref->buf->data[plane] |
|
269 |
- + (x >> hsub) * pad->draw.pixelstep[plane] |
|
270 |
- + (y >> vsub) * outpicref->linesize[plane]; |
|
272 |
+ /* get all planes in this buffer */ |
|
273 |
+ for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) { |
|
274 |
+ if (av_frame_get_plane_buffer(frame, i) == buf) |
|
275 |
+ *p++ = i; |
|
276 |
+ } |
|
271 | 277 |
|
272 |
- if(x_in_buf < 0 || x_in_buf % pad->draw.pixelstep[plane]) |
|
273 |
- return 1; |
|
274 |
- x_in_buf /= pad->draw.pixelstep[plane]; |
|
278 |
+ /* for each plane in this buffer, check that it can be padded without |
|
279 |
+ * going over buffer bounds or other planes */ |
|
280 |
+ for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) { |
|
281 |
+ int hsub = s->draw.hsub[planes[i]]; |
|
282 |
+ int vsub = s->draw.vsub[planes[i]]; |
|
283 |
+ |
|
284 |
+ uint8_t *start = frame->data[planes[i]]; |
|
285 |
+ uint8_t *end = start + (frame->height >> hsub) * |
|
286 |
+ frame->linesize[planes[i]]; |
|
287 |
+ |
|
288 |
+ /* amount of free space needed before the start and after the end |
|
289 |
+ * of the plane */ |
|
290 |
+ ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] + |
|
291 |
+ (s->y >> vsub) * frame->linesize[planes[i]]; |
|
292 |
+ ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) * |
|
293 |
+ s->draw.pixelstep[planes[i]] + |
|
294 |
+ (s->y >> vsub) * frame->linesize[planes[i]]; |
|
295 |
+ |
|
296 |
+ if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]]) |
|
297 |
+ return 1; |
|
298 |
+ if (start - buf->data < req_start || |
|
299 |
+ (buf->data + buf->size) - end < req_end) |
|
300 |
+ return 1; |
|
301 |
+ |
|
302 |
+#define SIGN(x) ((x) > 0 ? 1 : -1) |
|
303 |
+ for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
|
304 |
+ int hsub1 = s->draw.hsub[planes[j]]; |
|
305 |
+ uint8_t *start1 = frame->data[planes[j]]; |
|
306 |
+ uint8_t *end1 = start1 + (frame->height >> hsub1) * |
|
307 |
+ frame->linesize[planes[j]]; |
|
308 |
+ if (i == j) |
|
309 |
+ continue; |
|
310 |
+ |
|
311 |
+ if (SIGN(start - end1) != SIGN(start - end1 - req_start) || |
|
312 |
+ SIGN(end - start1) != SIGN(end - start1 + req_end)) |
|
313 |
+ return 1; |
|
314 |
+ } |
|
315 |
+ } |
|
275 | 316 |
|
276 |
- av_assert0(outpicref->buf->linesize[plane]>0); //while reference can use negative linesize the main buffer should not |
|
317 |
+ return 0; |
|
318 |
+} |
|
277 | 319 |
|
278 |
- y_in_buf = x_in_buf / outpicref->buf->linesize[plane]; |
|
279 |
- x_in_buf %= outpicref->buf->linesize[plane]; |
|
320 |
+static int frame_needs_copy(PadContext *s, AVFrame *frame) |
|
321 |
+{ |
|
322 |
+ int i; |
|
280 | 323 |
|
281 |
- if( y_in_buf<<vsub >= outpicref->buf->h |
|
282 |
- || x_in_buf<<hsub >= outpicref->buf->w) |
|
324 |
+ if (!av_frame_is_writable(frame)) |
|
283 | 325 |
return 1; |
326 |
+ |
|
327 |
+ for (i = 0; i < 4 && frame->buf[i]; i++) |
|
328 |
+ if (buffer_needs_copy(s, frame, frame->buf[i])) |
|
329 |
+ return 1; |
|
284 | 330 |
return 0; |
285 | 331 |
} |
286 | 332 |
|
287 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
333 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
288 | 334 |
{ |
289 | 335 |
PadContext *pad = inlink->dst->priv; |
290 |
- AVFilterBufferRef *out = avfilter_ref_buffer(in, ~0); |
|
291 |
- int plane, needs_copy; |
|
292 |
- |
|
293 |
- if (!out) { |
|
294 |
- avfilter_unref_bufferp(&in); |
|
295 |
- return AVERROR(ENOMEM); |
|
296 |
- } |
|
297 |
- |
|
298 |
- for (plane = 0; plane < 4 && out->data[plane] && pad->draw.pixelstep[plane]; plane++) { |
|
299 |
- int hsub = pad->draw.hsub[plane]; |
|
300 |
- int vsub = pad->draw.vsub[plane]; |
|
301 |
- |
|
302 |
- av_assert0(out->buf->w > 0 && out->buf->h > 0); |
|
303 |
- |
|
304 |
- if (out->format != out->buf->format) //unsupported currently |
|
305 |
- break; |
|
336 |
+ AVFrame *out; |
|
337 |
+ int needs_copy = frame_needs_copy(pad, in); |
|
306 | 338 |
|
307 |
- out->data[plane] -= (pad->x >> hsub) * pad->draw.pixelstep[plane] + |
|
308 |
- (pad->y >> vsub) * out->linesize[plane]; |
|
309 |
- |
|
310 |
- if (does_clip(pad, out, plane, hsub, vsub, 0, 0) || |
|
311 |
- does_clip(pad, out, plane, hsub, vsub, 0, pad->h - 1) || |
|
312 |
- does_clip(pad, out, plane, hsub, vsub, pad->w - 1, 0) || |
|
313 |
- does_clip(pad, out, plane, hsub, vsub, pad->w - 1, pad->h - 1)) |
|
314 |
- break; |
|
315 |
- } |
|
316 |
- needs_copy = plane < 4 && out->data[plane] || !(out->perms & AV_PERM_WRITE); |
|
317 | 339 |
if (needs_copy) { |
318 | 340 |
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n"); |
319 |
- avfilter_unref_buffer(out); |
|
320 |
- out = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES, |
|
341 |
+ out = ff_get_video_buffer(inlink->dst->outputs[0], |
|
321 | 342 |
FFMAX(inlink->w, pad->w), |
322 | 343 |
FFMAX(inlink->h, pad->h)); |
323 | 344 |
if (!out) { |
324 |
- avfilter_unref_bufferp(&in); |
|
345 |
+ av_frame_free(&in); |
|
325 | 346 |
return AVERROR(ENOMEM); |
326 | 347 |
} |
327 | 348 |
|
328 |
- avfilter_copy_buffer_ref_props(out, in); |
|
329 |
- } |
|
349 |
+ av_frame_copy_props(out, in); |
|
350 |
+ } else { |
|
351 |
+ int i; |
|
330 | 352 |
|
331 |
- out->video->w = pad->w; |
|
332 |
- out->video->h = pad->h; |
|
353 |
+ out = in; |
|
354 |
+ for (i = 0; i < 4 && out->data[i]; i++) { |
|
355 |
+ int hsub = pad->draw.hsub[i]; |
|
356 |
+ int vsub = pad->draw.vsub[i]; |
|
357 |
+ out->data[i] -= (pad->x >> hsub) * pad->draw.pixelstep[i] + |
|
358 |
+ (pad->y >> vsub) * out->linesize[i]; |
|
359 |
+ } |
|
360 |
+ } |
|
333 | 361 |
|
334 | 362 |
/* top bar */ |
335 | 363 |
if (pad->y) { |
... | ... |
@@ -347,20 +375,24 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
347 | 347 |
|
348 | 348 |
/* left border */ |
349 | 349 |
ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize, |
350 |
- 0, pad->y, pad->x, in->video->h); |
|
350 |
+ 0, pad->y, pad->x, in->height); |
|
351 | 351 |
|
352 | 352 |
if (needs_copy) { |
353 | 353 |
ff_copy_rectangle2(&pad->draw, |
354 | 354 |
out->data, out->linesize, in->data, in->linesize, |
355 |
- pad->x, pad->y, 0, 0, in->video->w, in->video->h); |
|
355 |
+ pad->x, pad->y, 0, 0, in->width, in->height); |
|
356 | 356 |
} |
357 | 357 |
|
358 | 358 |
/* right border */ |
359 | 359 |
ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize, |
360 | 360 |
pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w, |
361 |
- in->video->h); |
|
361 |
+ in->height); |
|
362 |
+ |
|
363 |
+ out->width = pad->w; |
|
364 |
+ out->height = pad->h; |
|
362 | 365 |
|
363 |
- avfilter_unref_bufferp(&in); |
|
366 |
+ if (in != out) |
|
367 |
+ av_frame_free(&in); |
|
364 | 368 |
return ff_filter_frame(inlink->dst->outputs[0], out); |
365 | 369 |
} |
366 | 370 |
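[Note: with permission flags gone, vf_pad decides between in-place and copied padding by inspecting the frame itself: av_frame_is_writable() plus the per-buffer bounds checks in buffer_needs_copy()/frame_needs_copy() above, which rely on av_frame_get_plane_buffer() to map planes to their backing buffers. A small sketch of that lookup, as a hypothetical debugging helper:

    static void log_plane_buffers(void *log_ctx, AVFrame *frame)
    {
        int i;

        for (i = 0; i < 4 && frame->data[i]; i++) {
            /* maps each plane back to the AVBufferRef that owns its memory */
            AVBufferRef *buf = av_frame_get_plane_buffer(frame, i);
            av_log(log_ctx, AV_LOG_DEBUG, "plane %d backed by buffer %p\n",
                   i, buf ? (void *)buf->data : NULL);
        }
    }
]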
|
... | ... |
@@ -52,21 +52,20 @@ static int config_props(AVFilterLink *inlink) |
52 | 52 |
return 0; |
53 | 53 |
} |
54 | 54 |
|
55 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
55 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
56 | 56 |
{ |
57 | 57 |
PixdescTestContext *priv = inlink->dst->priv; |
58 | 58 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
59 |
- AVFilterBufferRef *out; |
|
59 |
+ AVFrame *out; |
|
60 | 60 |
int i, c, w = inlink->w, h = inlink->h; |
61 | 61 |
|
62 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
63 |
- outlink->w, outlink->h); |
|
62 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
64 | 63 |
if (!out) { |
65 |
- avfilter_unref_bufferp(&in); |
|
64 |
+ av_frame_free(&in); |
|
66 | 65 |
return AVERROR(ENOMEM); |
67 | 66 |
} |
68 | 67 |
|
69 |
- avfilter_copy_buffer_ref_props(out, in); |
|
68 |
+ av_frame_copy_props(out, in); |
|
70 | 69 |
|
71 | 70 |
for (i = 0; i < 4; i++) { |
72 | 71 |
int h = outlink->h; |
... | ... |
@@ -102,7 +101,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
102 | 102 |
} |
103 | 103 |
} |
104 | 104 |
|
105 |
- avfilter_unref_bufferp(&in); |
|
105 |
+ av_frame_free(&in); |
|
106 | 106 |
return ff_filter_frame(outlink, out); |
107 | 107 |
} |
108 | 108 |
|
... | ... |
@@ -112,7 +111,6 @@ static const AVFilterPad avfilter_vf_pixdesctest_inputs[] = { |
112 | 112 |
.type = AVMEDIA_TYPE_VIDEO, |
113 | 113 |
.filter_frame = filter_frame, |
114 | 114 |
.config_props = config_props, |
115 |
- .min_perms = AV_PERM_READ, |
|
116 | 115 |
}, |
117 | 116 |
{ NULL } |
118 | 117 |
}; |
... | ... |
@@ -100,32 +100,32 @@ static int pp_config_props(AVFilterLink *inlink) |
100 | 100 |
return 0; |
101 | 101 |
} |
102 | 102 |
|
103 |
-static int pp_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inbuf) |
|
103 |
+static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf) |
|
104 | 104 |
{ |
105 | 105 |
AVFilterContext *ctx = inlink->dst; |
106 | 106 |
PPFilterContext *pp = ctx->priv; |
107 | 107 |
AVFilterLink *outlink = ctx->outputs[0]; |
108 | 108 |
const int aligned_w = FFALIGN(outlink->w, 8); |
109 | 109 |
const int aligned_h = FFALIGN(outlink->h, 8); |
110 |
- AVFilterBufferRef *outbuf; |
|
110 |
+ AVFrame *outbuf; |
|
111 | 111 |
|
112 |
- outbuf = ff_get_video_buffer(outlink, AV_PERM_WRITE, aligned_w, aligned_h); |
|
112 |
+ outbuf = ff_get_video_buffer(outlink, aligned_w, aligned_h); |
|
113 | 113 |
if (!outbuf) { |
114 |
- avfilter_unref_buffer(inbuf); |
|
114 |
+ av_frame_free(&inbuf); |
|
115 | 115 |
return AVERROR(ENOMEM); |
116 | 116 |
} |
117 |
- avfilter_copy_buffer_ref_props(outbuf, inbuf); |
|
117 |
+ av_frame_copy_props(outbuf, inbuf); |
|
118 | 118 |
|
119 | 119 |
pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize, |
120 | 120 |
outbuf->data, outbuf->linesize, |
121 | 121 |
aligned_w, outlink->h, |
122 |
- outbuf->video->qp_table, |
|
123 |
- outbuf->video->qp_table_linesize, |
|
122 |
+ outbuf->qscale_table, |
|
123 |
+ outbuf->qstride, |
|
124 | 124 |
pp->modes[pp->mode_id], |
125 | 125 |
pp->pp_ctx, |
126 |
- outbuf->video->pict_type); |
|
126 |
+ outbuf->pict_type); |
|
127 | 127 |
|
128 |
- avfilter_unref_buffer(inbuf); |
|
128 |
+ av_frame_free(&inbuf); |
|
129 | 129 |
return ff_filter_frame(outlink, outbuf); |
130 | 130 |
} |
131 | 131 |
|
... | ... |
@@ -146,7 +146,6 @@ static const AVFilterPad pp_inputs[] = { |
146 | 146 |
.type = AVMEDIA_TYPE_VIDEO, |
147 | 147 |
.config_props = pp_config_props, |
148 | 148 |
.filter_frame = pp_filter_frame, |
149 |
- .min_perms = AV_PERM_READ, |
|
150 | 149 |
}, |
151 | 150 |
{ NULL } |
152 | 151 |
}; |
... | ... |
@@ -473,23 +473,23 @@ static void blur_image(int ***mask, |
473 | 473 |
} |
474 | 474 |
} |
475 | 475 |
|
476 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
476 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
477 | 477 |
{ |
478 | 478 |
RemovelogoContext *removelogo = inlink->dst->priv; |
479 | 479 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
480 |
- AVFilterBufferRef *outpicref; |
|
480 |
+ AVFrame *outpicref; |
|
481 | 481 |
int direct = 0; |
482 | 482 |
|
483 |
- if (inpicref->perms & AV_PERM_WRITE) { |
|
483 |
+ if (av_frame_is_writable(inpicref)) { |
|
484 | 484 |
direct = 1; |
485 | 485 |
outpicref = inpicref; |
486 | 486 |
} else { |
487 |
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
487 |
+ outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
488 | 488 |
if (!outpicref) { |
489 |
- avfilter_unref_bufferp(&inpicref); |
|
489 |
+ av_frame_free(&inpicref); |
|
490 | 490 |
return AVERROR(ENOMEM); |
491 | 491 |
} |
492 |
- avfilter_copy_buffer_ref_props(outpicref, inpicref); |
|
492 |
+ av_frame_copy_props(outpicref, inpicref); |
|
493 | 493 |
} |
494 | 494 |
|
495 | 495 |
blur_image(removelogo->mask, |
... | ... |
@@ -509,7 +509,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
509 | 509 |
inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox); |
510 | 510 |
|
511 | 511 |
if (!direct) |
512 |
- avfilter_unref_bufferp(&inpicref); |
|
512 |
+ av_frame_free(&inpicref); |
|
513 | 513 |
|
514 | 514 |
return ff_filter_frame(outlink, outpicref); |
515 | 515 |
} |
... | ... |
@@ -543,7 +543,6 @@ static const AVFilterPad removelogo_inputs[] = { |
543 | 543 |
.get_video_buffer = ff_null_get_video_buffer, |
544 | 544 |
.config_props = config_props_input, |
545 | 545 |
.filter_frame = filter_frame, |
546 |
- .min_perms = AV_PERM_READ, |
|
547 | 546 |
}, |
548 | 547 |
{ NULL } |
549 | 548 |
}; |
... | ... |
@@ -329,7 +329,7 @@ fail: |
329 | 329 |
return ret; |
330 | 330 |
} |
331 | 331 |
|
332 |
-static int scale_slice(AVFilterLink *link, AVFilterBufferRef *out_buf, AVFilterBufferRef *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field) |
|
332 |
+static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field) |
|
333 | 333 |
{ |
334 | 334 |
ScaleContext *scale = link->dst->priv; |
335 | 335 |
const uint8_t *in[4]; |
... | ... |
@@ -353,17 +353,17 @@ static int scale_slice(AVFilterLink *link, AVFilterBufferRef *out_buf, AVFilterB |
353 | 353 |
out,out_stride); |
354 | 354 |
} |
355 | 355 |
|
356 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
356 |
+static int filter_frame(AVFilterLink *link, AVFrame *in) |
|
357 | 357 |
{ |
358 | 358 |
ScaleContext *scale = link->dst->priv; |
359 | 359 |
AVFilterLink *outlink = link->dst->outputs[0]; |
360 |
- AVFilterBufferRef *out; |
|
360 |
+ AVFrame *out; |
|
361 | 361 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); |
362 | 362 |
char buf[32]; |
363 | 363 |
|
364 |
- if( in->video->w != link->w |
|
365 |
- || in->video->h != link->h |
|
366 |
- || in->format != link->format) { |
|
364 |
+ if( in->width != link->w |
|
365 |
+ || in->height != link->h |
|
366 |
+ || in->format != link->format) { |
|
367 | 367 |
int ret; |
368 | 368 |
snprintf(buf, sizeof(buf)-1, "%d", outlink->w); |
369 | 369 |
av_opt_set(scale, "w", buf, 0); |
... | ... |
@@ -371,8 +371,8 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
371 | 371 |
av_opt_set(scale, "h", buf, 0); |
372 | 372 |
|
373 | 373 |
link->dst->inputs[0]->format = in->format; |
374 |
- link->dst->inputs[0]->w = in->video->w; |
|
375 |
- link->dst->inputs[0]->h = in->video->h; |
|
374 |
+ link->dst->inputs[0]->w = in->width; |
|
375 |
+ link->dst->inputs[0]->h = in->height; |
|
376 | 376 |
|
377 | 377 |
if ((ret = config_props(outlink)) < 0) |
378 | 378 |
return ret; |
... | ... |
@@ -384,32 +384,32 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
384 | 384 |
scale->hsub = desc->log2_chroma_w; |
385 | 385 |
scale->vsub = desc->log2_chroma_h; |
386 | 386 |
|
387 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); |
|
387 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
388 | 388 |
if (!out) { |
389 |
- avfilter_unref_bufferp(&in); |
|
389 |
+ av_frame_free(&in); |
|
390 | 390 |
return AVERROR(ENOMEM); |
391 | 391 |
} |
392 | 392 |
|
393 |
- avfilter_copy_buffer_ref_props(out, in); |
|
394 |
- out->video->w = outlink->w; |
|
395 |
- out->video->h = outlink->h; |
|
393 |
+ av_frame_copy_props(out, in); |
|
394 |
+ out->width = outlink->w; |
|
395 |
+ out->height = outlink->h; |
|
396 | 396 |
|
397 | 397 |
if(scale->output_is_pal) |
398 | 398 |
avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format); |
399 | 399 |
|
400 |
- av_reduce(&out->video->sample_aspect_ratio.num, &out->video->sample_aspect_ratio.den, |
|
401 |
- (int64_t)in->video->sample_aspect_ratio.num * outlink->h * link->w, |
|
402 |
- (int64_t)in->video->sample_aspect_ratio.den * outlink->w * link->h, |
|
400 |
+ av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den, |
|
401 |
+ (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w, |
|
402 |
+ (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h, |
|
403 | 403 |
INT_MAX); |
404 | 404 |
|
405 |
- if(scale->interlaced>0 || (scale->interlaced<0 && in->video->interlaced)){ |
|
405 |
+ if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){ |
|
406 | 406 |
scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0); |
407 | 407 |
scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1); |
408 | 408 |
}else{ |
409 | 409 |
scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0); |
410 | 410 |
} |
411 | 411 |
|
412 |
- avfilter_unref_bufferp(&in); |
|
412 |
+ av_frame_free(&in); |
|
413 | 413 |
return ff_filter_frame(outlink, out); |
414 | 414 |
} |
415 | 415 |
|
... | ... |
@@ -418,7 +418,6 @@ static const AVFilterPad avfilter_vf_scale_inputs[] = { |
418 | 418 |
.name = "default", |
419 | 419 |
.type = AVMEDIA_TYPE_VIDEO, |
420 | 420 |
.filter_frame = filter_frame, |
421 |
- .min_perms = AV_PERM_READ, |
|
422 | 421 |
}, |
423 | 422 |
{ NULL } |
424 | 423 |
}; |
... | ... |
@@ -71,15 +71,15 @@ static av_cold void uninit(AVFilterContext *ctx) |
71 | 71 |
av_opt_free(setfield); |
72 | 72 |
} |
73 | 73 |
|
74 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
74 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref) |
|
75 | 75 |
{ |
76 | 76 |
SetFieldContext *setfield = inlink->dst->priv; |
77 | 77 |
|
78 | 78 |
if (setfield->mode == MODE_PROG) { |
79 |
- picref->video->interlaced = 0; |
|
79 |
+ picref->interlaced_frame = 0; |
|
80 | 80 |
} else if (setfield->mode != MODE_AUTO) { |
81 |
- picref->video->interlaced = 1; |
|
82 |
- picref->video->top_field_first = setfield->mode; |
|
81 |
+ picref->interlaced_frame = 1; |
|
82 |
+ picref->top_field_first = setfield->mode; |
|
83 | 83 |
} |
84 | 84 |
return ff_filter_frame(inlink->dst->outputs[0], picref); |
85 | 85 |
} |
... | ... |
@@ -42,7 +42,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) |
42 | 42 |
return 0; |
43 | 43 |
} |
44 | 44 |
|
45 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
45 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
46 | 46 |
{ |
47 | 47 |
AVFilterContext *ctx = inlink->dst; |
48 | 48 |
ShowInfoContext *showinfo = ctx->priv; |
... | ... |
@@ -51,7 +51,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
51 | 51 |
int i, plane, vsub = desc->log2_chroma_h; |
52 | 52 |
|
53 | 53 |
for (plane = 0; plane < 4 && frame->data[plane]; plane++) { |
54 |
- int64_t linesize = av_image_get_linesize(frame->format, frame->video->w, plane); |
|
54 |
+ int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane); |
|
55 | 55 |
uint8_t *data = frame->data[plane]; |
56 | 56 |
int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h; |
57 | 57 |
|
... | ... |
@@ -70,14 +70,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
70 | 70 |
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c " |
71 | 71 |
"checksum:%08X plane_checksum:[%08X", |
72 | 72 |
showinfo->frame, |
73 |
- av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), frame->pos, |
|
73 |
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame), |
|
74 | 74 |
desc->name, |
75 |
- frame->video->sample_aspect_ratio.num, frame->video->sample_aspect_ratio.den, |
|
76 |
- frame->video->w, frame->video->h, |
|
77 |
- !frame->video->interlaced ? 'P' : /* Progressive */ |
|
78 |
- frame->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
79 |
- frame->video->key_frame, |
|
80 |
- av_get_picture_type_char(frame->video->pict_type), |
|
75 |
+ frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den, |
|
76 |
+ frame->width, frame->height, |
|
77 |
+ !frame->interlaced_frame ? 'P' : /* Progressive */ |
|
78 |
+ frame->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
|
79 |
+ frame->key_frame, |
|
80 |
+ av_get_picture_type_char(frame->pict_type), |
|
81 | 81 |
checksum, plane_checksum[0]); |
82 | 82 |
|
83 | 83 |
for (plane = 1; plane < 4 && frame->data[plane]; plane++) |
... | ... |
@@ -94,7 +94,6 @@ static const AVFilterPad avfilter_vf_showinfo_inputs[] = { |
94 | 94 |
.type = AVMEDIA_TYPE_VIDEO, |
95 | 95 |
.get_video_buffer = ff_null_get_video_buffer, |
96 | 96 |
.filter_frame = filter_frame, |
97 |
- .min_perms = AV_PERM_READ, |
|
98 | 97 |
}, |
99 | 98 |
{ NULL } |
100 | 99 |
}; |
... | ... |
@@ -246,20 +246,20 @@ static void blur(uint8_t *dst, const int dst_linesize, |
246 | 246 |
} |
247 | 247 |
} |
248 | 248 |
|
249 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
|
249 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
|
250 | 250 |
{ |
251 | 251 |
SmartblurContext *sblur = inlink->dst->priv; |
252 | 252 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
253 |
- AVFilterBufferRef *outpic; |
|
253 |
+ AVFrame *outpic; |
|
254 | 254 |
int cw = inlink->w >> sblur->hsub; |
255 | 255 |
int ch = inlink->h >> sblur->vsub; |
256 | 256 |
|
257 |
- outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
257 |
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
258 | 258 |
if (!outpic) { |
259 |
- avfilter_unref_bufferp(&inpic); |
|
259 |
+ av_frame_free(&inpic); |
|
260 | 260 |
return AVERROR(ENOMEM); |
261 | 261 |
} |
262 |
- avfilter_copy_buffer_ref_props(outpic, inpic); |
|
262 |
+ av_frame_copy_props(outpic, inpic); |
|
263 | 263 |
|
264 | 264 |
blur(outpic->data[0], outpic->linesize[0], |
265 | 265 |
inpic->data[0], inpic->linesize[0], |
... | ... |
@@ -277,7 +277,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) |
277 | 277 |
sblur->chroma.filter_context); |
278 | 278 |
} |
279 | 279 |
|
280 |
- avfilter_unref_bufferp(&inpic); |
|
280 |
+ av_frame_free(&inpic); |
|
281 | 281 |
return ff_filter_frame(outlink, outpic); |
282 | 282 |
} |
283 | 283 |
|
... | ... |
@@ -339,24 +339,22 @@ static inline uint8_t ana_convert(const int *coeff, uint8_t *left, uint8_t *righ |
339 | 339 |
return av_clip_uint8(sum >> 16); |
340 | 340 |
} |
341 | 341 |
|
342 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
342 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
343 | 343 |
{ |
344 | 344 |
AVFilterContext *ctx = inlink->dst; |
345 | 345 |
Stereo3DContext *s = ctx->priv; |
346 | 346 |
AVFilterLink *outlink = ctx->outputs[0]; |
347 |
- AVFilterBufferRef *out; |
|
347 |
+ AVFrame *out; |
|
348 | 348 |
int out_off_left, out_off_right; |
349 | 349 |
int in_off_left, in_off_right; |
350 | 350 |
int ret; |
351 | 351 |
|
352 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
352 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
353 | 353 |
if (!out) { |
354 |
- avfilter_unref_bufferp(&inpicref); |
|
354 |
+ av_frame_free(&inpicref); |
|
355 | 355 |
return AVERROR(ENOMEM); |
356 | 356 |
} |
357 |
- |
|
358 |
- out->pts = inpicref->pts; |
|
359 |
- out->pos = inpicref->pos; |
|
357 |
+ av_frame_copy_props(out, inpicref); |
|
360 | 358 |
|
361 | 359 |
in_off_left = s->in.row_left * inpicref->linesize[0] + s->in.off_left; |
362 | 360 |
in_off_right = s->in.row_right * inpicref->linesize[0] + s->in.off_right; |
... | ... |
@@ -432,7 +430,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
432 | 432 |
} |
433 | 433 |
|
434 | 434 |
ret = ff_filter_frame(outlink, out); |
435 |
- avfilter_unref_bufferp(&inpicref); |
|
435 |
+ av_frame_free(&inpicref); |
|
436 | 436 |
if (ret < 0) |
437 | 437 |
return ret; |
438 | 438 |
return 0; |
... | ... |
@@ -451,7 +449,6 @@ static const AVFilterPad stereo3d_inputs[] = { |
451 | 451 |
.type = AVMEDIA_TYPE_VIDEO, |
452 | 452 |
.get_video_buffer = ff_null_get_video_buffer, |
453 | 453 |
.filter_frame = filter_frame, |
454 |
- .min_perms = AV_PERM_READ, |
|
455 | 454 |
}, |
456 | 455 |
{ NULL } |
457 | 456 |
}; |
... | ... |
@@ -461,7 +458,6 @@ static const AVFilterPad stereo3d_outputs[] = { |
461 | 461 |
.name = "default", |
462 | 462 |
.type = AVMEDIA_TYPE_VIDEO, |
463 | 463 |
.config_props = config_output, |
464 |
- .min_perms = AV_PERM_WRITE, |
|
465 | 464 |
}, |
466 | 465 |
{ NULL } |
467 | 466 |
}; |
... | ... |
@@ -158,7 +158,7 @@ static int config_input(AVFilterLink *inlink) |
158 | 158 |
#define AB(c) (((c)>>8) &0xFF) |
159 | 159 |
#define AA(c) ((0xFF-c) &0xFF) |
160 | 160 |
|
161 |
-static void overlay_ass_image(AssContext *ass, AVFilterBufferRef *picref, |
|
161 |
+static void overlay_ass_image(AssContext *ass, AVFrame *picref, |
|
162 | 162 |
const ASS_Image *image) |
163 | 163 |
{ |
164 | 164 |
for (; image; image = image->next) { |
... | ... |
@@ -167,13 +167,13 @@ static void overlay_ass_image(AssContext *ass, AVFilterBufferRef *picref, |
167 | 167 |
ff_draw_color(&ass->draw, &color, rgba_color); |
168 | 168 |
ff_blend_mask(&ass->draw, &color, |
169 | 169 |
picref->data, picref->linesize, |
170 |
- picref->video->w, picref->video->h, |
|
170 |
+ picref->width, picref->height, |
|
171 | 171 |
image->bitmap, image->stride, image->w, image->h, |
172 | 172 |
3, 0, image->dst_x, image->dst_y); |
173 | 173 |
} |
174 | 174 |
} |
175 | 175 |
|
176 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
176 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref) |
|
177 | 177 |
{ |
178 | 178 |
AVFilterContext *ctx = inlink->dst; |
179 | 179 |
AVFilterLink *outlink = ctx->outputs[0]; |
... | ... |
@@ -197,7 +197,7 @@ static const AVFilterPad ass_inputs[] = { |
197 | 197 |
.type = AVMEDIA_TYPE_VIDEO, |
198 | 198 |
.filter_frame = filter_frame, |
199 | 199 |
.config_props = config_input, |
200 |
- .min_perms = AV_PERM_READ | AV_PERM_WRITE, |
|
200 |
+ .needs_writable = 1, |
|
201 | 201 |
}, |
202 | 202 |
{ NULL } |
203 | 203 |
}; |
... | ... |
@@ -303,23 +303,23 @@ static int config_output(AVFilterLink *outlink) |
303 | 303 |
return 0; |
304 | 304 |
} |
305 | 305 |
|
306 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) |
|
306 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) |
|
307 | 307 |
{ |
308 | 308 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
309 |
- AVFilterBufferRef *outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
309 |
+ AVFrame *outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
310 | 310 |
if (!outpicref) { |
311 |
- avfilter_unref_bufferp(&inpicref); |
|
311 |
+ av_frame_free(&inpicref); |
|
312 | 312 |
return AVERROR(ENOMEM); |
313 | 313 |
} |
314 |
- avfilter_copy_buffer_ref_props(outpicref, inpicref); |
|
315 |
- outpicref->video->w = outlink->w; |
|
316 |
- outpicref->video->h = outlink->h; |
|
314 |
+ av_frame_copy_props(outpicref, inpicref); |
|
315 |
+ outpicref->width = outlink->w; |
|
316 |
+ outpicref->height = outlink->h; |
|
317 | 317 |
|
318 | 318 |
super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0], |
319 | 319 |
outpicref->data[0], outpicref->linesize[0], |
320 | 320 |
inlink->w, inlink->h); |
321 | 321 |
|
322 |
- avfilter_unref_bufferp(&inpicref); |
|
322 |
+ av_frame_free(&inpicref); |
|
323 | 323 |
return ff_filter_frame(outlink, outpicref); |
324 | 324 |
} |
325 | 325 |
|
... | ... |
@@ -329,7 +329,6 @@ static const AVFilterPad super2xsai_inputs[] = { |
329 | 329 |
.type = AVMEDIA_TYPE_VIDEO, |
330 | 330 |
.config_props = config_input, |
331 | 331 |
.filter_frame = filter_frame, |
332 |
- .min_perms = AV_PERM_READ, |
|
333 | 332 |
}, |
334 | 333 |
{ NULL } |
335 | 334 |
}; |
... | ... |
@@ -29,23 +29,25 @@ |
29 | 29 |
#include "internal.h" |
30 | 30 |
#include "video.h" |
31 | 31 |
|
32 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, |
|
33 |
- int w, int h) |
|
32 |
+static void do_swap(AVFrame *frame) |
|
34 | 33 |
{ |
35 |
- AVFilterBufferRef *picref = |
|
36 |
- ff_default_get_video_buffer(link, perms, w, h); |
|
37 |
- |
|
38 |
- FFSWAP(uint8_t*, picref->data[1], picref->data[2]); |
|
39 |
- FFSWAP(int, picref->linesize[1], picref->linesize[2]); |
|
34 |
+ FFSWAP(uint8_t*, frame->data[1], frame->data[2]); |
|
35 |
+ FFSWAP(int, frame->linesize[1], frame->linesize[2]); |
|
36 |
+ FFSWAP(uint8_t*, frame->base[1], frame->base[2]); |
|
37 |
+ FFSWAP(uint64_t, frame->error[1], frame->error[2]); |
|
38 |
+ FFSWAP(AVBufferRef*, frame->buf[1], frame->buf[2]); |
|
39 |
+} |
|
40 | 40 |
|
41 |
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h) |
|
42 |
+{ |
|
43 |
+ AVFrame *picref = ff_default_get_video_buffer(link, w, h); |
|
44 |
+ do_swap(picref); |
|
41 | 45 |
return picref; |
42 | 46 |
} |
43 | 47 |
|
44 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *inpicref) |
|
48 |
+static int filter_frame(AVFilterLink *link, AVFrame *inpicref) |
|
45 | 49 |
{ |
46 |
- FFSWAP(uint8_t*, inpicref->data[1], inpicref->data[2]); |
|
47 |
- FFSWAP(int, inpicref->linesize[1], inpicref->linesize[2]); |
|
48 |
- |
|
50 |
+ do_swap(inpicref); |
|
49 | 51 |
return ff_filter_frame(link->dst->outputs[0], inpicref); |
50 | 52 |
} |
51 | 53 |
|
... | ... |
@@ -33,7 +33,7 @@ |
33 | 33 |
#define HIST_SIZE (3*256) |
34 | 34 |
|
35 | 35 |
struct thumb_frame { |
36 |
- AVFilterBufferRef *buf; ///< cached frame |
|
36 |
+ AVFrame *buf; ///< cached frame |
|
37 | 37 |
int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame |
38 | 38 |
}; |
39 | 39 |
|
... | ... |
@@ -86,14 +86,14 @@ static double frame_sum_square_err(const int *hist, const double *median) |
86 | 86 |
return sum_sq_err; |
87 | 87 |
} |
88 | 88 |
|
89 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
|
89 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame) |
|
90 | 90 |
{ |
91 | 91 |
int i, j, best_frame_idx = 0; |
92 | 92 |
double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1; |
93 | 93 |
AVFilterContext *ctx = inlink->dst; |
94 | 94 |
ThumbContext *thumb = ctx->priv; |
95 | 95 |
AVFilterLink *outlink = ctx->outputs[0]; |
96 |
- AVFilterBufferRef *picref; |
|
96 |
+ AVFrame *picref; |
|
97 | 97 |
int *hist = thumb->frames[thumb->n].histogram; |
98 | 98 |
const uint8_t *p = frame->data[0]; |
99 | 99 |
|
... | ... |
@@ -135,7 +135,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) |
135 | 135 |
memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram)); |
136 | 136 |
if (i == best_frame_idx) |
137 | 137 |
continue; |
138 |
- avfilter_unref_bufferp(&thumb->frames[i].buf); |
|
138 |
+ av_frame_unref(thumb->frames[i].buf); |
|
139 | 139 |
} |
140 | 140 |
thumb->n = 0; |
141 | 141 |
|
... | ... |
@@ -152,7 +152,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
152 | 152 |
int i; |
153 | 153 |
ThumbContext *thumb = ctx->priv; |
154 | 154 |
for (i = 0; i < thumb->n_frames && thumb->frames[i].buf; i++) |
155 |
- avfilter_unref_bufferp(&thumb->frames[i].buf); |
|
155 |
+ av_frame_unref(thumb->frames[i].buf); |
|
156 | 156 |
av_freep(&thumb->frames); |
157 | 157 |
} |
158 | 158 |
|
... | ... |
@@ -207,7 +207,6 @@ static const AVFilterPad thumbnail_inputs[] = { |
207 | 207 |
.name = "default", |
208 | 208 |
.type = AVMEDIA_TYPE_VIDEO, |
209 | 209 |
.get_video_buffer = ff_null_get_video_buffer, |
210 |
- .min_perms = AV_PERM_PRESERVE, |
|
211 | 210 |
.filter_frame = filter_frame, |
212 | 211 |
}, |
213 | 212 |
{ NULL } |
... | ... |
@@ -40,7 +40,7 @@ typedef struct { |
40 | 40 |
unsigned nb_frames; |
41 | 41 |
FFDrawContext draw; |
42 | 42 |
FFDrawColor blank; |
43 |
- AVFilterBufferRef *out_ref; |
|
43 |
+ AVFrame *out_ref; |
|
44 | 44 |
} TileContext; |
45 | 45 |
|
46 | 46 |
#define REASONABLE_SIZE 1024 |
... | ... |
@@ -138,7 +138,7 @@ static void get_current_tile_pos(AVFilterContext *ctx, unsigned *x, unsigned *y) |
138 | 138 |
*y = tile->margin + (inlink->h + tile->padding) * ty; |
139 | 139 |
} |
140 | 140 |
|
141 |
-static void draw_blank_frame(AVFilterContext *ctx, AVFilterBufferRef *out_buf) |
|
141 |
+static void draw_blank_frame(AVFilterContext *ctx, AVFrame *out_buf) |
|
142 | 142 |
{ |
143 | 143 |
TileContext *tile = ctx->priv; |
144 | 144 |
AVFilterLink *inlink = ctx->inputs[0]; |
... | ... |
@@ -154,7 +154,7 @@ static int end_last_frame(AVFilterContext *ctx) |
154 | 154 |
{ |
155 | 155 |
TileContext *tile = ctx->priv; |
156 | 156 |
AVFilterLink *outlink = ctx->outputs[0]; |
157 |
- AVFilterBufferRef *out_buf = tile->out_ref; |
|
157 |
+ AVFrame *out_buf = tile->out_ref; |
|
158 | 158 |
int ret; |
159 | 159 |
|
160 | 160 |
while (tile->current < tile->nb_frames) |
... | ... |
@@ -168,7 +168,7 @@ static int end_last_frame(AVFilterContext *ctx) |
168 | 168 |
* buffers are fed to filter_frame in the order they were obtained from |
169 | 169 |
* get_buffer (think B-frames). */ |
170 | 170 |
|
171 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
171 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref) |
|
172 | 172 |
{ |
173 | 173 |
AVFilterContext *ctx = inlink->dst; |
174 | 174 |
TileContext *tile = ctx->priv; |
... | ... |
@@ -176,13 +176,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
176 | 176 |
unsigned x0, y0; |
177 | 177 |
|
178 | 178 |
if (!tile->current) { |
179 |
- tile->out_ref = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
180 |
- outlink->w, outlink->h); |
|
179 |
+ tile->out_ref = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
181 | 180 |
if (!tile->out_ref) |
182 | 181 |
return AVERROR(ENOMEM); |
183 |
- avfilter_copy_buffer_ref_props(tile->out_ref, picref); |
|
184 |
- tile->out_ref->video->w = outlink->w; |
|
185 |
- tile->out_ref->video->h = outlink->h; |
|
182 |
+ av_frame_copy_props(tile->out_ref, picref); |
|
183 |
+ tile->out_ref->width = outlink->w; |
|
184 |
+ tile->out_ref->height = outlink->h; |
|
186 | 185 |
|
187 | 186 |
/* fill surface once for margin/padding */ |
188 | 187 |
if (tile->margin || tile->padding) |
... | ... |
@@ -198,7 +197,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
198 | 198 |
picref->data, picref->linesize, |
199 | 199 |
x0, y0, 0, 0, inlink->w, inlink->h); |
200 | 200 |
|
201 |
- avfilter_unref_bufferp(&picref); |
|
201 |
+ av_frame_free(&picref); |
|
202 | 202 |
if (++tile->current == tile->nb_frames) |
203 | 203 |
return end_last_frame(ctx); |
204 | 204 |
|
... | ... |
@@ -230,7 +229,6 @@ static const AVFilterPad tile_inputs[] = { |
230 | 230 |
.name = "default", |
231 | 231 |
.type = AVMEDIA_TYPE_VIDEO, |
232 | 232 |
.filter_frame = filter_frame, |
233 |
- .min_perms = AV_PERM_READ, |
|
234 | 233 |
}, |
235 | 234 |
{ NULL } |
236 | 235 |
}; |
... | ... |
@@ -48,8 +48,8 @@ typedef struct { |
48 | 48 |
int flags; ///< flags affecting interlacing algorithm |
49 | 49 |
int frame; ///< number of the output frame |
50 | 50 |
int vsub; ///< chroma vertical subsampling |
51 |
- AVFilterBufferRef *cur; |
|
52 |
- AVFilterBufferRef *next; |
|
51 |
+ AVFrame *cur; |
|
52 |
+ AVFrame *next; |
|
53 | 53 |
uint8_t *black_data[4]; ///< buffer used to fill padded lines |
54 | 54 |
int black_linesize[4]; |
55 | 55 |
} TInterlaceContext; |
... | ... |
@@ -112,8 +112,8 @@ static av_cold void uninit(AVFilterContext *ctx) |
112 | 112 |
{ |
113 | 113 |
TInterlaceContext *tinterlace = ctx->priv; |
114 | 114 |
|
115 |
- avfilter_unref_bufferp(&tinterlace->cur ); |
|
116 |
- avfilter_unref_bufferp(&tinterlace->next); |
|
115 |
+ av_frame_free(&tinterlace->cur ); |
|
116 |
+ av_frame_free(&tinterlace->next); |
|
117 | 117 |
|
118 | 118 |
av_opt_free(tinterlace); |
119 | 119 |
av_freep(&tinterlace->black_data[0]); |
... | ... |
@@ -228,15 +228,15 @@ void copy_picture_field(uint8_t *dst[4], int dst_linesize[4], |
228 | 228 |
} |
229 | 229 |
} |
230 | 230 |
|
231 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
|
231 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref) |
|
232 | 232 |
{ |
233 | 233 |
AVFilterContext *ctx = inlink->dst; |
234 | 234 |
AVFilterLink *outlink = ctx->outputs[0]; |
235 | 235 |
TInterlaceContext *tinterlace = ctx->priv; |
236 |
- AVFilterBufferRef *cur, *next, *out; |
|
236 |
+ AVFrame *cur, *next, *out; |
|
237 | 237 |
int field, tff, ret; |
238 | 238 |
|
239 |
- avfilter_unref_buffer(tinterlace->cur); |
|
239 |
+ av_frame_free(&tinterlace->cur); |
|
240 | 240 |
tinterlace->cur = tinterlace->next; |
241 | 241 |
tinterlace->next = picref; |
242 | 242 |
|
... | ... |
@@ -249,13 +249,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
249 | 249 |
switch (tinterlace->mode) { |
250 | 250 |
case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into |
251 | 251 |
* the lower field, generating a double-height video at half framerate */ |
252 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
252 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
253 | 253 |
if (!out) |
254 | 254 |
return AVERROR(ENOMEM); |
255 |
- avfilter_copy_buffer_ref_props(out, cur); |
|
256 |
- out->video->h = outlink->h; |
|
257 |
- out->video->interlaced = 1; |
|
258 |
- out->video->top_field_first = 1; |
|
255 |
+ av_frame_copy_props(out, cur); |
|
256 |
+ out->height = outlink->h; |
|
257 |
+ out->interlaced_frame = 1; |
|
258 |
+ out->top_field_first = 1; |
|
259 | 259 |
|
260 | 260 |
/* write odd frame lines into the upper field of the new frame */ |
261 | 261 |
copy_picture_field(out->data, out->linesize, |
... | ... |
@@ -267,20 +267,20 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
267 | 267 |
(const uint8_t **)next->data, next->linesize, |
268 | 268 |
inlink->format, inlink->w, inlink->h, |
269 | 269 |
FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags); |
270 |
- avfilter_unref_bufferp(&tinterlace->next); |
|
270 |
+ av_frame_free(&tinterlace->next); |
|
271 | 271 |
break; |
272 | 272 |
|
273 | 273 |
case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */ |
274 | 274 |
case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */ |
275 |
- out = avfilter_ref_buffer(tinterlace->mode == MODE_DROP_EVEN ? cur : next, AV_PERM_READ); |
|
276 |
- avfilter_unref_bufferp(&tinterlace->next); |
|
275 |
+ out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next); |
|
276 |
+ av_frame_free(&tinterlace->next); |
|
277 | 277 |
break; |
278 | 278 |
|
279 | 279 |
case MODE_PAD: /* expand each frame to double height, but pad alternate |
280 | 280 |
* lines with black; framerate unchanged */ |
281 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
282 |
- avfilter_copy_buffer_ref_props(out, cur); |
|
283 |
- out->video->h = outlink->h; |
|
281 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
282 |
+ av_frame_copy_props(out, cur); |
|
283 |
+ out->height = outlink->h; |
|
284 | 284 |
|
285 | 285 |
field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER; |
286 | 286 |
/* copy upper and lower fields */ |
... | ... |
@@ -300,12 +300,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
300 | 300 |
case MODE_INTERLEAVE_TOP: /* top field first */ |
301 | 301 |
case MODE_INTERLEAVE_BOTTOM: /* bottom field first */ |
302 | 302 |
tff = tinterlace->mode == MODE_INTERLEAVE_TOP; |
303 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
303 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
304 | 304 |
if (!out) |
305 | 305 |
return AVERROR(ENOMEM); |
306 |
- avfilter_copy_buffer_ref_props(out, cur); |
|
307 |
- out->video->interlaced = 1; |
|
308 |
- out->video->top_field_first = tff; |
|
306 |
+ av_frame_copy_props(out, cur); |
|
307 |
+ out->interlaced_frame = 1; |
|
308 |
+ out->top_field_first = tff; |
|
309 | 309 |
|
310 | 310 |
/* copy upper/lower field from cur */ |
311 | 311 |
copy_picture_field(out->data, out->linesize, |
... | ... |
@@ -319,25 +319,25 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) |
319 | 319 |
inlink->format, inlink->w, inlink->h, |
320 | 320 |
tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER, |
321 | 321 |
tinterlace->flags); |
322 |
- avfilter_unref_bufferp(&tinterlace->next); |
|
322 |
+ av_frame_free(&tinterlace->next); |
|
323 | 323 |
break; |
324 | 324 |
case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */ |
325 | 325 |
/* output current frame first */ |
326 |
- out = avfilter_ref_buffer(cur, ~AV_PERM_WRITE); |
|
326 |
+ out = av_frame_clone(cur); |
|
327 | 327 |
if (!out) |
328 | 328 |
return AVERROR(ENOMEM); |
329 |
- out->video->interlaced = 1; |
|
329 |
+ out->interlaced_frame = 1; |
|
330 | 330 |
|
331 | 331 |
if ((ret = ff_filter_frame(outlink, out)) < 0) |
332 | 332 |
return ret; |
333 | 333 |
|
334 | 334 |
/* output mix of current and next frame */ |
335 |
- tff = next->video->top_field_first; |
|
336 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
335 |
+ tff = next->top_field_first; |
|
336 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
337 | 337 |
if (!out) |
338 | 338 |
return AVERROR(ENOMEM); |
339 |
- avfilter_copy_buffer_ref_props(out, next); |
|
340 |
- out->video->interlaced = 1; |
|
339 |
+ av_frame_copy_props(out, next); |
|
340 |
+ out->interlaced_frame = 1; |
|
341 | 341 |
|
342 | 342 |
/* write current frame second field lines into the second field of the new frame */ |
343 | 343 |
copy_picture_field(out->data, out->linesize, |
... | ... |
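Both the MODE_DROP_* cases and INTERLACEX2 above replace avfilter_ref_buffer(ref, ~AV_PERM_WRITE) with av_frame_clone(), which returns a second AVFrame sharing the same underlying data buffers and may return NULL. A sketch of that reference handling, with illustrative names:

    #include "libavutil/frame.h"
    #include "libavutil/error.h"
    #include "avfilter.h"
    #include "internal.h"

    static int forward_kept_frame(AVFilterLink *outlink, AVFrame **kept)
    {
        AVFrame *out = av_frame_clone(*kept);   /* shares *kept's buffers */
        if (!out)
            return AVERROR(ENOMEM);
        /* *kept stays cached in the filter; out is owned by the caller,
         * handed downstream and freed there independently. */
        return ff_filter_frame(outlink, out);
    }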
@@ -148,47 +148,47 @@ static int config_props_output(AVFilterLink *outlink) |
148 | 148 |
return 0; |
149 | 149 |
} |
150 | 150 |
|
151 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) |
|
151 |
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) |
|
152 | 152 |
{ |
153 | 153 |
TransContext *trans = inlink->dst->priv; |
154 | 154 |
|
155 | 155 |
return trans->passthrough ? |
156 |
- ff_null_get_video_buffer (inlink, perms, w, h) : |
|
157 |
- ff_default_get_video_buffer(inlink, perms, w, h); |
|
156 |
+ ff_null_get_video_buffer (inlink, w, h) : |
|
157 |
+ ff_default_get_video_buffer(inlink, w, h); |
|
158 | 158 |
} |
159 | 159 |
|
160 |
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
|
160 |
+static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
161 | 161 |
{ |
162 | 162 |
TransContext *trans = inlink->dst->priv; |
163 | 163 |
AVFilterLink *outlink = inlink->dst->outputs[0]; |
164 |
- AVFilterBufferRef *out; |
|
164 |
+ AVFrame *out; |
|
165 | 165 |
int plane; |
166 | 166 |
|
167 | 167 |
if (trans->passthrough) |
168 | 168 |
return ff_filter_frame(outlink, in); |
169 | 169 |
|
170 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
170 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
171 | 171 |
if (!out) { |
172 |
- avfilter_unref_bufferp(&in); |
|
172 |
+ av_frame_free(&in); |
|
173 | 173 |
return AVERROR(ENOMEM); |
174 | 174 |
} |
175 | 175 |
|
176 | 176 |
out->pts = in->pts; |
177 | 177 |
|
178 |
- if (in->video->sample_aspect_ratio.num == 0) { |
|
179 |
- out->video->sample_aspect_ratio = in->video->sample_aspect_ratio; |
|
178 |
+ if (in->sample_aspect_ratio.num == 0) { |
|
179 |
+ out->sample_aspect_ratio = in->sample_aspect_ratio; |
|
180 | 180 |
} else { |
181 |
- out->video->sample_aspect_ratio.num = in->video->sample_aspect_ratio.den; |
|
182 |
- out->video->sample_aspect_ratio.den = in->video->sample_aspect_ratio.num; |
|
181 |
+ out->sample_aspect_ratio.num = in->sample_aspect_ratio.den; |
|
182 |
+ out->sample_aspect_ratio.den = in->sample_aspect_ratio.num; |
|
183 | 183 |
} |
184 | 184 |
|
185 | 185 |
for (plane = 0; out->data[plane]; plane++) { |
186 | 186 |
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0; |
187 | 187 |
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0; |
188 | 188 |
int pixstep = trans->pixsteps[plane]; |
189 |
- int inh = in->video->h>>vsub; |
|
190 |
- int outw = out->video->w>>hsub; |
|
191 |
- int outh = out->video->h>>vsub; |
|
189 |
+ int inh = in->height >> vsub; |
|
190 |
+ int outw = out->width >> hsub; |
|
191 |
+ int outh = out->height >> vsub; |
|
192 | 192 |
uint8_t *dst, *src; |
193 | 193 |
int dstlinesize, srclinesize; |
194 | 194 |
int x, y; |
... | ... |
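The sample-aspect-ratio handling kept by the transpose hunk inverts the ratio unless it is unset. Sketched in isolation (hypothetical helper, same logic as the code above):

    #include "libavutil/rational.h"
    #include "libavutil/frame.h"

    static void transpose_sar(AVFrame *out, const AVFrame *in)
    {
        if (in->sample_aspect_ratio.num == 0) {
            out->sample_aspect_ratio = in->sample_aspect_ratio;   /* keep "unknown" */
        } else {
            out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
            out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
        }
    }

For example, a 16:15 input SAR becomes 15:16 on the transposed output, while num == 0 is passed through as "unset".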
@@ -243,7 +243,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) |
243 | 243 |
} |
244 | 244 |
} |
245 | 245 |
|
246 |
- avfilter_unref_bufferp(&in); |
|
246 |
+ av_frame_free(&in); |
|
247 | 247 |
return ff_filter_frame(outlink, out); |
248 | 248 |
} |
249 | 249 |
|
... | ... |
@@ -253,7 +253,6 @@ static const AVFilterPad avfilter_vf_transpose_inputs[] = { |
253 | 253 |
.type = AVMEDIA_TYPE_VIDEO, |
254 | 254 |
.get_video_buffer= get_video_buffer, |
255 | 255 |
.filter_frame = filter_frame, |
256 |
- .min_perms = AV_PERM_READ, |
|
257 | 256 |
}, |
258 | 257 |
{ NULL } |
259 | 258 |
}; |
... | ... |
@@ -258,26 +258,26 @@ static av_cold void uninit(AVFilterContext *ctx) |
258 | 258 |
av_opt_free(unsharp); |
259 | 259 |
} |
260 | 260 |
|
261 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) |
|
261 |
+static int filter_frame(AVFilterLink *link, AVFrame *in) |
|
262 | 262 |
{ |
263 | 263 |
UnsharpContext *unsharp = link->dst->priv; |
264 | 264 |
AVFilterLink *outlink = link->dst->outputs[0]; |
265 |
- AVFilterBufferRef *out; |
|
265 |
+ AVFrame *out; |
|
266 | 266 |
int cw = SHIFTUP(link->w, unsharp->hsub); |
267 | 267 |
int ch = SHIFTUP(link->h, unsharp->vsub); |
268 | 268 |
|
269 |
- out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); |
|
269 |
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
270 | 270 |
if (!out) { |
271 |
- avfilter_unref_bufferp(&in); |
|
271 |
+ av_frame_free(&in); |
|
272 | 272 |
return AVERROR(ENOMEM); |
273 | 273 |
} |
274 |
- avfilter_copy_buffer_ref_props(out, in); |
|
274 |
+ av_frame_copy_props(out, in); |
|
275 | 275 |
|
276 | 276 |
apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma); |
277 | 277 |
apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma); |
278 | 278 |
apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma); |
279 | 279 |
|
280 |
- avfilter_unref_bufferp(&in); |
|
280 |
+ av_frame_free(&in); |
|
281 | 281 |
return ff_filter_frame(outlink, out); |
282 | 282 |
} |
283 | 283 |
|
... | ... |
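The unsharp change shows the shape most filter_frame() callbacks take after this merge: request an output buffer, copy the frame properties, process, free the input, and push the output. A compact sketch of that skeleton (process_plane() is a placeholder, not an FFmpeg function):

    #include <stdint.h>
    #include "libavutil/frame.h"
    #include "libavutil/error.h"
    #include "avfilter.h"
    #include "internal.h"
    #include "video.h"

    static void process_plane(uint8_t *dst, int dst_linesize,
                              const uint8_t *src, int src_linesize,
                              int w, int h) { /* ... */ }

    static int filter_frame_sketch(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);

        if (!out) {
            av_frame_free(&in);               /* we own the input, release it on error */
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);         /* pts, sar, etc. */

        process_plane(out->data[0], out->linesize[0],
                      in->data[0],  in->linesize[0], inlink->w, inlink->h);

        av_frame_free(&in);                   /* input no longer needed */
        return ff_filter_frame(outlink, out); /* ownership of out moves downstream */
    }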
@@ -287,7 +287,6 @@ static const AVFilterPad avfilter_vf_unsharp_inputs[] = { |
287 | 287 |
.type = AVMEDIA_TYPE_VIDEO, |
288 | 288 |
.filter_frame = filter_frame, |
289 | 289 |
.config_props = config_props, |
290 |
- .min_perms = AV_PERM_READ, |
|
291 | 290 |
}, |
292 | 291 |
{ NULL } |
293 | 292 |
}; |
... | ... |
@@ -43,33 +43,29 @@ static int config_input(AVFilterLink *link) |
43 | 43 |
return 0; |
44 | 44 |
} |
45 | 45 |
|
46 |
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, |
|
47 |
- int w, int h) |
|
46 |
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h) |
|
48 | 47 |
{ |
49 | 48 |
FlipContext *flip = link->dst->priv; |
50 |
- AVFilterBufferRef *picref; |
|
49 |
+ AVFrame *frame; |
|
51 | 50 |
int i; |
52 | 51 |
|
53 |
- if (!(perms & AV_PERM_NEG_LINESIZES)) |
|
54 |
- return ff_default_get_video_buffer(link, perms, w, h); |
|
55 |
- |
|
56 |
- picref = ff_get_video_buffer(link->dst->outputs[0], perms, w, h); |
|
57 |
- if (!picref) |
|
52 |
+ frame = ff_get_video_buffer(link->dst->outputs[0], w, h); |
|
53 |
+ if (!frame) |
|
58 | 54 |
return NULL; |
59 | 55 |
|
60 | 56 |
for (i = 0; i < 4; i ++) { |
61 | 57 |
int vsub = i == 1 || i == 2 ? flip->vsub : 0; |
62 | 58 |
|
63 |
- if (picref->data[i]) { |
|
64 |
- picref->data[i] += (((h + (1<<vsub)-1) >> vsub)-1) * picref->linesize[i]; |
|
65 |
- picref->linesize[i] = -picref->linesize[i]; |
|
59 |
+ if (frame->data[i]) { |
|
60 |
+ frame->data[i] += (((h + (1<<vsub) - 1) >> vsub) - 1) * frame->linesize[i]; |
|
61 |
+ frame->linesize[i] = -frame->linesize[i]; |
|
66 | 62 |
} |
67 | 63 |
} |
68 | 64 |
|
69 |
- return picref; |
|
65 |
+ return frame; |
|
70 | 66 |
} |
71 | 67 |
|
72 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
68 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
73 | 69 |
{ |
74 | 70 |
FlipContext *flip = link->dst->priv; |
75 | 71 |
int i; |
... | ... |
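The vflip hunk keeps the zero-copy flip: each plane pointer is moved to its last row and the linesize negated, so readers traverse the image bottom-up without touching any pixels. Sketched on its own (flip_plane() is an illustrative name):

    #include "libavutil/frame.h"

    static void flip_plane(AVFrame *frame, int i, int h, int vsub)
    {
        if (!frame->data[i])
            return;
        /* point at the last row of the (possibly chroma-subsampled) plane */
        frame->data[i] += (((h + (1 << vsub) - 1) >> vsub) - 1) * frame->linesize[i];
        frame->linesize[i] = -frame->linesize[i];   /* walk upwards from now on */
    }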
@@ -31,8 +31,6 @@ |
31 | 31 |
#undef NDEBUG |
32 | 32 |
#include <assert.h> |
33 | 33 |
|
34 |
-#define PERM_RWP AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE |
|
35 |
- |
|
36 | 34 |
#define CHECK(j)\ |
37 | 35 |
{ int score = FFABS(cur[mrefs + off_left + (j)] - cur[prefs + off_left - (j)])\ |
38 | 36 |
+ FFABS(cur[mrefs +(j)] - cur[prefs -(j)])\ |
... | ... |
@@ -167,15 +165,15 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1, |
167 | 167 |
FILTER(w - 3, w) |
168 | 168 |
} |
169 | 169 |
|
170 |
-static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic, |
|
170 |
+static void filter(AVFilterContext *ctx, AVFrame *dstpic, |
|
171 | 171 |
int parity, int tff) |
172 | 172 |
{ |
173 | 173 |
YADIFContext *yadif = ctx->priv; |
174 | 174 |
int y, i; |
175 | 175 |
|
176 | 176 |
for (i = 0; i < yadif->csp->nb_components; i++) { |
177 |
- int w = dstpic->video->w; |
|
178 |
- int h = dstpic->video->h; |
|
177 |
+ int w = dstpic->width; |
|
178 |
+ int h = dstpic->height; |
|
179 | 179 |
int refs = yadif->cur->linesize[i]; |
180 | 180 |
int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8; |
181 | 181 |
int l_edge, l_edge_pix; |
... | ... |
@@ -232,19 +230,19 @@ static int return_frame(AVFilterContext *ctx, int is_second) |
232 | 232 |
int tff, ret; |
233 | 233 |
|
234 | 234 |
if (yadif->parity == -1) { |
235 |
- tff = yadif->cur->video->interlaced ? |
|
236 |
- yadif->cur->video->top_field_first : 1; |
|
235 |
+ tff = yadif->cur->interlaced_frame ? |
|
236 |
+ yadif->cur->top_field_first : 1; |
|
237 | 237 |
} else { |
238 | 238 |
tff = yadif->parity ^ 1; |
239 | 239 |
} |
240 | 240 |
|
241 | 241 |
if (is_second) { |
242 |
- yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h); |
|
242 |
+ yadif->out = ff_get_video_buffer(link, link->w, link->h); |
|
243 | 243 |
if (!yadif->out) |
244 | 244 |
return AVERROR(ENOMEM); |
245 | 245 |
|
246 |
- avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); |
|
247 |
- yadif->out->video->interlaced = 0; |
|
246 |
+ av_frame_copy_props(yadif->out, yadif->cur); |
|
247 |
+ yadif->out->interlaced_frame = 0; |
|
248 | 248 |
} |
249 | 249 |
|
250 | 250 |
filter(ctx, yadif->out, tff ^ !is_second, tff); |
... | ... |
@@ -265,47 +263,46 @@ static int return_frame(AVFilterContext *ctx, int is_second) |
265 | 265 |
return ret; |
266 | 266 |
} |
267 | 267 |
|
268 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) |
|
268 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
269 | 269 |
{ |
270 | 270 |
AVFilterContext *ctx = link->dst; |
271 | 271 |
YADIFContext *yadif = ctx->priv; |
272 | 272 |
|
273 |
- av_assert0(picref); |
|
273 |
+ av_assert0(frame); |
|
274 | 274 |
|
275 | 275 |
if (yadif->frame_pending) |
276 | 276 |
return_frame(ctx, 1); |
277 | 277 |
|
278 | 278 |
if (yadif->prev) |
279 |
- avfilter_unref_buffer(yadif->prev); |
|
279 |
+ av_frame_free(&yadif->prev); |
|
280 | 280 |
yadif->prev = yadif->cur; |
281 | 281 |
yadif->cur = yadif->next; |
282 |
- yadif->next = picref; |
|
282 |
+ yadif->next = frame; |
|
283 | 283 |
|
284 | 284 |
if (!yadif->cur) |
285 | 285 |
return 0; |
286 | 286 |
|
287 |
- if (yadif->deint && !yadif->cur->video->interlaced) { |
|
288 |
- yadif->out = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE); |
|
287 |
+ if (yadif->deint && !yadif->cur->interlaced_frame) { |
|
288 |
+ yadif->out = av_frame_clone(yadif->cur); |
|
289 | 289 |
if (!yadif->out) |
290 | 290 |
return AVERROR(ENOMEM); |
291 | 291 |
|
292 |
- avfilter_unref_bufferp(&yadif->prev); |
|
292 |
+ av_frame_free(&yadif->prev); |
|
293 | 293 |
if (yadif->out->pts != AV_NOPTS_VALUE) |
294 | 294 |
yadif->out->pts *= 2; |
295 | 295 |
return ff_filter_frame(ctx->outputs[0], yadif->out); |
296 | 296 |
} |
297 | 297 |
|
298 | 298 |
if (!yadif->prev && |
299 |
- !(yadif->prev = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE))) |
|
299 |
+ !(yadif->prev = av_frame_clone(yadif->cur))) |
|
300 | 300 |
return AVERROR(ENOMEM); |
301 | 301 |
|
302 |
- yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP, |
|
303 |
- link->w, link->h); |
|
302 |
+ yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h); |
|
304 | 303 |
if (!yadif->out) |
305 | 304 |
return AVERROR(ENOMEM); |
306 | 305 |
|
307 |
- avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); |
|
308 |
- yadif->out->video->interlaced = 0; |
|
306 |
+ av_frame_copy_props(yadif->out, yadif->cur); |
|
307 |
+ yadif->out->interlaced_frame = 0; |
|
309 | 308 |
|
310 | 309 |
if (yadif->out->pts != AV_NOPTS_VALUE) |
311 | 310 |
yadif->out->pts *= 2; |
... | ... |
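yadif's filter_frame() above rotates a three-frame history instead of taking extra references: the oldest frame is freed, the other references shift down, and the new input is stored directly because the callback owns it. A sketch of that rotation (field layout matches YADIFContext, the helper itself is illustrative):

    #include "libavutil/frame.h"

    static void rotate_history(AVFrame **prev, AVFrame **cur, AVFrame **next,
                               AVFrame *in)
    {
        av_frame_free(prev);   /* drop the oldest reference (no-op when NULL) */
        *prev = *cur;
        *cur  = *next;
        *next = in;            /* take ownership of the incoming frame */
    }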
@@ -332,7 +329,7 @@ static int request_frame(AVFilterLink *link) |
332 | 332 |
ret = ff_request_frame(link->src->inputs[0]); |
333 | 333 |
|
334 | 334 |
if (ret == AVERROR_EOF && yadif->cur) { |
335 |
- AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, ~AV_PERM_WRITE); |
|
335 |
+ AVFrame *next = av_frame_clone(yadif->next); |
|
336 | 336 |
|
337 | 337 |
if (!next) |
338 | 338 |
return AVERROR(ENOMEM); |
... | ... |
@@ -379,9 +376,9 @@ static av_cold void uninit(AVFilterContext *ctx) |
379 | 379 |
{ |
380 | 380 |
YADIFContext *yadif = ctx->priv; |
381 | 381 |
|
382 |
- avfilter_unref_bufferp(&yadif->prev); |
|
383 |
- avfilter_unref_bufferp(&yadif->cur ); |
|
384 |
- avfilter_unref_bufferp(&yadif->next); |
|
382 |
+ av_frame_free(&yadif->prev); |
|
383 |
+ av_frame_free(&yadif->cur ); |
|
384 |
+ av_frame_free(&yadif->next); |
|
385 | 385 |
av_opt_free(yadif); |
386 | 386 |
} |
387 | 387 |
|
... | ... |
@@ -24,6 +24,7 @@ |
24 | 24 |
#include <stdio.h> |
25 | 25 |
|
26 | 26 |
#include "libavutil/avassert.h" |
27 |
+#include "libavutil/buffer.h" |
|
27 | 28 |
#include "libavutil/imgutils.h" |
28 | 29 |
#include "libavutil/mem.h" |
29 | 30 |
|
... | ... |
@@ -31,23 +32,21 @@ |
31 | 31 |
#include "internal.h" |
32 | 32 |
#include "video.h" |
33 | 33 |
|
34 |
-AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h) |
|
34 |
+AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h) |
|
35 | 35 |
{ |
36 |
- return ff_get_video_buffer(link->dst->outputs[0], perms, w, h); |
|
36 |
+ return ff_get_video_buffer(link->dst->outputs[0], w, h); |
|
37 | 37 |
} |
38 | 38 |
|
39 |
-AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h) |
|
39 |
+/* TODO: set the buffer's priv member to a context structure for the whole |
|
40 |
+ * filter chain. This will allow for a buffer pool instead of the constant |
|
41 |
+ * alloc & free cycle currently implemented. */ |
|
42 |
+AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h) |
|
40 | 43 |
{ |
41 |
- int linesize[4]; |
|
42 |
- uint8_t *data[4]; |
|
43 |
- int i; |
|
44 |
- AVFilterBufferRef *picref = NULL; |
|
45 |
- AVFilterPool *pool = link->pool; |
|
46 |
- int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE | |
|
47 |
- AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN; |
|
48 |
- |
|
49 |
- av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES))); |
|
44 |
+ AVFrame *frame = av_frame_alloc(); |
|
45 |
+ int ret; |
|
50 | 46 |
|
47 |
+#if 0 //POOL |
|
48 |
+ AVFilterPool *pool = link->pool; |
|
51 | 49 |
if (pool) { |
52 | 50 |
for (i = 0; i < POOL_SIZE; i++) { |
53 | 51 |
picref = pool->pic[i]; |
... | ... |
@@ -71,27 +70,30 @@ AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, in |
71 | 71 |
pool = link->pool = av_mallocz(sizeof(AVFilterPool)); |
72 | 72 |
pool->refcount = 1; |
73 | 73 |
} |
74 |
- |
|
75 |
- // align: +2 is needed for swscaler, +16 to be SIMD-friendly |
|
76 |
- if ((i = av_image_alloc(data, linesize, w, h, link->format, 32)) < 0) |
|
74 |
+#endif |
|
75 |
+ if (!frame) |
|
77 | 76 |
return NULL; |
78 | 77 |
|
79 |
- picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize, |
|
80 |
- full_perms, w, h, link->format); |
|
81 |
- if (!picref) { |
|
82 |
- av_free(data[0]); |
|
83 |
- return NULL; |
|
84 |
- } |
|
78 |
+ frame->width = w; |
|
79 |
+ frame->height = h; |
|
80 |
+ frame->format = link->format; |
|
81 |
+ |
|
82 |
+ ret = av_frame_get_buffer(frame, 32); |
|
83 |
+ if (ret < 0) |
|
84 |
+ av_frame_free(&frame); |
|
85 | 85 |
|
86 |
+#if 0 //POOL |
|
86 | 87 |
memset(data[0], 128, i); |
87 | 88 |
|
88 | 89 |
picref->buf->priv = pool; |
89 | 90 |
picref->buf->free = NULL; |
90 | 91 |
pool->refcount++; |
92 |
+#endif |
|
91 | 93 |
|
92 |
- return picref; |
|
94 |
+ return frame; |
|
93 | 95 |
} |
94 | 96 |
|
97 |
+#if FF_API_AVFILTERBUFFER |
|
95 | 98 |
AVFilterBufferRef * |
96 | 99 |
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, |
97 | 100 |
int w, int h, enum AVPixelFormat format) |
... | ... |
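ff_default_get_video_buffer() now boils down to libavutil's refcounted frame allocation. A stand-alone sketch of the same three steps, using an arbitrary 640x480 YUV420P frame (link with -lavutil; not code from the tree):

    #include <stdio.h>
    #include "libavutil/frame.h"
    #include "libavutil/pixfmt.h"

    int main(void)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return 1;

        frame->width  = 640;
        frame->height = 480;
        frame->format = AV_PIX_FMT_YUV420P;

        /* allocates refcounted AVBufferRefs for the planes, 32-byte aligned */
        if (av_frame_get_buffer(frame, 32) < 0) {
            av_frame_free(&frame);
            return 1;
        }

        printf("linesize[0] = %d\n", frame->linesize[0]);
        av_frame_free(&frame);   /* unrefs the buffers and frees the struct */
        return 0;
    }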
@@ -136,25 +138,20 @@ fail: |
136 | 136 |
av_free(pic); |
137 | 137 |
return NULL; |
138 | 138 |
} |
139 |
+#endif |
|
139 | 140 |
|
140 |
-AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int h) |
|
141 |
+AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h) |
|
141 | 142 |
{ |
142 |
- AVFilterBufferRef *ret = NULL; |
|
143 |
+ AVFrame *ret = NULL; |
|
143 | 144 |
|
144 | 145 |
av_unused char buf[16]; |
145 | 146 |
FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0); |
146 |
- ff_tlog(NULL, " perms:%s w:%d h:%d\n", ff_get_ref_perms_string(buf, sizeof(buf), perms), w, h); |
|
147 | 147 |
|
148 | 148 |
if (link->dstpad->get_video_buffer) |
149 |
- ret = link->dstpad->get_video_buffer(link, perms, w, h); |
|
149 |
+ ret = link->dstpad->get_video_buffer(link, w, h); |
|
150 | 150 |
|
151 | 151 |
if (!ret) |
152 |
- ret = ff_default_get_video_buffer(link, perms, w, h); |
|
153 |
- |
|
154 |
- if (ret) |
|
155 |
- ret->type = AVMEDIA_TYPE_VIDEO; |
|
156 |
- |
|
157 |
- FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, " returning "); ff_tlog_ref(NULL, ret, 1); |
|
152 |
+ ret = ff_default_get_video_buffer(link, w, h); |
|
158 | 153 |
|
159 | 154 |
return ret; |
160 | 155 |
} |
... | ... |
@@ -23,22 +23,19 @@ |
23 | 23 |
|
24 | 24 |
#include "avfilter.h" |
25 | 25 |
|
26 |
-AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, |
|
27 |
- int perms, int w, int h); |
|
28 |
-AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h); |
|
26 |
+AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h); |
|
27 |
+AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h); |
|
29 | 28 |
|
30 | 29 |
/** |
31 | 30 |
* Request a picture buffer with a specific set of permissions. |
32 | 31 |
* |
33 | 32 |
* @param link the output link to the filter from which the buffer will |
34 | 33 |
* be requested |
35 |
- * @param perms the required access permissions |
|
36 | 34 |
* @param w the minimum width of the buffer to allocate |
37 | 35 |
* @param h the minimum height of the buffer to allocate |
38 | 36 |
* @return A reference to the buffer. This must be unreferenced with |
39 | 37 |
* avfilter_unref_buffer when you are finished with it. |
40 | 38 |
*/ |
41 |
-AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, |
|
42 |
- int w, int h); |
|
39 |
+AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h); |
|
43 | 40 |
|
44 | 41 |
#endif /* AVFILTER_VIDEO_H */ |
... | ... |
@@ -20,9 +20,9 @@ |
20 | 20 |
#include "internal.h" |
21 | 21 |
#include "libavutil/internal.h" |
22 | 22 |
|
23 |
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) |
|
23 |
+static int filter_frame(AVFilterLink *link, AVFrame *frame) |
|
24 | 24 |
{ |
25 |
- avfilter_unref_bufferp(&frame); |
|
25 |
+ av_frame_free(&frame); |
|
26 | 26 |
return 0; |
27 | 27 |
} |
28 | 28 |
|
... | ... |
@@ -272,7 +272,7 @@ static void evolve(AVFilterContext *ctx) |
272 | 272 |
cellauto->generation++; |
273 | 273 |
} |
274 | 274 |
|
275 |
-static void fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
275 |
+static void fill_picture(AVFilterContext *ctx, AVFrame *picref) |
|
276 | 276 |
{ |
277 | 277 |
CellAutoContext *cellauto = ctx->priv; |
278 | 278 |
int i, j, k, row_idx = 0; |
... | ... |
@@ -303,9 +303,8 @@ static void fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
303 | 303 |
static int request_frame(AVFilterLink *outlink) |
304 | 304 |
{ |
305 | 305 |
CellAutoContext *cellauto = outlink->src->priv; |
306 |
- AVFilterBufferRef *picref = |
|
307 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE, cellauto->w, cellauto->h); |
|
308 |
- picref->video->sample_aspect_ratio = (AVRational) {1, 1}; |
|
306 |
+ AVFrame *picref = ff_get_video_buffer(outlink, cellauto->w, cellauto->h); |
|
307 |
+ picref->sample_aspect_ratio = (AVRational) {1, 1}; |
|
309 | 308 |
if (cellauto->generation == 0 && cellauto->start_full) { |
310 | 309 |
int i; |
311 | 310 |
for (i = 0; i < cellauto->h-1; i++) |
... | ... |
@@ -315,7 +314,6 @@ static int request_frame(AVFilterLink *outlink) |
315 | 315 |
evolve(outlink->src); |
316 | 316 |
|
317 | 317 |
picref->pts = cellauto->pts++; |
318 |
- picref->pos = -1; |
|
319 | 318 |
|
320 | 319 |
#ifdef DEBUG |
321 | 320 |
show_cellauto_row(outlink->src); |
... | ... |
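The cellauto change, like the other sources in this merge, reduces request_frame() to: get a buffer from the outlink, set the per-frame fields directly on the AVFrame, draw, and push it with ff_filter_frame(); the old picref->pos field is gone. A sketch with placeholder context and drawing names:

    #include "libavutil/frame.h"
    #include "libavutil/error.h"
    #include "avfilter.h"
    #include "internal.h"
    #include "video.h"

    typedef struct MySourceContext { int w, h; int64_t pts; } MySourceContext;
    static void fill_picture(AVFilterContext *ctx, AVFrame *frame) { /* draw */ }

    static int request_frame_sketch(AVFilterLink *outlink)
    {
        MySourceContext *s = outlink->src->priv;
        AVFrame *frame = ff_get_video_buffer(outlink, s->w, s->h);

        if (!frame)
            return AVERROR(ENOMEM);
        frame->sample_aspect_ratio = (AVRational){1, 1};
        frame->pts = s->pts++;          /* no ->pos to set anymore */

        fill_picture(outlink->src, frame);
        return ff_filter_frame(outlink, frame);
    }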
@@ -73,7 +73,7 @@ typedef struct { |
73 | 73 |
uint8_t death_color[4]; |
74 | 74 |
uint8_t mold_color[4]; |
75 | 75 |
AVLFG lfg; |
76 |
- void (*draw)(AVFilterContext*, AVFilterBufferRef*); |
|
76 |
+ void (*draw)(AVFilterContext*, AVFrame*); |
|
77 | 77 |
} LifeContext; |
78 | 78 |
|
79 | 79 |
#define ALIVE_CELL 0xFF |
... | ... |
@@ -375,7 +375,7 @@ static void evolve(AVFilterContext *ctx) |
375 | 375 |
life->buf_idx = !life->buf_idx; |
376 | 376 |
} |
377 | 377 |
|
378 |
-static void fill_picture_monoblack(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
378 |
+static void fill_picture_monoblack(AVFilterContext *ctx, AVFrame *picref) |
|
379 | 379 |
{ |
380 | 380 |
LifeContext *life = ctx->priv; |
381 | 381 |
uint8_t *buf = life->buf[life->buf_idx]; |
... | ... |
@@ -400,7 +400,7 @@ static void fill_picture_monoblack(AVFilterContext *ctx, AVFilterBufferRef *picr |
400 | 400 |
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16 |
401 | 401 |
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16) |
402 | 402 |
|
403 |
-static void fill_picture_rgb(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
403 |
+static void fill_picture_rgb(AVFilterContext *ctx, AVFrame *picref) |
|
404 | 404 |
{ |
405 | 405 |
LifeContext *life = ctx->priv; |
406 | 406 |
uint8_t *buf = life->buf[life->buf_idx]; |
... | ... |
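The FAST_DIV255 comment kept above relies on (X+127)/255 == ((X+128)*257)>>16 for any product of two 8-bit values. A small stand-alone brute-force check of that identity over the full [0, 255*255] range:

    #include <assert.h>
    #include <stdio.h>

    #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

    int main(void)
    {
        for (int x = 0; x <= 255 * 255; x++)
            assert(FAST_DIV255(x) == (x + 127) / 255);
        printf("FAST_DIV255 matches (x + 127) / 255 on [0, 65025]\n");
        return 0;
    }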
@@ -430,10 +430,9 @@ static void fill_picture_rgb(AVFilterContext *ctx, AVFilterBufferRef *picref) |
430 | 430 |
static int request_frame(AVFilterLink *outlink) |
431 | 431 |
{ |
432 | 432 |
LifeContext *life = outlink->src->priv; |
433 |
- AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, life->w, life->h); |
|
434 |
- picref->video->sample_aspect_ratio = (AVRational) {1, 1}; |
|
433 |
+ AVFrame *picref = ff_get_video_buffer(outlink, life->w, life->h); |
|
434 |
+ picref->sample_aspect_ratio = (AVRational) {1, 1}; |
|
435 | 435 |
picref->pts = life->pts++; |
436 |
- picref->pos = -1; |
|
437 | 436 |
|
438 | 437 |
life->draw(outlink->src, picref); |
439 | 438 |
evolve(outlink->src); |
... | ... |
@@ -382,10 +382,9 @@ static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, |
382 | 382 |
static int request_frame(AVFilterLink *link) |
383 | 383 |
{ |
384 | 384 |
MBContext *mb = link->src->priv; |
385 |
- AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, mb->w, mb->h); |
|
386 |
- picref->video->sample_aspect_ratio = (AVRational) {1, 1}; |
|
385 |
+ AVFrame *picref = ff_get_video_buffer(link, mb->w, mb->h); |
|
386 |
+ picref->sample_aspect_ratio = (AVRational) {1, 1}; |
|
387 | 387 |
picref->pts = mb->pts++; |
388 |
- picref->pos = -1; |
|
389 | 388 |
|
390 | 389 |
draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts); |
391 | 390 |
ff_filter_frame(link, picref); |
... | ... |
@@ -323,14 +323,14 @@ static int query_formats(AVFilterContext *ctx) |
323 | 323 |
static int request_frame(AVFilterLink *outlink) |
324 | 324 |
{ |
325 | 325 |
MPTestContext *test = outlink->src->priv; |
326 |
- AVFilterBufferRef *picref; |
|
326 |
+ AVFrame *picref; |
|
327 | 327 |
int w = WIDTH, h = HEIGHT, ch = h>>test->vsub; |
328 | 328 |
unsigned int frame = test->frame_nb; |
329 | 329 |
enum test_type tt = test->test; |
330 | 330 |
|
331 | 331 |
if (test->max_pts >= 0 && test->pts > test->max_pts) |
332 | 332 |
return AVERROR_EOF; |
333 |
- picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, w, h); |
|
333 |
+ picref = ff_get_video_buffer(outlink, w, h); |
|
334 | 334 |
picref->pts = test->pts++; |
335 | 335 |
|
336 | 336 |
// clean image |
... | ... |
@@ -58,9 +58,9 @@ typedef struct { |
58 | 58 |
AVRational sar; ///< sample aspect ratio |
59 | 59 |
int nb_decimals; |
60 | 60 |
int draw_once; ///< draw only the first frame, always put out the same picture |
61 |
- AVFilterBufferRef *picref; ///< cached reference containing the painted picture |
|
61 |
+ AVFrame *picref; ///< cached reference containing the painted picture |
|
62 | 62 |
|
63 |
- void (* fill_picture_fn)(AVFilterContext *ctx, AVFilterBufferRef *picref); |
|
63 |
+ void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame); |
|
64 | 64 |
|
65 | 65 |
/* only used by color */ |
66 | 66 |
char *color_str; |
... | ... |
@@ -150,7 +150,7 @@ static av_cold void uninit(AVFilterContext *ctx) |
150 | 150 |
TestSourceContext *test = ctx->priv; |
151 | 151 |
|
152 | 152 |
av_opt_free(test); |
153 |
- avfilter_unref_bufferp(&test->picref); |
|
153 |
+ av_frame_free(&test->picref); |
|
154 | 154 |
} |
155 | 155 |
|
156 | 156 |
static int config_props(AVFilterLink *outlink) |
... | ... |
@@ -169,7 +169,7 @@ static int config_props(AVFilterLink *outlink) |
169 | 169 |
static int request_frame(AVFilterLink *outlink) |
170 | 170 |
{ |
171 | 171 |
TestSourceContext *test = outlink->src->priv; |
172 |
- AVFilterBufferRef *outpicref; |
|
172 |
+ AVFrame *frame; |
|
173 | 173 |
|
174 | 174 |
if (test->duration >= 0 && |
175 | 175 |
av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration) |
... | ... |
@@ -178,31 +178,29 @@ static int request_frame(AVFilterLink *outlink) |
178 | 178 |
if (test->draw_once) { |
179 | 179 |
if (!test->picref) { |
180 | 180 |
test->picref = |
181 |
- ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE, |
|
182 |
- test->w, test->h); |
|
181 |
+ ff_get_video_buffer(outlink, test->w, test->h); |
|
183 | 182 |
if (!test->picref) |
184 | 183 |
return AVERROR(ENOMEM); |
185 | 184 |
test->fill_picture_fn(outlink->src, test->picref); |
186 | 185 |
} |
187 |
- outpicref = avfilter_ref_buffer(test->picref, ~AV_PERM_WRITE); |
|
186 |
+ frame = av_frame_clone(test->picref); |
|
188 | 187 |
} else |
189 |
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h); |
|
188 |
+ frame = ff_get_video_buffer(outlink, test->w, test->h); |
|
190 | 189 |
|
191 |
- if (!outpicref) |
|
190 |
+ if (!frame) |
|
192 | 191 |
return AVERROR(ENOMEM); |
193 |
- outpicref->pts = test->pts; |
|
194 |
- outpicref->pos = -1; |
|
195 |
- outpicref->video->key_frame = 1; |
|
196 |
- outpicref->video->interlaced = 0; |
|
197 |
- outpicref->video->pict_type = AV_PICTURE_TYPE_I; |
|
198 |
- outpicref->video->sample_aspect_ratio = test->sar; |
|
192 |
+ frame->pts = test->pts; |
|
193 |
+ frame->key_frame = 1; |
|
194 |
+ frame->interlaced_frame = 0; |
|
195 |
+ frame->pict_type = AV_PICTURE_TYPE_I; |
|
196 |
+ frame->sample_aspect_ratio = test->sar; |
|
199 | 197 |
if (!test->draw_once) |
200 |
- test->fill_picture_fn(outlink->src, outpicref); |
|
198 |
+ test->fill_picture_fn(outlink->src, frame); |
|
201 | 199 |
|
202 | 200 |
test->pts++; |
203 | 201 |
test->nb_frame++; |
204 | 202 |
|
205 |
- return ff_filter_frame(outlink, outpicref); |
|
203 |
+ return ff_filter_frame(outlink, frame); |
|
206 | 204 |
} |
207 | 205 |
|
208 | 206 |
#if CONFIG_COLOR_FILTER |
... | ... |
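The draw_once path above paints the test pattern once into a cached frame and clones it for every request, so each consumer gets its own reference to the same pixels. A sketch of that caching, with placeholder argument handling:

    #include "libavutil/frame.h"
    #include "libavutil/error.h"
    #include "avfilter.h"
    #include "internal.h"
    #include "video.h"

    static int emit_cached_frame(AVFilterLink *outlink, AVFrame **cache,
                                 int w, int h, int64_t pts)
    {
        AVFrame *out;

        if (!*cache) {
            *cache = ff_get_video_buffer(outlink, w, h);
            if (!*cache)
                return AVERROR(ENOMEM);
            /* ... paint the test pattern into *cache exactly once ... */
        }
        out = av_frame_clone(*cache);        /* shares the buffers, fresh metadata */
        if (!out)
            return AVERROR(ENOMEM);
        out->pts = pts;
        return ff_filter_frame(outlink, out);
    }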
@@ -210,7 +208,7 @@ static int request_frame(AVFilterLink *outlink) |
210 | 210 |
#define color_options options |
211 | 211 |
AVFILTER_DEFINE_CLASS(color); |
212 | 212 |
|
213 |
-static void color_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
213 |
+static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref) |
|
214 | 214 |
{ |
215 | 215 |
TestSourceContext *test = ctx->priv; |
216 | 216 |
ff_fill_rectangle(&test->draw, &test->color, |
... | ... |
@@ -287,7 +285,7 @@ AVFilter avfilter_vsrc_color = { |
287 | 287 |
#define nullsrc_options options |
288 | 288 |
AVFILTER_DEFINE_CLASS(nullsrc); |
289 | 289 |
|
290 |
-static void nullsrc_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) { } |
|
290 |
+static void nullsrc_fill_picture(AVFilterContext *ctx, AVFrame *picref) { } |
|
291 | 291 |
|
292 | 292 |
static av_cold int nullsrc_init(AVFilterContext *ctx, const char *args) |
293 | 293 |
{ |
... | ... |
@@ -398,7 +396,7 @@ static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize, |
398 | 398 |
|
399 | 399 |
#define GRADIENT_SIZE (6 * 256) |
400 | 400 |
|
401 |
-static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
401 |
+static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame) |
|
402 | 402 |
{ |
403 | 403 |
TestSourceContext *test = ctx->priv; |
404 | 404 |
uint8_t *p, *p0; |
... | ... |
@@ -412,9 +410,9 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
412 | 412 |
int seg_size; |
413 | 413 |
int second; |
414 | 414 |
int i; |
415 |
- uint8_t *data = picref->data[0]; |
|
416 |
- int width = picref->video->w; |
|
417 |
- int height = picref->video->h; |
|
415 |
+ uint8_t *data = frame->data[0]; |
|
416 |
+ int width = frame->width; |
|
417 |
+ int height = frame->height; |
|
418 | 418 |
|
419 | 419 |
/* draw colored bars and circle */ |
420 | 420 |
radius = (width + height) / 4; |
... | ... |
@@ -444,11 +442,11 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
444 | 444 |
} |
445 | 445 |
quad0 += dquad_y; |
446 | 446 |
dquad_y += 2; |
447 |
- p0 += picref->linesize[0]; |
|
447 |
+ p0 += frame->linesize[0]; |
|
448 | 448 |
} |
449 | 449 |
|
450 | 450 |
/* draw sliding color line */ |
451 |
- p0 = p = data + picref->linesize[0] * height * 3/4; |
|
451 |
+ p0 = p = data + frame->linesize[0] * height * 3/4; |
|
452 | 452 |
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) % |
453 | 453 |
GRADIENT_SIZE; |
454 | 454 |
rgrad = 0; |
... | ... |
@@ -478,8 +476,8 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
478 | 478 |
} |
479 | 479 |
p = p0; |
480 | 480 |
for (y = height / 8; y > 0; y--) { |
481 |
- memcpy(p+picref->linesize[0], p, 3 * width); |
|
482 |
- p += picref->linesize[0]; |
|
481 |
+ memcpy(p+frame->linesize[0], p, 3 * width); |
|
482 |
+ p += frame->linesize[0]; |
|
483 | 483 |
} |
484 | 484 |
|
485 | 485 |
/* draw digits */ |
... | ... |
@@ -492,10 +490,10 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
492 | 492 |
second = (int)time; |
493 | 493 |
x = width - (width - seg_size * 64) / 2; |
494 | 494 |
y = (height - seg_size * 13) / 2; |
495 |
- p = data + (x*3 + y * picref->linesize[0]); |
|
495 |
+ p = data + (x*3 + y * frame->linesize[0]); |
|
496 | 496 |
for (i = 0; i < 8; i++) { |
497 | 497 |
p -= 3 * 8 * seg_size; |
498 |
- draw_digit(second % 10, p, picref->linesize[0], seg_size); |
|
498 |
+ draw_digit(second % 10, p, frame->linesize[0], seg_size); |
|
499 | 499 |
second /= 10; |
500 | 500 |
if (second == 0) |
501 | 501 |
break; |
... | ... |
@@ -588,13 +586,13 @@ static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize, |
588 | 588 |
} |
589 | 589 |
} |
590 | 590 |
|
591 |
-static void rgbtest_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
591 |
+static void rgbtest_fill_picture(AVFilterContext *ctx, AVFrame *frame) |
|
592 | 592 |
{ |
593 | 593 |
TestSourceContext *test = ctx->priv; |
594 |
- int x, y, w = picref->video->w, h = picref->video->h; |
|
594 |
+ int x, y, w = frame->width, h = frame->height; |
|
595 | 595 |
|
596 | 596 |
for (y = 0; y < h; y++) { |
597 |
- for (x = 0; x < picref->video->w; x++) { |
|
597 |
+ for (x = 0; x < w; x++) { |
|
598 | 598 |
int c = 256*x/w; |
599 | 599 |
int r = 0, g = 0, b = 0; |
600 | 600 |
|
... | ... |
@@ -602,7 +600,7 @@ static void rgbtest_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref |
602 | 602 |
else if (3*y < 2*h) g = c; |
603 | 603 |
else b = c; |
604 | 604 |
|
605 |
- rgbtest_put_pixel(picref->data[0], picref->linesize[0], x, y, r, g, b, |
|
605 |
+ rgbtest_put_pixel(frame->data[0], frame->linesize[0], x, y, r, g, b, |
|
606 | 606 |
ctx->outputs[0]->format, test->rgba_map); |
607 | 607 |
} |
608 | 608 |
} |
... | ... |
@@ -703,7 +701,7 @@ static const uint8_t pos4ire[4] = { 29, 29, 29, 255 }; /* 11.5% intensity bla |
703 | 703 |
static const uint8_t i_pixel[4] = { 0, 68, 130, 255 }; |
704 | 704 |
static const uint8_t q_pixel[4] = { 67, 0, 130, 255 }; |
705 | 705 |
|
706 |
-static void smptebars_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) |
|
706 |
+static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref) |
|
707 | 707 |
{ |
708 | 708 |
TestSourceContext *test = ctx->priv; |
709 | 709 |
FFDrawColor color; |
... | ... |
@@ -49,10 +49,10 @@ typedef struct YADIFContext { |
49 | 49 |
|
50 | 50 |
int frame_pending; |
51 | 51 |
|
52 |
- AVFilterBufferRef *cur; |
|
53 |
- AVFilterBufferRef *next; |
|
54 |
- AVFilterBufferRef *prev; |
|
55 |
- AVFilterBufferRef *out; |
|
52 |
+ AVFrame *cur; |
|
53 |
+ AVFrame *next; |
|
54 |
+ AVFrame *prev; |
|
55 |
+ AVFrame *out; |
|
56 | 56 |
|
57 | 57 |
/** |
58 | 58 |
* Required alignment for filter_line |
... | ... |
@@ -397,6 +397,7 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src) |
397 | 397 |
dst->opaque = src->opaque; |
398 | 398 |
dst->pkt_pts = src->pkt_pts; |
399 | 399 |
dst->pkt_dts = src->pkt_dts; |
400 |
+ dst->pkt_pos = src->pkt_pos; |
|
400 | 401 |
dst->quality = src->quality; |
401 | 402 |
dst->coded_picture_number = src->coded_picture_number; |
402 | 403 |
dst->display_picture_number = src->display_picture_number; |