This filter can be used to scale one stream to match another or based on
another, useful for scaling subtitles or other things to be overlaid
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
... | ... |
@@ -2780,6 +2780,7 @@ repeatfields_filter_deps="gpl" |
2780 | 2780 |
resample_filter_deps="avresample" |
2781 | 2781 |
sab_filter_deps="gpl swscale" |
2782 | 2782 |
scale_filter_deps="swscale" |
2783 |
+scale2ref_filter_deps="swscale" |
|
2783 | 2784 |
select_filter_select="pixelutils" |
2784 | 2785 |
smartblur_filter_deps="gpl swscale" |
2785 | 2786 |
showcqt_filter_deps="avcodec" |
... | ... |
@@ -5831,6 +5832,7 @@ enabled removelogo_filter && prepend avfilter_deps "avformat avcodec swscale" |
5831 | 5831 |
enabled resample_filter && prepend avfilter_deps "avresample" |
5832 | 5832 |
enabled sab_filter && prepend avfilter_deps "swscale" |
5833 | 5833 |
enabled scale_filter && prepend avfilter_deps "swscale" |
5834 |
+enabled scale2ref_filter && prepend avfilter_deps "swscale" |
|
5834 | 5835 |
enabled showspectrum_filter && prepend avfilter_deps "avcodec" |
5835 | 5836 |
enabled smartblur_filter && prepend avfilter_deps "swscale" |
5836 | 5837 |
enabled subtitles_filter && prepend avfilter_deps "avformat avcodec" |
... | ... |
@@ -9122,6 +9122,23 @@ If the specified expression is not valid, it is kept at its current |
9122 | 9122 |
value. |
9123 | 9123 |
@end table |
9124 | 9124 |
|
9125 |
+@section scale2ref |
|
9126 |
+ |
|
9127 |
+Scale (resize) the input video, based on a reference video. |
|
9128 |
+ |
|
9129 |
+See the scale filter for available options; scale2ref supports the same |

9130 |
+options but uses the reference video instead of the main input as the basis. |
|
9131 |
+ |
|
9132 |
+@subsection Examples |
|
9133 |
+ |
|
9134 |
+@itemize |
|
9135 |
+@item |
|
9136 |
+Scale a subtitle stream to match the main video in size before overlaying. |
|
9137 |
+@example |
|
9138 |
+'scale2ref[b][a];[a][b]overlay' |
|
9139 |
+@end example |
|
9140 |
+@end itemize |
|
9141 |
+ |
|
9125 | 9142 |
@section separatefields |
9126 | 9143 |
|
9127 | 9144 |
The @code{separatefields} takes a frame-based video input and splits |
... | ... |
@@ -197,6 +197,7 @@ OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o |
197 | 197 |
OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o |
198 | 198 |
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o |
199 | 199 |
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o |
200 |
+OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o |
|
200 | 201 |
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o |
201 | 202 |
OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o |
202 | 203 |
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o |
... | ... |
@@ -211,6 +211,7 @@ void avfilter_register_all(void) |
211 | 211 |
REGISTER_FILTER(ROTATE, rotate, vf); |
212 | 212 |
REGISTER_FILTER(SAB, sab, vf); |
213 | 213 |
REGISTER_FILTER(SCALE, scale, vf); |
214 |
+ REGISTER_FILTER(SCALE2REF, scale2ref, vf); |
|
214 | 215 |
REGISTER_FILTER(SELECT, select, vf); |
215 | 216 |
REGISTER_FILTER(SENDCMD, sendcmd, vf); |
216 | 217 |
REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf); |
... | ... |
@@ -30,7 +30,7 @@ |
30 | 30 |
#include "libavutil/version.h" |
31 | 31 |
|
32 | 32 |
#define LIBAVFILTER_VERSION_MAJOR 5 |
33 |
-#define LIBAVFILTER_VERSION_MINOR 33 |
|
33 |
+#define LIBAVFILTER_VERSION_MINOR 34 |
|
34 | 34 |
#define LIBAVFILTER_VERSION_MICRO 100 |
35 | 35 |
|
36 | 36 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |
... | ... |
@@ -111,6 +111,8 @@ typedef struct ScaleContext { |
111 | 111 |
int force_original_aspect_ratio; |
112 | 112 |
} ScaleContext; |
113 | 113 |
|
114 |
+AVFilter ff_vf_scale2ref; |
|
115 |
+ |
|
114 | 116 |
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) |
115 | 117 |
{ |
116 | 118 |
ScaleContext *scale = ctx->priv; |
... | ... |
@@ -234,7 +236,10 @@ static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace) |
234 | 234 |
static int config_props(AVFilterLink *outlink) |
235 | 235 |
{ |
236 | 236 |
AVFilterContext *ctx = outlink->src; |
237 |
- AVFilterLink *inlink = outlink->src->inputs[0]; |
|
237 |
+ AVFilterLink *inlink0 = outlink->src->inputs[0]; |
|
238 |
+ AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ? |
|
239 |
+ outlink->src->inputs[1] : |
|
240 |
+ outlink->src->inputs[0]; |
|
238 | 241 |
enum AVPixelFormat outfmt = outlink->format; |
239 | 242 |
ScaleContext *scale = ctx->priv; |
240 | 243 |
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
... | ... |
@@ -343,8 +348,9 @@ static int config_props(AVFilterLink *outlink) |
343 | 343 |
if (scale->isws[1]) |
344 | 344 |
sws_freeContext(scale->isws[1]); |
345 | 345 |
scale->isws[0] = scale->isws[1] = scale->sws = NULL; |
346 |
- if (inlink->w == outlink->w && inlink->h == outlink->h && |
|
347 |
- inlink->format == outlink->format) |
|
346 |
+ if (inlink0->w == outlink->w && |
|
347 |
+ inlink0->h == outlink->h && |
|
348 |
+ inlink0->format == outlink->format) |
|
348 | 349 |
; |
349 | 350 |
else { |
350 | 351 |
struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]}; |
... | ... |
@@ -356,9 +362,9 @@ static int config_props(AVFilterLink *outlink) |
356 | 356 |
if (!*s) |
357 | 357 |
return AVERROR(ENOMEM); |
358 | 358 |
|
359 |
- av_opt_set_int(*s, "srcw", inlink ->w, 0); |
|
360 |
- av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0); |
|
361 |
- av_opt_set_int(*s, "src_format", inlink->format, 0); |
|
359 |
+ av_opt_set_int(*s, "srcw", inlink0 ->w, 0); |
|
360 |
+ av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0); |
|
361 |
+ av_opt_set_int(*s, "src_format", inlink0->format, 0); |
|
362 | 362 |
av_opt_set_int(*s, "dstw", outlink->w, 0); |
363 | 363 |
av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0); |
364 | 364 |
av_opt_set_int(*s, "dst_format", outfmt, 0); |
... | ... |
@@ -374,7 +380,7 @@ static int config_props(AVFilterLink *outlink) |
374 | 374 |
/* Override YUV420P settings to have the correct (MPEG-2) chroma positions |
375 | 375 |
* MPEG-2 chroma positions are used by convention |
376 | 376 |
* XXX: support other 4:2:0 pixel formats */ |
377 |
- if (inlink->format == AV_PIX_FMT_YUV420P) { |
|
377 |
+ if (inlink0->format == AV_PIX_FMT_YUV420P) { |
|
378 | 378 |
scale->in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192; |
379 | 379 |
} |
380 | 380 |
|
... | ... |
@@ -415,6 +421,17 @@ fail: |
415 | 415 |
return ret; |
416 | 416 |
} |
417 | 417 |
|
418 |
+static int config_props_ref(AVFilterLink *outlink) |
|
419 |
+{ |
|
420 |
+ AVFilterLink *inlink = outlink->src->inputs[1]; |
|
421 |
+ |
|
422 |
+ outlink->w = inlink->w; |
|
423 |
+ outlink->h = inlink->h; |
|
424 |
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; |
|
425 |
+ |
|
426 |
+ return 0; |
|
427 |
+} |
|
428 |
+ |
|
418 | 429 |
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field) |
419 | 430 |
{ |
420 | 431 |
ScaleContext *scale = link->dst->priv; |
... | ... |
@@ -542,6 +559,13 @@ static int filter_frame(AVFilterLink *link, AVFrame *in) |
542 | 542 |
return ff_filter_frame(outlink, out); |
543 | 543 |
} |
544 | 544 |
|
545 |
+static int filter_frame_ref(AVFilterLink *link, AVFrame *in) |
|
546 |
+{ |
|
547 |
+ AVFilterLink *outlink = link->dst->outputs[1]; |
|
548 |
+ |
|
549 |
+ return ff_filter_frame(outlink, in); |
|
550 |
+} |
|
551 |
+ |
|
545 | 552 |
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, |
546 | 553 |
char *res, int res_len, int flags) |
547 | 554 |
{ |
... | ... |
@@ -643,3 +667,53 @@ AVFilter ff_vf_scale = { |
643 | 643 |
.outputs = avfilter_vf_scale_outputs, |
644 | 644 |
.process_command = process_command, |
645 | 645 |
}; |
646 |
+ |
|
647 |
+static const AVClass scale2ref_class = { |
|
648 |
+ .class_name = "scale2ref", |
|
649 |
+ .item_name = av_default_item_name, |
|
650 |
+ .option = scale_options, |
|
651 |
+ .version = LIBAVUTIL_VERSION_INT, |
|
652 |
+ .category = AV_CLASS_CATEGORY_FILTER, |
|
653 |
+ .child_class_next = child_class_next, |
|
654 |
+}; |
|
655 |
+ |
|
656 |
+static const AVFilterPad avfilter_vf_scale2ref_inputs[] = { |
|
657 |
+ { |
|
658 |
+ .name = "default", |
|
659 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
660 |
+ .filter_frame = filter_frame, |
|
661 |
+ }, |
|
662 |
+ { |
|
663 |
+ .name = "ref", |
|
664 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
665 |
+ .filter_frame = filter_frame_ref, |
|
666 |
+ }, |
|
667 |
+ { NULL } |
|
668 |
+}; |
|
669 |
+ |
|
670 |
+static const AVFilterPad avfilter_vf_scale2ref_outputs[] = { |
|
671 |
+ { |
|
672 |
+ .name = "default", |
|
673 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
674 |
+ .config_props = config_props, |
|
675 |
+ }, |
|
676 |
+ { |
|
677 |
+ .name = "ref", |
|
678 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
679 |
+ .config_props = config_props_ref, |
|
680 |
+ }, |
|
681 |
+ { NULL } |
|
682 |
+}; |
|
683 |
+ |
|
684 |
+AVFilter ff_vf_scale2ref = { |
|
685 |
+ .name = "scale2ref", |
|
686 |
+ .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."), |
|
687 |
+ .init_dict = init_dict, |
|
688 |
+ .uninit = uninit, |
|
689 |
+ .query_formats = query_formats, |
|
690 |
+ .priv_size = sizeof(ScaleContext), |
|
691 |
+ .priv_class = &scale2ref_class, |
|
692 |
+ .inputs = avfilter_vf_scale2ref_inputs, |
|
693 |
+ .outputs = avfilter_vf_scale2ref_outputs, |
|
694 |
+ .process_command = process_command, |
|
695 |
+}; |