Signed-off-by: Paul B Mahol <onemda@gmail.com>
Paul B Mahol authored on 2013/02/12 01:01:40... | ... |
@@ -1778,6 +1778,114 @@ threshold, and defaults to 98. |
1778 | 1778 |
@var{threshold} is the threshold below which a pixel value is |
1779 | 1779 |
considered black, and defaults to 32. |
1780 | 1780 |
|
1781 |
+@section blend |
|
1782 |
+ |
|
1783 |
+Blend two video frames into each other. |
|
1784 |
+ |
|
1785 |
+It takes two input streams and outputs one stream, the first input is the |
|
1786 |
+"top" layer and second input is "bottom" layer. |
|
1787 |
+Output terminates when shortest input terminates. |
|
1788 |
+ |
|
1789 |
+This filter accepts a list of options in the form of @var{key}=@var{value} |
|
1790 |
+pairs separated by ":". A description of the accepted options follows. |
|
1791 |
+ |
|
1792 |
+@table @option |
|
1793 |
+@item c0_mode |
|
1794 |
+@item c1_mode |
|
1795 |
+@item c2_mode |
|
1796 |
+@item c3_mode |
|
1797 |
+@item all_mode |
|
1798 |
+Set blend mode for specific pixel component or all pixel components in case |
|
1799 |
+of @var{all_mode}. Default value is @code{normal}. |
|
1800 |
+ |
|
1801 |
+Available values for component modes are: |
|
1802 |
+@table @samp |
|
1803 |
+@item addition |
|
1804 |
+@item and |
|
1805 |
+@item average |
|
1806 |
+@item burn |
|
1807 |
+@item darken |
|
1808 |
+@item difference |
|
1809 |
+@item divide |
|
1810 |
+@item dodge |
|
1811 |
+@item exclusion |
|
1812 |
+@item hardlight |
|
1813 |
+@item lighten |
|
1814 |
+@item multiply |
|
1815 |
+@item negation |
|
1816 |
+@item normal |
|
1817 |
+@item or |
|
1818 |
+@item overlay |
|
1819 |
+@item phoenix |
|
1820 |
+@item pinlight |
|
1821 |
+@item reflect |
|
1822 |
+@item screen |
|
1823 |
+@item softlight |
|
1824 |
+@item subtract |
|
1825 |
+@item vividlight |
|
1826 |
+@item xor |
|
1827 |
+@end table |
|
1828 |
+ |
|
1829 |
+@item c0_opacity |
|
1830 |
+@item c1_opacity |
|
1831 |
+@item c2_opacity |
|
1832 |
+@item c3_opacity |
|
1833 |
+@item all_opacity |
|
1834 |
+Set blend opacity for specific pixel component or all pixel components in case |
|
1835 |
+of @var{all_opacity}. Only used in combination with pixel component blend modes. |
|
1836 |
+ |
|
1837 |
+@item c0_expr |
|
1838 |
+@item c1_expr |
|
1839 |
+@item c2_expr |
|
1840 |
+@item c3_expr |
|
1841 |
+@item all_expr |
|
1842 |
+Set blend expression for specific pixel component or all pixel components in case |
|
1843 |
+of @var{all_expr}. Note that related mode options will be ignored if those are set. |
|
1844 |
+ |
|
1845 |
+The expressions can use the following variables: |
|
1846 |
+ |
|
1847 |
+@table @option |
|
1848 |
+@item X |
|
1849 |
+@item Y |
|
1850 |
+the coordinates of the current sample |
|
1851 |
+ |
|
1852 |
+@item W |
|
1853 |
+@item H |
|
1854 |
+the width and height of currently filtered plane |
|
1855 |
+ |
|
1856 |
+@item SW |
|
1857 |
+@item SH |
|
1858 |
+Width and height scale depending on the currently filtered plane. It is the |
|
1859 |
+ratio between the corresponding luma plane number of pixels and the current |
|
1860 |
+plane ones. E.g. for YUV4:2:0 the values are @code{1,1} for the luma plane, and |
|
1861 |
+@code{0.5,0.5} for chroma planes. |
|
1862 |
+ |
|
1863 |
+@item T |
|
1864 |
+Time of the current frame, expressed in seconds. |
|
1865 |
+ |
|
1866 |
+@item TOP, A |
|
1867 |
+Value of pixel component at current location for first video frame (top layer). |
|
1868 |
+ |
|
1869 |
+@item BOTTOM, B |
|
1870 |
+Value of pixel component at current location for second video frame (bottom layer). |
|
1871 |
+@end table |
|
1872 |
+@end table |
|
1873 |
+ |
|
1874 |
+@itemize |
|
1875 |
+ |
|
1876 |
+@item |
|
1877 |
+Apply transition from bottom layer to top layer in first 10 seconds: |
|
1878 |
+@example |
|
1879 |
+blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))' |
|
1880 |
+@end example |
|
1881 |
+ |
|
1882 |
+@item |
|
1883 |
+Apply 1x1 checkerboard effect: |
|
1884 |
+@example |
|
1885 |
+blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)' |
|
1886 |
+@end example |
|
1887 |
+@end itemize |
|
1888 |
+ |
|
1781 | 1889 |
@section boxblur |
1782 | 1890 |
|
1783 | 1891 |
Apply boxblur algorithm to the input video. |
... | ... |
@@ -100,6 +100,7 @@ OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o |
100 | 100 |
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o |
101 | 101 |
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o |
102 | 102 |
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o |
103 |
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o |
|
103 | 104 |
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o |
104 | 105 |
OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o |
105 | 106 |
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o |
... | ... |
@@ -94,6 +94,7 @@ void avfilter_register_all(void) |
94 | 94 |
REGISTER_FILTER(BBOX, bbox, vf); |
95 | 95 |
REGISTER_FILTER(BLACKDETECT, blackdetect, vf); |
96 | 96 |
REGISTER_FILTER(BLACKFRAME, blackframe, vf); |
97 |
+ REGISTER_FILTER(BLEND, blend, vf); |
|
97 | 98 |
REGISTER_FILTER(BOXBLUR, boxblur, vf); |
98 | 99 |
REGISTER_FILTER(COLORMATRIX, colormatrix, vf); |
99 | 100 |
REGISTER_FILTER(COPY, copy, vf); |
... | ... |
@@ -29,8 +29,8 @@ |
29 | 29 |
#include "libavutil/avutil.h" |
30 | 30 |
|
31 | 31 |
#define LIBAVFILTER_VERSION_MAJOR 3 |
32 |
-#define LIBAVFILTER_VERSION_MINOR 38 |
|
33 |
-#define LIBAVFILTER_VERSION_MICRO 106 |
|
32 |
+#define LIBAVFILTER_VERSION_MINOR 39 |
|
33 |
+#define LIBAVFILTER_VERSION_MICRO 100 |
|
34 | 34 |
|
35 | 35 |
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |
36 | 36 |
LIBAVFILTER_VERSION_MINOR, \ |
37 | 37 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,474 @@ |
0 |
+/* |
|
1 |
+ * Copyright (c) 2013 Paul B Mahol |
|
2 |
+ * |
|
3 |
+ * This file is part of FFmpeg. |
|
4 |
+ * |
|
5 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
6 |
+ * modify it under the terms of the GNU Lesser General Public |
|
7 |
+ * License as published by the Free Software Foundation; either |
|
8 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
9 |
+ * |
|
10 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
11 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 |
+ * Lesser General Public License for more details. |
|
14 |
+ * |
|
15 |
+ * You should have received a copy of the GNU Lesser General Public |
|
16 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
17 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
18 |
+ */ |
|
19 |
+ |
|
20 |
+#include "libavutil/imgutils.h" |
|
21 |
+#include "libavutil/eval.h" |
|
22 |
+#include "libavutil/opt.h" |
|
23 |
+#include "libavutil/pixfmt.h" |
|
24 |
+#include "avfilter.h" |
|
25 |
+#include "bufferqueue.h" |
|
26 |
+#include "formats.h" |
|
27 |
+#include "internal.h" |
|
28 |
+#include "video.h" |
|
29 |
+ |
|
30 |
+#define TOP 0 |
|
31 |
+#define BOTTOM 1 |
|
32 |
+ |
|
33 |
+enum BlendMode { |
|
34 |
+ BLEND_UNSET = -1, |
|
35 |
+ BLEND_NORMAL, |
|
36 |
+ BLEND_ADDITION, |
|
37 |
+ BLEND_AND, |
|
38 |
+ BLEND_AVERAGE, |
|
39 |
+ BLEND_BURN, |
|
40 |
+ BLEND_DARKEN, |
|
41 |
+ BLEND_DIFFERENCE, |
|
42 |
+ BLEND_DIVIDE, |
|
43 |
+ BLEND_DODGE, |
|
44 |
+ BLEND_EXCLUSION, |
|
45 |
+ BLEND_HARDLIGHT, |
|
46 |
+ BLEND_LIGHTEN, |
|
47 |
+ BLEND_MULTIPLY, |
|
48 |
+ BLEND_NEGATION, |
|
49 |
+ BLEND_OR, |
|
50 |
+ BLEND_OVERLAY, |
|
51 |
+ BLEND_PHOENIX, |
|
52 |
+ BLEND_PINLIGHT, |
|
53 |
+ BLEND_REFLECT, |
|
54 |
+ BLEND_SCREEN, |
|
55 |
+ BLEND_SOFTLIGHT, |
|
56 |
+ BLEND_SUBTRACT, |
|
57 |
+ BLEND_VIVIDLIGHT, |
|
58 |
+ BLEND_XOR, |
|
59 |
+ BLEND_NB |
|
60 |
+}; |
|
61 |
+ |
|
62 |
+static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "A", "B", "TOP", "BOTTOM", NULL }; |
|
63 |
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB }; |
|
64 |
+ |
|
65 |
+typedef struct FilterParams { |
|
66 |
+ enum BlendMode mode; |
|
67 |
+ double values[VAR_VARS_NB]; |
|
68 |
+ double opacity; |
|
69 |
+ AVExpr *e; |
|
70 |
+ char *expr_str; |
|
71 |
+ void (*blend)(const uint8_t *top, int top_linesize, |
|
72 |
+ const uint8_t *bottom, int bottom_linesize, |
|
73 |
+ uint8_t *dst, int dst_linesize, |
|
74 |
+ int width, int height, struct FilterParams *param); |
|
75 |
+} FilterParams; |
|
76 |
+ |
|
77 |
+typedef struct { |
|
78 |
+ const AVClass *class; |
|
79 |
+ struct FFBufQueue queue_top; |
|
80 |
+ struct FFBufQueue queue_bottom; |
|
81 |
+ int hsub, vsub; ///< chroma subsampling values |
|
82 |
+ int frame_requested; |
|
83 |
+ char *all_expr; |
|
84 |
+ enum BlendMode all_mode; |
|
85 |
+ double all_opacity; |
|
86 |
+ |
|
87 |
+ FilterParams params[4]; |
|
88 |
+} BlendContext; |
|
89 |
+ |
|
90 |
+#define OFFSET(x) offsetof(BlendContext, x) |
|
91 |
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
|
92 |
+ |
|
93 |
+static const AVOption blend_options[] = { |
|
94 |
+ { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"}, |
|
95 |
+ { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"}, |
|
96 |
+ { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"}, |
|
97 |
+ { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"}, |
|
98 |
+ { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"}, |
|
99 |
+ { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" }, |
|
100 |
+ { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" }, |
|
101 |
+ { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" }, |
|
102 |
+ { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" }, |
|
103 |
+ { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" }, |
|
104 |
+ { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" }, |
|
105 |
+ { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" }, |
|
106 |
+ { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" }, |
|
107 |
+ { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" }, |
|
108 |
+ { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" }, |
|
109 |
+ { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" }, |
|
110 |
+ { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" }, |
|
111 |
+ { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" }, |
|
112 |
+ { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" }, |
|
113 |
+ { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" }, |
|
114 |
+ { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" }, |
|
115 |
+ { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" }, |
|
116 |
+ { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" }, |
|
117 |
+ { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" }, |
|
118 |
+ { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" }, |
|
119 |
+ { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" }, |
|
120 |
+ { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" }, |
|
121 |
+ { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" }, |
|
122 |
+ { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" }, |
|
123 |
+ { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, |
|
124 |
+ { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, |
|
125 |
+ { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, |
|
126 |
+ { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, |
|
127 |
+ { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, |
|
128 |
+ { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, |
|
129 |
+ { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, |
|
130 |
+ { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, |
|
131 |
+ { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS }, |
|
132 |
+ { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}, |
|
133 |
+ { NULL }, |
|
134 |
+}; |
|
135 |
+ |
|
136 |
+AVFILTER_DEFINE_CLASS(blend); |
|
137 |
+ |
|
138 |
+static void blend_normal(const uint8_t *top, int top_linesize, |
|
139 |
+ const uint8_t *bottom, int bottom_linesize, |
|
140 |
+ uint8_t *dst, int dst_linesize, |
|
141 |
+ int width, int height, FilterParams *param) |
|
142 |
+{ |
|
143 |
+ av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, height); |
|
144 |
+} |
|
145 |
+ |
|
146 |
+#define DEFINE_BLEND(name, expr) \ |
|
147 |
+static void blend_## name(const uint8_t *top, int top_linesize, \ |
|
148 |
+ const uint8_t *bottom, int bottom_linesize, \ |
|
149 |
+ uint8_t *dst, int dst_linesize, \ |
|
150 |
+ int width, int height, FilterParams *param) \ |
|
151 |
+{ \ |
|
152 |
+ double opacity = param->opacity; \ |
|
153 |
+ int i, j; \ |
|
154 |
+ \ |
|
155 |
+ for (i = 0; i < height; i++) { \ |
|
156 |
+ for (j = 0; j < width; j++) { \ |
|
157 |
+ dst[j] = top[j] + ((expr) - top[j]) * opacity; \ |
|
158 |
+ } \ |
|
159 |
+ dst += dst_linesize; \ |
|
160 |
+ top += top_linesize; \ |
|
161 |
+ bottom += bottom_linesize; \ |
|
162 |
+ } \ |
|
163 |
+} |
|
164 |
+ |
|
165 |
+#define A top[j] |
|
166 |
+#define B bottom[j] |
|
167 |
+ |
|
168 |
+#define MULTIPLY(x, a, b) (x * ((a * b) / 255)) |
|
169 |
+#define SCREEN(x, a, b) (255 - x * ((255 - a) * (255 - b) / 255)) |
|
170 |
+#define BURN(a, b) ((a == 0) ? a : FFMAX(0, 255 - ((255 - b) << 8) / a)) |
|
171 |
+#define DODGE(a, b) ((a == 255) ? a : FFMIN(255, ((b << 8) / (255 - a)))) |
|
172 |
+ |
|
173 |
+DEFINE_BLEND(addition, FFMIN(255, A + B)) |
|
174 |
+DEFINE_BLEND(average, (A + B) / 2) |
|
175 |
+DEFINE_BLEND(subtract, FFMAX(0, A - B)) |
|
176 |
+DEFINE_BLEND(multiply, MULTIPLY(1, A, B)) |
|
177 |
+DEFINE_BLEND(negation, 255 - FFABS(255 - A - B)) |
|
178 |
+DEFINE_BLEND(difference, FFABS(A - B)) |
|
179 |
+DEFINE_BLEND(screen, SCREEN(1, A, B)) |
|
180 |
+DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B)) |
|
181 |
+DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A)) |
|
182 |
+DEFINE_BLEND(darken, FFMIN(A, B)) |
|
183 |
+DEFINE_BLEND(lighten, FFMAX(A, B)) |
|
184 |
+DEFINE_BLEND(divide, ((float)A / ((float)B) * 255)) |
|
185 |
+DEFINE_BLEND(dodge, DODGE(A, B)) |
|
186 |
+DEFINE_BLEND(burn, BURN(A, B)) |
|
187 |
+DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255)) |
|
188 |
+DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255) |
|
189 |
+DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128))) |
|
190 |
+DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255) |
|
191 |
+DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B)))) |
|
192 |
+DEFINE_BLEND(and, A & B) |
|
193 |
+DEFINE_BLEND(or, A | B) |
|
194 |
+DEFINE_BLEND(xor, A ^ B) |
|
195 |
+DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128))) |
|
196 |
+ |
|
197 |
+static void blend_expr(const uint8_t *top, int top_linesize, |
|
198 |
+ const uint8_t *bottom, int bottom_linesize, |
|
199 |
+ uint8_t *dst, int dst_linesize, |
|
200 |
+ int width, int height, |
|
201 |
+ FilterParams *param) |
|
202 |
+{ |
|
203 |
+ AVExpr *e = param->e; |
|
204 |
+ double *values = param->values; |
|
205 |
+ int y, x; |
|
206 |
+ |
|
207 |
+ for (y = 0; y < height; y++) { |
|
208 |
+ values[VAR_Y] = y; |
|
209 |
+ for (x = 0; x < width; x++) { |
|
210 |
+ values[VAR_X] = x; |
|
211 |
+ values[VAR_TOP] = values[VAR_A] = top[x]; |
|
212 |
+ values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; |
|
213 |
+ dst[x] = av_expr_eval(e, values, NULL); |
|
214 |
+ } |
|
215 |
+ dst += dst_linesize; |
|
216 |
+ top += top_linesize; |
|
217 |
+ bottom += bottom_linesize; |
|
218 |
+ } |
|
219 |
+} |
|
220 |
+ |
|
221 |
+static av_cold int init(AVFilterContext *ctx, const char *args) |
|
222 |
+{ |
|
223 |
+ BlendContext *b = ctx->priv; |
|
224 |
+ int ret, plane; |
|
225 |
+ |
|
226 |
+ b->class = &blend_class; |
|
227 |
+ av_opt_set_defaults(b); |
|
228 |
+ |
|
229 |
+ if ((ret = av_set_options_string(b, args, "=", ":")) < 0) |
|
230 |
+ return ret; |
|
231 |
+ |
|
232 |
+ for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) { |
|
233 |
+ FilterParams *param = &b->params[plane]; |
|
234 |
+ |
|
235 |
+ if (b->all_mode >= 0) |
|
236 |
+ param->mode = b->all_mode; |
|
237 |
+ if (b->all_opacity < 1) |
|
238 |
+ param->opacity = b->all_opacity; |
|
239 |
+ |
|
240 |
+ switch (param->mode) { |
|
241 |
+ case BLEND_ADDITION: param->blend = blend_addition; break; |
|
242 |
+ case BLEND_AND: param->blend = blend_and; break; |
|
243 |
+ case BLEND_AVERAGE: param->blend = blend_average; break; |
|
244 |
+ case BLEND_BURN: param->blend = blend_burn; break; |
|
245 |
+ case BLEND_DARKEN: param->blend = blend_darken; break; |
|
246 |
+ case BLEND_DIFFERENCE: param->blend = blend_difference; break; |
|
247 |
+ case BLEND_DIVIDE: param->blend = blend_divide; break; |
|
248 |
+ case BLEND_DODGE: param->blend = blend_dodge; break; |
|
249 |
+ case BLEND_EXCLUSION: param->blend = blend_exclusion; break; |
|
250 |
+ case BLEND_HARDLIGHT: param->blend = blend_hardlight; break; |
|
251 |
+ case BLEND_LIGHTEN: param->blend = blend_lighten; break; |
|
252 |
+ case BLEND_MULTIPLY: param->blend = blend_multiply; break; |
|
253 |
+ case BLEND_NEGATION: param->blend = blend_negation; break; |
|
254 |
+ case BLEND_NORMAL: param->blend = blend_normal; break; |
|
255 |
+ case BLEND_OR: param->blend = blend_or; break; |
|
256 |
+ case BLEND_OVERLAY: param->blend = blend_overlay; break; |
|
257 |
+ case BLEND_PHOENIX: param->blend = blend_phoenix; break; |
|
258 |
+ case BLEND_PINLIGHT: param->blend = blend_pinlight; break; |
|
259 |
+ case BLEND_REFLECT: param->blend = blend_reflect; break; |
|
260 |
+ case BLEND_SCREEN: param->blend = blend_screen; break; |
|
261 |
+ case BLEND_SOFTLIGHT: param->blend = blend_softlight; break; |
|
262 |
+ case BLEND_SUBTRACT: param->blend = blend_subtract; break; |
|
263 |
+ case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break; |
|
264 |
+ case BLEND_XOR: param->blend = blend_xor; break; |
|
265 |
+ } |
|
266 |
+ |
|
267 |
+ if (b->all_expr && !param->expr_str) { |
|
268 |
+ param->expr_str = av_strdup(b->all_expr); |
|
269 |
+ if (!param->expr_str) |
|
270 |
+ return AVERROR(ENOMEM); |
|
271 |
+ } |
|
272 |
+ if (param->expr_str) { |
|
273 |
+ ret = av_expr_parse(¶m->e, param->expr_str, var_names, |
|
274 |
+ NULL, NULL, NULL, NULL, 0, ctx); |
|
275 |
+ if (ret < 0) |
|
276 |
+ return ret; |
|
277 |
+ param->blend = blend_expr; |
|
278 |
+ } |
|
279 |
+ } |
|
280 |
+ |
|
281 |
+ return 0; |
|
282 |
+} |
|
283 |
+ |
|
284 |
+static int query_formats(AVFilterContext *ctx) |
|
285 |
+{ |
|
286 |
+ static const enum AVPixelFormat pix_fmts[] = { |
|
287 |
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P, |
|
288 |
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, |
|
289 |
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE |
|
290 |
+ }; |
|
291 |
+ |
|
292 |
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); |
|
293 |
+ return 0; |
|
294 |
+} |
|
295 |
+ |
|
296 |
+static int config_output(AVFilterLink *outlink) |
|
297 |
+{ |
|
298 |
+ AVFilterContext *ctx = outlink->src; |
|
299 |
+ AVFilterLink *toplink = ctx->inputs[TOP]; |
|
300 |
+ AVFilterLink *bottomlink = ctx->inputs[BOTTOM]; |
|
301 |
+ |
|
302 |
+ if (toplink->format != bottomlink->format) { |
|
303 |
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n"); |
|
304 |
+ return AVERROR(EINVAL); |
|
305 |
+ } |
|
306 |
+ if (toplink->w != bottomlink->w || |
|
307 |
+ toplink->h != bottomlink->h || |
|
308 |
+ toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num || |
|
309 |
+ toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) { |
|
310 |
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters " |
|
311 |
+ "(size %dx%d, SAR %d:%d) do not match the corresponding " |
|
312 |
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n", |
|
313 |
+ ctx->input_pads[TOP].name, toplink->w, toplink->h, |
|
314 |
+ toplink->sample_aspect_ratio.num, |
|
315 |
+ toplink->sample_aspect_ratio.den, |
|
316 |
+ ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h, |
|
317 |
+ bottomlink->sample_aspect_ratio.num, |
|
318 |
+ bottomlink->sample_aspect_ratio.den); |
|
319 |
+ return AVERROR(EINVAL); |
|
320 |
+ } |
|
321 |
+ |
|
322 |
+ outlink->w = toplink->w; |
|
323 |
+ outlink->h = bottomlink->h; |
|
324 |
+ outlink->time_base = toplink->time_base; |
|
325 |
+ outlink->sample_aspect_ratio = toplink->sample_aspect_ratio; |
|
326 |
+ outlink->frame_rate = toplink->frame_rate; |
|
327 |
+ return 0; |
|
328 |
+} |
|
329 |
+ |
|
330 |
+static int config_input_top(AVFilterLink *inlink) |
|
331 |
+{ |
|
332 |
+ BlendContext *b = inlink->dst->priv; |
|
333 |
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); |
|
334 |
+ |
|
335 |
+ b->hsub = pix_desc->log2_chroma_w; |
|
336 |
+ b->vsub = pix_desc->log2_chroma_h; |
|
337 |
+ return 0; |
|
338 |
+} |
|
339 |
+ |
|
340 |
+static av_cold void uninit(AVFilterContext *ctx) |
|
341 |
+{ |
|
342 |
+ BlendContext *b = ctx->priv; |
|
343 |
+ int i; |
|
344 |
+ |
|
345 |
+ av_opt_free(b); |
|
346 |
+ ff_bufqueue_discard_all(&b->queue_top); |
|
347 |
+ ff_bufqueue_discard_all(&b->queue_bottom); |
|
348 |
+ |
|
349 |
+ for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++) |
|
350 |
+ av_expr_free(b->params[i].e); |
|
351 |
+} |
|
352 |
+ |
|
353 |
+static int request_frame(AVFilterLink *outlink) |
|
354 |
+{ |
|
355 |
+ AVFilterContext *ctx = outlink->src; |
|
356 |
+ BlendContext *b = ctx->priv; |
|
357 |
+ int in, ret; |
|
358 |
+ |
|
359 |
+ b->frame_requested = 1; |
|
360 |
+ while (b->frame_requested) { |
|
361 |
+ in = ff_bufqueue_peek(&b->queue_top, TOP) ? BOTTOM : TOP; |
|
362 |
+ ret = ff_request_frame(ctx->inputs[in]); |
|
363 |
+ if (ret < 0) |
|
364 |
+ return ret; |
|
365 |
+ } |
|
366 |
+ return 0; |
|
367 |
+} |
|
368 |
+ |
|
369 |
+static void blend_frame(AVFilterContext *ctx, |
|
370 |
+ AVFilterBufferRef *top_buf, |
|
371 |
+ AVFilterBufferRef *bottom_buf, |
|
372 |
+ AVFilterBufferRef *dst_buf) |
|
373 |
+{ |
|
374 |
+ BlendContext *b = ctx->priv; |
|
375 |
+ AVFilterLink *inlink = ctx->inputs[0]; |
|
376 |
+ FilterParams *param; |
|
377 |
+ int plane; |
|
378 |
+ |
|
379 |
+ for (plane = 0; dst_buf->data[plane]; plane++) { |
|
380 |
+ int hsub = plane == 1 || plane == 2 ? b->hsub : 0; |
|
381 |
+ int vsub = plane == 1 || plane == 2 ? b->vsub : 0; |
|
382 |
+ int outw = dst_buf->video->w >> hsub; |
|
383 |
+ int outh = dst_buf->video->h >> vsub; |
|
384 |
+ uint8_t *dst = dst_buf->data[plane]; |
|
385 |
+ uint8_t *top = top_buf->data[plane]; |
|
386 |
+ uint8_t *bottom = bottom_buf->data[plane]; |
|
387 |
+ |
|
388 |
+ param = &b->params[plane]; |
|
389 |
+ param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base); |
|
390 |
+ param->values[VAR_W] = outw; |
|
391 |
+ param->values[VAR_H] = outh; |
|
392 |
+ param->values[VAR_SW] = outw / dst_buf->video->w; |
|
393 |
+ param->values[VAR_SH] = outh / dst_buf->video->h; |
|
394 |
+ param->blend(top, top_buf->linesize[plane], |
|
395 |
+ bottom, bottom_buf->linesize[plane], |
|
396 |
+ dst, dst_buf->linesize[plane], outw, outh, param); |
|
397 |
+ } |
|
398 |
+} |
|
399 |
+ |
|
400 |
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) |
|
401 |
+{ |
|
402 |
+ AVFilterContext *ctx = inlink->dst; |
|
403 |
+ AVFilterLink *outlink = ctx->outputs[0]; |
|
404 |
+ BlendContext *b = ctx->priv; |
|
405 |
+ |
|
406 |
+ int ret = 0; |
|
407 |
+ int is_bottom = (inlink == ctx->inputs[BOTTOM]); |
|
408 |
+ struct FFBufQueue *queue = |
|
409 |
+ (is_bottom ? &b->queue_bottom : &b->queue_top); |
|
410 |
+ ff_bufqueue_add(ctx, queue, buf); |
|
411 |
+ |
|
412 |
+ while (1) { |
|
413 |
+ AVFilterBufferRef *top_buf, *bottom_buf, *out_buf; |
|
414 |
+ |
|
415 |
+ if (!ff_bufqueue_peek(&b->queue_top, TOP) || |
|
416 |
+ !ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break; |
|
417 |
+ |
|
418 |
+ top_buf = ff_bufqueue_get(&b->queue_top); |
|
419 |
+ bottom_buf = ff_bufqueue_get(&b->queue_bottom); |
|
420 |
+ |
|
421 |
+ out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, |
|
422 |
+ outlink->w, outlink->h); |
|
423 |
+ if (!out_buf) { |
|
424 |
+ return AVERROR(ENOMEM); |
|
425 |
+ } |
|
426 |
+ avfilter_copy_buffer_ref_props(out_buf, top_buf); |
|
427 |
+ |
|
428 |
+ b->frame_requested = 0; |
|
429 |
+ blend_frame(ctx, top_buf, bottom_buf, out_buf); |
|
430 |
+ ret = ff_filter_frame(ctx->outputs[0], out_buf); |
|
431 |
+ avfilter_unref_buffer(top_buf); |
|
432 |
+ avfilter_unref_buffer(bottom_buf); |
|
433 |
+ } |
|
434 |
+ return ret; |
|
435 |
+} |
|
436 |
+ |
|
437 |
+static const AVFilterPad blend_inputs[] = { |
|
438 |
+ { |
|
439 |
+ .name = "top", |
|
440 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
441 |
+ .config_props = config_input_top, |
|
442 |
+ .filter_frame = filter_frame, |
|
443 |
+ .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
444 |
+ },{ |
|
445 |
+ .name = "bottom", |
|
446 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
447 |
+ .filter_frame = filter_frame, |
|
448 |
+ .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, |
|
449 |
+ }, |
|
450 |
+ { NULL } |
|
451 |
+}; |
|
452 |
+ |
|
453 |
+static const AVFilterPad blend_outputs[] = { |
|
454 |
+ { |
|
455 |
+ .name = "default", |
|
456 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
457 |
+ .config_props = config_output, |
|
458 |
+ .request_frame = request_frame, |
|
459 |
+ }, |
|
460 |
+ { NULL } |
|
461 |
+}; |
|
462 |
+ |
|
463 |
+AVFilter avfilter_vf_blend = { |
|
464 |
+ .name = "blend", |
|
465 |
+ .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."), |
|
466 |
+ .init = init, |
|
467 |
+ .uninit = uninit, |
|
468 |
+ .priv_size = sizeof(BlendContext), |
|
469 |
+ .query_formats = query_formats, |
|
470 |
+ .inputs = blend_inputs, |
|
471 |
+ .outputs = blend_outputs, |
|
472 |
+ .priv_class = &blend_class, |
|
473 |
+}; |