
avfilter: add normalize filter

Richard Ling authored on 2017/11/21 19:32:06
Showing 6 changed files
... ...
@@ -19,6 +19,7 @@ version <next>:
 - acontrast audio filter
 - OpenCL overlay filter
 - video mix filter
+- video normalize filter


 version 3.4:
... ...
@@ -10867,6 +10867,86 @@ Add temporal and uniform noise to input video:
 noise=alls=20:allf=t+u
 @end example

+@section normalize
+
+Normalize RGB video (aka histogram stretching, contrast stretching).
+See: https://en.wikipedia.org/wiki/Normalization_(image_processing)
+
+For each channel of each frame, the filter computes the input range and maps
+it linearly to the user-specified output range. The output range defaults
+to the full dynamic range from pure black to pure white.
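+For example, with the default output range, a channel whose input values
+span [0,204] is scaled by a factor of 255/204 = 1.25, so an input value
+of 100 is mapped to 125.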
+
+Temporal smoothing can be used on the input range to reduce flickering (rapid
+changes in brightness) caused when small dark or bright objects enter or leave
+the scene. This is similar to the auto-exposure (automatic gain control) on a
+video camera, and, like a video camera, it may cause a period of over- or
+under-exposure of the video.
+
+The R,G,B channels can be normalized independently, which may cause some
+color shifting, or linked together as a single channel, which prevents
+color shifting. Linked normalization preserves hue. Independent normalization
+does not, so it can be used to remove some color casts. Independent and linked
+normalization can be combined in any ratio.
+
+The normalize filter accepts the following options:
+
+@table @option
+@item blackpt
+@item whitept
+Colors which define the output range. The minimum input value is mapped to
+@var{blackpt}, and the maximum input value is mapped to @var{whitept}.
+The defaults are black and white respectively. Specifying white for
+@var{blackpt} and black for @var{whitept} will give color-inverted,
+normalized video. Shades of grey can be used to reduce the dynamic range
+(contrast). Specifying saturated colors here can create some interesting
+effects.
+
+@item smoothing
+The number of previous frames to use for temporal smoothing. The input range
+of each channel is smoothed using a rolling average over the current frame
+and the @var{smoothing} previous frames. The default is 0 (no temporal
+smoothing).
+
+@item independence
+Controls the ratio of independent (color shifting) channel normalization to
+linked (color preserving) normalization. 0.0 is fully linked, 1.0 is fully
+independent. Defaults to 1.0 (fully independent).
+
+@item strength
+Overall strength of the filter. 1.0 is full strength. 0.0 is a rather
+expensive no-op. Defaults to 1.0 (full strength).
+
+@end table
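+
+Written out as formulas (in illustrative notation, not literal source code),
+for each channel the filter computes:
+@example
+smoothed_min = mean of the channel's minimums over the last (smoothing+1) frames
+range_min    = independence * smoothed_min + (1 - independence) * global_min
+out_min      = strength * blackpt + (1 - strength) * input_min
+@end example
+and likewise for the maximums.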
+
+@subsection Examples
+
+Stretch video contrast to use the full dynamic range, with no temporal
+smoothing; may flicker depending on the source content:
+@example
+normalize=blackpt=black:whitept=white:smoothing=0
+@end example
+
+As above, but with 50 frames of temporal smoothing; flicker should be
+reduced, depending on the source content:
+@example
+normalize=blackpt=black:whitept=white:smoothing=50
+@end example
+
+As above, but with hue-preserving linked channel normalization:
+@example
+normalize=blackpt=black:whitept=white:smoothing=50:independence=0
+@end example
+
+As above, but with half strength:
+@example
+normalize=blackpt=black:whitept=white:smoothing=50:independence=0:strength=0.5
+@end example
+
+Map the darkest input color to red, the brightest input color to cyan:
+@example
+normalize=blackpt=red:whitept=cyan
+@end example
+
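+A complete command line might look like the following (the file names are
+only placeholders):
+@example
+ffmpeg -i input.mp4 -vf normalize=smoothing=30 normalized.mp4
+@end example
+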
 @section null

 Pass the video source unchanged to the output.
... ...
@@ -247,6 +247,7 @@ OBJS-$(CONFIG_NLMEANS_FILTER)                += vf_nlmeans.o
 OBJS-$(CONFIG_NNEDI_FILTER)                  += vf_nnedi.o
 OBJS-$(CONFIG_NOFORMAT_FILTER)               += vf_format.o
 OBJS-$(CONFIG_NOISE_FILTER)                  += vf_noise.o
+OBJS-$(CONFIG_NORMALIZE_FILTER)              += vf_normalize.o
 OBJS-$(CONFIG_NULL_FILTER)                   += vf_null.o
 OBJS-$(CONFIG_OCR_FILTER)                    += vf_ocr.o
 OBJS-$(CONFIG_OCV_FILTER)                    += vf_libopencv.o
... ...
@@ -257,6 +257,7 @@ static void register_all(void)
     REGISTER_FILTER(NNEDI,          nnedi,          vf);
     REGISTER_FILTER(NOFORMAT,       noformat,       vf);
     REGISTER_FILTER(NOISE,          noise,          vf);
+    REGISTER_FILTER(NORMALIZE,      normalize,      vf);
     REGISTER_FILTER(NULL,           null,           vf);
     REGISTER_FILTER(OCR,            ocr,            vf);
     REGISTER_FILTER(OCV,            ocv,            vf);
... ...
@@ -30,7 +30,7 @@
 #include "libavutil/version.h"

 #define LIBAVFILTER_VERSION_MAJOR   7
-#define LIBAVFILTER_VERSION_MINOR   3
+#define LIBAVFILTER_VERSION_MINOR   4
 #define LIBAVFILTER_VERSION_MICRO 100

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
new file mode 100644
... ...
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2017 Richard Ling
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Normalize RGB video (aka histogram stretching, contrast stretching).
+ * See: https://en.wikipedia.org/wiki/Normalization_(image_processing)
+ *
+ * For each channel of each frame, the filter computes the input range and maps
+ * it linearly to the user-specified output range. The output range defaults
+ * to the full dynamic range from pure black to pure white.
+ *
+ * Naively maximising the dynamic range of each frame of video in isolation
+ * may cause flickering (rapid changes in brightness of static objects in the
+ * scene) when small dark or bright objects enter or leave the scene. This
+ * filter can apply temporal smoothing to the input range to reduce flickering.
+ * Temporal smoothing is similar to the auto-exposure (automatic gain control)
+ * on a video camera, which performs the same function; and, like a video
+ * camera, it may cause a period of over- or under-exposure of the video.
+ *
+ * The filter can normalize the R,G,B channels independently, which may cause
+ * color shifting, or link them together as a single channel, which prevents
+ * color shifting. More precisely, linked normalization preserves hue (as it's
+ * defined in HSV/HSL color spaces) while independent normalization does not.
+ * Independent normalization can be used to remove color casts, such as the
+ * blue cast from underwater video, restoring more natural colors. The filter
+ * can also combine independent and linked normalization in any ratio.
+ *
+ * Finally, the overall strength of the filter can be adjusted, from no effect
+ * to full normalization.
+ *
+ * The 5 AVOptions are:
+ *   blackpt,   Colors which define the output range. The minimum input value
+ *   whitept    is mapped to the blackpt. The maximum input value is mapped to
+ *              the whitept. The defaults are black and white respectively.
+ *              Specifying white for blackpt and black for whitept will give
+ *              color-inverted, normalized video. Shades of grey can be used
+ *              to reduce the dynamic range (contrast). Specifying saturated
+ *              colors here can create some interesting effects.
+ *
+ *   smoothing  The amount of temporal smoothing, expressed in frames (>=0).
+ *              The minimum and maximum input values of each channel are
+ *              smoothed using a rolling average over the current frame and
+ *              that many previous frames of video.  Defaults to 0 (no temporal
+ *              smoothing).
+ *
+ *   independence
+ *              Controls the ratio of independent (color shifting) channel
+ *              normalization to linked (color preserving) normalization. 0.0
+ *              is fully linked, 1.0 is fully independent. Defaults to fully
+ *              independent.
+ *
+ *   strength   Overall strength of the filter. 1.0 is full strength. 0.0 is
+ *              a rather expensive no-op. Values in between can give a gentle
+ *              boost to low-contrast video without creating an artificial
+ *              over-processed look. The default is full strength.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct NormalizeContext {
+    const AVClass *class;
+
+    // Storage for the corresponding AVOptions
+    uint8_t blackpt[4];
+    uint8_t whitept[4];
+    int smoothing;
+    float independence;
+    float strength;
+
+    int co[4];          // Offsets to R,G,B,A bytes respectively in each pixel
+    int num_components; // Number of components in the pixel format
+    int history_len;    // Number of frames to average; based on smoothing factor
+    int frame_num;      // Increments on each frame, starting from 0.
+
+    // Per-extremum, per-channel history, for temporal smoothing.
+    struct {
+        uint8_t *history;       // History entries.
+        uint32_t history_sum;   // Sum of history entries.
+    } min[3], max[3];           // Min and max for each channel in {R,G,B}.
+    uint8_t *history_mem;       // Single allocation for above history entries
+
+} NormalizeContext;
+
+#define OFFSET(x) offsetof(NormalizeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption normalize_options[] = {
+    { "blackpt",  "output color to which darkest input color is mapped",  OFFSET(blackpt), AV_OPT_TYPE_COLOR, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "whitept",  "output color to which brightest input color is mapped",  OFFSET(whitept), AV_OPT_TYPE_COLOR, { .str = "white" }, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "smoothing",  "amount of temporal smoothing of the input range, to reduce flicker", OFFSET(smoothing), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX/8, FLAGS },
+    { "independence", "proportion of independent to linked channel normalization", OFFSET(independence), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGS },
+    { "strength", "strength of filter, from no effect to full normalization", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 1.0, FLAGS },
+    { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(normalize);
+
+// This function is the main guts of the filter. It normalizes the input frame
+// into the output frame. The frames are known to have the same dimensions
+// and pixel format.
+static void normalize(NormalizeContext *s, AVFrame *in, AVFrame *out)
+{
+    // Per-extremum, per-channel local variables.
+    struct {
+        uint8_t in;     // Original input byte value for this frame.
+        float smoothed; // Smoothed input value [0,255].
+        float out;      // Output value [0,255].
+    } min[3], max[3];   // Min and max for each channel in {R,G,B}.
+
+    float rgb_min_smoothed; // Min input range for linked normalization
+    float rgb_max_smoothed; // Max input range for linked normalization
+    uint8_t lut[3][256];    // Lookup table
+    int x, y, c;
+
+    // First, scan the input frame to find, for each channel, the minimum
+    // (min.in) and maximum (max.in) values present in the channel.
+    for (c = 0; c < 3; c++)
+        min[c].in = max[c].in = in->data[0][s->co[c]];
+    for (y = 0; y < in->height; y++) {
+        uint8_t *inp = in->data[0] + y * in->linesize[0];
+        for (x = 0; x < in->width; x++) {
+            for (c = 0; c < 3; c++) {
+                min[c].in = FFMIN(min[c].in, inp[s->co[c]]);
+                max[c].in = FFMAX(max[c].in, inp[s->co[c]]);
+            }
+            inp += s->num_components;
+        }
+    }
+
+    // Next, for each channel, push min.in and max.in into their respective
+    // histories, to determine the min.smoothed and max.smoothed for this frame.
+    {
+        int history_idx = s->frame_num % s->history_len;
+        // Assume the history is not yet full; num_history_vals is the number
+        // of frames received so far including the current frame.
+        int num_history_vals = s->frame_num + 1;
+        if (s->frame_num >= s->history_len) {
+            // The history is full; drop the oldest value and cap num_history_vals.
+            for (c = 0; c < 3; c++) {
+                s->min[c].history_sum -= s->min[c].history[history_idx];
+                s->max[c].history_sum -= s->max[c].history[history_idx];
+            }
+            num_history_vals = s->history_len;
+        }
+        // For each extremum, update history_sum and calculate the smoothed value
+        // as the rolling average of the history entries.
+        for (c = 0; c < 3; c++) {
+            s->min[c].history_sum += (s->min[c].history[history_idx] = min[c].in);
+            min[c].smoothed = s->min[c].history_sum / (float)num_history_vals;
+            s->max[c].history_sum += (s->max[c].history[history_idx] = max[c].in);
+            max[c].smoothed = s->max[c].history_sum / (float)num_history_vals;
+        }
+    }
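+    // (For example, with smoothing=2 the history holds 3 entries, so once it
+    // is full, each smoothed extremum is the mean of the values from the
+    // current frame and the two preceding frames.)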
+
+    // Determine the input range for linked normalization. This is simply the
+    // minimum of the per-channel minimums, and the maximum of the per-channel
+    // maximums.
+    rgb_min_smoothed = FFMIN3(min[0].smoothed, min[1].smoothed, min[2].smoothed);
+    rgb_max_smoothed = FFMAX3(max[0].smoothed, max[1].smoothed, max[2].smoothed);
+
+    // Now, process each channel to determine the input and output range and
+    // build the lookup tables.
+    for (c = 0; c < 3; c++) {
+        int in_val;
+        // Adjust the input range for this channel [min.smoothed,max.smoothed]
+        // by mixing in the correct proportion of the linked normalization
+        // input range [rgb_min_smoothed,rgb_max_smoothed].
+        min[c].smoothed = (min[c].smoothed  *         s->independence)
+                        + (rgb_min_smoothed * (1.0f - s->independence));
+        max[c].smoothed = (max[c].smoothed  *         s->independence)
+                        + (rgb_max_smoothed * (1.0f - s->independence));
+
+        // Calculate the output range [min.out,max.out] as a ratio of the full-
+        // strength output range [blackpt,whitept] and the original input range
+        // [min.in,max.in], based on the user-specified filter strength.
+        min[c].out = (s->blackpt[c] *         s->strength)
+                   + (min[c].in     * (1.0f - s->strength));
+        max[c].out = (s->whitept[c] *         s->strength)
+                   + (max[c].in     * (1.0f - s->strength));
+
+        // Now, build a lookup table which linearly maps the adjusted input range
+        // [min.smoothed,max.smoothed] to the output range [min.out,max.out].
+        // Perform the linear interpolation for each x:
+        //     lut[x] = (int)((x - min.smoothed) * scale + min.out + 0.5)
+        // where scale = (max.out - min.out) / (max.smoothed - min.smoothed)
+        if (min[c].smoothed == max[c].smoothed) {
+            // There is no dynamic range to expand; map all input values to
+            // the output minimum.
+            for (in_val = min[c].in; in_val <= max[c].in; in_val++)
+                lut[c][in_val] = min[c].out;
+        } else {
+            // We must set lookup values for all values in the original input
+            // range [min.in,max.in]. Since the original input range may be
+            // larger than [min.smoothed,max.smoothed], some output values may
+            // fall outside the [0,255] dynamic range. We need to clamp them.
+            float scale = (max[c].out - min[c].out) / (max[c].smoothed - min[c].smoothed);
+            for (in_val = min[c].in; in_val <= max[c].in; in_val++) {
+                int out_val = (in_val - min[c].smoothed) * scale + min[c].out + 0.5f;
+                out_val = FFMAX(out_val, 0);
+                out_val = FFMIN(out_val, 255);
+                lut[c][in_val] = out_val;
+            }
+        }
+    }
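+    // (Worked example: with blackpt=black, whitept=white, strength=1.0 and a
+    // smoothed input range of [50,200], scale = 255/150 = 1.7, so an input
+    // value of 100 maps to (int)((100 - 50) * 1.7 + 0 + 0.5) = 85.)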
+
+    // Finally, process the pixels of the input frame using the lookup tables.
+    for (y = 0; y < in->height; y++) {
+        uint8_t *inp = in->data[0] + y * in->linesize[0];
+        uint8_t *outp = out->data[0] + y * out->linesize[0];
+        for (x = 0; x < in->width; x++) {
+            for (c = 0; c < 3; c++)
+                outp[s->co[c]] = lut[c][inp[s->co[c]]];
+            if (s->num_components == 4)
+                // Copy alpha as-is.
+                outp[s->co[3]] = inp[s->co[3]];
+            inp += s->num_components;
+            outp += s->num_components;
+        }
+    }
+
+    s->frame_num++;
+}
+
+// Now we define all the functions accessible from the ff_vf_normalize class,
+// which is FFmpeg's interface to our filter.  See doc/filter_design.txt and
+// doc/writing_filters.txt for descriptions of what these interface functions
+// are expected to do.
+
+// Set the pixel formats that our filter supports. We should be able to process
+// any 8-bit RGB format. 16-bit support might be useful one day.
+static int query_formats(AVFilterContext *ctx)
+{
+    static const enum AVPixelFormat pixel_fmts[] = {
+        AV_PIX_FMT_RGB24,
+        AV_PIX_FMT_BGR24,
+        AV_PIX_FMT_ARGB,
+        AV_PIX_FMT_RGBA,
+        AV_PIX_FMT_ABGR,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_0RGB,
+        AV_PIX_FMT_RGB0,
+        AV_PIX_FMT_0BGR,
+        AV_PIX_FMT_BGR0,
+        AV_PIX_FMT_NONE
+    };
+    // According to filter_design.txt, using ff_set_common_formats() this way
+    // ensures that the pixel formats of the input and output will be the same.
+    // That saves us the effort of possibly having to handle format conversions.
+    AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
+    if (!formats)
+        return AVERROR(ENOMEM);
+    return ff_set_common_formats(ctx, formats);
+}
+
+// At this point we know the pixel format used for both input and output. We
+// can also access the frame rate of the input video and allocate some memory
+// appropriately.
+static int config_input(AVFilterLink *inlink)
+{
+    NormalizeContext *s = inlink->dst->priv;
+    // Store offsets to R,G,B,A bytes respectively in each pixel
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    int c;
+
+    for (c = 0; c < 4; ++c)
+        s->co[c] = desc->comp[c].offset;
+    s->num_components = desc->nb_components;
+    // Convert the smoothing value to history_len (a count of frames to
+    // average, which must be at least 1).  Currently this is a direct
+    // assignment, but the smoothing value was originally envisaged as a
+    // number of seconds.  In future it would be nice to set history_len
+    // using a number of seconds, but VFR video is currently an obstacle
+    // to doing so.
+    s->history_len = s->smoothing + 1;
+    // Allocate the history buffers -- there are 6, one for each extremum
+    // of each channel.  s->smoothing is limited to INT_MAX/8, so that
+    // (s->history_len * 6) can't overflow on 32-bit, causing a too-small
+    // allocation.
+    s->history_mem = av_malloc(s->history_len * 6);
+    if (s->history_mem == NULL)
+        return AVERROR(ENOMEM);
+
+    for (c = 0; c < 3; c++) {
+        s->min[c].history = s->history_mem + (c*2)   * s->history_len;
+        s->max[c].history = s->history_mem + (c*2+1) * s->history_len;
+    }
+    return 0;
+}
+
+// Free any memory allocations here
+static av_cold void uninit(AVFilterContext *ctx)
+{
+    NormalizeContext *s = ctx->priv;
+
+    av_freep(&s->history_mem);
+}
+
+// This function is pretty much standard from doc/writing_filters.txt.  It
+// tries to do in-place filtering where possible, only allocating a new output
+// frame when absolutely necessary.
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterContext *ctx = inlink->dst;
+    AVFilterLink *outlink = ctx->outputs[0];
+    NormalizeContext *s = ctx->priv;
+    AVFrame *out;
+    // Set 'direct' if we can modify the input frame in-place.  Otherwise we
+    // need to retrieve a new frame from the output link.
+    int direct = av_frame_is_writable(in) && !ctx->is_disabled;
+
+    if (direct) {
+        out = in;
+    } else {
+        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+        if (!out) {
+            av_frame_free(&in);
+            return AVERROR(ENOMEM);
+        }
+        av_frame_copy_props(out, in);
+    }
+
+    // Now that we've got the input and output frames (which may be the same
+    // frame), perform the filtering with our custom function.
+    normalize(s, in, out);
+
+    if (ctx->is_disabled) {
+        av_frame_free(&out);
+        return ff_filter_frame(outlink, in);
+    }
+
+    if (!direct)
+        av_frame_free(&in);
+
+    return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+        .config_props = config_input,
+    },
+    { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+    {
+        .name = "default",
+        .type = AVMEDIA_TYPE_VIDEO,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_normalize = {
+    .name          = "normalize",
+    .description   = NULL_IF_CONFIG_SMALL("Normalize RGB video."),
+    .priv_size     = sizeof(NormalizeContext),
+    .priv_class    = &normalize_class,
+    .uninit        = uninit,
+    .query_formats = query_formats,
+    .inputs        = inputs,
+    .outputs       = outputs,
+};