Browse code

avcodec: add a native SMPTE VC-2 HQ encoder

This commit adds a new encoder capable of creating BBC/SMPTE Dirac/VC-2 HQ
profile files.

Dirac is a wavelet based codec created by the BBC a little more than 10
years ago. Since then, wavelets have mostly gone out of style as they
did not provide adequate encoding gains at lower bitrates. Dirac was a
fully featured video codec equipped with perceptual masking, support for
most popular pixel formats, interlacing, overlapped-block motion
compensation, and other features. It found new life after being stripped
of various features and standardized as the VC-2 codec by the SMPTE with
an extra profile, the HQ profile that this encoder supports, added.

The HQ profile was based on the Low-Delay profile previously
existing in Dirac. The profile forbids DC prediction and arithmetic
coding to focus on high performance and low delay at higher bitrates.
The standard bitrates for this profile vary but generally 1:4
compression is expected (~525 Mbps vs the 2200 Mbps for uncompressed
1080p50). The codec only supports I-frames, hence the high bitrates.

The structure of this encoder is simple: do a DWT transform on the
entire image, split it into multiple slices (specified by the user) and
encode them in parallel. All of the slices are of the same size, making
rate control and threading very trivial. Although only in C, this encoder
is capable of 30 frames per second on a 4-core, 8-thread Ivy Bridge.
A lookup table is used to encode most of the coefficients.

No code was used from the GSoC encoder from 2007 except for the 2
transform functions in diracenc_transforms.c. All other code was written
from scratch.

This encoder outperforms all other existing encoders in quality, usability and in
features. Other existing implementations do not support 4 level
transforms or 64x64 blocks (slices), which greatly increase compression.

As previously said, the codec is meant for broadcasting, hence support
for non-broadcasting image widths, heights, bit depths, aspect ratios,
etc. are limited by the "level". Although this codec supports a few
chroma subsamplings (420, 422, 444), signalling those is generally
outside the specifications of the level used (3) and the reference
decoder will outright refuse to read any image with such a flag
signalled (it only supports 1920x1080 yuv422p10). However, most
implementations will happily read files with alternate dimensions,
framerates and formats signalled.

Therefore, in order to encode files other than 1080p50 yuv422p10le, you
need to provide an "-strict -2" argument to the command line. The FFmpeg
decoder will happily read any files made with non-standard parameters,
dimensions and subsamplings, and so will other implementations. IMO this
should be "-strict -1", but I'll leave that up for discussion.

There is still plenty of stuff to implement, for instance 5 more
wavelet transforms are still in the specs and supported by the decoder.

The encoder can be lossless, given a high enough bitrate.

Signed-off-by: Rostislav Pehlivanov <atomnuker@gmail.com>

Rostislav Pehlivanov authored on 2016/02/11 01:50:00
Showing 6 changed files
... ...
@@ -543,6 +543,7 @@ OBJS-$(CONFIG_VC1_DECODER)             += vc1dec.o vc1_block.o vc1_loopfilter.o
543 543
                                           wmv2dsp.o
544 544
 OBJS-$(CONFIG_VC1_MMAL_DECODER)        += mmaldec.o
545 545
 OBJS-$(CONFIG_VC1_QSV_DECODER)         += qsvdec_vc1.o
546
+OBJS-$(CONFIG_VC2_ENCODER)             += vc2enc.o vc2enc_dwt.o diractab.o
546 547
 OBJS-$(CONFIG_VCR1_DECODER)            += vcr1.o
547 548
 OBJS-$(CONFIG_VMDAUDIO_DECODER)        += vmdaudio.o
548 549
 OBJS-$(CONFIG_VMDVIDEO_DECODER)        += vmdvideo.o
... ...
@@ -336,6 +336,7 @@ void avcodec_register_all(void)
336 336
     REGISTER_DECODER(VC1IMAGE,          vc1image);
337 337
     REGISTER_DECODER(VC1_MMAL,          vc1_mmal);
338 338
     REGISTER_DECODER(VC1_QSV,           vc1_qsv);
339
+    REGISTER_ENCODER(VC2,               vc2);
339 340
     REGISTER_DECODER(VCR1,              vcr1);
340 341
     REGISTER_DECODER(VMDVIDEO,          vmdvideo);
341 342
     REGISTER_DECODER(VMNC,              vmnc);
342 343
new file mode 100644
... ...
@@ -0,0 +1,1196 @@
0
+/*
1
+ * Copyright (C) 2016 Open Broadcast Systems Ltd.
2
+ * Author        2016 Rostislav Pehlivanov <atomnuker@gmail.com>
3
+ *
4
+ * This file is part of FFmpeg.
5
+ *
6
+ * FFmpeg is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * FFmpeg is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with FFmpeg; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+#include "libavutil/ffversion.h"
22
+#include "libavutil/pixdesc.h"
23
+#include "libavutil/opt.h"
24
+#include "dirac.h"
25
+#include "put_bits.h"
26
+#include "internal.h"
27
+
28
+#include "vc2enc_dwt.h"
29
+#include "diractab.h"
30
+
31
+/* Quantizations above this usually zero coefficients and lower the quality */
32
+#define MAX_QUANT_INDEX 100
33
+
34
+#define COEF_LUT_TAB 2048
35
+
36
+enum VC2_QM {
37
+    VC2_QM_DEF = 0,
38
+    VC2_QM_COL,
39
+    VC2_QM_FLAT,
40
+
41
+    VC2_QM_NB
42
+};
43
+
44
+typedef struct SubBand {
45
+    dwtcoef *buf;
46
+    ptrdiff_t stride;
47
+    int width;
48
+    int height;
49
+} SubBand;
50
+
51
+typedef struct Plane {
52
+    SubBand band[MAX_DWT_LEVELS][4];
53
+    dwtcoef *coef_buf;
54
+    int width;
55
+    int height;
56
+    int dwt_width;
57
+    int dwt_height;
58
+    ptrdiff_t coef_stride;
59
+} Plane;
60
+
61
+typedef struct SliceArgs {
62
+    PutBitContext pb;
63
+    void *ctx;
64
+    int x;
65
+    int y;
66
+    int quant_idx;
67
+    int bits_ceil;
68
+    int bytes;
69
+} SliceArgs;
70
+
71
+typedef struct TransformArgs {
72
+    void *ctx;
73
+    Plane *plane;
74
+    void *idata;
75
+    ptrdiff_t istride;
76
+    int field;
77
+    VC2TransformContext t;
78
+} TransformArgs;
79
+
80
+typedef struct VC2EncContext {
81
+    AVClass *av_class;
82
+    PutBitContext pb;
83
+    Plane plane[3];
84
+    AVCodecContext *avctx;
85
+    DiracVersionInfo ver;
86
+
87
+    SliceArgs *slice_args;
88
+    TransformArgs transform_args[3];
89
+
90
+    /* For conversion from unsigned pixel values to signed */
91
+    int diff_offset;
92
+    int bpp;
93
+
94
+    /* Picture number */
95
+    uint32_t picture_number;
96
+
97
+    /* Base video format */
98
+    int base_vf;
99
+    int level;
100
+    int profile;
101
+
102
+    /* Quantization matrix */
103
+    uint8_t quant[MAX_DWT_LEVELS][4];
104
+
105
+    /* Coefficient LUT */
106
+    uint32_t *coef_lut_val;
107
+    uint8_t  *coef_lut_len;
108
+
109
+    int num_x; /* #slices horizontally */
110
+    int num_y; /* #slices vertically */
111
+    int prefix_bytes;
112
+    int size_scaler;
113
+    int chroma_x_shift;
114
+    int chroma_y_shift;
115
+
116
+    /* Rate control stuff */
117
+    int slice_max_bytes;
118
+    int q_ceil;
119
+    int q_start;
120
+
121
+    /* Options */
122
+    double tolerance;
123
+    int wavelet_idx;
124
+    int wavelet_depth;
125
+    int strict_compliance;
126
+    int slice_height;
127
+    int slice_width;
128
+    int interlaced;
129
+    enum VC2_QM quant_matrix;
130
+
131
+    /* Parse code state */
132
+    uint32_t next_parse_offset;
133
+    enum DiracParseCodes last_parse_code;
134
+} VC2EncContext;
135
+
136
+static av_always_inline void put_padding(PutBitContext *pb, int bytes)
137
+{
138
+    int bits = bytes*8;
139
+    if (!bits)
140
+        return;
141
+    while (bits > 31) {
142
+        put_bits(pb, 31, 0);
143
+        bits -= 31;
144
+    }
145
+    if (bits)
146
+        put_bits(pb, bits, 0);
147
+}
148
+
149
+static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
150
+{
151
+    int i;
152
+    int pbits = 0, bits = 0, topbit = 1, maxval = 1;
153
+
154
+    if (!val++) {
155
+        put_bits(pb, 1, 1);
156
+        return;
157
+    }
158
+
159
+    while (val > maxval) {
160
+        topbit <<= 1;
161
+        maxval <<= 1;
162
+        maxval |=  1;
163
+    }
164
+
165
+    bits = ff_log2(topbit);
166
+
167
+    for (i = 0; i < bits; i++) {
168
+        topbit >>= 1;
169
+        pbits <<= 2;
170
+        if (val & topbit)
171
+            pbits |= 0x1;
172
+    }
173
+
174
+    put_bits(pb, bits*2 + 1, (pbits << 1) | 1);
175
+}
176
+
177
+static av_always_inline int count_vc2_ue_uint(uint16_t val)
178
+{
179
+    int topbit = 1, maxval = 1;
180
+
181
+    if (!val++)
182
+        return 1;
183
+
184
+    while (val > maxval) {
185
+        topbit <<= 1;
186
+        maxval <<= 1;
187
+        maxval |=  1;
188
+    }
189
+
190
+    return ff_log2(topbit)*2 + 1;
191
+}
192
+
193
+static av_always_inline void get_vc2_ue_uint(uint16_t val, uint8_t *nbits,
194
+                                               uint32_t *eval)
195
+{
196
+    int i;
197
+    int pbits = 0, bits = 0, topbit = 1, maxval = 1;
198
+
199
+    if (!val++) {
200
+        *nbits = 1;
201
+        *eval = 1;
202
+        return;
203
+    }
204
+
205
+    while (val > maxval) {
206
+        topbit <<= 1;
207
+        maxval <<= 1;
208
+        maxval |=  1;
209
+    }
210
+
211
+    bits = ff_log2(topbit);
212
+
213
+    for (i = 0; i < bits; i++) {
214
+        topbit >>= 1;
215
+        pbits <<= 2;
216
+        if (val & topbit)
217
+            pbits |= 0x1;
218
+    }
219
+
220
+    *nbits = bits*2 + 1;
221
+    *eval = (pbits << 1) | 1;
222
+}
223
+
224
+/* VC-2 10.4 - parse_info() */
225
+static void encode_parse_info(VC2EncContext *s, enum DiracParseCodes pcode)
226
+{
227
+    uint32_t cur_pos, dist;
228
+
229
+    avpriv_align_put_bits(&s->pb);
230
+
231
+    cur_pos = put_bits_count(&s->pb) >> 3;
232
+
233
+    /* Magic string */
234
+    avpriv_put_string(&s->pb, "BBCD", 0);
235
+
236
+    /* Parse code */
237
+    put_bits(&s->pb, 8, pcode);
238
+
239
+    /* Next parse offset */
240
+    dist = cur_pos - s->next_parse_offset;
241
+    AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
242
+    s->next_parse_offset = cur_pos;
243
+    put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);
244
+
245
+    /* Last parse offset */
246
+    put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 : dist);
247
+
248
+    s->last_parse_code = pcode;
249
+}
250
+
251
+/* VC-2 11.1 - parse_parameters()
252
+ * The level dictates what the decoder should expect in terms of resolution
253
+ * and allows it to quickly reject whatever it can't support. Remember,
254
+ * this codec kinda targets cheapo FPGAs without much memory. Unfortunately
255
+ * it also limits us greatly in our choice of formats, hence the flag to disable
256
+ * strict_compliance */
257
+static void encode_parse_params(VC2EncContext *s)
258
+{
259
+    put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2 */
260
+    put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0       */
261
+    put_vc2_ue_uint(&s->pb, s->profile);   /* 3 to signal HQ profile    */
262
+    put_vc2_ue_uint(&s->pb, s->level);     /* 3 - 1080/720, 6 - 4K      */
263
+}
264
+
265
+/* VC-2 11.3 - frame_size() */
266
+static void encode_frame_size(VC2EncContext *s)
267
+{
268
+    put_bits(&s->pb, 1, !s->strict_compliance);
269
+    if (!s->strict_compliance) {
270
+        AVCodecContext *avctx = s->avctx;
271
+        put_vc2_ue_uint(&s->pb, avctx->width);
272
+        put_vc2_ue_uint(&s->pb, avctx->height);
273
+    }
274
+}
275
+
276
+/* VC-2 11.3.3 - color_diff_sampling_format() */
277
+static void encode_sample_fmt(VC2EncContext *s)
278
+{
279
+    put_bits(&s->pb, 1, !s->strict_compliance);
280
+    if (!s->strict_compliance) {
281
+        int idx;
282
+        if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
283
+            idx = 1; /* 422 */
284
+        else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
285
+            idx = 2; /* 420 */
286
+        else
287
+            idx = 0; /* 444 */
288
+        put_vc2_ue_uint(&s->pb, idx);
289
+    }
290
+}
291
+
292
+/* VC-2 11.3.4 - scan_format() */
293
+static void encode_scan_format(VC2EncContext *s)
294
+{
295
+    put_bits(&s->pb, 1, !s->strict_compliance);
296
+    if (!s->strict_compliance)
297
+        put_vc2_ue_uint(&s->pb, s->interlaced);
298
+}
299
+
300
+/* VC-2 11.3.5 - frame_rate() */
301
+static void encode_frame_rate(VC2EncContext *s)
302
+{
303
+    put_bits(&s->pb, 1, !s->strict_compliance);
304
+    if (!s->strict_compliance) {
305
+        AVCodecContext *avctx = s->avctx;
306
+        put_vc2_ue_uint(&s->pb, 0);
307
+        put_vc2_ue_uint(&s->pb, avctx->time_base.den);
308
+        put_vc2_ue_uint(&s->pb, avctx->time_base.num);
309
+    }
310
+}
311
+
312
+/* VC-2 11.3.6 - aspect_ratio() */
313
+static void encode_aspect_ratio(VC2EncContext *s)
314
+{
315
+    put_bits(&s->pb, 1, !s->strict_compliance);
316
+    if (!s->strict_compliance) {
317
+        AVCodecContext *avctx = s->avctx;
318
+        put_vc2_ue_uint(&s->pb, 0);
319
+        put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
320
+        put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
321
+    }
322
+}
323
+
324
+/* VC-2 11.3.7 - clean_area() */
325
+static void encode_clean_area(VC2EncContext *s)
326
+{
327
+    put_bits(&s->pb, 1, 0);
328
+}
329
+
330
+/* VC-2 11.3.8 - signal_range() */
331
+static void encode_signal_range(VC2EncContext *s)
332
+{
333
+    int idx;
334
+    AVCodecContext *avctx = s->avctx;
335
+    const AVPixFmtDescriptor *fmt = av_pix_fmt_desc_get(avctx->pix_fmt);
336
+    const int depth = fmt->comp[0].depth;
337
+    if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) {
338
+        idx = 1;
339
+        s->bpp = 1;
340
+        s->diff_offset = 128;
341
+    } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG ||
342
+               avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) {
343
+        idx = 2;
344
+        s->bpp = 1;
345
+        s->diff_offset = 128;
346
+    } else if (depth == 10) {
347
+        idx = 3;
348
+        s->bpp = 2;
349
+        s->diff_offset = 512;
350
+    } else {
351
+        idx = 4;
352
+        s->bpp = 2;
353
+        s->diff_offset = 2048;
354
+    }
355
+    put_bits(&s->pb, 1, !s->strict_compliance);
356
+    if (!s->strict_compliance)
357
+        put_vc2_ue_uint(&s->pb, idx);
358
+}
359
+
360
+/* VC-2 11.3.9 - color_spec() */
361
+static void encode_color_spec(VC2EncContext *s)
362
+{
363
+    AVCodecContext *avctx = s->avctx;
364
+    put_bits(&s->pb, 1, !s->strict_compliance);
365
+    if (!s->strict_compliance) {
366
+        int val;
367
+        put_vc2_ue_uint(&s->pb, 0);
368
+
369
+        /* primaries */
370
+        put_bits(&s->pb, 1, 1);
371
+        if (avctx->color_primaries == AVCOL_PRI_BT470BG)
372
+            val = 2;
373
+        else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
374
+            val = 1;
375
+        else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
376
+            val = 1;
377
+        else
378
+            val = 0;
379
+        put_vc2_ue_uint(&s->pb, val);
380
+
381
+        /* color matrix */
382
+        put_bits(&s->pb, 1, 1);
383
+        if (avctx->colorspace == AVCOL_SPC_RGB)
384
+            val = 3;
385
+        else if (avctx->colorspace == AVCOL_SPC_YCOCG)
386
+            val = 2;
387
+        else if (avctx->colorspace == AVCOL_SPC_BT470BG)
388
+            val = 1;
389
+        else
390
+            val = 0;
391
+        put_vc2_ue_uint(&s->pb, val);
392
+
393
+        /* transfer function */
394
+        put_bits(&s->pb, 1, 1);
395
+        if (avctx->color_trc == AVCOL_TRC_LINEAR)
396
+            val = 2;
397
+        else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
398
+            val = 1;
399
+        else
400
+            val = 0;
401
+        put_vc2_ue_uint(&s->pb, val);
402
+    }
403
+}
404
+
405
+/* VC-2 11.3 - source_parameters() */
406
+static void encode_source_params(VC2EncContext *s)
407
+{
408
+    encode_frame_size(s);
409
+    encode_sample_fmt(s);
410
+    encode_scan_format(s);
411
+    encode_frame_rate(s);
412
+    encode_aspect_ratio(s);
413
+    encode_clean_area(s);
414
+    encode_signal_range(s);
415
+    encode_color_spec(s);
416
+}
417
+
418
+/* VC-2 11 - sequence_header() */
419
+static void encode_seq_header(VC2EncContext *s)
420
+{
421
+    avpriv_align_put_bits(&s->pb);
422
+    encode_parse_params(s);
423
+    put_vc2_ue_uint(&s->pb, s->base_vf);
424
+    encode_source_params(s);
425
+    put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding */
426
+}
427
+
428
+/* VC-2 12.1 - picture_header() */
429
+static void encode_picture_header(VC2EncContext *s)
430
+{
431
+    avpriv_align_put_bits(&s->pb);
432
+    put_bits32(&s->pb, s->picture_number++);
433
+}
434
+
435
+/* VC-2 12.3.4.1 - slice_parameters() */
436
+static void encode_slice_params(VC2EncContext *s)
437
+{
438
+    put_vc2_ue_uint(&s->pb, s->num_x);
439
+    put_vc2_ue_uint(&s->pb, s->num_y);
440
+    put_vc2_ue_uint(&s->pb, s->prefix_bytes);
441
+    put_vc2_ue_uint(&s->pb, s->size_scaler);
442
+}
443
+
444
/* 1st idx = LL, second - vertical, third - horizontal, fourth - total.
 * `static`: these tables are file-local; without it they leak into the
 * global namespace and risk link-time collisions. */
static const uint8_t vc2_qm_col_tab[][4] = {
    {20,  9, 15,  4},
    { 0,  6,  6,  4},
    { 0,  3,  3,  5},
    { 0,  3,  5,  1},
    { 0, 11, 10, 11}
};

static const uint8_t vc2_qm_flat_tab[][4] = {
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0}
};
460
+
461
+static void init_custom_qm(VC2EncContext *s)
462
+{
463
+    int level, orientation;
464
+
465
+    if (s->quant_matrix == VC2_QM_DEF) {
466
+        for (level = 0; level < s->wavelet_depth; level++) {
467
+            for (orientation = 0; orientation < 4; orientation++) {
468
+                if (level <= 3)
469
+                    s->quant[level][orientation] = ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
470
+                else
471
+                    s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
472
+            }
473
+        }
474
+    } else if (s->quant_matrix == VC2_QM_COL) {
475
+        for (level = 0; level < s->wavelet_depth; level++) {
476
+            for (orientation = 0; orientation < 4; orientation++) {
477
+                s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
478
+            }
479
+        }
480
+    } else {
481
+        for (level = 0; level < s->wavelet_depth; level++) {
482
+            for (orientation = 0; orientation < 4; orientation++) {
483
+                s->quant[level][orientation] = vc2_qm_flat_tab[level][orientation];
484
+            }
485
+        }
486
+    }
487
+}
488
+
489
+/* VC-2 12.3.4.2 - quant_matrix() */
490
+static void encode_quant_matrix(VC2EncContext *s)
491
+{
492
+    int level, custom_quant_matrix = 0;
493
+    if (s->wavelet_depth > 4 || s->quant_matrix != VC2_QM_DEF)
494
+        custom_quant_matrix = 1;
495
+    put_bits(&s->pb, 1, custom_quant_matrix);
496
+    if (custom_quant_matrix) {
497
+        init_custom_qm(s);
498
+        put_vc2_ue_uint(&s->pb, s->quant[0][0]);
499
+        for (level = 0; level < s->wavelet_depth; level++) {
500
+            put_vc2_ue_uint(&s->pb, s->quant[level][1]);
501
+            put_vc2_ue_uint(&s->pb, s->quant[level][2]);
502
+            put_vc2_ue_uint(&s->pb, s->quant[level][3]);
503
+        }
504
+    } else {
505
+        for (level = 0; level < s->wavelet_depth; level++) {
506
+            s->quant[level][0] = ff_dirac_default_qmat[s->wavelet_idx][level][0];
507
+            s->quant[level][1] = ff_dirac_default_qmat[s->wavelet_idx][level][1];
508
+            s->quant[level][2] = ff_dirac_default_qmat[s->wavelet_idx][level][2];
509
+            s->quant[level][3] = ff_dirac_default_qmat[s->wavelet_idx][level][3];
510
+        }
511
+    }
512
+}
513
+
514
+/* VC-2 12.3 - transform_parameters() */
515
+static void encode_transform_params(VC2EncContext *s)
516
+{
517
+    put_vc2_ue_uint(&s->pb, s->wavelet_idx);
518
+    put_vc2_ue_uint(&s->pb, s->wavelet_depth);
519
+
520
+    encode_slice_params(s);
521
+    encode_quant_matrix(s);
522
+}
523
+
524
+/* VC-2 12.2 - wavelet_transform() */
525
+static void encode_wavelet_transform(VC2EncContext *s)
526
+{
527
+    encode_transform_params(s);
528
+    avpriv_align_put_bits(&s->pb);
529
+    /* Continued after DWT in encode_transform_data() */
530
+}
531
+
532
+/* VC-2 12 - picture_parse() */
533
+static void encode_picture_start(VC2EncContext *s)
534
+{
535
+    avpriv_align_put_bits(&s->pb);
536
+    encode_picture_header(s);
537
+    avpriv_align_put_bits(&s->pb);
538
+    encode_wavelet_transform(s);
539
+}
540
+
541
+#define QUANT(c)  \
542
+    c <<= 2;      \
543
+    c /= qfactor; \
544
+
545
+static av_always_inline void coeff_quantize_get(qcoef coeff, int qfactor,
546
+                                                uint8_t *len, uint32_t *eval)
547
+{
548
+    QUANT(coeff)
549
+    get_vc2_ue_uint(abs(coeff), len, eval);
550
+    if (coeff) {
551
+        *eval = (*eval << 1) | (coeff < 0);
552
+        *len += 1;
553
+    }
554
+}
555
+
556
+static av_always_inline void coeff_quantize_encode(PutBitContext *pb, qcoef coeff,
557
+                                                   int qfactor)
558
+{
559
+    QUANT(coeff)
560
+    put_vc2_ue_uint(pb, abs(coeff));
561
+    if (coeff)
562
+        put_bits(pb, 1, coeff < 0);
563
+}
564
+
565
+/* VC-2 13.5.5.2 - slice_band() */
566
+static void encode_subband(VC2EncContext *s, PutBitContext *pb, int sx, int sy,
567
+                           SubBand *b, int quant)
568
+{
569
+    int x, y;
570
+
571
+    int left   = b->width  * (sx+0) / s->num_x;
572
+    int right  = b->width  * (sx+1) / s->num_x;
573
+    int top    = b->height * (sy+0) / s->num_y;
574
+    int bottom = b->height * (sy+1) / s->num_y;
575
+
576
+    int qfactor = ff_dirac_qscale_tab[quant];
577
+    uint8_t  *len_lut = &s->coef_lut_len[2*quant*COEF_LUT_TAB + COEF_LUT_TAB];
578
+    uint32_t *val_lut = &s->coef_lut_val[2*quant*COEF_LUT_TAB + COEF_LUT_TAB];
579
+
580
+    dwtcoef *coeff = b->buf + top * b->stride;
581
+
582
+    for (y = top; y < bottom; y++) {
583
+        for (x = left; x < right; x++) {
584
+            if (coeff[x] >= -COEF_LUT_TAB && coeff[x] < COEF_LUT_TAB)
585
+                put_bits(pb, len_lut[coeff[x]], val_lut[coeff[x]]);
586
+            else
587
+                coeff_quantize_encode(pb, coeff[x], qfactor);
588
+        }
589
+        coeff += b->stride;
590
+    }
591
+}
592
+
593
+static int count_hq_slice(VC2EncContext *s, int slice_x,
594
+                          int slice_y, int quant_idx)
595
+{
596
+    int x, y, left, right, top, bottom, qfactor;
597
+    uint8_t quants[MAX_DWT_LEVELS][4];
598
+    int bits = 0, p, level, orientation;
599
+
600
+    bits += 8*s->prefix_bytes;
601
+    bits += 8; /* quant_idx */
602
+
603
+    for (level = 0; level < s->wavelet_depth; level++)
604
+        for (orientation = !!level; orientation < 4; orientation++)
605
+            quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
606
+
607
+    for (p = 0; p < 3; p++) {
608
+        int bytes_start, bytes_len, pad_s, pad_c;
609
+        bytes_start = bits >> 3;
610
+        bits += 8;
611
+        for (level = 0; level < s->wavelet_depth; level++) {
612
+            for (orientation = !!level; orientation < 4; orientation++) {
613
+                dwtcoef *buf;
614
+                SubBand *b = &s->plane[p].band[level][orientation];
615
+
616
+                quant_idx = quants[level][orientation];
617
+                qfactor = ff_dirac_qscale_tab[quant_idx];
618
+
619
+                left   = b->width  * slice_x    / s->num_x;
620
+                right  = b->width  *(slice_x+1) / s->num_x;
621
+                top    = b->height * slice_y    / s->num_y;
622
+                bottom = b->height *(slice_y+1) / s->num_y;
623
+
624
+                buf = b->buf + top * b->stride;
625
+
626
+                for (y = top; y < bottom; y++) {
627
+                    for (x = left; x < right; x++) {
628
+                        qcoef coeff = (qcoef)buf[x];
629
+                        if (coeff >= -COEF_LUT_TAB && coeff < COEF_LUT_TAB) {
630
+                            bits += s->coef_lut_len[2*quant_idx*COEF_LUT_TAB + coeff + COEF_LUT_TAB];
631
+                        } else {
632
+                            QUANT(coeff)
633
+                            bits += count_vc2_ue_uint(abs(coeff));
634
+                            bits += !!coeff;
635
+                        }
636
+                    }
637
+                    buf += b->stride;
638
+                }
639
+            }
640
+        }
641
+        bits += FFALIGN(bits, 8) - bits;
642
+        bytes_len = (bits >> 3) - bytes_start - 1;
643
+        pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
644
+        pad_c = (pad_s*s->size_scaler) - bytes_len;
645
+        bits += pad_c*8;
646
+    }
647
+
648
+    return bits;
649
+}
650
+
651
+/* Approaches the best possible quantizer asymptotically, its kinda exaustive
652
+ * but we have a LUT to get the coefficient size in bits. Guaranteed to never
653
+ * overshoot, which is apparently very important when streaming */
654
+static int rate_control(AVCodecContext *avctx, void *arg)
655
+{
656
+    SliceArgs *slice_dat = arg;
657
+    VC2EncContext *s = slice_dat->ctx;
658
+    const int sx = slice_dat->x;
659
+    const int sy = slice_dat->y;
660
+    int quant_buf[2], bits_buf[2], quant = s->q_start, range = s->q_start/3;
661
+    const int64_t top = slice_dat->bits_ceil;
662
+    const double percent = s->tolerance;
663
+    const double bottom = top - top*(percent/100.0f);
664
+    int bits = count_hq_slice(s, sx, sy, quant);
665
+    range -= range & 1; /* Make it an even number */
666
+    while ((bits > top) || (bits < bottom)) {
667
+        range *= bits > top ? +1 : -1;
668
+        quant = av_clip(quant + range, 0, s->q_ceil);
669
+        bits = count_hq_slice(s, sx, sy, quant);
670
+        range = av_clip(range/2, 1, s->q_ceil);
671
+        if (quant_buf[1] == quant) {
672
+            quant = bits_buf[0] < bits ? quant_buf[0] : quant;
673
+            bits = bits_buf[0] < bits ? bits_buf[0] : bits;
674
+            break;
675
+        }
676
+        quant_buf[1] = quant_buf[0];
677
+        quant_buf[0] = quant;
678
+        bits_buf[1] = bits_buf[0];
679
+        bits_buf[0] = bits;
680
+    }
681
+    slice_dat->quant_idx = av_clip(quant, 0, s->q_ceil);
682
+    slice_dat->bytes = FFALIGN((bits >> 3), s->size_scaler) + 4 + s->prefix_bytes;
683
+
684
+    return 0;
685
+}
686
+
687
+static void calc_slice_sizes(VC2EncContext *s)
688
+{
689
+    int slice_x, slice_y;
690
+    SliceArgs *enc_args = s->slice_args;
691
+
692
+    for (slice_y = 0; slice_y < s->num_y; slice_y++) {
693
+        for (slice_x = 0; slice_x < s->num_x; slice_x++) {
694
+            SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
695
+            args->ctx = s;
696
+            args->x = slice_x;
697
+            args->y = slice_y;
698
+            args->bits_ceil = s->slice_max_bytes << 3;
699
+        }
700
+    }
701
+
702
+    /* Determine quantization indices and bytes per slice */
703
+    s->avctx->execute(s->avctx, rate_control, enc_args, NULL, s->num_x*s->num_y,
704
+                      sizeof(SliceArgs));
705
+}
706
+
707
+/* VC-2 13.5.3 - hq_slice */
708
+static int encode_hq_slice(AVCodecContext *avctx, void *arg)
709
+{
710
+    SliceArgs *slice_dat = arg;
711
+    VC2EncContext *s = slice_dat->ctx;
712
+    PutBitContext *pb = &slice_dat->pb;
713
+    const int slice_x = slice_dat->x;
714
+    const int slice_y = slice_dat->y;
715
+    const int quant_idx = slice_dat->quant_idx;
716
+    const int slice_bytes_max = slice_dat->bytes;
717
+    uint8_t quants[MAX_DWT_LEVELS][4];
718
+    int p, level, orientation;
719
+
720
+    avpriv_align_put_bits(pb);
721
+    put_padding(pb, s->prefix_bytes);
722
+    put_bits(pb, 8, quant_idx);
723
+
724
+    /* Slice quantization (slice_quantizers() in the specs) */
725
+    for (level = 0; level < s->wavelet_depth; level++)
726
+        for (orientation = !!level; orientation < 4; orientation++)
727
+            quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
728
+
729
+    /* Luma + 2 Chroma planes */
730
+    for (p = 0; p < 3; p++) {
731
+        int bytes_start, bytes_len, pad_s, pad_c;
732
+        bytes_start = put_bits_count(pb) >> 3;
733
+        put_bits(pb, 8, 0);
734
+        for (level = 0; level < s->wavelet_depth; level++) {
735
+            for (orientation = !!level; orientation < 4; orientation++) {
736
+                encode_subband(s, pb, slice_x, slice_y,
737
+                               &s->plane[p].band[level][orientation],
738
+                               quants[level][orientation]);
739
+            }
740
+        }
741
+        avpriv_align_put_bits(pb);
742
+        bytes_len = (put_bits_count(pb) >> 3) - bytes_start - 1;
743
+        if (p == 2) {
744
+            int len_diff = slice_bytes_max - (put_bits_count(pb) >> 3);
745
+            pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
746
+            pad_c = (pad_s*s->size_scaler) - bytes_len;
747
+        } else {
748
+            pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
749
+            pad_c = (pad_s*s->size_scaler) - bytes_len;
750
+        }
751
+        pb->buf[bytes_start] = pad_s;
752
+        put_padding(pb, pad_c);
753
+    }
754
+
755
+    return 0;
756
+}
757
+
758
+/* VC-2 13.5.1 - low_delay_transform_data() */
759
+static int encode_slices(VC2EncContext *s)
760
+{
761
+    uint8_t *buf;
762
+    int slice_x, slice_y, skip = 0;
763
+    SliceArgs *enc_args = s->slice_args;
764
+
765
+    avpriv_align_put_bits(&s->pb);
766
+    flush_put_bits(&s->pb);
767
+    buf = put_bits_ptr(&s->pb);
768
+
769
+    for (slice_y = 0; slice_y < s->num_y; slice_y++) {
770
+        for (slice_x = 0; slice_x < s->num_x; slice_x++) {
771
+            SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
772
+            init_put_bits(&args->pb, buf + skip, args->bytes);
773
+            s->q_start = (s->q_start + args->quant_idx)/2;
774
+            skip += args->bytes;
775
+        }
776
+    }
777
+
778
+    s->avctx->execute(s->avctx, encode_hq_slice, enc_args, NULL, s->num_x*s->num_y,
779
+                      sizeof(SliceArgs));
780
+
781
+    skip_put_bytes(&s->pb, skip);
782
+
783
+    return 0;
784
+}
785
+
786
+/*
787
+ * Transform basics for a 3 level transform
788
+ * |---------------------------------------------------------------------|
789
+ * |  LL-0  | HL-0  |                 |                                  |
790
+ * |--------|-------|      HL-1       |                                  |
791
+ * |  LH-0  | HH-0  |                 |                                  |
792
+ * |----------------|-----------------|              HL-2                |
793
+ * |                |                 |                                  |
794
+ * |     LH-1       |      HH-1       |                                  |
795
+ * |                |                 |                                  |
796
+ * |----------------------------------|----------------------------------|
797
+ * |                                  |                                  |
798
+ * |                                  |                                  |
799
+ * |                                  |                                  |
800
+ * |              LH-2                |              HH-2                |
801
+ * |                                  |                                  |
802
+ * |                                  |                                  |
803
+ * |                                  |                                  |
804
+ * |---------------------------------------------------------------------|
805
+ *
806
+ * DWT transforms are generally applied by splitting the image in two vertically
807
+ * and applying a low pass transform on the left part and a corresponding high
808
+ * pass transform on the right hand side. This is known as the horizontal filter
809
+ * stage.
810
+ * After that, the same operation is performed except the image is divided
811
+ * horizontally, with the high pass on the lower and the low pass on the higher
812
+ * side.
813
+ * Therefore, you're left with 4 subdivisions - known as  low-low, low-high,
814
+ * high-low and high-high. They're referred to as orientations in the decoder
815
+ * and encoder.
816
+ *
817
+ * The LL (low-low) area contains the original image downsampled by the amount
818
+ * of levels. The rest of the areas can be thought as the details needed
819
+ * to restore the image perfectly to its original size.
820
+ */
821
+
822
+
823
+static int dwt_plane(AVCodecContext *avctx, void *arg)
824
+{
825
+    TransformArgs *transform_dat = arg;
826
+    VC2EncContext *s = transform_dat->ctx;
827
+    const void *frame_data = transform_dat->idata;
828
+    const ptrdiff_t linesize = transform_dat->istride;
829
+    const int field = transform_dat->field;
830
+    const Plane *p = transform_dat->plane;
831
+    VC2TransformContext *t = &transform_dat->t;
832
+    dwtcoef *buf = p->coef_buf;
833
+    const int idx = s->wavelet_idx;
834
+    const int skip = 1 + s->interlaced;
835
+
836
+    int x, y, level, offset;
837
+    ptrdiff_t pix_stride = linesize >> (s->bpp - 1);
838
+
839
+    if (field == 1) {
840
+        offset = 0;
841
+        pix_stride <<= 1;
842
+    } else if (field == 2) {
843
+        offset = pix_stride;
844
+        pix_stride <<= 1;
845
+    } else {
846
+        offset = 0;
847
+    }
848
+
849
+    if (s->bpp == 1) {
850
+        const uint8_t *pix = (const uint8_t *)frame_data + offset;
851
+        for (y = 0; y < p->height*skip; y+=skip) {
852
+            for (x = 0; x < p->width; x++) {
853
+                buf[x] = pix[x] - s->diff_offset;
854
+            }
855
+            buf += p->coef_stride;
856
+            pix += pix_stride;
857
+        }
858
+    } else {
859
+        const uint16_t *pix = (const uint16_t *)frame_data + offset;
860
+        for (y = 0; y < p->height*skip; y+=skip) {
861
+            for (x = 0; x < p->width; x++) {
862
+                buf[x] = pix[x] - s->diff_offset;
863
+            }
864
+            buf += p->coef_stride;
865
+            pix += pix_stride;
866
+        }
867
+    }
868
+
869
+    memset(buf, 0, (p->coef_stride*p->dwt_height - p->height*p->width)*sizeof(dwtcoef));
870
+
871
+    for (level = s->wavelet_depth-1; level >= 0; level--) {
872
+        const SubBand *b = &p->band[level][0];
873
+        t->vc2_subband_dwt[idx](t, p->coef_buf, p->coef_stride,
874
+                                b->width, b->height);
875
+    }
876
+
877
+    return 0;
878
+}
879
+
880
+static void encode_frame(VC2EncContext *s, const AVFrame *frame,
881
+                         const char *aux_data, int field)
882
+{
883
+    int i;
884
+
885
+    /* Sequence header */
886
+    encode_parse_info(s, DIRAC_PCODE_SEQ_HEADER);
887
+    encode_seq_header(s);
888
+
889
+    /* Encoder version */
890
+    if (aux_data) {
891
+        encode_parse_info(s, DIRAC_PCODE_AUX);
892
+        avpriv_put_string(&s->pb, aux_data, 1);
893
+    }
894
+
895
+    /* Picture header */
896
+    encode_parse_info(s, DIRAC_PCODE_PICTURE_HQ);
897
+    encode_picture_start(s);
898
+
899
+    for (i = 0; i < 3; i++) {
900
+        s->transform_args[i].ctx   = s;
901
+        s->transform_args[i].field = field;
902
+        s->transform_args[i].plane = &s->plane[i];
903
+        s->transform_args[i].idata = frame->data[i];
904
+        s->transform_args[i].istride = frame->linesize[i];
905
+    }
906
+
907
+    /* Do a DWT transform */
908
+    s->avctx->execute(s->avctx, dwt_plane, s->transform_args, NULL, 3,
909
+                      sizeof(TransformArgs));
910
+
911
+    /* Calculate per-slice quantizers and sizes */
912
+    calc_slice_sizes(s);
913
+
914
+    /* Init planes and encode slices */
915
+    encode_slices(s);
916
+
917
+    /* End sequence */
918
+    encode_parse_info(s, DIRAC_PCODE_END_SEQ);
919
+}
920
+
921
+static av_cold int vc2_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
922
+                                      const AVFrame *frame, int *got_packet_ptr)
923
+{
924
+    int ret;
925
+    int max_frame_bytes, sig_size = 256;
926
+    VC2EncContext *s = avctx->priv_data;
927
+    const char aux_data[] = "FFmpeg version "FFMPEG_VERSION;
928
+    const int aux_data_size = sizeof(aux_data);
929
+    const int header_size = 100 + aux_data_size;
930
+    int64_t r_bitrate = avctx->bit_rate >> (s->interlaced);
931
+
932
+    s->avctx = avctx;
933
+    s->size_scaler = 1;
934
+    s->prefix_bytes = 0;
935
+    s->last_parse_code = 0;
936
+    s->next_parse_offset = 0;
937
+
938
+    /* Rate control */
939
+    max_frame_bytes = (av_rescale(r_bitrate, s->avctx->time_base.num,
940
+                                  s->avctx->time_base.den) >> 3) - header_size;
941
+
942
+    /* Find an appropriate size scaler */
943
+    while (sig_size > 255) {
944
+        s->slice_max_bytes = FFALIGN(av_rescale(max_frame_bytes, 1,
945
+                                     s->num_x*s->num_y), s->size_scaler);
946
+        s->slice_max_bytes += 4 + s->prefix_bytes;
947
+        sig_size = s->slice_max_bytes/s->size_scaler; /* Signalled slize size */
948
+        s->size_scaler <<= 1;
949
+    }
950
+
951
+    ret = ff_alloc_packet2(avctx, avpkt, max_frame_bytes*2, 0);
952
+    if (ret < 0) {
953
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
954
+        return ret;
955
+    } else {
956
+        init_put_bits(&s->pb, avpkt->data, avpkt->size);
957
+    }
958
+
959
+    encode_frame(s, frame, aux_data, s->interlaced);
960
+    if (s->interlaced)
961
+        encode_frame(s, frame, NULL, 2);
962
+
963
+    flush_put_bits(&s->pb);
964
+    avpkt->size = put_bits_count(&s->pb) >> 3;
965
+
966
+    *got_packet_ptr = 1;
967
+
968
+    return 0;
969
+}
970
+
971
+static av_cold int vc2_encode_end(AVCodecContext *avctx)
972
+{
973
+    int i;
974
+    VC2EncContext *s = avctx->priv_data;
975
+
976
+    for (i = 0; i < 3; i++) {
977
+        ff_vc2enc_free_transforms(&s->transform_args[i].t);
978
+        av_freep(&s->plane[i].coef_buf);
979
+    }
980
+
981
+    av_freep(&s->slice_args);
982
+    av_freep(&s->coef_lut_len);
983
+    av_freep(&s->coef_lut_val);
984
+
985
+    return 0;
986
+}
987
+
988
+
989
/* One-time encoder setup: picks the VC-2 level and base video format from
 * the input geometry/frame rate, validates the slice dimensions, allocates
 * the per-plane coefficient buffers and subband descriptors, the per-slice
 * argument array and the coefficient-encoding lookup tables.
 * Returns 0 on success, AVERROR_UNKNOWN on invalid configuration, or
 * AVERROR(ENOMEM) on allocation failure (after freeing partial state). */
static av_cold int vc2_encode_init(AVCodecContext *avctx)
{
    Plane *p;
    SubBand *b;
    int i, j, level, o, shift;
    VC2EncContext *s = avctx->priv_data;

    s->picture_number = 0;

    /* Total allowed quantization range */
    s->q_ceil    = MAX_QUANT_INDEX;

    /* Stream version and VC-2 profile (3 = HQ); level may be raised below. */
    s->ver.major = 2;
    s->ver.minor = 0;
    s->profile   = 3;
    s->level     = 3;

    /* -1 = no standard base video format matched (yet). */
    s->base_vf   = -1;
    s->strict_compliance = 1;

    /* Mark unknown as progressive */
    s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) ||
                      (avctx->field_order == AV_FIELD_PROGRESSIVE));

    /* Map known broadcast formats (10-bit 4:2:2 only) to the standard base
     * video format indices.
     * NOTE(review): the interlaced 1080 branch matches time bases 1001/30000
     * and 1/50 — confirm these correspond to the intended field/frame rates
     * for base_vf 11 and 12 against the VC-2 base video format table. */
    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
        if (avctx->width == 1280 && avctx->height == 720) {
            s->level = 3;
            if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                s->base_vf = 9;
            if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                s->base_vf = 10;
        } else if (avctx->width == 1920 && avctx->height == 1080) {
            s->level = 3;
            if (s->interlaced) {
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 30000)
                    s->base_vf = 11;
                if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                    s->base_vf = 12;
            } else {
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                    s->base_vf = 13;
                if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                    s->base_vf = 14;
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 24000)
                    s->base_vf = 21;
            }
        } else if (avctx->width == 3840 && avctx->height == 2160) {
            s->level = 6;
            if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                s->base_vf = 17;
            if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                s->base_vf = 18;
        }
    }

    /* Interlacing is only supported for the standard formats above. */
    if (s->interlaced && s->base_vf <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Interlacing not supported with non standard formats!\n");
        return AVERROR_UNKNOWN;
    }

    if (s->interlaced)
        av_log(avctx, AV_LOG_WARNING, "Interlacing enabled!\n");

    /* Slice dimensions must be powers of two... */
    if ((s->slice_width  & (s->slice_width  - 1)) ||
        (s->slice_height & (s->slice_height - 1))) {
        av_log(avctx, AV_LOG_ERROR, "Slice size is not a power of two!\n");
        return AVERROR_UNKNOWN;
    }

    /* ...and must fit inside the image. */
    if ((s->slice_width > avctx->width) ||
        (s->slice_height > avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "Slice size is bigger than the image!\n");
        return AVERROR_UNKNOWN;
    }

    /* Non-standard formats need -strict unofficial (or lower) to proceed. */
    if (s->base_vf <= 0) {
        if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            s->strict_compliance = s->base_vf = 0;
            av_log(avctx, AV_LOG_WARNING, "Disabling strict compliance\n");
        } else {
            av_log(avctx, AV_LOG_WARNING, "Given format does not strictly comply with "
                   "the specifications, please add a -strict -1 flag to use it\n");
            return AVERROR_UNKNOWN;
        }
    } else {
        av_log(avctx, AV_LOG_INFO, "Selected base video format = %i\n", s->base_vf);
    }

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    /* Planes initialization */
    for (i = 0; i < 3; i++) {
        int w, h;
        p = &s->plane[i];
        /* Plane 0 is luma; chroma planes are scaled by the subsampling. */
        p->width      = avctx->width  >> (i ? s->chroma_x_shift : 0);
        p->height     = avctx->height >> (i ? s->chroma_y_shift : 0);
        if (s->interlaced)
            p->height >>= 1;
        /* Pad dimensions so every transform level divides evenly. */
        p->dwt_width  = w = FFALIGN(p->width,  (1 << s->wavelet_depth));
        p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth));
        p->coef_stride = FFALIGN(p->dwt_width, 32);
        p->coef_buf = av_malloc(p->coef_stride*p->dwt_height*sizeof(dwtcoef));
        if (!p->coef_buf)
            goto alloc_fail;
        /* Subband descriptors: each level halves the band dimensions; the
         * four orientations (LL/HL/LH/HH) are quadrants of the level. */
        for (level = s->wavelet_depth-1; level >= 0; level--) {
            w = w >> 1;
            h = h >> 1;
            for (o = 0; o < 4; o++) {
                b = &p->band[level][o];
                b->width  = w;
                b->height = h;
                b->stride = p->coef_stride;
                shift = (o > 1)*b->height*b->stride + (o & 1)*b->width;
                b->buf = p->coef_buf + shift;
            }
        }

        /* DWT init */
        /* NOTE(review): the scratch buffer is sized from plane[0] (luma, the
         * largest plane) for every plane — presumably intentional oversizing
         * for the chroma planes; confirm. */
        if (ff_vc2enc_init_transforms(&s->transform_args[i].t,
                                        s->plane[0].coef_stride,
                                        s->plane[0].dwt_height))
            goto alloc_fail;
    }

    /* Slices */
    s->num_x = s->plane[0].dwt_width/s->slice_width;
    s->num_y = s->plane[0].dwt_height/s->slice_height;

    s->slice_args = av_malloc(s->num_x*s->num_y*sizeof(SliceArgs));
    if (!s->slice_args)
        goto alloc_fail;

    /* Lookup tables */
    /* For each quantizer index, precompute length and value of the coded
     * representation of every coefficient in [-COEF_LUT_TAB, COEF_LUT_TAB). */
    s->coef_lut_len = av_malloc(2*COEF_LUT_TAB*s->q_ceil*sizeof(*s->coef_lut_len));
    if (!s->coef_lut_len)
        goto alloc_fail;

    s->coef_lut_val = av_malloc(2*COEF_LUT_TAB*s->q_ceil*sizeof(*s->coef_lut_val));
    if (!s->coef_lut_val)
        goto alloc_fail;

    for (i = 0; i < s->q_ceil; i++) {
        for (j = -COEF_LUT_TAB; j < COEF_LUT_TAB; j++) {
            /* Point at the middle of the i-th table so j can go negative. */
            uint8_t  *len_lut = &s->coef_lut_len[2*i*COEF_LUT_TAB + COEF_LUT_TAB];
            uint32_t *val_lut = &s->coef_lut_val[2*i*COEF_LUT_TAB + COEF_LUT_TAB];
            coeff_quantize_get(j, ff_dirac_qscale_tab[i], &len_lut[j], &val_lut[j]);
        }
    }

    return 0;

alloc_fail:
    /* vc2_encode_end() tolerates partially-allocated state. */
    vc2_encode_end(avctx);
    av_log(avctx, AV_LOG_ERROR, "Unable to allocate memory!\n");
    return AVERROR(ENOMEM);
}
1145
+
1146
/* All private options are encoding-time, video-only parameters. */
#define VC2ENC_FLAGS (AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
/* Private encoder options, exposed through the AVClass below.
 * NOTE(review): "wavelet_type" accepts up to VC2_TRANSFORMS_NB, but only the
 * 9_7 and 5_3 handlers are installed by ff_vc2enc_init_transforms() — confirm
 * other indices are rejected before the function-pointer table is used. */
static const AVOption vc2enc_options[] = {
    {"tolerance",     "Max undershoot in percent", offsetof(VC2EncContext, tolerance), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0f}, 0.0f, 45.0f, VC2ENC_FLAGS, "tolerance"},
    {"slice_width",   "Slice width",  offsetof(VC2EncContext, slice_width), AV_OPT_TYPE_INT, {.i64 = 128}, 32, 1024, VC2ENC_FLAGS, "slice_width"},
    {"slice_height",  "Slice height", offsetof(VC2EncContext, slice_height), AV_OPT_TYPE_INT, {.i64 = 64}, 8, 1024, VC2ENC_FLAGS, "slice_height"},
    {"wavelet_depth", "Transform depth", offsetof(VC2EncContext, wavelet_depth), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 5, VC2ENC_FLAGS, "wavelet_depth"},
    {"wavelet_type",  "Transform type",  offsetof(VC2EncContext, wavelet_idx), AV_OPT_TYPE_INT, {.i64 = VC2_TRANSFORM_9_7}, 0, VC2_TRANSFORMS_NB, VC2ENC_FLAGS, "wavelet_idx"},
        {"9_7",       "Deslauriers-Dubuc (9,7)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_9_7}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
        {"5_3",       "LeGall (5,3)",            0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_5_3}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
    {"qm", "Custom quantization matrix", offsetof(VC2EncContext, quant_matrix), AV_OPT_TYPE_INT, {.i64 = VC2_QM_DEF}, 0, VC2_QM_NB, VC2ENC_FLAGS, "quant_matrix"},
        {"default",   "Default from the specifications", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_DEF}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
        {"color",     "Prevents low bitrate discoloration", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_COL}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
        {"flat",      "Optimize for PSNR", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_FLAT}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
    {NULL}
};
1161
+
1162
/* AVClass wiring so the private options above are discoverable/settable. */
static const AVClass vc2enc_class = {
    .class_name = "SMPTE VC-2 encoder",
    .category = AV_CLASS_CATEGORY_ENCODER,
    .option = vc2enc_options,
    .item_name = av_default_item_name,
    .version = LIBAVUTIL_VERSION_INT
};
1169
+
1170
/* Default target bitrate: 600 Mbps. */
static const AVCodecDefault vc2enc_defaults[] = {
    { "b",              "600000000"   },
    { NULL },
};
1174
+
1175
/* 8-, 10- and 12-bit planar YUV at 4:2:0, 4:2:2 and 4:4:4 subsampling. */
static const enum AVPixelFormat allowed_pix_fmts[] = {
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_NONE
};
1181
+
1182
/* Encoder registration. Uses the Dirac codec ID, since VC-2 is a
 * standardized profile of Dirac. */
AVCodec ff_vc2_encoder = {
    .name = "vc2",
    .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-2"),
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_DIRAC,
    .priv_data_size = sizeof(VC2EncContext),
    .init = vc2_encode_init,
    .close = vc2_encode_end,
    .capabilities = AV_CODEC_CAP_SLICE_THREADS, /* slices encode in parallel */
    .encode2 = vc2_encode_frame,
    .priv_class = &vc2enc_class,
    .defaults = vc2enc_defaults,
    .pix_fmts = allowed_pix_fmts
};
0 1196
new file mode 100644
... ...
@@ -0,0 +1,229 @@
0
+/*
1
+ * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
2
+ * Copyright (C) 2016 Open Broadcast Systems Ltd.
3
+ * Author        2016 Rostislav Pehlivanov <atomnuker@gmail.com>
4
+ *
5
+ * This file is part of FFmpeg.
6
+ *
7
+ * FFmpeg is free software; you can redistribute it and/or
8
+ * modify it under the terms of the GNU Lesser General Public
9
+ * License as published by the Free Software Foundation; either
10
+ * version 2.1 of the License, or (at your option) any later version.
11
+ *
12
+ * FFmpeg is distributed in the hope that it will be useful,
13
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
+ * Lesser General Public License for more details.
16
+ *
17
+ * You should have received a copy of the GNU Lesser General Public
18
+ * License along with FFmpeg; if not, write to the Free Software
19
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
+ */
21
+
22
+#include "libavutil/attributes.h"
23
+#include "libavutil/mem.h"
24
+#include "vc2enc_dwt.h"
25
+
26
+/* Since the transforms spit out interleaved coefficients, this function
27
+ * rearranges the coefficients into the more traditional subdivision,
28
+ * making it easier to encode and perform another level. */
29
+static av_always_inline void deinterleave(dwtcoef *linell, ptrdiff_t stride,
30
+                                          int width, int height, dwtcoef *synthl)
31
+{
32
+    int x, y;
33
+    ptrdiff_t synthw = width << 1;
34
+    dwtcoef *linehl = linell + width;
35
+    dwtcoef *linelh = linell + height*stride;
36
+    dwtcoef *linehh = linelh + width;
37
+
38
+    /* Deinterleave the coefficients. */
39
+    for (y = 0; y < height; y++) {
40
+        for (x = 0; x < width; x++) {
41
+            linell[x] = synthl[(x << 1)];
42
+            linehl[x] = synthl[(x << 1) + 1];
43
+            linelh[x] = synthl[(x << 1) + synthw];
44
+            linehh[x] = synthl[(x << 1) + synthw + 1];
45
+        }
46
+        synthl += synthw << 1;
47
+        linell += stride;
48
+        linelh += stride;
49
+        linehl += stride;
50
+        linehh += stride;
51
+    }
52
+}
53
+
54
/* Deslauriers-Dubuc (9,7) analysis transform (one level), operating on the
 * (2*width) x (2*height) region of `data`. The lifting steps run on the
 * interleaved copy in t->buffer; the result is written back into `data`
 * split into the four subband quadrants.
 * NOTE(review): `datal[x] << 1` left-shifts possibly-negative coefficients,
 * which is formally undefined in standard C — presumably relied upon as
 * arithmetic shift; confirm against project conventions. */
static void vc2_subband_dwt_97(VC2TransformContext *t, dwtcoef *data,
                               ptrdiff_t stride, int width, int height)
{
    int x, y;
    dwtcoef *datal = data, *synth = t->buffer, *synthl = synth;
    const ptrdiff_t synth_width  = width  << 1;
    const ptrdiff_t synth_height = height << 1;

    /*
     * Shift in one bit that is used for additional precision and copy
     * the data to the buffer.
     */
    for (y = 0; y < synth_height; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x] = datal[x] << 1;
        synthl += synth_width;
        datal += stride;
    }

    /* Horizontal synthesis. */
    synthl = synth;
    for (y = 0; y < synth_height; y++) {
        /* Lifting stage 2: predict odd (high-pass) samples. */
        /* Left edge uses adjusted taps (no sample at index -2). */
        synthl[1] -= (8*synthl[0] + 9*synthl[2] - synthl[4] + 8) >> 4;
        for (x = 1; x < width - 2; x++)
            synthl[2*x + 1] -= (9*synthl[2*x] + 9*synthl[2*x + 2] - synthl[2*x + 4] -
                                synthl[2 * x - 2] + 8) >> 4;
        /* Right edge: the last two odd samples use clamped taps. */
        synthl[synth_width - 1] -= (17*synthl[synth_width - 2] -
                                    synthl[synth_width - 4] + 8) >> 4;
        synthl[synth_width - 3] -= (8*synthl[synth_width - 2] +
                                    9*synthl[synth_width - 4] -
                                    synthl[synth_width - 6] + 8) >> 4;
        /* Lifting stage 1: update even (low-pass) samples. */
        synthl[0] += (synthl[1] + synthl[1] + 2) >> 2;
        for (x = 1; x < width - 1; x++)
            synthl[2*x] += (synthl[2*x - 1] + synthl[2*x + 1] + 2) >> 2;

        synthl[synth_width - 2] += (synthl[synth_width - 3] +
                                    synthl[synth_width - 1] + 2) >> 2;
        synthl += synth_width;
    }

    /* Vertical synthesis: Lifting stage 2. */
    /* First odd row: top-edge adjusted taps. */
    synthl = synth + synth_width;
    for (x = 0; x < synth_width; x++)
        synthl[x] -= (8*synthl[x - synth_width] + 9*synthl[x + synth_width] -
                      synthl[x + 3 * synth_width] + 8) >> 4;

    /* Interior odd rows with the full 4-tap predictor. */
    synthl = synth + (synth_width << 1);
    for (y = 1; y < height - 2; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x + synth_width] -= (9*synthl[x] +
                                        9*synthl[x + 2 * synth_width] -
                                        synthl[x - 2 * synth_width] -
                                        synthl[x + 4 * synth_width] + 8) >> 4;
        synthl += synth_width << 1;
    }

    /* Last two odd rows: bottom-edge adjusted taps. */
    synthl = synth + (synth_height - 1) * synth_width;
    for (x = 0; x < synth_width; x++) {
        synthl[x] -= (17*synthl[x - synth_width] -
                      synthl[x - 3*synth_width] + 8) >> 4;
        synthl[x - 2*synth_width] -= (9*synthl[x - 3*synth_width] +
                      8*synthl[x - 1*synth_width] - synthl[x - 5*synth_width] + 8) >> 4;
    }

    /* Vertical synthesis: Lifting stage 1. */
    /* Top row mirrors the row below it. */
    synthl = synth;
    for (x = 0; x < synth_width; x++)
        synthl[x] += (synthl[x + synth_width] + synthl[x + synth_width] + 2) >> 2;

    /* Interior even rows. */
    synthl = synth + (synth_width << 1);
    for (y = 1; y < height - 1; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x] += (synthl[x - synth_width] + synthl[x + synth_width] + 2) >> 2;
        synthl += synth_width << 1;
    }

    /* Last even row. */
    synthl = synth + (synth_height - 2) * synth_width;
    for (x = 0; x < synth_width; x++)
        synthl[x] += (synthl[x - synth_width] + synthl[x + synth_width] + 2) >> 2;

    /* Split interleaved result back into the LL/HL/LH/HH quadrants. */
    deinterleave(data, stride, width, height, synth);
}
138
+
139
/* LeGall (5,3) analysis transform (one level), operating on the
 * (2*width) x (2*height) region of `data`. The lifting steps run on the
 * interleaved copy in t->buffer; the result is written back into `data`
 * split into the four subband quadrants.
 * NOTE(review): `datal[x] << 1` left-shifts possibly-negative coefficients,
 * which is formally undefined in standard C — presumably relied upon as
 * arithmetic shift; confirm against project conventions. */
static void vc2_subband_dwt_53(VC2TransformContext *t, dwtcoef *data,
                               ptrdiff_t stride, int width, int height)
{
    int x, y;
    dwtcoef *synth = t->buffer, *synthl = synth, *datal = data;
    const ptrdiff_t synth_width  = width  << 1;
    const ptrdiff_t synth_height = height << 1;

    /*
     * Shift in one bit that is used for additional precision and copy
     * the data to the buffer.
     */
    for (y = 0; y < synth_height; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x] = datal[x] << 1;
        synthl += synth_width;
        datal  += stride;
    }

    /* Horizontal synthesis. */
    synthl = synth;
    for (y = 0; y < synth_height; y++) {
        /* Lifting stage 2: predict odd (high-pass) samples. */
        for (x = 0; x < width - 1; x++)
            synthl[2 * x + 1] -= (synthl[2 * x] + synthl[2 * x + 2] + 1) >> 1;

        /* Right edge: mirror the last even sample. */
        synthl[synth_width - 1] -= (2*synthl[synth_width - 2] + 1) >> 1;

        /* Lifting stage 1: update even (low-pass) samples. */
        synthl[0] += (2*synthl[1] + 2) >> 2;
        for (x = 1; x < width - 1; x++)
            synthl[2 * x] += (synthl[2 * x - 1] + synthl[2 * x + 1] + 2) >> 2;

        synthl[synth_width - 2] += (synthl[synth_width - 3] + synthl[synth_width - 1] + 2) >> 2;

        synthl += synth_width;
    }

    /* Vertical synthesis: Lifting stage 2. */
    /* First odd row. */
    synthl = synth + synth_width;
    for (x = 0; x < synth_width; x++)
        synthl[x] -= (synthl[x - synth_width] + synthl[x + synth_width] + 1) >> 1;

    /* Interior odd rows. */
    synthl = synth + (synth_width << 1);
    for (y = 1; y < height - 1; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x + synth_width] -= (synthl[x] + synthl[x + synth_width * 2] + 1) >> 1;
        synthl += (synth_width << 1);
    }

    /* Bottom edge row mirrors the row above it. */
    synthl = synth + (synth_height - 1) * synth_width;
    for (x = 0; x < synth_width; x++)
        synthl[x] -= (2*synthl[x - synth_width] + 1) >> 1;

    /* Vertical synthesis: Lifting stage 1. */
    /* Top row mirrors the row below it. */
    synthl = synth;
    for (x = 0; x < synth_width; x++)
        synthl[x] += (2*synthl[synth_width + x] + 2) >> 2;

    /* Interior even rows. */
    synthl = synth + (synth_width << 1);
    for (y = 1; y < height - 1; y++) {
        for (x = 0; x < synth_width; x++)
            synthl[x] += (synthl[x + synth_width] + synthl[x - synth_width] + 2) >> 2;
        synthl += (synth_width << 1);
    }

    /* Last even row. */
    synthl = synth + (synth_height - 2)*synth_width;
    for (x = 0; x < synth_width; x++)
        synthl[x] += (synthl[x - synth_width] + synthl[x + synth_width] + 2) >> 2;


    /* Split interleaved result back into the LL/HL/LH/HH quadrants. */
    deinterleave(data, stride, width, height, synth);
}
212
+
213
+av_cold int ff_vc2enc_init_transforms(VC2TransformContext *s, int p_width, int p_height)
214
+{
215
+    s->vc2_subband_dwt[VC2_TRANSFORM_9_7]    = vc2_subband_dwt_97;
216
+    s->vc2_subband_dwt[VC2_TRANSFORM_5_3]    = vc2_subband_dwt_53;
217
+
218
+    s->buffer = av_malloc(2*p_width*p_height*sizeof(dwtcoef));
219
+    if (!s->buffer)
220
+        return 1;
221
+
222
+    return 0;
223
+}
224
+
225
/* Frees (and NULLs) the scratch buffer allocated by
 * ff_vc2enc_init_transforms(). */
av_cold void ff_vc2enc_free_transforms(VC2TransformContext *s)
{
    av_freep(&s->buffer);
}
0 229
new file mode 100644
... ...
@@ -0,0 +1,54 @@
0
+/*
1
+ * Copyright (C) 2016 Open Broadcast Systems Ltd.
2
+ * Author        2016 Rostislav Pehlivanov <atomnuker@gmail.com>
3
+ *
4
+ * This file is part of FFmpeg.
5
+ *
6
+ * FFmpeg is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * FFmpeg is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with FFmpeg; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
#ifndef AVCODEC_VC2ENC_DWT_H
#define AVCODEC_VC2ENC_DWT_H

/* Guard renamed to match the file name (vc2enc_dwt.h); the old
 * AVCODEC_VC2_TRANSFORMS_H did not follow the path-based convention.
 * <stddef.h> added: ptrdiff_t (used below) is declared there, not in
 * <stdint.h>. */
#include <stddef.h>
#include <stdint.h>

/* Transform coefficient type; 16 bits suffice for the encoder's depths. */
typedef int16_t dwtcoef;
typedef int32_t qcoef;   /* Quantization needs more precision */

/* Only Deslauriers-Dubuc (9,7) and LeGall (5,3) supported! */

enum VC2TransformType {
    VC2_TRANSFORM_9_7    = 0,   /* Deslauriers-Dubuc (9,7)  */
    VC2_TRANSFORM_5_3    = 1,   /* LeGall (5,3)             */
    VC2_TRANSFORM_13_7   = 2,   /* Deslauriers-Dubuc (13,7) */
    VC2_TRANSFORM_HAAR   = 3,   /* Haar without shift       */
    VC2_TRANSFORM_HAAR_S = 4,   /* Haar with 1 shift/lvl    */
    VC2_TRANSFORM_FIDEL  = 5,   /* Fidelity filter          */
    VC2_TRANSFORM_9_7_I  = 6,   /* Daubechies (9,7)         */

    VC2_TRANSFORMS_NB
};

typedef struct VC2TransformContext {
    /* Scratch buffer for the interleaved intermediate coefficients. */
    dwtcoef *buffer;
    /* One in-place single-level transform per type; only the entries for
     * the supported transforms are populated by ff_vc2enc_init_transforms(). */
    void (*vc2_subband_dwt[VC2_TRANSFORMS_NB])(struct VC2TransformContext *t,
                                               dwtcoef *data, ptrdiff_t stride,
                                               int width, int height);
} VC2TransformContext;

int  ff_vc2enc_init_transforms(VC2TransformContext *t, int p_width, int p_height);
void ff_vc2enc_free_transforms(VC2TransformContext *t);

#endif /* AVCODEC_VC2ENC_DWT_H */
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #define LIBAVCODEC_VERSION_MAJOR  57
32 32
 #define LIBAVCODEC_VERSION_MINOR  24
33
-#define LIBAVCODEC_VERSION_MICRO 101
33
+#define LIBAVCODEC_VERSION_MICRO 102
34 34
 
35 35
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
36 36
                                                LIBAVCODEC_VERSION_MINOR, \