
lavc: add Ut Video encoder

Signed-off-by: Anton Khirnov <anton@khirnov.net>

Jan Ekström authored on 2012/08/20 18:15:34
Showing 9 changed files
... ...
@@ -43,6 +43,7 @@ version <next>:
43 43
 - RTMPTE protocol support
44 44
 - Canopus Lossless Codec decoder
45 45
 - avconv -shortest option is now per-output file
46
+- Ut Video encoder
46 47
 
47 48
 
48 49
 version 0.8:
... ...
@@ -620,7 +620,7 @@ following image formats are supported:
620 620
     @tab encoding supported through external library libtheora
621 621
 @item Tiertex Limited SEQ video  @tab     @tab  X
622 622
     @tab Codec used in DOS CD-ROM FlashBack game.
623
-@item Ut Video               @tab     @tab  X
623
+@item Ut Video               @tab  X  @tab  X
624 624
 @item v210 QuickTime uncompressed 4:2:2 10-bit     @tab  X  @tab  X
625 625
 @item v410 QuickTime uncompressed 4:4:4 10-bit     @tab  X  @tab  X
626 626
 @item VBLE Lossless Codec    @tab     @tab  X
... ...
@@ -377,7 +377,8 @@ OBJS-$(CONFIG_TTA_DECODER)             += tta.o
377 377
 OBJS-$(CONFIG_TWINVQ_DECODER)          += twinvq.o celp_math.o
378 378
 OBJS-$(CONFIG_TXD_DECODER)             += txd.o s3tc.o
379 379
 OBJS-$(CONFIG_ULTI_DECODER)            += ulti.o
380
-OBJS-$(CONFIG_UTVIDEO_DECODER)         += utvideodec.o
380
+OBJS-$(CONFIG_UTVIDEO_DECODER)         += utvideodec.o utvideo.o
381
+OBJS-$(CONFIG_UTVIDEO_ENCODER)         += utvideoenc.o utvideo.o
381 382
 OBJS-$(CONFIG_V210_DECODER)            += v210dec.o
382 383
 OBJS-$(CONFIG_V210_ENCODER)            += v210enc.o
383 384
 OBJS-$(CONFIG_V410_DECODER)            += v410dec.o
... ...
@@ -209,7 +209,7 @@ void avcodec_register_all(void)
209 209
     REGISTER_DECODER (TSCC2, tscc2);
210 210
     REGISTER_DECODER (TXD, txd);
211 211
     REGISTER_DECODER (ULTI, ulti);
212
-    REGISTER_DECODER (UTVIDEO, utvideo);
212
+    REGISTER_ENCDEC  (UTVIDEO, utvideo);
213 213
     REGISTER_ENCDEC  (V210,  v210);
214 214
     REGISTER_DECODER (V210X, v210x);
215 215
     REGISTER_ENCDEC  (V410, v410);
216 216
new file mode 100644
... ...
@@ -0,0 +1,39 @@
0
+/*
1
+ * Common Ut Video code
2
+ * Copyright (c) 2011 Konstantin Shishkov
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * Common Ut Video code
24
+ */
25
+
26
+#include "utvideo.h"
27
+
28
+const int ff_ut_pred_order[5] = {
29
+    PRED_LEFT, PRED_MEDIAN, PRED_MEDIAN, PRED_NONE, PRED_GRADIENT
30
+};
31
+
32
+const int ff_ut_rgb_order[4]  = { 1, 2, 0, 3 }; // G, B, R, A
33
+
34
+int ff_ut_huff_cmp_len(const void *a, const void *b)
35
+{
36
+    const HuffEntry *aa = a, *bb = b;
37
+    return (aa->len - bb->len)*256 + aa->sym - bb->sym;
38
+}
0 39
new file mode 100644
... ...
@@ -0,0 +1,91 @@
0
+/*
1
+ * Common Ut Video header
2
+ * Copyright (c) 2011 Konstantin Shishkov
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+#ifndef AVCODEC_UTVIDEO_H
22
+#define AVCODEC_UTVIDEO_H
23
+
24
+/**
25
+ * @file
26
+ * Common Ut Video header
27
+ */
28
+
29
+#include "libavutil/common.h"
30
+#include "avcodec.h"
31
+#include "dsputil.h"
32
+
33
+enum {
34
+    PRED_NONE = 0,
35
+    PRED_LEFT,
36
+    PRED_GRADIENT,
37
+    PRED_MEDIAN,
38
+};
39
+
40
+enum {
41
+    COMP_NONE = 0,
42
+    COMP_HUFF,
43
+};
44
+
45
+/*
46
+ * "Original format" markers.
47
+ * Based on values gotten from the official VFW encoder.
48
+ * They are not used during decoding, but they do have
49
+ * an informative role on seeing what was input
50
+ * to the encoder.
51
+ */
52
+enum {
53
+    UTVIDEO_RGB  = MKTAG(0x00, 0x00, 0x01, 0x18),
54
+    UTVIDEO_RGBA = MKTAG(0x00, 0x00, 0x02, 0x18),
55
+    UTVIDEO_420  = MKTAG('Y', 'V', '1', '2'),
56
+    UTVIDEO_422  = MKTAG('Y', 'U', 'Y', '2'),
57
+};
58
+
59
+/* Mapping of libavcodec prediction modes to Ut Video's */
60
+extern const int ff_ut_pred_order[5];
61
+
62
+/* Order of RGB(A) planes in Ut Video */
63
+extern const int ff_ut_rgb_order[4];
64
+
65
+typedef struct UtvideoContext {
66
+    AVCodecContext *avctx;
67
+    AVFrame        pic;
68
+    DSPContext     dsp;
69
+
70
+    uint32_t frame_info_size, flags, frame_info;
71
+    int      planes;
72
+    int      slices;
73
+    int      compression;
74
+    int      interlaced;
75
+    int      frame_pred;
76
+
77
+    uint8_t *slice_bits, *slice_buffer;
78
+    int      slice_bits_size;
79
+} UtvideoContext;
80
+
81
+typedef struct HuffEntry {
82
+    uint8_t  sym;
83
+    uint8_t  len;
84
+    uint32_t code;
85
+} HuffEntry;
86
+
87
+/* Compare huffman tree nodes */
88
+int ff_ut_huff_cmp_len(const void *a, const void *b);
89
+
90
+#endif /* AVCODEC_UTVIDEO_H */
... ...
@@ -32,40 +32,7 @@
32 32
 #include "get_bits.h"
33 33
 #include "dsputil.h"
34 34
 #include "thread.h"
35
-
36
-enum {
37
-    PRED_NONE = 0,
38
-    PRED_LEFT,
39
-    PRED_GRADIENT,
40
-    PRED_MEDIAN,
41
-};
42
-
43
-typedef struct UtvideoContext {
44
-    AVCodecContext *avctx;
45
-    AVFrame pic;
46
-    DSPContext dsp;
47
-
48
-    uint32_t frame_info_size, flags, frame_info;
49
-    int planes;
50
-    int slices;
51
-    int compression;
52
-    int interlaced;
53
-    int frame_pred;
54
-
55
-    uint8_t *slice_bits;
56
-    int slice_bits_size;
57
-} UtvideoContext;
58
-
59
-typedef struct HuffEntry {
60
-    uint8_t sym;
61
-    uint8_t len;
62
-} HuffEntry;
63
-
64
-static int huff_cmp(const void *a, const void *b)
65
-{
66
-    const HuffEntry *aa = a, *bb = b;
67
-    return (aa->len - bb->len)*256 + aa->sym - bb->sym;
68
-}
35
+#include "utvideo.h"
69 36
 
70 37
 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
71 38
 {
... ...
@@ -82,7 +49,7 @@ static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
82 82
         he[i].sym = i;
83 83
         he[i].len = *src++;
84 84
     }
85
-    qsort(he, 256, sizeof(*he), huff_cmp);
85
+    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
86 86
 
87 87
     if (!he[0].len) {
88 88
         *fsym = he[0].sym;
... ...
@@ -216,8 +183,6 @@ fail:
216 216
     return AVERROR_INVALIDDATA;
217 217
 }
218 218
 
219
-static const int rgb_order[4] = { 1, 2, 0, 3 };
220
-
221 219
 static void restore_rgb_planes(uint8_t *src, int step, int stride, int width,
222 220
                                int height)
223 221
 {
... ...
@@ -434,20 +399,22 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
434 434
     case PIX_FMT_RGB24:
435 435
     case PIX_FMT_RGBA:
436 436
         for (i = 0; i < c->planes; i++) {
437
-            ret = decode_plane(c, i, c->pic.data[0] + rgb_order[i], c->planes,
438
-                               c->pic.linesize[0], avctx->width, avctx->height,
439
-                               plane_start[i], c->frame_pred == PRED_LEFT);
437
+            ret = decode_plane(c, i, c->pic.data[0] + ff_ut_rgb_order[i],
438
+                               c->planes, c->pic.linesize[0], avctx->width,
439
+                               avctx->height, plane_start[i],
440
+                               c->frame_pred == PRED_LEFT);
440 441
             if (ret)
441 442
                 return ret;
442 443
             if (c->frame_pred == PRED_MEDIAN) {
443 444
                 if (!c->interlaced) {
444
-                    restore_median(c->pic.data[0] + rgb_order[i], c->planes,
445
-                                   c->pic.linesize[0], avctx->width,
445
+                    restore_median(c->pic.data[0] + ff_ut_rgb_order[i],
446
+                                   c->planes, c->pic.linesize[0], avctx->width,
446 447
                                    avctx->height, c->slices, 0);
447 448
                 } else {
448
-                    restore_median_il(c->pic.data[0] + rgb_order[i], c->planes,
449
-                                      c->pic.linesize[0], avctx->width,
450
-                                      avctx->height, c->slices, 0);
449
+                    restore_median_il(c->pic.data[0] + ff_ut_rgb_order[i],
450
+                                      c->planes, c->pic.linesize[0],
451
+                                      avctx->width, avctx->height, c->slices,
452
+                                      0);
451 453
                 }
452 454
             }
453 455
         }
454 456
new file mode 100644
... ...
@@ -0,0 +1,735 @@
0
+/*
1
+ * Ut Video encoder
2
+ * Copyright (c) 2012 Jan Ekström
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * Ut Video encoder
24
+ */
25
+
26
+#include "libavutil/intreadwrite.h"
27
+#include "avcodec.h"
28
+#include "internal.h"
29
+#include "bytestream.h"
30
+#include "put_bits.h"
31
+#include "dsputil.h"
32
+#include "mathops.h"
33
+#include "utvideo.h"
34
+
35
+/* Compare HuffEntry symbols */
36
+static int huff_cmp_sym(const void *a, const void *b)
37
+{
38
+    const HuffEntry *aa = a, *bb = b;
39
+    return aa->sym - bb->sym;
40
+}
41
+
42
+static av_cold int utvideo_encode_close(AVCodecContext *avctx)
43
+{
44
+    UtvideoContext *c = avctx->priv_data;
45
+
46
+    av_freep(&avctx->coded_frame);
47
+    av_freep(&c->slice_bits);
48
+    av_freep(&c->slice_buffer);
49
+
50
+    return 0;
51
+}
52
+
53
+static av_cold int utvideo_encode_init(AVCodecContext *avctx)
54
+{
55
+    UtvideoContext *c = avctx->priv_data;
56
+
57
+    uint32_t original_format;
58
+
59
+    c->avctx           = avctx;
60
+    c->frame_info_size = 4;
61
+
62
+    switch (avctx->pix_fmt) {
63
+    case PIX_FMT_RGB24:
64
+        c->planes        = 3;
65
+        avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
66
+        original_format  = UTVIDEO_RGB;
67
+        break;
68
+    case PIX_FMT_RGBA:
69
+        c->planes        = 4;
70
+        avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
71
+        original_format  = UTVIDEO_RGBA;
72
+        break;
73
+    case PIX_FMT_YUV420P:
74
+        if (avctx->width & 1 || avctx->height & 1) {
75
+            av_log(avctx, AV_LOG_ERROR,
76
+                   "4:2:0 video requires even width and height.\n");
77
+            return AVERROR_INVALIDDATA;
78
+        }
79
+        c->planes        = 3;
80
+        avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
81
+        original_format  = UTVIDEO_420;
82
+        break;
83
+    case PIX_FMT_YUV422P:
84
+        if (avctx->width & 1) {
85
+            av_log(avctx, AV_LOG_ERROR,
86
+                   "4:2:2 video requires even width.\n");
87
+            return AVERROR_INVALIDDATA;
88
+        }
89
+        c->planes        = 3;
90
+        avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
91
+        original_format  = UTVIDEO_422;
92
+        break;
93
+    default:
94
+        av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
95
+               avctx->pix_fmt);
96
+        return AVERROR_INVALIDDATA;
97
+    }
98
+
99
+    ff_dsputil_init(&c->dsp, avctx);
100
+
101
+    /* Check the prediction method, and error out if unsupported */
102
+    if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
103
+        av_log(avctx, AV_LOG_ERROR,
104
+               "Prediction method %d is not supported in Ut Video.\n",
105
+               avctx->prediction_method);
106
+        return AVERROR_OPTION_NOT_FOUND;
107
+    }
108
+
109
+    if (avctx->prediction_method == FF_PRED_PLANE) {
110
+        av_log(avctx, AV_LOG_ERROR,
111
+               "Plane prediction is not supported in Ut Video.\n");
112
+        return AVERROR_OPTION_NOT_FOUND;
113
+    }
114
+
115
+    /* Convert from libavcodec prediction type to Ut Video's */
116
+    c->frame_pred = ff_ut_pred_order[avctx->prediction_method];
117
+
118
+    if (c->frame_pred == PRED_GRADIENT) {
119
+        av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
120
+        return AVERROR_OPTION_NOT_FOUND;
121
+    }
122
+
123
+    avctx->coded_frame = avcodec_alloc_frame();
124
+
125
+    if (!avctx->coded_frame) {
126
+        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
127
+        utvideo_encode_close(avctx);
128
+        return AVERROR(ENOMEM);
129
+    }
130
+
131
+    /* extradata size is 4 * 32bit */
132
+    avctx->extradata_size = 16;
133
+
134
+    avctx->extradata = av_mallocz(avctx->extradata_size +
135
+                                  FF_INPUT_BUFFER_PADDING_SIZE);
136
+
137
+    if (!avctx->extradata) {
138
+        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
139
+        utvideo_encode_close(avctx);
140
+        return AVERROR(ENOMEM);
141
+    }
142
+
143
+    c->slice_buffer = av_malloc(avctx->width * avctx->height +
144
+                                FF_INPUT_BUFFER_PADDING_SIZE);
145
+
146
+    if (!c->slice_buffer) {
147
+        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
148
+        utvideo_encode_close(avctx);
149
+        return AVERROR(ENOMEM);
150
+    }
151
+
152
+    /*
153
+     * Set the version of the encoder.
154
+     * Last byte is "implementation ID", which is
155
+     * obtained from the creator of the format.
156
+ * Libavcodec has been assigned the ID 0xF0.
157
+     */
158
+    AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
159
+
160
+    /*
161
+     * Set the "original format".
162
+     * It is not used for anything during decoding.
163
+     */
164
+    AV_WL32(avctx->extradata + 4, original_format);
165
+
166
+    /* Write 4 as the 'frame info size' */
167
+    AV_WL32(avctx->extradata + 8, c->frame_info_size);
168
+
169
+    /*
170
+     * Set how many slices are going to be used.
171
+     * Set one slice for now.
172
+     */
173
+    c->slices = 1;
174
+
175
+    /* Set compression mode */
176
+    c->compression = COMP_HUFF;
177
+
178
+    /*
179
+     * Set the encoding flags:
180
+     * - Slice count minus 1
181
+     * - Interlaced encoding mode flag, set to zero for now.
182
+     * - Compression mode (none/huff)
183
+     * And write the flags.
184
+     */
185
+    c->flags  = (c->slices - 1) << 24;
186
+    c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
187
+    c->flags |= c->compression;
188
+
189
+    AV_WL32(avctx->extradata + 12, c->flags);
190
+
191
+    return 0;
192
+}
193
+
194
+static void mangle_rgb_planes(uint8_t *src, int step, int stride, int width,
195
+                              int height)
196
+{
197
+    int i, j;
198
+    uint8_t r, g, b;
199
+
200
+    for (j = 0; j < height; j++) {
201
+        for (i = 0; i < width * step; i += step) {
202
+            r = src[i];
203
+            g = src[i + 1];
204
+            b = src[i + 2];
205
+
206
+            src[i]     = r - g + 0x80;
207
+            src[i + 2] = b - g + 0x80;
208
+        }
209
+        src += stride;
210
+    }
211
+}
212
+
213
+/* Write data to a plane, no prediction applied */
214
+static void write_plane(uint8_t *src, uint8_t *dst, int step, int stride,
215
+                        int width, int height)
216
+{
217
+    int i, j;
218
+
219
+    for (j = 0; j < height; j++) {
220
+        for (i = 0; i < width * step; i += step)
221
+            *dst++ = src[i];
222
+
223
+        src += stride;
224
+    }
225
+}
226
+
227
+/* Write data to a plane with left prediction */
228
+static void left_predict(uint8_t *src, uint8_t *dst, int step, int stride,
229
+                         int width, int height)
230
+{
231
+    int i, j;
232
+    uint8_t prev;
233
+
234
+    prev = 0x80; /* Set the initial value */
235
+    for (j = 0; j < height; j++) {
236
+        for (i = 0; i < width * step; i += step) {
237
+            *dst++ = src[i] - prev;
238
+            prev   = src[i];
239
+        }
240
+        src += stride;
241
+    }
242
+}
243
+
244
+/* Write data to a plane with median prediction */
245
+static void median_predict(uint8_t *src, uint8_t *dst, int step, int stride,
246
+                           int width, int height)
247
+{
248
+    int i, j;
249
+    int A, B, C;
250
+    uint8_t prev;
251
+
252
+    /* First line uses left neighbour prediction */
253
+    prev = 0x80; /* Set the initial value */
254
+    for (i = 0; i < width * step; i += step) {
255
+        *dst++ = src[i] - prev;
256
+        prev   = src[i];
257
+    }
258
+
259
+    if (height == 1)
260
+        return;
261
+
262
+    src += stride;
263
+
264
+    /*
265
+     * Second line uses top prediction for the first sample,
266
+     * and median for the rest.
267
+     */
268
+    C      = src[-stride];
269
+    *dst++ = src[0] - C;
270
+    A      = src[0];
271
+    for (i = step; i < width * step; i += step) {
272
+        B       = src[i - stride];
273
+        *dst++  = src[i] - mid_pred(A, B, (A + B - C) & 0xFF);
274
+        C       = B;
275
+        A       = src[i];
276
+    }
277
+
278
+    src += stride;
279
+
280
+    /* Rest of the coded part uses median prediction */
281
+    for (j = 2; j < height; j++) {
282
+        for (i = 0; i < width * step; i += step) {
283
+            B       = src[i - stride];
284
+            *dst++  = src[i] - mid_pred(A, B, (A + B - C) & 0xFF);
285
+            C       = B;
286
+            A       = src[i];
287
+        }
288
+        src += stride;
289
+    }
290
+}
291
+
292
+/* Count the usage of values in a plane */
293
+static void count_usage(uint8_t *src, int width,
294
+                        int height, uint32_t *counts)
295
+{
296
+    int i, j;
297
+
298
+    for (j = 0; j < height; j++) {
299
+        for (i = 0; i < width; i++) {
300
+            counts[src[i]]++;
301
+        }
302
+        src += width;
303
+    }
304
+}
305
+
306
+static uint32_t add_weights(uint32_t w1, uint32_t w2)
307
+{
308
+    uint32_t max = (w1 & 0xFF) > (w2 & 0xFF) ? (w1 & 0xFF) : (w2 & 0xFF);
309
+
310
+    return ((w1 & 0xFFFFFF00) + (w2 & 0xFFFFFF00)) | (1 + max);
311
+}
312
+
313
+static void up_heap(uint32_t val, uint32_t *heap, uint32_t *weights)
314
+{
315
+    uint32_t initial_val = heap[val];
316
+
317
+    while (weights[initial_val] < weights[heap[val >> 1]]) {
318
+        heap[val] = heap[val >> 1];
319
+        val     >>= 1;
320
+    }
321
+
322
+    heap[val] = initial_val;
323
+}
324
+
325
+static void down_heap(uint32_t nr_heap, uint32_t *heap, uint32_t *weights)
326
+{
327
+    uint32_t val = 1;
328
+    uint32_t val2;
329
+    uint32_t initial_val = heap[val];
330
+
331
+    while (1) {
332
+        val2 = val << 1;
333
+
334
+        if (val2 > nr_heap)
335
+            break;
336
+
337
+        if (val2 < nr_heap && weights[heap[val2 + 1]] < weights[heap[val2]])
338
+            val2++;
339
+
340
+        if (weights[initial_val] < weights[heap[val2]])
341
+            break;
342
+
343
+        heap[val] = heap[val2];
344
+
345
+        val = val2;
346
+    }
347
+
348
+    heap[val] = initial_val;
349
+}
350
+
351
+/* Calculate the huffman code lengths from value counts */
352
+static void calculate_code_lengths(uint8_t *lengths, uint32_t *counts)
353
+{
354
+    uint32_t nr_nodes, nr_heap, node1, node2;
355
+    int      i, j;
356
+    int32_t  k;
357
+
358
+    /* Heap and node entries start from 1 */
359
+    uint32_t weights[512];
360
+    uint32_t heap[512];
361
+    int32_t  parents[512];
362
+
363
+    /* Set initial weights */
364
+    for (i = 0; i < 256; i++)
365
+        weights[i + 1] = (counts[i] ? counts[i] : 1) << 8;
366
+
367
+    nr_nodes = 256;
368
+    nr_heap  = 0;
369
+
370
+    heap[0]    = 0;
371
+    weights[0] = 0;
372
+    parents[0] = -2;
373
+
374
+    /* Create initial nodes */
375
+    for (i = 1; i <= 256; i++) {
376
+        parents[i] = -1;
377
+
378
+        heap[++nr_heap] = i;
379
+        up_heap(nr_heap, heap, weights);
380
+    }
381
+
382
+    /* Build the tree */
383
+    while (nr_heap > 1) {
384
+        node1   = heap[1];
385
+        heap[1] = heap[nr_heap--];
386
+
387
+        down_heap(nr_heap, heap, weights);
388
+
389
+        node2   = heap[1];
390
+        heap[1] = heap[nr_heap--];
391
+
392
+        down_heap(nr_heap, heap, weights);
393
+
394
+        nr_nodes++;
395
+
396
+        parents[node1]    = parents[node2] = nr_nodes;
397
+        weights[nr_nodes] = add_weights(weights[node1], weights[node2]);
398
+        parents[nr_nodes] = -1;
399
+
400
+        heap[++nr_heap] = nr_nodes;
401
+
402
+        up_heap(nr_heap, heap, weights);
403
+    }
404
+
405
+    /* Generate lengths */
406
+    for (i = 1; i <= 256; i++) {
407
+        j = 0;
408
+        k = i;
409
+
410
+        while (parents[k] >= 0) {
411
+            k = parents[k];
412
+            j++;
413
+        }
414
+
415
+        lengths[i - 1] = j;
416
+    }
417
+}
418
+
419
+/* Calculate the actual huffman codes from the code lengths */
420
+static void calculate_codes(HuffEntry *he)
421
+{
422
+    int last, i;
423
+    uint32_t code;
424
+
425
+    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
426
+
427
+    last = 255;
428
+    while (he[last].len == 255 && last)
429
+        last--;
430
+
431
+    code = 1;
432
+    for (i = last; i >= 0; i--) {
433
+        he[i].code  = code >> (32 - he[i].len);
434
+        code       += 0x80000000u >> (he[i].len - 1);
435
+    }
436
+
437
+    qsort(he, 256, sizeof(*he), huff_cmp_sym);
438
+}
439
+
440
+/* Write huffman bit codes to a memory block */
441
+static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
442
+                            int width, int height, HuffEntry *he)
443
+{
444
+    PutBitContext pb;
445
+    int i, j;
446
+    int count;
447
+
448
+    init_put_bits(&pb, dst, dst_size);
449
+
450
+    /* Write the codes */
451
+    for (j = 0; j < height; j++) {
452
+        for (i = 0; i < width; i++)
453
+            put_bits(&pb, he[src[i]].len, he[src[i]].code);
454
+
455
+        src += width;
456
+    }
457
+
458
+    /* Pad output to a 32bit boundary */
459
+    count = put_bits_count(&pb) & 0x1F;
460
+
461
+    if (count)
462
+        put_bits(&pb, 32 - count, 0);
463
+
464
+    /* Get the amount of bits written */
465
+    count = put_bits_count(&pb);
466
+
467
+    /* Flush the rest with zeroes */
468
+    flush_put_bits(&pb);
469
+
470
+    return count;
471
+}
472
+
473
+static int encode_plane(AVCodecContext *avctx, uint8_t *src,
474
+                        uint8_t *dst, int step, int stride,
475
+                        int width, int height, PutByteContext *pb)
476
+{
477
+    UtvideoContext *c        = avctx->priv_data;
478
+    uint8_t  lengths[256];
479
+    uint32_t counts[256]     = { 0 };
480
+
481
+    HuffEntry he[256];
482
+
483
+    uint32_t offset = 0, slice_len = 0;
484
+    int      i, sstart, send = 0;
485
+    int      symbol;
486
+
487
+    /* Do prediction / make planes */
488
+    switch (c->frame_pred) {
489
+    case PRED_NONE:
490
+        for (i = 0; i < c->slices; i++) {
491
+            sstart = send;
492
+            send   = height * (i + 1) / c->slices;
493
+            write_plane(src + sstart * stride, dst + sstart * width,
494
+                        step, stride, width, send - sstart);
495
+        }
496
+        break;
497
+    case PRED_LEFT:
498
+        for (i = 0; i < c->slices; i++) {
499
+            sstart = send;
500
+            send   = height * (i + 1) / c->slices;
501
+            left_predict(src + sstart * stride, dst + sstart * width,
502
+                         step, stride, width, send - sstart);
503
+        }
504
+        break;
505
+    case PRED_MEDIAN:
506
+        for (i = 0; i < c->slices; i++) {
507
+            sstart = send;
508
+            send   = height * (i + 1) / c->slices;
509
+            median_predict(src + sstart * stride, dst + sstart * width,
510
+                           step, stride, width, send - sstart);
511
+        }
512
+        break;
513
+    default:
514
+        av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
515
+               c->frame_pred);
516
+        return AVERROR_OPTION_NOT_FOUND;
517
+    }
518
+
519
+    /* Count the usage of values */
520
+    count_usage(dst, width, height, counts);
521
+
522
+    /* Check for a special case where only one symbol was used */
523
+    for (symbol = 0; symbol < 256; symbol++) {
524
+        /* If non-zero count is found, see if it matches width * height */
525
+        if (counts[symbol]) {
526
+            /* Special case if only one symbol was used */
527
+            if (counts[symbol] == width * height) {
528
+                /*
529
+                 * Write a zero for the single symbol
530
+                 * used in the plane, else 0xFF.
531
+                 */
532
+                for (i = 0; i < 256; i++) {
533
+                    if (i == symbol)
534
+                        bytestream2_put_byte(pb, 0);
535
+                    else
536
+                        bytestream2_put_byte(pb, 0xFF);
537
+                }
538
+
539
+                /* Write zeroes for lengths */
540
+                for (i = 0; i < c->slices; i++)
541
+                    bytestream2_put_le32(pb, 0);
542
+
543
+                /* And that's all for that plane folks */
544
+                return 0;
545
+            }
546
+            break;
547
+        }
548
+    }
549
+
550
+    /* Calculate huffman lengths */
551
+    calculate_code_lengths(lengths, counts);
552
+
553
+    /*
554
+     * Write the plane's header into the output packet:
555
+     * - huffman code lengths (256 bytes)
556
+     * - slice end offsets (gotten from the slice lengths)
557
+     */
558
+    for (i = 0; i < 256; i++) {
559
+        bytestream2_put_byte(pb, lengths[i]);
560
+
561
+        he[i].len = lengths[i];
562
+        he[i].sym = i;
563
+    }
564
+
565
+    /* Calculate the huffman codes themselves */
566
+    calculate_codes(he);
567
+
568
+    send = 0;
569
+    for (i = 0; i < c->slices; i++) {
570
+        sstart  = send;
571
+        send    = height * (i + 1) / c->slices;
572
+
573
+        /*
574
+         * Write the huffman codes to a buffer,
575
+         * get the offset in bits and convert to bytes.
576
+         */
577
+        offset += write_huff_codes(dst + sstart * width, c->slice_bits,
578
+                                   width * (send - sstart), width,
579
+                                   send - sstart, he) >> 3;
580
+
581
+        slice_len = offset - slice_len;
582
+
583
+        /* Byteswap the written huffman codes */
584
+        c->dsp.bswap_buf((uint32_t *) c->slice_bits,
585
+                         (uint32_t *) c->slice_bits,
586
+                         slice_len >> 2);
587
+
588
+        /* Write the offset to the stream */
589
+        bytestream2_put_le32(pb, offset);
590
+
591
+        /* Seek to the data part of the packet */
592
+        bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
593
+                           offset - slice_len, SEEK_CUR);
594
+
595
+        /* Write the slices' data into the output packet */
596
+        bytestream2_put_buffer(pb, c->slice_bits, slice_len);
597
+
598
+        /* Seek back to the slice offsets */
599
+        bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
600
+                           SEEK_CUR);
601
+
602
+        slice_len = offset;
603
+    }
604
+
605
+    /* And at the end seek to the end of written slice(s) */
606
+    bytestream2_seek_p(pb, offset, SEEK_CUR);
607
+
608
+    return 0;
609
+}
610
+
611
+static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
612
+                                const AVFrame *pic, int *got_packet)
613
+{
614
+    UtvideoContext *c = avctx->priv_data;
615
+    PutByteContext pb;
616
+
617
+    uint32_t frame_info;
618
+
619
+    uint8_t *dst;
620
+
621
+    int width = avctx->width, height = avctx->height;
622
+    int i, ret = 0;
623
+
624
+    /* Allocate a new packet if needed, then point dst at its data */
625
+    ret = ff_alloc_packet(pkt, (256 + 4 * c->slices + width * height) *
626
+                          c->planes + 4);
627
+
628
+    if (ret < 0) {
629
+        av_log(avctx, AV_LOG_ERROR,
630
+               "Error allocating the output packet, or the provided packet "
631
+               "was too small.\n");
632
+        return ret;
633
+    }
634
+
635
+    dst = pkt->data;
636
+
637
+    bytestream2_init_writer(&pb, dst, pkt->size);
638
+
639
+    av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
640
+                   width * height + FF_INPUT_BUFFER_PADDING_SIZE);
641
+
642
+    if (!c->slice_bits) {
643
+        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
644
+        return AVERROR(ENOMEM);
645
+    }
646
+
647
+    /* In case of RGB, mangle the planes to Ut Video's format */
648
+    if (avctx->pix_fmt == PIX_FMT_RGBA || avctx->pix_fmt == PIX_FMT_RGB24)
649
+        mangle_rgb_planes(pic->data[0], c->planes, pic->linesize[0], width,
650
+                          height);
651
+
652
+    /* Deal with the planes */
653
+    switch (avctx->pix_fmt) {
654
+    case PIX_FMT_RGB24:
655
+    case PIX_FMT_RGBA:
656
+        for (i = 0; i < c->planes; i++) {
657
+            ret = encode_plane(avctx, pic->data[0] + ff_ut_rgb_order[i],
658
+                               c->slice_buffer, c->planes, pic->linesize[0],
659
+                               width, height, &pb);
660
+
661
+            if (ret) {
662
+                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
663
+                return ret;
664
+            }
665
+        }
666
+        break;
667
+    case PIX_FMT_YUV422P:
668
+        for (i = 0; i < c->planes; i++) {
669
+            ret = encode_plane(avctx, pic->data[i], c->slice_buffer, 1,
670
+                               pic->linesize[i], width >> !!i, height, &pb);
671
+
672
+            if (ret) {
673
+                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
674
+                return ret;
675
+            }
676
+        }
677
+        break;
678
+    case PIX_FMT_YUV420P:
679
+        for (i = 0; i < c->planes; i++) {
680
+            ret = encode_plane(avctx, pic->data[i], c->slice_buffer, 1,
681
+                               pic->linesize[i], width >> !!i, height >> !!i,
682
+                               &pb);
683
+
684
+            if (ret) {
685
+                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
686
+                return ret;
687
+            }
688
+        }
689
+        break;
690
+    default:
691
+        av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
692
+               avctx->pix_fmt);
693
+        return AVERROR_INVALIDDATA;
694
+    }
695
+
696
+    /*
697
+     * Write frame information (LE 32bit unsigned)
698
+     * into the output packet.
699
+     * Contains the prediction method.
700
+     */
701
+    frame_info = c->frame_pred << 8;
702
+    bytestream2_put_le32(&pb, frame_info);
703
+
704
+    /*
705
+     * For now, Ut Video is IDR-only.
706
+     * Set flags accordingly.
707
+     */
708
+    avctx->coded_frame->reference = 0;
709
+    avctx->coded_frame->key_frame = 1;
710
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
711
+
712
+    pkt->size   = bytestream2_tell_p(&pb);
713
+    pkt->flags |= AV_PKT_FLAG_KEY;
714
+
715
+    /* Packet should be done */
716
+    *got_packet = 1;
717
+
718
+    return 0;
719
+}
720
+
721
+AVCodec ff_utvideo_encoder = {
722
+    .name           = "utvideo",
723
+    .type           = AVMEDIA_TYPE_VIDEO,
724
+    .id             = CODEC_ID_UTVIDEO,
725
+    .priv_data_size = sizeof(UtvideoContext),
726
+    .init           = utvideo_encode_init,
727
+    .encode2        = utvideo_encode_frame,
728
+    .close          = utvideo_encode_close,
729
+    .pix_fmts       = (const enum PixelFormat[]) {
730
+                          PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_YUV422P,
731
+                          PIX_FMT_YUV420P, PIX_FMT_NONE
732
+                      },
733
+    .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
734
+};
... ...
@@ -27,7 +27,7 @@
27 27
  */
28 28
 
29 29
 #define LIBAVCODEC_VERSION_MAJOR 54
30
-#define LIBAVCODEC_VERSION_MINOR 25
30
+#define LIBAVCODEC_VERSION_MINOR 26
31 31
 #define LIBAVCODEC_VERSION_MICRO  0
32 32
 
33 33
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \