
h264: rename h264.[ch] to h264dec.[ch]

This is more consistent with the naming of other decoders.

Anton Khirnov authored on 2016/05/18 16:02:39
Showing 34 changed files
... ...
@@ -249,7 +249,7 @@ OBJS-$(CONFIG_H263_DECODER)            += h263dec.o h263.o ituh263dec.o        \
                                           intelh263dec.o h263data.o
 OBJS-$(CONFIG_H263_ENCODER)            += mpeg4videoenc.o mpeg4video.o  \
                                           h263.o ituh263enc.o flvenc.o h263data.o
-OBJS-$(CONFIG_H264_DECODER)            += h264.o h264_cabac.o h264_cavlc.o \
+OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
                                           h264_direct.o h264_loopfilter.o  \
                                           h264_mb.o h264_picture.o h264_ps.o \
                                           h264_refs.o h264_sei.o \
... ...
@@ -20,7 +20,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "h264.h"
+#include "h264dec.h"
 #include "h264data.h"
 #include "mpegutils.h"
 
deleted file mode 100644
... ...
@@ -1,982 +0,0 @@
1
-/*
2
- * H.26L/H.264/AVC/JVT/14496-10/... decoder
3
- * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4
- *
5
- * This file is part of Libav.
6
- *
7
- * Libav is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU Lesser General Public
9
- * License as published by the Free Software Foundation; either
10
- * version 2.1 of the License, or (at your option) any later version.
11
- *
12
- * Libav is distributed in the hope that it will be useful,
13
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * Lesser General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU Lesser General Public
18
- * License along with Libav; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
- */
21
-
22
-/**
23
- * @file
24
- * H.264 / AVC / MPEG-4 part10 codec.
25
- * @author Michael Niedermayer <michaelni@gmx.at>
26
- */
27
-
28
-#include "libavutil/display.h"
29
-#include "libavutil/imgutils.h"
30
-#include "libavutil/opt.h"
31
-#include "libavutil/stereo3d.h"
32
-#include "libavutil/timer.h"
33
-#include "internal.h"
34
-#include "bytestream.h"
35
-#include "cabac.h"
36
-#include "cabac_functions.h"
37
-#include "error_resilience.h"
38
-#include "avcodec.h"
39
-#include "h264.h"
40
-#include "h2645_parse.h"
41
-#include "h264data.h"
42
-#include "h264chroma.h"
43
-#include "h264_mvpred.h"
44
-#include "golomb.h"
45
-#include "mathops.h"
46
-#include "me_cmp.h"
47
-#include "mpegutils.h"
48
-#include "profiles.h"
49
-#include "rectangle.h"
50
-#include "thread.h"
51
-
52
-#include <assert.h>
53
-
54
-const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
55
-
56
-static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
57
-                              int (*mv)[2][4][2],
58
-                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
59
-{
60
-    H264Context *h = opaque;
61
-    H264SliceContext *sl = &h->slice_ctx[0];
62
-
63
-    sl->mb_x = mb_x;
64
-    sl->mb_y = mb_y;
65
-    sl->mb_xy = mb_x + mb_y * h->mb_stride;
66
-    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
67
-    assert(ref >= 0);
68
-    /* FIXME: It is possible albeit uncommon that slice references
69
-     * differ between slices. We take the easy approach and ignore
70
-     * it for now. If this turns out to have any relevance in
71
-     * practice then correct remapping should be added. */
72
-    if (ref >= sl->ref_count[0])
73
-        ref = 0;
74
-    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
75
-                   2, 2, 2, ref, 1);
76
-    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
77
-    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
78
-                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
79
-    assert(!FRAME_MBAFF(h));
80
-    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
81
-}
82
-
83
-void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
84
-                             int y, int height)
85
-{
86
-    AVCodecContext *avctx = h->avctx;
87
-    const AVFrame   *src  = h->cur_pic.f;
88
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
89
-    int vshift = desc->log2_chroma_h;
90
-    const int field_pic = h->picture_structure != PICT_FRAME;
91
-    if (field_pic) {
92
-        height <<= 1;
93
-        y      <<= 1;
94
-    }
95
-
96
-    height = FFMIN(height, avctx->height - y);
97
-
98
-    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
99
-        return;
100
-
101
-    if (avctx->draw_horiz_band) {
102
-        int offset[AV_NUM_DATA_POINTERS];
103
-        int i;
104
-
105
-        offset[0] = y * src->linesize[0];
106
-        offset[1] =
107
-        offset[2] = (y >> vshift) * src->linesize[1];
108
-        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
109
-            offset[i] = 0;
110
-
111
-        emms_c();
112
-
113
-        avctx->draw_horiz_band(avctx, src, offset,
114
-                               y, h->picture_structure, height);
115
-    }
116
-}
117
-
118
-void ff_h264_free_tables(H264Context *h)
119
-{
120
-    int i;
121
-
122
-    av_freep(&h->intra4x4_pred_mode);
123
-    av_freep(&h->chroma_pred_mode_table);
124
-    av_freep(&h->cbp_table);
125
-    av_freep(&h->mvd_table[0]);
126
-    av_freep(&h->mvd_table[1]);
127
-    av_freep(&h->direct_table);
128
-    av_freep(&h->non_zero_count);
129
-    av_freep(&h->slice_table_base);
130
-    h->slice_table = NULL;
131
-    av_freep(&h->list_counts);
132
-
133
-    av_freep(&h->mb2b_xy);
134
-    av_freep(&h->mb2br_xy);
135
-
136
-    av_buffer_pool_uninit(&h->qscale_table_pool);
137
-    av_buffer_pool_uninit(&h->mb_type_pool);
138
-    av_buffer_pool_uninit(&h->motion_val_pool);
139
-    av_buffer_pool_uninit(&h->ref_index_pool);
140
-
141
-    for (i = 0; i < h->nb_slice_ctx; i++) {
142
-        H264SliceContext *sl = &h->slice_ctx[i];
143
-
144
-        av_freep(&sl->dc_val_base);
145
-        av_freep(&sl->er.mb_index2xy);
146
-        av_freep(&sl->er.error_status_table);
147
-        av_freep(&sl->er.er_temp_buffer);
148
-
149
-        av_freep(&sl->bipred_scratchpad);
150
-        av_freep(&sl->edge_emu_buffer);
151
-        av_freep(&sl->top_borders[0]);
152
-        av_freep(&sl->top_borders[1]);
153
-
154
-        sl->bipred_scratchpad_allocated = 0;
155
-        sl->edge_emu_buffer_allocated   = 0;
156
-        sl->top_borders_allocated[0]    = 0;
157
-        sl->top_borders_allocated[1]    = 0;
158
-    }
159
-}
160
-
161
-int ff_h264_alloc_tables(H264Context *h)
162
-{
163
-    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
164
-    const int row_mb_num = h->mb_stride * 2 * h->nb_slice_ctx;
165
-    int x, y;
166
-
167
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
168
-                      row_mb_num * 8 * sizeof(uint8_t), fail)
169
-    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
170
-
171
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
172
-                      big_mb_num * 48 * sizeof(uint8_t), fail)
173
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
174
-                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
175
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
176
-                      big_mb_num * sizeof(uint16_t), fail)
177
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
178
-                      big_mb_num * sizeof(uint8_t), fail)
179
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
180
-                      16 * row_mb_num * sizeof(uint8_t), fail);
181
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
182
-                      16 * row_mb_num * sizeof(uint8_t), fail);
183
-    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
184
-    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
185
-
186
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
187
-                      4 * big_mb_num * sizeof(uint8_t), fail);
188
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
189
-                      big_mb_num * sizeof(uint8_t), fail)
190
-
191
-    memset(h->slice_table_base, -1,
192
-           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
193
-    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
194
-
195
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
196
-                      big_mb_num * sizeof(uint32_t), fail);
197
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
198
-                      big_mb_num * sizeof(uint32_t), fail);
199
-    for (y = 0; y < h->mb_height; y++)
200
-        for (x = 0; x < h->mb_width; x++) {
201
-            const int mb_xy = x + y * h->mb_stride;
202
-            const int b_xy  = 4 * x + 4 * y * h->b_stride;
203
-
204
-            h->mb2b_xy[mb_xy]  = b_xy;
205
-            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
206
-        }
207
-
208
-    return 0;
209
-
210
-fail:
211
-    ff_h264_free_tables(h);
212
-    return AVERROR(ENOMEM);
213
-}
214
-
215
-/**
216
- * Init context
217
- * Allocate buffers which are not shared amongst multiple threads.
218
- */
219
-int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
220
-{
221
-    ERContext *er = &sl->er;
222
-    int mb_array_size = h->mb_height * h->mb_stride;
223
-    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
224
-    int c_size  = h->mb_stride * (h->mb_height + 1);
225
-    int yc_size = y_size + 2   * c_size;
226
-    int x, y, i;
227
-
228
-    sl->ref_cache[0][scan8[5]  + 1] =
229
-    sl->ref_cache[0][scan8[7]  + 1] =
230
-    sl->ref_cache[0][scan8[13] + 1] =
231
-    sl->ref_cache[1][scan8[5]  + 1] =
232
-    sl->ref_cache[1][scan8[7]  + 1] =
233
-    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
234
-
235
-    if (CONFIG_ERROR_RESILIENCE) {
236
-        /* init ER */
237
-        er->avctx          = h->avctx;
238
-        er->decode_mb      = h264_er_decode_mb;
239
-        er->opaque         = h;
240
-        er->quarter_sample = 1;
241
-
242
-        er->mb_num      = h->mb_num;
243
-        er->mb_width    = h->mb_width;
244
-        er->mb_height   = h->mb_height;
245
-        er->mb_stride   = h->mb_stride;
246
-        er->b8_stride   = h->mb_width * 2 + 1;
247
-
248
-        // error resilience code looks cleaner with this
249
-        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
250
-                          (h->mb_num + 1) * sizeof(int), fail);
251
-
252
-        for (y = 0; y < h->mb_height; y++)
253
-            for (x = 0; x < h->mb_width; x++)
254
-                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
255
-
256
-        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
257
-                                                      h->mb_stride + h->mb_width;
258
-
259
-        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
260
-                          mb_array_size * sizeof(uint8_t), fail);
261
-
262
-        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
263
-                         h->mb_height * h->mb_stride, fail);
264
-
265
-        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
266
-                          yc_size * sizeof(int16_t), fail);
267
-        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
268
-        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
269
-        er->dc_val[2] = er->dc_val[1] + c_size;
270
-        for (i = 0; i < yc_size; i++)
271
-            sl->dc_val_base[i] = 1024;
272
-    }
273
-
274
-    return 0;
275
-
276
-fail:
277
-    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
278
-}
279
-
280
-static int h264_init_context(AVCodecContext *avctx, H264Context *h)
281
-{
282
-    int i;
283
-
284
-    h->avctx                 = avctx;
285
-
286
-    h->picture_structure     = PICT_FRAME;
287
-    h->workaround_bugs       = avctx->workaround_bugs;
288
-    h->flags                 = avctx->flags;
289
-    h->poc.prev_poc_msb      = 1 << 16;
290
-    h->recovery_frame        = -1;
291
-    h->frame_recovered       = 0;
292
-
293
-    h->next_outputed_poc = INT_MIN;
294
-    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
295
-        h->last_pocs[i] = INT_MIN;
296
-
297
-    ff_h264_sei_uninit(&h->sei);
298
-
299
-    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
300
-
301
-    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
302
-    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
303
-    if (!h->slice_ctx) {
304
-        h->nb_slice_ctx = 0;
305
-        return AVERROR(ENOMEM);
306
-    }
307
-
308
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
309
-        h->DPB[i].f = av_frame_alloc();
310
-        if (!h->DPB[i].f)
311
-            return AVERROR(ENOMEM);
312
-    }
313
-
314
-    h->cur_pic.f = av_frame_alloc();
315
-    if (!h->cur_pic.f)
316
-        return AVERROR(ENOMEM);
317
-
318
-    for (i = 0; i < h->nb_slice_ctx; i++)
319
-        h->slice_ctx[i].h264 = h;
320
-
321
-    return 0;
322
-}
323
-
324
-static av_cold int h264_decode_end(AVCodecContext *avctx)
325
-{
326
-    H264Context *h = avctx->priv_data;
327
-    int i;
328
-
329
-    ff_h264_free_tables(h);
330
-
331
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
332
-        ff_h264_unref_picture(h, &h->DPB[i]);
333
-        av_frame_free(&h->DPB[i].f);
334
-    }
335
-
336
-    h->cur_pic_ptr = NULL;
337
-
338
-    av_freep(&h->slice_ctx);
339
-    h->nb_slice_ctx = 0;
340
-
341
-    for (i = 0; i < MAX_SPS_COUNT; i++)
342
-        av_buffer_unref(&h->ps.sps_list[i]);
343
-
344
-    for (i = 0; i < MAX_PPS_COUNT; i++)
345
-        av_buffer_unref(&h->ps.pps_list[i]);
346
-
347
-    ff_h2645_packet_uninit(&h->pkt);
348
-
349
-    ff_h264_unref_picture(h, &h->cur_pic);
350
-    av_frame_free(&h->cur_pic.f);
351
-
352
-    return 0;
353
-}
354
-
355
-static AVOnce h264_vlc_init = AV_ONCE_INIT;
356
-
357
-av_cold int ff_h264_decode_init(AVCodecContext *avctx)
358
-{
359
-    H264Context *h = avctx->priv_data;
360
-    int ret;
361
-
362
-    ret = h264_init_context(avctx, h);
363
-    if (ret < 0)
364
-        return ret;
365
-
366
-    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
367
-    if (ret != 0) {
368
-        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
369
-        return AVERROR_UNKNOWN;
370
-    }
371
-
372
-    if (avctx->codec_id == AV_CODEC_ID_H264) {
373
-        if (avctx->ticks_per_frame == 1)
374
-            h->avctx->framerate.num *= 2;
375
-        avctx->ticks_per_frame = 2;
376
-    }
377
-
378
-    if (avctx->extradata_size > 0 && avctx->extradata) {
379
-       ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
380
-                                      &h->ps, &h->is_avc, &h->nal_length_size,
381
-                                      avctx->err_recognition, avctx);
382
-       if (ret < 0) {
383
-           h264_decode_end(avctx);
384
-           return ret;
385
-       }
386
-    }
387
-
388
-    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
389
-        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
390
-        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
391
-    }
392
-
393
-    avctx->internal->allocate_progress = 1;
394
-
395
-    if (h->enable_er) {
396
-        av_log(avctx, AV_LOG_WARNING,
397
-               "Error resilience is enabled. It is unsafe and unsupported and may crash. "
398
-               "Use it at your own risk\n");
399
-    }
400
-
401
-    return 0;
402
-}
403
-
404
-static int decode_init_thread_copy(AVCodecContext *avctx)
405
-{
406
-    H264Context *h = avctx->priv_data;
407
-    int ret;
408
-
409
-    if (!avctx->internal->is_copy)
410
-        return 0;
411
-
412
-    memset(h, 0, sizeof(*h));
413
-
414
-    ret = h264_init_context(avctx, h);
415
-    if (ret < 0)
416
-        return ret;
417
-
418
-    h->context_initialized = 0;
419
-
420
-    return 0;
421
-}
422
-
423
-/**
424
- * Run setup operations that must be run after slice header decoding.
425
- * This includes finding the next displayed frame.
426
- *
427
- * @param h h264 master context
428
- * @param setup_finished enough NALs have been read that we can call
429
- * ff_thread_finish_setup()
430
- */
431
-static void decode_postinit(H264Context *h, int setup_finished)
432
-{
433
-    const SPS *sps = h->ps.sps;
434
-    H264Picture *out = h->cur_pic_ptr;
435
-    H264Picture *cur = h->cur_pic_ptr;
436
-    int i, pics, out_of_order, out_idx;
437
-    int invalid = 0, cnt = 0;
438
-
439
-    if (h->next_output_pic)
440
-        return;
441
-
442
-    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
443
-        /* FIXME: if we have two PAFF fields in one packet, we can't start
444
-         * the next thread here. If we have one field per packet, we can.
445
-         * The check in decode_nal_units() is not good enough to find this
446
-         * yet, so we assume the worst for now. */
447
-        // if (setup_finished)
448
-        //    ff_thread_finish_setup(h->avctx);
449
-        return;
450
-    }
451
-
452
-    // FIXME do something with unavailable reference frames
453
-
454
-    /* Sort B-frames into display order */
455
-    if (sps->bitstream_restriction_flag ||
456
-        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
457
-        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
458
-    }
459
-
460
-    pics = 0;
461
-    while (h->delayed_pic[pics])
462
-        pics++;
463
-
464
-    assert(pics <= MAX_DELAYED_PIC_COUNT);
465
-
466
-    h->delayed_pic[pics++] = cur;
467
-    if (cur->reference == 0)
468
-        cur->reference = DELAYED_PIC_REF;
469
-
470
-    /* Frame reordering. This code takes pictures from coding order and sorts
471
-     * them by their incremental POC value into display order. It supports POC
472
-     * gaps, MMCO reset codes and random resets.
473
-     * A "display group" can start either with a IDR frame (f.key_frame = 1),
474
-     * and/or can be closed down with a MMCO reset code. In sequences where
475
-     * there is no delay, we can't detect that (since the frame was already
476
-     * output to the user), so we also set h->mmco_reset to detect the MMCO
477
-     * reset code.
478
-     * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
479
-     * we increase the delay between input and output. All frames affected by
480
-     * the lag (e.g. those that should have been output before another frame
481
-     * that we already returned to the user) will be dropped. This is a bug
482
-     * that we will fix later. */
483
-    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
484
-        cnt     += out->poc < h->last_pocs[i];
485
-        invalid += out->poc == INT_MIN;
486
-    }
487
-    if (!h->mmco_reset && !cur->f->key_frame &&
488
-        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
489
-        h->mmco_reset = 2;
490
-        if (pics > 1)
491
-            h->delayed_pic[pics - 2]->mmco_reset = 2;
492
-    }
493
-    if (h->mmco_reset || cur->f->key_frame) {
494
-        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
495
-            h->last_pocs[i] = INT_MIN;
496
-        cnt     = 0;
497
-        invalid = MAX_DELAYED_PIC_COUNT;
498
-    }
499
-    out     = h->delayed_pic[0];
500
-    out_idx = 0;
501
-    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
502
-                h->delayed_pic[i] &&
503
-                !h->delayed_pic[i - 1]->mmco_reset &&
504
-                !h->delayed_pic[i]->f->key_frame;
505
-         i++)
506
-        if (h->delayed_pic[i]->poc < out->poc) {
507
-            out     = h->delayed_pic[i];
508
-            out_idx = i;
509
-        }
510
-    if (h->avctx->has_b_frames == 0 &&
511
-        (h->delayed_pic[0]->f->key_frame || h->mmco_reset))
512
-        h->next_outputed_poc = INT_MIN;
513
-    out_of_order = !out->f->key_frame && !h->mmco_reset &&
514
-                   (out->poc < h->next_outputed_poc);
515
-
516
-    if (sps->bitstream_restriction_flag &&
517
-        h->avctx->has_b_frames >= sps->num_reorder_frames) {
518
-    } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
519
-               h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
520
-        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
521
-            h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
522
-        }
523
-    } else if (!h->avctx->has_b_frames &&
524
-               ((h->next_outputed_poc != INT_MIN &&
525
-                 out->poc > h->next_outputed_poc + 2) ||
526
-                cur->f->pict_type == AV_PICTURE_TYPE_B)) {
527
-        h->avctx->has_b_frames++;
528
-    }
529
-
530
-    if (pics > h->avctx->has_b_frames) {
531
-        out->reference &= ~DELAYED_PIC_REF;
532
-        for (i = out_idx; h->delayed_pic[i]; i++)
533
-            h->delayed_pic[i] = h->delayed_pic[i + 1];
534
-    }
535
-    memmove(h->last_pocs, &h->last_pocs[1],
536
-            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
537
-    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
538
-    if (!out_of_order && pics > h->avctx->has_b_frames) {
539
-        h->next_output_pic = out;
540
-        if (out->mmco_reset) {
541
-            if (out_idx > 0) {
542
-                h->next_outputed_poc                    = out->poc;
543
-                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
544
-            } else {
545
-                h->next_outputed_poc = INT_MIN;
546
-            }
547
-        } else {
548
-            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) {
549
-                h->next_outputed_poc = INT_MIN;
550
-            } else {
551
-                h->next_outputed_poc = out->poc;
552
-            }
553
-        }
554
-        h->mmco_reset = 0;
555
-    } else {
556
-        av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
557
-    }
558
-
559
-    if (h->next_output_pic) {
560
-        if (h->next_output_pic->recovered) {
561
-            // We have reached an recovery point and all frames after it in
562
-            // display order are "recovered".
563
-            h->frame_recovered |= FRAME_RECOVERED_SEI;
564
-        }
565
-        h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
566
-    }
567
-
568
-    if (setup_finished && !h->avctx->hwaccel) {
569
-        ff_thread_finish_setup(h->avctx);
570
-
571
-        if (h->avctx->active_thread_type & FF_THREAD_FRAME)
572
-            h->setup_finished = 1;
573
-    }
574
-}
575
-
576
-/**
577
- * instantaneous decoder refresh.
578
- */
579
-static void idr(H264Context *h)
580
-{
581
-    ff_h264_remove_all_refs(h);
582
-    h->poc.prev_frame_num        =
583
-    h->poc.prev_frame_num_offset =
584
-    h->poc.prev_poc_msb          =
585
-    h->poc.prev_poc_lsb          = 0;
586
-}
587
-
588
-/* forget old pics after a seek */
589
-void ff_h264_flush_change(H264Context *h)
590
-{
591
-    int i;
592
-    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
593
-        h->last_pocs[i] = INT_MIN;
594
-    h->next_outputed_poc = INT_MIN;
595
-    h->prev_interlaced_frame = 1;
596
-    idr(h);
597
-    if (h->cur_pic_ptr)
598
-        h->cur_pic_ptr->reference = 0;
599
-    h->first_field = 0;
600
-    ff_h264_sei_uninit(&h->sei);
601
-    h->recovery_frame = -1;
602
-    h->frame_recovered = 0;
603
-}
604
-
605
-/* forget old pics after a seek */
606
-static void flush_dpb(AVCodecContext *avctx)
607
-{
608
-    H264Context *h = avctx->priv_data;
609
-    int i;
610
-
611
-    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
612
-
613
-    ff_h264_flush_change(h);
614
-
615
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
616
-        ff_h264_unref_picture(h, &h->DPB[i]);
617
-    h->cur_pic_ptr = NULL;
618
-    ff_h264_unref_picture(h, &h->cur_pic);
619
-
620
-    h->mb_y = 0;
621
-
622
-    ff_h264_free_tables(h);
623
-    h->context_initialized = 0;
624
-}
625
-
626
-static int get_last_needed_nal(H264Context *h)
627
-{
628
-    int nals_needed = 0;
629
-    int i;
630
-
631
-    for (i = 0; i < h->pkt.nb_nals; i++) {
632
-        H2645NAL *nal = &h->pkt.nals[i];
633
-        GetBitContext gb;
634
-
635
-        /* packets can sometimes contain multiple PPS/SPS,
636
-         * e.g. two PAFF field pictures in one packet, or a demuxer
637
-         * which splits NALs strangely if so, when frame threading we
638
-         * can't start the next thread until we've read all of them */
639
-        switch (nal->type) {
640
-        case NAL_SPS:
641
-        case NAL_PPS:
642
-            nals_needed = i;
643
-            break;
644
-        case NAL_DPA:
645
-        case NAL_IDR_SLICE:
646
-        case NAL_SLICE:
647
-            init_get_bits(&gb, nal->data + 1, (nal->size - 1) * 8);
648
-            if (!get_ue_golomb(&gb))
649
-                nals_needed = i;
650
-        }
651
-    }
652
-
653
-    return nals_needed;
654
-}
655
-
656
-static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
657
-{
658
-    AVCodecContext *const avctx = h->avctx;
659
-    unsigned context_count = 0;
660
-    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
661
-    int i, ret = 0;
662
-
663
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
664
-        h->current_slice = 0;
665
-        if (!h->first_field)
666
-            h->cur_pic_ptr = NULL;
667
-        ff_h264_sei_uninit(&h->sei);
668
-    }
669
-
670
-    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
671
-                                h->nal_length_size, avctx->codec_id);
672
-    if (ret < 0) {
673
-        av_log(avctx, AV_LOG_ERROR,
674
-               "Error splitting the input into NAL units.\n");
675
-        return ret;
676
-    }
677
-
678
-    if (avctx->active_thread_type & FF_THREAD_FRAME)
679
-        nals_needed = get_last_needed_nal(h);
680
-
681
-    for (i = 0; i < h->pkt.nb_nals; i++) {
682
-        H2645NAL *nal = &h->pkt.nals[i];
683
-        H264SliceContext *sl = &h->slice_ctx[context_count];
684
-        int err;
685
-
686
-        if (avctx->skip_frame >= AVDISCARD_NONREF &&
687
-            nal->ref_idc == 0 && nal->type != NAL_SEI)
688
-            continue;
689
-
690
-        // FIXME these should stop being context-global variables
691
-        h->nal_ref_idc   = nal->ref_idc;
692
-        h->nal_unit_type = nal->type;
693
-
694
-        err = 0;
695
-        switch (nal->type) {
696
-        case NAL_IDR_SLICE:
697
-            if (nal->type != NAL_IDR_SLICE) {
698
-                av_log(h->avctx, AV_LOG_ERROR,
699
-                       "Invalid mix of idr and non-idr slices\n");
700
-                ret = -1;
701
-                goto end;
702
-            }
703
-            idr(h); // FIXME ensure we don't lose some frames if there is reordering
704
-        case NAL_SLICE:
705
-            sl->gb = nal->gb;
706
-
707
-            if ((err = ff_h264_decode_slice_header(h, sl, nal)))
708
-                break;
709
-
710
-            if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
711
-                h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
712
-                                    ((1 << h->ps.sps->log2_max_frame_num) - 1);
713
-            }
714
-
715
-            h->cur_pic_ptr->f->key_frame |=
716
-                (nal->type == NAL_IDR_SLICE) || (h->sei.recovery_point.recovery_frame_cnt >= 0);
717
-
718
-            if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
719
-                h->recovery_frame         = -1;
720
-                h->cur_pic_ptr->recovered = 1;
721
-            }
722
-            // If we have an IDR, all frames after it in decoded order are
723
-            // "recovered".
724
-            if (nal->type == NAL_IDR_SLICE)
725
-                h->frame_recovered |= FRAME_RECOVERED_IDR;
726
-            h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
727
-
728
-            if (h->current_slice == 1) {
729
-                if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
730
-                    decode_postinit(h, i >= nals_needed);
731
-
732
-                if (h->avctx->hwaccel &&
733
-                    (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
734
-                    return ret;
735
-            }
736
-
737
-            if (sl->redundant_pic_count == 0 &&
738
-                (avctx->skip_frame < AVDISCARD_NONREF || nal->ref_idc) &&
739
-                (avctx->skip_frame < AVDISCARD_BIDIR  ||
740
-                 sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
741
-                (avctx->skip_frame < AVDISCARD_NONKEY ||
742
-                 h->cur_pic_ptr->f->key_frame) &&
743
-                avctx->skip_frame < AVDISCARD_ALL) {
744
-                if (avctx->hwaccel) {
745
-                    ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
746
-                    if (ret < 0)
747
-                        return ret;
748
-                } else
749
-                    context_count++;
750
-            }
751
-            break;
752
-        case NAL_DPA:
753
-        case NAL_DPB:
754
-        case NAL_DPC:
755
-            avpriv_request_sample(avctx, "data partitioning");
756
-            ret = AVERROR(ENOSYS);
757
-            goto end;
758
-            break;
759
-        case NAL_SEI:
760
-            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
761
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
762
-                goto end;
763
-            break;
764
-        case NAL_SPS:
765
-            ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
766
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
767
-                goto end;
768
-            break;
769
-        case NAL_PPS:
770
-            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
771
-                                                       nal->size_bits);
772
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
773
-                goto end;
774
-            break;
775
-        case NAL_AUD:
776
-        case NAL_END_SEQUENCE:
777
-        case NAL_END_STREAM:
778
-        case NAL_FILLER_DATA:
779
-        case NAL_SPS_EXT:
780
-        case NAL_AUXILIARY_SLICE:
781
-            break;
782
-        default:
783
-            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
784
-                   nal->type, nal->size_bits);
785
-        }
786
-
787
-        if (context_count == h->nb_slice_ctx) {
788
-            ret = ff_h264_execute_decode_slices(h, context_count);
789
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
790
-                goto end;
791
-            context_count = 0;
792
-        }
793
-
794
-        if (err < 0) {
795
-            av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
796
-            sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
797
-        }
798
-    }
799
-    if (context_count) {
800
-        ret = ff_h264_execute_decode_slices(h, context_count);
801
-        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
802
-            goto end;
803
-    }
804
-
805
-    ret = 0;
806
-end:
807
-    /* clean up */
808
-    if (h->cur_pic_ptr && !h->droppable) {
809
-        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
810
-                                  h->picture_structure == PICT_BOTTOM_FIELD);
811
-    }
812
-
813
-    return (ret < 0) ? ret : buf_size;
814
-}
815
-
816
-/**
817
- * Return the number of bytes consumed for building the current frame.
818
- */
819
-static int get_consumed_bytes(int pos, int buf_size)
820
-{
821
-    if (pos == 0)
822
-        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
823
-    if (pos + 10 > buf_size)
824
-        pos = buf_size; // oops ;)
825
-
826
-    return pos;
827
-}
828
-
829
-static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
830
-{
831
-    int i;
832
-    int ret = av_frame_ref(dst, src);
833
-    if (ret < 0)
834
-        return ret;
835
-
836
-    if (!h->ps.sps || !h->ps.sps->crop)
837
-        return 0;
838
-
839
-    for (i = 0; i < 3; i++) {
840
-        int hshift = (i > 0) ? h->chroma_x_shift : 0;
841
-        int vshift = (i > 0) ? h->chroma_y_shift : 0;
842
-        int off    = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) +
843
-                     (h->ps.sps->crop_top >> vshift) * dst->linesize[i];
844
-        dst->data[i] += off;
845
-    }
846
-    return 0;
847
-}
848
-
849
-static int h264_decode_frame(AVCodecContext *avctx, void *data,
850
-                             int *got_frame, AVPacket *avpkt)
851
-{
852
-    const uint8_t *buf = avpkt->data;
853
-    int buf_size       = avpkt->size;
854
-    H264Context *h     = avctx->priv_data;
855
-    AVFrame *pict      = data;
856
-    int buf_index      = 0;
857
-    int ret;
858
-    const uint8_t *new_extradata;
859
-    int new_extradata_size;
860
-
861
-    h->flags = avctx->flags;
862
-    h->setup_finished = 0;
863
-
864
-    /* end of stream, output what is still in the buffers */
865
-out:
866
-    if (buf_size == 0) {
867
-        H264Picture *out;
868
-        int i, out_idx;
869
-
870
-        h->cur_pic_ptr = NULL;
871
-
872
-        // FIXME factorize this with the output code below
873
-        out     = h->delayed_pic[0];
874
-        out_idx = 0;
875
-        for (i = 1;
876
-             h->delayed_pic[i] &&
877
-             !h->delayed_pic[i]->f->key_frame &&
878
-             !h->delayed_pic[i]->mmco_reset;
879
-             i++)
880
-            if (h->delayed_pic[i]->poc < out->poc) {
881
-                out     = h->delayed_pic[i];
882
-                out_idx = i;
883
-            }
884
-
885
-        for (i = out_idx; h->delayed_pic[i]; i++)
886
-            h->delayed_pic[i] = h->delayed_pic[i + 1];
887
-
888
-        if (out) {
889
-            ret = output_frame(h, pict, out->f);
890
-            if (ret < 0)
891
-                return ret;
892
-            *got_frame = 1;
893
-        }
894
-
895
-        return buf_index;
896
-    }
897
-
898
-    new_extradata_size = 0;
899
-    new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
900
-                                            &new_extradata_size);
901
-    if (new_extradata_size > 0 && new_extradata) {
902
-        ret = ff_h264_decode_extradata(new_extradata, new_extradata_size,
903
-                                       &h->ps, &h->is_avc, &h->nal_length_size,
904
-                                       avctx->err_recognition, avctx);
905
-        if (ret < 0)
906
-            return ret;
907
-    }
908
-
909
-    buf_index = decode_nal_units(h, buf, buf_size);
910
-    if (buf_index < 0)
911
-        return AVERROR_INVALIDDATA;
912
-
913
-    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
914
-        buf_size = 0;
915
-        goto out;
916
-    }
917
-
918
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
919
-        if (avctx->skip_frame >= AVDISCARD_NONREF)
920
-            return 0;
921
-        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
922
-        return AVERROR_INVALIDDATA;
923
-    }
924
-
925
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
926
-        (h->mb_y >= h->mb_height && h->mb_height)) {
927
-        if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
928
-            decode_postinit(h, 1);
929
-
930
-        ff_h264_field_end(h, &h->slice_ctx[0], 0);
931
-
932
-        *got_frame = 0;
933
-        if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
934
-                                   h->next_output_pic->recovered)) {
935
-            if (!h->next_output_pic->recovered)
936
-                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
937
-
938
-            ret = output_frame(h, pict, h->next_output_pic->f);
939
-            if (ret < 0)
940
-                return ret;
941
-            *got_frame = 1;
942
-        }
943
-    }
944
-
945
-    assert(pict->buf[0] || !*got_frame);
946
-
947
-    return get_consumed_bytes(buf_index, buf_size);
948
-}
949
-
950
-#define OFFSET(x) offsetof(H264Context, x)
951
-#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
952
-static const AVOption h264_options[] = {
953
-    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
954
-    { NULL },
955
-};
956
-
957
-static const AVClass h264_class = {
958
-    .class_name = "h264",
959
-    .item_name  = av_default_item_name,
960
-    .option     = h264_options,
961
-    .version    = LIBAVUTIL_VERSION_INT,
962
-};
963
-
964
-AVCodec ff_h264_decoder = {
965
-    .name                  = "h264",
966
-    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
967
-    .type                  = AVMEDIA_TYPE_VIDEO,
968
-    .id                    = AV_CODEC_ID_H264,
969
-    .priv_data_size        = sizeof(H264Context),
970
-    .init                  = ff_h264_decode_init,
971
-    .close                 = h264_decode_end,
972
-    .decode                = h264_decode_frame,
973
-    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
974
-                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
975
-                             AV_CODEC_CAP_FRAME_THREADS,
976
-    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
977
-    .flush                 = flush_dpb,
978
-    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
979
-    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
980
-    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
981
-    .priv_class            = &h264_class,
982
-};
983 1
deleted file mode 100644
... ...
@@ -1,938 +0,0 @@
1
-/*
2
- * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3
- * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4
- *
5
- * This file is part of Libav.
6
- *
7
- * Libav is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU Lesser General Public
9
- * License as published by the Free Software Foundation; either
10
- * version 2.1 of the License, or (at your option) any later version.
11
- *
12
- * Libav is distributed in the hope that it will be useful,
13
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * Lesser General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU Lesser General Public
18
- * License along with Libav; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
- */
21
-
22
-/**
23
- * @file
24
- * H.264 / AVC / MPEG-4 part10 codec.
25
- * @author Michael Niedermayer <michaelni@gmx.at>
26
- */
27
-
28
-#ifndef AVCODEC_H264_H
29
-#define AVCODEC_H264_H
30
-
31
-#include "libavutil/buffer.h"
32
-#include "libavutil/intreadwrite.h"
33
-#include "libavutil/thread.h"
34
-
35
-#include "cabac.h"
36
-#include "error_resilience.h"
37
-#include "h264_parse.h"
38
-#include "h264_sei.h"
39
-#include "h2645_parse.h"
40
-#include "h264chroma.h"
41
-#include "h264dsp.h"
42
-#include "h264pred.h"
43
-#include "h264qpel.h"
44
-#include "internal.h"
45
-#include "mpegutils.h"
46
-#include "parser.h"
47
-#include "qpeldsp.h"
48
-#include "rectangle.h"
49
-#include "videodsp.h"
50
-
51
-#define H264_MAX_PICTURE_COUNT 32
52
-
53
-#define MAX_SPS_COUNT          32
54
-#define MAX_PPS_COUNT         256
55
-
56
-#define MAX_MMCO_COUNT         66
57
-
58
-#define MAX_DELAYED_PIC_COUNT  16
59
-
60
-/* Compiling in interlaced support reduces the speed
61
- * of progressive decoding by about 2%. */
62
-#define ALLOW_INTERLACE
63
-
64
-#define FMO 0
65
-
66
-/**
67
- * The maximum number of slices supported by the decoder.
68
- * must be a power of 2
69
- */
70
-#define MAX_SLICES 32
71
-
72
-#ifdef ALLOW_INTERLACE
73
-#define MB_MBAFF(h)    h->mb_mbaff
74
-#define MB_FIELD(h)    h->mb_field_decoding_flag
75
-#define FRAME_MBAFF(h) h->mb_aff_frame
76
-#define FIELD_PICTURE(h) (h->picture_structure != PICT_FRAME)
77
-#define LEFT_MBS 2
78
-#define LTOP     0
79
-#define LBOT     1
80
-#define LEFT(i)  (i)
81
-#else
82
-#define MB_MBAFF(h)      0
83
-#define MB_FIELD(h)      0
84
-#define FRAME_MBAFF(h)   0
85
-#define FIELD_PICTURE(h) 0
86
-#undef  IS_INTERLACED
87
-#define IS_INTERLACED(mb_type) 0
88
-#define LEFT_MBS 1
89
-#define LTOP     0
90
-#define LBOT     0
91
-#define LEFT(i)  0
92
-#endif
93
-#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
94
-
95
-#ifndef CABAC
96
-#define CABAC(h) h->ps.pps->cabac
97
-#endif
98
-
99
-#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
100
-#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
101
-
102
-#define EXTENDED_SAR       255
103
-
104
-#define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty but it fits in 16 bit
105
-#define MB_TYPE_8x8DCT     0x01000000
106
-#define IS_REF0(a)         ((a) & MB_TYPE_REF0)
107
-#define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)
108
-
109
-#define QP_MAX_NUM (51 + 2 * 6)           // The maximum supported qp
110
-
111
-/* NAL unit types */
112
-enum {
113
-    NAL_SLICE           = 1,
114
-    NAL_DPA             = 2,
115
-    NAL_DPB             = 3,
116
-    NAL_DPC             = 4,
117
-    NAL_IDR_SLICE       = 5,
118
-    NAL_SEI             = 6,
119
-    NAL_SPS             = 7,
120
-    NAL_PPS             = 8,
121
-    NAL_AUD             = 9,
122
-    NAL_END_SEQUENCE    = 10,
123
-    NAL_END_STREAM      = 11,
124
-    NAL_FILLER_DATA     = 12,
125
-    NAL_SPS_EXT         = 13,
126
-    NAL_AUXILIARY_SLICE = 19,
127
-};
128
-
129
-/**
130
- * Sequence parameter set
131
- */
132
-typedef struct SPS {
133
-    unsigned int sps_id;
134
-    int profile_idc;
135
-    int level_idc;
136
-    int chroma_format_idc;
137
-    int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
138
-    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
139
-    int poc_type;                      ///< pic_order_cnt_type
140
-    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
141
-    int delta_pic_order_always_zero_flag;
142
-    int offset_for_non_ref_pic;
143
-    int offset_for_top_to_bottom_field;
144
-    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
145
-    int ref_frame_count;               ///< num_ref_frames
146
-    int gaps_in_frame_num_allowed_flag;
147
-    int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
148
-    int mb_height;                     ///< pic_height_in_map_units_minus1 + 1
149
-    int frame_mbs_only_flag;
150
-    int mb_aff;                        ///< mb_adaptive_frame_field_flag
151
-    int direct_8x8_inference_flag;
152
-    int crop;                          ///< frame_cropping_flag
153
-
154
-    /* those 4 are already in luma samples */
155
-    unsigned int crop_left;            ///< frame_cropping_rect_left_offset
156
-    unsigned int crop_right;           ///< frame_cropping_rect_right_offset
157
-    unsigned int crop_top;             ///< frame_cropping_rect_top_offset
158
-    unsigned int crop_bottom;          ///< frame_cropping_rect_bottom_offset
159
-    int vui_parameters_present_flag;
160
-    AVRational sar;
161
-    int video_signal_type_present_flag;
162
-    int full_range;
163
-    int colour_description_present_flag;
164
-    enum AVColorPrimaries color_primaries;
165
-    enum AVColorTransferCharacteristic color_trc;
166
-    enum AVColorSpace colorspace;
167
-    int timing_info_present_flag;
168
-    uint32_t num_units_in_tick;
169
-    uint32_t time_scale;
170
-    int fixed_frame_rate_flag;
171
-    short offset_for_ref_frame[256]; // FIXME dyn aloc?
172
-    int bitstream_restriction_flag;
173
-    int num_reorder_frames;
174
-    int scaling_matrix_present;
175
-    uint8_t scaling_matrix4[6][16];
176
-    uint8_t scaling_matrix8[6][64];
177
-    int nal_hrd_parameters_present_flag;
178
-    int vcl_hrd_parameters_present_flag;
179
-    int pic_struct_present_flag;
180
-    int time_offset_length;
181
-    int cpb_cnt;                          ///< See H.264 E.1.2
182
-    int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
183
-    int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
184
-    int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
185
-    int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
186
-    int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
187
-    int residual_color_transform_flag;    ///< residual_colour_transform_flag
188
-    int constraint_set_flags;             ///< constraint_set[0-3]_flag
189
-} SPS;
190
-
191
-/**
192
- * Picture parameter set
193
- */
194
-typedef struct PPS {
195
-    unsigned int sps_id;
196
-    int cabac;                  ///< entropy_coding_mode_flag
197
-    int pic_order_present;      ///< pic_order_present_flag
198
-    int slice_group_count;      ///< num_slice_groups_minus1 + 1
199
-    int mb_slice_group_map_type;
200
-    unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
201
-    int weighted_pred;          ///< weighted_pred_flag
202
-    int weighted_bipred_idc;
203
-    int init_qp;                ///< pic_init_qp_minus26 + 26
204
-    int init_qs;                ///< pic_init_qs_minus26 + 26
205
-    int chroma_qp_index_offset[2];
206
-    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
207
-    int constrained_intra_pred;     ///< constrained_intra_pred_flag
208
-    int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
209
-    int transform_8x8_mode;         ///< transform_8x8_mode_flag
210
-    uint8_t scaling_matrix4[6][16];
211
-    uint8_t scaling_matrix8[6][64];
212
-    uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
213
-    int chroma_qp_diff;
214
-
215
-    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
216
-    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
217
-    uint32_t(*dequant4_coeff[6])[16];
218
-    uint32_t(*dequant8_coeff[6])[64];
219
-} PPS;
220
-
221
-typedef struct H264ParamSets {
222
-    AVBufferRef *sps_list[MAX_SPS_COUNT];
223
-    AVBufferRef *pps_list[MAX_PPS_COUNT];
224
-
225
-    /* currently active parameters sets */
226
-    const PPS *pps;
227
-    // FIXME this should properly be const
228
-    SPS *sps;
229
-} H264ParamSets;
230
-
231
-/**
232
- * Memory management control operation opcode.
233
- */
234
-typedef enum MMCOOpcode {
235
-    MMCO_END = 0,
236
-    MMCO_SHORT2UNUSED,
237
-    MMCO_LONG2UNUSED,
238
-    MMCO_SHORT2LONG,
239
-    MMCO_SET_MAX_LONG,
240
-    MMCO_RESET,
241
-    MMCO_LONG,
242
-} MMCOOpcode;
243
-
244
-/**
245
- * Memory management control operation.
246
- */
247
-typedef struct MMCO {
248
-    MMCOOpcode opcode;
249
-    int short_pic_num;  ///< pic_num without wrapping (pic_num & max_pic_num)
250
-    int long_arg;       ///< index, pic_num, or num long refs depending on opcode
251
-} MMCO;
252
-
253
-typedef struct H264Picture {
254
-    AVFrame *f;
255
-    ThreadFrame tf;
256
-
257
-    AVBufferRef *qscale_table_buf;
258
-    int8_t *qscale_table;
259
-
260
-    AVBufferRef *motion_val_buf[2];
261
-    int16_t (*motion_val[2])[2];
262
-
263
-    AVBufferRef *mb_type_buf;
264
-    uint32_t *mb_type;
265
-
266
-    AVBufferRef *hwaccel_priv_buf;
267
-    void *hwaccel_picture_private; ///< hardware accelerator private data
268
-
269
-    AVBufferRef *ref_index_buf[2];
270
-    int8_t *ref_index[2];
271
-
272
-    int field_poc[2];       ///< top/bottom POC
273
-    int poc;                ///< frame POC
274
-    int frame_num;          ///< frame_num (raw frame_num from slice header)
275
-    int mmco_reset;         /**< MMCO_RESET set this 1. Reordering code must
276
-                                 not mix pictures before and after MMCO_RESET. */
277
-    int pic_id;             /**< pic_num (short -> no wrap version of pic_num,
278
-                                 pic_num & max_pic_num; long -> long_pic_num) */
279
-    int long_ref;           ///< 1->long term reference 0->short term reference
280
-    int ref_poc[2][2][32];  ///< POCs of the frames used as reference (FIXME need per slice)
281
-    int ref_count[2][2];    ///< number of entries in ref_poc         (FIXME need per slice)
282
-    int mbaff;              ///< 1 -> MBAFF frame 0-> not MBAFF
283
-    int field_picture;      ///< whether or not picture was encoded in separate fields
284
-
285
-    int reference;
286
-    int recovered;          ///< picture at IDR or recovery point + recovery count
287
-} H264Picture;
288
-
289
-typedef struct H264Ref {
290
-    uint8_t *data[3];
291
-    int linesize[3];
292
-
293
-    int reference;
294
-    int poc;
295
-    int pic_id;
296
-
297
-    H264Picture *parent;
298
-} H264Ref;
299
-
300
-typedef struct H264SliceContext {
301
-    struct H264Context *h264;
302
-    GetBitContext gb;
303
-    ERContext er;
304
-
305
-    int slice_num;
306
-    int slice_type;
307
-    int slice_type_nos;         ///< S free slice type (SI/SP are remapped to I/P)
308
-    int slice_type_fixed;
309
-
310
-    int qscale;
311
-    int chroma_qp[2];   // QPc
312
-    int qp_thresh;      ///< QP threshold to skip loopfilter
313
-    int last_qscale_diff;
314
-
315
-    // deblock
316
-    int deblocking_filter;          ///< disable_deblocking_filter_idc with 1 <-> 0
317
-    int slice_alpha_c0_offset;
318
-    int slice_beta_offset;
319
-
320
-    H264PredWeightTable pwt;
321
-
322
-    int prev_mb_skipped;
323
-    int next_mb_skipped;
324
-
325
-    int chroma_pred_mode;
326
-    int intra16x16_pred_mode;
327
-
328
-    int8_t intra4x4_pred_mode_cache[5 * 8];
329
-    int8_t(*intra4x4_pred_mode);
330
-
331
-    int topleft_mb_xy;
332
-    int top_mb_xy;
333
-    int topright_mb_xy;
334
-    int left_mb_xy[LEFT_MBS];
335
-
336
-    int topleft_type;
337
-    int top_type;
338
-    int topright_type;
339
-    int left_type[LEFT_MBS];
340
-
341
-    const uint8_t *left_block;
342
-    int topleft_partition;
343
-
344
-    unsigned int topleft_samples_available;
345
-    unsigned int top_samples_available;
346
-    unsigned int topright_samples_available;
347
-    unsigned int left_samples_available;
348
-
349
-    ptrdiff_t linesize, uvlinesize;
350
-    ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
351
-    ptrdiff_t mb_uvlinesize;
352
-
353
-    int mb_x, mb_y;
354
-    int mb_xy;
355
-    int resync_mb_x;
356
-    int resync_mb_y;
357
-    unsigned int first_mb_addr;
358
-    // index of the first MB of the next slice
359
-    int next_slice_idx;
360
-    int mb_skip_run;
361
-    int is_complex;
362
-
363
-    int picture_structure;
364
-    int mb_field_decoding_flag;
365
-    int mb_mbaff;               ///< mb_aff_frame && mb_field_decoding_flag
366
-
367
-    int redundant_pic_count;
368
-
369
-    /**
370
-     * number of neighbors (top and/or left) that used 8x8 dct
371
-     */
372
-    int neighbor_transform_size;
373
-
374
-    int direct_spatial_mv_pred;
375
-    int col_parity;
376
-    int col_fieldoff;
377
-
378
-    int cbp;
379
-    int top_cbp;
380
-    int left_cbp;
381
-
382
-    int dist_scale_factor[32];
383
-    int dist_scale_factor_field[2][32];
384
-    int map_col_to_list0[2][16 + 32];
385
-    int map_col_to_list0_field[2][2][16 + 32];
386
-
387
-    /**
388
-     * num_ref_idx_l0/1_active_minus1 + 1
389
-     */
390
-    unsigned int ref_count[2];          ///< counts frames or fields, depending on current mb mode
391
-    unsigned int list_count;
392
-    H264Ref ref_list[2][48];        /**< 0..15: frame refs, 16..47: mbaff field refs.
393
-                                         *   Reordered version of default_ref_list
394
-                                         *   according to picture reordering in slice header */
395
-    struct {
396
-        uint8_t op;
397
-        uint8_t val;
398
-    } ref_modifications[2][32];
399
-    int nb_ref_modifications[2];
400
-
401
-    unsigned int pps_id;
402
-
403
-    const uint8_t *intra_pcm_ptr;
404
-    int16_t *dc_val_base;
405
-
406
-    uint8_t *bipred_scratchpad;
407
-    uint8_t *edge_emu_buffer;
408
-    uint8_t (*top_borders[2])[(16 * 3) * 2];
409
-    int bipred_scratchpad_allocated;
410
-    int edge_emu_buffer_allocated;
411
-    int top_borders_allocated[2];
412
-
413
-    /**
414
-     * non zero coeff count cache.
415
-     * is 64 if not available.
416
-     */
417
-    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
418
-
419
-    /**
420
-     * Motion vector cache.
421
-     */
422
-    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
423
-    DECLARE_ALIGNED(8,  int8_t, ref_cache)[2][5 * 8];
424
-    DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
425
-    uint8_t direct_cache[5 * 8];
426
-
427
-    DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
428
-
429
-    ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
430
-    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
431
-    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
432
-    ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either
433
-    ///< check that i is not too large or ensure that there is some unused stuff after mb
434
-    int16_t mb_padding[256 * 2];
435
-
436
-    uint8_t (*mvd_table[2])[2];
437
-
438
-    /**
439
-     * Cabac
440
-     */
441
-    CABACContext cabac;
442
-    uint8_t cabac_state[1024];
443
-    int cabac_init_idc;
444
-
445
-    MMCO mmco[MAX_MMCO_COUNT];
446
-    int  nb_mmco;
447
-    int explicit_ref_marking;
448
-
449
-    int frame_num;
450
-    int poc_lsb;
451
-    int delta_poc_bottom;
452
-    int delta_poc[2];
453
-    int curr_pic_num;
454
-    int max_pic_num;
455
-} H264SliceContext;
456
-
457
-/**
458
- * H264Context
459
- */
460
-typedef struct H264Context {
461
-    const AVClass *class;
462
-    AVCodecContext *avctx;
463
-    VideoDSPContext vdsp;
464
-    H264DSPContext h264dsp;
465
-    H264ChromaContext h264chroma;
466
-    H264QpelContext h264qpel;
467
-
468
-    H264Picture DPB[H264_MAX_PICTURE_COUNT];
469
-    H264Picture *cur_pic_ptr;
470
-    H264Picture cur_pic;
471
-
472
-    H264SliceContext *slice_ctx;
473
-    int            nb_slice_ctx;
474
-
475
-    H2645Packet pkt;
476
-
477
-    int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264
478
-
479
-    /* coded dimensions -- 16 * mb w/h */
480
-    int width, height;
481
-    int chroma_x_shift, chroma_y_shift;
482
-
483
-    int droppable;
484
-    int coded_picture_number;
485
-
486
-    int context_initialized;
487
-    int flags;
488
-    int workaround_bugs;
489
-    /* Set when slice threading is used and at least one slice uses deblocking
490
-     * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
491
-     * during normal MB decoding and execute it serially at the end.
492
-     */
493
-    int postpone_filter;
494
-
495
-    int8_t(*intra4x4_pred_mode);
496
-    H264PredContext hpc;
497
-
498
-    uint8_t (*non_zero_count)[48];
499
-
500
-#define LIST_NOT_USED -1 // FIXME rename?
501
-#define PART_NOT_AVAILABLE -2
502
-
503
-    /**
504
-     * block_offset[ 0..23] for frame macroblocks
505
-     * block_offset[24..47] for field macroblocks
506
-     */
507
-    int block_offset[2 * (16 * 3)];
508
-
509
-    uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
510
-    uint32_t *mb2br_xy;
511
-    int b_stride;       // FIXME use s->b4_stride
512
-
513
-    uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
514
-
515
-    // interlacing specific flags
516
-    int mb_aff_frame;
517
-    int picture_structure;
518
-    int first_field;
519
-
520
-    uint8_t *list_counts;               ///< Array of list_count per MB specifying the slice type
521
-
522
-    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
523
-    uint16_t *cbp_table;
524
-
525
-    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
526
-    uint8_t *chroma_pred_mode_table;
527
-    uint8_t (*mvd_table[2])[2];
528
-    uint8_t *direct_table;
529
-
530
-    uint8_t zigzag_scan[16];
531
-    uint8_t zigzag_scan8x8[64];
532
-    uint8_t zigzag_scan8x8_cavlc[64];
533
-    uint8_t field_scan[16];
534
-    uint8_t field_scan8x8[64];
535
-    uint8_t field_scan8x8_cavlc[64];
536
-    const uint8_t *zigzag_scan_q0;
537
-    const uint8_t *zigzag_scan8x8_q0;
538
-    const uint8_t *zigzag_scan8x8_cavlc_q0;
539
-    const uint8_t *field_scan_q0;
540
-    const uint8_t *field_scan8x8_q0;
541
-    const uint8_t *field_scan8x8_cavlc_q0;
542
-
543
-    int mb_y;
544
-    int mb_height, mb_width;
545
-    int mb_stride;
546
-    int mb_num;
547
-
548
-    // =============================================================
549
-    // Things below are not used in the MB or more inner code
550
-
551
-    int nal_ref_idc;
552
-    int nal_unit_type;
553
-
554
-    /**
555
-     * Used to parse AVC variant of H.264
556
-     */
557
-    int is_avc;           ///< this flag is != 0 if codec is avc1
558
-    int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)
559
-
560
-    int bit_depth_luma;         ///< luma bit depth from sps to detect changes
561
-    int chroma_format_idc;      ///< chroma format from sps to detect changes
562
-
563
-    H264ParamSets ps;
564
-
565
-    uint16_t *slice_table_base;
566
-
567
-    H264POCContext poc;
568
-
569
-    H264Picture *short_ref[32];
570
-    H264Picture *long_ref[32];
571
-    H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
572
-    int last_pocs[MAX_DELAYED_PIC_COUNT];
573
-    H264Picture *next_output_pic;
574
-    int next_outputed_poc;
575
-
576
-    /**
577
-     * memory management control operations buffer.
578
-     */
579
-    MMCO mmco[MAX_MMCO_COUNT];
580
-    int  nb_mmco;
581
-    int mmco_reset;
582
-    int explicit_ref_marking;
583
-
584
-    int long_ref_count;     ///< number of actual long term references
585
-    int short_ref_count;    ///< number of actual short term references
586
-
587
-    /**
588
-     * @name Members for slice based multithreading
589
-     * @{
590
-     */
591
-    /**
592
-     * current slice number, used to initialize slice_num of each thread/context
593
-     */
594
-    int current_slice;
595
-
596
-    /** @} */
597
-
598
-    /**
599
-     * Complement sei_pic_struct
600
-     * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
601
-     * However, soft telecined frames may have these values.
602
-     * This is used in an attempt to flag soft telecine progressive.
603
-     */
604
-    int prev_interlaced_frame;
605
-
606
-    /**
607
-     * recovery_frame is the frame_num at which the next frame should
608
-     * be fully constructed.
609
-     *
610
-     * Set to -1 when not expecting a recovery point.
611
-     */
612
-    int recovery_frame;
613
-
614
-/**
615
- * We have seen an IDR, so all the following frames in coded order are correctly
616
- * decodable.
617
- */
618
-#define FRAME_RECOVERED_IDR  (1 << 0)
619
-/**
620
- * Sufficient number of frames have been decoded since a SEI recovery point,
621
- * so all the following frames in presentation order are correct.
622
- */
623
-#define FRAME_RECOVERED_SEI  (1 << 1)
624
-
625
-    int frame_recovered;    ///< Initial frame has been completely recovered
626
-
627
-    /* for frame threading, this is set to 1
628
-     * after finish_setup() has been called, so we cannot modify
629
-     * some context properties (which are supposed to stay constant between
630
-     * slices) anymore */
631
-    int setup_finished;
632
-
633
-    int enable_er;
634
-
635
-    H264SEIContext sei;
636
-
637
-    AVBufferPool *qscale_table_pool;
638
-    AVBufferPool *mb_type_pool;
639
-    AVBufferPool *motion_val_pool;
640
-    AVBufferPool *ref_index_pool;
641
-    int ref2frm[MAX_SLICES][2][64];     ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
642
-} H264Context;
643
-
644
-extern const uint16_t ff_h264_mb_sizes[4];
645
-
646
-/**
647
- * Decode SPS
648
- */
649
-int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
650
-                                     H264ParamSets *ps);
651
-
652
-/**
653
- * Decode PPS
654
- */
655
-int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
656
-                                         H264ParamSets *ps, int bit_length);
657
-
658
-/**
659
- * Reconstruct bitstream slice_type.
660
- */
661
-int ff_h264_get_slice_type(const H264SliceContext *sl);
662
-
663
-/**
664
- * Allocate tables.
665
- * needs width/height
666
- */
667
-int ff_h264_alloc_tables(H264Context *h);
668
-
669
-int ff_h264_decode_ref_pic_list_reordering(const H264Context *h, H264SliceContext *sl);
670
-int ff_h264_build_ref_list(const H264Context *h, H264SliceContext *sl);
671
-void ff_h264_remove_all_refs(H264Context *h);
672
-
673
-/**
674
- * Execute the reference picture marking (memory management control operations).
675
- */
676
-int ff_h264_execute_ref_pic_marking(H264Context *h);
677
-
678
-int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl,
679
-                                   GetBitContext *gb);
680
-
681
-void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
682
-int ff_h264_decode_init(AVCodecContext *avctx);
683
-void ff_h264_decode_init_vlc(void);
684
-
685
-/**
686
- * Decode a macroblock
687
- * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
688
- */
689
-int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);
690
-
691
-/**
692
- * Decode a CABAC coded macroblock
693
- * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
694
- */
695
-int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
696
-
697
-void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
698
-
699
-void ff_h264_init_dequant_tables(H264Context *h);
700
-
701
-void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
702
-void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
703
-void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
704
-                                int *mb_type);
705
-
706
-void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
707
-                            uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
708
-                            unsigned int linesize, unsigned int uvlinesize);
709
-void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
710
-                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
711
-                       unsigned int linesize, unsigned int uvlinesize);
712
-
713
-/*
714
- * o-o o-o
715
- *  / / /
716
- * o-o o-o
717
- *  ,---'
718
- * o-o o-o
719
- *  / / /
720
- * o-o o-o
721
- */
722
-
723
-/* Scan8 organization:
724
- *    0 1 2 3 4 5 6 7
725
- * 0  DY    y y y y y
726
- * 1        y Y Y Y Y
727
- * 2        y Y Y Y Y
728
- * 3        y Y Y Y Y
729
- * 4        y Y Y Y Y
730
- * 5  DU    u u u u u
731
- * 6        u U U U U
732
- * 7        u U U U U
733
- * 8        u U U U U
734
- * 9        u U U U U
735
- * 10 DV    v v v v v
736
- * 11       v V V V V
737
- * 12       v V V V V
738
- * 13       v V V V V
739
- * 14       v V V V V
740
- * DY/DU/DV are for luma/chroma DC.
741
- */
742
-
743
-#define LUMA_DC_BLOCK_INDEX   48
744
-#define CHROMA_DC_BLOCK_INDEX 49
745
-
746
-// This table must be here because scan8[constant] must be known at compile time
747
-static const uint8_t scan8[16 * 3 + 3] = {
748
-    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
749
-    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
750
-    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
751
-    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
752
-    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
753
-    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
754
-    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
755
-    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
756
-    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
757
-    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
758
-    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
759
-    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
760
-    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
761
-};
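As a quick sanity check of the layout, here is a standalone sketch (not part of this change; the table is copied verbatim from above) that prints where each scan8[] entry lands in the 8-entry-wide cache rows, reproducing the Scan8 organization diagram:

#include <stdio.h>
#include <stdint.h>

/* Copy of scan8[] from the header above; every entry encodes column + row * 8. */
static const uint8_t scan8[16 * 3 + 3] = {
    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
};

int main(void)
{
    int i;

    for (i = 0; i < 16 * 3 + 3; i++)
        printf("block %2d -> cache row %2d, column %d\n",
               i, scan8[i] >> 3, scan8[i] & 7);
    return 0;
}

Entries 0-15 are the luma 4x4 blocks (diagram rows 1-4), 16-31 and 32-47 the two chroma planes (rows 6-9 and 11-14), and the last three are the DC positions at rows 0, 5 and 10 reached through LUMA_DC_BLOCK_INDEX and CHROMA_DC_BLOCK_INDEX.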
762
-
763
-static av_always_inline uint32_t pack16to32(int a, int b)
764
-{
765
-#if HAVE_BIGENDIAN
766
-    return (b & 0xFFFF) + (a << 16);
767
-#else
768
-    return (a & 0xFFFF) + (b << 16);
769
-#endif
770
-}
771
-
772
-static av_always_inline uint16_t pack8to16(int a, int b)
773
-{
774
-#if HAVE_BIGENDIAN
775
-    return (b & 0xFF) + (a << 8);
776
-#else
777
-    return (a & 0xFF) + (b << 8);
778
-#endif
779
-}
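These packers let a motion vector pair (or two reference indices) be splatted into the caches as one aligned word, which is how fill_rectangle() is used with pack16to32() in h264_er_decode_mb() in h264dec.c. A standalone round-trip sketch (pack16to32 copied from above, with an unsigned cast added to avoid shifting a negative value; the host is assumed little-endian unless BIG_ENDIAN_HOST is defined):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t pack16to32(int a, int b)
{
#ifdef BIG_ENDIAN_HOST
    return (b & 0xFFFF) + ((uint32_t)a << 16);
#else
    return (a & 0xFFFF) + ((uint32_t)b << 16);
#endif
}

int main(void)
{
    int16_t  mv[2];                      /* stands in for one mv_cache entry */
    uint32_t packed = pack16to32(-3, 7); /* mvx = -3, mvy = 7 */

    /* written as one 32-bit word, read back as two consecutive int16_t */
    memcpy(mv, &packed, sizeof(mv));
    printf("mvx = %d, mvy = %d\n", mv[0], mv[1]);
    return 0;
}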
780
-
781
-/**
782
- * Get the chroma qp.
783
- */
784
-static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
785
-{
786
-    return pps->chroma_qp_table[t][qscale];
787
-}
788
-
789
-/**
790
- * Get the predicted intra4x4 prediction mode.
791
- */
792
-static av_always_inline int pred_intra_mode(const H264Context *h,
793
-                                            H264SliceContext *sl, int n)
794
-{
795
-    const int index8 = scan8[n];
796
-    const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
797
-    const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
798
-    const int min    = FFMIN(left, top);
799
-
800
-    ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
801
-
802
-    if (min < 0)
803
-        return DC_PRED;
804
-    else
805
-        return min;
806
-}
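This is the standard most-probable-mode rule: take the smaller of the left and top neighbours' 4x4 intra modes and fall back to DC prediction when either neighbour is unavailable (stored as a negative value in the cache). A minimal standalone sketch of just that rule (DC_PRED is assumed to be 2, the spec's mode number for Intra_4x4_DC):

#include <stdio.h>

#define DC_PRED 2   /* assumed value, matching the spec's mode numbering */

static int pred_intra_mode(int left, int top)
{
    int min = left < top ? left : top;
    return min < 0 ? DC_PRED : min;     /* negative means "not available" */
}

int main(void)
{
    printf("%d\n", pred_intra_mode(0, 1));  /* both neighbours known -> 0  */
    printf("%d\n", pred_intra_mode(-1, 5)); /* left missing -> DC_PRED (2) */
    return 0;
}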
807
-
808
-static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
809
-                                                        H264SliceContext *sl)
810
-{
811
-    int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
812
-    int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
813
-
814
-    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
815
-    i4x4[4] = i4x4_cache[7 + 8 * 3];
816
-    i4x4[5] = i4x4_cache[7 + 8 * 2];
817
-    i4x4[6] = i4x4_cache[7 + 8 * 1];
818
-}
819
-
820
-static av_always_inline void write_back_non_zero_count(const H264Context *h,
821
-                                                       H264SliceContext *sl)
822
-{
823
-    const int mb_xy    = sl->mb_xy;
824
-    uint8_t *nnz       = h->non_zero_count[mb_xy];
825
-    uint8_t *nnz_cache = sl->non_zero_count_cache;
826
-
827
-    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
828
-    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
829
-    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
830
-    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
831
-    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
832
-    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
833
-    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
834
-    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
835
-
836
-    if (!h->chroma_y_shift) {
837
-        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
838
-        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
839
-        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
840
-        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
841
-    }
842
-}
843
-
844
-static av_always_inline void write_back_motion_list(const H264Context *h,
845
-                                                    H264SliceContext *sl,
846
-                                                    int b_stride,
847
-                                                    int b_xy, int b8_xy,
848
-                                                    int mb_type, int list)
849
-{
850
-    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
851
-    int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
852
-    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
853
-    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
854
-    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
855
-    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
856
-    if (CABAC(h)) {
857
-        uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
858
-                                                        : h->mb2br_xy[sl->mb_xy]];
859
-        uint8_t(*mvd_src)[2]  = &sl->mvd_cache[list][scan8[0]];
860
-        if (IS_SKIP(mb_type)) {
861
-            AV_ZERO128(mvd_dst);
862
-        } else {
863
-            AV_COPY64(mvd_dst, mvd_src + 8 * 3);
864
-            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
865
-            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
866
-            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
867
-        }
868
-    }
869
-
870
-    {
871
-        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
872
-        int8_t *ref_cache = sl->ref_cache[list];
873
-        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
874
-        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
875
-        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
876
-        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
877
-    }
878
-}
879
-
880
-static av_always_inline void write_back_motion(const H264Context *h,
881
-                                               H264SliceContext *sl,
882
-                                               int mb_type)
883
-{
884
-    const int b_stride      = h->b_stride;
885
-    const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
886
-    const int b8_xy = 4 * sl->mb_xy;
887
-
888
-    if (USES_LIST(mb_type, 0)) {
889
-        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
890
-    } else {
891
-        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
892
-                       2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
893
-    }
894
-    if (USES_LIST(mb_type, 1))
895
-        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
896
-
897
-    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
898
-        if (IS_8X8(mb_type)) {
899
-            uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
900
-            direct_table[1] = sl->sub_mb_type[1] >> 1;
901
-            direct_table[2] = sl->sub_mb_type[2] >> 1;
902
-            direct_table[3] = sl->sub_mb_type[3] >> 1;
903
-        }
904
-    }
905
-}
906
-
907
-static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
908
-{
909
-    if (h->ps.sps->direct_8x8_inference_flag)
910
-        return !(AV_RN64A(sl->sub_mb_type) &
911
-                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
912
-                  0x0001000100010001ULL));
913
-    else
914
-        return !(AV_RN64A(sl->sub_mb_type) &
915
-                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
916
-                  0x0001000100010001ULL));
917
-}
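The multiplication by 0x0001000100010001ULL replicates a 16-bit partition mask into all four 16-bit lanes of the 64-bit word that AV_RN64A() reads from sub_mb_type[], so a single AND tests every 8x8 partition at once. A standalone sketch of the lane trick with toy flag values (the real MB_TYPE_* constants are not reproduced here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const uint16_t FLAG_A = 0x0010, FLAG_B = 0x0020;        /* toy flags */
    uint16_t sub_mb_type[4] = { 0x0001, 0x0002, 0x0020, 0x0004 };
    uint64_t word, mask;

    memcpy(&word, sub_mb_type, sizeof(word));               /* like AV_RN64A() */
    mask = (uint64_t)(FLAG_A | FLAG_B) * 0x0001000100010001ULL;

    printf("replicated mask: 0x%016" PRIx64 "\n", mask);
    printf("some partition has A or B set: %s\n", (word & mask) ? "yes" : "no");
    return 0;
}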
918
-
919
-int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
920
-
921
-int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
922
-void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
923
-
924
-int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
925
-
926
-void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
927
-
928
-int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
929
-                                const H2645NAL *nal);
930
-int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
931
-int ff_h264_update_thread_context(AVCodecContext *dst,
932
-                                  const AVCodecContext *src);
933
-
934
-void ff_h264_flush_change(H264Context *h);
935
-
936
-void ff_h264_free_tables(H264Context *h);
937
-
938
-#endif /* AVCODEC_H264_H */
... ...
@@ -35,7 +35,7 @@
35 35
 #include "cabac_functions.h"
36 36
 #include "internal.h"
37 37
 #include "avcodec.h"
38
-#include "h264.h"
38
+#include "h264dec.h"
39 39
 #include "h264data.h"
40 40
 #include "h264_mvpred.h"
41 41
 #include "mpegutils.h"
... ...
@@ -29,7 +29,7 @@
29 29
 
30 30
 #include "internal.h"
31 31
 #include "avcodec.h"
32
-#include "h264.h"
32
+#include "h264dec.h"
33 33
 #include "h264_mvpred.h"
34 34
 #include "h264data.h"
35 35
 #include "golomb.h"
... ...
@@ -27,7 +27,7 @@
27 27
 
28 28
 #include "internal.h"
29 29
 #include "avcodec.h"
30
-#include "h264.h"
30
+#include "h264dec.h"
31 31
 #include "mpegutils.h"
32 32
 #include "rectangle.h"
33 33
 #include "thread.h"
... ...
@@ -29,7 +29,7 @@
29 29
 #include "libavutil/intreadwrite.h"
30 30
 #include "internal.h"
31 31
 #include "avcodec.h"
32
-#include "h264.h"
32
+#include "h264dec.h"
33 33
 #include "mathops.h"
34 34
 #include "mpegutils.h"
35 35
 #include "rectangle.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/common.h"
32 32
 #include "libavutil/intreadwrite.h"
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "qpeldsp.h"
36 36
 #include "thread.h"
37 37
 
... ...
@@ -19,7 +19,7 @@
19 19
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 20
  */
21 21
 
22
-#include "h264.h"
22
+#include "h264dec.h"
23 23
 
24 24
 #undef MCFUNC
25 25
 
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #include "internal.h"
32 32
 #include "avcodec.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "mpegutils.h"
35 35
 
36 36
 #include <assert.h>
... ...
@@ -19,7 +19,7 @@
19 19
 #include "bytestream.h"
20 20
 #include "get_bits.h"
21 21
 #include "golomb.h"
22
-#include "h264.h"
22
+#include "h264dec.h"
23 23
 #include "h264_parse.h"
24 24
 
25 25
 int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps,
... ...
@@ -36,7 +36,7 @@
36 36
 
37 37
 #include "get_bits.h"
38 38
 #include "golomb.h"
39
-#include "h264.h"
39
+#include "h264dec.h"
40 40
 #include "h264_sei.h"
41 41
 #include "h264data.h"
42 42
 #include "internal.h"
... ...
@@ -33,7 +33,7 @@
33 33
 #include "cabac_functions.h"
34 34
 #include "error_resilience.h"
35 35
 #include "avcodec.h"
36
-#include "h264.h"
36
+#include "h264dec.h"
37 37
 #include "h264data.h"
38 38
 #include "h264chroma.h"
39 39
 #include "h264_mvpred.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "internal.h"
32 32
 #include "mathops.h"
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "h264data.h"
36 36
 #include "golomb.h"
37 37
 
... ...
@@ -29,7 +29,7 @@
29 29
 
30 30
 #include "internal.h"
31 31
 #include "avcodec.h"
32
-#include "h264.h"
32
+#include "h264dec.h"
33 33
 #include "golomb.h"
34 34
 #include "mpegutils.h"
35 35
 
... ...
@@ -28,7 +28,7 @@
28 28
 #include "avcodec.h"
29 29
 #include "get_bits.h"
30 30
 #include "golomb.h"
31
-#include "h264.h"
31
+#include "h264dec.h"
32 32
 #include "h264_sei.h"
33 33
 #include "internal.h"
34 34
 
... ...
@@ -35,7 +35,7 @@
35 35
 #include "cabac_functions.h"
36 36
 #include "error_resilience.h"
37 37
 #include "avcodec.h"
38
-#include "h264.h"
38
+#include "h264dec.h"
39 39
 #include "h264data.h"
40 40
 #include "h264chroma.h"
41 41
 #include "h264_mvpred.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/avutil.h"
32 32
 
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "h264data.h"
36 36
 
37 37
 const uint8_t ff_h264_golomb_to_pict_type[5] = {
... ...
@@ -21,7 +21,7 @@
21 21
 
22 22
 #include <stdint.h>
23 23
 
24
-#include "h264.h"
24
+#include "h264dec.h"
25 25
 
26 26
 extern const uint8_t ff_h264_golomb_to_pict_type[5];
27 27
 extern const uint8_t ff_h264_golomb_to_intra4x4_cbp[48];
28 28
new file mode 100644
... ...
@@ -0,0 +1,982 @@
0
+/*
1
+ * H.26L/H.264/AVC/JVT/14496-10/... decoder
2
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * H.264 / AVC / MPEG-4 part10 codec.
24
+ * @author Michael Niedermayer <michaelni@gmx.at>
25
+ */
26
+
27
+#include "libavutil/display.h"
28
+#include "libavutil/imgutils.h"
29
+#include "libavutil/opt.h"
30
+#include "libavutil/stereo3d.h"
31
+#include "libavutil/timer.h"
32
+#include "internal.h"
33
+#include "bytestream.h"
34
+#include "cabac.h"
35
+#include "cabac_functions.h"
36
+#include "error_resilience.h"
37
+#include "avcodec.h"
38
+#include "h264dec.h"
39
+#include "h2645_parse.h"
40
+#include "h264data.h"
41
+#include "h264chroma.h"
42
+#include "h264_mvpred.h"
43
+#include "golomb.h"
44
+#include "mathops.h"
45
+#include "me_cmp.h"
46
+#include "mpegutils.h"
47
+#include "profiles.h"
48
+#include "rectangle.h"
49
+#include "thread.h"
50
+
51
+#include <assert.h>
52
+
53
+const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
54
+
55
+static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
56
+                              int (*mv)[2][4][2],
57
+                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
58
+{
59
+    H264Context *h = opaque;
60
+    H264SliceContext *sl = &h->slice_ctx[0];
61
+
62
+    sl->mb_x = mb_x;
63
+    sl->mb_y = mb_y;
64
+    sl->mb_xy = mb_x + mb_y * h->mb_stride;
65
+    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
66
+    assert(ref >= 0);
67
+    /* FIXME: It is possible albeit uncommon that slice references
68
+     * differ between slices. We take the easy approach and ignore
69
+     * it for now. If this turns out to have any relevance in
70
+     * practice then correct remapping should be added. */
71
+    if (ref >= sl->ref_count[0])
72
+        ref = 0;
73
+    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
74
+                   2, 2, 2, ref, 1);
75
+    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
76
+    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
77
+                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
78
+    assert(!FRAME_MBAFF(h));
79
+    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
80
+}
81
+
82
+void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
83
+                             int y, int height)
84
+{
85
+    AVCodecContext *avctx = h->avctx;
86
+    const AVFrame   *src  = h->cur_pic.f;
87
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
88
+    int vshift = desc->log2_chroma_h;
89
+    const int field_pic = h->picture_structure != PICT_FRAME;
90
+    if (field_pic) {
91
+        height <<= 1;
92
+        y      <<= 1;
93
+    }
94
+
95
+    height = FFMIN(height, avctx->height - y);
96
+
97
+    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
98
+        return;
99
+
100
+    if (avctx->draw_horiz_band) {
101
+        int offset[AV_NUM_DATA_POINTERS];
102
+        int i;
103
+
104
+        offset[0] = y * src->linesize[0];
105
+        offset[1] =
106
+        offset[2] = (y >> vshift) * src->linesize[1];
107
+        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
108
+            offset[i] = 0;
109
+
110
+        emms_c();
111
+
112
+        avctx->draw_horiz_band(avctx, src, offset,
113
+                               y, h->picture_structure, height);
114
+    }
115
+}
116
+
117
+void ff_h264_free_tables(H264Context *h)
118
+{
119
+    int i;
120
+
121
+    av_freep(&h->intra4x4_pred_mode);
122
+    av_freep(&h->chroma_pred_mode_table);
123
+    av_freep(&h->cbp_table);
124
+    av_freep(&h->mvd_table[0]);
125
+    av_freep(&h->mvd_table[1]);
126
+    av_freep(&h->direct_table);
127
+    av_freep(&h->non_zero_count);
128
+    av_freep(&h->slice_table_base);
129
+    h->slice_table = NULL;
130
+    av_freep(&h->list_counts);
131
+
132
+    av_freep(&h->mb2b_xy);
133
+    av_freep(&h->mb2br_xy);
134
+
135
+    av_buffer_pool_uninit(&h->qscale_table_pool);
136
+    av_buffer_pool_uninit(&h->mb_type_pool);
137
+    av_buffer_pool_uninit(&h->motion_val_pool);
138
+    av_buffer_pool_uninit(&h->ref_index_pool);
139
+
140
+    for (i = 0; i < h->nb_slice_ctx; i++) {
141
+        H264SliceContext *sl = &h->slice_ctx[i];
142
+
143
+        av_freep(&sl->dc_val_base);
144
+        av_freep(&sl->er.mb_index2xy);
145
+        av_freep(&sl->er.error_status_table);
146
+        av_freep(&sl->er.er_temp_buffer);
147
+
148
+        av_freep(&sl->bipred_scratchpad);
149
+        av_freep(&sl->edge_emu_buffer);
150
+        av_freep(&sl->top_borders[0]);
151
+        av_freep(&sl->top_borders[1]);
152
+
153
+        sl->bipred_scratchpad_allocated = 0;
154
+        sl->edge_emu_buffer_allocated   = 0;
155
+        sl->top_borders_allocated[0]    = 0;
156
+        sl->top_borders_allocated[1]    = 0;
157
+    }
158
+}
159
+
160
+int ff_h264_alloc_tables(H264Context *h)
161
+{
162
+    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
163
+    const int row_mb_num = h->mb_stride * 2 * h->nb_slice_ctx;
164
+    int x, y;
165
+
166
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
167
+                      row_mb_num * 8 * sizeof(uint8_t), fail)
168
+    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
169
+
170
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
171
+                      big_mb_num * 48 * sizeof(uint8_t), fail)
172
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
173
+                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
174
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
175
+                      big_mb_num * sizeof(uint16_t), fail)
176
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
177
+                      big_mb_num * sizeof(uint8_t), fail)
178
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
179
+                      16 * row_mb_num * sizeof(uint8_t), fail);
180
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
181
+                      16 * row_mb_num * sizeof(uint8_t), fail);
182
+    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
183
+    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
184
+
185
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
186
+                      4 * big_mb_num * sizeof(uint8_t), fail);
187
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
188
+                      big_mb_num * sizeof(uint8_t), fail)
189
+
190
+    memset(h->slice_table_base, -1,
191
+           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
192
+    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
193
+
194
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
195
+                      big_mb_num * sizeof(uint32_t), fail);
196
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
197
+                      big_mb_num * sizeof(uint32_t), fail);
198
+    for (y = 0; y < h->mb_height; y++)
199
+        for (x = 0; x < h->mb_width; x++) {
200
+            const int mb_xy = x + y * h->mb_stride;
201
+            const int b_xy  = 4 * x + 4 * y * h->b_stride;
202
+
203
+            h->mb2b_xy[mb_xy]  = b_xy;
204
+            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
205
+        }
206
+
207
+    return 0;
208
+
209
+fail:
210
+    ff_h264_free_tables(h);
211
+    return AVERROR(ENOMEM);
212
+}
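Note the two different sizes above: intra4x4_pred_mode and mvd_table[] use row_mb_num = h->mb_stride * 2 * h->nb_slice_ctx, while the other tables use big_mb_num. With FMO disabled (FMO == 0), mb2br_xy[] wraps the macroblock index modulo two rows, so those per-MB entries are recycled every other macroblock row. A standalone sketch of the wrap with toy dimensions (taking mb_stride as mb_width + 1 is an assumption made only for this illustration):

#include <stdio.h>

int main(void)
{
    const int mb_width = 4, mb_height = 3;
    const int mb_stride = mb_width + 1;   /* assumed padding of one MB per row */
    int x, y;

    for (y = 0; y < mb_height; y++)
        for (x = 0; x < mb_width; x++) {
            const int mb_xy = x + y * mb_stride;
            /* FMO == 0 branch of the assignment above */
            printf("mb(%d,%d): mb2br_xy = %3d\n",
                   x, y, 8 * (mb_xy % (2 * mb_stride)));
        }
    return 0;
}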
213
+
214
+/**
215
+ * Init context
216
+ * Allocate buffers which are not shared amongst multiple threads.
217
+ */
218
+int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
219
+{
220
+    ERContext *er = &sl->er;
221
+    int mb_array_size = h->mb_height * h->mb_stride;
222
+    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
223
+    int c_size  = h->mb_stride * (h->mb_height + 1);
224
+    int yc_size = y_size + 2   * c_size;
225
+    int x, y, i;
226
+
227
+    sl->ref_cache[0][scan8[5]  + 1] =
228
+    sl->ref_cache[0][scan8[7]  + 1] =
229
+    sl->ref_cache[0][scan8[13] + 1] =
230
+    sl->ref_cache[1][scan8[5]  + 1] =
231
+    sl->ref_cache[1][scan8[7]  + 1] =
232
+    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
233
+
234
+    if (CONFIG_ERROR_RESILIENCE) {
235
+        /* init ER */
236
+        er->avctx          = h->avctx;
237
+        er->decode_mb      = h264_er_decode_mb;
238
+        er->opaque         = h;
239
+        er->quarter_sample = 1;
240
+
241
+        er->mb_num      = h->mb_num;
242
+        er->mb_width    = h->mb_width;
243
+        er->mb_height   = h->mb_height;
244
+        er->mb_stride   = h->mb_stride;
245
+        er->b8_stride   = h->mb_width * 2 + 1;
246
+
247
+        // error resilience code looks cleaner with this
248
+        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
249
+                          (h->mb_num + 1) * sizeof(int), fail);
250
+
251
+        for (y = 0; y < h->mb_height; y++)
252
+            for (x = 0; x < h->mb_width; x++)
253
+                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
254
+
255
+        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
256
+                                                      h->mb_stride + h->mb_width;
257
+
258
+        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
259
+                          mb_array_size * sizeof(uint8_t), fail);
260
+
261
+        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
262
+                         h->mb_height * h->mb_stride, fail);
263
+
264
+        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
265
+                          yc_size * sizeof(int16_t), fail);
266
+        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
267
+        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
268
+        er->dc_val[2] = er->dc_val[1] + c_size;
269
+        for (i = 0; i < yc_size; i++)
270
+            sl->dc_val_base[i] = 1024;
271
+    }
272
+
273
+    return 0;
274
+
275
+fail:
276
+    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
277
+}
278
+
279
+static int h264_init_context(AVCodecContext *avctx, H264Context *h)
280
+{
281
+    int i;
282
+
283
+    h->avctx                 = avctx;
284
+
285
+    h->picture_structure     = PICT_FRAME;
286
+    h->workaround_bugs       = avctx->workaround_bugs;
287
+    h->flags                 = avctx->flags;
288
+    h->poc.prev_poc_msb      = 1 << 16;
289
+    h->recovery_frame        = -1;
290
+    h->frame_recovered       = 0;
291
+
292
+    h->next_outputed_poc = INT_MIN;
293
+    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
294
+        h->last_pocs[i] = INT_MIN;
295
+
296
+    ff_h264_sei_uninit(&h->sei);
297
+
298
+    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
299
+
300
+    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
301
+    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
302
+    if (!h->slice_ctx) {
303
+        h->nb_slice_ctx = 0;
304
+        return AVERROR(ENOMEM);
305
+    }
306
+
307
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
308
+        h->DPB[i].f = av_frame_alloc();
309
+        if (!h->DPB[i].f)
310
+            return AVERROR(ENOMEM);
311
+    }
312
+
313
+    h->cur_pic.f = av_frame_alloc();
314
+    if (!h->cur_pic.f)
315
+        return AVERROR(ENOMEM);
316
+
317
+    for (i = 0; i < h->nb_slice_ctx; i++)
318
+        h->slice_ctx[i].h264 = h;
319
+
320
+    return 0;
321
+}
322
+
323
+static av_cold int h264_decode_end(AVCodecContext *avctx)
324
+{
325
+    H264Context *h = avctx->priv_data;
326
+    int i;
327
+
328
+    ff_h264_free_tables(h);
329
+
330
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
331
+        ff_h264_unref_picture(h, &h->DPB[i]);
332
+        av_frame_free(&h->DPB[i].f);
333
+    }
334
+
335
+    h->cur_pic_ptr = NULL;
336
+
337
+    av_freep(&h->slice_ctx);
338
+    h->nb_slice_ctx = 0;
339
+
340
+    for (i = 0; i < MAX_SPS_COUNT; i++)
341
+        av_buffer_unref(&h->ps.sps_list[i]);
342
+
343
+    for (i = 0; i < MAX_PPS_COUNT; i++)
344
+        av_buffer_unref(&h->ps.pps_list[i]);
345
+
346
+    ff_h2645_packet_uninit(&h->pkt);
347
+
348
+    ff_h264_unref_picture(h, &h->cur_pic);
349
+    av_frame_free(&h->cur_pic.f);
350
+
351
+    return 0;
352
+}
353
+
354
+static AVOnce h264_vlc_init = AV_ONCE_INIT;
355
+
356
+av_cold int ff_h264_decode_init(AVCodecContext *avctx)
357
+{
358
+    H264Context *h = avctx->priv_data;
359
+    int ret;
360
+
361
+    ret = h264_init_context(avctx, h);
362
+    if (ret < 0)
363
+        return ret;
364
+
365
+    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
366
+    if (ret != 0) {
367
+        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
368
+        return AVERROR_UNKNOWN;
369
+    }
370
+
371
+    if (avctx->codec_id == AV_CODEC_ID_H264) {
372
+        if (avctx->ticks_per_frame == 1)
373
+            h->avctx->framerate.num *= 2;
374
+        avctx->ticks_per_frame = 2;
375
+    }
376
+
377
+    if (avctx->extradata_size > 0 && avctx->extradata) {
378
+       ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
379
+                                      &h->ps, &h->is_avc, &h->nal_length_size,
380
+                                      avctx->err_recognition, avctx);
381
+       if (ret < 0) {
382
+           h264_decode_end(avctx);
383
+           return ret;
384
+       }
385
+    }
386
+
387
+    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
388
+        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
389
+        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
390
+    }
391
+
392
+    avctx->internal->allocate_progress = 1;
393
+
394
+    if (h->enable_er) {
395
+        av_log(avctx, AV_LOG_WARNING,
396
+               "Error resilience is enabled. It is unsafe and unsupported and may crash. "
397
+               "Use it at your own risk\n");
398
+    }
399
+
400
+    return 0;
401
+}
402
+
403
+static int decode_init_thread_copy(AVCodecContext *avctx)
404
+{
405
+    H264Context *h = avctx->priv_data;
406
+    int ret;
407
+
408
+    if (!avctx->internal->is_copy)
409
+        return 0;
410
+
411
+    memset(h, 0, sizeof(*h));
412
+
413
+    ret = h264_init_context(avctx, h);
414
+    if (ret < 0)
415
+        return ret;
416
+
417
+    h->context_initialized = 0;
418
+
419
+    return 0;
420
+}
421
+
422
+/**
423
+ * Run setup operations that must be run after slice header decoding.
424
+ * This includes finding the next displayed frame.
425
+ *
426
+ * @param h h264 master context
427
+ * @param setup_finished enough NALs have been read that we can call
428
+ * ff_thread_finish_setup()
429
+ */
430
+static void decode_postinit(H264Context *h, int setup_finished)
431
+{
432
+    const SPS *sps = h->ps.sps;
433
+    H264Picture *out = h->cur_pic_ptr;
434
+    H264Picture *cur = h->cur_pic_ptr;
435
+    int i, pics, out_of_order, out_idx;
436
+    int invalid = 0, cnt = 0;
437
+
438
+    if (h->next_output_pic)
439
+        return;
440
+
441
+    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
442
+        /* FIXME: if we have two PAFF fields in one packet, we can't start
443
+         * the next thread here. If we have one field per packet, we can.
444
+         * The check in decode_nal_units() is not good enough to find this
445
+         * yet, so we assume the worst for now. */
446
+        // if (setup_finished)
447
+        //    ff_thread_finish_setup(h->avctx);
448
+        return;
449
+    }
450
+
451
+    // FIXME do something with unavailable reference frames
452
+
453
+    /* Sort B-frames into display order */
454
+    if (sps->bitstream_restriction_flag ||
455
+        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
456
+        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
457
+    }
458
+
459
+    pics = 0;
460
+    while (h->delayed_pic[pics])
461
+        pics++;
462
+
463
+    assert(pics <= MAX_DELAYED_PIC_COUNT);
464
+
465
+    h->delayed_pic[pics++] = cur;
466
+    if (cur->reference == 0)
467
+        cur->reference = DELAYED_PIC_REF;
468
+
469
+    /* Frame reordering. This code takes pictures from coding order and sorts
470
+     * them by their incremental POC value into display order. It supports POC
471
+     * gaps, MMCO reset codes and random resets.
472
+     * A "display group" can start either with a IDR frame (f.key_frame = 1),
473
+     * and/or be closed down with an MMCO reset code. In sequences where
474
+     * there is no delay, we can't detect that (since the frame was already
475
+     * output to the user), so we also set h->mmco_reset to detect the MMCO
476
+     * reset code.
477
+     * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
478
+     * we increase the delay between input and output. All frames affected by
479
+     * the lag (e.g. those that should have been output before another frame
480
+     * that we already returned to the user) will be dropped. This is a bug
481
+     * that we will fix later. */
482
+    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
483
+        cnt     += out->poc < h->last_pocs[i];
484
+        invalid += out->poc == INT_MIN;
485
+    }
486
+    if (!h->mmco_reset && !cur->f->key_frame &&
487
+        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
488
+        h->mmco_reset = 2;
489
+        if (pics > 1)
490
+            h->delayed_pic[pics - 2]->mmco_reset = 2;
491
+    }
492
+    if (h->mmco_reset || cur->f->key_frame) {
493
+        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
494
+            h->last_pocs[i] = INT_MIN;
495
+        cnt     = 0;
496
+        invalid = MAX_DELAYED_PIC_COUNT;
497
+    }
498
+    out     = h->delayed_pic[0];
499
+    out_idx = 0;
500
+    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
501
+                h->delayed_pic[i] &&
502
+                !h->delayed_pic[i - 1]->mmco_reset &&
503
+                !h->delayed_pic[i]->f->key_frame;
504
+         i++)
505
+        if (h->delayed_pic[i]->poc < out->poc) {
506
+            out     = h->delayed_pic[i];
507
+            out_idx = i;
508
+        }
509
+    if (h->avctx->has_b_frames == 0 &&
510
+        (h->delayed_pic[0]->f->key_frame || h->mmco_reset))
511
+        h->next_outputed_poc = INT_MIN;
512
+    out_of_order = !out->f->key_frame && !h->mmco_reset &&
513
+                   (out->poc < h->next_outputed_poc);
514
+
515
+    if (sps->bitstream_restriction_flag &&
516
+        h->avctx->has_b_frames >= sps->num_reorder_frames) {
517
+    } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
518
+               h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
519
+        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
520
+            h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
521
+        }
522
+    } else if (!h->avctx->has_b_frames &&
523
+               ((h->next_outputed_poc != INT_MIN &&
524
+                 out->poc > h->next_outputed_poc + 2) ||
525
+                cur->f->pict_type == AV_PICTURE_TYPE_B)) {
526
+        h->avctx->has_b_frames++;
527
+    }
528
+
529
+    if (pics > h->avctx->has_b_frames) {
530
+        out->reference &= ~DELAYED_PIC_REF;
531
+        for (i = out_idx; h->delayed_pic[i]; i++)
532
+            h->delayed_pic[i] = h->delayed_pic[i + 1];
533
+    }
534
+    memmove(h->last_pocs, &h->last_pocs[1],
535
+            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
536
+    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
537
+    if (!out_of_order && pics > h->avctx->has_b_frames) {
538
+        h->next_output_pic = out;
539
+        if (out->mmco_reset) {
540
+            if (out_idx > 0) {
541
+                h->next_outputed_poc                    = out->poc;
542
+                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
543
+            } else {
544
+                h->next_outputed_poc = INT_MIN;
545
+            }
546
+        } else {
547
+            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) {
548
+                h->next_outputed_poc = INT_MIN;
549
+            } else {
550
+                h->next_outputed_poc = out->poc;
551
+            }
552
+        }
553
+        h->mmco_reset = 0;
554
+    } else {
555
+        av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
556
+    }
557
+
558
+    if (h->next_output_pic) {
559
+        if (h->next_output_pic->recovered) {
560
+            // We have reached a recovery point and all frames after it in
561
+            // display order are "recovered".
562
+            h->frame_recovered |= FRAME_RECOVERED_SEI;
563
+        }
564
+        h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
565
+    }
566
+
567
+    if (setup_finished && !h->avctx->hwaccel) {
568
+        ff_thread_finish_setup(h->avctx);
569
+
570
+        if (h->avctx->active_thread_type & FF_THREAD_FRAME)
571
+            h->setup_finished = 1;
572
+    }
573
+}
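Stripped of MMCO resets, POC gaps and field handling, the reordering above comes down to: park pictures (which arrive in coding order) in a delay line and release the lowest-POC one once more than has_b_frames of them are waiting. A standalone sketch of that core with toy POC values:

#include <stdio.h>

#define MAX_DELAY 16

static int delayed[MAX_DELAY];
static int npics;

static void push_and_maybe_output(int poc, int has_b_frames)
{
    int i, out_idx = 0;

    delayed[npics++] = poc;
    if (npics <= has_b_frames)
        return;                          /* not enough delay built up yet */

    for (i = 1; i < npics; i++)          /* pick the lowest POC */
        if (delayed[i] < delayed[out_idx])
            out_idx = i;

    printf("output POC %d\n", delayed[out_idx]);
    for (i = out_idx; i < npics - 1; i++)
        delayed[i] = delayed[i + 1];
    npics--;
}

int main(void)
{
    /* toy coding-order POC values: I, P, B, B, P, B, B */
    static const int coded[] = { 0, 6, 2, 4, 12, 8, 10 };
    int i;

    for (i = 0; i < 7; i++)
        push_and_maybe_output(coded[i], 2);
    return 0;
}

At end of stream the remaining entries are drained in the same ascending order, which is what the buf_size == 0 path of h264_decode_frame() further down does with delayed_pic[].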
574
+
575
+/**
576
+ * instantaneous decoder refresh.
577
+ */
578
+static void idr(H264Context *h)
579
+{
580
+    ff_h264_remove_all_refs(h);
581
+    h->poc.prev_frame_num        =
582
+    h->poc.prev_frame_num_offset =
583
+    h->poc.prev_poc_msb          =
584
+    h->poc.prev_poc_lsb          = 0;
585
+}
586
+
587
+/* forget old pics after a seek */
588
+void ff_h264_flush_change(H264Context *h)
589
+{
590
+    int i;
591
+    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
592
+        h->last_pocs[i] = INT_MIN;
593
+    h->next_outputed_poc = INT_MIN;
594
+    h->prev_interlaced_frame = 1;
595
+    idr(h);
596
+    if (h->cur_pic_ptr)
597
+        h->cur_pic_ptr->reference = 0;
598
+    h->first_field = 0;
599
+    ff_h264_sei_uninit(&h->sei);
600
+    h->recovery_frame = -1;
601
+    h->frame_recovered = 0;
602
+}
603
+
604
+/* forget old pics after a seek */
605
+static void flush_dpb(AVCodecContext *avctx)
606
+{
607
+    H264Context *h = avctx->priv_data;
608
+    int i;
609
+
610
+    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
611
+
612
+    ff_h264_flush_change(h);
613
+
614
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
615
+        ff_h264_unref_picture(h, &h->DPB[i]);
616
+    h->cur_pic_ptr = NULL;
617
+    ff_h264_unref_picture(h, &h->cur_pic);
618
+
619
+    h->mb_y = 0;
620
+
621
+    ff_h264_free_tables(h);
622
+    h->context_initialized = 0;
623
+}
624
+
625
+static int get_last_needed_nal(H264Context *h)
626
+{
627
+    int nals_needed = 0;
628
+    int i;
629
+
630
+    for (i = 0; i < h->pkt.nb_nals; i++) {
631
+        H2645NAL *nal = &h->pkt.nals[i];
632
+        GetBitContext gb;
633
+
634
+        /* packets can sometimes contain multiple PPS/SPS,
635
+         * e.g. two PAFF field pictures in one packet, or a demuxer
636
+         * which splits NALs strangely; if so, when frame threading we
637
+         * can't start the next thread until we've read all of them */
638
+        switch (nal->type) {
639
+        case NAL_SPS:
640
+        case NAL_PPS:
641
+            nals_needed = i;
642
+            break;
643
+        case NAL_DPA:
644
+        case NAL_IDR_SLICE:
645
+        case NAL_SLICE:
646
+            init_get_bits(&gb, nal->data + 1, (nal->size - 1) * 8);
647
+            if (!get_ue_golomb(&gb))
648
+                nals_needed = i;
649
+        }
650
+    }
651
+
652
+    return nals_needed;
653
+}
654
+
655
+static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
656
+{
657
+    AVCodecContext *const avctx = h->avctx;
658
+    unsigned context_count = 0;
659
+    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
660
+    int i, ret = 0;
661
+
662
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
663
+        h->current_slice = 0;
664
+        if (!h->first_field)
665
+            h->cur_pic_ptr = NULL;
666
+        ff_h264_sei_uninit(&h->sei);
667
+    }
668
+
669
+    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
670
+                                h->nal_length_size, avctx->codec_id);
671
+    if (ret < 0) {
672
+        av_log(avctx, AV_LOG_ERROR,
673
+               "Error splitting the input into NAL units.\n");
674
+        return ret;
675
+    }
676
+
677
+    if (avctx->active_thread_type & FF_THREAD_FRAME)
678
+        nals_needed = get_last_needed_nal(h);
679
+
680
+    for (i = 0; i < h->pkt.nb_nals; i++) {
681
+        H2645NAL *nal = &h->pkt.nals[i];
682
+        H264SliceContext *sl = &h->slice_ctx[context_count];
683
+        int err;
684
+
685
+        if (avctx->skip_frame >= AVDISCARD_NONREF &&
686
+            nal->ref_idc == 0 && nal->type != NAL_SEI)
687
+            continue;
688
+
689
+        // FIXME these should stop being context-global variables
690
+        h->nal_ref_idc   = nal->ref_idc;
691
+        h->nal_unit_type = nal->type;
692
+
693
+        err = 0;
694
+        switch (nal->type) {
695
+        case NAL_IDR_SLICE:
696
+            if (nal->type != NAL_IDR_SLICE) {
697
+                av_log(h->avctx, AV_LOG_ERROR,
698
+                       "Invalid mix of idr and non-idr slices\n");
699
+                ret = -1;
700
+                goto end;
701
+            }
702
+            idr(h); // FIXME ensure we don't lose some frames if there is reordering
703
+        case NAL_SLICE:
704
+            sl->gb = nal->gb;
705
+
706
+            if ((err = ff_h264_decode_slice_header(h, sl, nal)))
707
+                break;
708
+
709
+            if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
710
+                h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
711
+                                    ((1 << h->ps.sps->log2_max_frame_num) - 1);
712
+            }
713
+
714
+            h->cur_pic_ptr->f->key_frame |=
715
+                (nal->type == NAL_IDR_SLICE) || (h->sei.recovery_point.recovery_frame_cnt >= 0);
716
+
717
+            if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
718
+                h->recovery_frame         = -1;
719
+                h->cur_pic_ptr->recovered = 1;
720
+            }
721
+            // If we have an IDR, all frames after it in decoded order are
722
+            // "recovered".
723
+            if (nal->type == NAL_IDR_SLICE)
724
+                h->frame_recovered |= FRAME_RECOVERED_IDR;
725
+            h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
726
+
727
+            if (h->current_slice == 1) {
728
+                if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
729
+                    decode_postinit(h, i >= nals_needed);
730
+
731
+                if (h->avctx->hwaccel &&
732
+                    (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
733
+                    return ret;
734
+            }
735
+
736
+            if (sl->redundant_pic_count == 0 &&
737
+                (avctx->skip_frame < AVDISCARD_NONREF || nal->ref_idc) &&
738
+                (avctx->skip_frame < AVDISCARD_BIDIR  ||
739
+                 sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
740
+                (avctx->skip_frame < AVDISCARD_NONKEY ||
741
+                 h->cur_pic_ptr->f->key_frame) &&
742
+                avctx->skip_frame < AVDISCARD_ALL) {
743
+                if (avctx->hwaccel) {
744
+                    ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
745
+                    if (ret < 0)
746
+                        return ret;
747
+                } else
748
+                    context_count++;
749
+            }
750
+            break;
751
+        case NAL_DPA:
752
+        case NAL_DPB:
753
+        case NAL_DPC:
754
+            avpriv_request_sample(avctx, "data partitioning");
755
+            ret = AVERROR(ENOSYS);
756
+            goto end;
757
+            break;
758
+        case NAL_SEI:
759
+            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
760
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
761
+                goto end;
762
+            break;
763
+        case NAL_SPS:
764
+            ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
765
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
766
+                goto end;
767
+            break;
768
+        case NAL_PPS:
769
+            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
770
+                                                       nal->size_bits);
771
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
772
+                goto end;
773
+            break;
774
+        case NAL_AUD:
775
+        case NAL_END_SEQUENCE:
776
+        case NAL_END_STREAM:
777
+        case NAL_FILLER_DATA:
778
+        case NAL_SPS_EXT:
779
+        case NAL_AUXILIARY_SLICE:
780
+            break;
781
+        default:
782
+            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
783
+                   nal->type, nal->size_bits);
784
+        }
785
+
786
+        if (context_count == h->nb_slice_ctx) {
787
+            ret = ff_h264_execute_decode_slices(h, context_count);
788
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
789
+                goto end;
790
+            context_count = 0;
791
+        }
792
+
793
+        if (err < 0) {
794
+            av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
795
+            sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
796
+        }
797
+    }
798
+    if (context_count) {
799
+        ret = ff_h264_execute_decode_slices(h, context_count);
800
+        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
801
+            goto end;
802
+    }
803
+
804
+    ret = 0;
805
+end:
806
+    /* clean up */
807
+    if (h->cur_pic_ptr && !h->droppable) {
808
+        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
809
+                                  h->picture_structure == PICT_BOTTOM_FIELD);
810
+    }
811
+
812
+    return (ret < 0) ? ret : buf_size;
813
+}
814
+
815
+/**
816
+ * Return the number of bytes consumed for building the current frame.
817
+ */
818
+static int get_consumed_bytes(int pos, int buf_size)
819
+{
820
+    if (pos == 0)
821
+        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
822
+    if (pos + 10 > buf_size)
823
+        pos = buf_size; // oops ;)
824
+
825
+    return pos;
826
+}
827
+
828
+static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
829
+{
830
+    int i;
831
+    int ret = av_frame_ref(dst, src);
832
+    if (ret < 0)
833
+        return ret;
834
+
835
+    if (!h->ps.sps || !h->ps.sps->crop)
836
+        return 0;
837
+
838
+    for (i = 0; i < 3; i++) {
839
+        int hshift = (i > 0) ? h->chroma_x_shift : 0;
840
+        int vshift = (i > 0) ? h->chroma_y_shift : 0;
841
+        int off    = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) +
842
+                     (h->ps.sps->crop_top >> vshift) * dst->linesize[i];
843
+        dst->data[i] += off;
844
+    }
845
+    return 0;
846
+}
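The cropping loop only advances the plane pointers; no pixels are copied. A standalone sketch of the offset arithmetic for one made-up case (8-bit 4:2:0, so pixel_shift = 0 and both chroma shifts are 1; strides and crop values are toy numbers):

#include <stdio.h>

int main(void)
{
    const int crop_left = 2, crop_top = 2, pixel_shift = 0;
    const int linesize[3] = { 2048, 1024, 1024 };   /* toy strides */
    int i;

    for (i = 0; i < 3; i++) {
        int hshift = (i > 0) ? 1 : 0;               /* 4:2:0 chroma shifts */
        int vshift = (i > 0) ? 1 : 0;
        int off    = ((crop_left >> hshift) << pixel_shift) +
                     (crop_top >> vshift) * linesize[i];
        printf("plane %d: data pointer advanced by %d bytes\n", i, off);
    }
    return 0;
}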
847
+
848
+static int h264_decode_frame(AVCodecContext *avctx, void *data,
849
+                             int *got_frame, AVPacket *avpkt)
850
+{
851
+    const uint8_t *buf = avpkt->data;
852
+    int buf_size       = avpkt->size;
853
+    H264Context *h     = avctx->priv_data;
854
+    AVFrame *pict      = data;
855
+    int buf_index      = 0;
856
+    int ret;
857
+    const uint8_t *new_extradata;
858
+    int new_extradata_size;
859
+
860
+    h->flags = avctx->flags;
861
+    h->setup_finished = 0;
862
+
863
+    /* end of stream, output what is still in the buffers */
864
+out:
865
+    if (buf_size == 0) {
866
+        H264Picture *out;
867
+        int i, out_idx;
868
+
869
+        h->cur_pic_ptr = NULL;
870
+
871
+        // FIXME factorize this with the output code below
872
+        out     = h->delayed_pic[0];
873
+        out_idx = 0;
874
+        for (i = 1;
875
+             h->delayed_pic[i] &&
876
+             !h->delayed_pic[i]->f->key_frame &&
877
+             !h->delayed_pic[i]->mmco_reset;
878
+             i++)
879
+            if (h->delayed_pic[i]->poc < out->poc) {
880
+                out     = h->delayed_pic[i];
881
+                out_idx = i;
882
+            }
883
+
884
+        for (i = out_idx; h->delayed_pic[i]; i++)
885
+            h->delayed_pic[i] = h->delayed_pic[i + 1];
886
+
887
+        if (out) {
888
+            ret = output_frame(h, pict, out->f);
889
+            if (ret < 0)
890
+                return ret;
891
+            *got_frame = 1;
892
+        }
893
+
894
+        return buf_index;
895
+    }
896
+
897
+    new_extradata_size = 0;
898
+    new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
899
+                                            &new_extradata_size);
900
+    if (new_extradata_size > 0 && new_extradata) {
901
+        ret = ff_h264_decode_extradata(new_extradata, new_extradata_size,
902
+                                       &h->ps, &h->is_avc, &h->nal_length_size,
903
+                                       avctx->err_recognition, avctx);
904
+        if (ret < 0)
905
+            return ret;
906
+    }
907
+
908
+    buf_index = decode_nal_units(h, buf, buf_size);
909
+    if (buf_index < 0)
910
+        return AVERROR_INVALIDDATA;
911
+
912
+    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
913
+        buf_size = 0;
914
+        goto out;
915
+    }
916
+
917
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
918
+        if (avctx->skip_frame >= AVDISCARD_NONREF)
919
+            return 0;
920
+        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
921
+        return AVERROR_INVALIDDATA;
922
+    }
923
+
924
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
925
+        (h->mb_y >= h->mb_height && h->mb_height)) {
926
+        if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
927
+            decode_postinit(h, 1);
928
+
929
+        ff_h264_field_end(h, &h->slice_ctx[0], 0);
930
+
931
+        *got_frame = 0;
932
+        if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
933
+                                   h->next_output_pic->recovered)) {
934
+            if (!h->next_output_pic->recovered)
935
+                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
936
+
937
+            ret = output_frame(h, pict, h->next_output_pic->f);
938
+            if (ret < 0)
939
+                return ret;
940
+            *got_frame = 1;
941
+        }
942
+    }
943
+
944
+    assert(pict->buf[0] || !*got_frame);
945
+
946
+    return get_consumed_bytes(buf_index, buf_size);
947
+}
948
+
949
+#define OFFSET(x) offsetof(H264Context, x)
950
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
951
+static const AVOption h264_options[] = {
952
+    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
953
+    { NULL },
954
+};
955
+
956
+static const AVClass h264_class = {
957
+    .class_name = "h264",
958
+    .item_name  = av_default_item_name,
959
+    .option     = h264_options,
960
+    .version    = LIBAVUTIL_VERSION_INT,
961
+};
962
+
963
+AVCodec ff_h264_decoder = {
964
+    .name                  = "h264",
965
+    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
966
+    .type                  = AVMEDIA_TYPE_VIDEO,
967
+    .id                    = AV_CODEC_ID_H264,
968
+    .priv_data_size        = sizeof(H264Context),
969
+    .init                  = ff_h264_decode_init,
970
+    .close                 = h264_decode_end,
971
+    .decode                = h264_decode_frame,
972
+    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
973
+                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
974
+                             AV_CODEC_CAP_FRAME_THREADS,
975
+    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
976
+    .flush                 = flush_dpb,
977
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
978
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
979
+    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
980
+    .priv_class            = &h264_class,
981
+};
0 982
new file mode 100644
... ...
@@ -0,0 +1,938 @@
0
+/*
1
+ * H.26L/H.264/AVC/JVT/14496-10/... decoder
2
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * H.264 / AVC / MPEG-4 part10 codec.
24
+ * @author Michael Niedermayer <michaelni@gmx.at>
25
+ */
26
+
27
+#ifndef AVCODEC_H264DEC_H
28
+#define AVCODEC_H264DEC_H
29
+
30
+#include "libavutil/buffer.h"
31
+#include "libavutil/intreadwrite.h"
32
+#include "libavutil/thread.h"
33
+
34
+#include "cabac.h"
35
+#include "error_resilience.h"
36
+#include "h264_parse.h"
37
+#include "h264_sei.h"
38
+#include "h2645_parse.h"
39
+#include "h264chroma.h"
40
+#include "h264dsp.h"
41
+#include "h264pred.h"
42
+#include "h264qpel.h"
43
+#include "internal.h"
44
+#include "mpegutils.h"
45
+#include "parser.h"
46
+#include "qpeldsp.h"
47
+#include "rectangle.h"
48
+#include "videodsp.h"
49
+
50
+#define H264_MAX_PICTURE_COUNT 32
51
+
52
+#define MAX_SPS_COUNT          32
53
+#define MAX_PPS_COUNT         256
54
+
55
+#define MAX_MMCO_COUNT         66
56
+
57
+#define MAX_DELAYED_PIC_COUNT  16
58
+
59
+/* Compiling in interlaced support reduces the speed
60
+ * of progressive decoding by about 2%. */
61
+#define ALLOW_INTERLACE
62
+
63
+#define FMO 0
64
+
65
+/**
66
+ * The maximum number of slices supported by the decoder.
67
+ * Must be a power of 2.
68
+ */
69
+#define MAX_SLICES 32
70
+
71
+#ifdef ALLOW_INTERLACE
72
+#define MB_MBAFF(h)    h->mb_mbaff
73
+#define MB_FIELD(h)    h->mb_field_decoding_flag
74
+#define FRAME_MBAFF(h) h->mb_aff_frame
75
+#define FIELD_PICTURE(h) (h->picture_structure != PICT_FRAME)
76
+#define LEFT_MBS 2
77
+#define LTOP     0
78
+#define LBOT     1
79
+#define LEFT(i)  (i)
80
+#else
81
+#define MB_MBAFF(h)      0
82
+#define MB_FIELD(h)      0
83
+#define FRAME_MBAFF(h)   0
84
+#define FIELD_PICTURE(h) 0
85
+#undef  IS_INTERLACED
86
+#define IS_INTERLACED(mb_type) 0
87
+#define LEFT_MBS 1
88
+#define LTOP     0
89
+#define LBOT     0
90
+#define LEFT(i)  0
91
+#endif
92
+#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
93
+
94
+#ifndef CABAC
95
+#define CABAC(h) h->ps.pps->cabac
96
+#endif
97
+
98
+#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
99
+#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
100
+
101
+#define EXTENDED_SAR       255
102
+
103
+#define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty, but it fits in 16 bits
104
+#define MB_TYPE_8x8DCT     0x01000000
105
+#define IS_REF0(a)         ((a) & MB_TYPE_REF0)
106
+#define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)
107
+
108
+#define QP_MAX_NUM (51 + 2 * 6)           // The maximum supported qp
109
+
110
+/* NAL unit types */
111
+enum {
112
+    NAL_SLICE           = 1,
113
+    NAL_DPA             = 2,
114
+    NAL_DPB             = 3,
115
+    NAL_DPC             = 4,
116
+    NAL_IDR_SLICE       = 5,
117
+    NAL_SEI             = 6,
118
+    NAL_SPS             = 7,
119
+    NAL_PPS             = 8,
120
+    NAL_AUD             = 9,
121
+    NAL_END_SEQUENCE    = 10,
122
+    NAL_END_STREAM      = 11,
123
+    NAL_FILLER_DATA     = 12,
124
+    NAL_SPS_EXT         = 13,
125
+    NAL_AUXILIARY_SLICE = 19,
126
+};
127
+
128
+/**
129
+ * Sequence parameter set
130
+ */
131
+typedef struct SPS {
132
+    unsigned int sps_id;
133
+    int profile_idc;
134
+    int level_idc;
135
+    int chroma_format_idc;
136
+    int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
137
+    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
138
+    int poc_type;                      ///< pic_order_cnt_type
139
+    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
140
+    int delta_pic_order_always_zero_flag;
141
+    int offset_for_non_ref_pic;
142
+    int offset_for_top_to_bottom_field;
143
+    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
144
+    int ref_frame_count;               ///< num_ref_frames
145
+    int gaps_in_frame_num_allowed_flag;
146
+    int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
147
+    int mb_height;                     ///< pic_height_in_map_units_minus1 + 1
148
+    int frame_mbs_only_flag;
149
+    int mb_aff;                        ///< mb_adaptive_frame_field_flag
150
+    int direct_8x8_inference_flag;
151
+    int crop;                          ///< frame_cropping_flag
152
+
153
+    /* those 4 are already in luma samples */
154
+    unsigned int crop_left;            ///< frame_cropping_rect_left_offset
155
+    unsigned int crop_right;           ///< frame_cropping_rect_right_offset
156
+    unsigned int crop_top;             ///< frame_cropping_rect_top_offset
157
+    unsigned int crop_bottom;          ///< frame_cropping_rect_bottom_offset
158
+    int vui_parameters_present_flag;
159
+    AVRational sar;
160
+    int video_signal_type_present_flag;
161
+    int full_range;
162
+    int colour_description_present_flag;
163
+    enum AVColorPrimaries color_primaries;
164
+    enum AVColorTransferCharacteristic color_trc;
165
+    enum AVColorSpace colorspace;
166
+    int timing_info_present_flag;
167
+    uint32_t num_units_in_tick;
168
+    uint32_t time_scale;
169
+    int fixed_frame_rate_flag;
170
+    short offset_for_ref_frame[256]; // FIXME dyn alloc?
171
+    int bitstream_restriction_flag;
172
+    int num_reorder_frames;
173
+    int scaling_matrix_present;
174
+    uint8_t scaling_matrix4[6][16];
175
+    uint8_t scaling_matrix8[6][64];
176
+    int nal_hrd_parameters_present_flag;
177
+    int vcl_hrd_parameters_present_flag;
178
+    int pic_struct_present_flag;
179
+    int time_offset_length;
180
+    int cpb_cnt;                          ///< See H.264 E.1.2
181
+    int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
182
+    int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
183
+    int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
184
+    int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
185
+    int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
186
+    int residual_color_transform_flag;    ///< residual_colour_transform_flag
187
+    int constraint_set_flags;             ///< constraint_set[0-3]_flag
188
+} SPS;
189
+
190
+/**
191
+ * Picture parameter set
192
+ */
193
+typedef struct PPS {
194
+    unsigned int sps_id;
195
+    int cabac;                  ///< entropy_coding_mode_flag
196
+    int pic_order_present;      ///< pic_order_present_flag
197
+    int slice_group_count;      ///< num_slice_groups_minus1 + 1
198
+    int mb_slice_group_map_type;
199
+    unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
200
+    int weighted_pred;          ///< weighted_pred_flag
201
+    int weighted_bipred_idc;
202
+    int init_qp;                ///< pic_init_qp_minus26 + 26
203
+    int init_qs;                ///< pic_init_qs_minus26 + 26
204
+    int chroma_qp_index_offset[2];
205
+    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
206
+    int constrained_intra_pred;     ///< constrained_intra_pred_flag
207
+    int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
208
+    int transform_8x8_mode;         ///< transform_8x8_mode_flag
209
+    uint8_t scaling_matrix4[6][16];
210
+    uint8_t scaling_matrix8[6][64];
211
+    uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
212
+    int chroma_qp_diff;
213
+
214
+    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
215
+    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
216
+    uint32_t(*dequant4_coeff[6])[16];
217
+    uint32_t(*dequant8_coeff[6])[64];
218
+} PPS;
219
+
220
+typedef struct H264ParamSets {
221
+    AVBufferRef *sps_list[MAX_SPS_COUNT];
222
+    AVBufferRef *pps_list[MAX_PPS_COUNT];
223
+
224
+    /* currently active parameter sets */
225
+    const PPS *pps;
226
+    // FIXME this should properly be const
227
+    SPS *sps;
228
+} H264ParamSets;
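The two AVBufferRef arrays hold every parameter set seen so far, indexed by sps_id/pps_id, while pps/sps point at the ones activated by the current slice. Roughly how a slice header's pps_id is resolved against this structure (a sketch with error handling omitted, not lines from this commit):

    /* Illustrative only: look up the PPS referenced by a slice header. */
    if (sl->pps_id >= MAX_PPS_COUNT || !h->ps.pps_list[sl->pps_id])
        return AVERROR_INVALIDDATA;
    pps = (const PPS *)h->ps.pps_list[sl->pps_id]->data;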
229
+
230
+/**
231
+ * Memory management control operation opcode.
232
+ */
233
+typedef enum MMCOOpcode {
234
+    MMCO_END = 0,
235
+    MMCO_SHORT2UNUSED,
236
+    MMCO_LONG2UNUSED,
237
+    MMCO_SHORT2LONG,
238
+    MMCO_SET_MAX_LONG,
239
+    MMCO_RESET,
240
+    MMCO_LONG,
241
+} MMCOOpcode;
242
+
243
+/**
244
+ * Memory management control operation.
245
+ */
246
+typedef struct MMCO {
247
+    MMCOOpcode opcode;
248
+    int short_pic_num;  ///< pic_num without wrapping (pic_num & max_pic_num)
249
+    int long_arg;       ///< index, pic_num, or num long refs depending on opcode
250
+} MMCO;
251
+
252
+typedef struct H264Picture {
253
+    AVFrame *f;
254
+    ThreadFrame tf;
255
+
256
+    AVBufferRef *qscale_table_buf;
257
+    int8_t *qscale_table;
258
+
259
+    AVBufferRef *motion_val_buf[2];
260
+    int16_t (*motion_val[2])[2];
261
+
262
+    AVBufferRef *mb_type_buf;
263
+    uint32_t *mb_type;
264
+
265
+    AVBufferRef *hwaccel_priv_buf;
266
+    void *hwaccel_picture_private; ///< hardware accelerator private data
267
+
268
+    AVBufferRef *ref_index_buf[2];
269
+    int8_t *ref_index[2];
270
+
271
+    int field_poc[2];       ///< top/bottom POC
272
+    int poc;                ///< frame POC
273
+    int frame_num;          ///< frame_num (raw frame_num from slice header)
274
+    int mmco_reset;         /**< MMCO_RESET sets this to 1. Reordering code must
275
+                                 not mix pictures before and after MMCO_RESET. */
276
+    int pic_id;             /**< pic_num (short -> no wrap version of pic_num,
277
+                                 pic_num & max_pic_num; long -> long_pic_num) */
278
+    int long_ref;           ///< 1->long term reference 0->short term reference
279
+    int ref_poc[2][2][32];  ///< POCs of the frames used as reference (FIXME need per slice)
280
+    int ref_count[2][2];    ///< number of entries in ref_poc         (FIXME need per slice)
281
+    int mbaff;              ///< 1 -> MBAFF frame 0-> not MBAFF
282
+    int field_picture;      ///< whether or not picture was encoded in separate fields
283
+
284
+    int reference;
285
+    int recovered;          ///< picture at IDR or recovery point + recovery count
286
+} H264Picture;
287
+
288
+typedef struct H264Ref {
289
+    uint8_t *data[3];
290
+    int linesize[3];
291
+
292
+    int reference;
293
+    int poc;
294
+    int pic_id;
295
+
296
+    H264Picture *parent;
297
+} H264Ref;
298
+
299
+typedef struct H264SliceContext {
300
+    struct H264Context *h264;
301
+    GetBitContext gb;
302
+    ERContext er;
303
+
304
+    int slice_num;
305
+    int slice_type;
306
+    int slice_type_nos;         ///< slice type with SI/SP remapped to I/P (no switching slice types)
307
+    int slice_type_fixed;
308
+
309
+    int qscale;
310
+    int chroma_qp[2];   // QPc
311
+    int qp_thresh;      ///< QP threshold to skip loopfilter
312
+    int last_qscale_diff;
313
+
314
+    // deblock
315
+    int deblocking_filter;          ///< disable_deblocking_filter_idc with 1 <-> 0
316
+    int slice_alpha_c0_offset;
317
+    int slice_beta_offset;
318
+
319
+    H264PredWeightTable pwt;
320
+
321
+    int prev_mb_skipped;
322
+    int next_mb_skipped;
323
+
324
+    int chroma_pred_mode;
325
+    int intra16x16_pred_mode;
326
+
327
+    int8_t intra4x4_pred_mode_cache[5 * 8];
328
+    int8_t(*intra4x4_pred_mode);
329
+
330
+    int topleft_mb_xy;
331
+    int top_mb_xy;
332
+    int topright_mb_xy;
333
+    int left_mb_xy[LEFT_MBS];
334
+
335
+    int topleft_type;
336
+    int top_type;
337
+    int topright_type;
338
+    int left_type[LEFT_MBS];
339
+
340
+    const uint8_t *left_block;
341
+    int topleft_partition;
342
+
343
+    unsigned int topleft_samples_available;
344
+    unsigned int top_samples_available;
345
+    unsigned int topright_samples_available;
346
+    unsigned int left_samples_available;
347
+
348
+    ptrdiff_t linesize, uvlinesize;
349
+    ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
350
+    ptrdiff_t mb_uvlinesize;
351
+
352
+    int mb_x, mb_y;
353
+    int mb_xy;
354
+    int resync_mb_x;
355
+    int resync_mb_y;
356
+    unsigned int first_mb_addr;
357
+    // index of the first MB of the next slice
358
+    int next_slice_idx;
359
+    int mb_skip_run;
360
+    int is_complex;
361
+
362
+    int picture_structure;
363
+    int mb_field_decoding_flag;
364
+    int mb_mbaff;               ///< mb_aff_frame && mb_field_decoding_flag
365
+
366
+    int redundant_pic_count;
367
+
368
+    /**
369
+     * number of neighbors (top and/or left) that used 8x8 dct
370
+     */
371
+    int neighbor_transform_size;
372
+
373
+    int direct_spatial_mv_pred;
374
+    int col_parity;
375
+    int col_fieldoff;
376
+
377
+    int cbp;
378
+    int top_cbp;
379
+    int left_cbp;
380
+
381
+    int dist_scale_factor[32];
382
+    int dist_scale_factor_field[2][32];
383
+    int map_col_to_list0[2][16 + 32];
384
+    int map_col_to_list0_field[2][2][16 + 32];
385
+
386
+    /**
387
+     * num_ref_idx_l0/1_active_minus1 + 1
388
+     */
389
+    unsigned int ref_count[2];          ///< counts frames or fields, depending on current mb mode
390
+    unsigned int list_count;
391
+    H264Ref ref_list[2][48];        /**< 0..15: frame refs, 16..47: mbaff field refs.
392
+                                         *   Reordered version of default_ref_list
393
+                                         *   according to picture reordering in slice header */
394
+    struct {
395
+        uint8_t op;
396
+        uint8_t val;
397
+    } ref_modifications[2][32];
398
+    int nb_ref_modifications[2];
399
+
400
+    unsigned int pps_id;
401
+
402
+    const uint8_t *intra_pcm_ptr;
403
+    int16_t *dc_val_base;
404
+
405
+    uint8_t *bipred_scratchpad;
406
+    uint8_t *edge_emu_buffer;
407
+    uint8_t (*top_borders[2])[(16 * 3) * 2];
408
+    int bipred_scratchpad_allocated;
409
+    int edge_emu_buffer_allocated;
410
+    int top_borders_allocated[2];
411
+
412
+    /**
413
+     * non zero coeff count cache.
414
+     * is 64 if not available.
415
+     */
416
+    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
417
+
418
+    /**
419
+     * Motion vector cache.
420
+     */
421
+    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
422
+    DECLARE_ALIGNED(8,  int8_t, ref_cache)[2][5 * 8];
423
+    DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
424
+    uint8_t direct_cache[5 * 8];
425
+
426
+    DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
427
+
428
+    ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
429
+    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
430
+    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
431
+    ///< as mb is addressed by scantable[i] and scantable is uint8_t, we can either
432
+    ///< check that i is not too large or ensure that there is some unused space after mb
433
+    int16_t mb_padding[256 * 2];
434
+
435
+    uint8_t (*mvd_table[2])[2];
436
+
437
+    /**
438
+     * Cabac
439
+     */
440
+    CABACContext cabac;
441
+    uint8_t cabac_state[1024];
442
+    int cabac_init_idc;
443
+
444
+    MMCO mmco[MAX_MMCO_COUNT];
445
+    int  nb_mmco;
446
+    int explicit_ref_marking;
447
+
448
+    int frame_num;
449
+    int poc_lsb;
450
+    int delta_poc_bottom;
451
+    int delta_poc[2];
452
+    int curr_pic_num;
453
+    int max_pic_num;
454
+} H264SliceContext;
455
+
456
+/**
457
+ * H264Context
458
+ */
459
+typedef struct H264Context {
460
+    const AVClass *class;
461
+    AVCodecContext *avctx;
462
+    VideoDSPContext vdsp;
463
+    H264DSPContext h264dsp;
464
+    H264ChromaContext h264chroma;
465
+    H264QpelContext h264qpel;
466
+
467
+    H264Picture DPB[H264_MAX_PICTURE_COUNT];
468
+    H264Picture *cur_pic_ptr;
469
+    H264Picture cur_pic;
470
+
471
+    H264SliceContext *slice_ctx;
472
+    int            nb_slice_ctx;
473
+
474
+    H2645Packet pkt;
475
+
476
+    int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264
477
+
478
+    /* coded dimensions -- 16 * mb w/h */
479
+    int width, height;
480
+    int chroma_x_shift, chroma_y_shift;
481
+
482
+    int droppable;
483
+    int coded_picture_number;
484
+
485
+    int context_initialized;
486
+    int flags;
487
+    int workaround_bugs;
488
+    /* Set when slice threading is used and at least one slice uses deblocking
489
+     * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
490
+     * during normal MB decoding and execute it serially at the end.
491
+     */
492
+    int postpone_filter;
493
+
494
+    int8_t(*intra4x4_pred_mode);
495
+    H264PredContext hpc;
496
+
497
+    uint8_t (*non_zero_count)[48];
498
+
499
+#define LIST_NOT_USED -1 // FIXME rename?
500
+#define PART_NOT_AVAILABLE -2
501
+
502
+    /**
503
+     * block_offset[ 0..23] for frame macroblocks
504
+     * block_offset[24..47] for field macroblocks
505
+     */
506
+    int block_offset[2 * (16 * 3)];
507
+
508
+    uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
509
+    uint32_t *mb2br_xy;
510
+    int b_stride;       // FIXME use s->b4_stride
511
+
512
+    uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
513
+
514
+    // interlacing specific flags
515
+    int mb_aff_frame;
516
+    int picture_structure;
517
+    int first_field;
518
+
519
+    uint8_t *list_counts;               ///< Array of list_count per MB specifying the slice type
520
+
521
+    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
522
+    uint16_t *cbp_table;
523
+
524
+    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
525
+    uint8_t *chroma_pred_mode_table;
526
+    uint8_t (*mvd_table[2])[2];
527
+    uint8_t *direct_table;
528
+
529
+    uint8_t zigzag_scan[16];
530
+    uint8_t zigzag_scan8x8[64];
531
+    uint8_t zigzag_scan8x8_cavlc[64];
532
+    uint8_t field_scan[16];
533
+    uint8_t field_scan8x8[64];
534
+    uint8_t field_scan8x8_cavlc[64];
535
+    const uint8_t *zigzag_scan_q0;
536
+    const uint8_t *zigzag_scan8x8_q0;
537
+    const uint8_t *zigzag_scan8x8_cavlc_q0;
538
+    const uint8_t *field_scan_q0;
539
+    const uint8_t *field_scan8x8_q0;
540
+    const uint8_t *field_scan8x8_cavlc_q0;
541
+
542
+    int mb_y;
543
+    int mb_height, mb_width;
544
+    int mb_stride;
545
+    int mb_num;
546
+
547
+    // =============================================================
548
+    // Things below are not used in the MB or more inner code
549
+
550
+    int nal_ref_idc;
551
+    int nal_unit_type;
552
+
553
+    /**
554
+     * Used to parse AVC variant of H.264
555
+     */
556
+    int is_avc;           ///< this flag is != 0 if the bitstream uses the avc1 (length-prefixed NAL) format
557
+    int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)
558
+
559
+    int bit_depth_luma;         ///< luma bit depth from sps to detect changes
560
+    int chroma_format_idc;      ///< chroma format from sps to detect changes
561
+
562
+    H264ParamSets ps;
563
+
564
+    uint16_t *slice_table_base;
565
+
566
+    H264POCContext poc;
567
+
568
+    H264Picture *short_ref[32];
569
+    H264Picture *long_ref[32];
570
+    H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
571
+    int last_pocs[MAX_DELAYED_PIC_COUNT];
572
+    H264Picture *next_output_pic;
573
+    int next_outputed_poc;
574
+
575
+    /**
576
+     * memory management control operations buffer.
577
+     */
578
+    MMCO mmco[MAX_MMCO_COUNT];
579
+    int  nb_mmco;
580
+    int mmco_reset;
581
+    int explicit_ref_marking;
582
+
583
+    int long_ref_count;     ///< number of actual long term references
584
+    int short_ref_count;    ///< number of actual short term references
585
+
586
+    /**
587
+     * @name Members for slice based multithreading
588
+     * @{
589
+     */
590
+    /**
591
+     * current slice number, used to initialize slice_num of each thread/context
592
+     */
593
+    int current_slice;
594
+
595
+    /** @} */
596
+
597
+    /**
598
+     * Complements sei_pic_struct.
599
+     * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
600
+     * However, soft telecined frames may have these values.
601
+     * This is used in an attempt to flag soft-telecined content as progressive.
602
+     */
603
+    int prev_interlaced_frame;
604
+
605
+    /**
606
+     * recovery_frame is the frame_num at which the next frame should
607
+     * be fully constructed.
608
+     *
609
+     * Set to -1 when not expecting a recovery point.
610
+     */
611
+    int recovery_frame;
612
+
613
+/**
614
+ * We have seen an IDR, so all the following frames in coded order are correctly
615
+ * decodable.
616
+ */
617
+#define FRAME_RECOVERED_IDR  (1 << 0)
618
+/**
619
+ * Sufficient number of frames have been decoded since a SEI recovery point,
620
+ * so all the following frames in presentation order are correct.
621
+ */
622
+#define FRAME_RECOVERED_SEI  (1 << 1)
623
+
624
+    int frame_recovered;    ///< Initial frame has been completely recovered
625
+
626
+    /* for frame threading, this is set to 1
627
+     * after finish_setup() has been called, so we cannot modify
628
+     * some context properties (which are supposed to stay constant between
629
+     * slices) anymore */
630
+    int setup_finished;
631
+
632
+    int enable_er;
633
+
634
+    H264SEIContext sei;
635
+
636
+    AVBufferPool *qscale_table_pool;
637
+    AVBufferPool *mb_type_pool;
638
+    AVBufferPool *motion_val_pool;
639
+    AVBufferPool *ref_index_pool;
640
+    int ref2frm[MAX_SLICES][2][64];     ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
641
+} H264Context;
642
+
643
+extern const uint16_t ff_h264_mb_sizes[4];
644
+
645
+/**
646
+ * Decode SPS
647
+ */
648
+int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
649
+                                     H264ParamSets *ps);
650
+
651
+/**
652
+ * Decode PPS
653
+ */
654
+int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
655
+                                         H264ParamSets *ps, int bit_length);
656
+
657
+/**
658
+ * Reconstruct bitstream slice_type.
659
+ */
660
+int ff_h264_get_slice_type(const H264SliceContext *sl);
661
+
662
+/**
663
+ * Allocate tables.
664
+ * Needs width/height.
665
+ */
666
+int ff_h264_alloc_tables(H264Context *h);
667
+
668
+int ff_h264_decode_ref_pic_list_reordering(const H264Context *h, H264SliceContext *sl);
669
+int ff_h264_build_ref_list(const H264Context *h, H264SliceContext *sl);
670
+void ff_h264_remove_all_refs(H264Context *h);
671
+
672
+/**
673
+ * Execute the reference picture marking (memory management control operations).
674
+ */
675
+int ff_h264_execute_ref_pic_marking(H264Context *h);
676
+
677
+int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl,
678
+                                   GetBitContext *gb);
679
+
680
+void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
681
+int ff_h264_decode_init(AVCodecContext *avctx);
682
+void ff_h264_decode_init_vlc(void);
683
+
684
+/**
685
+ * Decode a macroblock
686
+ * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
687
+ */
688
+int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);
689
+
690
+/**
691
+ * Decode a CABAC coded macroblock
692
+ * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
693
+ */
694
+int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
695
+
696
+void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
697
+
698
+void ff_h264_init_dequant_tables(H264Context *h);
699
+
700
+void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
701
+void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
702
+void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
703
+                                int *mb_type);
704
+
705
+void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
706
+                            uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
707
+                            unsigned int linesize, unsigned int uvlinesize);
708
+void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
709
+                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
710
+                       unsigned int linesize, unsigned int uvlinesize);
711
+
712
+/*
713
+ * o-o o-o
714
+ *  / / /
715
+ * o-o o-o
716
+ *  ,---'
717
+ * o-o o-o
718
+ *  / / /
719
+ * o-o o-o
720
+ */
721
+
722
+/* Scan8 organization:
723
+ *    0 1 2 3 4 5 6 7
724
+ * 0  DY    y y y y y
725
+ * 1        y Y Y Y Y
726
+ * 2        y Y Y Y Y
727
+ * 3        y Y Y Y Y
728
+ * 4        y Y Y Y Y
729
+ * 5  DU    u u u u u
730
+ * 6        u U U U U
731
+ * 7        u U U U U
732
+ * 8        u U U U U
733
+ * 9        u U U U U
734
+ * 10 DV    v v v v v
735
+ * 11       v V V V V
736
+ * 12       v V V V V
737
+ * 13       v V V V V
738
+ * 14       v V V V V
739
+ * DY/DU/DV are for luma/chroma DC.
740
+ */
741
+
742
+#define LUMA_DC_BLOCK_INDEX   48
743
+#define CHROMA_DC_BLOCK_INDEX 49
744
+
745
+// This table must be here because scan8[constant] must be known at compile time
746
+static const uint8_t scan8[16 * 3 + 3] = {
747
+    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
748
+    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
749
+    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
750
+    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
751
+    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
752
+    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
753
+    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
754
+    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
755
+    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
756
+    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
757
+    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
758
+    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
759
+    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
760
+};
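Because the caches documented above are eight entries wide, each scan8[] value packs a (column, row) pair into one byte. A small sketch of how the two coordinates can be recovered (the helper name is made up for illustration):

    /* Illustrative only: split a scan8[] entry back into cache coordinates. */
    static inline void scan8_to_cache_xy(int n, int *x, int *y)
    {
        *x = scan8[n] & 7;  /* column: 4..7 for the 4x4 blocks, 0 for the DC entries */
        *y = scan8[n] >> 3; /* row: 1..4 luma, 6..9 Cb, 11..14 Cr, 0/5/10 for DC */
    }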
761
+
762
+static av_always_inline uint32_t pack16to32(int a, int b)
763
+{
764
+#if HAVE_BIGENDIAN
765
+    return (b & 0xFFFF) + (a << 16);
766
+#else
767
+    return (a & 0xFFFF) + (b << 16);
768
+#endif
769
+}
770
+
771
+static av_always_inline uint16_t pack8to16(int a, int b)
772
+{
773
+#if HAVE_BIGENDIAN
774
+    return (b & 0xFF) + (a << 8);
775
+#else
776
+    return (a & 0xFF) + (b << 8);
777
+#endif
778
+}
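These packers let both components of a motion vector be stored with a single aligned write; the endian-dependent ordering keeps the element order in memory identical on either architecture. A minimal sketch (the helper and its arguments are hypothetical):

    /* Illustrative only: one 32-bit store yields mv[0] == mx and mv[1] == my
     * regardless of endianness, thanks to pack16to32() above. */
    static inline void store_mv(int16_t mv[2], int mx, int my)
    {
        AV_WN32A(mv, pack16to32(mx, my));
    }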
779
+
780
+/**
781
+ * Get the chroma qp.
782
+ */
783
+static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
784
+{
785
+    return pps->chroma_qp_table[t][qscale];
786
+}
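Since chroma_qp_table is already pre-scaled with chroma_qp_index_offset, the lookup is a single table read. Roughly how the slice code derives the two chroma QPs from the luma QP (a sketch, not lines from this commit):

    /* Illustrative only: Cb uses index 0, Cr uses index 1. */
    sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
    sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);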
787
+
788
+/**
789
+ * Get the predicted intra4x4 prediction mode.
790
+ */
791
+static av_always_inline int pred_intra_mode(const H264Context *h,
792
+                                            H264SliceContext *sl, int n)
793
+{
794
+    const int index8 = scan8[n];
795
+    const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
796
+    const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
797
+    const int min    = FFMIN(left, top);
798
+
799
+    ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
800
+
801
+    if (min < 0)
802
+        return DC_PRED;
803
+    else
804
+        return min;
805
+}
806
+
807
+static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
808
+                                                        H264SliceContext *sl)
809
+{
810
+    int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
811
+    int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
812
+
813
+    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
814
+    i4x4[4] = i4x4_cache[7 + 8 * 3];
815
+    i4x4[5] = i4x4_cache[7 + 8 * 2];
816
+    i4x4[6] = i4x4_cache[7 + 8 * 1];
817
+}
818
+
819
+static av_always_inline void write_back_non_zero_count(const H264Context *h,
820
+                                                       H264SliceContext *sl)
821
+{
822
+    const int mb_xy    = sl->mb_xy;
823
+    uint8_t *nnz       = h->non_zero_count[mb_xy];
824
+    uint8_t *nnz_cache = sl->non_zero_count_cache;
825
+
826
+    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
827
+    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
828
+    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
829
+    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
830
+    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
831
+    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
832
+    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
833
+    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
834
+
835
+    if (!h->chroma_y_shift) {
836
+        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
837
+        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
838
+        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
839
+        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
840
+    }
841
+}
842
+
843
+static av_always_inline void write_back_motion_list(const H264Context *h,
844
+                                                    H264SliceContext *sl,
845
+                                                    int b_stride,
846
+                                                    int b_xy, int b8_xy,
847
+                                                    int mb_type, int list)
848
+{
849
+    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
850
+    int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
851
+    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
852
+    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
853
+    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
854
+    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
855
+    if (CABAC(h)) {
856
+        uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
857
+                                                        : h->mb2br_xy[sl->mb_xy]];
858
+        uint8_t(*mvd_src)[2]  = &sl->mvd_cache[list][scan8[0]];
859
+        if (IS_SKIP(mb_type)) {
860
+            AV_ZERO128(mvd_dst);
861
+        } else {
862
+            AV_COPY64(mvd_dst, mvd_src + 8 * 3);
863
+            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
864
+            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
865
+            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
866
+        }
867
+    }
868
+
869
+    {
870
+        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
871
+        int8_t *ref_cache = sl->ref_cache[list];
872
+        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
873
+        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
874
+        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
875
+        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
876
+    }
877
+}
878
+
879
+static av_always_inline void write_back_motion(const H264Context *h,
880
+                                               H264SliceContext *sl,
881
+                                               int mb_type)
882
+{
883
+    const int b_stride      = h->b_stride;
884
+    const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
885
+    const int b8_xy = 4 * sl->mb_xy;
886
+
887
+    if (USES_LIST(mb_type, 0)) {
888
+        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
889
+    } else {
890
+        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
891
+                       2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
892
+    }
893
+    if (USES_LIST(mb_type, 1))
894
+        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
895
+
896
+    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
897
+        if (IS_8X8(mb_type)) {
898
+            uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
899
+            direct_table[1] = sl->sub_mb_type[1] >> 1;
900
+            direct_table[2] = sl->sub_mb_type[2] >> 1;
901
+            direct_table[3] = sl->sub_mb_type[3] >> 1;
902
+        }
903
+    }
904
+}
905
+
906
+static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
907
+{
908
+    if (h->ps.sps->direct_8x8_inference_flag)
909
+        return !(AV_RN64A(sl->sub_mb_type) &
910
+                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
911
+                  0x0001000100010001ULL));
912
+    else
913
+        return !(AV_RN64A(sl->sub_mb_type) &
914
+                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
915
+                  0x0001000100010001ULL));
916
+}
917
+
918
+int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
919
+
920
+int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
921
+void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
922
+
923
+int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
924
+
925
+void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
926
+
927
+int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
928
+                                const H2645NAL *nal);
929
+int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
930
+int ff_h264_update_thread_context(AVCodecContext *dst,
931
+                                  const AVCodecContext *src);
932
+
933
+void ff_h264_flush_change(H264Context *h);
934
+
935
+void ff_h264_free_tables(H264Context *h);
936
+
937
+#endif /* AVCODEC_H264DEC_H */
... ...
@@ -27,7 +27,7 @@
27 27
 
28 28
 #include "bit_depth_template.c"
29 29
 #include "libavutil/common.h"
30
-#include "h264.h"
30
+#include "h264dec.h"
31 31
 #include "h264idct.h"
32 32
 
33 33
 void FUNCC(ff_h264_idct_add)(uint8_t *_dst, int16_t *_block, int stride)
... ...
@@ -41,7 +41,7 @@
41 41
 #include "libavutil/opt.h"
42 42
 
43 43
 #include "avcodec.h"
44
-#include "h264.h"
44
+#include "h264dec.h"
45 45
 #include "internal.h"
46 46
 
47 47
 #ifdef OMX_SKIP64BIT
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/ppc/types_altivec.h"
32 32
 #include "libavutil/ppc/util_altivec.h"
33 33
 
34
-#include "libavcodec/h264.h"
34
+#include "libavcodec/h264dec.h"
35 35
 #include "libavcodec/h264dsp.h"
36 36
 
37 37
 #if HAVE_ALTIVEC && HAVE_BIGENDIAN
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #include "avcodec.h"
32 32
 #include "internal.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "qsv.h"
35 35
 #include "qsv_internal.h"
36 36
 #include "qsvenc.h"
... ...
@@ -46,7 +46,7 @@
46 46
 #include "internal.h"
47 47
 #include "avcodec.h"
48 48
 #include "mpegutils.h"
49
-#include "h264.h"
49
+#include "h264dec.h"
50 50
 #include "h264data.h"
51 51
 #include "golomb.h"
52 52
 #include "hpeldsp.h"
... ...
@@ -21,7 +21,7 @@
21 21
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 22
  */
23 23
 
24
-#include "h264.h"
24
+#include "h264dec.h"
25 25
 #include "mpegvideo.h"
26 26
 #include "vaapi_internal.h"
27 27
 
... ...
@@ -25,7 +25,7 @@
25 25
 #include "libavutil/pixfmt.h"
26 26
 
27 27
 #include "avcodec.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "h264_sei.h"
30 30
 #include "internal.h"
31 31
 #include "vaapi_encode.h"
... ...
@@ -21,7 +21,7 @@
21 21
  */
22 22
 
23 23
 #include "vaapi_internal.h"
24
-#include "h264.h"
24
+#include "h264dec.h"
25 25
 #include "mpegutils.h"
26 26
 
27 27
 /**
... ...
@@ -25,7 +25,7 @@
25 25
 #include <CoreFoundation/CFString.h>
26 26
 
27 27
 #include "libavutil/avutil.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "internal.h"
30 30
 #include "vda.h"
31 31
 #include "vda_internal.h"
... ...
@@ -25,7 +25,7 @@
25 25
 
26 26
 #include "avcodec.h"
27 27
 #include "internal.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "vc1.h"
30 30
 #include "vdpau.h"
31 31
 #include "vdpau_internal.h"
... ...
@@ -25,7 +25,7 @@
25 25
 
26 26
 #include "avcodec.h"
27 27
 #include "internal.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "mpegutils.h"
30 30
 #include "vdpau.h"
31 31
 #include "vdpau_internal.h"
... ...
@@ -23,7 +23,7 @@
23 23
 #include "libavutil/cpu.h"
24 24
 #include "libavutil/x86/asm.h"
25 25
 #include "libavutil/x86/cpu.h"
26
-#include "libavcodec/h264.h"
26
+#include "libavcodec/h264dec.h"
27 27
 #include "libavcodec/h264qpel.h"
28 28
 #include "libavcodec/pixels.h"
29 29
 #include "fpel.h"