Merge commit '9df889a5f116c1ee78c2f239e0ba599c492431aa'

* commit '9df889a5f116c1ee78c2f239e0ba599c492431aa':
  h264: rename h264.[ch] to h264dec.[ch]

Merged-by: Clément Bœsch <u@pkh.me>

Clément Bœsch authored on 2016/07/29 18:01:36
Showing 41 changed files
... ...
@@ -309,7 +309,7 @@ OBJS-$(CONFIG_H263_DECODER)            += h263dec.o h263.o ituh263dec.o        \
                                           intelh263dec.o h263data.o
 OBJS-$(CONFIG_H263_ENCODER)            += mpeg4videoenc.o mpeg4video.o  \
                                           h263.o ituh263enc.o flvenc.o h263data.o
-OBJS-$(CONFIG_H264_DECODER)            += h264.o h264_cabac.o h264_cavlc.o \
+OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
                                           h264_direct.o h264_loopfilter.o  \
                                           h264_mb.o h264_picture.o h264_ps.o \
                                           h264_refs.o h264_sei.o \
... ...
@@ -83,7 +83,7 @@
 #include <libcrystalhd/libcrystalhd_if.h>
 
 #include "avcodec.h"
-#include "h264.h"
+#include "h264dec.h"
 #include "internal.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/intreadwrite.h"
... ...
@@ -22,7 +22,7 @@
 
 #include "libavutil/avassert.h"
 
-#include "h264.h"
+#include "h264dec.h"
 #include "h264data.h"
 #include "mpegutils.h"
 
deleted file mode 100644
... ...
@@ -1,1292 +0,0 @@
1
-/*
2
- * H.26L/H.264/AVC/JVT/14496-10/... decoder
3
- * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4
- *
5
- * This file is part of FFmpeg.
6
- *
7
- * FFmpeg is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU Lesser General Public
9
- * License as published by the Free Software Foundation; either
10
- * version 2.1 of the License, or (at your option) any later version.
11
- *
12
- * FFmpeg is distributed in the hope that it will be useful,
13
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * Lesser General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU Lesser General Public
18
- * License along with FFmpeg; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
- */
21
-
22
-/**
23
- * @file
24
- * H.264 / AVC / MPEG-4 part10 codec.
25
- * @author Michael Niedermayer <michaelni@gmx.at>
26
- */
27
-
28
-#define UNCHECKED_BITSTREAM_READER 1
29
-
30
-#include "libavutil/avassert.h"
31
-#include "libavutil/display.h"
32
-#include "libavutil/imgutils.h"
33
-#include "libavutil/opt.h"
34
-#include "libavutil/stereo3d.h"
35
-#include "libavutil/timer.h"
36
-#include "internal.h"
37
-#include "bytestream.h"
38
-#include "cabac.h"
39
-#include "cabac_functions.h"
40
-#include "error_resilience.h"
41
-#include "avcodec.h"
42
-#include "h264.h"
43
-#include "h2645_parse.h"
44
-#include "h264data.h"
45
-#include "h264chroma.h"
46
-#include "h264_mvpred.h"
47
-#include "golomb.h"
48
-#include "mathops.h"
49
-#include "me_cmp.h"
50
-#include "mpegutils.h"
51
-#include "profiles.h"
52
-#include "rectangle.h"
53
-#include "thread.h"
54
-#include "vdpau_compat.h"
55
-
56
-static int h264_decode_end(AVCodecContext *avctx);
57
-
58
-const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
59
-
60
-int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
61
-{
62
-    H264Context *h = avctx->priv_data;
63
-    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
64
-}
65
-
66
-static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
67
-                              int (*mv)[2][4][2],
68
-                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
69
-{
70
-    H264Context *h = opaque;
71
-    H264SliceContext *sl = &h->slice_ctx[0];
72
-
73
-    sl->mb_x = mb_x;
74
-    sl->mb_y = mb_y;
75
-    sl->mb_xy = mb_x + mb_y * h->mb_stride;
76
-    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
77
-    av_assert1(ref >= 0);
78
-    /* FIXME: It is possible albeit uncommon that slice references
79
-     * differ between slices. We take the easy approach and ignore
80
-     * it for now. If this turns out to have any relevance in
81
-     * practice then correct remapping should be added. */
82
-    if (ref >= sl->ref_count[0])
83
-        ref = 0;
84
-    if (!sl->ref_list[0][ref].data[0]) {
85
-        av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
86
-        ref = 0;
87
-    }
88
-    if ((sl->ref_list[0][ref].reference&3) != 3) {
89
-        av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
90
-        return;
91
-    }
92
-    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
93
-                   2, 2, 2, ref, 1);
94
-    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
95
-    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
96
-                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
97
-    sl->mb_mbaff =
98
-    sl->mb_field_decoding_flag = 0;
99
-    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
100
-}
101
-
102
-void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
103
-                             int y, int height)
104
-{
105
-    AVCodecContext *avctx = h->avctx;
106
-    const AVFrame   *src  = h->cur_pic.f;
107
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
108
-    int vshift = desc->log2_chroma_h;
109
-    const int field_pic = h->picture_structure != PICT_FRAME;
110
-    if (field_pic) {
111
-        height <<= 1;
112
-        y      <<= 1;
113
-    }
114
-
115
-    height = FFMIN(height, avctx->height - y);
116
-
117
-    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
118
-        return;
119
-
120
-    if (avctx->draw_horiz_band) {
121
-        int offset[AV_NUM_DATA_POINTERS];
122
-        int i;
123
-
124
-        offset[0] = y * src->linesize[0];
125
-        offset[1] =
126
-        offset[2] = (y >> vshift) * src->linesize[1];
127
-        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
128
-            offset[i] = 0;
129
-
130
-        emms_c();
131
-
132
-        avctx->draw_horiz_band(avctx, src, offset,
133
-                               y, h->picture_structure, height);
134
-    }
135
-}
136
-
137
-void ff_h264_free_tables(H264Context *h)
138
-{
139
-    int i;
140
-
141
-    av_freep(&h->intra4x4_pred_mode);
142
-    av_freep(&h->chroma_pred_mode_table);
143
-    av_freep(&h->cbp_table);
144
-    av_freep(&h->mvd_table[0]);
145
-    av_freep(&h->mvd_table[1]);
146
-    av_freep(&h->direct_table);
147
-    av_freep(&h->non_zero_count);
148
-    av_freep(&h->slice_table_base);
149
-    h->slice_table = NULL;
150
-    av_freep(&h->list_counts);
151
-
152
-    av_freep(&h->mb2b_xy);
153
-    av_freep(&h->mb2br_xy);
154
-
155
-    av_buffer_pool_uninit(&h->qscale_table_pool);
156
-    av_buffer_pool_uninit(&h->mb_type_pool);
157
-    av_buffer_pool_uninit(&h->motion_val_pool);
158
-    av_buffer_pool_uninit(&h->ref_index_pool);
159
-
160
-    for (i = 0; i < h->nb_slice_ctx; i++) {
161
-        H264SliceContext *sl = &h->slice_ctx[i];
162
-
163
-        av_freep(&sl->dc_val_base);
164
-        av_freep(&sl->er.mb_index2xy);
165
-        av_freep(&sl->er.error_status_table);
166
-        av_freep(&sl->er.er_temp_buffer);
167
-
168
-        av_freep(&sl->bipred_scratchpad);
169
-        av_freep(&sl->edge_emu_buffer);
170
-        av_freep(&sl->top_borders[0]);
171
-        av_freep(&sl->top_borders[1]);
172
-
173
-        sl->bipred_scratchpad_allocated = 0;
174
-        sl->edge_emu_buffer_allocated   = 0;
175
-        sl->top_borders_allocated[0]    = 0;
176
-        sl->top_borders_allocated[1]    = 0;
177
-    }
178
-}
179
-
180
-int ff_h264_alloc_tables(H264Context *h)
181
-{
182
-    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
183
-    const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
184
-    int x, y;
185
-
186
-    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
187
-                      row_mb_num, 8 * sizeof(uint8_t), fail)
188
-    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
189
-
190
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
191
-                      big_mb_num * 48 * sizeof(uint8_t), fail)
192
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
193
-                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
194
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
195
-                      big_mb_num * sizeof(uint16_t), fail)
196
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
197
-                      big_mb_num * sizeof(uint8_t), fail)
198
-    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
199
-                      row_mb_num, 16 * sizeof(uint8_t), fail);
200
-    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
201
-                      row_mb_num, 16 * sizeof(uint8_t), fail);
202
-    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
203
-    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
204
-
205
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
206
-                      4 * big_mb_num * sizeof(uint8_t), fail);
207
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
208
-                      big_mb_num * sizeof(uint8_t), fail)
209
-
210
-    memset(h->slice_table_base, -1,
211
-           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
212
-    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
213
-
214
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
215
-                      big_mb_num * sizeof(uint32_t), fail);
216
-    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
217
-                      big_mb_num * sizeof(uint32_t), fail);
218
-    for (y = 0; y < h->mb_height; y++)
219
-        for (x = 0; x < h->mb_width; x++) {
220
-            const int mb_xy = x + y * h->mb_stride;
221
-            const int b_xy  = 4 * x + 4 * y * h->b_stride;
222
-
223
-            h->mb2b_xy[mb_xy]  = b_xy;
224
-            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
225
-        }
226
-
227
-    return 0;
228
-
229
-fail:
230
-    ff_h264_free_tables(h);
231
-    return AVERROR(ENOMEM);
232
-}
233
-
234
-/**
235
- * Init context
236
- * Allocate buffers which are not shared amongst multiple threads.
237
- */
238
-int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
239
-{
240
-    ERContext *er = &sl->er;
241
-    int mb_array_size = h->mb_height * h->mb_stride;
242
-    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
243
-    int c_size  = h->mb_stride * (h->mb_height + 1);
244
-    int yc_size = y_size + 2   * c_size;
245
-    int x, y, i;
246
-
247
-    sl->ref_cache[0][scan8[5]  + 1] =
248
-    sl->ref_cache[0][scan8[7]  + 1] =
249
-    sl->ref_cache[0][scan8[13] + 1] =
250
-    sl->ref_cache[1][scan8[5]  + 1] =
251
-    sl->ref_cache[1][scan8[7]  + 1] =
252
-    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
253
-
254
-    if (sl != h->slice_ctx) {
255
-        memset(er, 0, sizeof(*er));
256
-    } else
257
-    if (CONFIG_ERROR_RESILIENCE) {
258
-
259
-        /* init ER */
260
-        er->avctx          = h->avctx;
261
-        er->decode_mb      = h264_er_decode_mb;
262
-        er->opaque         = h;
263
-        er->quarter_sample = 1;
264
-
265
-        er->mb_num      = h->mb_num;
266
-        er->mb_width    = h->mb_width;
267
-        er->mb_height   = h->mb_height;
268
-        er->mb_stride   = h->mb_stride;
269
-        er->b8_stride   = h->mb_width * 2 + 1;
270
-
271
-        // error resilience code looks cleaner with this
272
-        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
273
-                          (h->mb_num + 1) * sizeof(int), fail);
274
-
275
-        for (y = 0; y < h->mb_height; y++)
276
-            for (x = 0; x < h->mb_width; x++)
277
-                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
278
-
279
-        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
280
-                                                      h->mb_stride + h->mb_width;
281
-
282
-        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
283
-                          mb_array_size * sizeof(uint8_t), fail);
284
-
285
-        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
286
-                         h->mb_height * h->mb_stride, fail);
287
-
288
-        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
289
-                          yc_size * sizeof(int16_t), fail);
290
-        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
291
-        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
292
-        er->dc_val[2] = er->dc_val[1] + c_size;
293
-        for (i = 0; i < yc_size; i++)
294
-            sl->dc_val_base[i] = 1024;
295
-    }
296
-
297
-    return 0;
298
-
299
-fail:
300
-    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
301
-}
302
-
303
-static int h264_init_context(AVCodecContext *avctx, H264Context *h)
304
-{
305
-    int i;
306
-
307
-    h->avctx                 = avctx;
308
-    h->backup_width          = -1;
309
-    h->backup_height         = -1;
310
-    h->backup_pix_fmt        = AV_PIX_FMT_NONE;
311
-    h->cur_chroma_format_idc = -1;
312
-
313
-    h->picture_structure     = PICT_FRAME;
314
-    h->workaround_bugs       = avctx->workaround_bugs;
315
-    h->flags                 = avctx->flags;
316
-    h->poc.prev_poc_msb      = 1 << 16;
317
-    h->recovery_frame        = -1;
318
-    h->frame_recovered       = 0;
319
-    h->poc.prev_frame_num    = -1;
320
-    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
321
-    h->sei.unregistered.x264_build = -1;
322
-
323
-    h->next_outputed_poc = INT_MIN;
324
-    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
325
-        h->last_pocs[i] = INT_MIN;
326
-
327
-    ff_h264_sei_uninit(&h->sei);
328
-
329
-    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
330
-
331
-    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
332
-    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
333
-    if (!h->slice_ctx) {
334
-        h->nb_slice_ctx = 0;
335
-        return AVERROR(ENOMEM);
336
-    }
337
-
338
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
339
-        h->DPB[i].f = av_frame_alloc();
340
-        if (!h->DPB[i].f)
341
-            return AVERROR(ENOMEM);
342
-    }
343
-
344
-    h->cur_pic.f = av_frame_alloc();
345
-    if (!h->cur_pic.f)
346
-        return AVERROR(ENOMEM);
347
-
348
-    h->last_pic_for_ec.f = av_frame_alloc();
349
-    if (!h->last_pic_for_ec.f)
350
-        return AVERROR(ENOMEM);
351
-
352
-    for (i = 0; i < h->nb_slice_ctx; i++)
353
-        h->slice_ctx[i].h264 = h;
354
-
355
-    return 0;
356
-}
357
-
358
-static av_cold int h264_decode_end(AVCodecContext *avctx)
359
-{
360
-    H264Context *h = avctx->priv_data;
361
-    int i;
362
-
363
-    ff_h264_remove_all_refs(h);
364
-    ff_h264_free_tables(h);
365
-
366
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
367
-        ff_h264_unref_picture(h, &h->DPB[i]);
368
-        av_frame_free(&h->DPB[i].f);
369
-    }
370
-    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
371
-
372
-    h->cur_pic_ptr = NULL;
373
-
374
-    av_freep(&h->slice_ctx);
375
-    h->nb_slice_ctx = 0;
376
-
377
-    ff_h264_sei_uninit(&h->sei);
378
-    ff_h264_ps_uninit(&h->ps);
379
-
380
-    ff_h2645_packet_uninit(&h->pkt);
381
-
382
-    ff_h264_unref_picture(h, &h->cur_pic);
383
-    av_frame_free(&h->cur_pic.f);
384
-    ff_h264_unref_picture(h, &h->last_pic_for_ec);
385
-    av_frame_free(&h->last_pic_for_ec.f);
386
-
387
-    return 0;
388
-}
389
-
390
-static AVOnce h264_vlc_init = AV_ONCE_INIT;
391
-
392
-av_cold int ff_h264_decode_init(AVCodecContext *avctx)
393
-{
394
-    H264Context *h = avctx->priv_data;
395
-    int ret;
396
-
397
-    ret = h264_init_context(avctx, h);
398
-    if (ret < 0)
399
-        return ret;
400
-
401
-    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
402
-    if (ret != 0) {
403
-        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
404
-        return AVERROR_UNKNOWN;
405
-    }
406
-
407
-    if (avctx->codec_id == AV_CODEC_ID_H264) {
408
-        if (avctx->ticks_per_frame == 1) {
409
-            if(h->avctx->time_base.den < INT_MAX/2) {
410
-                h->avctx->time_base.den *= 2;
411
-            } else
412
-                h->avctx->time_base.num /= 2;
413
-        }
414
-        avctx->ticks_per_frame = 2;
415
-    }
416
-
417
-    if (avctx->extradata_size > 0 && avctx->extradata) {
418
-        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
419
-                                       &h->ps, &h->is_avc, &h->nal_length_size,
420
-                                       avctx->err_recognition, avctx);
421
-        if (ret < 0) {
422
-            h264_decode_end(avctx);
423
-            return ret;
424
-        }
425
-    }
426
-
427
-    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
428
-        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
429
-        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
430
-    }
431
-
432
-    avctx->internal->allocate_progress = 1;
433
-
434
-    ff_h264_flush_change(h);
435
-
436
-    if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
437
-        h->enable_er = 0;
438
-
439
-    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
440
-        av_log(avctx, AV_LOG_WARNING,
441
-               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
442
-               "Use it at your own risk\n");
443
-    }
444
-
445
-    return 0;
446
-}
447
-
448
-#if HAVE_THREADS
449
-static int decode_init_thread_copy(AVCodecContext *avctx)
450
-{
451
-    H264Context *h = avctx->priv_data;
452
-    int ret;
453
-
454
-    if (!avctx->internal->is_copy)
455
-        return 0;
456
-
457
-    memset(h, 0, sizeof(*h));
458
-
459
-    ret = h264_init_context(avctx, h);
460
-    if (ret < 0)
461
-        return ret;
462
-
463
-    h->context_initialized = 0;
464
-
465
-    return 0;
466
-}
467
-#endif
468
-
469
-/**
470
- * Run setup operations that must be run after slice header decoding.
471
- * This includes finding the next displayed frame.
472
- *
473
- * @param h h264 master context
474
- * @param setup_finished enough NALs have been read that we can call
475
- * ff_thread_finish_setup()
476
- */
477
-static void decode_postinit(H264Context *h, int setup_finished)
478
-{
479
-    const SPS *sps = h->ps.sps;
480
-    H264Picture *out = h->cur_pic_ptr;
481
-    H264Picture *cur = h->cur_pic_ptr;
482
-    int i, pics, out_of_order, out_idx;
483
-
484
-    if (h->next_output_pic)
485
-        return;
486
-
487
-    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
488
-        /* FIXME: if we have two PAFF fields in one packet, we can't start
489
-         * the next thread here. If we have one field per packet, we can.
490
-         * The check in decode_nal_units() is not good enough to find this
491
-         * yet, so we assume the worst for now. */
492
-        // if (setup_finished)
493
-        //    ff_thread_finish_setup(h->avctx);
494
-        if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
495
-            return;
496
-        if (h->avctx->hwaccel || h->missing_fields <=1)
497
-            return;
498
-    }
499
-
500
-    cur->mmco_reset = h->mmco_reset;
501
-    h->mmco_reset = 0;
502
-
503
-    // FIXME do something with unavailable reference frames
504
-
505
-    /* Sort B-frames into display order */
506
-    if (sps->bitstream_restriction_flag ||
507
-        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
508
-        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
509
-    }
510
-
511
-    for (i = 0; 1; i++) {
512
-        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
513
-            if(i)
514
-                h->last_pocs[i-1] = cur->poc;
515
-            break;
516
-        } else if(i) {
517
-            h->last_pocs[i-1]= h->last_pocs[i];
518
-        }
519
-    }
520
-    out_of_order = MAX_DELAYED_PIC_COUNT - i;
521
-    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
522
-       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
523
-        out_of_order = FFMAX(out_of_order, 1);
524
-    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
525
-        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
526
-        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
527
-            h->last_pocs[i] = INT_MIN;
528
-        h->last_pocs[0] = cur->poc;
529
-        cur->mmco_reset = 1;
530
-    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
531
-        av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
532
-        h->avctx->has_b_frames = out_of_order;
533
-    }
534
-
535
-    pics = 0;
536
-    while (h->delayed_pic[pics])
537
-        pics++;
538
-
539
-    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
540
-
541
-    h->delayed_pic[pics++] = cur;
542
-    if (cur->reference == 0)
543
-        cur->reference = DELAYED_PIC_REF;
544
-
545
-    out     = h->delayed_pic[0];
546
-    out_idx = 0;
547
-    for (i = 1; h->delayed_pic[i] &&
548
-                !h->delayed_pic[i]->f->key_frame &&
549
-                !h->delayed_pic[i]->mmco_reset;
550
-         i++)
551
-        if (h->delayed_pic[i]->poc < out->poc) {
552
-            out     = h->delayed_pic[i];
553
-            out_idx = i;
554
-        }
555
-    if (h->avctx->has_b_frames == 0 &&
556
-        (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
557
-        h->next_outputed_poc = INT_MIN;
558
-    out_of_order = out->poc < h->next_outputed_poc;
559
-
560
-    if (out_of_order || pics > h->avctx->has_b_frames) {
561
-        out->reference &= ~DELAYED_PIC_REF;
562
-        for (i = out_idx; h->delayed_pic[i]; i++)
563
-            h->delayed_pic[i] = h->delayed_pic[i + 1];
564
-    }
565
-    if (!out_of_order && pics > h->avctx->has_b_frames) {
566
-        h->next_output_pic = out;
567
-        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
568
-            h->next_outputed_poc = INT_MIN;
569
-        } else
570
-            h->next_outputed_poc = out->poc;
571
-    } else {
572
-        av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
573
-    }
574
-
575
-    if (h->next_output_pic) {
576
-        if (h->next_output_pic->recovered) {
577
-            // We have reached an recovery point and all frames after it in
578
-            // display order are "recovered".
579
-            h->frame_recovered |= FRAME_RECOVERED_SEI;
580
-        }
581
-        h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
582
-    }
583
-
584
-    if (setup_finished && !h->avctx->hwaccel) {
585
-        ff_thread_finish_setup(h->avctx);
586
-
587
-        if (h->avctx->active_thread_type & FF_THREAD_FRAME)
588
-            h->setup_finished = 1;
589
-    }
590
-}
591
-
592
-/**
593
- * instantaneous decoder refresh.
594
- */
595
-static void idr(H264Context *h)
596
-{
597
-    int i;
598
-    ff_h264_remove_all_refs(h);
599
-    h->poc.prev_frame_num        =
600
-    h->poc.prev_frame_num_offset = 0;
601
-    h->poc.prev_poc_msb          = 1<<16;
602
-    h->poc.prev_poc_lsb          = 0;
603
-    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
604
-        h->last_pocs[i] = INT_MIN;
605
-}
606
-
607
-/* forget old pics after a seek */
608
-void ff_h264_flush_change(H264Context *h)
609
-{
610
-    int i, j;
611
-
612
-    h->next_outputed_poc = INT_MIN;
613
-    h->prev_interlaced_frame = 1;
614
-    idr(h);
615
-
616
-    h->poc.prev_frame_num = -1;
617
-    if (h->cur_pic_ptr) {
618
-        h->cur_pic_ptr->reference = 0;
619
-        for (j=i=0; h->delayed_pic[i]; i++)
620
-            if (h->delayed_pic[i] != h->cur_pic_ptr)
621
-                h->delayed_pic[j++] = h->delayed_pic[i];
622
-        h->delayed_pic[j] = NULL;
623
-    }
624
-    ff_h264_unref_picture(h, &h->last_pic_for_ec);
625
-
626
-    h->first_field = 0;
627
-    ff_h264_sei_uninit(&h->sei);
628
-    h->recovery_frame = -1;
629
-    h->frame_recovered = 0;
630
-    h->current_slice = 0;
631
-    h->mmco_reset = 1;
632
-}
633
-
634
-/* forget old pics after a seek */
635
-static void flush_dpb(AVCodecContext *avctx)
636
-{
637
-    H264Context *h = avctx->priv_data;
638
-    int i;
639
-
640
-    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
641
-
642
-    ff_h264_flush_change(h);
643
-
644
-    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
645
-        ff_h264_unref_picture(h, &h->DPB[i]);
646
-    h->cur_pic_ptr = NULL;
647
-    ff_h264_unref_picture(h, &h->cur_pic);
648
-
649
-    h->mb_y = 0;
650
-
651
-    ff_h264_free_tables(h);
652
-    h->context_initialized = 0;
653
-}
654
-
655
-#if FF_API_CAP_VDPAU
656
-static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
657
-#endif
658
-
659
-static int get_last_needed_nal(H264Context *h)
660
-{
661
-    int nals_needed = 0;
662
-    int first_slice = 0;
663
-    int i;
664
-    int ret;
665
-
666
-    for (i = 0; i < h->pkt.nb_nals; i++) {
667
-        H2645NAL *nal = &h->pkt.nals[i];
668
-        GetBitContext gb;
669
-
670
-        /* packets can sometimes contain multiple PPS/SPS,
671
-         * e.g. two PAFF field pictures in one packet, or a demuxer
672
-         * which splits NALs strangely if so, when frame threading we
673
-         * can't start the next thread until we've read all of them */
674
-        switch (nal->type) {
675
-        case NAL_SPS:
676
-        case NAL_PPS:
677
-            nals_needed = i;
678
-            break;
679
-        case NAL_DPA:
680
-        case NAL_IDR_SLICE:
681
-        case NAL_SLICE:
682
-            ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
683
-            if (ret < 0)
684
-                return ret;
685
-            if (!get_ue_golomb_long(&gb) ||  // first_mb_in_slice
686
-                !first_slice ||
687
-                first_slice != nal->type)
688
-                nals_needed = i;
689
-            if (!first_slice)
690
-                first_slice = nal->type;
691
-        }
692
-    }
693
-
694
-    return nals_needed;
695
-}
696
-
697
-static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
698
-{
699
-    av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
700
-    av_log(logctx, AV_LOG_DEBUG, "  green_metadata_type: %d\n", gm->green_metadata_type);
701
-
702
-    if (gm->green_metadata_type == 0) {
703
-        av_log(logctx, AV_LOG_DEBUG, "  green_metadata_period_type: %d\n", gm->period_type);
704
-
705
-        if (gm->period_type == 2)
706
-            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_seconds: %d\n", gm->num_seconds);
707
-        else if (gm->period_type == 3)
708
-            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_pictures: %d\n", gm->num_pictures);
709
-
710
-        av_log(logctx, AV_LOG_DEBUG, "  SEI GREEN Complexity Metrics: %f %f %f %f\n",
711
-               (float)gm->percent_non_zero_macroblocks/255,
712
-               (float)gm->percent_intra_coded_macroblocks/255,
713
-               (float)gm->percent_six_tap_filtering/255,
714
-               (float)gm->percent_alpha_point_deblocking_instance/255);
715
-
716
-    } else if (gm->green_metadata_type == 1) {
717
-        av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_type: %d\n", gm->xsd_metric_type);
718
-
719
-        if (gm->xsd_metric_type == 0)
720
-            av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_value: %f\n",
721
-                   (float)gm->xsd_metric_value/100);
722
-    }
723
-}
724
-
725
-static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
726
-{
727
-    AVCodecContext *const avctx = h->avctx;
728
-    unsigned context_count = 0;
729
-    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
730
-    int idr_cleared=0;
731
-    int i, ret = 0;
732
-
733
-    h->nal_unit_type= 0;
734
-
735
-    h->max_contexts = h->nb_slice_ctx;
736
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
737
-        h->current_slice = 0;
738
-        if (!h->first_field)
739
-            h->cur_pic_ptr = NULL;
740
-        ff_h264_sei_uninit(&h->sei);
741
-    }
742
-
743
-    if (h->nal_length_size == 4) {
744
-        if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
745
-            h->is_avc = 0;
746
-        }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
747
-            h->is_avc = 1;
748
-    }
749
-
750
-    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
751
-                                h->nal_length_size, avctx->codec_id);
752
-    if (ret < 0) {
753
-        av_log(avctx, AV_LOG_ERROR,
754
-               "Error splitting the input into NAL units.\n");
755
-        return ret;
756
-    }
757
-
758
-    if (avctx->active_thread_type & FF_THREAD_FRAME)
759
-        nals_needed = get_last_needed_nal(h);
760
-    if (nals_needed < 0)
761
-        return nals_needed;
762
-
763
-    for (i = 0; i < h->pkt.nb_nals; i++) {
764
-        H2645NAL *nal = &h->pkt.nals[i];
765
-        H264SliceContext *sl = &h->slice_ctx[context_count];
766
-        int err;
767
-
768
-        if (avctx->skip_frame >= AVDISCARD_NONREF &&
769
-            nal->ref_idc == 0 && nal->type != NAL_SEI)
770
-            continue;
771
-
772
-again:
773
-        // FIXME these should stop being context-global variables
774
-        h->nal_ref_idc   = nal->ref_idc;
775
-        h->nal_unit_type = nal->type;
776
-
777
-        err = 0;
778
-        switch (nal->type) {
779
-        case NAL_IDR_SLICE:
780
-            if ((nal->data[1] & 0xFC) == 0x98) {
781
-                av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
782
-                h->next_outputed_poc = INT_MIN;
783
-                ret = -1;
784
-                goto end;
785
-            }
786
-            if (nal->type != NAL_IDR_SLICE) {
787
-                av_log(h->avctx, AV_LOG_ERROR,
788
-                       "Invalid mix of idr and non-idr slices\n");
789
-                ret = -1;
790
-                goto end;
791
-            }
792
-            if(!idr_cleared) {
793
-                if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
794
-                    av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
795
-                    ret = AVERROR_INVALIDDATA;
796
-                    goto end;
797
-                }
798
-                idr(h); // FIXME ensure we don't lose some frames if there is reordering
799
-            }
800
-            idr_cleared = 1;
801
-            h->has_recovery_point = 1;
802
-        case NAL_SLICE:
803
-            sl->gb = nal->gb;
804
-
805
-            if ((err = ff_h264_decode_slice_header(h, sl, nal)))
806
-                break;
807
-
808
-            if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
809
-                const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
810
-
811
-                if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
812
-                    h->valid_recovery_point = 1;
813
-
814
-                if (   h->recovery_frame < 0
815
-                    || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
816
-                    h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
817
-
818
-                    if (!h->valid_recovery_point)
819
-                        h->recovery_frame = h->poc.frame_num;
820
-                }
821
-            }
822
-
823
-            h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
824
-
825
-            if (nal->type == NAL_IDR_SLICE ||
826
-                (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
827
-                h->recovery_frame         = -1;
828
-                h->cur_pic_ptr->recovered = 1;
829
-            }
830
-            // If we have an IDR, all frames after it in decoded order are
831
-            // "recovered".
832
-            if (nal->type == NAL_IDR_SLICE)
833
-                h->frame_recovered |= FRAME_RECOVERED_IDR;
834
-#if 1
835
-            h->cur_pic_ptr->recovered |= h->frame_recovered;
836
-#else
837
-            h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
838
-#endif
839
-
840
-            if (h->current_slice == 1) {
841
-                if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
842
-                    decode_postinit(h, i >= nals_needed);
843
-
844
-                if (h->avctx->hwaccel &&
845
-                    (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
846
-                    goto end;
847
-#if FF_API_CAP_VDPAU
848
-                if (CONFIG_H264_VDPAU_DECODER &&
849
-                    h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
850
-                    ff_vdpau_h264_picture_start(h);
851
-#endif
852
-            }
853
-
854
-            if (sl->redundant_pic_count == 0) {
855
-                if (avctx->hwaccel) {
856
-                    ret = avctx->hwaccel->decode_slice(avctx,
857
-                                                       nal->raw_data,
858
-                                                       nal->raw_size);
859
-                    if (ret < 0)
860
-                        goto end;
861
-#if FF_API_CAP_VDPAU
862
-                } else if (CONFIG_H264_VDPAU_DECODER &&
863
-                           h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
864
-                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
865
-                                            start_code,
866
-                                            sizeof(start_code));
867
-                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
868
-                                            nal->raw_data,
869
-                                            nal->raw_size);
870
-#endif
871
-                } else
872
-                    context_count++;
873
-            }
874
-            break;
875
-        case NAL_DPA:
876
-        case NAL_DPB:
877
-        case NAL_DPC:
878
-            avpriv_request_sample(avctx, "data partitioning");
879
-            break;
880
-        case NAL_SEI:
881
-            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
882
-            h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
883
-            if (avctx->debug & FF_DEBUG_GREEN_MD)
884
-                debug_green_metadata(&h->sei.green_metadata, h->avctx);
885
-#if FF_API_AFD
886
-FF_DISABLE_DEPRECATION_WARNINGS
887
-            h->avctx->dtg_active_format = h->sei.afd.active_format_description;
888
-FF_ENABLE_DEPRECATION_WARNINGS
889
-#endif /* FF_API_AFD */
890
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
891
-                goto end;
892
-            break;
893
-        case NAL_SPS: {
894
-            GetBitContext tmp_gb = nal->gb;
895
-            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
896
-                break;
897
-            av_log(h->avctx, AV_LOG_DEBUG,
898
-                   "SPS decoding failure, trying again with the complete NAL\n");
899
-            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
900
-            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
901
-                break;
902
-            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
903
-            break;
904
-        }
905
-        case NAL_PPS:
906
-            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
907
-                                                       nal->size_bits);
908
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
909
-                goto end;
910
-            break;
911
-        case NAL_AUD:
912
-        case NAL_END_SEQUENCE:
913
-        case NAL_END_STREAM:
914
-        case NAL_FILLER_DATA:
915
-        case NAL_SPS_EXT:
916
-        case NAL_AUXILIARY_SLICE:
917
-            break;
918
-        default:
919
-            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
920
-                   nal->type, nal->size_bits);
921
-        }
922
-
923
-        if (context_count == h->max_contexts) {
924
-            ret = ff_h264_execute_decode_slices(h, context_count);
925
-            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
926
-                goto end;
927
-            context_count = 0;
928
-        }
929
-
930
-        if (err < 0 || err == SLICE_SKIPED) {
931
-            if (err < 0)
932
-                av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
933
-            sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
934
-        } else if (err == SLICE_SINGLETHREAD) {
935
-            if (context_count > 0) {
936
-                ret = ff_h264_execute_decode_slices(h, context_count);
937
-                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
938
-                    goto end;
939
-                context_count = 0;
940
-            }
941
-            /* Slice could not be decoded in parallel mode, restart. */
942
-            sl               = &h->slice_ctx[0];
943
-            goto again;
944
-        }
945
-    }
946
-    if (context_count) {
947
-        ret = ff_h264_execute_decode_slices(h, context_count);
948
-        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
949
-            goto end;
950
-    }
951
-
952
-    ret = 0;
953
-end:
954
-
955
-#if CONFIG_ERROR_RESILIENCE
956
-    /*
957
-     * FIXME: Error handling code does not seem to support interlaced
958
-     * when slices span multiple rows
959
-     * The ff_er_add_slice calls don't work right for bottom
960
-     * fields; they cause massive erroneous error concealing
961
-     * Error marking covers both fields (top and bottom).
962
-     * This causes a mismatched s->error_count
963
-     * and a bad error table. Further, the error count goes to
964
-     * INT_MAX when called for bottom field, because mb_y is
965
-     * past end by one (callers fault) and resync_mb_y != 0
966
-     * causes problems for the first MB line, too.
967
-     */
968
-    if (!FIELD_PICTURE(h) && h->current_slice &&
969
-        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
970
-        h->enable_er) {
971
-
972
-        H264SliceContext *sl = h->slice_ctx;
973
-        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
974
-
975
-        ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
976
-
977
-        if (use_last_pic) {
978
-            ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
979
-            sl->ref_list[0][0].parent = &h->last_pic_for_ec;
980
-            memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
981
-            memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
982
-            sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
983
-        } else if (sl->ref_count[0]) {
984
-            ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
985
-        } else
986
-            ff_h264_set_erpic(&sl->er.last_pic, NULL);
987
-
988
-        if (sl->ref_count[1])
989
-            ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
990
-
991
-        sl->er.ref_count = sl->ref_count[0];
992
-
993
-        ff_er_frame_end(&sl->er);
994
-        if (use_last_pic)
995
-            memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
996
-    }
997
-#endif /* CONFIG_ERROR_RESILIENCE */
998
-    /* clean up */
999
-    if (h->cur_pic_ptr && !h->droppable) {
1000
-        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1001
-                                  h->picture_structure == PICT_BOTTOM_FIELD);
1002
-    }
1003
-
1004
-    return (ret < 0) ? ret : buf_size;
1005
-}
1006
-
1007
-/**
1008
- * Return the number of bytes consumed for building the current frame.
1009
- */
1010
-static int get_consumed_bytes(int pos, int buf_size)
1011
-{
1012
-    if (pos == 0)
1013
-        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
1014
-    if (pos + 10 > buf_size)
1015
-        pos = buf_size; // oops ;)
1016
-
1017
-    return pos;
1018
-}
1019
-
1020
-static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
1021
-{
1022
-    AVFrame *src = srcp->f;
1023
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1024
-    int i;
1025
-    int ret = av_frame_ref(dst, src);
1026
-    if (ret < 0)
1027
-        return ret;
1028
-
1029
-    av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
1030
-
1031
-    h->backup_width   = h->avctx->width;
1032
-    h->backup_height  = h->avctx->height;
1033
-    h->backup_pix_fmt = h->avctx->pix_fmt;
1034
-
1035
-    h->avctx->width   = dst->width;
1036
-    h->avctx->height  = dst->height;
1037
-    h->avctx->pix_fmt = dst->format;
1038
-
1039
-    if (srcp->sei_recovery_frame_cnt == 0)
1040
-        dst->key_frame = 1;
1041
-    if (!srcp->crop)
1042
-        return 0;
1043
-
1044
-    for (i = 0; i < desc->nb_components; i++) {
1045
-        int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1046
-        int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1047
-        int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1048
-                      (srcp->crop_top  >> vshift) * dst->linesize[i];
1049
-        dst->data[i] += off;
1050
-    }
1051
-    return 0;
1052
-}
1053
-
1054
-static int is_extra(const uint8_t *buf, int buf_size)
1055
-{
1056
-    int cnt= buf[5]&0x1f;
1057
-    const uint8_t *p= buf+6;
1058
-    while(cnt--){
1059
-        int nalsize= AV_RB16(p) + 2;
1060
-        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
1061
-            return 0;
1062
-        p += nalsize;
1063
-    }
1064
-    cnt = *(p++);
1065
-    if(!cnt)
1066
-        return 0;
1067
-    while(cnt--){
1068
-        int nalsize= AV_RB16(p) + 2;
1069
-        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
1070
-            return 0;
1071
-        p += nalsize;
1072
-    }
1073
-    return 1;
1074
-}
1075
-
1076
-static int h264_decode_frame(AVCodecContext *avctx, void *data,
1077
-                             int *got_frame, AVPacket *avpkt)
1078
-{
1079
-    const uint8_t *buf = avpkt->data;
1080
-    int buf_size       = avpkt->size;
1081
-    H264Context *h     = avctx->priv_data;
1082
-    AVFrame *pict      = data;
1083
-    int buf_index      = 0;
1084
-    H264Picture *out;
1085
-    int i, out_idx;
1086
-    int ret;
1087
-
1088
-    h->flags = avctx->flags;
1089
-    h->setup_finished = 0;
1090
-
1091
-    if (h->backup_width != -1) {
1092
-        avctx->width    = h->backup_width;
1093
-        h->backup_width = -1;
1094
-    }
1095
-    if (h->backup_height != -1) {
1096
-        avctx->height    = h->backup_height;
1097
-        h->backup_height = -1;
1098
-    }
1099
-    if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
1100
-        avctx->pix_fmt    = h->backup_pix_fmt;
1101
-        h->backup_pix_fmt = AV_PIX_FMT_NONE;
1102
-    }
1103
-
1104
-    ff_h264_unref_picture(h, &h->last_pic_for_ec);
1105
-
1106
-    /* end of stream, output what is still in the buffers */
1107
-    if (buf_size == 0) {
1108
- out:
1109
-
1110
-        h->cur_pic_ptr = NULL;
1111
-        h->first_field = 0;
1112
-
1113
-        // FIXME factorize this with the output code below
1114
-        out     = h->delayed_pic[0];
1115
-        out_idx = 0;
1116
-        for (i = 1;
1117
-             h->delayed_pic[i] &&
1118
-             !h->delayed_pic[i]->f->key_frame &&
1119
-             !h->delayed_pic[i]->mmco_reset;
1120
-             i++)
1121
-            if (h->delayed_pic[i]->poc < out->poc) {
1122
-                out     = h->delayed_pic[i];
1123
-                out_idx = i;
1124
-            }
1125
-
1126
-        for (i = out_idx; h->delayed_pic[i]; i++)
1127
-            h->delayed_pic[i] = h->delayed_pic[i + 1];
1128
-
1129
-        if (out) {
1130
-            out->reference &= ~DELAYED_PIC_REF;
1131
-            ret = output_frame(h, pict, out);
1132
-            if (ret < 0)
1133
-                return ret;
1134
-            *got_frame = 1;
1135
-        }
1136
-
1137
-        return buf_index;
1138
-    }
1139
-    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1140
-        int side_size;
1141
-        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1142
-        if (is_extra(side, side_size))
1143
-            ff_h264_decode_extradata(side, side_size,
1144
-                                     &h->ps, &h->is_avc, &h->nal_length_size,
1145
-                                     avctx->err_recognition, avctx);
1146
-    }
1147
-    if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1148
-        if (is_extra(buf, buf_size))
1149
-            return ff_h264_decode_extradata(buf, buf_size,
1150
-                                            &h->ps, &h->is_avc, &h->nal_length_size,
1151
-                                            avctx->err_recognition, avctx);
1152
-    }
1153
-
1154
-    buf_index = decode_nal_units(h, buf, buf_size);
1155
-    if (buf_index < 0)
1156
-        return AVERROR_INVALIDDATA;
1157
-
1158
-    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1159
-        av_assert0(buf_index <= buf_size);
1160
-        goto out;
1161
-    }
1162
-
1163
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1164
-        if (avctx->skip_frame >= AVDISCARD_NONREF ||
1165
-            buf_size >= 4 && !memcmp("Q264", buf, 4))
1166
-            return buf_size;
1167
-        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1168
-        return AVERROR_INVALIDDATA;
1169
-    }
1170
-
1171
-    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1172
-        (h->mb_y >= h->mb_height && h->mb_height)) {
1173
-        if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
1174
-            decode_postinit(h, 1);
1175
-
1176
-        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1177
-            return ret;
1178
-
1179
-        /* Wait for second field. */
1180
-        *got_frame = 0;
1181
-        if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
1182
-                                   (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
1183
-                                   h->next_output_pic->recovered)) {
1184
-            if (!h->next_output_pic->recovered)
1185
-                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
1186
-
1187
-            if (!h->avctx->hwaccel &&
1188
-                 (h->next_output_pic->field_poc[0] == INT_MAX ||
1189
-                  h->next_output_pic->field_poc[1] == INT_MAX)
1190
-            ) {
1191
-                int p;
1192
-                AVFrame *f = h->next_output_pic->f;
1193
-                int field = h->next_output_pic->field_poc[0] == INT_MAX;
1194
-                uint8_t *dst_data[4];
1195
-                int linesizes[4];
1196
-                const uint8_t *src_data[4];
1197
-
1198
-                av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1199
-
1200
-                for (p = 0; p<4; p++) {
1201
-                    dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1202
-                    src_data[p] = f->data[p] +  field   *f->linesize[p];
1203
-                    linesizes[p] = 2*f->linesize[p];
1204
-                }
1205
-
1206
-                av_image_copy(dst_data, linesizes, src_data, linesizes,
1207
-                              f->format, f->width, f->height>>1);
1208
-            }
1209
-
1210
-            ret = output_frame(h, pict, h->next_output_pic);
1211
-            if (ret < 0)
1212
-                return ret;
1213
-            *got_frame = 1;
1214
-            if (CONFIG_MPEGVIDEO) {
1215
-                ff_print_debug_info2(h->avctx, pict, NULL,
1216
-                                    h->next_output_pic->mb_type,
1217
-                                    h->next_output_pic->qscale_table,
1218
-                                    h->next_output_pic->motion_val,
1219
-                                    NULL,
1220
-                                    h->mb_width, h->mb_height, h->mb_stride, 1);
1221
-            }
1222
-        }
1223
-    }
1224
-
1225
-    av_assert0(pict->buf[0] || !*got_frame);
1226
-
1227
-    ff_h264_unref_picture(h, &h->last_pic_for_ec);
1228
-
1229
-    return get_consumed_bytes(buf_index, buf_size);
1230
-}
1231
-
1232
-#define OFFSET(x) offsetof(H264Context, x)
1233
-#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1234
-static const AVOption h264_options[] = {
1235
-    {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
1236
-    {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1237
-    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1238
-    { NULL },
1239
-};
1240
-
1241
-static const AVClass h264_class = {
1242
-    .class_name = "H264 Decoder",
1243
-    .item_name  = av_default_item_name,
1244
-    .option     = h264_options,
1245
-    .version    = LIBAVUTIL_VERSION_INT,
1246
-};
1247
-
1248
-AVCodec ff_h264_decoder = {
1249
-    .name                  = "h264",
1250
-    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1251
-    .type                  = AVMEDIA_TYPE_VIDEO,
1252
-    .id                    = AV_CODEC_ID_H264,
1253
-    .priv_data_size        = sizeof(H264Context),
1254
-    .init                  = ff_h264_decode_init,
1255
-    .close                 = h264_decode_end,
1256
-    .decode                = h264_decode_frame,
1257
-    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1258
-                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1259
-                             AV_CODEC_CAP_FRAME_THREADS,
1260
-    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
1261
-    .flush                 = flush_dpb,
1262
-    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1263
-    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1264
-    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1265
-    .priv_class            = &h264_class,
1266
-};
1267
-
1268
-#if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
1269
-static const AVClass h264_vdpau_class = {
1270
-    .class_name = "H264 VDPAU Decoder",
1271
-    .item_name  = av_default_item_name,
1272
-    .option     = h264_options,
1273
-    .version    = LIBAVUTIL_VERSION_INT,
1274
-};
1275
-
1276
-AVCodec ff_h264_vdpau_decoder = {
1277
-    .name           = "h264_vdpau",
1278
-    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
1279
-    .type           = AVMEDIA_TYPE_VIDEO,
1280
-    .id             = AV_CODEC_ID_H264,
1281
-    .priv_data_size = sizeof(H264Context),
1282
-    .init           = ff_h264_decode_init,
1283
-    .close          = h264_decode_end,
1284
-    .decode         = h264_decode_frame,
1285
-    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
1286
-    .flush          = flush_dpb,
1287
-    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
1288
-                                                     AV_PIX_FMT_NONE},
1289
-    .profiles       = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1290
-    .priv_class     = &h264_vdpau_class,
1291
-};
1292
-#endif
1293 1
deleted file mode 100644
... ...
@@ -1,1008 +0,0 @@
1
-/*
2
- * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3
- * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4
- *
5
- * This file is part of FFmpeg.
6
- *
7
- * FFmpeg is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU Lesser General Public
9
- * License as published by the Free Software Foundation; either
10
- * version 2.1 of the License, or (at your option) any later version.
11
- *
12
- * FFmpeg is distributed in the hope that it will be useful,
13
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
- * Lesser General Public License for more details.
16
- *
17
- * You should have received a copy of the GNU Lesser General Public
18
- * License along with FFmpeg; if not, write to the Free Software
19
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
- */
21
-
22
-/**
23
- * @file
24
- * H.264 / AVC / MPEG-4 part10 codec.
25
- * @author Michael Niedermayer <michaelni@gmx.at>
26
- */
27
-
28
-#ifndef AVCODEC_H264_H
29
-#define AVCODEC_H264_H
30
-
31
-#include "libavutil/buffer.h"
32
-#include "libavutil/intreadwrite.h"
33
-#include "libavutil/thread.h"
34
-
35
-#include "cabac.h"
36
-#include "error_resilience.h"
37
-#include "h264_parse.h"
38
-#include "h264_sei.h"
39
-#include "h2645_parse.h"
40
-#include "h264chroma.h"
41
-#include "h264dsp.h"
42
-#include "h264pred.h"
43
-#include "h264qpel.h"
44
-#include "internal.h"
45
-#include "mpegutils.h"
46
-#include "parser.h"
47
-#include "qpeldsp.h"
48
-#include "rectangle.h"
49
-#include "videodsp.h"
50
-
51
-#define H264_MAX_PICTURE_COUNT 36
52
-
53
-#define MAX_SPS_COUNT          32
54
-#define MAX_PPS_COUNT         256
55
-
56
-#define MAX_MMCO_COUNT         66
57
-
58
-#define MAX_DELAYED_PIC_COUNT  16
59
-
60
-#define MAX_MBPAIR_SIZE (256*1024) // a tighter bound could be calculated if someone cares about a few bytes
61
-
62
-/* Compiling in interlaced support reduces the speed
63
- * of progressive decoding by about 2%. */
64
-#define ALLOW_INTERLACE
65
-
66
-#define FMO 0
67
-
68
-/**
69
- * The maximum number of slices supported by the decoder.
70
- * must be a power of 2
71
- */
72
-#define MAX_SLICES 32
73
-
74
-#ifdef ALLOW_INTERLACE
75
-#define MB_MBAFF(h)    (h)->mb_mbaff
76
-#define MB_FIELD(sl)  (sl)->mb_field_decoding_flag
77
-#define FRAME_MBAFF(h) (h)->mb_aff_frame
78
-#define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
79
-#define LEFT_MBS 2
80
-#define LTOP     0
81
-#define LBOT     1
82
-#define LEFT(i)  (i)
83
-#else
84
-#define MB_MBAFF(h)      0
85
-#define MB_FIELD(sl)     0
86
-#define FRAME_MBAFF(h)   0
87
-#define FIELD_PICTURE(h) 0
88
-#undef  IS_INTERLACED
89
-#define IS_INTERLACED(mb_type) 0
90
-#define LEFT_MBS 1
91
-#define LTOP     0
92
-#define LBOT     0
93
-#define LEFT(i)  0
94
-#endif
95
-#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
96
-
97
-#ifndef CABAC
98
-#define CABAC(h) (h)->ps.pps->cabac
99
-#endif
100
-
101
-#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
102
-#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
103
-#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
104
-
105
-#define EXTENDED_SAR       255
106
-
107
-#define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty but it fits in 16 bit
108
-#define MB_TYPE_8x8DCT     0x01000000
109
-#define IS_REF0(a)         ((a) & MB_TYPE_REF0)
110
-#define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)
111
-
112
-#define QP_MAX_NUM (51 + 6*6)           // The maximum supported qp
113
-
114
-/* NAL unit types */
115
-enum {
116
-    NAL_SLICE           = 1,
117
-    NAL_DPA             = 2,
118
-    NAL_DPB             = 3,
119
-    NAL_DPC             = 4,
120
-    NAL_IDR_SLICE       = 5,
121
-    NAL_SEI             = 6,
122
-    NAL_SPS             = 7,
123
-    NAL_PPS             = 8,
124
-    NAL_AUD             = 9,
125
-    NAL_END_SEQUENCE    = 10,
126
-    NAL_END_STREAM      = 11,
127
-    NAL_FILLER_DATA     = 12,
128
-    NAL_SPS_EXT         = 13,
129
-    NAL_AUXILIARY_SLICE = 19,
130
-};
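These values come straight from the 5-bit nal_unit_type field of the one-byte NAL unit header; the adjacent 2 bits carry nal_ref_idc. A minimal sketch of that mapping, with invented helper names (the decoder itself obtains these fields through the h2645_parse layer):

/* Illustrative only: decompose a raw NAL unit header byte. */
static inline int example_nal_unit_type(uint8_t header) { return header & 0x1f; }        /* e.g. 7 == NAL_SPS */
static inline int example_nal_ref_idc(uint8_t header)   { return (header >> 5) & 0x3; }  /* 0 == non-reference */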
131
-
132
-/**
133
- * Sequence parameter set
134
- */
135
-typedef struct SPS {
136
-    unsigned int sps_id;
137
-    int profile_idc;
138
-    int level_idc;
139
-    int chroma_format_idc;
140
-    int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
141
-    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
142
-    int poc_type;                      ///< pic_order_cnt_type
143
-    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
144
-    int delta_pic_order_always_zero_flag;
145
-    int offset_for_non_ref_pic;
146
-    int offset_for_top_to_bottom_field;
147
-    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
148
-    int ref_frame_count;               ///< num_ref_frames
149
-    int gaps_in_frame_num_allowed_flag;
150
-    int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
151
-    int mb_height;                     ///< pic_height_in_map_units_minus1 + 1
152
-    int frame_mbs_only_flag;
153
-    int mb_aff;                        ///< mb_adaptive_frame_field_flag
154
-    int direct_8x8_inference_flag;
155
-    int crop;                          ///< frame_cropping_flag
156
-
157
-    /* those 4 are already in luma samples */
158
-    unsigned int crop_left;            ///< frame_cropping_rect_left_offset
159
-    unsigned int crop_right;           ///< frame_cropping_rect_right_offset
160
-    unsigned int crop_top;             ///< frame_cropping_rect_top_offset
161
-    unsigned int crop_bottom;          ///< frame_cropping_rect_bottom_offset
162
-    int vui_parameters_present_flag;
163
-    AVRational sar;
164
-    int video_signal_type_present_flag;
165
-    int full_range;
166
-    int colour_description_present_flag;
167
-    enum AVColorPrimaries color_primaries;
168
-    enum AVColorTransferCharacteristic color_trc;
169
-    enum AVColorSpace colorspace;
170
-    int timing_info_present_flag;
171
-    uint32_t num_units_in_tick;
172
-    uint32_t time_scale;
173
-    int fixed_frame_rate_flag;
174
-    short offset_for_ref_frame[256]; // FIXME: dynamic allocation?
175
-    int bitstream_restriction_flag;
176
-    int num_reorder_frames;
177
-    int scaling_matrix_present;
178
-    uint8_t scaling_matrix4[6][16];
179
-    uint8_t scaling_matrix8[6][64];
180
-    int nal_hrd_parameters_present_flag;
181
-    int vcl_hrd_parameters_present_flag;
182
-    int pic_struct_present_flag;
183
-    int time_offset_length;
184
-    int cpb_cnt;                          ///< See H.264 E.1.2
185
-    int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
186
-    int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
187
-    int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
188
-    int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
189
-    int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
190
-    int residual_color_transform_flag;    ///< residual_colour_transform_flag
191
-    int constraint_set_flags;             ///< constraint_set[0-3]_flag
192
-    uint8_t data[4096];
193
-    size_t data_size;
194
-} SPS;
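Most of the "_minus" names in the comments above mean the stored value already has the offset applied; what the decoder actually needs are a few derived quantities. A minimal sketch under that assumption (helper name invented for illustration):

/* Illustrative only: common values derived from a parsed SPS. */
static inline void example_sps_derived(const SPS *sps, int *coded_width,
                                       int *coded_height, int *max_frame_num)
{
    *coded_width   = 16 * sps->mb_width;
    /* mb_height counts map units; field-coded sequences have two fields per unit */
    *coded_height  = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
    *max_frame_num = 1 << sps->log2_max_frame_num;  /* frame_num wraps at this value */
}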
195
-
196
-/**
197
- * Picture parameter set
198
- */
199
-typedef struct PPS {
200
-    unsigned int sps_id;
201
-    int cabac;                  ///< entropy_coding_mode_flag
202
-    int pic_order_present;      ///< pic_order_present_flag
203
-    int slice_group_count;      ///< num_slice_groups_minus1 + 1
204
-    int mb_slice_group_map_type;
205
-    unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
206
-    int weighted_pred;          ///< weighted_pred_flag
207
-    int weighted_bipred_idc;
208
-    int init_qp;                ///< pic_init_qp_minus26 + 26
209
-    int init_qs;                ///< pic_init_qs_minus26 + 26
210
-    int chroma_qp_index_offset[2];
211
-    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
212
-    int constrained_intra_pred;     ///< constrained_intra_pred_flag
213
-    int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
214
-    int transform_8x8_mode;         ///< transform_8x8_mode_flag
215
-    uint8_t scaling_matrix4[6][16];
216
-    uint8_t scaling_matrix8[6][64];
217
-    uint8_t chroma_qp_table[2][QP_MAX_NUM+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
218
-    int chroma_qp_diff;
219
-    uint8_t data[4096];
220
-    size_t data_size;
221
-
222
-    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
223
-    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
224
-    uint32_t(*dequant4_coeff[6])[16];
225
-    uint32_t(*dequant8_coeff[6])[64];
226
-} PPS;
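Two of the fields above illustrate the same convention: init_qp already contains the +26 offset, so the slice QP is just init_qp plus the per-slice delta, and chroma_qp_table maps a luma QP directly to a chroma QP with chroma_qp_index_offset folded in. A minimal sketch (helper names invented; slice_qp_delta is assumed to come from the slice header):

/* Illustrative only: how a slice QP and its chroma counterpart are obtained. */
static inline int example_slice_qp(const PPS *pps, int slice_qp_delta)
{
    return pps->init_qp + slice_qp_delta;    /* == 26 + pic_init_qp_minus26 + slice_qp_delta */
}

static inline int example_chroma_qp(const PPS *pps, int luma_qp)
{
    return pps->chroma_qp_table[0][luma_qp]; /* index 0 selects the Cb offset */
}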
227
-
228
-typedef struct H264ParamSets {
229
-    AVBufferRef *sps_list[MAX_SPS_COUNT];
230
-    AVBufferRef *pps_list[MAX_PPS_COUNT];
231
-
232
-    AVBufferRef *pps_ref;
233
-    AVBufferRef *sps_ref;
234
-    /* currently active parameter sets */
235
-    const PPS *pps;
236
-    const SPS *sps;
237
-} H264ParamSets;
238
-
239
-/**
240
- * Memory management control operation opcode.
241
- */
242
-typedef enum MMCOOpcode {
243
-    MMCO_END = 0,
244
-    MMCO_SHORT2UNUSED,
245
-    MMCO_LONG2UNUSED,
246
-    MMCO_SHORT2LONG,
247
-    MMCO_SET_MAX_LONG,
248
-    MMCO_RESET,
249
-    MMCO_LONG,
250
-} MMCOOpcode;
251
-
252
-/**
253
- * Memory management control operation.
254
- */
255
-typedef struct MMCO {
256
-    MMCOOpcode opcode;
257
-    int short_pic_num;  ///< pic_num without wrapping (pic_num & max_pic_num)
258
-    int long_arg;       ///< index, pic_num, or num long refs depending on opcode
259
-} MMCO;
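As an example of how such an operation is filled in: marking a short-term picture as unused is signalled in the slice header as difference_of_pic_nums_minus1 relative to the current picture, and the subtraction wraps modulo max_pic_num (which is a power of two). A hypothetical sketch, with the free-standing helper and its parameter names invented for illustration:

/* Illustrative only: build an MMCO that unreferences a short-term picture. */
static inline MMCO example_mmco_short2unused(int curr_pic_num, int max_pic_num,
                                             int difference_of_pic_nums_minus1)
{
    MMCO op = {
        .opcode        = MMCO_SHORT2UNUSED,
        .short_pic_num = (curr_pic_num - (difference_of_pic_nums_minus1 + 1))
                         & (max_pic_num - 1),  /* wrap into pic_num range */
        .long_arg      = 0,
    };
    return op;
}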
260
-
261
-typedef struct H264Picture {
262
-    AVFrame *f;
263
-    ThreadFrame tf;
264
-
265
-    AVBufferRef *qscale_table_buf;
266
-    int8_t *qscale_table;
267
-
268
-    AVBufferRef *motion_val_buf[2];
269
-    int16_t (*motion_val[2])[2];
270
-
271
-    AVBufferRef *mb_type_buf;
272
-    uint32_t *mb_type;
273
-
274
-    AVBufferRef *hwaccel_priv_buf;
275
-    void *hwaccel_picture_private; ///< hardware accelerator private data
276
-
277
-    AVBufferRef *ref_index_buf[2];
278
-    int8_t *ref_index[2];
279
-
280
-    int field_poc[2];       ///< top/bottom POC
281
-    int poc;                ///< frame POC
282
-    int frame_num;          ///< frame_num (raw frame_num from slice header)
283
-    int mmco_reset;         /**< MMCO_RESET sets this to 1. Reordering code must
284
-                                 not mix pictures before and after MMCO_RESET. */
285
-    int pic_id;             /**< pic_num (short -> no wrap version of pic_num,
286
-                                 pic_num & max_pic_num; long -> long_pic_num) */
287
-    int long_ref;           ///< 1->long term reference 0->short term reference
288
-    int ref_poc[2][2][32];  ///< POCs of the frames/fields used as reference (FIXME need per slice)
289
-    int ref_count[2][2];    ///< number of entries in ref_poc         (FIXME need per slice)
290
-    int mbaff;              ///< 1 -> MBAFF frame 0-> not MBAFF
291
-    int field_picture;      ///< whether or not picture was encoded in separate fields
292
-
293
-    int reference;
294
-    int recovered;          ///< picture at IDR or recovery point + recovery count
295
-    int invalid_gap;
296
-    int sei_recovery_frame_cnt;
297
-
298
-    int crop;
299
-    int crop_left;
300
-    int crop_top;
301
-} H264Picture;
302
-
303
-typedef struct H264Ref {
304
-    uint8_t *data[3];
305
-    int linesize[3];
306
-
307
-    int reference;
308
-    int poc;
309
-    int pic_id;
310
-
311
-    H264Picture *parent;
312
-} H264Ref;
313
-
314
-typedef struct H264SliceContext {
315
-    struct H264Context *h264;
316
-    GetBitContext gb;
317
-    ERContext er;
318
-
319
-    int slice_num;
320
-    int slice_type;
321
-    int slice_type_nos;         ///< switching-free slice type (SI/SP are remapped to I/P)
322
-    int slice_type_fixed;
323
-
324
-    int qscale;
325
-    int chroma_qp[2];   // QPc
326
-    int qp_thresh;      ///< QP threshold to skip loopfilter
327
-    int last_qscale_diff;
328
-
329
-    // deblock
330
-    int deblocking_filter;          ///< disable_deblocking_filter_idc with the values 0 and 1 swapped
331
-    int slice_alpha_c0_offset;
332
-    int slice_beta_offset;
333
-
334
-    H264PredWeightTable pwt;
335
-
336
-    int prev_mb_skipped;
337
-    int next_mb_skipped;
338
-
339
-    int chroma_pred_mode;
340
-    int intra16x16_pred_mode;
341
-
342
-    int8_t intra4x4_pred_mode_cache[5 * 8];
343
-    int8_t(*intra4x4_pred_mode);
344
-
345
-    int topleft_mb_xy;
346
-    int top_mb_xy;
347
-    int topright_mb_xy;
348
-    int left_mb_xy[LEFT_MBS];
349
-
350
-    int topleft_type;
351
-    int top_type;
352
-    int topright_type;
353
-    int left_type[LEFT_MBS];
354
-
355
-    const uint8_t *left_block;
356
-    int topleft_partition;
357
-
358
-    unsigned int topleft_samples_available;
359
-    unsigned int top_samples_available;
360
-    unsigned int topright_samples_available;
361
-    unsigned int left_samples_available;
362
-
363
-    ptrdiff_t linesize, uvlinesize;
364
-    ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
365
-    ptrdiff_t mb_uvlinesize;
366
-
367
-    int mb_x, mb_y;
368
-    int mb_xy;
369
-    int resync_mb_x;
370
-    int resync_mb_y;
371
-    unsigned int first_mb_addr;
372
-    // index of the first MB of the next slice
373
-    int next_slice_idx;
374
-    int mb_skip_run;
375
-    int is_complex;
376
-
377
-    int picture_structure;
378
-    int mb_field_decoding_flag;
379
-    int mb_mbaff;               ///< mb_aff_frame && mb_field_decoding_flag
380
-
381
-    int redundant_pic_count;
382
-
383
-    /**
384
-     * number of neighbors (top and/or left) that used 8x8 dct
385
-     */
386
-    int neighbor_transform_size;
387
-
388
-    int direct_spatial_mv_pred;
389
-    int col_parity;
390
-    int col_fieldoff;
391
-
392
-    int cbp;
393
-    int top_cbp;
394
-    int left_cbp;
395
-
396
-    int dist_scale_factor[32];
397
-    int dist_scale_factor_field[2][32];
398
-    int map_col_to_list0[2][16 + 32];
399
-    int map_col_to_list0_field[2][2][16 + 32];
400
-
401
-    /**
402
-     * num_ref_idx_l0/1_active_minus1 + 1
403
-     */
404
-    unsigned int ref_count[2];          ///< counts frames or fields, depending on current mb mode
405
-    unsigned int list_count;
406
-    H264Ref ref_list[2][48];        /**< 0..15: frame refs, 16..47: mbaff field refs.
407
-                                         *   Reordered version of default_ref_list
408
-                                         *   according to picture reordering in slice header */
409
-    struct {
410
-        uint8_t op;
411
-        uint32_t val;
412
-    } ref_modifications[2][32];
413
-    int nb_ref_modifications[2];
414
-
415
-    unsigned int pps_id;
416
-
417
-    const uint8_t *intra_pcm_ptr;
418
-    int16_t *dc_val_base;
419
-
420
-    uint8_t *bipred_scratchpad;
421
-    uint8_t *edge_emu_buffer;
422
-    uint8_t (*top_borders[2])[(16 * 3) * 2];
423
-    int bipred_scratchpad_allocated;
424
-    int edge_emu_buffer_allocated;
425
-    int top_borders_allocated[2];
426
-
427
-    /**
428
-     * non zero coeff count cache.
429
-     * is 64 if not available.
430
-     */
431
-    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
432
-
433
-    /**
434
-     * Motion vector cache.
435
-     */
436
-    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
437
-    DECLARE_ALIGNED(8,  int8_t, ref_cache)[2][5 * 8];
438
-    DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
439
-    uint8_t direct_cache[5 * 8];
440
-
441
-    DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
442
-
443
-    ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
444
-    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
445
-    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
446
-    ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either
447
-    ///< check that i is not too large or ensure that there is some unused stuff after mb
448
-    int16_t mb_padding[256 * 2];
449
-
450
-    uint8_t (*mvd_table[2])[2];
451
-
452
-    /**
453
-     * Cabac
454
-     */
455
-    CABACContext cabac;
456
-    uint8_t cabac_state[1024];
457
-    int cabac_init_idc;
458
-
459
-    MMCO mmco[MAX_MMCO_COUNT];
460
-    int  nb_mmco;
461
-    int explicit_ref_marking;
462
-
463
-    int frame_num;
464
-    int poc_lsb;
465
-    int delta_poc_bottom;
466
-    int delta_poc[2];
467
-    int curr_pic_num;
468
-    int max_pic_num;
469
-} H264SliceContext;
470
-
471
-/**
472
- * H264Context
473
- */
474
-typedef struct H264Context {
475
-    const AVClass *class;
476
-    AVCodecContext *avctx;
477
-    VideoDSPContext vdsp;
478
-    H264DSPContext h264dsp;
479
-    H264ChromaContext h264chroma;
480
-    H264QpelContext h264qpel;
481
-
482
-    H264Picture DPB[H264_MAX_PICTURE_COUNT];
483
-    H264Picture *cur_pic_ptr;
484
-    H264Picture cur_pic;
485
-    H264Picture last_pic_for_ec;
486
-
487
-    H264SliceContext *slice_ctx;
488
-    int            nb_slice_ctx;
489
-
490
-    H2645Packet pkt;
491
-
492
-    int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264
493
-
494
-    /* coded dimensions -- 16 * mb w/h */
495
-    int width, height;
496
-    int chroma_x_shift, chroma_y_shift;
497
-
498
-    /**
499
-     * Backup frame properties: needed, because they can be different
500
-     * between returned frame and last decoded frame.
501
-     **/
502
-    int backup_width;
503
-    int backup_height;
504
-    enum AVPixelFormat backup_pix_fmt;
505
-
506
-    int droppable;
507
-    int coded_picture_number;
508
-
509
-    int context_initialized;
510
-    int flags;
511
-    int workaround_bugs;
512
-    /* Set when slice threading is used and at least one slice uses deblocking
513
-     * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
514
-     * during normal MB decoding and execute it serially at the end.
515
-     */
516
-    int postpone_filter;
517
-
518
-    int8_t(*intra4x4_pred_mode);
519
-    H264PredContext hpc;
520
-
521
-    uint8_t (*non_zero_count)[48];
522
-
523
-#define LIST_NOT_USED -1 // FIXME rename?
524
-#define PART_NOT_AVAILABLE -2
525
-
526
-    /**
527
-     * block_offset[ 0..23] for frame macroblocks
528
-     * block_offset[24..47] for field macroblocks
529
-     */
530
-    int block_offset[2 * (16 * 3)];
531
-
532
-    uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
533
-    uint32_t *mb2br_xy;
534
-    int b_stride;       // FIXME use s->b4_stride
535
-
536
-    uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
537
-
538
-    // interlacing specific flags
539
-    int mb_aff_frame;
540
-    int picture_structure;
541
-    int first_field;
542
-
543
-    uint8_t *list_counts;               ///< Array of list_count per MB specifying the slice type
544
-
545
-    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
546
-    uint16_t *cbp_table;
547
-
548
-    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
549
-    uint8_t *chroma_pred_mode_table;
550
-    uint8_t (*mvd_table[2])[2];
551
-    uint8_t *direct_table;
552
-
553
-    uint8_t zigzag_scan[16];
554
-    uint8_t zigzag_scan8x8[64];
555
-    uint8_t zigzag_scan8x8_cavlc[64];
556
-    uint8_t field_scan[16];
557
-    uint8_t field_scan8x8[64];
558
-    uint8_t field_scan8x8_cavlc[64];
559
-    uint8_t zigzag_scan_q0[16];
560
-    uint8_t zigzag_scan8x8_q0[64];
561
-    uint8_t zigzag_scan8x8_cavlc_q0[64];
562
-    uint8_t field_scan_q0[16];
563
-    uint8_t field_scan8x8_q0[64];
564
-    uint8_t field_scan8x8_cavlc_q0[64];
565
-
566
-    int mb_y;
567
-    int mb_height, mb_width;
568
-    int mb_stride;
569
-    int mb_num;
570
-
571
-    // =============================================================
572
-    // Things below are not used in the MB decoding loop or other inner code
573
-
574
-    int nal_ref_idc;
575
-    int nal_unit_type;
576
-
577
-    /**
578
-     * Used to parse AVC variant of H.264
579
-     */
580
-    int is_avc;           ///< this flag is != 0 if codec is avc1
581
-    int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)
582
-
583
-    int bit_depth_luma;         ///< luma bit depth from sps to detect changes
584
-    int chroma_format_idc;      ///< chroma format from sps to detect changes
585
-
586
-    H264ParamSets ps;
587
-
588
-    uint16_t *slice_table_base;
589
-
590
-    H264POCContext poc;
591
-
592
-    H264Ref default_ref[2];
593
-    H264Picture *short_ref[32];
594
-    H264Picture *long_ref[32];
595
-    H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
596
-    int last_pocs[MAX_DELAYED_PIC_COUNT];
597
-    H264Picture *next_output_pic;
598
-    int next_outputed_poc;
599
-
600
-    /**
601
-     * memory management control operations buffer.
602
-     */
603
-    MMCO mmco[MAX_MMCO_COUNT];
604
-    int  nb_mmco;
605
-    int mmco_reset;
606
-    int explicit_ref_marking;
607
-
608
-    int long_ref_count;     ///< number of actual long term references
609
-    int short_ref_count;    ///< number of actual short term references
610
-
611
-    /**
612
-     * @name Members for slice based multithreading
613
-     * @{
614
-     */
615
-    /**
616
-     * current slice number, used to initialize slice_num of each thread/context
617
-     */
618
-    int current_slice;
619
-
620
-    /**
621
-     * Max number of threads / contexts.
622
-     * This is equal to AVCodecContext.thread_count unless
623
-     * multithreaded decoding is impossible, in which case it is
624
-     * reduced to 1.
625
-     */
626
-    int max_contexts;
627
-
628
-    /**
629
-     *  1 if the single thread fallback warning has already been
630
-     *  displayed, 0 otherwise.
631
-     */
632
-    int single_decode_warning;
633
-
634
-    /** @} */
635
-
636
-    /**
637
-     * Complement sei_pic_struct
638
-     * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
639
-     * However, soft telecined frames may have these values.
640
- * This is used in an attempt to flag soft-telecined content as progressive.
641
-     */
642
-    int prev_interlaced_frame;
643
-
644
-    /**
645
- * Whether the SEI recovery points look valid.
646
-     */
647
-    int valid_recovery_point;
648
-
649
-    /**
650
-     * recovery_frame is the frame_num at which the next frame should
651
-     * be fully constructed.
652
-     *
653
-     * Set to -1 when not expecting a recovery point.
654
-     */
655
-    int recovery_frame;
656
-
657
-/**
658
- * We have seen an IDR, so all the following frames in coded order are correctly
659
- * decodable.
660
- */
661
-#define FRAME_RECOVERED_IDR  (1 << 0)
662
-/**
663
- * Sufficient number of frames have been decoded since a SEI recovery point,
664
- * so all the following frames in presentation order are correct.
665
- */
666
-#define FRAME_RECOVERED_SEI  (1 << 1)
667
-
668
-    int frame_recovered;    ///< Initial frame has been completely recovered
669
-
670
-    int has_recovery_point;
671
-
672
-    int missing_fields;
673
-
674
-    /* for frame threading, this is set to 1
675
-     * after finish_setup() has been called, so we cannot modify
676
-     * some context properties (which are supposed to stay constant between
677
-     * slices) anymore */
678
-    int setup_finished;
679
-
680
-    int cur_chroma_format_idc;
681
-    int cur_bit_depth_luma;
682
-    int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
683
-
684
-    int enable_er;
685
-
686
-    H264SEIContext sei;
687
-
688
-    AVBufferPool *qscale_table_pool;
689
-    AVBufferPool *mb_type_pool;
690
-    AVBufferPool *motion_val_pool;
691
-    AVBufferPool *ref_index_pool;
692
-    int ref2frm[MAX_SLICES][2][64];     ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
693
-} H264Context;
694
-
695
-extern const uint16_t ff_h264_mb_sizes[4];
696
-
697
-/**
698
- * Uninit the H264 parameter sets structure.
699
- */
700
-
701
-void ff_h264_ps_uninit(H264ParamSets *ps);
702
-
703
-/**
704
- * Decode SPS
705
- */
706
-int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
707
-                                     H264ParamSets *ps, int ignore_truncation);
708
-
709
-/**
710
- * Decode PPS
711
- */
712
-int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
713
-                                         H264ParamSets *ps, int bit_length);
714
-
715
-/**
716
- * Reconstruct bitstream slice_type.
717
- */
718
-int ff_h264_get_slice_type(const H264SliceContext *sl);
719
-
720
-/**
721
- * Allocate tables.
722
- * Needs width/height to be set.
723
- */
724
-int ff_h264_alloc_tables(H264Context *h);
725
-
726
-int ff_h264_decode_ref_pic_list_reordering(const H264Context *h, H264SliceContext *sl);
727
-int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl);
728
-void ff_h264_remove_all_refs(H264Context *h);
729
-
730
-/**
731
- * Execute the reference picture marking (memory management control operations).
732
- */
733
-int ff_h264_execute_ref_pic_marking(H264Context *h);
734
-
735
-int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl,
736
-                                   GetBitContext *gb);
737
-
738
-void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
739
-int ff_h264_decode_init(AVCodecContext *avctx);
740
-void ff_h264_decode_init_vlc(void);
741
-
742
-/**
743
- * Decode a macroblock
744
- * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
745
- */
746
-int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);
747
-
748
-/**
749
- * Decode a CABAC coded macroblock
750
- * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
751
- */
752
-int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
753
-
754
-void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
755
-
756
-void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
757
-void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
758
-void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
759
-                                int *mb_type);
760
-
761
-void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
762
-                            uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
763
-                            unsigned int linesize, unsigned int uvlinesize);
764
-void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
765
-                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
766
-                       unsigned int linesize, unsigned int uvlinesize);
767
-
768
-/*
769
- * o-o o-o
770
- *  / / /
771
- * o-o o-o
772
- *  ,---'
773
- * o-o o-o
774
- *  / / /
775
- * o-o o-o
776
- */
777
-
778
-/* Scan8 organization:
779
- *    0 1 2 3 4 5 6 7
780
- * 0  DY    y y y y y
781
- * 1        y Y Y Y Y
782
- * 2        y Y Y Y Y
783
- * 3        y Y Y Y Y
784
- * 4        y Y Y Y Y
785
- * 5  DU    u u u u u
786
- * 6        u U U U U
787
- * 7        u U U U U
788
- * 8        u U U U U
789
- * 9        u U U U U
790
- * 10 DV    v v v v v
791
- * 11       v V V V V
792
- * 12       v V V V V
793
- * 13       v V V V V
794
- * 14       v V V V V
795
- * DY/DU/DV are for luma/chroma DC.
796
- */
797
-
798
-#define LUMA_DC_BLOCK_INDEX   48
799
-#define CHROMA_DC_BLOCK_INDEX 49
800
-
801
-// This table must be here because scan8[constant] must be known at compile time
802
-static const uint8_t scan8[16 * 3 + 3] = {
803
-    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
804
-    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
805
-    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
806
-    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
807
-    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
808
-    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
809
-    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
810
-    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
811
-    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
812
-    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
813
-    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
814
-    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
815
-    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
816
-};
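The layout above explains the numbers: each 8-wide row of the per-macroblock caches has a one-entry border on the left and a border row on top, so the left and top neighbours of 4x4 block n are reached by stepping -1 and -8 from scan8[n], which is exactly what pred_intra_mode() below does. A minimal sketch with invented helper names:

/* Illustrative only: neighbour addressing in the 8-wide caches. */
static inline int example_left_cache_idx(int n) { return scan8[n] - 1; } /* scan8[0] == 12 -> 11 */
static inline int example_top_cache_idx(int n)  { return scan8[n] - 8; } /* scan8[0] == 12 -> 4  */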
817
-
818
-static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
819
-{
820
-#if HAVE_BIGENDIAN
821
-    return (b & 0xFFFF) + (a << 16);
822
-#else
823
-    return (a & 0xFFFF) + (b << 16);
824
-#endif
825
-}
826
-
827
-static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
828
-{
829
-#if HAVE_BIGENDIAN
830
-    return (b & 0xFF) + (a << 8);
831
-#else
832
-    return (a & 0xFF) + (b << 8);
833
-#endif
834
-}
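The point of these packers is that, once the packed value is stored with a single 32-bit (or 16-bit) write, the two halves land in memory in the same order on both big- and little-endian hosts; the error-concealment code below uses this to write whole motion vectors into mv_cache at once. A minimal usage sketch (helper name invented):

/* Illustrative only: bundle a motion vector so one store writes both components. */
static inline uint32_t example_pack_mv(int mvx, int mvy)
{
    return pack16to32(mvx, mvy);  /* same call the error concealment path makes */
}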
835
-
836
-/**
837
- * Get the chroma qp.
838
- */
839
-static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
840
-{
841
-    return pps->chroma_qp_table[t][qscale];
842
-}
843
-
844
-/**
845
- * Get the predicted intra4x4 prediction mode.
846
- */
847
-static av_always_inline int pred_intra_mode(const H264Context *h,
848
-                                            H264SliceContext *sl, int n)
849
-{
850
-    const int index8 = scan8[n];
851
-    const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
852
-    const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
853
-    const int min    = FFMIN(left, top);
854
-
855
-    ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
856
-
857
-    if (min < 0)
858
-        return DC_PRED;
859
-    else
860
-        return min;
861
-}
862
-
863
-static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
864
-                                                        H264SliceContext *sl)
865
-{
866
-    int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
867
-    int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
868
-
869
-    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
870
-    i4x4[4] = i4x4_cache[7 + 8 * 3];
871
-    i4x4[5] = i4x4_cache[7 + 8 * 2];
872
-    i4x4[6] = i4x4_cache[7 + 8 * 1];
873
-}
874
-
875
-static av_always_inline void write_back_non_zero_count(const H264Context *h,
876
-                                                       H264SliceContext *sl)
877
-{
878
-    const int mb_xy    = sl->mb_xy;
879
-    uint8_t *nnz       = h->non_zero_count[mb_xy];
880
-    uint8_t *nnz_cache = sl->non_zero_count_cache;
881
-
882
-    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
883
-    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
884
-    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
885
-    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
886
-    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
887
-    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
888
-    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
889
-    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
890
-
891
-    if (!h->chroma_y_shift) {
892
-        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
893
-        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
894
-        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
895
-        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
896
-    }
897
-}
898
-
899
-static av_always_inline void write_back_motion_list(const H264Context *h,
900
-                                                    H264SliceContext *sl,
901
-                                                    int b_stride,
902
-                                                    int b_xy, int b8_xy,
903
-                                                    int mb_type, int list)
904
-{
905
-    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
906
-    int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
907
-    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
908
-    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
909
-    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
910
-    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
911
-    if (CABAC(h)) {
912
-        uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
913
-                                                        : h->mb2br_xy[sl->mb_xy]];
914
-        uint8_t(*mvd_src)[2]  = &sl->mvd_cache[list][scan8[0]];
915
-        if (IS_SKIP(mb_type)) {
916
-            AV_ZERO128(mvd_dst);
917
-        } else {
918
-            AV_COPY64(mvd_dst, mvd_src + 8 * 3);
919
-            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
920
-            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
921
-            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
922
-        }
923
-    }
924
-
925
-    {
926
-        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
927
-        int8_t *ref_cache = sl->ref_cache[list];
928
-        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
929
-        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
930
-        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
931
-        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
932
-    }
933
-}
934
-
935
-static av_always_inline void write_back_motion(const H264Context *h,
936
-                                               H264SliceContext *sl,
937
-                                               int mb_type)
938
-{
939
-    const int b_stride      = h->b_stride;
940
-    const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
941
-    const int b8_xy = 4 * sl->mb_xy;
942
-
943
-    if (USES_LIST(mb_type, 0)) {
944
-        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
945
-    } else {
946
-        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
947
-                       2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
948
-    }
949
-    if (USES_LIST(mb_type, 1))
950
-        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
951
-
952
-    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
953
-        if (IS_8X8(mb_type)) {
954
-            uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
955
-            direct_table[1] = sl->sub_mb_type[1] >> 1;
956
-            direct_table[2] = sl->sub_mb_type[2] >> 1;
957
-            direct_table[3] = sl->sub_mb_type[3] >> 1;
958
-        }
959
-    }
960
-}
961
-
962
-static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
963
-{
964
-    if (h->ps.sps->direct_8x8_inference_flag)
965
-        return !(AV_RN64A(sl->sub_mb_type) &
966
-                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
967
-                  0x0001000100010001ULL));
968
-    else
969
-        return !(AV_RN64A(sl->sub_mb_type) &
970
-                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
971
-                  0x0001000100010001ULL));
972
-}
973
-
974
-static inline int find_start_code(const uint8_t *buf, int buf_size,
975
-                           int buf_index, int next_avc)
976
-{
977
-    uint32_t state = -1;
978
-
979
-    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
980
-
981
-    return FFMIN(buf_index, buf_size);
982
-}
983
-
984
-int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
985
-
986
-int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
987
-void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
988
-
989
-int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
990
-
991
-void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
992
-
993
-int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
994
-                                const H2645NAL *nal);
995
-#define SLICE_SINGLETHREAD 1
996
-#define SLICE_SKIPED 2
997
-
998
-int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
999
-int ff_h264_update_thread_context(AVCodecContext *dst,
1000
-                                  const AVCodecContext *src);
1001
-
1002
-void ff_h264_flush_change(H264Context *h);
1003
-
1004
-void ff_h264_free_tables(H264Context *h);
1005
-
1006
-void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);
1007
-
1008
-#endif /* AVCODEC_H264_H */
... ...
@@ -37,7 +37,7 @@
37 37
 #include "cabac_functions.h"
38 38
 #include "internal.h"
39 39
 #include "avcodec.h"
40
-#include "h264.h"
40
+#include "h264dec.h"
41 41
 #include "h264data.h"
42 42
 #include "h264_mvpred.h"
43 43
 #include "mpegutils.h"
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #include "internal.h"
32 32
 #include "avcodec.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "h264_mvpred.h"
35 35
 #include "h264data.h"
36 36
 #include "golomb.h"
... ...
@@ -27,7 +27,7 @@
27 27
 
28 28
 #include "internal.h"
29 29
 #include "avcodec.h"
30
-#include "h264.h"
30
+#include "h264dec.h"
31 31
 #include "mpegutils.h"
32 32
 #include "rectangle.h"
33 33
 #include "thread.h"
... ...
@@ -29,7 +29,7 @@
29 29
 #include "libavutil/intreadwrite.h"
30 30
 #include "internal.h"
31 31
 #include "avcodec.h"
32
-#include "h264.h"
32
+#include "h264dec.h"
33 33
 #include "mathops.h"
34 34
 #include "mpegutils.h"
35 35
 #include "rectangle.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/common.h"
32 32
 #include "libavutil/intreadwrite.h"
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "qpeldsp.h"
36 36
 #include "thread.h"
37 37
 
... ...
@@ -19,7 +19,7 @@
19 19
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 20
  */
21 21
 
22
-#include "h264.h"
22
+#include "h264dec.h"
23 23
 
24 24
 #undef MCFUNC
25 25
 
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #include "internal.h"
32 32
 #include "avcodec.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "mpegutils.h"
35 35
 #include "libavutil/avassert.h"
36 36
 
... ...
@@ -19,7 +19,7 @@
19 19
 #include "bytestream.h"
20 20
 #include "get_bits.h"
21 21
 #include "golomb.h"
22
-#include "h264.h"
22
+#include "h264dec.h"
23 23
 #include "h264_parse.h"
24 24
 
25 25
 int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps,
... ...
@@ -38,7 +38,7 @@
38 38
 
39 39
 #include "get_bits.h"
40 40
 #include "golomb.h"
41
-#include "h264.h"
41
+#include "h264dec.h"
42 42
 #include "h264_sei.h"
43 43
 #include "h264data.h"
44 44
 #include "internal.h"
... ...
@@ -33,7 +33,7 @@
33 33
 #include "cabac_functions.h"
34 34
 #include "error_resilience.h"
35 35
 #include "avcodec.h"
36
-#include "h264.h"
36
+#include "h264dec.h"
37 37
 #include "h264data.h"
38 38
 #include "h264chroma.h"
39 39
 #include "h264_mvpred.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "internal.h"
32 32
 #include "mathops.h"
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "h264data.h"
36 36
 #include "golomb.h"
37 37
 
... ...
@@ -30,7 +30,7 @@
30 30
 #include "libavutil/avassert.h"
31 31
 #include "internal.h"
32 32
 #include "avcodec.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "golomb.h"
35 35
 #include "mpegutils.h"
36 36
 
... ...
@@ -28,7 +28,7 @@
28 28
 #include "avcodec.h"
29 29
 #include "get_bits.h"
30 30
 #include "golomb.h"
31
-#include "h264.h"
31
+#include "h264dec.h"
32 32
 #include "h264_sei.h"
33 33
 #include "internal.h"
34 34
 
... ...
@@ -35,7 +35,7 @@
35 35
 #include "cabac_functions.h"
36 36
 #include "error_resilience.h"
37 37
 #include "avcodec.h"
38
-#include "h264.h"
38
+#include "h264dec.h"
39 39
 #include "h264data.h"
40 40
 #include "h264chroma.h"
41 41
 #include "h264_mvpred.h"
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/avutil.h"
32 32
 
33 33
 #include "avcodec.h"
34
-#include "h264.h"
34
+#include "h264dec.h"
35 35
 #include "h264data.h"
36 36
 
37 37
 const uint8_t ff_h264_golomb_to_pict_type[5] = {
... ...
@@ -21,7 +21,7 @@
21 21
 
22 22
 #include <stdint.h>
23 23
 
24
-#include "h264.h"
24
+#include "h264dec.h"
25 25
 
26 26
 extern const uint8_t ff_h264_golomb_to_pict_type[5];
27 27
 extern const uint8_t ff_h264_golomb_to_intra4x4_cbp[48];
28 28
new file mode 100644
... ...
@@ -0,0 +1,1292 @@
0
+/*
1
+ * H.26L/H.264/AVC/JVT/14496-10/... decoder
2
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
3
+ *
4
+ * This file is part of FFmpeg.
5
+ *
6
+ * FFmpeg is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * FFmpeg is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with FFmpeg; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * H.264 / AVC / MPEG-4 part10 codec.
24
+ * @author Michael Niedermayer <michaelni@gmx.at>
25
+ */
26
+
27
+#define UNCHECKED_BITSTREAM_READER 1
28
+
29
+#include "libavutil/avassert.h"
30
+#include "libavutil/display.h"
31
+#include "libavutil/imgutils.h"
32
+#include "libavutil/opt.h"
33
+#include "libavutil/stereo3d.h"
34
+#include "libavutil/timer.h"
35
+#include "internal.h"
36
+#include "bytestream.h"
37
+#include "cabac.h"
38
+#include "cabac_functions.h"
39
+#include "error_resilience.h"
40
+#include "avcodec.h"
41
+#include "h264dec.h"
42
+#include "h2645_parse.h"
43
+#include "h264data.h"
44
+#include "h264chroma.h"
45
+#include "h264_mvpred.h"
46
+#include "golomb.h"
47
+#include "mathops.h"
48
+#include "me_cmp.h"
49
+#include "mpegutils.h"
50
+#include "profiles.h"
51
+#include "rectangle.h"
52
+#include "thread.h"
53
+#include "vdpau_compat.h"
54
+
55
+static int h264_decode_end(AVCodecContext *avctx);
56
+
57
+const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
58
+
59
+int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
60
+{
61
+    H264Context *h = avctx->priv_data;
62
+    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
63
+}
64
+
65
+static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
66
+                              int (*mv)[2][4][2],
67
+                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
68
+{
69
+    H264Context *h = opaque;
70
+    H264SliceContext *sl = &h->slice_ctx[0];
71
+
72
+    sl->mb_x = mb_x;
73
+    sl->mb_y = mb_y;
74
+    sl->mb_xy = mb_x + mb_y * h->mb_stride;
75
+    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
76
+    av_assert1(ref >= 0);
77
+    /* FIXME: It is possible albeit uncommon that slice references
78
+     * differ between slices. We take the easy approach and ignore
79
+     * it for now. If this turns out to have any relevance in
80
+     * practice then correct remapping should be added. */
81
+    if (ref >= sl->ref_count[0])
82
+        ref = 0;
83
+    if (!sl->ref_list[0][ref].data[0]) {
84
+        av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
85
+        ref = 0;
86
+    }
87
+    if ((sl->ref_list[0][ref].reference&3) != 3) {
88
+        av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
89
+        return;
90
+    }
91
+    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
92
+                   2, 2, 2, ref, 1);
93
+    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
94
+    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
95
+                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
96
+    sl->mb_mbaff =
97
+    sl->mb_field_decoding_flag = 0;
98
+    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
99
+}
100
+
101
+void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
102
+                             int y, int height)
103
+{
104
+    AVCodecContext *avctx = h->avctx;
105
+    const AVFrame   *src  = h->cur_pic.f;
106
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
107
+    int vshift = desc->log2_chroma_h;
108
+    const int field_pic = h->picture_structure != PICT_FRAME;
109
+    if (field_pic) {
110
+        height <<= 1;
111
+        y      <<= 1;
112
+    }
113
+
114
+    height = FFMIN(height, avctx->height - y);
115
+
116
+    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
117
+        return;
118
+
119
+    if (avctx->draw_horiz_band) {
120
+        int offset[AV_NUM_DATA_POINTERS];
121
+        int i;
122
+
123
+        offset[0] = y * src->linesize[0];
124
+        offset[1] =
125
+        offset[2] = (y >> vshift) * src->linesize[1];
126
+        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
127
+            offset[i] = 0;
128
+
129
+        emms_c();
130
+
131
+        avctx->draw_horiz_band(avctx, src, offset,
132
+                               y, h->picture_structure, height);
133
+    }
134
+}
135
+
136
+void ff_h264_free_tables(H264Context *h)
137
+{
138
+    int i;
139
+
140
+    av_freep(&h->intra4x4_pred_mode);
141
+    av_freep(&h->chroma_pred_mode_table);
142
+    av_freep(&h->cbp_table);
143
+    av_freep(&h->mvd_table[0]);
144
+    av_freep(&h->mvd_table[1]);
145
+    av_freep(&h->direct_table);
146
+    av_freep(&h->non_zero_count);
147
+    av_freep(&h->slice_table_base);
148
+    h->slice_table = NULL;
149
+    av_freep(&h->list_counts);
150
+
151
+    av_freep(&h->mb2b_xy);
152
+    av_freep(&h->mb2br_xy);
153
+
154
+    av_buffer_pool_uninit(&h->qscale_table_pool);
155
+    av_buffer_pool_uninit(&h->mb_type_pool);
156
+    av_buffer_pool_uninit(&h->motion_val_pool);
157
+    av_buffer_pool_uninit(&h->ref_index_pool);
158
+
159
+    for (i = 0; i < h->nb_slice_ctx; i++) {
160
+        H264SliceContext *sl = &h->slice_ctx[i];
161
+
162
+        av_freep(&sl->dc_val_base);
163
+        av_freep(&sl->er.mb_index2xy);
164
+        av_freep(&sl->er.error_status_table);
165
+        av_freep(&sl->er.er_temp_buffer);
166
+
167
+        av_freep(&sl->bipred_scratchpad);
168
+        av_freep(&sl->edge_emu_buffer);
169
+        av_freep(&sl->top_borders[0]);
170
+        av_freep(&sl->top_borders[1]);
171
+
172
+        sl->bipred_scratchpad_allocated = 0;
173
+        sl->edge_emu_buffer_allocated   = 0;
174
+        sl->top_borders_allocated[0]    = 0;
175
+        sl->top_borders_allocated[1]    = 0;
176
+    }
177
+}
178
+
179
+int ff_h264_alloc_tables(H264Context *h)
180
+{
181
+    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
182
+    const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
183
+    int x, y;
184
+
185
+    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
186
+                      row_mb_num, 8 * sizeof(uint8_t), fail)
187
+    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
188
+
189
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
190
+                      big_mb_num * 48 * sizeof(uint8_t), fail)
191
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
192
+                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
193
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
194
+                      big_mb_num * sizeof(uint16_t), fail)
195
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
196
+                      big_mb_num * sizeof(uint8_t), fail)
197
+    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
198
+                      row_mb_num, 16 * sizeof(uint8_t), fail);
199
+    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
200
+                      row_mb_num, 16 * sizeof(uint8_t), fail);
201
+    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
202
+    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
203
+
204
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
205
+                      4 * big_mb_num * sizeof(uint8_t), fail);
206
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
207
+                      big_mb_num * sizeof(uint8_t), fail)
208
+
209
+    memset(h->slice_table_base, -1,
210
+           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
211
+    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
212
+
213
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
214
+                      big_mb_num * sizeof(uint32_t), fail);
215
+    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
216
+                      big_mb_num * sizeof(uint32_t), fail);
217
+    for (y = 0; y < h->mb_height; y++)
218
+        for (x = 0; x < h->mb_width; x++) {
219
+            const int mb_xy = x + y * h->mb_stride;
220
+            const int b_xy  = 4 * x + 4 * y * h->b_stride;
221
+
222
+            h->mb2b_xy[mb_xy]  = b_xy;
223
+            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
224
+        }
225
+
226
+    return 0;
227
+
228
+fail:
229
+    ff_h264_free_tables(h);
230
+    return AVERROR(ENOMEM);
231
+}
232
+
233
+/**
234
+ * Init context
235
+ * Allocate buffers which are not shared amongst multiple threads.
236
+ */
237
+int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
238
+{
239
+    ERContext *er = &sl->er;
240
+    int mb_array_size = h->mb_height * h->mb_stride;
241
+    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
242
+    int c_size  = h->mb_stride * (h->mb_height + 1);
243
+    int yc_size = y_size + 2   * c_size;
244
+    int x, y, i;
245
+
246
+    sl->ref_cache[0][scan8[5]  + 1] =
247
+    sl->ref_cache[0][scan8[7]  + 1] =
248
+    sl->ref_cache[0][scan8[13] + 1] =
249
+    sl->ref_cache[1][scan8[5]  + 1] =
250
+    sl->ref_cache[1][scan8[7]  + 1] =
251
+    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
252
+
253
+    if (sl != h->slice_ctx) {
254
+        memset(er, 0, sizeof(*er));
255
+    } else
256
+    if (CONFIG_ERROR_RESILIENCE) {
257
+
258
+        /* init ER */
259
+        er->avctx          = h->avctx;
260
+        er->decode_mb      = h264_er_decode_mb;
261
+        er->opaque         = h;
262
+        er->quarter_sample = 1;
263
+
264
+        er->mb_num      = h->mb_num;
265
+        er->mb_width    = h->mb_width;
266
+        er->mb_height   = h->mb_height;
267
+        er->mb_stride   = h->mb_stride;
268
+        er->b8_stride   = h->mb_width * 2 + 1;
269
+
270
+        // error resilience code looks cleaner with this
271
+        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
272
+                          (h->mb_num + 1) * sizeof(int), fail);
273
+
274
+        for (y = 0; y < h->mb_height; y++)
275
+            for (x = 0; x < h->mb_width; x++)
276
+                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
277
+
278
+        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
279
+                                                      h->mb_stride + h->mb_width;
280
+
281
+        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
282
+                          mb_array_size * sizeof(uint8_t), fail);
283
+
284
+        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
285
+                         h->mb_height * h->mb_stride, fail);
286
+
287
+        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
288
+                          yc_size * sizeof(int16_t), fail);
289
+        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
290
+        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
291
+        er->dc_val[2] = er->dc_val[1] + c_size;
292
+        for (i = 0; i < yc_size; i++)
293
+            sl->dc_val_base[i] = 1024;
294
+    }
295
+
296
+    return 0;
297
+
298
+fail:
299
+    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
300
+}
301
+
302
+static int h264_init_context(AVCodecContext *avctx, H264Context *h)
303
+{
304
+    int i;
305
+
306
+    h->avctx                 = avctx;
307
+    h->backup_width          = -1;
308
+    h->backup_height         = -1;
309
+    h->backup_pix_fmt        = AV_PIX_FMT_NONE;
310
+    h->cur_chroma_format_idc = -1;
311
+
312
+    h->picture_structure     = PICT_FRAME;
313
+    h->workaround_bugs       = avctx->workaround_bugs;
314
+    h->flags                 = avctx->flags;
315
+    h->poc.prev_poc_msb      = 1 << 16;
316
+    h->recovery_frame        = -1;
317
+    h->frame_recovered       = 0;
318
+    h->poc.prev_frame_num    = -1;
319
+    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
320
+    h->sei.unregistered.x264_build = -1;
321
+
322
+    h->next_outputed_poc = INT_MIN;
323
+    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
324
+        h->last_pocs[i] = INT_MIN;
325
+
326
+    ff_h264_sei_uninit(&h->sei);
327
+
328
+    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
329
+
330
+    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
331
+    h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
332
+    if (!h->slice_ctx) {
333
+        h->nb_slice_ctx = 0;
334
+        return AVERROR(ENOMEM);
335
+    }
336
+
337
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
338
+        h->DPB[i].f = av_frame_alloc();
339
+        if (!h->DPB[i].f)
340
+            return AVERROR(ENOMEM);
341
+    }
342
+
343
+    h->cur_pic.f = av_frame_alloc();
344
+    if (!h->cur_pic.f)
345
+        return AVERROR(ENOMEM);
346
+
347
+    h->last_pic_for_ec.f = av_frame_alloc();
348
+    if (!h->last_pic_for_ec.f)
349
+        return AVERROR(ENOMEM);
350
+
351
+    for (i = 0; i < h->nb_slice_ctx; i++)
352
+        h->slice_ctx[i].h264 = h;
353
+
354
+    return 0;
355
+}
356
+
357
+static av_cold int h264_decode_end(AVCodecContext *avctx)
358
+{
359
+    H264Context *h = avctx->priv_data;
360
+    int i;
361
+
362
+    ff_h264_remove_all_refs(h);
363
+    ff_h264_free_tables(h);
364
+
365
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
366
+        ff_h264_unref_picture(h, &h->DPB[i]);
367
+        av_frame_free(&h->DPB[i].f);
368
+    }
369
+    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
370
+
371
+    h->cur_pic_ptr = NULL;
372
+
373
+    av_freep(&h->slice_ctx);
374
+    h->nb_slice_ctx = 0;
375
+
376
+    ff_h264_sei_uninit(&h->sei);
377
+    ff_h264_ps_uninit(&h->ps);
378
+
379
+    ff_h2645_packet_uninit(&h->pkt);
380
+
381
+    ff_h264_unref_picture(h, &h->cur_pic);
382
+    av_frame_free(&h->cur_pic.f);
383
+    ff_h264_unref_picture(h, &h->last_pic_for_ec);
384
+    av_frame_free(&h->last_pic_for_ec.f);
385
+
386
+    return 0;
387
+}
388
+
389
+static AVOnce h264_vlc_init = AV_ONCE_INIT;
390
+
391
+av_cold int ff_h264_decode_init(AVCodecContext *avctx)
392
+{
393
+    H264Context *h = avctx->priv_data;
394
+    int ret;
395
+
396
+    ret = h264_init_context(avctx, h);
397
+    if (ret < 0)
398
+        return ret;
399
+
400
+    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
401
+    if (ret != 0) {
402
+        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
403
+        return AVERROR_UNKNOWN;
404
+    }
405
+
406
+    if (avctx->codec_id == AV_CODEC_ID_H264) {
407
+        if (avctx->ticks_per_frame == 1) {
408
+            if(h->avctx->time_base.den < INT_MAX/2) {
409
+                h->avctx->time_base.den *= 2;
410
+            } else
411
+                h->avctx->time_base.num /= 2;
412
+        }
413
+        avctx->ticks_per_frame = 2;
414
+    }
415
+
416
+    if (avctx->extradata_size > 0 && avctx->extradata) {
417
+        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
418
+                                       &h->ps, &h->is_avc, &h->nal_length_size,
419
+                                       avctx->err_recognition, avctx);
420
+        if (ret < 0) {
421
+            h264_decode_end(avctx);
422
+            return ret;
423
+        }
424
+    }
425
+
426
+    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
427
+        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
428
+        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
429
+    }
430
+
431
+    avctx->internal->allocate_progress = 1;
432
+
433
+    ff_h264_flush_change(h);
434
+
435
+    if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
436
+        h->enable_er = 0;
437
+
438
+    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
439
+        av_log(avctx, AV_LOG_WARNING,
440
+               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
441
+               "Use it at your own risk\n");
442
+    }
443
+
444
+    return 0;
445
+}
446
+
447
+#if HAVE_THREADS
448
+static int decode_init_thread_copy(AVCodecContext *avctx)
449
+{
450
+    H264Context *h = avctx->priv_data;
451
+    int ret;
452
+
453
+    if (!avctx->internal->is_copy)
454
+        return 0;
455
+
456
+    memset(h, 0, sizeof(*h));
457
+
458
+    ret = h264_init_context(avctx, h);
459
+    if (ret < 0)
460
+        return ret;
461
+
462
+    h->context_initialized = 0;
463
+
464
+    return 0;
465
+}
466
+#endif
467
+
468
+/**
469
+ * Run setup operations that must be run after slice header decoding.
470
+ * This includes finding the next displayed frame.
471
+ *
472
+ * @param h h264 master context
473
+ * @param setup_finished enough NALs have been read that we can call
474
+ * ff_thread_finish_setup()
475
+ */
476
+static void decode_postinit(H264Context *h, int setup_finished)
477
+{
478
+    const SPS *sps = h->ps.sps;
479
+    H264Picture *out = h->cur_pic_ptr;
480
+    H264Picture *cur = h->cur_pic_ptr;
481
+    int i, pics, out_of_order, out_idx;
482
+
483
+    if (h->next_output_pic)
484
+        return;
485
+
486
+    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
487
+        /* FIXME: if we have two PAFF fields in one packet, we can't start
488
+         * the next thread here. If we have one field per packet, we can.
489
+         * The check in decode_nal_units() is not good enough to find this
490
+         * yet, so we assume the worst for now. */
491
+        // if (setup_finished)
492
+        //    ff_thread_finish_setup(h->avctx);
493
+        if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
494
+            return;
495
+        if (h->avctx->hwaccel || h->missing_fields <=1)
496
+            return;
497
+    }
498
+
499
+    cur->mmco_reset = h->mmco_reset;
500
+    h->mmco_reset = 0;
501
+
502
+    // FIXME do something with unavailable reference frames
503
+
504
+    /* Sort B-frames into display order */
505
+    if (sps->bitstream_restriction_flag ||
506
+        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
507
+        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
508
+    }
509
+
510
+    for (i = 0; 1; i++) {
511
+        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
512
+            if(i)
513
+                h->last_pocs[i-1] = cur->poc;
514
+            break;
515
+        } else if(i) {
516
+            h->last_pocs[i-1]= h->last_pocs[i];
517
+        }
518
+    }
519
+    out_of_order = MAX_DELAYED_PIC_COUNT - i;
520
+    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
521
+       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
522
+        out_of_order = FFMAX(out_of_order, 1);
523
+    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
524
+        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
525
+        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
526
+            h->last_pocs[i] = INT_MIN;
527
+        h->last_pocs[0] = cur->poc;
528
+        cur->mmco_reset = 1;
529
+    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
530
+        av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
531
+        h->avctx->has_b_frames = out_of_order;
532
+    }
533
+
534
+    pics = 0;
535
+    while (h->delayed_pic[pics])
536
+        pics++;
537
+
538
+    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
539
+
540
+    h->delayed_pic[pics++] = cur;
541
+    if (cur->reference == 0)
542
+        cur->reference = DELAYED_PIC_REF;
543
+
544
+    out     = h->delayed_pic[0];
545
+    out_idx = 0;
546
+    for (i = 1; h->delayed_pic[i] &&
547
+                !h->delayed_pic[i]->f->key_frame &&
548
+                !h->delayed_pic[i]->mmco_reset;
549
+         i++)
550
+        if (h->delayed_pic[i]->poc < out->poc) {
551
+            out     = h->delayed_pic[i];
552
+            out_idx = i;
553
+        }
554
+    if (h->avctx->has_b_frames == 0 &&
555
+        (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
556
+        h->next_outputed_poc = INT_MIN;
557
+    out_of_order = out->poc < h->next_outputed_poc;
558
+
559
+    if (out_of_order || pics > h->avctx->has_b_frames) {
560
+        out->reference &= ~DELAYED_PIC_REF;
561
+        for (i = out_idx; h->delayed_pic[i]; i++)
562
+            h->delayed_pic[i] = h->delayed_pic[i + 1];
563
+    }
564
+    if (!out_of_order && pics > h->avctx->has_b_frames) {
565
+        h->next_output_pic = out;
566
+        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
567
+            h->next_outputed_poc = INT_MIN;
568
+        } else
569
+            h->next_outputed_poc = out->poc;
570
+    } else {
571
+        av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
572
+    }
573
+
574
+    if (h->next_output_pic) {
575
+        if (h->next_output_pic->recovered) {
576
+            // We have reached a recovery point and all frames after it in
577
+            // display order are "recovered".
578
+            h->frame_recovered |= FRAME_RECOVERED_SEI;
579
+        }
580
+        h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
581
+    }
582
+
583
+    if (setup_finished && !h->avctx->hwaccel) {
584
+        ff_thread_finish_setup(h->avctx);
585
+
586
+        if (h->avctx->active_thread_type & FF_THREAD_FRAME)
587
+            h->setup_finished = 1;
588
+    }
589
+}
590
+
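In essence, the reordering above buffers decoded pictures and releases the one with the smallest POC once more pictures are queued than the stream's reorder depth (has_b_frames). A stripped-down, self-contained sketch of that idea, with a fixed delay and invented names rather than the decoder's data structures:

    /* Sketch only: POC-based output reordering with a fixed delay. */
    #include <stdio.h>

    #define DELAY 2                          /* assumed reorder depth (has_b_frames) */

    static int delayed[DELAY + 1];           /* POCs of pictures waiting for output */
    static int ndelayed;

    static void push_poc(int poc)
    {
        delayed[ndelayed++] = poc;
        if (ndelayed <= DELAY)
            return;                          /* not enough pictures buffered yet */

        int out = 0;                         /* index of the smallest POC */
        for (int i = 1; i < ndelayed; i++)
            if (delayed[i] < delayed[out])
                out = i;
        printf("output POC %d\n", delayed[out]);
        delayed[out] = delayed[--ndelayed];  /* drop it from the buffer */
    }

    int main(void)
    {
        /* decode order I P B B P B B: POCs arrive out of display order */
        static const int poc_in_decode_order[] = { 0, 6, 2, 4, 12, 8, 10 };
        for (int i = 0; i < 7; i++)
            push_poc(poc_in_decode_order[i]);
        return 0;                            /* POCs 10 and 12 would be flushed at EOS */
    }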
591
+/**
592
+ * Instantaneous decoder refresh (IDR).
593
+ */
594
+static void idr(H264Context *h)
595
+{
596
+    int i;
597
+    ff_h264_remove_all_refs(h);
598
+    h->poc.prev_frame_num        =
599
+    h->poc.prev_frame_num_offset = 0;
600
+    h->poc.prev_poc_msb          = 1<<16;
601
+    h->poc.prev_poc_lsb          = 0;
602
+    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
603
+        h->last_pocs[i] = INT_MIN;
604
+}
605
+
606
+/* forget old pics after a seek */
607
+void ff_h264_flush_change(H264Context *h)
608
+{
609
+    int i, j;
610
+
611
+    h->next_outputed_poc = INT_MIN;
612
+    h->prev_interlaced_frame = 1;
613
+    idr(h);
614
+
615
+    h->poc.prev_frame_num = -1;
616
+    if (h->cur_pic_ptr) {
617
+        h->cur_pic_ptr->reference = 0;
618
+        for (j=i=0; h->delayed_pic[i]; i++)
619
+            if (h->delayed_pic[i] != h->cur_pic_ptr)
620
+                h->delayed_pic[j++] = h->delayed_pic[i];
621
+        h->delayed_pic[j] = NULL;
622
+    }
623
+    ff_h264_unref_picture(h, &h->last_pic_for_ec);
624
+
625
+    h->first_field = 0;
626
+    ff_h264_sei_uninit(&h->sei);
627
+    h->recovery_frame = -1;
628
+    h->frame_recovered = 0;
629
+    h->current_slice = 0;
630
+    h->mmco_reset = 1;
631
+}
632
+
633
+/* forget old pics after a seek */
634
+static void flush_dpb(AVCodecContext *avctx)
635
+{
636
+    H264Context *h = avctx->priv_data;
637
+    int i;
638
+
639
+    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
640
+
641
+    ff_h264_flush_change(h);
642
+
643
+    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
644
+        ff_h264_unref_picture(h, &h->DPB[i]);
645
+    h->cur_pic_ptr = NULL;
646
+    ff_h264_unref_picture(h, &h->cur_pic);
647
+
648
+    h->mb_y = 0;
649
+
650
+    ff_h264_free_tables(h);
651
+    h->context_initialized = 0;
652
+}
653
+
654
+#if FF_API_CAP_VDPAU
655
+static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
656
+#endif
657
+
658
+static int get_last_needed_nal(H264Context *h)
659
+{
660
+    int nals_needed = 0;
661
+    int first_slice = 0;
662
+    int i;
663
+    int ret;
664
+
665
+    for (i = 0; i < h->pkt.nb_nals; i++) {
666
+        H2645NAL *nal = &h->pkt.nals[i];
667
+        GetBitContext gb;
668
+
669
+        /* packets can sometimes contain multiple PPS/SPS,
670
+         * e.g. two PAFF field pictures in one packet, or a demuxer
671
+         * which splits NALs strangely. If so, when frame threading, we
672
+         * can't start the next thread until we've read all of them. */
673
+        switch (nal->type) {
674
+        case NAL_SPS:
675
+        case NAL_PPS:
676
+            nals_needed = i;
677
+            break;
678
+        case NAL_DPA:
679
+        case NAL_IDR_SLICE:
680
+        case NAL_SLICE:
681
+            ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
682
+            if (ret < 0)
683
+                return ret;
684
+            if (!get_ue_golomb_long(&gb) ||  // first_mb_in_slice
685
+                !first_slice ||
686
+                first_slice != nal->type)
687
+                nals_needed = i;
688
+            if (!first_slice)
689
+                first_slice = nal->type;
690
+        }
691
+    }
692
+
693
+    return nals_needed;
694
+}
695
+
696
+static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
697
+{
698
+    av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
699
+    av_log(logctx, AV_LOG_DEBUG, "  green_metadata_type: %d\n", gm->green_metadata_type);
700
+
701
+    if (gm->green_metadata_type == 0) {
702
+        av_log(logctx, AV_LOG_DEBUG, "  green_metadata_period_type: %d\n", gm->period_type);
703
+
704
+        if (gm->period_type == 2)
705
+            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_seconds: %d\n", gm->num_seconds);
706
+        else if (gm->period_type == 3)
707
+            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_pictures: %d\n", gm->num_pictures);
708
+
709
+        av_log(logctx, AV_LOG_DEBUG, "  SEI GREEN Complexity Metrics: %f %f %f %f\n",
710
+               (float)gm->percent_non_zero_macroblocks/255,
711
+               (float)gm->percent_intra_coded_macroblocks/255,
712
+               (float)gm->percent_six_tap_filtering/255,
713
+               (float)gm->percent_alpha_point_deblocking_instance/255);
714
+
715
+    } else if (gm->green_metadata_type == 1) {
716
+        av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_type: %d\n", gm->xsd_metric_type);
717
+
718
+        if (gm->xsd_metric_type == 0)
719
+            av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_value: %f\n",
720
+                   (float)gm->xsd_metric_value/100);
721
+    }
722
+}
723
+
724
+static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
725
+{
726
+    AVCodecContext *const avctx = h->avctx;
727
+    unsigned context_count = 0;
728
+    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
729
+    int idr_cleared=0;
730
+    int i, ret = 0;
731
+
732
+    h->nal_unit_type= 0;
733
+
734
+    h->max_contexts = h->nb_slice_ctx;
735
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
736
+        h->current_slice = 0;
737
+        if (!h->first_field)
738
+            h->cur_pic_ptr = NULL;
739
+        ff_h264_sei_uninit(&h->sei);
740
+    }
741
+
742
+    if (h->nal_length_size == 4) {
743
+        if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
744
+            h->is_avc = 0;
745
+        }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
746
+            h->is_avc = 1;
747
+    }
748
+
749
+    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
750
+                                h->nal_length_size, avctx->codec_id);
751
+    if (ret < 0) {
752
+        av_log(avctx, AV_LOG_ERROR,
753
+               "Error splitting the input into NAL units.\n");
754
+        return ret;
755
+    }
756
+
757
+    if (avctx->active_thread_type & FF_THREAD_FRAME)
758
+        nals_needed = get_last_needed_nal(h);
759
+    if (nals_needed < 0)
760
+        return nals_needed;
761
+
762
+    for (i = 0; i < h->pkt.nb_nals; i++) {
763
+        H2645NAL *nal = &h->pkt.nals[i];
764
+        H264SliceContext *sl = &h->slice_ctx[context_count];
765
+        int err;
766
+
767
+        if (avctx->skip_frame >= AVDISCARD_NONREF &&
768
+            nal->ref_idc == 0 && nal->type != NAL_SEI)
769
+            continue;
770
+
771
+again:
772
+        // FIXME these should stop being context-global variables
773
+        h->nal_ref_idc   = nal->ref_idc;
774
+        h->nal_unit_type = nal->type;
775
+
776
+        err = 0;
777
+        switch (nal->type) {
778
+        case NAL_IDR_SLICE:
779
+            if ((nal->data[1] & 0xFC) == 0x98) {
780
+                av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
781
+                h->next_outputed_poc = INT_MIN;
782
+                ret = -1;
783
+                goto end;
784
+            }
785
+            if (nal->type != NAL_IDR_SLICE) {
786
+                av_log(h->avctx, AV_LOG_ERROR,
787
+                       "Invalid mix of idr and non-idr slices\n");
788
+                ret = -1;
789
+                goto end;
790
+            }
791
+            if(!idr_cleared) {
792
+                if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
793
+                    av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
794
+                    ret = AVERROR_INVALIDDATA;
795
+                    goto end;
796
+                }
797
+                idr(h); // FIXME ensure we don't lose some frames if there is reordering
798
+            }
799
+            idr_cleared = 1;
800
+            h->has_recovery_point = 1;
801
+        case NAL_SLICE:
802
+            sl->gb = nal->gb;
803
+
804
+            if ((err = ff_h264_decode_slice_header(h, sl, nal)))
805
+                break;
806
+
807
+            if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
808
+                const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
809
+
810
+                if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
811
+                    h->valid_recovery_point = 1;
812
+
813
+                if (   h->recovery_frame < 0
814
+                    || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
815
+                    h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
816
+
817
+                    if (!h->valid_recovery_point)
818
+                        h->recovery_frame = h->poc.frame_num;
819
+                }
820
+            }
821
+
822
+            h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
823
+
824
+            if (nal->type == NAL_IDR_SLICE ||
825
+                (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
826
+                h->recovery_frame         = -1;
827
+                h->cur_pic_ptr->recovered = 1;
828
+            }
829
+            // If we have an IDR, all frames after it in decoded order are
830
+            // "recovered".
831
+            if (nal->type == NAL_IDR_SLICE)
832
+                h->frame_recovered |= FRAME_RECOVERED_IDR;
833
+#if 1
834
+            h->cur_pic_ptr->recovered |= h->frame_recovered;
835
+#else
836
+            h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
837
+#endif
838
+
839
+            if (h->current_slice == 1) {
840
+                if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
841
+                    decode_postinit(h, i >= nals_needed);
842
+
843
+                if (h->avctx->hwaccel &&
844
+                    (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
845
+                    goto end;
846
+#if FF_API_CAP_VDPAU
847
+                if (CONFIG_H264_VDPAU_DECODER &&
848
+                    h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
849
+                    ff_vdpau_h264_picture_start(h);
850
+#endif
851
+            }
852
+
853
+            if (sl->redundant_pic_count == 0) {
854
+                if (avctx->hwaccel) {
855
+                    ret = avctx->hwaccel->decode_slice(avctx,
856
+                                                       nal->raw_data,
857
+                                                       nal->raw_size);
858
+                    if (ret < 0)
859
+                        goto end;
860
+#if FF_API_CAP_VDPAU
861
+                } else if (CONFIG_H264_VDPAU_DECODER &&
862
+                           h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
863
+                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
864
+                                            start_code,
865
+                                            sizeof(start_code));
866
+                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
867
+                                            nal->raw_data,
868
+                                            nal->raw_size);
869
+#endif
870
+                } else
871
+                    context_count++;
872
+            }
873
+            break;
874
+        case NAL_DPA:
875
+        case NAL_DPB:
876
+        case NAL_DPC:
877
+            avpriv_request_sample(avctx, "data partitioning");
878
+            break;
879
+        case NAL_SEI:
880
+            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
881
+            h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
882
+            if (avctx->debug & FF_DEBUG_GREEN_MD)
883
+                debug_green_metadata(&h->sei.green_metadata, h->avctx);
884
+#if FF_API_AFD
885
+FF_DISABLE_DEPRECATION_WARNINGS
886
+            h->avctx->dtg_active_format = h->sei.afd.active_format_description;
887
+FF_ENABLE_DEPRECATION_WARNINGS
888
+#endif /* FF_API_AFD */
889
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
890
+                goto end;
891
+            break;
892
+        case NAL_SPS: {
893
+            GetBitContext tmp_gb = nal->gb;
894
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
895
+                break;
896
+            av_log(h->avctx, AV_LOG_DEBUG,
897
+                   "SPS decoding failure, trying again with the complete NAL\n");
898
+            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
899
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
900
+                break;
901
+            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
902
+            break;
903
+        }
904
+        case NAL_PPS:
905
+            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
906
+                                                       nal->size_bits);
907
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
908
+                goto end;
909
+            break;
910
+        case NAL_AUD:
911
+        case NAL_END_SEQUENCE:
912
+        case NAL_END_STREAM:
913
+        case NAL_FILLER_DATA:
914
+        case NAL_SPS_EXT:
915
+        case NAL_AUXILIARY_SLICE:
916
+            break;
917
+        default:
918
+            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
919
+                   nal->type, nal->size_bits);
920
+        }
921
+
922
+        if (context_count == h->max_contexts) {
923
+            ret = ff_h264_execute_decode_slices(h, context_count);
924
+            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
925
+                goto end;
926
+            context_count = 0;
927
+        }
928
+
929
+        if (err < 0 || err == SLICE_SKIPED) {
930
+            if (err < 0)
931
+                av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
932
+            sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
933
+        } else if (err == SLICE_SINGLETHREAD) {
934
+            if (context_count > 0) {
935
+                ret = ff_h264_execute_decode_slices(h, context_count);
936
+                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
937
+                    goto end;
938
+                context_count = 0;
939
+            }
940
+            /* Slice could not be decoded in parallel mode, restart. */
941
+            sl               = &h->slice_ctx[0];
942
+            goto again;
943
+        }
944
+    }
945
+    if (context_count) {
946
+        ret = ff_h264_execute_decode_slices(h, context_count);
947
+        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
948
+            goto end;
949
+    }
950
+
951
+    ret = 0;
952
+end:
953
+
954
+#if CONFIG_ERROR_RESILIENCE
955
+    /*
956
+     * FIXME: Error handling code does not seem to support interlaced
957
+     * when slices span multiple rows.
958
+     * The ff_er_add_slice calls don't work right for bottom
959
+     * fields; they cause massive erroneous error concealment.
960
+     * Error marking covers both fields (top and bottom).
961
+     * This causes a mismatched s->error_count
962
+     * and a bad error table. Further, the error count goes to
963
+     * INT_MAX when called for the bottom field, because mb_y is
964
+     * past the end by one (caller's fault) and resync_mb_y != 0
965
+     * causes problems for the first MB line, too.
966
+     */
967
+    if (!FIELD_PICTURE(h) && h->current_slice &&
968
+        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
969
+        h->enable_er) {
970
+
971
+        H264SliceContext *sl = h->slice_ctx;
972
+        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
973
+
974
+        ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
975
+
976
+        if (use_last_pic) {
977
+            ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
978
+            sl->ref_list[0][0].parent = &h->last_pic_for_ec;
979
+            memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
980
+            memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
981
+            sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
982
+        } else if (sl->ref_count[0]) {
983
+            ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
984
+        } else
985
+            ff_h264_set_erpic(&sl->er.last_pic, NULL);
986
+
987
+        if (sl->ref_count[1])
988
+            ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
989
+
990
+        sl->er.ref_count = sl->ref_count[0];
991
+
992
+        ff_er_frame_end(&sl->er);
993
+        if (use_last_pic)
994
+            memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
995
+    }
996
+#endif /* CONFIG_ERROR_RESILIENCE */
997
+    /* clean up */
998
+    if (h->cur_pic_ptr && !h->droppable) {
999
+        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1000
+                                  h->picture_structure == PICT_BOTTOM_FIELD);
1001
+    }
1002
+
1003
+    return (ret < 0) ? ret : buf_size;
1004
+}
1005
+
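The is_avc handling above distinguishes length-prefixed (AVCC/MP4-style) payloads from Annex B byte streams, where NAL units are separated by 00 00 01 start codes. A minimal start-code scanner, illustrative only and not ff_h2645_packet_split():

    /* Sketch only: scanning an Annex B byte stream for 00 00 01 start codes. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        static const uint8_t buf[] = { 0, 0, 1, 0x67, 0x42,    /* SPS       */
                                       0, 0, 1, 0x68, 0xCE,    /* PPS       */
                                       0, 0, 1, 0x65, 0x88 };  /* IDR slice */
        int size = (int)sizeof(buf);

        for (int i = 0; i + 3 < size; i++) {
            if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1) {
                printf("NAL unit of type %d starts at offset %d\n",
                       buf[i + 3] & 0x1F, i + 3);
                i += 2;                      /* continue past the start code */
            }
        }
        return 0;
    }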
1006
+/**
1007
+ * Return the number of bytes consumed for building the current frame.
1008
+ */
1009
+static int get_consumed_bytes(int pos, int buf_size)
1010
+{
1011
+    if (pos == 0)
1012
+        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
1013
+    if (pos + 10 > buf_size)
1014
+        pos = buf_size; // report the whole buffer as consumed when within 10 bytes of its end
1015
+
1016
+    return pos;
1017
+}
1018
+
1019
+static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
1020
+{
1021
+    AVFrame *src = srcp->f;
1022
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1023
+    int i;
1024
+    int ret = av_frame_ref(dst, src);
1025
+    if (ret < 0)
1026
+        return ret;
1027
+
1028
+    av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
1029
+
1030
+    h->backup_width   = h->avctx->width;
1031
+    h->backup_height  = h->avctx->height;
1032
+    h->backup_pix_fmt = h->avctx->pix_fmt;
1033
+
1034
+    h->avctx->width   = dst->width;
1035
+    h->avctx->height  = dst->height;
1036
+    h->avctx->pix_fmt = dst->format;
1037
+
1038
+    if (srcp->sei_recovery_frame_cnt == 0)
1039
+        dst->key_frame = 1;
1040
+    if (!srcp->crop)
1041
+        return 0;
1042
+
1043
+    for (i = 0; i < desc->nb_components; i++) {
1044
+        int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1045
+        int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1046
+        int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1047
+                      (srcp->crop_top  >> vshift) * dst->linesize[i];
1048
+        dst->data[i] += off;
1049
+    }
1050
+    return 0;
1051
+}
1052
+
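output_frame() applies cropping without copying: each plane pointer is advanced by the crop offsets, with chroma planes shifted by their subsampling factors and pixel_shift accounting for high bit depth. A small illustration for an assumed 4:2:0, 8-bit frame (made-up sizes, not the decoder's buffers):

    /* Sketch only: crop offsets for a 4:2:0, 8-bit frame (pixel_shift == 0). */
    #include <stdio.h>

    int main(void)
    {
        int crop_left = 4, crop_top = 2;      /* crop offsets in luma samples */
        int linesize[3] = { 1920, 960, 960 }; /* bytes per row for Y/Cb/Cr */

        for (int i = 0; i < 3; i++) {
            int hshift = i ? 1 : 0;           /* chroma planes are subsampled 2x */
            int vshift = i ? 1 : 0;
            long off = (crop_left >> hshift) + (long)(crop_top >> vshift) * linesize[i];
            printf("plane %d: advance data[%d] by %ld bytes\n", i, i, off);
        }
        return 0;
    }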
1053
+static int is_extra(const uint8_t *buf, int buf_size)
1054
+{
1055
+    int cnt= buf[5]&0x1f;
1056
+    const uint8_t *p= buf+6;
1057
+    while(cnt--){
1058
+        int nalsize= AV_RB16(p) + 2;
1059
+        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
1060
+            return 0;
1061
+        p += nalsize;
1062
+    }
1063
+    cnt = *(p++);
1064
+    if(!cnt)
1065
+        return 0;
1066
+    while(cnt--){
1067
+        int nalsize= AV_RB16(p) + 2;
1068
+        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
1069
+            return 0;
1070
+        p += nalsize;
1071
+    }
1072
+    return 1;
1073
+}
1074
+
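is_extra() above walks avcC-style extradata: a 5-byte header, an SPS count in the low 5 bits of byte 5, 16-bit big-endian length-prefixed NAL units, then a PPS count byte and the PPS entries. A self-contained sketch of the same walk, with a hypothetical rb16() helper and a fabricated minimal blob:

    /* Sketch only: walking avcC extradata; rb16() is a made-up helper. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned rb16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

    static void walk_avcc(const uint8_t *buf, int size)
    {
        const uint8_t *p = buf + 6;
        int cnt = buf[5] & 0x1F;                 /* numOfSequenceParameterSets */

        for (int pass = 0; pass < 2; pass++) {
            while (cnt--) {
                if (p + 2 > buf + size)
                    return;                      /* truncated extradata */
                unsigned nalsize = rb16(p);      /* 2-byte big-endian length */
                if (p + 2 + nalsize > buf + size)
                    return;
                printf("NAL type %d, %u bytes\n", p[2] & 0x1F, nalsize);
                p += 2 + nalsize;
            }
            if (pass == 0 && p < buf + size)
                cnt = *p++;                      /* numOfPictureParameterSets */
        }
    }

    int main(void)
    {
        /* fabricated minimal avcC: header, one 2-byte SPS (type 7), one 2-byte PPS (type 8) */
        static const uint8_t avcc[] = { 1, 66, 0, 30, 0xFF, 0xE1,
                                        0, 2, 0x67, 0x42,
                                        1, 0, 2, 0x68, 0xCE };
        walk_avcc(avcc, (int)sizeof(avcc));
        return 0;
    }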
1075
+static int h264_decode_frame(AVCodecContext *avctx, void *data,
1076
+                             int *got_frame, AVPacket *avpkt)
1077
+{
1078
+    const uint8_t *buf = avpkt->data;
1079
+    int buf_size       = avpkt->size;
1080
+    H264Context *h     = avctx->priv_data;
1081
+    AVFrame *pict      = data;
1082
+    int buf_index      = 0;
1083
+    H264Picture *out;
1084
+    int i, out_idx;
1085
+    int ret;
1086
+
1087
+    h->flags = avctx->flags;
1088
+    h->setup_finished = 0;
1089
+
1090
+    if (h->backup_width != -1) {
1091
+        avctx->width    = h->backup_width;
1092
+        h->backup_width = -1;
1093
+    }
1094
+    if (h->backup_height != -1) {
1095
+        avctx->height    = h->backup_height;
1096
+        h->backup_height = -1;
1097
+    }
1098
+    if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
1099
+        avctx->pix_fmt    = h->backup_pix_fmt;
1100
+        h->backup_pix_fmt = AV_PIX_FMT_NONE;
1101
+    }
1102
+
1103
+    ff_h264_unref_picture(h, &h->last_pic_for_ec);
1104
+
1105
+    /* end of stream, output what is still in the buffers */
1106
+    if (buf_size == 0) {
1107
+ out:
1108
+
1109
+        h->cur_pic_ptr = NULL;
1110
+        h->first_field = 0;
1111
+
1112
+        // FIXME factorize this with the output code below
1113
+        out     = h->delayed_pic[0];
1114
+        out_idx = 0;
1115
+        for (i = 1;
1116
+             h->delayed_pic[i] &&
1117
+             !h->delayed_pic[i]->f->key_frame &&
1118
+             !h->delayed_pic[i]->mmco_reset;
1119
+             i++)
1120
+            if (h->delayed_pic[i]->poc < out->poc) {
1121
+                out     = h->delayed_pic[i];
1122
+                out_idx = i;
1123
+            }
1124
+
1125
+        for (i = out_idx; h->delayed_pic[i]; i++)
1126
+            h->delayed_pic[i] = h->delayed_pic[i + 1];
1127
+
1128
+        if (out) {
1129
+            out->reference &= ~DELAYED_PIC_REF;
1130
+            ret = output_frame(h, pict, out);
1131
+            if (ret < 0)
1132
+                return ret;
1133
+            *got_frame = 1;
1134
+        }
1135
+
1136
+        return buf_index;
1137
+    }
1138
+    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1139
+        int side_size;
1140
+        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1141
+        if (is_extra(side, side_size))
1142
+            ff_h264_decode_extradata(side, side_size,
1143
+                                     &h->ps, &h->is_avc, &h->nal_length_size,
1144
+                                     avctx->err_recognition, avctx);
1145
+    }
1146
+    if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1147
+        if (is_extra(buf, buf_size))
1148
+            return ff_h264_decode_extradata(buf, buf_size,
1149
+                                            &h->ps, &h->is_avc, &h->nal_length_size,
1150
+                                            avctx->err_recognition, avctx);
1151
+    }
1152
+
1153
+    buf_index = decode_nal_units(h, buf, buf_size);
1154
+    if (buf_index < 0)
1155
+        return AVERROR_INVALIDDATA;
1156
+
1157
+    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1158
+        av_assert0(buf_index <= buf_size);
1159
+        goto out;
1160
+    }
1161
+
1162
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1163
+        if (avctx->skip_frame >= AVDISCARD_NONREF ||
1164
+            buf_size >= 4 && !memcmp("Q264", buf, 4))
1165
+            return buf_size;
1166
+        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1167
+        return AVERROR_INVALIDDATA;
1168
+    }
1169
+
1170
+    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1171
+        (h->mb_y >= h->mb_height && h->mb_height)) {
1172
+        if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
1173
+            decode_postinit(h, 1);
1174
+
1175
+        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1176
+            return ret;
1177
+
1178
+        /* Wait for second field. */
1179
+        *got_frame = 0;
1180
+        if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
1181
+                                   (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
1182
+                                   h->next_output_pic->recovered)) {
1183
+            if (!h->next_output_pic->recovered)
1184
+                h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
1185
+
1186
+            if (!h->avctx->hwaccel &&
1187
+                 (h->next_output_pic->field_poc[0] == INT_MAX ||
1188
+                  h->next_output_pic->field_poc[1] == INT_MAX)
1189
+            ) {
1190
+                int p;
1191
+                AVFrame *f = h->next_output_pic->f;
1192
+                int field = h->next_output_pic->field_poc[0] == INT_MAX;
1193
+                uint8_t *dst_data[4];
1194
+                int linesizes[4];
1195
+                const uint8_t *src_data[4];
1196
+
1197
+                av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1198
+
1199
+                for (p = 0; p<4; p++) {
1200
+                    dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1201
+                    src_data[p] = f->data[p] +  field   *f->linesize[p];
1202
+                    linesizes[p] = 2*f->linesize[p];
1203
+                }
1204
+
1205
+                av_image_copy(dst_data, linesizes, src_data, linesizes,
1206
+                              f->format, f->width, f->height>>1);
1207
+            }
1208
+
1209
+            ret = output_frame(h, pict, h->next_output_pic);
1210
+            if (ret < 0)
1211
+                return ret;
1212
+            *got_frame = 1;
1213
+            if (CONFIG_MPEGVIDEO) {
1214
+                ff_print_debug_info2(h->avctx, pict, NULL,
1215
+                                    h->next_output_pic->mb_type,
1216
+                                    h->next_output_pic->qscale_table,
1217
+                                    h->next_output_pic->motion_val,
1218
+                                    NULL,
1219
+                                    h->mb_width, h->mb_height, h->mb_stride, 1);
1220
+            }
1221
+        }
1222
+    }
1223
+
1224
+    av_assert0(pict->buf[0] || !*got_frame);
1225
+
1226
+    ff_h264_unref_picture(h, &h->last_pic_for_ec);
1227
+
1228
+    return get_consumed_bytes(buf_index, buf_size);
1229
+}
1230
+
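One detail worth noting from h264_decode_frame(): when one field's POC is still INT_MAX, the missing field is filled by copying the present field's rows with a doubled stride before output. A toy illustration on a small character buffer (sizes and contents invented for the demo):

    /* Sketch only: duplicating one field into the other with a doubled stride. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        enum { W = 8, H = 6 };
        char frame[H][W];
        memset(frame, '.', sizeof(frame));

        /* pretend only the top field (even rows) was decoded */
        for (int y = 0; y < H; y += 2)
            memset(frame[y], 'T', W);

        /* fill the missing bottom field by copying the top field's rows */
        char *dst = frame[1];               /* first bottom-field row */
        char *src = frame[0];               /* first top-field row */
        for (int y = 0; y < H / 2; y++)
            memcpy(dst + y * 2 * W, src + y * 2 * W, W);

        for (int y = 0; y < H; y++)
            printf("%.*s\n", W, frame[y]);
        return 0;
    }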
1231
+#define OFFSET(x) offsetof(H264Context, x)
1232
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1233
+static const AVOption h264_options[] = {
1234
+    {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
1235
+    {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1236
+    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1237
+    { NULL },
1238
+};
1239
+
1240
+static const AVClass h264_class = {
1241
+    .class_name = "H264 Decoder",
1242
+    .item_name  = av_default_item_name,
1243
+    .option     = h264_options,
1244
+    .version    = LIBAVUTIL_VERSION_INT,
1245
+};
1246
+
1247
+AVCodec ff_h264_decoder = {
1248
+    .name                  = "h264",
1249
+    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1250
+    .type                  = AVMEDIA_TYPE_VIDEO,
1251
+    .id                    = AV_CODEC_ID_H264,
1252
+    .priv_data_size        = sizeof(H264Context),
1253
+    .init                  = ff_h264_decode_init,
1254
+    .close                 = h264_decode_end,
1255
+    .decode                = h264_decode_frame,
1256
+    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1257
+                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1258
+                             AV_CODEC_CAP_FRAME_THREADS,
1259
+    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
1260
+    .flush                 = flush_dpb,
1261
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1262
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1263
+    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1264
+    .priv_class            = &h264_class,
1265
+};
1266
+
1267
+#if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
1268
+static const AVClass h264_vdpau_class = {
1269
+    .class_name = "H264 VDPAU Decoder",
1270
+    .item_name  = av_default_item_name,
1271
+    .option     = h264_options,
1272
+    .version    = LIBAVUTIL_VERSION_INT,
1273
+};
1274
+
1275
+AVCodec ff_h264_vdpau_decoder = {
1276
+    .name           = "h264_vdpau",
1277
+    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
1278
+    .type           = AVMEDIA_TYPE_VIDEO,
1279
+    .id             = AV_CODEC_ID_H264,
1280
+    .priv_data_size = sizeof(H264Context),
1281
+    .init           = ff_h264_decode_init,
1282
+    .close          = h264_decode_end,
1283
+    .decode         = h264_decode_frame,
1284
+    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
1285
+    .flush          = flush_dpb,
1286
+    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
1287
+                                                     AV_PIX_FMT_NONE},
1288
+    .profiles       = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1289
+    .priv_class     = &h264_vdpau_class,
1290
+};
1291
+#endif
0 1292
new file mode 100644
... ...
@@ -0,0 +1,1008 @@
0
+/*
1
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
2
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
3
+ *
4
+ * This file is part of FFmpeg.
5
+ *
6
+ * FFmpeg is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * FFmpeg is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with FFmpeg; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * H.264 / AVC / MPEG-4 part10 codec.
24
+ * @author Michael Niedermayer <michaelni@gmx.at>
25
+ */
26
+
27
+#ifndef AVCODEC_H264DEC_H
28
+#define AVCODEC_H264DEC_H
29
+
30
+#include "libavutil/buffer.h"
31
+#include "libavutil/intreadwrite.h"
32
+#include "libavutil/thread.h"
33
+
34
+#include "cabac.h"
35
+#include "error_resilience.h"
36
+#include "h264_parse.h"
37
+#include "h264_sei.h"
38
+#include "h2645_parse.h"
39
+#include "h264chroma.h"
40
+#include "h264dsp.h"
41
+#include "h264pred.h"
42
+#include "h264qpel.h"
43
+#include "internal.h"
44
+#include "mpegutils.h"
45
+#include "parser.h"
46
+#include "qpeldsp.h"
47
+#include "rectangle.h"
48
+#include "videodsp.h"
49
+
50
+#define H264_MAX_PICTURE_COUNT 36
51
+
52
+#define MAX_SPS_COUNT          32
53
+#define MAX_PPS_COUNT         256
54
+
55
+#define MAX_MMCO_COUNT         66
56
+
57
+#define MAX_DELAYED_PIC_COUNT  16
58
+
59
+#define MAX_MBPAIR_SIZE (256*1024) // a tighter bound could be calculated if someone cares about a few bytes
60
+
61
+/* Compiling in interlaced support reduces the speed
62
+ * of progressive decoding by about 2%. */
63
+#define ALLOW_INTERLACE
64
+
65
+#define FMO 0
66
+
67
+/**
68
+ * The maximum number of slices supported by the decoder.
69
+ * Must be a power of 2.
70
+ */
71
+#define MAX_SLICES 32
72
+
73
+#ifdef ALLOW_INTERLACE
74
+#define MB_MBAFF(h)    (h)->mb_mbaff
75
+#define MB_FIELD(sl)  (sl)->mb_field_decoding_flag
76
+#define FRAME_MBAFF(h) (h)->mb_aff_frame
77
+#define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
78
+#define LEFT_MBS 2
79
+#define LTOP     0
80
+#define LBOT     1
81
+#define LEFT(i)  (i)
82
+#else
83
+#define MB_MBAFF(h)      0
84
+#define MB_FIELD(sl)     0
85
+#define FRAME_MBAFF(h)   0
86
+#define FIELD_PICTURE(h) 0
87
+#undef  IS_INTERLACED
88
+#define IS_INTERLACED(mb_type) 0
89
+#define LEFT_MBS 1
90
+#define LTOP     0
91
+#define LBOT     0
92
+#define LEFT(i)  0
93
+#endif
94
+#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
95
+
96
+#ifndef CABAC
97
+#define CABAC(h) (h)->ps.pps->cabac
98
+#endif
99
+
100
+#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
101
+#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
102
+#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
103
+
104
+#define EXTENDED_SAR       255
105
+
106
+#define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty but it fits in 16 bit
107
+#define MB_TYPE_8x8DCT     0x01000000
108
+#define IS_REF0(a)         ((a) & MB_TYPE_REF0)
109
+#define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)
110
+
111
+#define QP_MAX_NUM (51 + 6*6)           // The maximum supported qp
112
+
113
+/* NAL unit types */
114
+enum {
115
+    NAL_SLICE           = 1,
116
+    NAL_DPA             = 2,
117
+    NAL_DPB             = 3,
118
+    NAL_DPC             = 4,
119
+    NAL_IDR_SLICE       = 5,
120
+    NAL_SEI             = 6,
121
+    NAL_SPS             = 7,
122
+    NAL_PPS             = 8,
123
+    NAL_AUD             = 9,
124
+    NAL_END_SEQUENCE    = 10,
125
+    NAL_END_STREAM      = 11,
126
+    NAL_FILLER_DATA     = 12,
127
+    NAL_SPS_EXT         = 13,
128
+    NAL_AUXILIARY_SLICE = 19,
129
+};
130
+
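These type codes come from the one-byte NAL unit header: forbidden_zero_bit (1 bit), nal_ref_idc (2 bits), nal_unit_type (5 bits). A minimal sketch of splitting that byte:

    /* Sketch only: decoding the one-byte NAL unit header. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t header = 0x65;                 /* e.g. the first byte of an IDR slice NAL */
        int forbidden  = header >> 7;          /* forbidden_zero_bit, must be 0 */
        int ref_idc    = (header >> 5) & 0x3;  /* 0 means "not used for reference" */
        int type       = header & 0x1F;        /* 5 == NAL_IDR_SLICE, 7 == NAL_SPS, ... */

        printf("forbidden=%d nal_ref_idc=%d nal_unit_type=%d\n", forbidden, ref_idc, type);
        return 0;
    }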
131
+/**
132
+ * Sequence parameter set
133
+ */
134
+typedef struct SPS {
135
+    unsigned int sps_id;
136
+    int profile_idc;
137
+    int level_idc;
138
+    int chroma_format_idc;
139
+    int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
140
+    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
141
+    int poc_type;                      ///< pic_order_cnt_type
142
+    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
143
+    int delta_pic_order_always_zero_flag;
144
+    int offset_for_non_ref_pic;
145
+    int offset_for_top_to_bottom_field;
146
+    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
147
+    int ref_frame_count;               ///< num_ref_frames
148
+    int gaps_in_frame_num_allowed_flag;
149
+    int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
150
+    int mb_height;                     ///< pic_height_in_map_units_minus1 + 1
151
+    int frame_mbs_only_flag;
152
+    int mb_aff;                        ///< mb_adaptive_frame_field_flag
153
+    int direct_8x8_inference_flag;
154
+    int crop;                          ///< frame_cropping_flag
155
+
156
+    /* those 4 are already in luma samples */
157
+    unsigned int crop_left;            ///< frame_cropping_rect_left_offset
158
+    unsigned int crop_right;           ///< frame_cropping_rect_right_offset
159
+    unsigned int crop_top;             ///< frame_cropping_rect_top_offset
160
+    unsigned int crop_bottom;          ///< frame_cropping_rect_bottom_offset
161
+    int vui_parameters_present_flag;
162
+    AVRational sar;
163
+    int video_signal_type_present_flag;
164
+    int full_range;
165
+    int colour_description_present_flag;
166
+    enum AVColorPrimaries color_primaries;
167
+    enum AVColorTransferCharacteristic color_trc;
168
+    enum AVColorSpace colorspace;
169
+    int timing_info_present_flag;
170
+    uint32_t num_units_in_tick;
171
+    uint32_t time_scale;
172
+    int fixed_frame_rate_flag;
173
+    short offset_for_ref_frame[256]; // FIXME dyn aloc?
174
+    int bitstream_restriction_flag;
175
+    int num_reorder_frames;
176
+    int scaling_matrix_present;
177
+    uint8_t scaling_matrix4[6][16];
178
+    uint8_t scaling_matrix8[6][64];
179
+    int nal_hrd_parameters_present_flag;
180
+    int vcl_hrd_parameters_present_flag;
181
+    int pic_struct_present_flag;
182
+    int time_offset_length;
183
+    int cpb_cnt;                          ///< See H.264 E.1.2
184
+    int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
185
+    int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
186
+    int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
187
+    int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
188
+    int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
189
+    int residual_color_transform_flag;    ///< residual_colour_transform_flag
190
+    int constraint_set_flags;             ///< constraint_set[0-3]_flag
191
+    uint8_t data[4096];
192
+    size_t data_size;
193
+} SPS;
194
+
195
+/**
196
+ * Picture parameter set
197
+ */
198
+typedef struct PPS {
199
+    unsigned int sps_id;
200
+    int cabac;                  ///< entropy_coding_mode_flag
201
+    int pic_order_present;      ///< pic_order_present_flag
202
+    int slice_group_count;      ///< num_slice_groups_minus1 + 1
203
+    int mb_slice_group_map_type;
204
+    unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
205
+    int weighted_pred;          ///< weighted_pred_flag
206
+    int weighted_bipred_idc;
207
+    int init_qp;                ///< pic_init_qp_minus26 + 26
208
+    int init_qs;                ///< pic_init_qs_minus26 + 26
209
+    int chroma_qp_index_offset[2];
210
+    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
211
+    int constrained_intra_pred;     ///< constrained_intra_pred_flag
212
+    int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
213
+    int transform_8x8_mode;         ///< transform_8x8_mode_flag
214
+    uint8_t scaling_matrix4[6][16];
215
+    uint8_t scaling_matrix8[6][64];
216
+    uint8_t chroma_qp_table[2][QP_MAX_NUM+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
217
+    int chroma_qp_diff;
218
+    uint8_t data[4096];
219
+    size_t data_size;
220
+
221
+    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
222
+    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
223
+    uint32_t(*dequant4_coeff[6])[16];
224
+    uint32_t(*dequant8_coeff[6])[64];
225
+} PPS;
226
+
227
+typedef struct H264ParamSets {
228
+    AVBufferRef *sps_list[MAX_SPS_COUNT];
229
+    AVBufferRef *pps_list[MAX_PPS_COUNT];
230
+
231
+    AVBufferRef *pps_ref;
232
+    AVBufferRef *sps_ref;
233
+    /* currently active parameters sets */
234
+    const PPS *pps;
235
+    const SPS *sps;
236
+} H264ParamSets;
237
+
238
+/**
239
+ * Memory management control operation opcode.
240
+ */
241
+typedef enum MMCOOpcode {
242
+    MMCO_END = 0,
243
+    MMCO_SHORT2UNUSED,
244
+    MMCO_LONG2UNUSED,
245
+    MMCO_SHORT2LONG,
246
+    MMCO_SET_MAX_LONG,
247
+    MMCO_RESET,
248
+    MMCO_LONG,
249
+} MMCOOpcode;
250
+
251
+/**
252
+ * Memory management control operation.
253
+ */
254
+typedef struct MMCO {
255
+    MMCOOpcode opcode;
256
+    int short_pic_num;  ///< pic_num without wrapping (pic_num & max_pic_num)
257
+    int long_arg;       ///< index, pic_num, or num long refs depending on opcode
258
+} MMCO;
259
+
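As a rough illustration of what one of these opcodes means in practice: MMCO_SHORT2UNUSED drops a specific short-term reference, identified by pic_num, instead of letting the sliding window evict the oldest entry. A toy sketch with invented arrays (the decoder's real lists live in H264Context):

    /* Sketch only: the effect of an MMCO_SHORT2UNUSED operation on a toy list. */
    #include <stdio.h>

    int main(void)
    {
        int short_ref_pic_num[4] = { 12, 11, 10, 9 };  /* newest first, as pic_num */
        int short_ref_count = 4;
        int mmco_pic_num = 10;                         /* target taken from short_pic_num */

        /* mark that specific short-term reference as unused, i.e. drop it */
        for (int i = 0; i < short_ref_count; i++) {
            if (short_ref_pic_num[i] == mmco_pic_num) {
                for (int j = i; j < short_ref_count - 1; j++)
                    short_ref_pic_num[j] = short_ref_pic_num[j + 1];
                short_ref_count--;
                break;
            }
        }

        for (int i = 0; i < short_ref_count; i++)
            printf("short ref %d: pic_num %d\n", i, short_ref_pic_num[i]);
        return 0;
    }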
260
+typedef struct H264Picture {
261
+    AVFrame *f;
262
+    ThreadFrame tf;
263
+
264
+    AVBufferRef *qscale_table_buf;
265
+    int8_t *qscale_table;
266
+
267
+    AVBufferRef *motion_val_buf[2];
268
+    int16_t (*motion_val[2])[2];
269
+
270
+    AVBufferRef *mb_type_buf;
271
+    uint32_t *mb_type;
272
+
273
+    AVBufferRef *hwaccel_priv_buf;
274
+    void *hwaccel_picture_private; ///< hardware accelerator private data
275
+
276
+    AVBufferRef *ref_index_buf[2];
277
+    int8_t *ref_index[2];
278
+
279
+    int field_poc[2];       ///< top/bottom POC
280
+    int poc;                ///< frame POC
281
+    int frame_num;          ///< frame_num (raw frame_num from slice header)
282
+    int mmco_reset;         /**< MMCO_RESET sets this to 1. Reordering code must
283
+                                 not mix pictures before and after MMCO_RESET. */
284
+    int pic_id;             /**< pic_num (short -> no wrap version of pic_num,
285
+                                 pic_num & max_pic_num; long -> long_pic_num) */
286
+    int long_ref;           ///< 1->long term reference 0->short term reference
287
+    int ref_poc[2][2][32];  ///< POCs of the frames/fields used as reference (FIXME need per slice)
288
+    int ref_count[2][2];    ///< number of entries in ref_poc         (FIXME need per slice)
289
+    int mbaff;              ///< 1 -> MBAFF frame 0-> not MBAFF
290
+    int field_picture;      ///< whether or not picture was encoded in separate fields
291
+
292
+    int reference;
293
+    int recovered;          ///< picture at IDR or recovery point + recovery count
294
+    int invalid_gap;
295
+    int sei_recovery_frame_cnt;
296
+
297
+    int crop;
298
+    int crop_left;
299
+    int crop_top;
300
+} H264Picture;
301
+
302
+typedef struct H264Ref {
303
+    uint8_t *data[3];
304
+    int linesize[3];
305
+
306
+    int reference;
307
+    int poc;
308
+    int pic_id;
309
+
310
+    H264Picture *parent;
311
+} H264Ref;
312
+
313
+typedef struct H264SliceContext {
314
+    struct H264Context *h264;
315
+    GetBitContext gb;
316
+    ERContext er;
317
+
318
+    int slice_num;
319
+    int slice_type;
320
+    int slice_type_nos;         ///< S-free slice type (SI/SP are remapped to I/P)
321
+    int slice_type_fixed;
322
+
323
+    int qscale;
324
+    int chroma_qp[2];   // QPc
325
+    int qp_thresh;      ///< QP threshold to skip loopfilter
326
+    int last_qscale_diff;
327
+
328
+    // deblock
329
+    int deblocking_filter;          ///< disable_deblocking_filter_idc with 1 <-> 0
330
+    int slice_alpha_c0_offset;
331
+    int slice_beta_offset;
332
+
333
+    H264PredWeightTable pwt;
334
+
335
+    int prev_mb_skipped;
336
+    int next_mb_skipped;
337
+
338
+    int chroma_pred_mode;
339
+    int intra16x16_pred_mode;
340
+
341
+    int8_t intra4x4_pred_mode_cache[5 * 8];
342
+    int8_t(*intra4x4_pred_mode);
343
+
344
+    int topleft_mb_xy;
345
+    int top_mb_xy;
346
+    int topright_mb_xy;
347
+    int left_mb_xy[LEFT_MBS];
348
+
349
+    int topleft_type;
350
+    int top_type;
351
+    int topright_type;
352
+    int left_type[LEFT_MBS];
353
+
354
+    const uint8_t *left_block;
355
+    int topleft_partition;
356
+
357
+    unsigned int topleft_samples_available;
358
+    unsigned int top_samples_available;
359
+    unsigned int topright_samples_available;
360
+    unsigned int left_samples_available;
361
+
362
+    ptrdiff_t linesize, uvlinesize;
363
+    ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
364
+    ptrdiff_t mb_uvlinesize;
365
+
366
+    int mb_x, mb_y;
367
+    int mb_xy;
368
+    int resync_mb_x;
369
+    int resync_mb_y;
370
+    unsigned int first_mb_addr;
371
+    // index of the first MB of the next slice
372
+    int next_slice_idx;
373
+    int mb_skip_run;
374
+    int is_complex;
375
+
376
+    int picture_structure;
377
+    int mb_field_decoding_flag;
378
+    int mb_mbaff;               ///< mb_aff_frame && mb_field_decoding_flag
379
+
380
+    int redundant_pic_count;
381
+
382
+    /**
383
+     * number of neighbors (top and/or left) that used 8x8 dct
384
+     */
385
+    int neighbor_transform_size;
386
+
387
+    int direct_spatial_mv_pred;
388
+    int col_parity;
389
+    int col_fieldoff;
390
+
391
+    int cbp;
392
+    int top_cbp;
393
+    int left_cbp;
394
+
395
+    int dist_scale_factor[32];
396
+    int dist_scale_factor_field[2][32];
397
+    int map_col_to_list0[2][16 + 32];
398
+    int map_col_to_list0_field[2][2][16 + 32];
399
+
400
+    /**
401
+     * num_ref_idx_l0/1_active_minus1 + 1
402
+     */
403
+    unsigned int ref_count[2];          ///< counts frames or fields, depending on current mb mode
404
+    unsigned int list_count;
405
+    H264Ref ref_list[2][48];        /**< 0..15: frame refs, 16..47: mbaff field refs.
406
+                                         *   Reordered version of default_ref_list
407
+                                         *   according to picture reordering in slice header */
408
+    struct {
409
+        uint8_t op;
410
+        uint32_t val;
411
+    } ref_modifications[2][32];
412
+    int nb_ref_modifications[2];
413
+
414
+    unsigned int pps_id;
415
+
416
+    const uint8_t *intra_pcm_ptr;
417
+    int16_t *dc_val_base;
418
+
419
+    uint8_t *bipred_scratchpad;
420
+    uint8_t *edge_emu_buffer;
421
+    uint8_t (*top_borders[2])[(16 * 3) * 2];
422
+    int bipred_scratchpad_allocated;
423
+    int edge_emu_buffer_allocated;
424
+    int top_borders_allocated[2];
425
+
426
+    /**
427
+     * non zero coeff count cache.
428
+     * is 64 if not available.
429
+     */
430
+    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
431
+
432
+    /**
433
+     * Motion vector cache.
434
+     */
435
+    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
436
+    DECLARE_ALIGNED(8,  int8_t, ref_cache)[2][5 * 8];
437
+    DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
438
+    uint8_t direct_cache[5 * 8];
439
+
440
+    DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
441
+
442
+    ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
443
+    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
444
+    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
445
+    ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either
446
+    ///< check that i is not too large or ensure that there is some unused stuff after mb
447
+    int16_t mb_padding[256 * 2];
448
+
449
+    uint8_t (*mvd_table[2])[2];
450
+
451
+    /**
452
+     * CABAC
453
+     */
454
+    CABACContext cabac;
455
+    uint8_t cabac_state[1024];
456
+    int cabac_init_idc;
457
+
458
+    MMCO mmco[MAX_MMCO_COUNT];
459
+    int  nb_mmco;
460
+    int explicit_ref_marking;
461
+
462
+    int frame_num;
463
+    int poc_lsb;
464
+    int delta_poc_bottom;
465
+    int delta_poc[2];
466
+    int curr_pic_num;
467
+    int max_pic_num;
468
+} H264SliceContext;
469
+
470
+/**
471
+ * H264Context
472
+ */
473
+typedef struct H264Context {
474
+    const AVClass *class;
475
+    AVCodecContext *avctx;
476
+    VideoDSPContext vdsp;
477
+    H264DSPContext h264dsp;
478
+    H264ChromaContext h264chroma;
479
+    H264QpelContext h264qpel;
480
+
481
+    H264Picture DPB[H264_MAX_PICTURE_COUNT];
482
+    H264Picture *cur_pic_ptr;
483
+    H264Picture cur_pic;
484
+    H264Picture last_pic_for_ec;
485
+
486
+    H264SliceContext *slice_ctx;
487
+    int            nb_slice_ctx;
488
+
489
+    H2645Packet pkt;
490
+
491
+    int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264
492
+
493
+    /* coded dimensions -- 16 * mb w/h */
494
+    int width, height;
495
+    int chroma_x_shift, chroma_y_shift;
496
+
497
+    /**
498
+     * Backup frame properties: needed because they can differ
499
+     * between the returned frame and the last decoded frame.
500
+     **/
501
+    int backup_width;
502
+    int backup_height;
503
+    enum AVPixelFormat backup_pix_fmt;
504
+
505
+    int droppable;
506
+    int coded_picture_number;
507
+
508
+    int context_initialized;
509
+    int flags;
510
+    int workaround_bugs;
511
+    /* Set when slice threading is used and at least one slice uses deblocking
512
+     * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
513
+     * during normal MB decoding and execute it serially at the end.
514
+     */
515
+    int postpone_filter;
516
+
517
+    int8_t(*intra4x4_pred_mode);
518
+    H264PredContext hpc;
519
+
520
+    uint8_t (*non_zero_count)[48];
521
+
522
+#define LIST_NOT_USED -1 // FIXME rename?
523
+#define PART_NOT_AVAILABLE -2
524
+
525
+    /**
526
+     * block_offset[ 0..23] for frame macroblocks
527
+     * block_offset[24..47] for field macroblocks
528
+     */
529
+    int block_offset[2 * (16 * 3)];
530
+
531
+    uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
532
+    uint32_t *mb2br_xy;
533
+    int b_stride;       // FIXME use s->b4_stride
534
+
535
+    uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
536
+
537
+    // interlacing specific flags
538
+    int mb_aff_frame;
539
+    int picture_structure;
540
+    int first_field;
541
+
542
+    uint8_t *list_counts;               ///< Array of list_count per MB specifying the slice type
543
+
544
+    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
545
+    uint16_t *cbp_table;
546
+
547
+    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
548
+    uint8_t *chroma_pred_mode_table;
549
+    uint8_t (*mvd_table[2])[2];
550
+    uint8_t *direct_table;
551
+
552
+    uint8_t zigzag_scan[16];
553
+    uint8_t zigzag_scan8x8[64];
554
+    uint8_t zigzag_scan8x8_cavlc[64];
555
+    uint8_t field_scan[16];
556
+    uint8_t field_scan8x8[64];
557
+    uint8_t field_scan8x8_cavlc[64];
558
+    uint8_t zigzag_scan_q0[16];
559
+    uint8_t zigzag_scan8x8_q0[64];
560
+    uint8_t zigzag_scan8x8_cavlc_q0[64];
561
+    uint8_t field_scan_q0[16];
562
+    uint8_t field_scan8x8_q0[64];
563
+    uint8_t field_scan8x8_cavlc_q0[64];
564
+
565
+    int mb_y;
566
+    int mb_height, mb_width;
567
+    int mb_stride;
568
+    int mb_num;
569
+
570
+    // =============================================================
571
+    // Things below are not used in the MB or more inner code
572
+
573
+    int nal_ref_idc;
574
+    int nal_unit_type;
575
+
576
+    /**
577
+     * Used to parse AVC variant of H.264
578
+     */
579
+    int is_avc;           ///< this flag is != 0 if codec is avc1
580
+    int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)
581
+
582
+    int bit_depth_luma;         ///< luma bit depth from sps to detect changes
583
+    int chroma_format_idc;      ///< chroma format from sps to detect changes
584
+
585
+    H264ParamSets ps;
586
+
587
+    uint16_t *slice_table_base;
588
+
589
+    H264POCContext poc;
590
+
591
+    H264Ref default_ref[2];
592
+    H264Picture *short_ref[32];
593
+    H264Picture *long_ref[32];
594
+    H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
595
+    int last_pocs[MAX_DELAYED_PIC_COUNT];
596
+    H264Picture *next_output_pic;
597
+    int next_outputed_poc;
598
+
599
+    /**
600
+     * memory management control operations buffer.
601
+     */
602
+    MMCO mmco[MAX_MMCO_COUNT];
603
+    int  nb_mmco;
604
+    int mmco_reset;
605
+    int explicit_ref_marking;
606
+
607
+    int long_ref_count;     ///< number of actual long term references
608
+    int short_ref_count;    ///< number of actual short term references
609
+
610
+    /**
611
+     * @name Members for slice based multithreading
612
+     * @{
613
+     */
614
+    /**
615
+     * current slice number, used to initialize slice_num of each thread/context
616
+     */
617
+    int current_slice;
618
+
619
+    /**
620
+     * Max number of threads / contexts.
621
+     * This is equal to AVCodecContext.thread_count unless
622
+     * multithreaded decoding is impossible, in which case it is
623
+     * reduced to 1.
624
+     */
625
+    int max_contexts;
626
+
627
+    /**
628
+     *  1 if the single thread fallback warning has already been
629
+     *  displayed, 0 otherwise.
630
+     */
631
+    int single_decode_warning;
632
+
633
+    /** @} */
634
+
635
+    /**
636
+     * Complements sei_pic_struct.
637
+     * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
638
+     * However, soft telecined frames may have these values.
639
+     * This is used in an attempt to flag soft-telecined progressive content.
640
+     */
641
+    int prev_interlaced_frame;
642
+
643
+    /**
644
+     * Whether the SEI recovery points look valid.
645
+     */
646
+    int valid_recovery_point;
647
+
648
+    /**
649
+     * recovery_frame is the frame_num at which the next frame should
650
+     * be fully constructed.
651
+     *
652
+     * Set to -1 when not expecting a recovery point.
653
+     */
654
+    int recovery_frame;
655
+
656
+/**
657
+ * We have seen an IDR, so all the following frames in coded order are correctly
658
+ * decodable.
659
+ */
660
+#define FRAME_RECOVERED_IDR  (1 << 0)
661
+/**
662
+ * Sufficient number of frames have been decoded since a SEI recovery point,
663
+ * so all the following frames in presentation order are correct.
664
+ */
665
+#define FRAME_RECOVERED_SEI  (1 << 1)
666
+
667
+    int frame_recovered;    ///< Initial frame has been completely recovered
668
+
669
+    int has_recovery_point;
670
+
671
+    int missing_fields;
672
+
673
+    /* for frame threading, this is set to 1
674
+     * after finish_setup() has been called, so we cannot modify
675
+     * some context properties (which are supposed to stay constant between
676
+     * slices) anymore */
677
+    int setup_finished;
678
+
679
+    int cur_chroma_format_idc;
680
+    int cur_bit_depth_luma;
681
+    int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
682
+
683
+    int enable_er;
684
+
685
+    H264SEIContext sei;
686
+
687
+    AVBufferPool *qscale_table_pool;
688
+    AVBufferPool *mb_type_pool;
689
+    AVBufferPool *motion_val_pool;
690
+    AVBufferPool *ref_index_pool;
691
+    int ref2frm[MAX_SLICES][2][64];     ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
692
+} H264Context;
693
+
694
+extern const uint16_t ff_h264_mb_sizes[4];
695
+
696
+/**
697
+ * Uninit H264 param sets structure.
698
+ */
699
+
700
+void ff_h264_ps_uninit(H264ParamSets *ps);
701
+
702
+/**
703
+ * Decode SPS
704
+ */
705
+int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
706
+                                     H264ParamSets *ps, int ignore_truncation);
707
+
708
+/**
709
+ * Decode PPS
710
+ */
711
+int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
712
+                                         H264ParamSets *ps, int bit_length);
713
+
714
+/**
715
+ * Reconstruct bitstream slice_type.
716
+ */
717
+int ff_h264_get_slice_type(const H264SliceContext *sl);
718
+
719
+/**
720
+ * Allocate tables.
721
+ * Needs width/height.
722
+ */
723
+int ff_h264_alloc_tables(H264Context *h);
724
+
725
+int ff_h264_decode_ref_pic_list_reordering(const H264Context *h, H264SliceContext *sl);
726
+int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl);
727
+void ff_h264_remove_all_refs(H264Context *h);
728
+
729
+/**
730
+ * Execute the reference picture marking (memory management control operations).
731
+ */
732
+int ff_h264_execute_ref_pic_marking(H264Context *h);
733
+
734
+int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl,
735
+                                   GetBitContext *gb);
736
+
737
+void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
738
+int ff_h264_decode_init(AVCodecContext *avctx);
739
+void ff_h264_decode_init_vlc(void);
740
+
741
+/**
742
+ * Decode a macroblock
743
+ * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
744
+ */
745
+int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);
746
+
747
+/**
748
+ * Decode a CABAC coded macroblock
749
+ * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
750
+ */
751
+int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
752
+
753
+void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
754
+
755
+void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
756
+void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
757
+void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
758
+                                int *mb_type);
759
+
760
+void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
761
+                            uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
762
+                            unsigned int linesize, unsigned int uvlinesize);
763
+void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
764
+                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
765
+                       unsigned int linesize, unsigned int uvlinesize);
766
+
767
+/*
768
+ * o-o o-o
769
+ *  / / /
770
+ * o-o o-o
771
+ *  ,---'
772
+ * o-o o-o
773
+ *  / / /
774
+ * o-o o-o
775
+ */
776
+
777
+/* Scan8 organization:
778
+ *    0 1 2 3 4 5 6 7
779
+ * 0  DY    y y y y y
780
+ * 1        y Y Y Y Y
781
+ * 2        y Y Y Y Y
782
+ * 3        y Y Y Y Y
783
+ * 4        y Y Y Y Y
784
+ * 5  DU    u u u u u
785
+ * 6        u U U U U
786
+ * 7        u U U U U
787
+ * 8        u U U U U
788
+ * 9        u U U U U
789
+ * 10 DV    v v v v v
790
+ * 11       v V V V V
791
+ * 12       v V V V V
792
+ * 13       v V V V V
793
+ * 14       v V V V V
794
+ * DY/DU/DV are for luma/chroma DC.
795
+ */
796
+
797
+#define LUMA_DC_BLOCK_INDEX   48
798
+#define CHROMA_DC_BLOCK_INDEX 49
799
+
800
+// This table must be here because scan8[constant] must be known at compile time
801
+static const uint8_t scan8[16 * 3 + 3] = {
802
+    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
803
+    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
804
+    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
805
+    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
806
+    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
807
+    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
808
+    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
809
+    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
810
+    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
811
+    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
812
+    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
813
+    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
814
+    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
815
+};
816
+
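Each scan8[] entry encodes a position in the 8-entries-per-row caches drawn above, so the left and top neighbours of index scan8[n] sit at scan8[n] - 1 and scan8[n] - 8, which is what helpers such as pred_intra_mode() below rely on. A small self-contained sketch that decodes a few entries back into (row, column) form, using a local copy of the first table values:

    #include <stdio.h>
    #include <stdint.h>

    /* first four luma 4x4 blocks, copied from scan8[] above */
    static const uint8_t scan8_demo[4] = {
        4 + 1 * 8, 5 + 1 * 8, 4 + 2 * 8, 5 + 2 * 8,
    };

    int main(void)
    {
        for (int n = 0; n < 4; n++) {
            int idx = scan8_demo[n];
            /* left neighbour is at idx - 1, top neighbour at idx - 8 */
            printf("block %d -> cache index %2d (row %d, col %d), left %d, top %d\n",
                   n, idx, idx / 8, idx % 8, idx - 1, idx - 8);
        }
        return 0;
    }
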
817
+static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
818
+{
819
+#if HAVE_BIGENDIAN
820
+    return (b & 0xFFFF) + (a << 16);
821
+#else
822
+    return (a & 0xFFFF) + (b << 16);
823
+#endif
824
+}
825
+
826
+static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
827
+{
828
+#if HAVE_BIGENDIAN
829
+    return (b & 0xFF) + (a << 8);
830
+#else
831
+    return (a & 0xFF) + (b << 8);
832
+#endif
833
+}
834
+
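pack16to32() picks the operand order by endianness so that a single 32-bit store puts the first argument into element 0 and the second into element 1 of an int16_t[2] pair, like the motion-vector entries handled by write_back_motion_list() further down. A standalone sketch of that property; the pack16to32_demo name and the HAVE_BIGENDIAN fallback are assumptions of this demo, not part of the header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef HAVE_BIGENDIAN
    #define HAVE_BIGENDIAN 0   /* demo assumption: little-endian host unless the build says otherwise */
    #endif

    static uint32_t pack16to32_demo(unsigned a, unsigned b)
    {
    #if HAVE_BIGENDIAN
        return (b & 0xFFFF) + (a << 16);
    #else
        return (a & 0xFFFF) + (b << 16);
    #endif
    }

    int main(void)
    {
        int16_t  mv[2];
        uint32_t packed = pack16to32_demo((uint16_t)-3, 7);  /* mv_x = -3, mv_y = 7 */

        memcpy(mv, &packed, sizeof(packed));                 /* one 32-bit store */
        printf("mv_x=%d mv_y=%d\n", mv[0], mv[1]);
        return 0;
    }
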
835
+/**
836
+ * Get the chroma QP.
837
+ */
838
+static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
839
+{
840
+    return pps->chroma_qp_table[t][qscale];
841
+}
842
+
843
+/**
844
+ * Get the predicted intra4x4 prediction mode.
845
+ */
846
+static av_always_inline int pred_intra_mode(const H264Context *h,
847
+                                            H264SliceContext *sl, int n)
848
+{
849
+    const int index8 = scan8[n];
850
+    const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
851
+    const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
852
+    const int min    = FFMIN(left, top);
853
+
854
+    ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
855
+
856
+    if (min < 0)
857
+        return DC_PRED;
858
+    else
859
+        return min;
860
+}
861
+
862
+static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
863
+                                                        H264SliceContext *sl)
864
+{
865
+    int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
866
+    int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
867
+
868
+    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
869
+    i4x4[4] = i4x4_cache[7 + 8 * 3];
870
+    i4x4[5] = i4x4_cache[7 + 8 * 2];
871
+    i4x4[6] = i4x4_cache[7 + 8 * 1];
872
+}
873
+
874
+static av_always_inline void write_back_non_zero_count(const H264Context *h,
875
+                                                       H264SliceContext *sl)
876
+{
877
+    const int mb_xy    = sl->mb_xy;
878
+    uint8_t *nnz       = h->non_zero_count[mb_xy];
879
+    uint8_t *nnz_cache = sl->non_zero_count_cache;
880
+
881
+    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
882
+    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
883
+    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
884
+    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
885
+    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
886
+    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
887
+    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
888
+    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
889
+
890
+    if (!h->chroma_y_shift) {
891
+        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
892
+        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
893
+        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
894
+        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
895
+    }
896
+}
897
+
898
+static av_always_inline void write_back_motion_list(const H264Context *h,
899
+                                                    H264SliceContext *sl,
900
+                                                    int b_stride,
901
+                                                    int b_xy, int b8_xy,
902
+                                                    int mb_type, int list)
903
+{
904
+    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
905
+    int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
906
+    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
907
+    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
908
+    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
909
+    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
910
+    if (CABAC(h)) {
911
+        uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
912
+                                                        : h->mb2br_xy[sl->mb_xy]];
913
+        uint8_t(*mvd_src)[2]  = &sl->mvd_cache[list][scan8[0]];
914
+        if (IS_SKIP(mb_type)) {
915
+            AV_ZERO128(mvd_dst);
916
+        } else {
917
+            AV_COPY64(mvd_dst, mvd_src + 8 * 3);
918
+            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
919
+            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
920
+            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
921
+        }
922
+    }
923
+
924
+    {
925
+        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
926
+        int8_t *ref_cache = sl->ref_cache[list];
927
+        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
928
+        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
929
+        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
930
+        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
931
+    }
932
+}
933
+
934
+static av_always_inline void write_back_motion(const H264Context *h,
935
+                                               H264SliceContext *sl,
936
+                                               int mb_type)
937
+{
938
+    const int b_stride      = h->b_stride;
939
+    const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
940
+    const int b8_xy = 4 * sl->mb_xy;
941
+
942
+    if (USES_LIST(mb_type, 0)) {
943
+        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
944
+    } else {
945
+        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
946
+                       2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
947
+    }
948
+    if (USES_LIST(mb_type, 1))
949
+        write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
950
+
951
+    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
952
+        if (IS_8X8(mb_type)) {
953
+            uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
954
+            direct_table[1] = sl->sub_mb_type[1] >> 1;
955
+            direct_table[2] = sl->sub_mb_type[2] >> 1;
956
+            direct_table[3] = sl->sub_mb_type[3] >> 1;
957
+        }
958
+    }
959
+}
960
+
961
+static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
962
+{
963
+    if (h->ps.sps->direct_8x8_inference_flag)
964
+        return !(AV_RN64A(sl->sub_mb_type) &
965
+                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
966
+                  0x0001000100010001ULL));
967
+    else
968
+        return !(AV_RN64A(sl->sub_mb_type) &
969
+                 ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
970
+                  0x0001000100010001ULL));
971
+}
972
+
973
+static inline int find_start_code(const uint8_t *buf, int buf_size,
974
+                           int buf_index, int next_avc)
975
+{
976
+    uint32_t state = -1;
977
+
978
+    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
979
+
980
+    return FFMIN(buf_index, buf_size);
981
+}
982
+
983
+int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
984
+
985
+int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
986
+void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
987
+
988
+int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
989
+
990
+void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
991
+
992
+int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
993
+                                const H2645NAL *nal);
994
+#define SLICE_SINGLETHREAD 1
995
+#define SLICE_SKIPED 2
996
+
997
+int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
998
+int ff_h264_update_thread_context(AVCodecContext *dst,
999
+                                  const AVCodecContext *src);
1000
+
1001
+void ff_h264_flush_change(H264Context *h);
1002
+
1003
+void ff_h264_free_tables(H264Context *h);
1004
+
1005
+void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);
1006
+
1007
+#endif /* AVCODEC_H264DEC_H */
... ...
@@ -27,7 +27,7 @@
27 27
 
28 28
 #include "bit_depth_template.c"
29 29
 #include "libavutil/common.h"
30
-#include "h264.h"
30
+#include "h264dec.h"
31 31
 #include "h264idct.h"
32 32
 
33 33
 void FUNCC(ff_h264_idct_add)(uint8_t *_dst, int16_t *_block, int stride)
... ...
@@ -32,7 +32,7 @@
32 32
 #include "libavutil/atomic.h"
33 33
 
34 34
 #include "avcodec.h"
35
-#include "h264.h"
35
+#include "h264dec.h"
36 36
 #include "internal.h"
37 37
 #include "mediacodecdec.h"
38 38
 #include "mediacodec_wrapper.h"
... ...
@@ -21,7 +21,7 @@
21 21
 #ifndef AVCODEC_MIPS_H264CHROMA_MIPS_H
22 22
 #define AVCODEC_MIPS_H264CHROMA_MIPS_H
23 23
 
24
-#include "libavcodec/h264.h"
24
+#include "libavcodec/h264dec.h"
25 25
 void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, int stride,
26 26
                                 int height, int x, int y);
27 27
 void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, int stride,
... ...
@@ -22,7 +22,7 @@
22 22
 #ifndef AVCODEC_MIPS_H264DSP_MIPS_H
23 23
 #define AVCODEC_MIPS_H264DSP_MIPS_H
24 24
 
25
-#include "libavcodec/h264.h"
25
+#include "libavcodec/h264dec.h"
26 26
 #include "constants.h"
27 27
 
28 28
 void ff_h264_h_lpf_luma_inter_msa(uint8_t *src, int stride,
... ...
@@ -41,7 +41,7 @@
41 41
 #include "libavutil/opt.h"
42 42
 
43 43
 #include "avcodec.h"
44
-#include "h264.h"
44
+#include "h264dec.h"
45 45
 #include "internal.h"
46 46
 
47 47
 #ifdef OMX_SKIP64BIT
... ...
@@ -31,7 +31,7 @@
31 31
 #include "libavutil/ppc/types_altivec.h"
32 32
 #include "libavutil/ppc/util_altivec.h"
33 33
 
34
-#include "libavcodec/h264.h"
34
+#include "libavcodec/h264dec.h"
35 35
 #include "libavcodec/h264dsp.h"
36 36
 
37 37
 #if HAVE_ALTIVEC
... ...
@@ -30,7 +30,7 @@
30 30
 
31 31
 #include "avcodec.h"
32 32
 #include "internal.h"
33
-#include "h264.h"
33
+#include "h264dec.h"
34 34
 #include "qsv.h"
35 35
 #include "qsv_internal.h"
36 36
 #include "qsvenc.h"
... ...
@@ -46,7 +46,7 @@
46 46
 #include "internal.h"
47 47
 #include "avcodec.h"
48 48
 #include "mpegutils.h"
49
-#include "h264.h"
49
+#include "h264dec.h"
50 50
 #include "h264data.h"
51 51
 #include "golomb.h"
52 52
 #include "hpeldsp.h"
... ...
@@ -25,7 +25,7 @@
25 25
 #include "libavutil/pixfmt.h"
26 26
 
27 27
 #include "avcodec.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "h264_sei.h"
30 30
 #include "internal.h"
31 31
 #include "vaapi_encode.h"
... ...
@@ -21,7 +21,7 @@
21 21
  */
22 22
 
23 23
 #include "vaapi_internal.h"
24
-#include "h264.h"
24
+#include "h264dec.h"
25 25
 #include "mpegutils.h"
26 26
 
27 27
 /**
... ...
@@ -26,7 +26,7 @@
26 26
 
27 27
 #include "vda.h"
28 28
 #include "libavutil/avutil.h"
29
-#include "h264.h"
29
+#include "h264dec.h"
30 30
 
31 31
 struct vda_buffer {
32 32
     CVPixelBufferRef cv_buffer;
... ...
@@ -28,7 +28,7 @@
28 28
 #include <CoreFoundation/CoreFoundation.h>
29 29
 
30 30
 #include "vda.h"
31
-#include "h264.h"
31
+#include "h264dec.h"
32 32
 #include "avcodec.h"
33 33
 
34 34
 #ifndef kCFCoreFoundationVersionNumber10_7
... ...
@@ -25,7 +25,7 @@
25 25
 
26 26
 #include "avcodec.h"
27 27
 #include "internal.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "vc1.h"
30 30
 #include "vdpau.h"
31 31
 #include "vdpau_compat.h"
... ...
@@ -26,7 +26,7 @@
26 26
 
27 27
 #include <stdint.h>
28 28
 
29
-#include "h264.h"
29
+#include "h264dec.h"
30 30
 #include "mpeg4video.h"
31 31
 
32 32
 void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf,
... ...
@@ -25,7 +25,7 @@
25 25
 
26 26
 #include "avcodec.h"
27 27
 #include "internal.h"
28
-#include "h264.h"
28
+#include "h264dec.h"
29 29
 #include "mpegutils.h"
30 30
 #include "vdpau.h"
31 31
 #include "vdpau_internal.h"
... ...
@@ -29,7 +29,7 @@
29 29
 #include "vda_vt_internal.h"
30 30
 #include "libavutil/avutil.h"
31 31
 #include "bytestream.h"
32
-#include "h264.h"
32
+#include "h264dec.h"
33 33
 #include "mpegvideo.h"
34 34
 
35 35
 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
... ...
@@ -23,7 +23,7 @@
23 23
 #include "libavutil/cpu.h"
24 24
 #include "libavutil/x86/asm.h"
25 25
 #include "libavutil/x86/cpu.h"
26
-#include "libavcodec/h264.h"
26
+#include "libavcodec/h264dec.h"
27 27
 #include "libavcodec/h264qpel.h"
28 28
 #include "libavcodec/pixels.h"
29 29
 #include "fpel.h"
... ...
@@ -48,7 +48,7 @@
48 48
 #include "libavutil/time_internal.h"
49 49
 #include "libavcodec/bytestream.h"
50 50
 #include "libavcodec/dnxhddata.h"
51
-#include "libavcodec/h264.h"
51
+#include "libavcodec/h264dec.h"
52 52
 #include "libavcodec/internal.h"
53 53
 #include "audiointerleave.h"
54 54
 #include "avformat.h"