Most of the changes are just trivial replacements of
fields from MpegEncContext with equivalent fields in H264Context.
Everything in h264* other than h264.c consists of those trivial changes.
The nontrivial parts are:
1) extracting a simplified version of the frame management code from
mpegvideo.c. We don't need last/next_picture anymore, since h264 uses
its own more complex system already and those were set only to appease
the mpegvideo parts.
2) some tables that need to be allocated/freed in appropriate places.
3) hwaccels -- mostly trivial replacements.
for dxva, the draw_horiz_band() call is moved from
ff_dxva2_common_end_frame() to per-codec end_frame() callbacks,
because it's now different for h264 and MpegEncContext-based
decoders.
4) svq3 -- it does not use h264's complex reference system, so I just
added some very simplistic frame management instead and dropped the
use of ff_h264_frame_start(). Because of this I also had to move some
initialization code to svq3.
Additional fixes for chroma format and bit depth changes by
Janne Grunau <janne-libav@jannau.net>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
... | ... |
@@ -76,7 +76,7 @@ int ff_dxva2_commit_buffer(AVCodecContext *avctx, |
76 | 76 |
return result; |
77 | 77 |
} |
78 | 78 |
|
79 |
-int ff_dxva2_common_end_frame(AVCodecContext *avctx, MpegEncContext *s, |
|
79 |
+int ff_dxva2_common_end_frame(AVCodecContext *avctx, Picture *pic, |
|
80 | 80 |
const void *pp, unsigned pp_size, |
81 | 81 |
const void *qm, unsigned qm_size, |
82 | 82 |
int (*commit_bs_si)(AVCodecContext *, |
... | ... |
@@ -90,7 +90,7 @@ int ff_dxva2_common_end_frame(AVCodecContext *avctx, MpegEncContext *s, |
90 | 90 |
int result; |
91 | 91 |
|
92 | 92 |
if (FAILED(IDirectXVideoDecoder_BeginFrame(ctx->decoder, |
93 |
- ff_dxva2_get_surface(s->current_picture_ptr), |
|
93 |
+ ff_dxva2_get_surface(pic), |
|
94 | 94 |
NULL))) { |
95 | 95 |
av_log(avctx, AV_LOG_ERROR, "Failed to begin frame\n"); |
96 | 96 |
return -1; |
... | ... |
@@ -146,7 +146,5 @@ end: |
146 | 146 |
result = -1; |
147 | 147 |
} |
148 | 148 |
|
149 |
- if (!result) |
|
150 |
- ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
151 | 149 |
return result; |
152 | 150 |
} |
... | ... |
@@ -44,15 +44,14 @@ static void fill_picture_entry(DXVA_PicEntry_H264 *pic, |
44 | 44 |
static void fill_picture_parameters(struct dxva_context *ctx, const H264Context *h, |
45 | 45 |
DXVA_PicParams_H264 *pp) |
46 | 46 |
{ |
47 |
- const MpegEncContext *s = &h->s; |
|
48 |
- const Picture *current_picture = s->current_picture_ptr; |
|
47 |
+ const Picture *current_picture = h->cur_pic_ptr; |
|
49 | 48 |
int i, j; |
50 | 49 |
|
51 | 50 |
memset(pp, 0, sizeof(*pp)); |
52 | 51 |
/* Configure current picture */ |
53 | 52 |
fill_picture_entry(&pp->CurrPic, |
54 | 53 |
ff_dxva2_get_surface_index(ctx, current_picture), |
55 |
- s->picture_structure == PICT_BOTTOM_FIELD); |
|
54 |
+ h->picture_structure == PICT_BOTTOM_FIELD); |
|
56 | 55 |
/* Configure the set of references */ |
57 | 56 |
pp->UsedForReferenceFlags = 0; |
58 | 57 |
pp->NonExistingFrameFlags = 0; |
... | ... |
@@ -88,13 +87,13 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context |
88 | 88 |
} |
89 | 89 |
} |
90 | 90 |
|
91 |
- pp->wFrameWidthInMbsMinus1 = s->mb_width - 1; |
|
92 |
- pp->wFrameHeightInMbsMinus1 = s->mb_height - 1; |
|
91 |
+ pp->wFrameWidthInMbsMinus1 = h->mb_width - 1; |
|
92 |
+ pp->wFrameHeightInMbsMinus1 = h->mb_height - 1; |
|
93 | 93 |
pp->num_ref_frames = h->sps.ref_frame_count; |
94 | 94 |
|
95 |
- pp->wBitFields = ((s->picture_structure != PICT_FRAME) << 0) | |
|
95 |
+ pp->wBitFields = ((h->picture_structure != PICT_FRAME) << 0) | |
|
96 | 96 |
((h->sps.mb_aff && |
97 |
- (s->picture_structure == PICT_FRAME)) << 1) | |
|
97 |
+ (h->picture_structure == PICT_FRAME)) << 1) | |
|
98 | 98 |
(h->sps.residual_color_transform_flag << 2) | |
99 | 99 |
/* sp_for_switch_flag (not implemented by Libav) */ |
100 | 100 |
(0 << 3) | |
... | ... |
@@ -120,11 +119,11 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context |
120 | 120 |
pp->Reserved16Bits = 3; /* FIXME is there a way to detect the right mode ? */ |
121 | 121 |
pp->StatusReportFeedbackNumber = 1 + ctx->report_id++; |
122 | 122 |
pp->CurrFieldOrderCnt[0] = 0; |
123 |
- if ((s->picture_structure & PICT_TOP_FIELD) && |
|
123 |
+ if ((h->picture_structure & PICT_TOP_FIELD) && |
|
124 | 124 |
current_picture->field_poc[0] != INT_MAX) |
125 | 125 |
pp->CurrFieldOrderCnt[0] = current_picture->field_poc[0]; |
126 | 126 |
pp->CurrFieldOrderCnt[1] = 0; |
127 |
- if ((s->picture_structure & PICT_BOTTOM_FIELD) && |
|
127 |
+ if ((h->picture_structure & PICT_BOTTOM_FIELD) && |
|
128 | 128 |
current_picture->field_poc[1] != INT_MAX) |
129 | 129 |
pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1]; |
130 | 130 |
pp->pic_init_qs_minus26 = h->pps.init_qs - 26; |
... | ... |
@@ -200,7 +199,6 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, |
200 | 200 |
{ |
201 | 201 |
const H264Context *h = avctx->priv_data; |
202 | 202 |
struct dxva_context *ctx = avctx->hwaccel_context; |
203 |
- const MpegEncContext *s = &h->s; |
|
204 | 203 |
unsigned list; |
205 | 204 |
|
206 | 205 |
memset(slice, 0, sizeof(*slice)); |
... | ... |
@@ -208,9 +206,9 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, |
208 | 208 |
slice->SliceBytesInBuffer = size; |
209 | 209 |
slice->wBadSliceChopping = 0; |
210 | 210 |
|
211 |
- slice->first_mb_in_slice = (s->mb_y >> FIELD_OR_MBAFF_PICTURE) * s->mb_width + s->mb_x; |
|
211 |
+ slice->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE) * h->mb_width + h->mb_x; |
|
212 | 212 |
slice->NumMbsForSlice = 0; /* XXX it is set once we have all slices */ |
213 |
- slice->BitOffsetToSliceData = get_bits_count(&s->gb); |
|
213 |
+ slice->BitOffsetToSliceData = get_bits_count(&h->gb); |
|
214 | 214 |
slice->slice_type = ff_h264_get_slice_type(h); |
215 | 215 |
if (h->slice_type_fixed) |
216 | 216 |
slice->slice_type += 5; |
... | ... |
@@ -260,7 +258,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, |
260 | 260 |
} |
261 | 261 |
} |
262 | 262 |
slice->slice_qs_delta = 0; /* XXX not implemented by Libav */ |
263 |
- slice->slice_qp_delta = s->qscale - h->pps.init_qp; |
|
263 |
+ slice->slice_qp_delta = h->qscale - h->pps.init_qp; |
|
264 | 264 |
slice->redundant_pic_cnt = h->redundant_pic_count; |
265 | 265 |
if (h->slice_type == AV_PICTURE_TYPE_B) |
266 | 266 |
slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred; |
... | ... |
@@ -277,10 +275,9 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx, |
277 | 277 |
DXVA2_DecodeBufferDesc *sc) |
278 | 278 |
{ |
279 | 279 |
const H264Context *h = avctx->priv_data; |
280 |
- const MpegEncContext *s = &h->s; |
|
281 |
- const unsigned mb_count = s->mb_width * s->mb_height; |
|
280 |
+ const unsigned mb_count = h->mb_width * h->mb_height; |
|
282 | 281 |
struct dxva_context *ctx = avctx->hwaccel_context; |
283 |
- const Picture *current_picture = h->s.current_picture_ptr; |
|
282 |
+ const Picture *current_picture = h->cur_pic_ptr; |
|
284 | 283 |
struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private; |
285 | 284 |
DXVA_Slice_H264_Short *slice = NULL; |
286 | 285 |
uint8_t *dxva_data, *current, *end; |
... | ... |
@@ -376,7 +373,7 @@ static int start_frame(AVCodecContext *avctx, |
376 | 376 |
{ |
377 | 377 |
const H264Context *h = avctx->priv_data; |
378 | 378 |
struct dxva_context *ctx = avctx->hwaccel_context; |
379 |
- struct dxva2_picture_context *ctx_pic = h->s.current_picture_ptr->f.hwaccel_picture_private; |
|
379 |
+ struct dxva2_picture_context *ctx_pic = h->cur_pic_ptr->f.hwaccel_picture_private; |
|
380 | 380 |
|
381 | 381 |
if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0) |
382 | 382 |
return -1; |
... | ... |
@@ -399,7 +396,7 @@ static int decode_slice(AVCodecContext *avctx, |
399 | 399 |
{ |
400 | 400 |
const H264Context *h = avctx->priv_data; |
401 | 401 |
struct dxva_context *ctx = avctx->hwaccel_context; |
402 |
- const Picture *current_picture = h->s.current_picture_ptr; |
|
402 |
+ const Picture *current_picture = h->cur_pic_ptr; |
|
403 | 403 |
struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private; |
404 | 404 |
unsigned position; |
405 | 405 |
|
... | ... |
@@ -427,16 +424,19 @@ static int decode_slice(AVCodecContext *avctx, |
427 | 427 |
static int end_frame(AVCodecContext *avctx) |
428 | 428 |
{ |
429 | 429 |
H264Context *h = avctx->priv_data; |
430 |
- MpegEncContext *s = &h->s; |
|
431 | 430 |
struct dxva2_picture_context *ctx_pic = |
432 |
- h->s.current_picture_ptr->f.hwaccel_picture_private; |
|
431 |
+ h->cur_pic_ptr->f.hwaccel_picture_private; |
|
432 |
+ int ret; |
|
433 | 433 |
|
434 | 434 |
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0) |
435 | 435 |
return -1; |
436 |
- return ff_dxva2_common_end_frame(avctx, s, |
|
437 |
- &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
438 |
- &ctx_pic->qm, sizeof(ctx_pic->qm), |
|
439 |
- commit_bitstream_and_slice_buffer); |
|
436 |
+ ret = ff_dxva2_common_end_frame(avctx, h->cur_pic_ptr, |
|
437 |
+ &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
438 |
+ &ctx_pic->qm, sizeof(ctx_pic->qm), |
|
439 |
+ commit_bitstream_and_slice_buffer); |
|
440 |
+ if (!ret) |
|
441 |
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height); |
|
442 |
+ return ret; |
|
440 | 443 |
} |
441 | 444 |
|
442 | 445 |
AVHWAccel ff_h264_dxva2_hwaccel = { |
... | ... |
@@ -47,7 +47,7 @@ int ff_dxva2_commit_buffer(AVCodecContext *, struct dxva_context *, |
47 | 47 |
unsigned mb_count); |
48 | 48 |
|
49 | 49 |
|
50 |
-int ff_dxva2_common_end_frame(AVCodecContext *, MpegEncContext *, |
|
50 |
+int ff_dxva2_common_end_frame(AVCodecContext *, Picture *, |
|
51 | 51 |
const void *pp, unsigned pp_size, |
52 | 52 |
const void *qm, unsigned qm_size, |
53 | 53 |
int (*commit_bs_si)(AVCodecContext *, |
... | ... |
@@ -251,13 +251,17 @@ static int end_frame(AVCodecContext *avctx) |
251 | 251 |
struct MpegEncContext *s = avctx->priv_data; |
252 | 252 |
struct dxva2_picture_context *ctx_pic = |
253 | 253 |
s->current_picture_ptr->f.hwaccel_picture_private; |
254 |
+ int ret; |
|
254 | 255 |
|
255 | 256 |
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0) |
256 | 257 |
return -1; |
257 |
- return ff_dxva2_common_end_frame(avctx, s, |
|
258 |
- &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
259 |
- &ctx_pic->qm, sizeof(ctx_pic->qm), |
|
260 |
- commit_bitstream_and_slice_buffer); |
|
258 |
+ ret = ff_dxva2_common_end_frame(avctx, s->current_picture_ptr, |
|
259 |
+ &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
260 |
+ &ctx_pic->qm, sizeof(ctx_pic->qm), |
|
261 |
+ commit_bitstream_and_slice_buffer); |
|
262 |
+ if (!ret) |
|
263 |
+ ff_mpeg_draw_horiz_band(s, 0, avctx->height); |
|
264 |
+ return ret; |
|
261 | 265 |
} |
262 | 266 |
|
263 | 267 |
AVHWAccel ff_mpeg2_dxva2_hwaccel = { |
... | ... |
@@ -254,14 +254,18 @@ static int end_frame(AVCodecContext *avctx) |
254 | 254 |
{ |
255 | 255 |
VC1Context *v = avctx->priv_data; |
256 | 256 |
struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private; |
257 |
+ int ret; |
|
257 | 258 |
|
258 | 259 |
if (ctx_pic->bitstream_size <= 0) |
259 | 260 |
return -1; |
260 | 261 |
|
261 |
- return ff_dxva2_common_end_frame(avctx, &v->s, |
|
262 |
- &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
263 |
- NULL, 0, |
|
264 |
- commit_bitstream_and_slice_buffer); |
|
262 |
+ ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr, |
|
263 |
+ &ctx_pic->pp, sizeof(ctx_pic->pp), |
|
264 |
+ NULL, 0, |
|
265 |
+ commit_bitstream_and_slice_buffer); |
|
266 |
+ if (!ret) |
|
267 |
+ ff_mpeg_draw_horiz_band(&v->s, 0, avctx->height); |
|
268 |
+ return ret; |
|
265 | 269 |
} |
266 | 270 |
|
267 | 271 |
#if CONFIG_WMV3_DXVA2_HWACCEL |
... | ... |
@@ -83,11 +83,10 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, |
83 | 83 |
int mb_x, int mb_y, int mb_intra, int mb_skipped) |
84 | 84 |
{ |
85 | 85 |
H264Context *h = opaque; |
86 |
- MpegEncContext *s = &h->s; |
|
87 | 86 |
|
88 |
- s->mb_x = mb_x; |
|
89 |
- s->mb_y = mb_y; |
|
90 |
- h->mb_xy = s->mb_x + s->mb_y * s->mb_stride; |
|
87 |
+ h->mb_x = mb_x; |
|
88 |
+ h->mb_y = mb_y; |
|
89 |
+ h->mb_xy = mb_x + mb_y * h->mb_stride; |
|
91 | 90 |
memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache)); |
92 | 91 |
assert(ref >= 0); |
93 | 92 |
/* FIXME: It is possible albeit uncommon that slice references |
... | ... |
@@ -96,22 +95,179 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, |
96 | 96 |
* practice then correct remapping should be added. */ |
97 | 97 |
if (ref >= h->ref_count[0]) |
98 | 98 |
ref = 0; |
99 |
- fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy], |
|
99 |
+ fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy], |
|
100 | 100 |
2, 2, 2, ref, 1); |
101 | 101 |
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); |
102 | 102 |
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, |
103 |
- pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4); |
|
103 |
+ pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4); |
|
104 | 104 |
assert(!FRAME_MBAFF); |
105 | 105 |
ff_h264_hl_decode_mb(h); |
106 | 106 |
} |
107 | 107 |
|
108 |
+void ff_h264_draw_horiz_band(H264Context *h, int y, int height) |
|
109 |
+{ |
|
110 |
+ ff_draw_horiz_band(h->avctx, &h->dsp, &h->cur_pic, |
|
111 |
+ h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL, |
|
112 |
+ y, height, h->picture_structure, h->first_field, 1, |
|
113 |
+ h->low_delay, h->mb_height * 16, h->mb_width * 16); |
|
114 |
+} |
|
115 |
+ |
|
116 |
+static void free_frame_buffer(H264Context *h, Picture *pic) |
|
117 |
+{ |
|
118 |
+ ff_thread_release_buffer(h->avctx, &pic->f); |
|
119 |
+ av_freep(&pic->f.hwaccel_picture_private); |
|
120 |
+} |
|
121 |
+ |
|
122 |
+static void free_picture(H264Context *h, Picture *pic) |
|
123 |
+{ |
|
124 |
+ int i; |
|
125 |
+ |
|
126 |
+ if (pic->f.data[0]) |
|
127 |
+ free_frame_buffer(h, pic); |
|
128 |
+ |
|
129 |
+ av_freep(&pic->qscale_table_base); |
|
130 |
+ pic->f.qscale_table = NULL; |
|
131 |
+ av_freep(&pic->mb_type_base); |
|
132 |
+ pic->f.mb_type = NULL; |
|
133 |
+ for (i = 0; i < 2; i++) { |
|
134 |
+ av_freep(&pic->motion_val_base[i]); |
|
135 |
+ av_freep(&pic->f.ref_index[i]); |
|
136 |
+ pic->f.motion_val[i] = NULL; |
|
137 |
+ } |
|
138 |
+} |
|
139 |
+ |
|
140 |
+static void release_unused_pictures(H264Context *h, int remove_current) |
|
141 |
+{ |
|
142 |
+ int i; |
|
143 |
+ |
|
144 |
+ /* release non reference frames */ |
|
145 |
+ for (i = 0; i < h->picture_count; i++) { |
|
146 |
+ if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference && |
|
147 |
+ (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) && |
|
148 |
+ (remove_current || &h->DPB[i] != h->cur_pic_ptr)) { |
|
149 |
+ free_frame_buffer(h, &h->DPB[i]); |
|
150 |
+ } |
|
151 |
+ } |
|
152 |
+} |
|
153 |
+ |
|
154 |
+static int alloc_scratch_buffers(H264Context *h, int linesize) |
|
155 |
+{ |
|
156 |
+ int alloc_size = FFALIGN(FFABS(linesize) + 32, 32); |
|
157 |
+ |
|
158 |
+ if (h->bipred_scratchpad) |
|
159 |
+ return 0; |
|
160 |
+ |
|
161 |
+ h->bipred_scratchpad = av_malloc(16 * 6 * alloc_size); |
|
162 |
+ // edge emu needs blocksize + filter length - 1 |
|
163 |
+ // (= 21x21 for h264) |
|
164 |
+ h->edge_emu_buffer = av_mallocz(alloc_size * 2 * 21); |
|
165 |
+ h->me.scratchpad = av_mallocz(alloc_size * 2 * 16 * 2); |
|
166 |
+ |
|
167 |
+ if (!h->bipred_scratchpad || !h->edge_emu_buffer || !h->me.scratchpad) { |
|
168 |
+ av_freep(&h->bipred_scratchpad); |
|
169 |
+ av_freep(&h->edge_emu_buffer); |
|
170 |
+ av_freep(&h->me.scratchpad); |
|
171 |
+ return AVERROR(ENOMEM); |
|
172 |
+ } |
|
173 |
+ |
|
174 |
+ h->me.temp = h->me.scratchpad; |
|
175 |
+ |
|
176 |
+ return 0; |
|
177 |
+} |
|
178 |
+ |
|
179 |
+static int alloc_picture(H264Context *h, Picture *pic) |
|
180 |
+{ |
|
181 |
+ const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1; |
|
182 |
+ const int mb_array_size = h->mb_stride * h->mb_height; |
|
183 |
+ const int b4_stride = h->mb_width * 4 + 1; |
|
184 |
+ const int b4_array_size = b4_stride * h->mb_height * 4; |
|
185 |
+ int i, ret = 0; |
|
186 |
+ |
|
187 |
+ av_assert0(!pic->f.data[0]); |
|
188 |
+ |
|
189 |
+ if (h->avctx->hwaccel) { |
|
190 |
+ const AVHWAccel *hwaccel = h->avctx->hwaccel; |
|
191 |
+ av_assert0(!pic->f.hwaccel_picture_private); |
|
192 |
+ if (hwaccel->priv_data_size) { |
|
193 |
+ pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size); |
|
194 |
+ if (!pic->f.hwaccel_picture_private) |
|
195 |
+ return AVERROR(ENOMEM); |
|
196 |
+ } |
|
197 |
+ } |
|
198 |
+ ret = ff_thread_get_buffer(h->avctx, &pic->f); |
|
199 |
+ if (ret < 0) |
|
200 |
+ goto fail; |
|
201 |
+ |
|
202 |
+ h->linesize = pic->f.linesize[0]; |
|
203 |
+ h->uvlinesize = pic->f.linesize[1]; |
|
204 |
+ |
|
205 |
+ if (pic->f.qscale_table == NULL) { |
|
206 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base, |
|
207 |
+ (big_mb_num + h->mb_stride) * sizeof(uint8_t), |
|
208 |
+ fail) |
|
209 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base, |
|
210 |
+ (big_mb_num + h->mb_stride) * sizeof(uint32_t), |
|
211 |
+ fail) |
|
212 |
+ pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1; |
|
213 |
+ pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1; |
|
214 |
+ |
|
215 |
+ for (i = 0; i < 2; i++) { |
|
216 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i], |
|
217 |
+ 2 * (b4_array_size + 4) * sizeof(int16_t), |
|
218 |
+ fail) |
|
219 |
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4; |
|
220 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i], |
|
221 |
+ 4 * mb_array_size * sizeof(uint8_t), fail) |
|
222 |
+ } |
|
223 |
+ pic->f.motion_subsample_log2 = 2; |
|
224 |
+ |
|
225 |
+ pic->f.qstride = h->mb_stride; |
|
226 |
+ } |
|
227 |
+ |
|
228 |
+ pic->owner2 = h; |
|
229 |
+ |
|
230 |
+ return 0; |
|
231 |
+fail: |
|
232 |
+ free_frame_buffer(h, pic); |
|
233 |
+ return (ret < 0) ? ret : AVERROR(ENOMEM); |
|
234 |
+} |
|
235 |
+ |
|
236 |
+static inline int pic_is_unused(H264Context *h, Picture *pic) |
|
237 |
+{ |
|
238 |
+ if (pic->f.data[0] == NULL) |
|
239 |
+ return 1; |
|
240 |
+ if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF)) |
|
241 |
+ if (!pic->owner2 || pic->owner2 == h) |
|
242 |
+ return 1; |
|
243 |
+ return 0; |
|
244 |
+} |
|
245 |
+ |
|
246 |
+static int find_unused_picture(H264Context *h) |
|
247 |
+{ |
|
248 |
+ int i; |
|
249 |
+ |
|
250 |
+ for (i = h->picture_range_start; i < h->picture_range_end; i++) { |
|
251 |
+ if (pic_is_unused(h, &h->DPB[i])) |
|
252 |
+ break; |
|
253 |
+ } |
|
254 |
+ if (i == h->picture_range_end) |
|
255 |
+ return AVERROR_INVALIDDATA; |
|
256 |
+ |
|
257 |
+ if (h->DPB[i].needs_realloc) { |
|
258 |
+ h->DPB[i].needs_realloc = 0; |
|
259 |
+ free_picture(h, &h->DPB[i]); |
|
260 |
+ avcodec_get_frame_defaults(&h->DPB[i].f); |
|
261 |
+ } |
|
262 |
+ |
|
263 |
+ return i; |
|
264 |
+} |
|
265 |
+ |
|
108 | 266 |
/** |
109 | 267 |
* Check if the top & left blocks are available if needed and |
110 | 268 |
* change the dc mode so it only uses the available blocks. |
111 | 269 |
*/ |
112 | 270 |
int ff_h264_check_intra4x4_pred_mode(H264Context *h) |
113 | 271 |
{ |
114 |
- MpegEncContext *const s = &h->s; |
|
115 | 272 |
static const int8_t top[12] = { |
116 | 273 |
-1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0 |
117 | 274 |
}; |
... | ... |
@@ -124,9 +280,9 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h) |
124 | 124 |
for (i = 0; i < 4; i++) { |
125 | 125 |
int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]]; |
126 | 126 |
if (status < 0) { |
127 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
127 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
128 | 128 |
"top block unavailable for requested intra4x4 mode %d at %d %d\n", |
129 |
- status, s->mb_x, s->mb_y); |
|
129 |
+ status, h->mb_x, h->mb_y); |
|
130 | 130 |
return -1; |
131 | 131 |
} else if (status) { |
132 | 132 |
h->intra4x4_pred_mode_cache[scan8[0] + i] = status; |
... | ... |
@@ -140,9 +296,9 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h) |
140 | 140 |
if (!(h->left_samples_available & mask[i])) { |
141 | 141 |
int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]]; |
142 | 142 |
if (status < 0) { |
143 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
143 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
144 | 144 |
"left block unavailable for requested intra4x4 mode %d at %d %d\n", |
145 |
- status, s->mb_x, s->mb_y); |
|
145 |
+ status, h->mb_x, h->mb_y); |
|
146 | 146 |
return -1; |
147 | 147 |
} else if (status) { |
148 | 148 |
h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status; |
... | ... |
@@ -159,23 +315,22 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h) |
159 | 159 |
*/ |
160 | 160 |
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) |
161 | 161 |
{ |
162 |
- MpegEncContext *const s = &h->s; |
|
163 | 162 |
static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 }; |
164 | 163 |
static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 }; |
165 | 164 |
|
166 | 165 |
if (mode > 6U) { |
167 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
166 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
168 | 167 |
"out of range intra chroma pred mode at %d %d\n", |
169 |
- s->mb_x, s->mb_y); |
|
168 |
+ h->mb_x, h->mb_y); |
|
170 | 169 |
return -1; |
171 | 170 |
} |
172 | 171 |
|
173 | 172 |
if (!(h->top_samples_available & 0x8000)) { |
174 | 173 |
mode = top[mode]; |
175 | 174 |
if (mode < 0) { |
176 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
175 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
177 | 176 |
"top block unavailable for requested intra mode at %d %d\n", |
178 |
- s->mb_x, s->mb_y); |
|
177 |
+ h->mb_x, h->mb_y); |
|
179 | 178 |
return -1; |
180 | 179 |
} |
181 | 180 |
} |
... | ... |
@@ -189,9 +344,9 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) |
189 | 189 |
2 * (mode == DC_128_PRED8x8); |
190 | 190 |
} |
191 | 191 |
if (mode < 0) { |
192 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
192 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
193 | 193 |
"left block unavailable for requested intra mode at %d %d\n", |
194 |
- s->mb_x, s->mb_y); |
|
194 |
+ h->mb_x, h->mb_y); |
|
195 | 195 |
return -1; |
196 | 196 |
} |
197 | 197 |
} |
... | ... |
@@ -314,7 +469,7 @@ static int decode_rbsp_trailing(H264Context *h, const uint8_t *src) |
314 | 314 |
int v = *src; |
315 | 315 |
int r; |
316 | 316 |
|
317 |
- tprintf(h->s.avctx, "rbsp trailing %X\n", v); |
|
317 |
+ tprintf(h->avctx, "rbsp trailing %X\n", v); |
|
318 | 318 |
|
319 | 319 |
for (r = 1; r < 9; r++) { |
320 | 320 |
if (v & 1) |
... | ... |
@@ -341,10 +496,9 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, |
341 | 341 |
int height, int y_offset, int list0, |
342 | 342 |
int list1, int *nrefs) |
343 | 343 |
{ |
344 |
- MpegEncContext *const s = &h->s; |
|
345 | 344 |
int my; |
346 | 345 |
|
347 |
- y_offset += 16 * (s->mb_y >> MB_FIELD); |
|
346 |
+ y_offset += 16 * (h->mb_y >> MB_FIELD); |
|
348 | 347 |
|
349 | 348 |
if (list0) { |
350 | 349 |
int ref_n = h->ref_cache[0][scan8[n]]; |
... | ... |
@@ -353,8 +507,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, |
353 | 353 |
// Error resilience puts the current picture in the ref list. |
354 | 354 |
// Don't try to wait on these as it will cause a deadlock. |
355 | 355 |
// Fields can wait on each other, though. |
356 |
- if (ref->f.thread_opaque != s->current_picture.f.thread_opaque || |
|
357 |
- (ref->f.reference & 3) != s->picture_structure) { |
|
356 |
+ if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || |
|
357 |
+ (ref->f.reference & 3) != h->picture_structure) { |
|
358 | 358 |
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0); |
359 | 359 |
if (refs[0][ref_n] < 0) |
360 | 360 |
nrefs[0] += 1; |
... | ... |
@@ -366,8 +520,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, |
366 | 366 |
int ref_n = h->ref_cache[1][scan8[n]]; |
367 | 367 |
Picture *ref = &h->ref_list[1][ref_n]; |
368 | 368 |
|
369 |
- if (ref->f.thread_opaque != s->current_picture.f.thread_opaque || |
|
370 |
- (ref->f.reference & 3) != s->picture_structure) { |
|
369 |
+ if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || |
|
370 |
+ (ref->f.reference & 3) != h->picture_structure) { |
|
371 | 371 |
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1); |
372 | 372 |
if (refs[1][ref_n] < 0) |
373 | 373 |
nrefs[1] += 1; |
... | ... |
@@ -383,9 +537,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, |
383 | 383 |
*/ |
384 | 384 |
static void await_references(H264Context *h) |
385 | 385 |
{ |
386 |
- MpegEncContext *const s = &h->s; |
|
387 | 386 |
const int mb_xy = h->mb_xy; |
388 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
387 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
389 | 388 |
int refs[2][48]; |
390 | 389 |
int nrefs[2] = { 0 }; |
391 | 390 |
int ref, list; |
... | ... |
@@ -459,7 +612,7 @@ static void await_references(H264Context *h) |
459 | 459 |
Picture *ref_pic = &h->ref_list[list][ref]; |
460 | 460 |
int ref_field = ref_pic->f.reference - 1; |
461 | 461 |
int ref_field_picture = ref_pic->field_picture; |
462 |
- int pic_height = 16 * s->mb_height >> ref_field_picture; |
|
462 |
+ int pic_height = 16 * h->mb_height >> ref_field_picture; |
|
463 | 463 |
|
464 | 464 |
row <<= MB_MBAFF; |
465 | 465 |
nrefs[list]--; |
... | ... |
@@ -500,7 +653,6 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
500 | 500 |
h264_chroma_mc_func chroma_op, |
501 | 501 |
int pixel_shift, int chroma_idc) |
502 | 502 |
{ |
503 |
- MpegEncContext *const s = &h->s; |
|
504 | 503 |
const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8; |
505 | 504 |
int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8; |
506 | 505 |
const int luma_xy = (mx & 3) + ((my & 3) << 2); |
... | ... |
@@ -512,8 +664,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
512 | 512 |
int emu = 0; |
513 | 513 |
const int full_mx = mx >> 2; |
514 | 514 |
const int full_my = my >> 2; |
515 |
- const int pic_width = 16 * s->mb_width; |
|
516 |
- const int pic_height = 16 * s->mb_height >> MB_FIELD; |
|
515 |
+ const int pic_width = 16 * h->mb_width; |
|
516 |
+ const int pic_height = 16 * h->mb_height >> MB_FIELD; |
|
517 | 517 |
int ysh; |
518 | 518 |
|
519 | 519 |
if (mx & 7) |
... | ... |
@@ -525,12 +677,12 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
525 | 525 |
full_my < 0 - extra_height || |
526 | 526 |
full_mx + 16 /*FIXME*/ > pic_width + extra_width || |
527 | 527 |
full_my + 16 /*FIXME*/ > pic_height + extra_height) { |
528 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, |
|
528 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, |
|
529 | 529 |
src_y - (2 << pixel_shift) - 2 * h->mb_linesize, |
530 | 530 |
h->mb_linesize, |
531 | 531 |
16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, |
532 | 532 |
full_my - 2, pic_width, pic_height); |
533 |
- src_y = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
533 |
+ src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
534 | 534 |
emu = 1; |
535 | 535 |
} |
536 | 536 |
|
... | ... |
@@ -538,19 +690,19 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
538 | 538 |
if (!square) |
539 | 539 |
qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize); |
540 | 540 |
|
541 |
- if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY) |
|
541 |
+ if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY) |
|
542 | 542 |
return; |
543 | 543 |
|
544 | 544 |
if (chroma_idc == 3 /* yuv444 */) { |
545 | 545 |
src_cb = pic->f.data[1] + offset; |
546 | 546 |
if (emu) { |
547 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, |
|
547 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, |
|
548 | 548 |
src_cb - (2 << pixel_shift) - 2 * h->mb_linesize, |
549 | 549 |
h->mb_linesize, |
550 | 550 |
16 + 5, 16 + 5 /*FIXME*/, |
551 | 551 |
full_mx - 2, full_my - 2, |
552 | 552 |
pic_width, pic_height); |
553 |
- src_cb = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
553 |
+ src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
554 | 554 |
} |
555 | 555 |
qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps? |
556 | 556 |
if (!square) |
... | ... |
@@ -558,13 +710,13 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
558 | 558 |
|
559 | 559 |
src_cr = pic->f.data[2] + offset; |
560 | 560 |
if (emu) { |
561 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, |
|
561 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, |
|
562 | 562 |
src_cr - (2 << pixel_shift) - 2 * h->mb_linesize, |
563 | 563 |
h->mb_linesize, |
564 | 564 |
16 + 5, 16 + 5 /*FIXME*/, |
565 | 565 |
full_mx - 2, full_my - 2, |
566 | 566 |
pic_width, pic_height); |
567 |
- src_cr = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
567 |
+ src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; |
|
568 | 568 |
} |
569 | 569 |
qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps? |
570 | 570 |
if (!square) |
... | ... |
@@ -575,7 +727,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
575 | 575 |
ysh = 3 - (chroma_idc == 2 /* yuv422 */); |
576 | 576 |
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) { |
577 | 577 |
// chroma offset when predicting from a field of opposite parity |
578 |
- my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1)); |
|
578 |
+ my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1)); |
|
579 | 579 |
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1); |
580 | 580 |
} |
581 | 581 |
|
... | ... |
@@ -585,20 +737,20 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, |
585 | 585 |
(my >> ysh) * h->mb_uvlinesize; |
586 | 586 |
|
587 | 587 |
if (emu) { |
588 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, |
|
588 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, h->mb_uvlinesize, |
|
589 | 589 |
9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), |
590 | 590 |
pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); |
591 |
- src_cb = s->edge_emu_buffer; |
|
591 |
+ src_cb = h->edge_emu_buffer; |
|
592 | 592 |
} |
593 | 593 |
chroma_op(dest_cb, src_cb, h->mb_uvlinesize, |
594 | 594 |
height >> (chroma_idc == 1 /* yuv420 */), |
595 | 595 |
mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7); |
596 | 596 |
|
597 | 597 |
if (emu) { |
598 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src_cr, h->mb_uvlinesize, |
|
598 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, h->mb_uvlinesize, |
|
599 | 599 |
9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), |
600 | 600 |
pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); |
601 |
- src_cr = s->edge_emu_buffer; |
|
601 |
+ src_cr = h->edge_emu_buffer; |
|
602 | 602 |
} |
603 | 603 |
chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */), |
604 | 604 |
mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7); |
... | ... |
@@ -616,7 +768,6 @@ static av_always_inline void mc_part_std(H264Context *h, int n, int square, |
616 | 616 |
int list0, int list1, |
617 | 617 |
int pixel_shift, int chroma_idc) |
618 | 618 |
{ |
619 |
- MpegEncContext *const s = &h->s; |
|
620 | 619 |
qpel_mc_func *qpix_op = qpix_put; |
621 | 620 |
h264_chroma_mc_func chroma_op = chroma_put; |
622 | 621 |
|
... | ... |
@@ -631,8 +782,8 @@ static av_always_inline void mc_part_std(H264Context *h, int n, int square, |
631 | 631 |
dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize; |
632 | 632 |
dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize; |
633 | 633 |
} |
634 |
- x_offset += 8 * s->mb_x; |
|
635 |
- y_offset += 8 * (s->mb_y >> MB_FIELD); |
|
634 |
+ x_offset += 8 * h->mb_x; |
|
635 |
+ y_offset += 8 * (h->mb_y >> MB_FIELD); |
|
636 | 636 |
|
637 | 637 |
if (list0) { |
638 | 638 |
Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]]; |
... | ... |
@@ -666,7 +817,6 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, |
666 | 666 |
int list0, int list1, |
667 | 667 |
int pixel_shift, int chroma_idc) |
668 | 668 |
{ |
669 |
- MpegEncContext *const s = &h->s; |
|
670 | 669 |
int chroma_height; |
671 | 670 |
|
672 | 671 |
dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize; |
... | ... |
@@ -685,8 +835,8 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, |
685 | 685 |
dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize; |
686 | 686 |
dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize; |
687 | 687 |
} |
688 |
- x_offset += 8 * s->mb_x; |
|
689 |
- y_offset += 8 * (s->mb_y >> MB_FIELD); |
|
688 |
+ x_offset += 8 * h->mb_x; |
|
689 |
+ y_offset += 8 * (h->mb_y >> MB_FIELD); |
|
690 | 690 |
|
691 | 691 |
if (list0 && list1) { |
692 | 692 |
/* don't optimize for luma-only case, since B-frames usually |
... | ... |
@@ -707,7 +857,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, |
707 | 707 |
pixel_shift, chroma_idc); |
708 | 708 |
|
709 | 709 |
if (h->use_weight == 2) { |
710 |
- int weight0 = h->implicit_weight[refn0][refn1][s->mb_y & 1]; |
|
710 |
+ int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1]; |
|
711 | 711 |
int weight1 = 64 - weight0; |
712 | 712 |
luma_weight_avg(dest_y, tmp_y, h->mb_linesize, |
713 | 713 |
height, 5, weight0, weight1, 0); |
... | ... |
@@ -765,24 +915,23 @@ static av_always_inline void prefetch_motion(H264Context *h, int list, |
765 | 765 |
{ |
766 | 766 |
/* fetch pixels for estimated mv 4 macroblocks ahead |
767 | 767 |
* optimized for 64byte cache lines */ |
768 |
- MpegEncContext *const s = &h->s; |
|
769 | 768 |
const int refn = h->ref_cache[list][scan8[0]]; |
770 | 769 |
if (refn >= 0) { |
771 |
- const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * s->mb_x + 8; |
|
772 |
- const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * s->mb_y; |
|
770 |
+ const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8; |
|
771 |
+ const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y; |
|
773 | 772 |
uint8_t **src = h->ref_list[list][refn].f.data; |
774 | 773 |
int off = (mx << pixel_shift) + |
775 |
- (my + (s->mb_x & 3) * 4) * h->mb_linesize + |
|
774 |
+ (my + (h->mb_x & 3) * 4) * h->mb_linesize + |
|
776 | 775 |
(64 << pixel_shift); |
777 |
- s->vdsp.prefetch(src[0] + off, s->linesize, 4); |
|
776 |
+ h->vdsp.prefetch(src[0] + off, h->linesize, 4); |
|
778 | 777 |
if (chroma_idc == 3 /* yuv444 */) { |
779 |
- s->vdsp.prefetch(src[1] + off, s->linesize, 4); |
|
780 |
- s->vdsp.prefetch(src[2] + off, s->linesize, 4); |
|
778 |
+ h->vdsp.prefetch(src[1] + off, h->linesize, 4); |
|
779 |
+ h->vdsp.prefetch(src[2] + off, h->linesize, 4); |
|
781 | 780 |
} else { |
782 | 781 |
off = ((mx >> 1) << pixel_shift) + |
783 |
- ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + |
|
782 |
+ ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize + |
|
784 | 783 |
(64 << pixel_shift); |
785 |
- s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2); |
|
784 |
+ h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2); |
|
786 | 785 |
} |
787 | 786 |
} |
788 | 787 |
} |
... | ... |
@@ -806,6 +955,18 @@ static void free_tables(H264Context *h, int free_rbsp) |
806 | 806 |
av_freep(&h->mb2b_xy); |
807 | 807 |
av_freep(&h->mb2br_xy); |
808 | 808 |
|
809 |
+ if (free_rbsp) { |
|
810 |
+ for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++) |
|
811 |
+ free_picture(h, &h->DPB[i]); |
|
812 |
+ av_freep(&h->DPB); |
|
813 |
+ h->picture_count = 0; |
|
814 |
+ } else if (h->DPB) { |
|
815 |
+ for (i = 0; i < h->picture_count; i++) |
|
816 |
+ h->DPB[i].needs_realloc = 1; |
|
817 |
+ } |
|
818 |
+ |
|
819 |
+ h->cur_pic_ptr = NULL; |
|
820 |
+ |
|
809 | 821 |
for (i = 0; i < MAX_THREADS; i++) { |
810 | 822 |
hx = h->thread_context[i]; |
811 | 823 |
if (!hx) |
... | ... |
@@ -813,6 +974,15 @@ static void free_tables(H264Context *h, int free_rbsp) |
813 | 813 |
av_freep(&hx->top_borders[1]); |
814 | 814 |
av_freep(&hx->top_borders[0]); |
815 | 815 |
av_freep(&hx->bipred_scratchpad); |
816 |
+ av_freep(&hx->edge_emu_buffer); |
|
817 |
+ av_freep(&hx->dc_val_base); |
|
818 |
+ av_freep(&hx->me.scratchpad); |
|
819 |
+ av_freep(&hx->er.mb_index2xy); |
|
820 |
+ av_freep(&hx->er.error_status_table); |
|
821 |
+ av_freep(&hx->er.er_temp_buffer); |
|
822 |
+ av_freep(&hx->er.mbintra_table); |
|
823 |
+ av_freep(&hx->er.mbskip_table); |
|
824 |
+ |
|
816 | 825 |
if (free_rbsp) { |
817 | 826 |
av_freep(&hx->rbsp_buffer[1]); |
818 | 827 |
av_freep(&hx->rbsp_buffer[0]); |
... | ... |
@@ -896,50 +1066,59 @@ static void init_dequant_tables(H264Context *h) |
896 | 896 |
|
897 | 897 |
int ff_h264_alloc_tables(H264Context *h) |
898 | 898 |
{ |
899 |
- MpegEncContext *const s = &h->s; |
|
900 |
- const int big_mb_num = s->mb_stride * (s->mb_height + 1); |
|
901 |
- const int row_mb_num = s->mb_stride * 2 * s->avctx->thread_count; |
|
902 |
- int x, y; |
|
899 |
+ const int big_mb_num = h->mb_stride * (h->mb_height + 1); |
|
900 |
+ const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count; |
|
901 |
+ int x, y, i; |
|
903 | 902 |
|
904 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode, |
|
903 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode, |
|
905 | 904 |
row_mb_num * 8 * sizeof(uint8_t), fail) |
906 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->non_zero_count, |
|
905 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count, |
|
907 | 906 |
big_mb_num * 48 * sizeof(uint8_t), fail) |
908 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->slice_table_base, |
|
909 |
- (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base), fail) |
|
910 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->cbp_table, |
|
907 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base, |
|
908 |
+ (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail) |
|
909 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table, |
|
911 | 910 |
big_mb_num * sizeof(uint16_t), fail) |
912 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->chroma_pred_mode_table, |
|
911 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table, |
|
913 | 912 |
big_mb_num * sizeof(uint8_t), fail) |
914 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[0], |
|
913 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0], |
|
915 | 914 |
16 * row_mb_num * sizeof(uint8_t), fail); |
916 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[1], |
|
915 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1], |
|
917 | 916 |
16 * row_mb_num * sizeof(uint8_t), fail); |
918 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->direct_table, |
|
917 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table, |
|
919 | 918 |
4 * big_mb_num * sizeof(uint8_t), fail); |
920 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->list_counts, |
|
919 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts, |
|
921 | 920 |
big_mb_num * sizeof(uint8_t), fail) |
922 | 921 |
|
923 | 922 |
memset(h->slice_table_base, -1, |
924 |
- (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base)); |
|
925 |
- h->slice_table = h->slice_table_base + s->mb_stride * 2 + 1; |
|
923 |
+ (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base)); |
|
924 |
+ h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1; |
|
926 | 925 |
|
927 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b_xy, |
|
926 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy, |
|
928 | 927 |
big_mb_num * sizeof(uint32_t), fail); |
929 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2br_xy, |
|
928 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy, |
|
930 | 929 |
big_mb_num * sizeof(uint32_t), fail); |
931 |
- for (y = 0; y < s->mb_height; y++) |
|
932 |
- for (x = 0; x < s->mb_width; x++) { |
|
933 |
- const int mb_xy = x + y * s->mb_stride; |
|
930 |
+ for (y = 0; y < h->mb_height; y++) |
|
931 |
+ for (x = 0; x < h->mb_width; x++) { |
|
932 |
+ const int mb_xy = x + y * h->mb_stride; |
|
934 | 933 |
const int b_xy = 4 * x + 4 * y * h->b_stride; |
935 | 934 |
|
936 | 935 |
h->mb2b_xy[mb_xy] = b_xy; |
937 |
- h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * s->mb_stride))); |
|
936 |
+ h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride))); |
|
938 | 937 |
} |
939 | 938 |
|
940 | 939 |
if (!h->dequant4_coeff[0]) |
941 | 940 |
init_dequant_tables(h); |
942 | 941 |
|
942 |
+ if (!h->DPB) { |
|
943 |
+ h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count); |
|
944 |
+ h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB)); |
|
945 |
+ if (!h->DPB) |
|
946 |
+ return AVERROR(ENOMEM); |
|
947 |
+ for (i = 0; i < h->picture_count; i++) |
|
948 |
+ avcodec_get_frame_defaults(&h->DPB[i].f); |
|
949 |
+ avcodec_get_frame_defaults(&h->cur_pic.f); |
|
950 |
+ } |
|
951 |
+ |
|
943 | 952 |
return 0; |
944 | 953 |
|
945 | 954 |
fail: |
... | ... |
@@ -952,20 +1131,24 @@ fail: |
952 | 952 |
*/ |
953 | 953 |
static void clone_tables(H264Context *dst, H264Context *src, int i) |
954 | 954 |
{ |
955 |
- MpegEncContext *const s = &src->s; |
|
956 |
- dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * s->mb_stride; |
|
955 |
+ dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * src->mb_stride; |
|
957 | 956 |
dst->non_zero_count = src->non_zero_count; |
958 | 957 |
dst->slice_table = src->slice_table; |
959 | 958 |
dst->cbp_table = src->cbp_table; |
960 | 959 |
dst->mb2b_xy = src->mb2b_xy; |
961 | 960 |
dst->mb2br_xy = src->mb2br_xy; |
962 | 961 |
dst->chroma_pred_mode_table = src->chroma_pred_mode_table; |
963 |
- dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * s->mb_stride; |
|
964 |
- dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * s->mb_stride; |
|
962 |
+ dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * src->mb_stride; |
|
963 |
+ dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * src->mb_stride; |
|
965 | 964 |
dst->direct_table = src->direct_table; |
966 | 965 |
dst->list_counts = src->list_counts; |
966 |
+ dst->DPB = src->DPB; |
|
967 |
+ dst->cur_pic_ptr = src->cur_pic_ptr; |
|
968 |
+ dst->cur_pic = src->cur_pic; |
|
967 | 969 |
dst->bipred_scratchpad = NULL; |
968 |
- ff_h264_pred_init(&dst->hpc, src->s.codec_id, src->sps.bit_depth_luma, |
|
970 |
+ dst->edge_emu_buffer = NULL; |
|
971 |
+ dst->me.scratchpad = NULL; |
|
972 |
+ ff_h264_pred_init(&dst->hpc, src->avctx->codec_id, src->sps.bit_depth_luma, |
|
969 | 973 |
src->sps.chroma_format_idc); |
970 | 974 |
} |
971 | 975 |
|
... | ... |
@@ -975,10 +1158,17 @@ static void clone_tables(H264Context *dst, H264Context *src, int i) |
975 | 975 |
*/ |
976 | 976 |
static int context_init(H264Context *h) |
977 | 977 |
{ |
978 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[0], |
|
979 |
- h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) |
|
980 |
- FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[1], |
|
981 |
- h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) |
|
978 |
+ ERContext *er = &h->er; |
|
979 |
+ int mb_array_size = h->mb_height * h->mb_stride; |
|
980 |
+ int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1); |
|
981 |
+ int c_size = h->mb_stride * (h->mb_height + 1); |
|
982 |
+ int yc_size = y_size + 2 * c_size; |
|
983 |
+ int x, y, i; |
|
984 |
+ |
|
985 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[0], |
|
986 |
+ h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) |
|
987 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[1], |
|
988 |
+ h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) |
|
982 | 989 |
|
983 | 990 |
h->ref_cache[0][scan8[5] + 1] = |
984 | 991 |
h->ref_cache[0][scan8[7] + 1] = |
... | ... |
@@ -987,8 +1177,45 @@ static int context_init(H264Context *h) |
987 | 987 |
h->ref_cache[1][scan8[7] + 1] = |
988 | 988 |
h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE; |
989 | 989 |
|
990 |
- h->s.er.decode_mb = h264_er_decode_mb; |
|
991 |
- h->s.er.opaque = h; |
|
990 |
+ /* init ER */ |
|
991 |
+ er->avctx = h->avctx; |
|
992 |
+ er->dsp = &h->dsp; |
|
993 |
+ er->decode_mb = h264_er_decode_mb; |
|
994 |
+ er->opaque = h; |
|
995 |
+ er->quarter_sample = 1; |
|
996 |
+ |
|
997 |
+ er->mb_num = h->mb_num; |
|
998 |
+ er->mb_width = h->mb_width; |
|
999 |
+ er->mb_height = h->mb_height; |
|
1000 |
+ er->mb_stride = h->mb_stride; |
|
1001 |
+ er->b8_stride = h->mb_width * 2 + 1; |
|
1002 |
+ |
|
1003 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int), |
|
1004 |
+ fail); // error resilience code looks cleaner with this |
|
1005 |
+ for (y = 0; y < h->mb_height; y++) |
|
1006 |
+ for (x = 0; x < h->mb_width; x++) |
|
1007 |
+ er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride; |
|
1008 |
+ |
|
1009 |
+ er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) * |
|
1010 |
+ h->mb_stride + h->mb_width; |
|
1011 |
+ |
|
1012 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table, |
|
1013 |
+ mb_array_size * sizeof(uint8_t), fail); |
|
1014 |
+ |
|
1015 |
+ FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail); |
|
1016 |
+ memset(er->mbintra_table, 1, mb_array_size); |
|
1017 |
+ |
|
1018 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail); |
|
1019 |
+ |
|
1020 |
+ FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer, h->mb_height * h->mb_stride, |
|
1021 |
+ fail); |
|
1022 |
+ |
|
1023 |
+ FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail); |
|
1024 |
+ er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2; |
|
1025 |
+ er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1; |
|
1026 |
+ er->dc_val[2] = er->dc_val[1] + c_size; |
|
1027 |
+ for (i = 0; i < yc_size; i++) |
|
1028 |
+ h->dc_val_base[i] = 1024; |
|
992 | 1029 |
|
993 | 1030 |
return 0; |
994 | 1031 |
|
... | ... |
@@ -1001,23 +1228,23 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
1001 | 1001 |
|
1002 | 1002 |
static av_cold void common_init(H264Context *h) |
1003 | 1003 |
{ |
1004 |
- MpegEncContext *const s = &h->s; |
|
1005 | 1004 |
|
1006 |
- s->width = s->avctx->width; |
|
1007 |
- s->height = s->avctx->height; |
|
1008 |
- s->codec_id = s->avctx->codec->id; |
|
1005 |
+ h->width = h->avctx->width; |
|
1006 |
+ h->height = h->avctx->height; |
|
1007 |
+ |
|
1008 |
+ h->bit_depth_luma = 8; |
|
1009 |
+ h->chroma_format_idc = 1; |
|
1009 | 1010 |
|
1010 | 1011 |
ff_h264dsp_init(&h->h264dsp, 8, 1); |
1011 | 1012 |
ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); |
1012 | 1013 |
ff_h264qpel_init(&h->h264qpel, 8); |
1013 |
- ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1); |
|
1014 |
+ ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1); |
|
1014 | 1015 |
|
1015 | 1016 |
h->dequant_coeff_pps = -1; |
1016 |
- s->unrestricted_mv = 1; |
|
1017 | 1017 |
|
1018 | 1018 |
/* needed so that IDCT permutation is known early */ |
1019 |
- ff_dsputil_init(&s->dsp, s->avctx); |
|
1020 |
- ff_videodsp_init(&s->vdsp, 8); |
|
1019 |
+ ff_dsputil_init(&h->dsp, h->avctx); |
|
1020 |
+ ff_videodsp_init(&h->vdsp, 8); |
|
1021 | 1021 |
|
1022 | 1022 |
memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t)); |
1023 | 1023 |
memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t)); |
... | ... |
@@ -1025,7 +1252,7 @@ static av_cold void common_init(H264Context *h) |
1025 | 1025 |
|
1026 | 1026 |
int ff_h264_decode_extradata(H264Context *h) |
1027 | 1027 |
{ |
1028 |
- AVCodecContext *avctx = h->s.avctx; |
|
1028 |
+ AVCodecContext *avctx = h->avctx; |
|
1029 | 1029 |
|
1030 | 1030 |
if (avctx->extradata[0] == 1) { |
1031 | 1031 |
int i, cnt, nalsize; |
... | ... |
@@ -1080,22 +1307,22 @@ int ff_h264_decode_extradata(H264Context *h) |
1080 | 1080 |
av_cold int ff_h264_decode_init(AVCodecContext *avctx) |
1081 | 1081 |
{ |
1082 | 1082 |
H264Context *h = avctx->priv_data; |
1083 |
- MpegEncContext *const s = &h->s; |
|
1084 | 1083 |
int i; |
1085 | 1084 |
|
1086 |
- ff_MPV_decode_defaults(s); |
|
1087 |
- |
|
1088 |
- s->avctx = avctx; |
|
1085 |
+ h->avctx = avctx; |
|
1089 | 1086 |
common_init(h); |
1090 | 1087 |
|
1091 |
- s->out_format = FMT_H264; |
|
1092 |
- s->workaround_bugs = avctx->workaround_bugs; |
|
1088 |
+ h->picture_structure = PICT_FRAME; |
|
1089 |
+ h->picture_range_start = 0; |
|
1090 |
+ h->picture_range_end = MAX_PICTURE_COUNT; |
|
1091 |
+ h->slice_context_count = 1; |
|
1092 |
+ h->workaround_bugs = avctx->workaround_bugs; |
|
1093 |
+ h->flags = avctx->flags; |
|
1093 | 1094 |
|
1094 | 1095 |
/* set defaults */ |
1095 | 1096 |
// s->decode_mb = ff_h263_decode_mb; |
1096 |
- s->quarter_sample = 1; |
|
1097 | 1097 |
if (!avctx->has_b_frames) |
1098 |
- s->low_delay = 1; |
|
1098 |
+ h->low_delay = 1; |
|
1099 | 1099 |
|
1100 | 1100 |
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; |
1101 | 1101 |
|
... | ... |
@@ -1113,7 +1340,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) |
1113 | 1113 |
ff_h264_reset_sei(h); |
1114 | 1114 |
if (avctx->codec_id == AV_CODEC_ID_H264) { |
1115 | 1115 |
if (avctx->ticks_per_frame == 1) |
1116 |
- s->avctx->time_base.den *= 2; |
|
1116 |
+ h->avctx->time_base.den *= 2; |
|
1117 | 1117 |
avctx->ticks_per_frame = 2; |
1118 | 1118 |
} |
1119 | 1119 |
|
... | ... |
@@ -1122,25 +1349,30 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) |
1122 | 1122 |
return -1; |
1123 | 1123 |
|
1124 | 1124 |
if (h->sps.bitstream_restriction_flag && |
1125 |
- s->avctx->has_b_frames < h->sps.num_reorder_frames) { |
|
1126 |
- s->avctx->has_b_frames = h->sps.num_reorder_frames; |
|
1127 |
- s->low_delay = 0; |
|
1125 |
+ h->avctx->has_b_frames < h->sps.num_reorder_frames) { |
|
1126 |
+ h->avctx->has_b_frames = h->sps.num_reorder_frames; |
|
1127 |
+ h->low_delay = 0; |
|
1128 | 1128 |
} |
1129 | 1129 |
|
1130 | 1130 |
return 0; |
1131 | 1131 |
} |
1132 | 1132 |
|
1133 | 1133 |
#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size)))) |
1134 |
+#undef REBASE_PICTURE |
|
1135 |
+#define REBASE_PICTURE(pic, new_ctx, old_ctx) \ |
|
1136 |
+ ((pic && pic >= old_ctx->DPB && \ |
|
1137 |
+ pic < old_ctx->DPB + old_ctx->picture_count) ? \ |
|
1138 |
+ &new_ctx->DPB[pic - old_ctx->DPB] : NULL) |
|
1134 | 1139 |
|
1135 | 1140 |
static void copy_picture_range(Picture **to, Picture **from, int count, |
1136 |
- MpegEncContext *new_base, |
|
1137 |
- MpegEncContext *old_base) |
|
1141 |
+ H264Context *new_base, |
|
1142 |
+ H264Context *old_base) |
|
1138 | 1143 |
{ |
1139 | 1144 |
int i; |
1140 | 1145 |
|
1141 | 1146 |
for (i = 0; i < count; i++) { |
1142 | 1147 |
assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) || |
1143 |
- IN_RANGE(from[i], old_base->picture, |
|
1148 |
+ IN_RANGE(from[i], old_base->DPB, |
|
1144 | 1149 |
sizeof(Picture) * old_base->picture_count) || |
1145 | 1150 |
!from[i])); |
1146 | 1151 |
to[i] = REBASE_PICTURE(from[i], new_base, old_base); |
... | ... |
@@ -1171,7 +1403,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx) |
1171 | 1171 |
memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); |
1172 | 1172 |
memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); |
1173 | 1173 |
|
1174 |
- h->s.context_initialized = 0; |
|
1174 |
+ h->context_initialized = 0; |
|
1175 | 1175 |
|
1176 | 1176 |
return 0; |
1177 | 1177 |
} |
... | ... |
@@ -1188,49 +1420,47 @@ static int decode_update_thread_context(AVCodecContext *dst, |
1188 | 1188 |
const AVCodecContext *src) |
1189 | 1189 |
{ |
1190 | 1190 |
H264Context *h = dst->priv_data, *h1 = src->priv_data; |
1191 |
- MpegEncContext *const s = &h->s, *const s1 = &h1->s; |
|
1192 |
- int inited = s->context_initialized, err; |
|
1191 |
+ int inited = h->context_initialized, err = 0; |
|
1192 |
+ int context_reinitialized = 0; |
|
1193 | 1193 |
int i; |
1194 | 1194 |
|
1195 |
- if (dst == src || !s1->context_initialized) |
|
1195 |
+ if (dst == src || !h1->context_initialized) |
|
1196 | 1196 |
return 0; |
1197 | 1197 |
|
1198 | 1198 |
if (inited && |
1199 |
- (s->width != s1->width || |
|
1200 |
- s->height != s1->height || |
|
1201 |
- s->mb_width != s1->mb_width || |
|
1202 |
- s->mb_height != s1->mb_height || |
|
1199 |
+ (h->width != h1->width || |
|
1200 |
+ h->height != h1->height || |
|
1201 |
+ h->mb_width != h1->mb_width || |
|
1202 |
+ h->mb_height != h1->mb_height || |
|
1203 | 1203 |
h->sps.bit_depth_luma != h1->sps.bit_depth_luma || |
1204 | 1204 |
h->sps.chroma_format_idc != h1->sps.chroma_format_idc || |
1205 | 1205 |
h->sps.colorspace != h1->sps.colorspace)) { |
1206 | 1206 |
|
1207 | 1207 |
av_freep(&h->bipred_scratchpad); |
1208 | 1208 |
|
1209 |
- s->width = s1->width; |
|
1210 |
- s->height = s1->height; |
|
1211 |
- s->mb_height = s1->mb_height; |
|
1209 |
+ h->width = h1->width; |
|
1210 |
+ h->height = h1->height; |
|
1211 |
+ h->mb_height = h1->mb_height; |
|
1212 |
+ h->mb_width = h1->mb_width; |
|
1213 |
+ h->mb_num = h1->mb_num; |
|
1214 |
+ h->mb_stride = h1->mb_stride; |
|
1212 | 1215 |
h->b_stride = h1->b_stride; |
1213 | 1216 |
|
1214 | 1217 |
if ((err = h264_slice_header_init(h, 1)) < 0) { |
1215 |
- av_log(h->s.avctx, AV_LOG_ERROR, "h264_slice_header_init() failed"); |
|
1218 |
+ av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed"); |
|
1216 | 1219 |
return err; |
1217 | 1220 |
} |
1218 |
- h->context_reinitialized = 1; |
|
1221 |
+ context_reinitialized = 1; |
|
1219 | 1222 |
|
1220 |
- /* update linesize on resize for h264. The h264 decoder doesn't |
|
1221 |
- * necessarily call ff_MPV_frame_start in the new thread */ |
|
1222 |
- s->linesize = s1->linesize; |
|
1223 |
- s->uvlinesize = s1->uvlinesize; |
|
1223 |
+ /* update linesize on resize. The decoder doesn't |
|
1224 |
+ * necessarily call ff_h264_frame_start in the new thread */ |
|
1225 |
+ h->linesize = h1->linesize; |
|
1226 |
+ h->uvlinesize = h1->uvlinesize; |
|
1224 | 1227 |
|
1225 | 1228 |
/* copy block_offset since frame_start may not be called */ |
1226 | 1229 |
memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset)); |
1227 |
- h264_set_parameter_from_sps(h); |
|
1228 | 1230 |
} |
1229 | 1231 |
|
1230 |
- err = ff_mpeg_update_thread_context(dst, src); |
|
1231 |
- if (err) |
|
1232 |
- return err; |
|
1233 |
- |
|
1234 | 1232 |
if (!inited) { |
1235 | 1233 |
for (i = 0; i < MAX_SPS_COUNT; i++) |
1236 | 1234 |
av_freep(h->sps_buffers + i); |
... | ... |
@@ -1238,11 +1468,20 @@ static int decode_update_thread_context(AVCodecContext *dst, |
1238 | 1238 |
for (i = 0; i < MAX_PPS_COUNT; i++) |
1239 | 1239 |
av_freep(h->pps_buffers + i); |
1240 | 1240 |
|
1241 |
- // copy all fields after MpegEnc |
|
1242 |
- memcpy(&h->s + 1, &h1->s + 1, |
|
1243 |
- sizeof(H264Context) - sizeof(MpegEncContext)); |
|
1241 |
+ memcpy(h, h1, sizeof(*h1)); |
|
1244 | 1242 |
memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); |
1245 | 1243 |
memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); |
1244 |
+ memset(&h->er, 0, sizeof(h->er)); |
|
1245 |
+ memset(&h->me, 0, sizeof(h->me)); |
|
1246 |
+ h->context_initialized = 0; |
|
1247 |
+ |
|
1248 |
+ h->picture_range_start += MAX_PICTURE_COUNT; |
|
1249 |
+ h->picture_range_end += MAX_PICTURE_COUNT; |
|
1250 |
+ |
|
1251 |
+ h->avctx = dst; |
|
1252 |
+ h->DPB = NULL; |
|
1253 |
+ h->cur_pic.f.extended_data = h->cur_pic.f.data; |
|
1254 |
+ |
|
1246 | 1255 |
if (ff_h264_alloc_tables(h) < 0) { |
1247 | 1256 |
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n"); |
1248 | 1257 |
return AVERROR(ENOMEM); |
... | ... |
@@ -1254,17 +1493,46 @@ static int decode_update_thread_context(AVCodecContext *dst, |
1254 | 1254 |
h->rbsp_buffer_size[i] = 0; |
1255 | 1255 |
} |
1256 | 1256 |
h->bipred_scratchpad = NULL; |
1257 |
+ h->edge_emu_buffer = NULL; |
|
1257 | 1258 |
|
1258 | 1259 |
h->thread_context[0] = h; |
1259 | 1260 |
|
1260 |
- s->dsp.clear_blocks(h->mb); |
|
1261 |
- s->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift)); |
|
1261 |
+ h->dsp.clear_blocks(h->mb); |
|
1262 |
+ h->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift)); |
|
1263 |
+ h->context_initialized = 1; |
|
1262 | 1264 |
} |
1263 | 1265 |
|
1266 |
+ h->avctx->coded_height = h1->avctx->coded_height; |
|
1267 |
+ h->avctx->coded_width = h1->avctx->coded_width; |
|
1268 |
+ h->avctx->width = h1->avctx->width; |
|
1269 |
+ h->avctx->height = h1->avctx->height; |
|
1270 |
+ h->coded_picture_number = h1->coded_picture_number; |
|
1271 |
+ h->first_field = h1->first_field; |
|
1272 |
+ h->picture_structure = h1->picture_structure; |
|
1273 |
+ h->qscale = h1->qscale; |
|
1274 |
+ h->droppable = h1->droppable; |
|
1275 |
+ h->data_partitioning = h1->data_partitioning; |
|
1276 |
+ h->low_delay = h1->low_delay; |
|
1277 |
+ |
|
1278 |
+ memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB)); |
|
1279 |
+ |
|
1280 |
+ // reset h->DPB[].f.extended_data to h->DPB[].f.data |
|
1281 |
+ for (i = 0; i < h->picture_count; i++) |
|
1282 |
+ h->DPB[i].f.extended_data = h->DPB[i].f.data; |
|
1283 |
+ |
|
1284 |
+ h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1); |
|
1285 |
+ h->cur_pic = h1->cur_pic; |
|
1286 |
+ h->cur_pic.f.extended_data = h->cur_pic.f.data; |
|
1287 |
+ |
|
1288 |
+ h->workaround_bugs = h1->workaround_bugs; |
|
1289 |
+ h->low_delay = h1->low_delay; |
|
1290 |
+ h->droppable = h1->droppable; |
|
1291 |
+ |
|
1264 | 1292 |
/* frame_start may not be called for the next thread (if it's decoding |
1265 | 1293 |
* a bottom field) so this has to be allocated here */ |
1266 |
- if (!h->bipred_scratchpad) |
|
1267 |
- h->bipred_scratchpad = av_malloc(16 * 6 * s->linesize); |
|
1294 |
+ err = alloc_scratch_buffers(h, h1->linesize); |
|
1295 |
+ if (err < 0) |
|
1296 |
+ return err; |
|
1268 | 1297 |
|
1269 | 1298 |
// extradata/NAL handling |
1270 | 1299 |
h->is_avc = h1->is_avc; |
... | ... |
@@ -1299,17 +1567,20 @@ static int decode_update_thread_context(AVCodecContext *dst, |
1299 | 1299 |
copy_fields(h, h1, ref2frm, intra_gb); |
1300 | 1300 |
copy_fields(h, h1, short_ref, cabac_init_idc); |
1301 | 1301 |
|
1302 |
- copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1); |
|
1303 |
- copy_picture_range(h->long_ref, h1->long_ref, 32, s, s1); |
|
1302 |
+ copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1); |
|
1303 |
+ copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1); |
|
1304 | 1304 |
copy_picture_range(h->delayed_pic, h1->delayed_pic, |
1305 |
- MAX_DELAYED_PIC_COUNT + 2, s, s1); |
|
1305 |
+ MAX_DELAYED_PIC_COUNT + 2, h, h1); |
|
1306 | 1306 |
|
1307 | 1307 |
h->last_slice_type = h1->last_slice_type; |
1308 | 1308 |
|
1309 |
- if (!s->current_picture_ptr) |
|
1309 |
+ if (context_reinitialized) |
|
1310 |
+ h264_set_parameter_from_sps(h); |
|
1311 |
+ |
|
1312 |
+ if (!h->cur_pic_ptr) |
|
1310 | 1313 |
return 0; |
1311 | 1314 |
|
1312 |
- if (!s->droppable) { |
|
1315 |
+ if (!h->droppable) { |
|
1313 | 1316 |
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); |
1314 | 1317 |
h->prev_poc_msb = h->poc_msb; |
1315 | 1318 |
h->prev_poc_lsb = h->poc_lsb; |
... | ... |
@@ -1323,45 +1594,66 @@ static int decode_update_thread_context(AVCodecContext *dst, |
1323 | 1323 |
|
1324 | 1324 |
int ff_h264_frame_start(H264Context *h) |
1325 | 1325 |
{ |
1326 |
- MpegEncContext *const s = &h->s; |
|
1327 |
- int i; |
|
1326 |
+ Picture *pic; |
|
1327 |
+ int i, ret; |
|
1328 | 1328 |
const int pixel_shift = h->pixel_shift; |
1329 | 1329 |
|
1330 |
- if (ff_MPV_frame_start(s, s->avctx) < 0) |
|
1331 |
- return -1; |
|
1332 |
- ff_mpeg_er_frame_start(s); |
|
1330 |
+ release_unused_pictures(h, 1); |
|
1331 |
+ h->cur_pic_ptr = NULL; |
|
1332 |
+ |
|
1333 |
+ i = find_unused_picture(h); |
|
1334 |
+ if (i < 0) { |
|
1335 |
+ av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n"); |
|
1336 |
+ return i; |
|
1337 |
+ } |
|
1338 |
+ pic = &h->DPB[i]; |
|
1339 |
+ |
|
1340 |
+ pic->f.reference = h->droppable ? 0 : h->picture_structure; |
|
1341 |
+ pic->f.coded_picture_number = h->coded_picture_number++; |
|
1342 |
+ pic->field_picture = h->picture_structure != PICT_FRAME; |
|
1333 | 1343 |
/* |
1334 |
- * ff_MPV_frame_start uses pict_type to derive key_frame. |
|
1335 |
- * This is incorrect for H.264; IDR markings must be used. |
|
1336 |
- * Zero here; IDR markings per slice in frame or fields are ORed in later. |
|
1344 |
+ * Zero key_frame here; IDR markings per slice in frame or fields are ORed |
|
1345 |
+ * in later. |
|
1337 | 1346 |
* See decode_nal_units(). |
1338 | 1347 |
*/ |
1339 |
- s->current_picture_ptr->f.key_frame = 0; |
|
1340 |
- s->current_picture_ptr->mmco_reset = 0; |
|
1348 |
+ pic->f.key_frame = 0; |
|
1349 |
+ pic->mmco_reset = 0; |
|
1350 |
+ |
|
1351 |
+ if ((ret = alloc_picture(h, pic)) < 0) |
|
1352 |
+ return ret; |
|
1353 |
+ |
|
1354 |
+ h->cur_pic_ptr = pic; |
|
1355 |
+ h->cur_pic = *h->cur_pic_ptr; |
|
1356 |
+ h->cur_pic.f.extended_data = h->cur_pic.f.data; |
|
1357 |
+ |
|
1358 |
+ ff_er_frame_start(&h->er); |
|
1341 | 1359 |
|
1342 |
- assert(s->linesize && s->uvlinesize); |
|
1360 |
+ assert(h->linesize && h->uvlinesize); |
|
1343 | 1361 |
|
1344 | 1362 |
for (i = 0; i < 16; i++) { |
1345 |
- h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1346 |
- h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1363 |
+ h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1364 |
+ h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1347 | 1365 |
} |
1348 | 1366 |
for (i = 0; i < 16; i++) { |
1349 | 1367 |
h->block_offset[16 + i] = |
1350 |
- h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1368 |
+ h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1351 | 1369 |
h->block_offset[48 + 16 + i] = |
1352 |
- h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1370 |
+ h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1353 | 1371 |
} |
1354 | 1372 |
|
1355 | 1373 |
/* can't be in alloc_tables because linesize isn't known there. |
1356 | 1374 |
* FIXME: redo bipred weight to not require extra buffer? */ |
1357 |
- for (i = 0; i < s->slice_context_count; i++) |
|
1358 |
- if (h->thread_context[i] && !h->thread_context[i]->bipred_scratchpad) |
|
1359 |
- h->thread_context[i]->bipred_scratchpad = av_malloc(16 * 6 * s->linesize); |
|
1375 |
+ for (i = 0; i < h->slice_context_count; i++) |
|
1376 |
+ if (h->thread_context[i]) { |
|
1377 |
+ ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); |
|
1378 |
+ if (ret < 0) |
|
1379 |
+ return ret; |
|
1380 |
+ } |
|
1360 | 1381 |
|
1361 | 1382 |
/* Some macroblocks can be accessed before they're available in case |
1362 | 1383 |
* of lost slices, MBAFF or threading. */ |
1363 | 1384 |
memset(h->slice_table, -1, |
1364 |
- (s->mb_height * s->mb_stride - 1) * sizeof(*h->slice_table)); |
|
1385 |
+ (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); |
|
1365 | 1386 |
|
1366 | 1387 |
// s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding || |
1367 | 1388 |
// s->current_picture.f.reference /* || h->contains_intra */ || 1; |
... | ... |
@@ -1372,15 +1664,14 @@ int ff_h264_frame_start(H264Context *h) |
1372 | 1372 |
* SVQ3 as well as most other codecs have only last/next/current and thus |
1373 | 1373 |
* get released even with set reference, besides SVQ3 and others do not |
1374 | 1374 |
* mark frames as reference later "naturally". */ |
1375 |
- if (s->codec_id != AV_CODEC_ID_SVQ3) |
|
1376 |
- s->current_picture_ptr->f.reference = 0; |
|
1375 |
+ if (h->avctx->codec_id != AV_CODEC_ID_SVQ3) |
|
1376 |
+ h->cur_pic_ptr->f.reference = 0; |
|
1377 | 1377 |
|
1378 |
- s->current_picture_ptr->field_poc[0] = |
|
1379 |
- s->current_picture_ptr->field_poc[1] = INT_MAX; |
|
1378 |
+ h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX; |
|
1380 | 1379 |
|
1381 | 1380 |
h->next_output_pic = NULL; |
1382 | 1381 |
|
1383 |
- assert(s->current_picture_ptr->long_ref == 0); |
|
1382 |
+ assert(h->cur_pic_ptr->long_ref == 0); |
|
1384 | 1383 |
|
1385 | 1384 |
return 0; |
1386 | 1385 |
} |
... | ... |
@@ -1395,14 +1686,13 @@ int ff_h264_frame_start(H264Context *h) |
1395 | 1395 |
*/ |
1396 | 1396 |
static void decode_postinit(H264Context *h, int setup_finished) |
1397 | 1397 |
{ |
1398 |
- MpegEncContext *const s = &h->s; |
|
1399 |
- Picture *out = s->current_picture_ptr; |
|
1400 |
- Picture *cur = s->current_picture_ptr; |
|
1398 |
+ Picture *out = h->cur_pic_ptr; |
|
1399 |
+ Picture *cur = h->cur_pic_ptr; |
|
1401 | 1400 |
int i, pics, out_of_order, out_idx; |
1402 | 1401 |
int invalid = 0, cnt = 0; |
1403 | 1402 |
|
1404 |
- s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264; |
|
1405 |
- s->current_picture_ptr->f.pict_type = s->pict_type; |
|
1403 |
+ h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264; |
|
1404 |
+ h->cur_pic_ptr->f.pict_type = h->pict_type; |
|
1406 | 1405 |
|
1407 | 1406 |
if (h->next_output_pic) |
1408 | 1407 |
return; |
... | ... |
@@ -1413,7 +1703,7 @@ static void decode_postinit(H264Context *h, int setup_finished) |
1413 | 1413 |
* The check in decode_nal_units() is not good enough to find this |
1414 | 1414 |
* yet, so we assume the worst for now. */ |
1415 | 1415 |
// if (setup_finished) |
1416 |
- // ff_thread_finish_setup(s->avctx); |
|
1416 |
+ // ff_thread_finish_setup(h->avctx); |
|
1417 | 1417 |
return; |
1418 | 1418 |
} |
1419 | 1419 |
|
... | ... |
@@ -1487,15 +1777,15 @@ static void decode_postinit(H264Context *h, int setup_finished) |
1487 | 1487 |
/* Sort B-frames into display order */ |
1488 | 1488 |
|
1489 | 1489 |
if (h->sps.bitstream_restriction_flag && |
1490 |
- s->avctx->has_b_frames < h->sps.num_reorder_frames) { |
|
1491 |
- s->avctx->has_b_frames = h->sps.num_reorder_frames; |
|
1492 |
- s->low_delay = 0; |
|
1490 |
+ h->avctx->has_b_frames < h->sps.num_reorder_frames) { |
|
1491 |
+ h->avctx->has_b_frames = h->sps.num_reorder_frames; |
|
1492 |
+ h->low_delay = 0; |
|
1493 | 1493 |
} |
1494 | 1494 |
|
1495 |
- if (s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT && |
|
1495 |
+ if (h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT && |
|
1496 | 1496 |
!h->sps.bitstream_restriction_flag) { |
1497 |
- s->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1; |
|
1498 |
- s->low_delay = 0; |
|
1497 |
+ h->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1; |
|
1498 |
+ h->low_delay = 0; |
|
1499 | 1499 |
} |
1500 | 1500 |
|
1501 | 1501 |
pics = 0; |
... | ... |
@@ -1516,7 +1806,7 @@ static void decode_postinit(H264Context *h, int setup_finished) |
1516 | 1516 |
* there is no delay, we can't detect that (since the frame was already |
1517 | 1517 |
* output to the user), so we also set h->mmco_reset to detect the MMCO |
1518 | 1518 |
* reset code. |
1519 |
- * FIXME: if we detect insufficient delays (as per s->avctx->has_b_frames), |
|
1519 |
+ * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames), |
|
1520 | 1520 |
* we increase the delay between input and output. All frames affected by |
1521 | 1521 |
* the lag (e.g. those that should have been output before another frame |
1522 | 1522 |
* that we already returned to the user) will be dropped. This is a bug |
... | ... |
@@ -1548,40 +1838,40 @@ static void decode_postinit(H264Context *h, int setup_finished) |
1548 | 1548 |
out = h->delayed_pic[i]; |
1549 | 1549 |
out_idx = i; |
1550 | 1550 |
} |
1551 |
- if (s->avctx->has_b_frames == 0 && |
|
1551 |
+ if (h->avctx->has_b_frames == 0 && |
|
1552 | 1552 |
(h->delayed_pic[0]->f.key_frame || h->mmco_reset)) |
1553 | 1553 |
h->next_outputed_poc = INT_MIN; |
1554 | 1554 |
out_of_order = !out->f.key_frame && !h->mmco_reset && |
1555 | 1555 |
(out->poc < h->next_outputed_poc); |
1556 | 1556 |
|
1557 | 1557 |
if (h->sps.bitstream_restriction_flag && |
1558 |
- s->avctx->has_b_frames >= h->sps.num_reorder_frames) { |
|
1559 |
- } else if (out_of_order && pics - 1 == s->avctx->has_b_frames && |
|
1560 |
- s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) { |
|
1558 |
+ h->avctx->has_b_frames >= h->sps.num_reorder_frames) { |
|
1559 |
+ } else if (out_of_order && pics - 1 == h->avctx->has_b_frames && |
|
1560 |
+ h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) { |
|
1561 | 1561 |
if (invalid + cnt < MAX_DELAYED_PIC_COUNT) { |
1562 |
- s->avctx->has_b_frames = FFMAX(s->avctx->has_b_frames, cnt); |
|
1562 |
+ h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt); |
|
1563 | 1563 |
} |
1564 |
- s->low_delay = 0; |
|
1565 |
- } else if (s->low_delay && |
|
1564 |
+ h->low_delay = 0; |
|
1565 |
+ } else if (h->low_delay && |
|
1566 | 1566 |
((h->next_outputed_poc != INT_MIN && |
1567 | 1567 |
out->poc > h->next_outputed_poc + 2) || |
1568 | 1568 |
cur->f.pict_type == AV_PICTURE_TYPE_B)) { |
1569 |
- s->low_delay = 0; |
|
1570 |
- s->avctx->has_b_frames++; |
|
1569 |
+ h->low_delay = 0; |
|
1570 |
+ h->avctx->has_b_frames++; |
|
1571 | 1571 |
} |
1572 | 1572 |
|
1573 |
- if (pics > s->avctx->has_b_frames) { |
|
1573 |
+ if (pics > h->avctx->has_b_frames) { |
|
1574 | 1574 |
out->f.reference &= ~DELAYED_PIC_REF; |
1575 | 1575 |
// for frame threading, the owner must be the second field's thread or |
1576 | 1576 |
// else the first thread can release the picture and reuse it unsafely |
1577 |
- out->owner2 = s; |
|
1577 |
+ out->owner2 = h; |
|
1578 | 1578 |
for (i = out_idx; h->delayed_pic[i]; i++) |
1579 | 1579 |
h->delayed_pic[i] = h->delayed_pic[i + 1]; |
1580 | 1580 |
} |
1581 | 1581 |
memmove(h->last_pocs, &h->last_pocs[1], |
1582 | 1582 |
sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1)); |
1583 | 1583 |
h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc; |
1584 |
- if (!out_of_order && pics > s->avctx->has_b_frames) { |
|
1584 |
+ if (!out_of_order && pics > h->avctx->has_b_frames) { |
|
1585 | 1585 |
h->next_output_pic = out; |
1586 | 1586 |
if (out->mmco_reset) { |
1587 | 1587 |
if (out_idx > 0) { |
... | ... |
@@ -1599,11 +1889,11 @@ static void decode_postinit(H264Context *h, int setup_finished) |
1599 | 1599 |
} |
1600 | 1600 |
h->mmco_reset = 0; |
1601 | 1601 |
} else { |
1602 |
- av_log(s->avctx, AV_LOG_DEBUG, "no picture\n"); |
|
1602 |
+ av_log(h->avctx, AV_LOG_DEBUG, "no picture\n"); |
|
1603 | 1603 |
} |
1604 | 1604 |
|
1605 | 1605 |
if (setup_finished) |
1606 |
- ff_thread_finish_setup(s->avctx); |
|
1606 |
+ ff_thread_finish_setup(h->avctx); |
|
1607 | 1607 |
} |
1608 | 1608 |
|
1609 | 1609 |
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, |
... | ... |
@@ -1611,7 +1901,6 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, |
1611 | 1611 |
int linesize, int uvlinesize, |
1612 | 1612 |
int simple) |
1613 | 1613 |
{ |
1614 |
- MpegEncContext *const s = &h->s; |
|
1615 | 1614 |
uint8_t *top_border; |
1616 | 1615 |
int top_idx = 1; |
1617 | 1616 |
const int pixel_shift = h->pixel_shift; |
... | ... |
@@ -1623,13 +1912,13 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, |
1623 | 1623 |
src_cr -= uvlinesize; |
1624 | 1624 |
|
1625 | 1625 |
if (!simple && FRAME_MBAFF) { |
1626 |
- if (s->mb_y & 1) { |
|
1626 |
+ if (h->mb_y & 1) { |
|
1627 | 1627 |
if (!MB_MBAFF) { |
1628 |
- top_border = h->top_borders[0][s->mb_x]; |
|
1628 |
+ top_border = h->top_borders[0][h->mb_x]; |
|
1629 | 1629 |
AV_COPY128(top_border, src_y + 15 * linesize); |
1630 | 1630 |
if (pixel_shift) |
1631 | 1631 |
AV_COPY128(top_border + 16, src_y + 15 * linesize + 16); |
1632 |
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
1632 |
+ if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
1633 | 1633 |
if (chroma444) { |
1634 | 1634 |
if (pixel_shift) { |
1635 | 1635 |
AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize); |
... | ... |
@@ -1665,14 +1954,14 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, |
1665 | 1665 |
return; |
1666 | 1666 |
} |
1667 | 1667 |
|
1668 |
- top_border = h->top_borders[top_idx][s->mb_x]; |
|
1668 |
+ top_border = h->top_borders[top_idx][h->mb_x]; |
|
1669 | 1669 |
/* There are two lines saved, the line above the top macroblock |
1670 | 1670 |
* of a pair, and the line above the bottom macroblock. */ |
1671 | 1671 |
AV_COPY128(top_border, src_y + 16 * linesize); |
1672 | 1672 |
if (pixel_shift) |
1673 | 1673 |
AV_COPY128(top_border + 16, src_y + 16 * linesize + 16); |
1674 | 1674 |
|
1675 |
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
1675 |
+ if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
1676 | 1676 |
if (chroma444) { |
1677 | 1677 |
if (pixel_shift) { |
1678 | 1678 |
AV_COPY128(top_border + 32, src_cb + 16 * linesize); |
... | ... |
@@ -1709,7 +1998,6 @@ static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, |
1709 | 1709 |
int xchg, int chroma444, |
1710 | 1710 |
int simple, int pixel_shift) |
1711 | 1711 |
{ |
1712 |
- MpegEncContext *const s = &h->s; |
|
1713 | 1712 |
int deblock_topleft; |
1714 | 1713 |
int deblock_top; |
1715 | 1714 |
int top_idx = 1; |
... | ... |
@@ -1717,7 +2005,7 @@ static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, |
1717 | 1717 |
uint8_t *top_border; |
1718 | 1718 |
|
1719 | 1719 |
if (!simple && FRAME_MBAFF) { |
1720 |
- if (s->mb_y & 1) { |
|
1720 |
+ if (h->mb_y & 1) { |
|
1721 | 1721 |
if (!MB_MBAFF) |
1722 | 1722 |
return; |
1723 | 1723 |
} else { |
... | ... |
@@ -1726,19 +2014,19 @@ static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, |
1726 | 1726 |
} |
1727 | 1727 |
|
1728 | 1728 |
if (h->deblocking_filter == 2) { |
1729 |
- deblock_topleft = h->slice_table[h->mb_xy - 1 - s->mb_stride] == h->slice_num; |
|
1729 |
+ deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num; |
|
1730 | 1730 |
deblock_top = h->top_type; |
1731 | 1731 |
} else { |
1732 |
- deblock_topleft = (s->mb_x > 0); |
|
1733 |
- deblock_top = (s->mb_y > !!MB_FIELD); |
|
1732 |
+ deblock_topleft = (h->mb_x > 0); |
|
1733 |
+ deblock_top = (h->mb_y > !!MB_FIELD); |
|
1734 | 1734 |
} |
1735 | 1735 |
|
1736 | 1736 |
src_y -= linesize + 1 + pixel_shift; |
1737 | 1737 |
src_cb -= uvlinesize + 1 + pixel_shift; |
1738 | 1738 |
src_cr -= uvlinesize + 1 + pixel_shift; |
1739 | 1739 |
|
1740 |
- top_border_m1 = h->top_borders[top_idx][s->mb_x - 1]; |
|
1741 |
- top_border = h->top_borders[top_idx][s->mb_x]; |
|
1740 |
+ top_border_m1 = h->top_borders[top_idx][h->mb_x - 1]; |
|
1741 |
+ top_border = h->top_borders[top_idx][h->mb_x]; |
|
1742 | 1742 |
|
1743 | 1743 |
#define XCHG(a, b, xchg) \ |
1744 | 1744 |
if (pixel_shift) { \ |
... | ... |
@@ -1760,12 +2048,12 @@ static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, |
1760 | 1760 |
} |
1761 | 1761 |
XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg); |
1762 | 1762 |
XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1); |
1763 |
- if (s->mb_x + 1 < s->mb_width) { |
|
1764 |
- XCHG(h->top_borders[top_idx][s->mb_x + 1], |
|
1763 |
+ if (h->mb_x + 1 < h->mb_width) { |
|
1764 |
+ XCHG(h->top_borders[top_idx][h->mb_x + 1], |
|
1765 | 1765 |
src_y + (17 << pixel_shift), 1); |
1766 | 1766 |
} |
1767 | 1767 |
} |
1768 |
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
1768 |
+ if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
1769 | 1769 |
if (chroma444) { |
1770 | 1770 |
if (deblock_topleft) { |
1771 | 1771 |
XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1); |
... | ... |
@@ -1775,9 +2063,9 @@ static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, |
1775 | 1775 |
XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1); |
1776 | 1776 |
XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg); |
1777 | 1777 |
XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1); |
1778 |
- if (s->mb_x + 1 < s->mb_width) { |
|
1779 |
- XCHG(h->top_borders[top_idx][s->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1); |
|
1780 |
- XCHG(h->top_borders[top_idx][s->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1); |
|
1778 |
+ if (h->mb_x + 1 < h->mb_width) { |
|
1779 |
+ XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1); |
|
1780 |
+ XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1); |
|
1781 | 1781 |
} |
1782 | 1782 |
} else { |
1783 | 1783 |
if (deblock_top) { |
... | ... |
@@ -1819,17 +2107,16 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, |
1819 | 1819 |
int linesize, |
1820 | 1820 |
uint8_t *dest_y, int p) |
1821 | 1821 |
{ |
1822 |
- MpegEncContext *const s = &h->s; |
|
1823 | 1822 |
void (*idct_add)(uint8_t *dst, int16_t *block, int stride); |
1824 | 1823 |
void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride); |
1825 | 1824 |
int i; |
1826 |
- int qscale = p == 0 ? s->qscale : h->chroma_qp[p - 1]; |
|
1825 |
+ int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1]; |
|
1827 | 1826 |
block_offset += 16 * p; |
1828 | 1827 |
if (IS_INTRA4x4(mb_type)) { |
1829 | 1828 |
if (IS_8x8DCT(mb_type)) { |
1830 | 1829 |
if (transform_bypass) { |
1831 | 1830 |
idct_dc_add = |
1832 |
- idct_add = s->dsp.add_pixels8; |
|
1831 |
+ idct_add = h->dsp.add_pixels8; |
|
1833 | 1832 |
} else { |
1834 | 1833 |
idct_dc_add = h->h264dsp.h264_idct8_dc_add; |
1835 | 1834 |
idct_add = h->h264dsp.h264_idct8_add; |
... | ... |
@@ -1854,7 +2141,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, |
1854 | 1854 |
} else { |
1855 | 1855 |
if (transform_bypass) { |
1856 | 1856 |
idct_dc_add = |
1857 |
- idct_add = s->dsp.add_pixels4; |
|
1857 |
+ idct_add = h->dsp.add_pixels4; |
|
1858 | 1858 |
} else { |
1859 | 1859 |
idct_dc_add = h->h264dsp.h264_idct_dc_add; |
1860 | 1860 |
idct_add = h->h264dsp.h264_idct_add; |
... | ... |
@@ -1871,7 +2158,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, |
1871 | 1871 |
uint64_t tr_high; |
1872 | 1872 |
if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) { |
1873 | 1873 |
const int topright_avail = (h->topright_samples_available << i) & 0x8000; |
1874 |
- assert(s->mb_y || linesize <= block_offset[i]); |
|
1874 |
+ assert(h->mb_y || linesize <= block_offset[i]); |
|
1875 | 1875 |
if (!topright_avail) { |
1876 | 1876 |
if (pixel_shift) { |
1877 | 1877 |
tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL; |
... | ... |
@@ -1934,7 +2221,6 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, |
1934 | 1934 |
int linesize, |
1935 | 1935 |
uint8_t *dest_y, int p) |
1936 | 1936 |
{ |
1937 |
- MpegEncContext *const s = &h->s; |
|
1938 | 1937 |
void (*idct_add)(uint8_t *dst, int16_t *block, int stride); |
1939 | 1938 |
int i; |
1940 | 1939 |
block_offset += 16 * p; |
... | ... |
@@ -1952,7 +2238,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, |
1952 | 1952 |
for (i = 0; i < 16; i++) |
1953 | 1953 |
if (h->non_zero_count_cache[scan8[i + p * 16]] || |
1954 | 1954 |
dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256)) |
1955 |
- s->dsp.add_pixels4(dest_y + block_offset[i], |
|
1955 |
+ h->dsp.add_pixels4(dest_y + block_offset[i], |
|
1956 | 1956 |
h->mb + (i * 16 + p * 256 << pixel_shift), |
1957 | 1957 |
linesize); |
1958 | 1958 |
} |
... | ... |
@@ -1965,8 +2251,8 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, |
1965 | 1965 |
} else if (h->cbp & 15) { |
1966 | 1966 |
if (transform_bypass) { |
1967 | 1967 |
const int di = IS_8x8DCT(mb_type) ? 4 : 1; |
1968 |
- idct_add = IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 |
|
1969 |
- : s->dsp.add_pixels4; |
|
1968 |
+ idct_add = IS_8x8DCT(mb_type) ? h->dsp.add_pixels8 |
|
1969 |
+ : h->dsp.add_pixels4; |
|
1970 | 1970 |
for (i = 0; i < 16; i += di) |
1971 | 1971 |
if (h->non_zero_count_cache[scan8[i + p * 16]]) |
1972 | 1972 |
idct_add(dest_y + block_offset[i], |
... | ... |
@@ -1991,7 +2277,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, |
1991 | 1991 |
// FIXME benchmark weird rule, & below |
1992 | 1992 |
uint8_t *const ptr = dest_y + block_offset[i]; |
1993 | 1993 |
ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, |
1994 |
- s->qscale, IS_INTRA(mb_type) ? 1 : 0); |
|
1994 |
+ h->qscale, IS_INTRA(mb_type) ? 1 : 0); |
|
1995 | 1995 |
} |
1996 | 1996 |
} |
1997 | 1997 |
} |
... | ... |
@@ -2011,10 +2297,9 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, |
2011 | 2011 |
|
2012 | 2012 |
void ff_h264_hl_decode_mb(H264Context *h) |
2013 | 2013 |
{ |
2014 |
- MpegEncContext *const s = &h->s; |
|
2015 | 2014 |
const int mb_xy = h->mb_xy; |
2016 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
2017 |
- int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0; |
|
2015 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
2016 |
+ int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0; |
|
2018 | 2017 |
|
2019 | 2018 |
if (CHROMA444) { |
2020 | 2019 |
if (is_complex || h->pixel_shift) |
... | ... |
@@ -2031,15 +2316,14 @@ void ff_h264_hl_decode_mb(H264Context *h) |
2031 | 2031 |
|
2032 | 2032 |
static int pred_weight_table(H264Context *h) |
2033 | 2033 |
{ |
2034 |
- MpegEncContext *const s = &h->s; |
|
2035 | 2034 |
int list, i; |
2036 | 2035 |
int luma_def, chroma_def; |
2037 | 2036 |
|
2038 | 2037 |
h->use_weight = 0; |
2039 | 2038 |
h->use_weight_chroma = 0; |
2040 |
- h->luma_log2_weight_denom = get_ue_golomb(&s->gb); |
|
2039 |
+ h->luma_log2_weight_denom = get_ue_golomb(&h->gb); |
|
2041 | 2040 |
if (h->sps.chroma_format_idc) |
2042 |
- h->chroma_log2_weight_denom = get_ue_golomb(&s->gb); |
|
2041 |
+ h->chroma_log2_weight_denom = get_ue_golomb(&h->gb); |
|
2043 | 2042 |
luma_def = 1 << h->luma_log2_weight_denom; |
2044 | 2043 |
chroma_def = 1 << h->chroma_log2_weight_denom; |
2045 | 2044 |
|
... | ... |
@@ -2049,10 +2333,10 @@ static int pred_weight_table(H264Context *h) |
2049 | 2049 |
for (i = 0; i < h->ref_count[list]; i++) { |
2050 | 2050 |
int luma_weight_flag, chroma_weight_flag; |
2051 | 2051 |
|
2052 |
- luma_weight_flag = get_bits1(&s->gb); |
|
2052 |
+ luma_weight_flag = get_bits1(&h->gb); |
|
2053 | 2053 |
if (luma_weight_flag) { |
2054 |
- h->luma_weight[i][list][0] = get_se_golomb(&s->gb); |
|
2055 |
- h->luma_weight[i][list][1] = get_se_golomb(&s->gb); |
|
2054 |
+ h->luma_weight[i][list][0] = get_se_golomb(&h->gb); |
|
2055 |
+ h->luma_weight[i][list][1] = get_se_golomb(&h->gb); |
|
2056 | 2056 |
if (h->luma_weight[i][list][0] != luma_def || |
2057 | 2057 |
h->luma_weight[i][list][1] != 0) { |
2058 | 2058 |
h->use_weight = 1; |
... | ... |
@@ -2064,12 +2348,12 @@ static int pred_weight_table(H264Context *h) |
2064 | 2064 |
} |
2065 | 2065 |
|
2066 | 2066 |
if (h->sps.chroma_format_idc) { |
2067 |
- chroma_weight_flag = get_bits1(&s->gb); |
|
2067 |
+ chroma_weight_flag = get_bits1(&h->gb); |
|
2068 | 2068 |
if (chroma_weight_flag) { |
2069 | 2069 |
int j; |
2070 | 2070 |
for (j = 0; j < 2; j++) { |
2071 |
- h->chroma_weight[i][list][j][0] = get_se_golomb(&s->gb); |
|
2072 |
- h->chroma_weight[i][list][j][1] = get_se_golomb(&s->gb); |
|
2071 |
+ h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb); |
|
2072 |
+ h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb); |
|
2073 | 2073 |
if (h->chroma_weight[i][list][j][0] != chroma_def || |
2074 | 2074 |
h->chroma_weight[i][list][j][1] != 0) { |
2075 | 2075 |
h->use_weight_chroma = 1; |
... | ... |
@@ -2099,7 +2383,6 @@ static int pred_weight_table(H264Context *h) |
2099 | 2099 |
*/ |
2100 | 2100 |
static void implicit_weight_table(H264Context *h, int field) |
2101 | 2101 |
{ |
2102 |
- MpegEncContext *const s = &h->s; |
|
2103 | 2102 |
int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1; |
2104 | 2103 |
|
2105 | 2104 |
for (i = 0; i < 2; i++) { |
... | ... |
@@ -2108,10 +2391,10 @@ static void implicit_weight_table(H264Context *h, int field) |
2108 | 2108 |
} |
2109 | 2109 |
|
2110 | 2110 |
if (field < 0) { |
2111 |
- if (s->picture_structure == PICT_FRAME) { |
|
2112 |
- cur_poc = s->current_picture_ptr->poc; |
|
2111 |
+ if (h->picture_structure == PICT_FRAME) { |
|
2112 |
+ cur_poc = h->cur_pic_ptr->poc; |
|
2113 | 2113 |
} else { |
2114 |
- cur_poc = s->current_picture_ptr->field_poc[s->picture_structure - 1]; |
|
2114 |
+ cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1]; |
|
2115 | 2115 |
} |
2116 | 2116 |
if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF && |
2117 | 2117 |
h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) { |
... | ... |
@@ -2123,7 +2406,7 @@ static void implicit_weight_table(H264Context *h, int field) |
2123 | 2123 |
ref_count0 = h->ref_count[0]; |
2124 | 2124 |
ref_count1 = h->ref_count[1]; |
2125 | 2125 |
} else { |
2126 |
- cur_poc = s->current_picture_ptr->field_poc[field]; |
|
2126 |
+ cur_poc = h->cur_pic_ptr->field_poc[field]; |
|
2127 | 2127 |
ref_start = 16; |
2128 | 2128 |
ref_count0 = 16 + 2 * h->ref_count[0]; |
2129 | 2129 |
ref_count1 = 16 + 2 * h->ref_count[1]; |
... | ... |
@@ -2180,9 +2463,9 @@ static void flush_change(H264Context *h) |
2180 | 2180 |
h->outputed_poc = h->next_outputed_poc = INT_MIN; |
2181 | 2181 |
h->prev_interlaced_frame = 1; |
2182 | 2182 |
idr(h); |
2183 |
- if (h->s.current_picture_ptr) |
|
2184 |
- h->s.current_picture_ptr->f.reference = 0; |
|
2185 |
- h->s.first_field = 0; |
|
2183 |
+ if (h->cur_pic_ptr) |
|
2184 |
+ h->cur_pic_ptr->f.reference = 0; |
|
2185 |
+ h->first_field = 0; |
|
2186 | 2186 |
memset(h->ref_list[0], 0, sizeof(h->ref_list[0])); |
2187 | 2187 |
memset(h->ref_list[1], 0, sizeof(h->ref_list[1])); |
2188 | 2188 |
memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0])); |
... | ... |
@@ -2203,15 +2486,28 @@ static void flush_dpb(AVCodecContext *avctx) |
2203 | 2203 |
} |
2204 | 2204 |
|
2205 | 2205 |
flush_change(h); |
2206 |
- ff_mpeg_flush(avctx); |
|
2206 |
+ |
|
2207 |
+ for (i = 0; i < h->picture_count; i++) { |
|
2208 |
+ if (h->DPB[i].f.data[0]) |
|
2209 |
+ free_frame_buffer(h, &h->DPB[i]); |
|
2210 |
+ } |
|
2211 |
+ h->cur_pic_ptr = NULL; |
|
2212 |
+ |
|
2213 |
+ h->mb_x = h->mb_y = 0; |
|
2214 |
+ |
|
2215 |
+ h->parse_context.state = -1; |
|
2216 |
+ h->parse_context.frame_start_found = 0; |
|
2217 |
+ h->parse_context.overread = 0; |
|
2218 |
+ h->parse_context.overread_index = 0; |
|
2219 |
+ h->parse_context.index = 0; |
|
2220 |
+ h->parse_context.last_index = 0; |
|
2207 | 2221 |
} |
2208 | 2222 |
|
2209 | 2223 |
static int init_poc(H264Context *h) |
2210 | 2224 |
{ |
2211 |
- MpegEncContext *const s = &h->s; |
|
2212 | 2225 |
const int max_frame_num = 1 << h->sps.log2_max_frame_num; |
2213 | 2226 |
int field_poc[2]; |
2214 |
- Picture *cur = s->current_picture_ptr; |
|
2227 |
+ Picture *cur = h->cur_pic_ptr; |
|
2215 | 2228 |
|
2216 | 2229 |
h->frame_num_offset = h->prev_frame_num_offset; |
2217 | 2230 |
if (h->frame_num < h->prev_frame_num) |
... | ... |
@@ -2228,7 +2524,7 @@ static int init_poc(H264Context *h) |
2228 | 2228 |
h->poc_msb = h->prev_poc_msb; |
2229 | 2229 |
field_poc[0] = |
2230 | 2230 |
field_poc[1] = h->poc_msb + h->poc_lsb; |
2231 |
- if (s->picture_structure == PICT_FRAME) |
|
2231 |
+ if (h->picture_structure == PICT_FRAME) |
|
2232 | 2232 |
field_poc[1] += h->delta_poc_bottom; |
2233 | 2233 |
} else if (h->sps.poc_type == 1) { |
2234 | 2234 |
int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc; |
... | ... |
@@ -2263,7 +2559,7 @@ static int init_poc(H264Context *h) |
2263 | 2263 |
field_poc[0] = expectedpoc + h->delta_poc[0]; |
2264 | 2264 |
field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field; |
2265 | 2265 |
|
2266 |
- if (s->picture_structure == PICT_FRAME) |
|
2266 |
+ if (h->picture_structure == PICT_FRAME) |
|
2267 | 2267 |
field_poc[1] += h->delta_poc[1]; |
2268 | 2268 |
} else { |
2269 | 2269 |
int poc = 2 * (h->frame_num_offset + h->frame_num); |
... | ... |
@@ -2275,10 +2571,10 @@ static int init_poc(H264Context *h) |
2275 | 2275 |
field_poc[1] = poc; |
2276 | 2276 |
} |
2277 | 2277 |
|
2278 |
- if (s->picture_structure != PICT_BOTTOM_FIELD) |
|
2279 |
- s->current_picture_ptr->field_poc[0] = field_poc[0]; |
|
2280 |
- if (s->picture_structure != PICT_TOP_FIELD) |
|
2281 |
- s->current_picture_ptr->field_poc[1] = field_poc[1]; |
|
2278 |
+ if (h->picture_structure != PICT_BOTTOM_FIELD) |
|
2279 |
+ h->cur_pic_ptr->field_poc[0] = field_poc[0]; |
|
2280 |
+ if (h->picture_structure != PICT_TOP_FIELD) |
|
2281 |
+ h->cur_pic_ptr->field_poc[1] = field_poc[1]; |
|
2282 | 2282 |
cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]); |
2283 | 2283 |
|
2284 | 2284 |
return 0; |
... | ... |
@@ -2323,21 +2619,20 @@ static void init_scan_tables(H264Context *h) |
2323 | 2323 |
|
2324 | 2324 |
static int field_end(H264Context *h, int in_setup) |
2325 | 2325 |
{ |
2326 |
- MpegEncContext *const s = &h->s; |
|
2327 |
- AVCodecContext *const avctx = s->avctx; |
|
2326 |
+ AVCodecContext *const avctx = h->avctx; |
|
2328 | 2327 |
int err = 0; |
2329 |
- s->mb_y = 0; |
|
2328 |
+ h->mb_y = 0; |
|
2330 | 2329 |
|
2331 |
- if (!in_setup && !s->droppable) |
|
2332 |
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, |
|
2333 |
- s->picture_structure == PICT_BOTTOM_FIELD); |
|
2330 |
+ if (!in_setup && !h->droppable) |
|
2331 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, |
|
2332 |
+ h->picture_structure == PICT_BOTTOM_FIELD); |
|
2334 | 2333 |
|
2335 | 2334 |
if (CONFIG_H264_VDPAU_DECODER && |
2336 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2337 |
- ff_vdpau_h264_set_reference_frames(s); |
|
2335 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2336 |
+ ff_vdpau_h264_set_reference_frames(h); |
|
2338 | 2337 |
|
2339 | 2338 |
if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { |
2340 |
- if (!s->droppable) { |
|
2339 |
+ if (!h->droppable) { |
|
2341 | 2340 |
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); |
2342 | 2341 |
h->prev_poc_msb = h->poc_msb; |
2343 | 2342 |
h->prev_poc_lsb = h->poc_lsb; |
... | ... |
@@ -2354,8 +2649,8 @@ static int field_end(H264Context *h, int in_setup) |
2354 | 2354 |
} |
2355 | 2355 |
|
2356 | 2356 |
if (CONFIG_H264_VDPAU_DECODER && |
2357 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2358 |
- ff_vdpau_h264_picture_complete(s); |
|
2357 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2358 |
+ ff_vdpau_h264_picture_complete(h); |
|
2359 | 2359 |
|
2360 | 2360 |
/* |
2361 | 2361 |
* FIXME: Error handling code does not seem to support interlaced |
... | ... |
@@ -2369,10 +2664,36 @@ static int field_end(H264Context *h, int in_setup) |
2369 | 2369 |
* past end by one (callers fault) and resync_mb_y != 0 |
2370 | 2370 |
* causes problems for the first MB line, too. |
2371 | 2371 |
*/ |
2372 |
- if (!FIELD_PICTURE) |
|
2373 |
- ff_er_frame_end(&s->er); |
|
2374 |
- |
|
2375 |
- ff_MPV_frame_end(s); |
|
2372 |
+ if (!FIELD_PICTURE) { |
|
2373 |
+ h->er.cur_pic = h->cur_pic_ptr; |
|
2374 |
+ h->er.last_pic = h->ref_count[0] ? &h->ref_list[0][0] : NULL; |
|
2375 |
+ h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL; |
|
2376 |
+ ff_er_frame_end(&h->er); |
|
2377 |
+ } |
|
2378 |
+ |
|
2379 |
+ /* redraw edges for the frame if decoding didn't complete */ |
|
2380 |
+ if (h->er.error_count && |
|
2381 |
+ !h->avctx->hwaccel && |
|
2382 |
+ !(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && |
|
2383 |
+ h->cur_pic_ptr->f.reference && |
|
2384 |
+ !(h->flags & CODEC_FLAG_EMU_EDGE)) { |
|
2385 |
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(h->avctx->pix_fmt); |
|
2386 |
+ int hshift = desc->log2_chroma_w; |
|
2387 |
+ int vshift = desc->log2_chroma_h; |
|
2388 |
+ h->dsp.draw_edges(h->cur_pic.f.data[0], h->linesize, |
|
2389 |
+ h->mb_width * 16, h->mb_height * 16, |
|
2390 |
+ EDGE_WIDTH, EDGE_WIDTH, |
|
2391 |
+ EDGE_TOP | EDGE_BOTTOM); |
|
2392 |
+ h->dsp.draw_edges(h->cur_pic.f.data[1], h->uvlinesize, |
|
2393 |
+ (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift, |
|
2394 |
+ EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, |
|
2395 |
+ EDGE_TOP | EDGE_BOTTOM); |
|
2396 |
+ h->dsp.draw_edges(h->cur_pic.f.data[2], h->uvlinesize, |
|
2397 |
+ (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift, |
|
2398 |
+ EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, |
|
2399 |
+ EDGE_TOP | EDGE_BOTTOM); |
|
2400 |
+ } |
|
2401 |
+ emms_c(); |
|
2376 | 2402 |
|
2377 | 2403 |
h->current_slice = 0; |
2378 | 2404 |
|
... | ... |
@@ -2384,21 +2705,12 @@ static int field_end(H264Context *h, int in_setup) |
2384 | 2384 |
*/ |
2385 | 2385 |
static int clone_slice(H264Context *dst, H264Context *src) |
2386 | 2386 |
{ |
2387 |
- int ret; |
|
2388 |
- |
|
2389 | 2387 |
memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset)); |
2390 |
- dst->s.current_picture_ptr = src->s.current_picture_ptr; |
|
2391 |
- dst->s.current_picture = src->s.current_picture; |
|
2392 |
- dst->s.linesize = src->s.linesize; |
|
2393 |
- dst->s.uvlinesize = src->s.uvlinesize; |
|
2394 |
- dst->s.first_field = src->s.first_field; |
|
2395 |
- |
|
2396 |
- if (!dst->s.edge_emu_buffer && |
|
2397 |
- (ret = ff_mpv_frame_size_alloc(&dst->s, dst->s.linesize))) { |
|
2398 |
- av_log(dst->s.avctx, AV_LOG_ERROR, |
|
2399 |
- "Failed to allocate scratch buffers\n"); |
|
2400 |
- return ret; |
|
2401 |
- } |
|
2388 |
+ dst->cur_pic_ptr = src->cur_pic_ptr; |
|
2389 |
+ dst->cur_pic = src->cur_pic; |
|
2390 |
+ dst->linesize = src->linesize; |
|
2391 |
+ dst->uvlinesize = src->uvlinesize; |
|
2392 |
+ dst->first_field = src->first_field; |
|
2402 | 2393 |
|
2403 | 2394 |
dst->prev_poc_msb = src->prev_poc_msb; |
2404 | 2395 |
dst->prev_poc_lsb = src->prev_poc_lsb; |
... | ... |
@@ -2445,32 +2757,30 @@ int ff_h264_get_profile(SPS *sps) |
2445 | 2445 |
|
2446 | 2446 |
static int h264_set_parameter_from_sps(H264Context *h) |
2447 | 2447 |
{ |
2448 |
- MpegEncContext *s = &h->s; |
|
2449 |
- |
|
2450 |
- if (s->flags & CODEC_FLAG_LOW_DELAY || |
|
2448 |
+ if (h->flags & CODEC_FLAG_LOW_DELAY || |
|
2451 | 2449 |
(h->sps.bitstream_restriction_flag && |
2452 | 2450 |
!h->sps.num_reorder_frames)) { |
2453 |
- if (s->avctx->has_b_frames > 1 || h->delayed_pic[0]) |
|
2454 |
- av_log(h->s.avctx, AV_LOG_WARNING, "Delayed frames seen. " |
|
2451 |
+ if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) |
|
2452 |
+ av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. " |
|
2455 | 2453 |
"Reenabling low delay requires a codec flush.\n"); |
2456 | 2454 |
else |
2457 |
- s->low_delay = 1; |
|
2455 |
+ h->low_delay = 1; |
|
2458 | 2456 |
} |
2459 | 2457 |
|
2460 |
- if (s->avctx->has_b_frames < 2) |
|
2461 |
- s->avctx->has_b_frames = !s->low_delay; |
|
2458 |
+ if (h->avctx->has_b_frames < 2) |
|
2459 |
+ h->avctx->has_b_frames = !h->low_delay; |
|
2462 | 2460 |
|
2463 |
- if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || |
|
2461 |
+ if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || |
|
2464 | 2462 |
h->cur_chroma_format_idc != h->sps.chroma_format_idc) { |
2465 |
- if (s->avctx->codec && |
|
2466 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU && |
|
2463 |
+ if (h->avctx->codec && |
|
2464 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU && |
|
2467 | 2465 |
(h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { |
2468 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
2466 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2469 | 2467 |
"VDPAU decoding does not support video colorspace.\n"); |
2470 | 2468 |
return AVERROR_INVALIDDATA; |
2471 | 2469 |
} |
2472 | 2470 |
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) { |
2473 |
- s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; |
|
2471 |
+ h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; |
|
2474 | 2472 |
h->cur_chroma_format_idc = h->sps.chroma_format_idc; |
2475 | 2473 |
h->pixel_shift = h->sps.bit_depth_luma > 8; |
2476 | 2474 |
|
... | ... |
@@ -2478,13 +2788,13 @@ static int h264_set_parameter_from_sps(H264Context *h) |
2478 | 2478 |
h->sps.chroma_format_idc); |
2479 | 2479 |
ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); |
2480 | 2480 |
ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma); |
2481 |
- ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, |
|
2481 |
+ ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma, |
|
2482 | 2482 |
h->sps.chroma_format_idc); |
2483 |
- s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16; |
|
2484 |
- ff_dsputil_init(&s->dsp, s->avctx); |
|
2485 |
- ff_videodsp_init(&s->vdsp, h->sps.bit_depth_luma); |
|
2483 |
+ h->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16; |
|
2484 |
+ ff_dsputil_init(&h->dsp, h->avctx); |
|
2485 |
+ ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma); |
|
2486 | 2486 |
} else { |
2487 |
- av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", |
|
2487 |
+ av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", |
|
2488 | 2488 |
h->sps.bit_depth_luma); |
2489 | 2489 |
return AVERROR_INVALIDDATA; |
2490 | 2490 |
} |
... | ... |
@@ -2494,11 +2804,10 @@ static int h264_set_parameter_from_sps(H264Context *h) |
2494 | 2494 |
|
2495 | 2495 |
static enum PixelFormat get_pixel_format(H264Context *h) |
2496 | 2496 |
{ |
2497 |
- MpegEncContext *const s = &h->s; |
|
2498 | 2497 |
switch (h->sps.bit_depth_luma) { |
2499 | 2498 |
case 9: |
2500 | 2499 |
if (CHROMA444) { |
2501 |
- if (s->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2500 |
+ if (h->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2502 | 2501 |
return AV_PIX_FMT_GBRP9; |
2503 | 2502 |
} else |
2504 | 2503 |
return AV_PIX_FMT_YUV444P9; |
... | ... |
@@ -2509,7 +2818,7 @@ static enum PixelFormat get_pixel_format(H264Context *h) |
2509 | 2509 |
break; |
2510 | 2510 |
case 10: |
2511 | 2511 |
if (CHROMA444) { |
2512 |
- if (s->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2512 |
+ if (h->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2513 | 2513 |
return AV_PIX_FMT_GBRP10; |
2514 | 2514 |
} else |
2515 | 2515 |
return AV_PIX_FMT_YUV444P10; |
... | ... |
@@ -2520,24 +2829,24 @@ static enum PixelFormat get_pixel_format(H264Context *h) |
2520 | 2520 |
break; |
2521 | 2521 |
case 8: |
2522 | 2522 |
if (CHROMA444) { |
2523 |
- if (s->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2523 |
+ if (h->avctx->colorspace == AVCOL_SPC_RGB) { |
|
2524 | 2524 |
return AV_PIX_FMT_GBRP; |
2525 | 2525 |
} else |
2526 |
- return s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P |
|
2526 |
+ return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P |
|
2527 | 2527 |
: AV_PIX_FMT_YUV444P; |
2528 | 2528 |
} else if (CHROMA422) { |
2529 |
- return s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P |
|
2529 |
+ return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P |
|
2530 | 2530 |
: AV_PIX_FMT_YUV422P; |
2531 | 2531 |
} else { |
2532 |
- return s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts ? |
|
2533 |
- s->avctx->codec->pix_fmts : |
|
2534 |
- s->avctx->color_range == AVCOL_RANGE_JPEG ? |
|
2532 |
+ return h->avctx->get_format(h->avctx, h->avctx->codec->pix_fmts ? |
|
2533 |
+ h->avctx->codec->pix_fmts : |
|
2534 |
+ h->avctx->color_range == AVCOL_RANGE_JPEG ? |
|
2535 | 2535 |
hwaccel_pixfmt_list_h264_jpeg_420 : |
2536 | 2536 |
ff_hwaccel_pixfmt_list_420); |
2537 | 2537 |
} |
2538 | 2538 |
break; |
2539 | 2539 |
default: |
2540 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
2540 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2541 | 2541 |
"Unsupported bit depth: %d\n", h->sps.bit_depth_luma); |
2542 | 2542 |
return AVERROR_INVALIDDATA; |
2543 | 2543 |
} |
... | ... |
@@ -2545,73 +2854,101 @@ static enum PixelFormat get_pixel_format(H264Context *h) |
2545 | 2545 |
|
2546 | 2546 |
static int h264_slice_header_init(H264Context *h, int reinit) |
2547 | 2547 |
{ |
2548 |
- MpegEncContext *const s = &h->s; |
|
2549 |
- int i, ret; |
|
2548 |
+ int nb_slices = (HAVE_THREADS && |
|
2549 |
+ h->avctx->active_thread_type & FF_THREAD_SLICE) ? |
|
2550 |
+ h->avctx->thread_count : 1; |
|
2551 |
+ int i; |
|
2550 | 2552 |
|
2551 |
- avcodec_set_dimensions(s->avctx, s->width, s->height); |
|
2552 |
- s->avctx->sample_aspect_ratio = h->sps.sar; |
|
2553 |
- av_assert0(s->avctx->sample_aspect_ratio.den); |
|
2553 |
+ avcodec_set_dimensions(h->avctx, h->width, h->height); |
|
2554 |
+ h->avctx->sample_aspect_ratio = h->sps.sar; |
|
2555 |
+ av_assert0(h->avctx->sample_aspect_ratio.den); |
|
2556 |
+ av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, |
|
2557 |
+ &h->chroma_x_shift, &h->chroma_y_shift); |
|
2554 | 2558 |
|
2555 | 2559 |
if (h->sps.timing_info_present_flag) { |
2556 | 2560 |
int64_t den = h->sps.time_scale; |
2557 | 2561 |
if (h->x264_build < 44U) |
2558 | 2562 |
den *= 2; |
2559 |
- av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den, |
|
2563 |
+ av_reduce(&h->avctx->time_base.num, &h->avctx->time_base.den, |
|
2560 | 2564 |
h->sps.num_units_in_tick, den, 1 << 30); |
2561 | 2565 |
} |
2562 | 2566 |
|
2563 |
- s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt); |
|
2567 |
+ h->avctx->hwaccel = ff_find_hwaccel(h->avctx->codec->id, h->avctx->pix_fmt); |
|
2564 | 2568 |
|
2565 |
- if (reinit) { |
|
2569 |
+ if (reinit) |
|
2566 | 2570 |
free_tables(h, 0); |
2567 |
- if ((ret = ff_MPV_common_frame_size_change(s)) < 0) { |
|
2568 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_frame_size_change() failed.\n"); |
|
2569 |
- return ret; |
|
2570 |
- } |
|
2571 |
- } else { |
|
2572 |
- if ((ret = ff_MPV_common_init(s)) < 0) { |
|
2573 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n"); |
|
2574 |
- return ret; |
|
2575 |
- } |
|
2576 |
- } |
|
2577 |
- s->first_field = 0; |
|
2571 |
+ h->first_field = 0; |
|
2578 | 2572 |
h->prev_interlaced_frame = 1; |
2579 | 2573 |
|
2580 | 2574 |
init_scan_tables(h); |
2581 | 2575 |
if (ff_h264_alloc_tables(h) < 0) { |
2582 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2576 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2583 | 2577 |
"Could not allocate memory for h264\n"); |
2584 | 2578 |
return AVERROR(ENOMEM); |
2585 | 2579 |
} |
2586 | 2580 |
|
2587 |
- if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_SLICE)) { |
|
2581 |
+ if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { |
|
2582 |
+ int max_slices; |
|
2583 |
+ if (h->mb_height) |
|
2584 |
+ max_slices = FFMIN(MAX_THREADS, h->mb_height); |
|
2585 |
+ else |
|
2586 |
+ max_slices = MAX_THREADS; |
|
2587 |
+ av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices (%d)," |
|
2588 |
+ " reducing to %d\n", nb_slices, max_slices); |
|
2589 |
+ nb_slices = max_slices; |
|
2590 |
+ } |
|
2591 |
+ h->slice_context_count = nb_slices; |
|
2592 |
+ |
|
2593 |
+ if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) { |
|
2588 | 2594 |
if (context_init(h) < 0) { |
2589 |
- av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n"); |
|
2595 |
+ av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); |
|
2590 | 2596 |
return -1; |
2591 | 2597 |
} |
2592 | 2598 |
} else { |
2593 |
- for (i = 1; i < s->slice_context_count; i++) { |
|
2599 |
+ for (i = 1; i < h->slice_context_count; i++) { |
|
2594 | 2600 |
H264Context *c; |
2595 |
- c = h->thread_context[i] = av_malloc(sizeof(H264Context)); |
|
2596 |
- memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext)); |
|
2597 |
- memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext)); |
|
2601 |
+ c = h->thread_context[i] = av_mallocz(sizeof(H264Context)); |
|
2602 |
+ c->avctx = h->avctx; |
|
2603 |
+ c->dsp = h->dsp; |
|
2604 |
+ c->vdsp = h->vdsp; |
|
2598 | 2605 |
c->h264dsp = h->h264dsp; |
2599 | 2606 |
c->h264qpel = h->h264qpel; |
2600 | 2607 |
c->h264chroma = h->h264chroma; |
2601 | 2608 |
c->sps = h->sps; |
2602 | 2609 |
c->pps = h->pps; |
2603 | 2610 |
c->pixel_shift = h->pixel_shift; |
2611 |
+ c->width = h->width; |
|
2612 |
+ c->height = h->height; |
|
2613 |
+ c->linesize = h->linesize; |
|
2614 |
+ c->uvlinesize = h->uvlinesize; |
|
2615 |
+ c->chroma_x_shift = h->chroma_x_shift; |
|
2616 |
+ c->chroma_y_shift = h->chroma_y_shift; |
|
2617 |
+ c->qscale = h->qscale; |
|
2618 |
+ c->droppable = h->droppable; |
|
2619 |
+ c->data_partitioning = h->data_partitioning; |
|
2620 |
+ c->low_delay = h->low_delay; |
|
2621 |
+ c->mb_width = h->mb_width; |
|
2622 |
+ c->mb_height = h->mb_height; |
|
2623 |
+ c->mb_stride = h->mb_stride; |
|
2624 |
+ c->mb_num = h->mb_num; |
|
2625 |
+ c->flags = h->flags; |
|
2626 |
+ c->workaround_bugs = h->workaround_bugs; |
|
2627 |
+ c->pict_type = h->pict_type; |
|
2628 |
+ |
|
2604 | 2629 |
init_scan_tables(c); |
2605 | 2630 |
clone_tables(c, h, i); |
2631 |
+ c->context_initialized = 1; |
|
2606 | 2632 |
} |
2607 | 2633 |
|
2608 |
- for (i = 0; i < s->slice_context_count; i++) |
|
2634 |
+ for (i = 0; i < h->slice_context_count; i++) |
|
2609 | 2635 |
if (context_init(h->thread_context[i]) < 0) { |
2610 |
- av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n"); |
|
2636 |
+ av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); |
|
2611 | 2637 |
return -1; |
2612 | 2638 |
} |
2613 | 2639 |
} |
2614 | 2640 |
|
2641 |
+ h->context_initialized = 1; |
|
2642 |
+ |
|
2615 | 2643 |
return 0; |
2616 | 2644 |
} |
2617 | 2645 |
|
... | ... |
@@ -2627,8 +2964,6 @@ static int h264_slice_header_init(H264Context *h, int reinit) |
2627 | 2627 |
*/ |
2628 | 2628 |
static int decode_slice_header(H264Context *h, H264Context *h0) |
2629 | 2629 |
{ |
2630 |
- MpegEncContext *const s = &h->s; |
|
2631 |
- MpegEncContext *const s0 = &h0->s; |
|
2632 | 2630 |
unsigned int first_mb_in_slice; |
2633 | 2631 |
unsigned int pps_id; |
2634 | 2632 |
int num_ref_idx_active_override_flag, max_refs, ret; |
... | ... |
@@ -2637,10 +2972,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2637 | 2637 |
int last_pic_structure, last_pic_droppable; |
2638 | 2638 |
int needs_reinit = 0; |
2639 | 2639 |
|
2640 |
- s->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; |
|
2641 |
- s->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; |
|
2640 |
+ h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; |
|
2641 |
+ h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; |
|
2642 | 2642 |
|
2643 |
- first_mb_in_slice = get_ue_golomb(&s->gb); |
|
2643 |
+ first_mb_in_slice = get_ue_golomb(&h->gb); |
|
2644 | 2644 |
|
2645 | 2645 |
if (first_mb_in_slice == 0) { // FIXME better field boundary detection |
2646 | 2646 |
if (h0->current_slice && FIELD_PICTURE) { |
... | ... |
@@ -2648,21 +2983,21 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2648 | 2648 |
} |
2649 | 2649 |
|
2650 | 2650 |
h0->current_slice = 0; |
2651 |
- if (!s0->first_field) { |
|
2652 |
- if (s->current_picture_ptr && !s->droppable && |
|
2653 |
- s->current_picture_ptr->owner2 == s) { |
|
2654 |
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, |
|
2655 |
- s->picture_structure == PICT_BOTTOM_FIELD); |
|
2651 |
+ if (!h0->first_field) { |
|
2652 |
+ if (h->cur_pic_ptr && !h->droppable && |
|
2653 |
+ h->cur_pic_ptr->owner2 == h) { |
|
2654 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, |
|
2655 |
+ h->picture_structure == PICT_BOTTOM_FIELD); |
|
2656 | 2656 |
} |
2657 |
- s->current_picture_ptr = NULL; |
|
2657 |
+ h->cur_pic_ptr = NULL; |
|
2658 | 2658 |
} |
2659 | 2659 |
} |
2660 | 2660 |
|
2661 |
- slice_type = get_ue_golomb_31(&s->gb); |
|
2661 |
+ slice_type = get_ue_golomb_31(&h->gb); |
|
2662 | 2662 |
if (slice_type > 9) { |
2663 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2663 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2664 | 2664 |
"slice type too large (%d) at %d %d\n", |
2665 |
- h->slice_type, s->mb_x, s->mb_y); |
|
2665 |
+ h->slice_type, h->mb_x, h->mb_y); |
|
2666 | 2666 |
return -1; |
2667 | 2667 |
} |
2668 | 2668 |
if (slice_type > 4) { |
... | ... |
@@ -2680,15 +3015,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2680 | 2680 |
h->slice_type_nos = slice_type & 3; |
2681 | 2681 |
|
2682 | 2682 |
// to make a few old functions happy, it's wrong though |
2683 |
- s->pict_type = h->slice_type; |
|
2683 |
+ h->pict_type = h->slice_type; |
|
2684 | 2684 |
|
2685 |
- pps_id = get_ue_golomb(&s->gb); |
|
2685 |
+ pps_id = get_ue_golomb(&h->gb); |
|
2686 | 2686 |
if (pps_id >= MAX_PPS_COUNT) { |
2687 |
- av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n"); |
|
2687 |
+ av_log(h->avctx, AV_LOG_ERROR, "pps_id out of range\n"); |
|
2688 | 2688 |
return -1; |
2689 | 2689 |
} |
2690 | 2690 |
if (!h0->pps_buffers[pps_id]) { |
2691 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2691 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2692 | 2692 |
"non-existing PPS %u referenced\n", |
2693 | 2693 |
pps_id); |
2694 | 2694 |
return -1; |
... | ... |
@@ -2696,77 +3031,77 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2696 | 2696 |
h->pps = *h0->pps_buffers[pps_id]; |
2697 | 2697 |
|
2698 | 2698 |
if (!h0->sps_buffers[h->pps.sps_id]) { |
2699 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2699 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2700 | 2700 |
"non-existing SPS %u referenced\n", |
2701 | 2701 |
h->pps.sps_id); |
2702 | 2702 |
return -1; |
2703 | 2703 |
} |
2704 | 2704 |
|
2705 | 2705 |
if (h->pps.sps_id != h->current_sps_id || |
2706 |
- h->context_reinitialized || |
|
2707 | 2706 |
h0->sps_buffers[h->pps.sps_id]->new) { |
2708 |
- SPS *new_sps = h0->sps_buffers[h->pps.sps_id]; |
|
2709 |
- |
|
2710 | 2707 |
h0->sps_buffers[h->pps.sps_id]->new = 0; |
2711 | 2708 |
|
2712 |
- if (h->sps.chroma_format_idc != new_sps->chroma_format_idc || |
|
2713 |
- h->sps.bit_depth_luma != new_sps->bit_depth_luma) |
|
2714 |
- needs_reinit = 1; |
|
2715 |
- |
|
2716 | 2709 |
h->current_sps_id = h->pps.sps_id; |
2717 | 2710 |
h->sps = *h0->sps_buffers[h->pps.sps_id]; |
2718 | 2711 |
|
2712 |
+ if (h->bit_depth_luma != h->sps.bit_depth_luma || |
|
2713 |
+ h->chroma_format_idc != h->sps.chroma_format_idc) { |
|
2714 |
+ h->bit_depth_luma = h->sps.bit_depth_luma; |
|
2715 |
+ h->chroma_format_idc = h->sps.chroma_format_idc; |
|
2716 |
+ needs_reinit = 1; |
|
2717 |
+ } |
|
2719 | 2718 |
if ((ret = h264_set_parameter_from_sps(h)) < 0) |
2720 | 2719 |
return ret; |
2721 | 2720 |
} |
2722 | 2721 |
|
2723 |
- s->avctx->profile = ff_h264_get_profile(&h->sps); |
|
2724 |
- s->avctx->level = h->sps.level_idc; |
|
2725 |
- s->avctx->refs = h->sps.ref_frame_count; |
|
2722 |
+ h->avctx->profile = ff_h264_get_profile(&h->sps); |
|
2723 |
+ h->avctx->level = h->sps.level_idc; |
|
2724 |
+ h->avctx->refs = h->sps.ref_frame_count; |
|
2726 | 2725 |
|
2727 |
- if (s->mb_width != h->sps.mb_width || |
|
2728 |
- s->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)) |
|
2726 |
+ if (h->mb_width != h->sps.mb_width || |
|
2727 |
+ h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)) |
|
2729 | 2728 |
needs_reinit = 1; |
2730 | 2729 |
|
2731 |
- s->mb_width = h->sps.mb_width; |
|
2732 |
- s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); |
|
2730 |
+ h->mb_width = h->sps.mb_width; |
|
2731 |
+ h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); |
|
2732 |
+ h->mb_num = h->mb_width * h->mb_height; |
|
2733 |
+ h->mb_stride = h->mb_width + 1; |
|
2733 | 2734 |
|
2734 |
- h->b_stride = s->mb_width * 4; |
|
2735 |
+ h->b_stride = h->mb_width * 4; |
|
2735 | 2736 |
|
2736 |
- s->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p |
|
2737 |
+ h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p |
|
2737 | 2738 |
|
2738 |
- s->width = 16 * s->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1); |
|
2739 |
+ h->width = 16 * h->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1); |
|
2739 | 2740 |
if (h->sps.frame_mbs_only_flag) |
2740 |
- s->height = 16 * s->mb_height - (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1); |
|
2741 |
+ h->height = 16 * h->mb_height - (1 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1); |
|
2741 | 2742 |
else |
2742 |
- s->height = 16 * s->mb_height - (2 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1); |
|
2743 |
+ h->height = 16 * h->mb_height - (2 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1); |
|
2743 | 2744 |
|
2744 |
- if (FFALIGN(s->avctx->width, 16) == s->width && |
|
2745 |
- FFALIGN(s->avctx->height, 16) == s->height) { |
|
2746 |
- s->width = s->avctx->width; |
|
2747 |
- s->height = s->avctx->height; |
|
2745 |
+ if (FFALIGN(h->avctx->width, 16) == h->width && |
|
2746 |
+ FFALIGN(h->avctx->height, 16) == h->height) { |
|
2747 |
+ h->width = h->avctx->width; |
|
2748 |
+ h->height = h->avctx->height; |
|
2748 | 2749 |
} |
2749 | 2750 |
|
2750 | 2751 |
if (h->sps.video_signal_type_present_flag) { |
2751 |
- s->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG |
|
2752 |
+ h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG |
|
2752 | 2753 |
: AVCOL_RANGE_MPEG; |
2753 | 2754 |
if (h->sps.colour_description_present_flag) { |
2754 |
- if (s->avctx->colorspace != h->sps.colorspace) |
|
2755 |
+ if (h->avctx->colorspace != h->sps.colorspace) |
|
2755 | 2756 |
needs_reinit = 1; |
2756 |
- s->avctx->color_primaries = h->sps.color_primaries; |
|
2757 |
- s->avctx->color_trc = h->sps.color_trc; |
|
2758 |
- s->avctx->colorspace = h->sps.colorspace; |
|
2757 |
+ h->avctx->color_primaries = h->sps.color_primaries; |
|
2758 |
+ h->avctx->color_trc = h->sps.color_trc; |
|
2759 |
+ h->avctx->colorspace = h->sps.colorspace; |
|
2759 | 2760 |
} |
2760 | 2761 |
} |
2761 | 2762 |
|
2762 |
- if (s->context_initialized && |
|
2763 |
- (s->width != s->avctx->width || |
|
2764 |
- s->height != s->avctx->height || |
|
2765 |
- needs_reinit || |
|
2766 |
- av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) { |
|
2763 |
+ if (h->context_initialized && |
|
2764 |
+ (h->width != h->avctx->width || |
|
2765 |
+ h->height != h->avctx->height || |
|
2766 |
+ needs_reinit)) { |
|
2767 | 2767 |
|
2768 | 2768 |
if (h != h0) { |
2769 |
- av_log(s->avctx, AV_LOG_ERROR, "changing width/height on " |
|
2769 |
+ av_log(h->avctx, AV_LOG_ERROR, "changing width/height on " |
|
2770 | 2770 |
"slice %d\n", h0->current_slice + 1); |
2771 | 2771 |
return AVERROR_INVALIDDATA; |
2772 | 2772 |
} |
... | ... |
@@ -2775,31 +3110,30 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2775 | 2775 |
|
2776 | 2776 |
if ((ret = get_pixel_format(h)) < 0) |
2777 | 2777 |
return ret; |
2778 |
- s->avctx->pix_fmt = ret; |
|
2778 |
+ h->avctx->pix_fmt = ret; |
|
2779 | 2779 |
|
2780 |
- av_log(h->s.avctx, AV_LOG_INFO, "Reinit context to %dx%d, " |
|
2781 |
- "pix_fmt: %d\n", s->width, s->height, s->avctx->pix_fmt); |
|
2780 |
+ av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " |
|
2781 |
+ "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt); |
|
2782 | 2782 |
|
2783 | 2783 |
if ((ret = h264_slice_header_init(h, 1)) < 0) { |
2784 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2784 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2785 | 2785 |
"h264_slice_header_init() failed\n"); |
2786 | 2786 |
return ret; |
2787 | 2787 |
} |
2788 |
- h->context_reinitialized = 1; |
|
2789 | 2788 |
} |
2790 |
- if (!s->context_initialized) { |
|
2789 |
+ if (!h->context_initialized) { |
|
2791 | 2790 |
if (h != h0) { |
2792 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2791 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2793 | 2792 |
"Cannot (re-)initialize context during parallel decoding.\n"); |
2794 | 2793 |
return -1; |
2795 | 2794 |
} |
2796 | 2795 |
|
2797 | 2796 |
if ((ret = get_pixel_format(h)) < 0) |
2798 | 2797 |
return ret; |
2799 |
- s->avctx->pix_fmt = ret; |
|
2798 |
+ h->avctx->pix_fmt = ret; |
|
2800 | 2799 |
|
2801 | 2800 |
if ((ret = h264_slice_header_init(h, 0)) < 0) { |
2802 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2801 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2803 | 2802 |
"h264_slice_header_init() failed\n"); |
2804 | 2803 |
return ret; |
2805 | 2804 |
} |
... | ... |
@@ -2810,37 +3144,37 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2810 | 2810 |
init_dequant_tables(h); |
2811 | 2811 |
} |
2812 | 2812 |
|
2813 |
- h->frame_num = get_bits(&s->gb, h->sps.log2_max_frame_num); |
|
2813 |
+ h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); |
|
2814 | 2814 |
|
2815 | 2815 |
h->mb_mbaff = 0; |
2816 | 2816 |
h->mb_aff_frame = 0; |
2817 |
- last_pic_structure = s0->picture_structure; |
|
2818 |
- last_pic_droppable = s0->droppable; |
|
2819 |
- s->droppable = h->nal_ref_idc == 0; |
|
2817 |
+ last_pic_structure = h0->picture_structure; |
|
2818 |
+ last_pic_droppable = h0->droppable; |
|
2819 |
+ h->droppable = h->nal_ref_idc == 0; |
|
2820 | 2820 |
if (h->sps.frame_mbs_only_flag) { |
2821 |
- s->picture_structure = PICT_FRAME; |
|
2821 |
+ h->picture_structure = PICT_FRAME; |
|
2822 | 2822 |
} else { |
2823 |
- if (get_bits1(&s->gb)) { // field_pic_flag |
|
2824 |
- s->picture_structure = PICT_TOP_FIELD + get_bits1(&s->gb); // bottom_field_flag |
|
2823 |
+ if (get_bits1(&h->gb)) { // field_pic_flag |
|
2824 |
+ h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag |
|
2825 | 2825 |
} else { |
2826 |
- s->picture_structure = PICT_FRAME; |
|
2826 |
+ h->picture_structure = PICT_FRAME; |
|
2827 | 2827 |
h->mb_aff_frame = h->sps.mb_aff; |
2828 | 2828 |
} |
2829 | 2829 |
} |
2830 |
- h->mb_field_decoding_flag = s->picture_structure != PICT_FRAME; |
|
2830 |
+ h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; |
|
2831 | 2831 |
|
2832 | 2832 |
if (h0->current_slice != 0) { |
2833 |
- if (last_pic_structure != s->picture_structure || |
|
2834 |
- last_pic_droppable != s->droppable) { |
|
2835 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
2833 |
+ if (last_pic_structure != h->picture_structure || |
|
2834 |
+ last_pic_droppable != h->droppable) { |
|
2835 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2836 | 2836 |
"Changing field mode (%d -> %d) between slices is not allowed\n", |
2837 |
- last_pic_structure, s->picture_structure); |
|
2838 |
- s->picture_structure = last_pic_structure; |
|
2839 |
- s->droppable = last_pic_droppable; |
|
2837 |
+ last_pic_structure, h->picture_structure); |
|
2838 |
+ h->picture_structure = last_pic_structure; |
|
2839 |
+ h->droppable = last_pic_droppable; |
|
2840 | 2840 |
return AVERROR_INVALIDDATA; |
2841 |
- } else if (!s0->current_picture_ptr) { |
|
2842 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
2843 |
- "unset current_picture_ptr on %d. slice\n", |
|
2841 |
+ } else if (!h0->cur_pic_ptr) { |
|
2842 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2843 |
+ "unset cur_pic_ptr on %d. slice\n", |
|
2844 | 2844 |
h0->current_slice + 1); |
2845 | 2845 |
return AVERROR_INVALIDDATA; |
2846 | 2846 |
} |
... | ... |
@@ -2868,53 +3202,53 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2868 | 2868 |
* decode frames as "finished". |
2869 | 2869 |
* We have to do that before the "dummy" in-between frame allocation, |
2870 | 2870 |
* since that can modify s->current_picture_ptr. */ |
2871 |
- if (s0->first_field) { |
|
2872 |
- assert(s0->current_picture_ptr); |
|
2873 |
- assert(s0->current_picture_ptr->f.data[0]); |
|
2874 |
- assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF); |
|
2871 |
+ if (h0->first_field) { |
|
2872 |
+ assert(h0->cur_pic_ptr); |
|
2873 |
+ assert(h0->cur_pic_ptr->f.data[0]); |
|
2874 |
+ assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF); |
|
2875 | 2875 |
|
2876 | 2876 |
/* Mark old field/frame as completed */ |
2877 |
- if (!last_pic_droppable && s0->current_picture_ptr->owner2 == s0) { |
|
2878 |
- ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, |
|
2877 |
+ if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) { |
|
2878 |
+ ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX, |
|
2879 | 2879 |
last_pic_structure == PICT_BOTTOM_FIELD); |
2880 | 2880 |
} |
2881 | 2881 |
|
2882 | 2882 |
/* figure out if we have a complementary field pair */ |
2883 |
- if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) { |
|
2883 |
+ if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) { |
|
2884 | 2884 |
/* Previous field is unmatched. Don't display it, but let it |
2885 | 2885 |
* remain for reference if marked as such. */ |
2886 | 2886 |
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { |
2887 |
- ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, |
|
2887 |
+ ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX, |
|
2888 | 2888 |
last_pic_structure == PICT_TOP_FIELD); |
2889 | 2889 |
} |
2890 | 2890 |
} else { |
2891 |
- if (s0->current_picture_ptr->frame_num != h->frame_num) { |
|
2891 |
+ if (h0->cur_pic_ptr->frame_num != h->frame_num) { |
|
2892 | 2892 |
/* This and previous field were reference, but had |
2893 | 2893 |
* different frame_nums. Consider this field first in |
2894 | 2894 |
* pair. Throw away previous field except for reference |
2895 | 2895 |
* purposes. */ |
2896 | 2896 |
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { |
2897 |
- ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, |
|
2897 |
+ ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX, |
|
2898 | 2898 |
last_pic_structure == PICT_TOP_FIELD); |
2899 | 2899 |
} |
2900 | 2900 |
} else { |
2901 | 2901 |
/* Second field in complementary pair */ |
2902 | 2902 |
if (!((last_pic_structure == PICT_TOP_FIELD && |
2903 |
- s->picture_structure == PICT_BOTTOM_FIELD) || |
|
2903 |
+ h->picture_structure == PICT_BOTTOM_FIELD) || |
|
2904 | 2904 |
(last_pic_structure == PICT_BOTTOM_FIELD && |
2905 |
- s->picture_structure == PICT_TOP_FIELD))) { |
|
2906 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
2905 |
+ h->picture_structure == PICT_TOP_FIELD))) { |
|
2906 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2907 | 2907 |
"Invalid field mode combination %d/%d\n", |
2908 |
- last_pic_structure, s->picture_structure); |
|
2909 |
- s->picture_structure = last_pic_structure; |
|
2910 |
- s->droppable = last_pic_droppable; |
|
2908 |
+ last_pic_structure, h->picture_structure); |
|
2909 |
+ h->picture_structure = last_pic_structure; |
|
2910 |
+ h->droppable = last_pic_droppable; |
|
2911 | 2911 |
return AVERROR_INVALIDDATA; |
2912 |
- } else if (last_pic_droppable != s->droppable) { |
|
2913 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
2912 |
+ } else if (last_pic_droppable != h->droppable) { |
|
2913 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2914 | 2914 |
"Cannot combine reference and non-reference fields in the same frame\n"); |
2915 |
- av_log_ask_for_sample(s->avctx, NULL); |
|
2916 |
- s->picture_structure = last_pic_structure; |
|
2917 |
- s->droppable = last_pic_droppable; |
|
2915 |
+ av_log_ask_for_sample(h->avctx, NULL); |
|
2916 |
+ h->picture_structure = last_pic_structure; |
|
2917 |
+ h->droppable = last_pic_droppable; |
|
2918 | 2918 |
return AVERROR_PATCHWELCOME; |
2919 | 2919 |
} |
2920 | 2920 |
|
... | ... |
@@ -2924,7 +3258,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2924 | 2924 |
* on that first field (or if that was us, we just did that above). |
2925 | 2925 |
* By taking ownership, we assign responsibility to ourselves to |
2926 | 2926 |
* report progress on the second field. */ |
2927 |
- s0->current_picture_ptr->owner2 = s0; |
|
2927 |
+ h0->cur_pic_ptr->owner2 = h0; |
|
2928 | 2928 |
} |
2929 | 2929 |
} |
2930 | 2930 |
} |
... | ... |
@@ -2932,20 +3266,20 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2932 | 2932 |
while (h->frame_num != h->prev_frame_num && |
2933 | 2933 |
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { |
2934 | 2934 |
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; |
2935 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", |
|
2935 |
+ av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", |
|
2936 | 2936 |
h->frame_num, h->prev_frame_num); |
2937 | 2937 |
if (ff_h264_frame_start(h) < 0) |
2938 | 2938 |
return -1; |
2939 | 2939 |
h->prev_frame_num++; |
2940 | 2940 |
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; |
2941 |
- s->current_picture_ptr->frame_num = h->prev_frame_num; |
|
2942 |
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0); |
|
2943 |
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1); |
|
2941 |
+ h->cur_pic_ptr->frame_num = h->prev_frame_num; |
|
2942 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0); |
|
2943 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1); |
|
2944 | 2944 |
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 && |
2945 |
- s->avctx->err_recognition & AV_EF_EXPLODE) |
|
2945 |
+ h->avctx->err_recognition & AV_EF_EXPLODE) |
|
2946 | 2946 |
return ret; |
2947 | 2947 |
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 && |
2948 |
- (s->avctx->err_recognition & AV_EF_EXPLODE)) |
|
2948 |
+ (h->avctx->err_recognition & AV_EF_EXPLODE)) |
|
2949 | 2949 |
return AVERROR_INVALIDDATA; |
2950 | 2950 |
/* Error concealment: if a ref is missing, copy the previous ref in its place. |
2951 | 2951 |
* FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions |
... | ... |
@@ -2957,7 +3291,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2957 | 2957 |
if (prev) { |
2958 | 2958 |
av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, |
2959 | 2959 |
(const uint8_t **)prev->f.data, prev->f.linesize, |
2960 |
- s->avctx->pix_fmt, s->mb_width * 16, s->mb_height * 16); |
|
2960 |
+ h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); |
|
2961 | 2961 |
h->short_ref[0]->poc = prev->poc + 2; |
2962 | 2962 |
} |
2963 | 2963 |
h->short_ref[0]->frame_num = h->prev_frame_num; |
... | ... |
@@ -2967,61 +3301,61 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
2967 | 2967 |
/* See if we have a decoded first field looking for a pair... |
2968 | 2968 |
* We're using that to see whether to continue decoding in that |
2969 | 2969 |
* frame, or to allocate a new one. */ |
2970 |
- if (s0->first_field) { |
|
2971 |
- assert(s0->current_picture_ptr); |
|
2972 |
- assert(s0->current_picture_ptr->f.data[0]); |
|
2973 |
- assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF); |
|
2970 |
+ if (h0->first_field) { |
|
2971 |
+ assert(h0->cur_pic_ptr); |
|
2972 |
+ assert(h0->cur_pic_ptr->f.data[0]); |
|
2973 |
+ assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF); |
|
2974 | 2974 |
|
2975 | 2975 |
/* figure out if we have a complementary field pair */ |
2976 |
- if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) { |
|
2976 |
+ if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) { |
|
2977 | 2977 |
/* Previous field is unmatched. Don't display it, but let it |
2978 | 2978 |
* remain for reference if marked as such. */ |
2979 |
- s0->current_picture_ptr = NULL; |
|
2980 |
- s0->first_field = FIELD_PICTURE; |
|
2979 |
+ h0->cur_pic_ptr = NULL; |
|
2980 |
+ h0->first_field = FIELD_PICTURE; |
|
2981 | 2981 |
} else { |
2982 |
- if (s0->current_picture_ptr->frame_num != h->frame_num) { |
|
2982 |
+ if (h0->cur_pic_ptr->frame_num != h->frame_num) { |
|
2983 | 2983 |
/* This and the previous field had different frame_nums. |
2984 | 2984 |
* Consider this field first in pair. Throw away previous |
2985 | 2985 |
* one except for reference purposes. */ |
2986 |
- s0->first_field = 1; |
|
2987 |
- s0->current_picture_ptr = NULL; |
|
2986 |
+ h0->first_field = 1; |
|
2987 |
+ h0->cur_pic_ptr = NULL; |
|
2988 | 2988 |
} else { |
2989 | 2989 |
/* Second field in complementary pair */ |
2990 |
- s0->first_field = 0; |
|
2990 |
+ h0->first_field = 0; |
|
2991 | 2991 |
} |
2992 | 2992 |
} |
2993 | 2993 |
} else { |
2994 | 2994 |
/* Frame or first field in a potentially complementary pair */ |
2995 |
- s0->first_field = FIELD_PICTURE; |
|
2995 |
+ h0->first_field = FIELD_PICTURE; |
|
2996 | 2996 |
} |
2997 | 2997 |
|
2998 |
- if (!FIELD_PICTURE || s0->first_field) { |
|
2998 |
+ if (!FIELD_PICTURE || h0->first_field) { |
|
2999 | 2999 |
if (ff_h264_frame_start(h) < 0) { |
3000 |
- s0->first_field = 0; |
|
3000 |
+ h0->first_field = 0; |
|
3001 | 3001 |
return -1; |
3002 | 3002 |
} |
3003 | 3003 |
} else { |
3004 |
- ff_release_unused_pictures(s, 0); |
|
3004 |
+ release_unused_pictures(h, 0); |
|
3005 | 3005 |
} |
3006 | 3006 |
} |
3007 | 3007 |
if (h != h0 && (ret = clone_slice(h, h0)) < 0) |
3008 | 3008 |
return ret; |
3009 | 3009 |
|
3010 |
- s->current_picture_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup |
|
3010 |
+ h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup |
|
3011 | 3011 |
|
3012 |
- assert(s->mb_num == s->mb_width * s->mb_height); |
|
3013 |
- if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num || |
|
3014 |
- first_mb_in_slice >= s->mb_num) { |
|
3015 |
- av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); |
|
3012 |
+ assert(h->mb_num == h->mb_width * h->mb_height); |
|
3013 |
+ if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= h->mb_num || |
|
3014 |
+ first_mb_in_slice >= h->mb_num) { |
|
3015 |
+ av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); |
|
3016 | 3016 |
return -1; |
3017 | 3017 |
} |
3018 |
- s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width; |
|
3019 |
- s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE; |
|
3020 |
- if (s->picture_structure == PICT_BOTTOM_FIELD) |
|
3021 |
- s->resync_mb_y = s->mb_y = s->mb_y + 1; |
|
3022 |
- assert(s->mb_y < s->mb_height); |
|
3018 |
+ h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; |
|
3019 |
+ h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE; |
|
3020 |
+ if (h->picture_structure == PICT_BOTTOM_FIELD) |
|
3021 |
+ h->resync_mb_y = h->mb_y = h->mb_y + 1; |
|
3022 |
+ assert(h->mb_y < h->mb_height); |
|
3023 | 3023 |
|
3024 |
- if (s->picture_structure == PICT_FRAME) { |
|
3024 |
+ if (h->picture_structure == PICT_FRAME) { |
|
3025 | 3025 |
h->curr_pic_num = h->frame_num; |
3026 | 3026 |
h->max_pic_num = 1 << h->sps.log2_max_frame_num; |
3027 | 3027 |
} else { |
... | ... |
@@ -3030,26 +3364,26 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3030 | 3030 |
} |
3031 | 3031 |
|
3032 | 3032 |
if (h->nal_unit_type == NAL_IDR_SLICE) |
3033 |
- get_ue_golomb(&s->gb); /* idr_pic_id */ |
|
3033 |
+ get_ue_golomb(&h->gb); /* idr_pic_id */ |
|
3034 | 3034 |
|
3035 | 3035 |
if (h->sps.poc_type == 0) { |
3036 |
- h->poc_lsb = get_bits(&s->gb, h->sps.log2_max_poc_lsb); |
|
3036 |
+ h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); |
|
3037 | 3037 |
|
3038 |
- if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME) |
|
3039 |
- h->delta_poc_bottom = get_se_golomb(&s->gb); |
|
3038 |
+ if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) |
|
3039 |
+ h->delta_poc_bottom = get_se_golomb(&h->gb); |
|
3040 | 3040 |
} |
3041 | 3041 |
|
3042 | 3042 |
if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { |
3043 |
- h->delta_poc[0] = get_se_golomb(&s->gb); |
|
3043 |
+ h->delta_poc[0] = get_se_golomb(&h->gb); |
|
3044 | 3044 |
|
3045 |
- if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME) |
|
3046 |
- h->delta_poc[1] = get_se_golomb(&s->gb); |
|
3045 |
+ if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) |
|
3046 |
+ h->delta_poc[1] = get_se_golomb(&h->gb); |
|
3047 | 3047 |
} |
3048 | 3048 |
|
3049 | 3049 |
init_poc(h); |
3050 | 3050 |
|
3051 | 3051 |
if (h->pps.redundant_pic_cnt_present) |
3052 |
- h->redundant_pic_count = get_ue_golomb(&s->gb); |
|
3052 |
+ h->redundant_pic_count = get_ue_golomb(&h->gb); |
|
3053 | 3053 |
|
3054 | 3054 |
// set defaults, might be overridden a few lines later |
3055 | 3055 |
h->ref_count[0] = h->pps.ref_count[0]; |
... | ... |
@@ -3057,15 +3391,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3057 | 3057 |
|
3058 | 3058 |
if (h->slice_type_nos != AV_PICTURE_TYPE_I) { |
3059 | 3059 |
if (h->slice_type_nos == AV_PICTURE_TYPE_B) |
3060 |
- h->direct_spatial_mv_pred = get_bits1(&s->gb); |
|
3061 |
- num_ref_idx_active_override_flag = get_bits1(&s->gb); |
|
3060 |
+ h->direct_spatial_mv_pred = get_bits1(&h->gb); |
|
3061 |
+ num_ref_idx_active_override_flag = get_bits1(&h->gb); |
|
3062 | 3062 |
|
3063 | 3063 |
if (num_ref_idx_active_override_flag) { |
3064 |
- h->ref_count[0] = get_ue_golomb(&s->gb) + 1; |
|
3064 |
+ h->ref_count[0] = get_ue_golomb(&h->gb) + 1; |
|
3065 | 3065 |
if (h->ref_count[0] < 1) |
3066 | 3066 |
return AVERROR_INVALIDDATA; |
3067 | 3067 |
if (h->slice_type_nos == AV_PICTURE_TYPE_B) { |
3068 |
- h->ref_count[1] = get_ue_golomb(&s->gb) + 1; |
|
3068 |
+ h->ref_count[1] = get_ue_golomb(&h->gb) + 1; |
|
3069 | 3069 |
if (h->ref_count[1] < 1) |
3070 | 3070 |
return AVERROR_INVALIDDATA; |
3071 | 3071 |
} |
... | ... |
@@ -3078,10 +3412,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3078 | 3078 |
} else |
3079 | 3079 |
h->list_count = 0; |
3080 | 3080 |
|
3081 |
- max_refs = s->picture_structure == PICT_FRAME ? 16 : 32; |
|
3081 |
+ max_refs = h->picture_structure == PICT_FRAME ? 16 : 32; |
|
3082 | 3082 |
|
3083 | 3083 |
if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) { |
3084 |
- av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n"); |
|
3084 |
+ av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n"); |
|
3085 | 3085 |
h->ref_count[0] = h->ref_count[1] = 1; |
3086 | 3086 |
return AVERROR_INVALIDDATA; |
3087 | 3087 |
} |
... | ... |
@@ -3095,19 +3429,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3095 | 3095 |
return -1; |
3096 | 3096 |
} |
3097 | 3097 |
|
3098 |
- if (h->slice_type_nos != AV_PICTURE_TYPE_I) { |
|
3099 |
- s->last_picture_ptr = &h->ref_list[0][0]; |
|
3100 |
- s->last_picture_ptr->owner2 = s; |
|
3101 |
- s->er.last_pic = s->last_picture_ptr; |
|
3102 |
- ff_copy_picture(&s->last_picture, s->last_picture_ptr); |
|
3103 |
- } |
|
3104 |
- if (h->slice_type_nos == AV_PICTURE_TYPE_B) { |
|
3105 |
- s->next_picture_ptr = &h->ref_list[1][0]; |
|
3106 |
- s->next_picture_ptr->owner2 = s; |
|
3107 |
- s->er.next_pic = s->next_picture_ptr; |
|
3108 |
- ff_copy_picture(&s->next_picture, s->next_picture_ptr); |
|
3109 |
- } |
|
3110 |
- |
|
3111 | 3098 |
if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || |
3112 | 3099 |
(h->pps.weighted_bipred_idc == 1 && |
3113 | 3100 |
h->slice_type_nos == AV_PICTURE_TYPE_B)) |
... | ... |
@@ -3129,10 +3450,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3129 | 3129 |
// further down the line. This may break decoding if the first slice is |
3130 | 3130 |
// corrupt, thus we only do this if frame-mt is enabled. |
3131 | 3131 |
if (h->nal_ref_idc && |
3132 |
- ff_h264_decode_ref_pic_marking(h0, &s->gb, |
|
3133 |
- !(s->avctx->active_thread_type & FF_THREAD_FRAME) || |
|
3132 |
+ ff_h264_decode_ref_pic_marking(h0, &h->gb, |
|
3133 |
+ !(h->avctx->active_thread_type & FF_THREAD_FRAME) || |
|
3134 | 3134 |
h0->current_slice == 0) < 0 && |
3135 |
- (s->avctx->err_recognition & AV_EF_EXPLODE)) |
|
3135 |
+ (h->avctx->err_recognition & AV_EF_EXPLODE)) |
|
3136 | 3136 |
return AVERROR_INVALIDDATA; |
3137 | 3137 |
|
3138 | 3138 |
if (FRAME_MBAFF) { |
... | ... |
@@ -3149,37 +3470,37 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3149 | 3149 |
ff_h264_direct_ref_list_init(h); |
3150 | 3150 |
|
3151 | 3151 |
if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { |
3152 |
- tmp = get_ue_golomb_31(&s->gb); |
|
3152 |
+ tmp = get_ue_golomb_31(&h->gb); |
|
3153 | 3153 |
if (tmp > 2) { |
3154 |
- av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); |
|
3154 |
+ av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); |
|
3155 | 3155 |
return -1; |
3156 | 3156 |
} |
3157 | 3157 |
h->cabac_init_idc = tmp; |
3158 | 3158 |
} |
3159 | 3159 |
|
3160 | 3160 |
h->last_qscale_diff = 0; |
3161 |
- tmp = h->pps.init_qp + get_se_golomb(&s->gb); |
|
3161 |
+ tmp = h->pps.init_qp + get_se_golomb(&h->gb); |
|
3162 | 3162 |
if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { |
3163 |
- av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); |
|
3163 |
+ av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); |
|
3164 | 3164 |
return -1; |
3165 | 3165 |
} |
3166 |
- s->qscale = tmp; |
|
3167 |
- h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); |
|
3168 |
- h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); |
|
3166 |
+ h->qscale = tmp; |
|
3167 |
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); |
|
3168 |
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); |
|
3169 | 3169 |
// FIXME qscale / qp ... stuff |
3170 | 3170 |
if (h->slice_type == AV_PICTURE_TYPE_SP) |
3171 |
- get_bits1(&s->gb); /* sp_for_switch_flag */ |
|
3171 |
+ get_bits1(&h->gb); /* sp_for_switch_flag */ |
|
3172 | 3172 |
if (h->slice_type == AV_PICTURE_TYPE_SP || |
3173 | 3173 |
h->slice_type == AV_PICTURE_TYPE_SI) |
3174 |
- get_se_golomb(&s->gb); /* slice_qs_delta */ |
|
3174 |
+ get_se_golomb(&h->gb); /* slice_qs_delta */ |
|
3175 | 3175 |
|
3176 | 3176 |
h->deblocking_filter = 1; |
3177 | 3177 |
h->slice_alpha_c0_offset = 52; |
3178 | 3178 |
h->slice_beta_offset = 52; |
3179 | 3179 |
if (h->pps.deblocking_filter_parameters_present) { |
3180 |
- tmp = get_ue_golomb_31(&s->gb); |
|
3180 |
+ tmp = get_ue_golomb_31(&h->gb); |
|
3181 | 3181 |
if (tmp > 2) { |
3182 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
3182 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3183 | 3183 |
"deblocking_filter_idc %u out of range\n", tmp); |
3184 | 3184 |
return -1; |
3185 | 3185 |
} |
... | ... |
@@ -3188,11 +3509,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3188 | 3188 |
h->deblocking_filter ^= 1; // 1<->0 |
3189 | 3189 |
|
3190 | 3190 |
if (h->deblocking_filter) { |
3191 |
- h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1; |
|
3192 |
- h->slice_beta_offset += get_se_golomb(&s->gb) << 1; |
|
3191 |
+ h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1; |
|
3192 |
+ h->slice_beta_offset += get_se_golomb(&h->gb) << 1; |
|
3193 | 3193 |
if (h->slice_alpha_c0_offset > 104U || |
3194 | 3194 |
h->slice_beta_offset > 104U) { |
3195 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
3195 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3196 | 3196 |
"deblocking filter parameters %d %d out of range\n", |
3197 | 3197 |
h->slice_alpha_c0_offset, h->slice_beta_offset); |
3198 | 3198 |
return -1; |
... | ... |
@@ -3200,29 +3521,29 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3200 | 3200 |
} |
3201 | 3201 |
} |
3202 | 3202 |
|
3203 |
- if (s->avctx->skip_loop_filter >= AVDISCARD_ALL || |
|
3204 |
- (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && |
|
3203 |
+ if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || |
|
3204 |
+ (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && |
|
3205 | 3205 |
h->slice_type_nos != AV_PICTURE_TYPE_I) || |
3206 |
- (s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && |
|
3206 |
+ (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && |
|
3207 | 3207 |
h->slice_type_nos == AV_PICTURE_TYPE_B) || |
3208 |
- (s->avctx->skip_loop_filter >= AVDISCARD_NONREF && |
|
3208 |
+ (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && |
|
3209 | 3209 |
h->nal_ref_idc == 0)) |
3210 | 3210 |
h->deblocking_filter = 0; |
3211 | 3211 |
|
3212 | 3212 |
if (h->deblocking_filter == 1 && h0->max_contexts > 1) { |
3213 |
- if (s->avctx->flags2 & CODEC_FLAG2_FAST) { |
|
3213 |
+ if (h->avctx->flags2 & CODEC_FLAG2_FAST) { |
|
3214 | 3214 |
/* Cheat slightly for speed: |
3215 | 3215 |
* Do not bother to deblock across slices. */ |
3216 | 3216 |
h->deblocking_filter = 2; |
3217 | 3217 |
} else { |
3218 | 3218 |
h0->max_contexts = 1; |
3219 | 3219 |
if (!h0->single_decode_warning) { |
3220 |
- av_log(s->avctx, AV_LOG_INFO, |
|
3220 |
+ av_log(h->avctx, AV_LOG_INFO, |
|
3221 | 3221 |
"Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); |
3222 | 3222 |
h0->single_decode_warning = 1; |
3223 | 3223 |
} |
3224 | 3224 |
if (h != h0) { |
3225 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
3225 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3226 | 3226 |
"Deblocking switched inside frame.\n"); |
3227 | 3227 |
return 1; |
3228 | 3228 |
} |
... | ... |
@@ -3238,7 +3559,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3238 | 3238 |
h0->last_slice_type = slice_type; |
3239 | 3239 |
h->slice_num = ++h0->current_slice; |
3240 | 3240 |
if (h->slice_num >= MAX_SLICES) { |
3241 |
- av_log(s->avctx, AV_LOG_ERROR, |
|
3241 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3242 | 3242 |
"Too many slices, increase MAX_SLICES and recompile\n"); |
3243 | 3243 |
} |
3244 | 3244 |
|
... | ... |
@@ -3276,26 +3597,26 @@ static int decode_slice_header(H264Context *h, H264Context *h0) |
3276 | 3276 |
} |
3277 | 3277 |
|
3278 | 3278 |
// FIXME: fix draw_edges + PAFF + frame threads |
3279 |
- h->emu_edge_width = (s->flags & CODEC_FLAG_EMU_EDGE || |
|
3279 |
+ h->emu_edge_width = (h->flags & CODEC_FLAG_EMU_EDGE || |
|
3280 | 3280 |
(!h->sps.frame_mbs_only_flag && |
3281 |
- s->avctx->active_thread_type)) |
|
3281 |
+ h->avctx->active_thread_type)) |
|
3282 | 3282 |
? 0 : 16; |
3283 | 3283 |
h->emu_edge_height = (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width; |
3284 | 3284 |
|
3285 |
- if (s->avctx->debug & FF_DEBUG_PICT_INFO) { |
|
3286 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
3285 |
+ if (h->avctx->debug & FF_DEBUG_PICT_INFO) { |
|
3286 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
3287 | 3287 |
"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", |
3288 | 3288 |
h->slice_num, |
3289 |
- (s->picture_structure == PICT_FRAME ? "F" : s->picture_structure == PICT_TOP_FIELD ? "T" : "B"), |
|
3289 |
+ (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), |
|
3290 | 3290 |
first_mb_in_slice, |
3291 | 3291 |
av_get_picture_type_char(h->slice_type), |
3292 | 3292 |
h->slice_type_fixed ? " fix" : "", |
3293 | 3293 |
h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "", |
3294 | 3294 |
pps_id, h->frame_num, |
3295 |
- s->current_picture_ptr->field_poc[0], |
|
3296 |
- s->current_picture_ptr->field_poc[1], |
|
3295 |
+ h->cur_pic_ptr->field_poc[0], |
|
3296 |
+ h->cur_pic_ptr->field_poc[1], |
|
3297 | 3297 |
h->ref_count[0], h->ref_count[1], |
3298 |
- s->qscale, |
|
3298 |
+ h->qscale, |
|
3299 | 3299 |
h->deblocking_filter, |
3300 | 3300 |
h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26, |
3301 | 3301 |
h->use_weight, |
... | ... |
@@ -3325,7 +3646,6 @@ int ff_h264_get_slice_type(const H264Context *h) |
3325 | 3325 |
} |
3326 | 3326 |
|
3327 | 3327 |
static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3328 |
- MpegEncContext *const s, |
|
3329 | 3328 |
int mb_type, int top_xy, |
3330 | 3329 |
int left_xy[LEFT_MBS], |
3331 | 3330 |
int top_type, |
... | ... |
@@ -3340,11 +3660,11 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3340 | 3340 |
const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride; |
3341 | 3341 |
const int b8_xy = 4 * top_xy + 2; |
3342 | 3342 |
int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2); |
3343 |
- AV_COPY128(mv_dst - 1 * 8, s->current_picture.f.motion_val[list][b_xy + 0]); |
|
3343 |
+ AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]); |
|
3344 | 3344 |
ref_cache[0 - 1 * 8] = |
3345 |
- ref_cache[1 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]]; |
|
3345 |
+ ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]]; |
|
3346 | 3346 |
ref_cache[2 - 1 * 8] = |
3347 |
- ref_cache[3 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]]; |
|
3347 |
+ ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]]; |
|
3348 | 3348 |
} else { |
3349 | 3349 |
AV_ZERO128(mv_dst - 1 * 8); |
3350 | 3350 |
AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u); |
... | ... |
@@ -3355,14 +3675,14 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3355 | 3355 |
const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3; |
3356 | 3356 |
const int b8_xy = 4 * left_xy[LTOP] + 1; |
3357 | 3357 |
int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2); |
3358 |
- AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride * 0]); |
|
3359 |
- AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride * 1]); |
|
3360 |
- AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride * 2]); |
|
3361 |
- AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride * 3]); |
|
3358 |
+ AV_COPY32(mv_dst - 1 + 0, h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]); |
|
3359 |
+ AV_COPY32(mv_dst - 1 + 8, h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]); |
|
3360 |
+ AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]); |
|
3361 |
+ AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]); |
|
3362 | 3362 |
ref_cache[-1 + 0] = |
3363 |
- ref_cache[-1 + 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 0]]; |
|
3363 |
+ ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]]; |
|
3364 | 3364 |
ref_cache[-1 + 16] = |
3365 |
- ref_cache[-1 + 24] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 1]]; |
|
3365 |
+ ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]]; |
|
3366 | 3366 |
} else { |
3367 | 3367 |
AV_ZERO32(mv_dst - 1 + 0); |
3368 | 3368 |
AV_ZERO32(mv_dst - 1 + 8); |
... | ... |
@@ -3386,7 +3706,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3386 | 3386 |
} |
3387 | 3387 |
|
3388 | 3388 |
{ |
3389 |
- int8_t *ref = &s->current_picture.f.ref_index[list][4 * mb_xy]; |
|
3389 |
+ int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy]; |
|
3390 | 3390 |
int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2); |
3391 | 3391 |
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101; |
3392 | 3392 |
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101; |
... | ... |
@@ -3397,7 +3717,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3397 | 3397 |
} |
3398 | 3398 |
|
3399 | 3399 |
{ |
3400 |
- int16_t(*mv_src)[2] = &s->current_picture.f.motion_val[list][4 * s->mb_x + 4 * s->mb_y * b_stride]; |
|
3400 |
+ int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride]; |
|
3401 | 3401 |
AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride); |
3402 | 3402 |
AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride); |
3403 | 3403 |
AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride); |
... | ... |
@@ -3411,31 +3731,30 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, |
3411 | 3411 |
*/ |
3412 | 3412 |
static int fill_filter_caches(H264Context *h, int mb_type) |
3413 | 3413 |
{ |
3414 |
- MpegEncContext *const s = &h->s; |
|
3415 | 3414 |
const int mb_xy = h->mb_xy; |
3416 | 3415 |
int top_xy, left_xy[LEFT_MBS]; |
3417 | 3416 |
int top_type, left_type[LEFT_MBS]; |
3418 | 3417 |
uint8_t *nnz; |
3419 | 3418 |
uint8_t *nnz_cache; |
3420 | 3419 |
|
3421 |
- top_xy = mb_xy - (s->mb_stride << MB_FIELD); |
|
3420 |
+ top_xy = mb_xy - (h->mb_stride << MB_FIELD); |
|
3422 | 3421 |
|
3423 | 3422 |
/* Wow, what a mess, why didn't they simplify the interlacing & intra |
3424 | 3423 |
* stuff, I can't imagine that these complex rules are worth it. */ |
3425 | 3424 |
|
3426 | 3425 |
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1; |
3427 | 3426 |
if (FRAME_MBAFF) { |
3428 |
- const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]); |
|
3427 |
+ const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]); |
|
3429 | 3428 |
const int curr_mb_field_flag = IS_INTERLACED(mb_type); |
3430 |
- if (s->mb_y & 1) { |
|
3429 |
+ if (h->mb_y & 1) { |
|
3431 | 3430 |
if (left_mb_field_flag != curr_mb_field_flag) |
3432 |
- left_xy[LTOP] -= s->mb_stride; |
|
3431 |
+ left_xy[LTOP] -= h->mb_stride; |
|
3433 | 3432 |
} else { |
3434 | 3433 |
if (curr_mb_field_flag) |
3435 |
- top_xy += s->mb_stride & |
|
3436 |
- (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1); |
|
3434 |
+ top_xy += h->mb_stride & |
|
3435 |
+ (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1); |
|
3437 | 3436 |
if (left_mb_field_flag != curr_mb_field_flag) |
3438 |
- left_xy[LBOT] += s->mb_stride; |
|
3437 |
+ left_xy[LBOT] += h->mb_stride; |
|
3439 | 3438 |
} |
3440 | 3439 |
} |
3441 | 3440 |
|
... | ... |
@@ -3447,25 +3766,25 @@ static int fill_filter_caches(H264Context *h, int mb_type) |
3447 | 3447 |
* This is a conservative estimate: could also check beta_offset |
3448 | 3448 |
* and more accurate chroma_qp. */ |
3449 | 3449 |
int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice |
3450 |
- int qp = s->current_picture.f.qscale_table[mb_xy]; |
|
3450 |
+ int qp = h->cur_pic.f.qscale_table[mb_xy]; |
|
3451 | 3451 |
if (qp <= qp_thresh && |
3452 | 3452 |
(left_xy[LTOP] < 0 || |
3453 |
- ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) && |
|
3453 |
+ ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) && |
|
3454 | 3454 |
(top_xy < 0 || |
3455 |
- ((qp + s->current_picture.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) { |
|
3455 |
+ ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) { |
|
3456 | 3456 |
if (!FRAME_MBAFF) |
3457 | 3457 |
return 1; |
3458 | 3458 |
if ((left_xy[LTOP] < 0 || |
3459 |
- ((qp + s->current_picture.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) && |
|
3460 |
- (top_xy < s->mb_stride || |
|
3461 |
- ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh)) |
|
3459 |
+ ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) && |
|
3460 |
+ (top_xy < h->mb_stride || |
|
3461 |
+ ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh)) |
|
3462 | 3462 |
return 1; |
3463 | 3463 |
} |
3464 | 3464 |
} |
3465 | 3465 |
|
3466 |
- top_type = s->current_picture.f.mb_type[top_xy]; |
|
3467 |
- left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]]; |
|
3468 |
- left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]]; |
|
3466 |
+ top_type = h->cur_pic.f.mb_type[top_xy]; |
|
3467 |
+ left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]]; |
|
3468 |
+ left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]]; |
|
3469 | 3469 |
if (h->deblocking_filter == 2) { |
3470 | 3470 |
if (h->slice_table[top_xy] != h->slice_num) |
3471 | 3471 |
top_type = 0; |
... | ... |
@@ -3484,10 +3803,10 @@ static int fill_filter_caches(H264Context *h, int mb_type) |
3484 | 3484 |
if (IS_INTRA(mb_type)) |
3485 | 3485 |
return 0; |
3486 | 3486 |
|
3487 |
- fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy, |
|
3487 |
+ fill_filter_caches_inter(h, mb_type, top_xy, left_xy, |
|
3488 | 3488 |
top_type, left_type, mb_xy, 0); |
3489 | 3489 |
if (h->list_count == 2) |
3490 |
- fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy, |
|
3490 |
+ fill_filter_caches_inter(h, mb_type, top_xy, left_xy, |
|
3491 | 3491 |
top_type, left_type, mb_xy, 1); |
3492 | 3492 |
|
3493 | 3493 |
nnz = h->non_zero_count[mb_xy]; |
... | ... |
@@ -3557,57 +3876,56 @@ static int fill_filter_caches(H264Context *h, int mb_type) |
3557 | 3557 |
|
3558 | 3558 |
static void loop_filter(H264Context *h, int start_x, int end_x) |
3559 | 3559 |
{ |
3560 |
- MpegEncContext *const s = &h->s; |
|
3561 | 3560 |
uint8_t *dest_y, *dest_cb, *dest_cr; |
3562 | 3561 |
int linesize, uvlinesize, mb_x, mb_y; |
3563 |
- const int end_mb_y = s->mb_y + FRAME_MBAFF; |
|
3562 |
+ const int end_mb_y = h->mb_y + FRAME_MBAFF; |
|
3564 | 3563 |
const int old_slice_type = h->slice_type; |
3565 | 3564 |
const int pixel_shift = h->pixel_shift; |
3566 |
- const int block_h = 16 >> s->chroma_y_shift; |
|
3565 |
+ const int block_h = 16 >> h->chroma_y_shift; |
|
3567 | 3566 |
|
3568 | 3567 |
if (h->deblocking_filter) { |
3569 | 3568 |
for (mb_x = start_x; mb_x < end_x; mb_x++) |
3570 | 3569 |
for (mb_y = end_mb_y - FRAME_MBAFF; mb_y <= end_mb_y; mb_y++) { |
3571 | 3570 |
int mb_xy, mb_type; |
3572 |
- mb_xy = h->mb_xy = mb_x + mb_y * s->mb_stride; |
|
3571 |
+ mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride; |
|
3573 | 3572 |
h->slice_num = h->slice_table[mb_xy]; |
3574 |
- mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
3573 |
+ mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
3575 | 3574 |
h->list_count = h->list_counts[mb_xy]; |
3576 | 3575 |
|
3577 | 3576 |
if (FRAME_MBAFF) |
3578 | 3577 |
h->mb_mbaff = |
3579 | 3578 |
h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type); |
3580 | 3579 |
|
3581 |
- s->mb_x = mb_x; |
|
3582 |
- s->mb_y = mb_y; |
|
3583 |
- dest_y = s->current_picture.f.data[0] + |
|
3584 |
- ((mb_x << pixel_shift) + mb_y * s->linesize) * 16; |
|
3585 |
- dest_cb = s->current_picture.f.data[1] + |
|
3580 |
+ h->mb_x = mb_x; |
|
3581 |
+ h->mb_y = mb_y; |
|
3582 |
+ dest_y = h->cur_pic.f.data[0] + |
|
3583 |
+ ((mb_x << pixel_shift) + mb_y * h->linesize) * 16; |
|
3584 |
+ dest_cb = h->cur_pic.f.data[1] + |
|
3586 | 3585 |
(mb_x << pixel_shift) * (8 << CHROMA444) + |
3587 |
- mb_y * s->uvlinesize * block_h; |
|
3588 |
- dest_cr = s->current_picture.f.data[2] + |
|
3586 |
+ mb_y * h->uvlinesize * block_h; |
|
3587 |
+ dest_cr = h->cur_pic.f.data[2] + |
|
3589 | 3588 |
(mb_x << pixel_shift) * (8 << CHROMA444) + |
3590 |
- mb_y * s->uvlinesize * block_h; |
|
3589 |
+ mb_y * h->uvlinesize * block_h; |
|
3591 | 3590 |
// FIXME simplify above |
3592 | 3591 |
|
3593 | 3592 |
if (MB_FIELD) { |
3594 |
- linesize = h->mb_linesize = s->linesize * 2; |
|
3595 |
- uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; |
|
3593 |
+ linesize = h->mb_linesize = h->linesize * 2; |
|
3594 |
+ uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2; |
|
3596 | 3595 |
if (mb_y & 1) { // FIXME move out of this function? |
3597 |
- dest_y -= s->linesize * 15; |
|
3598 |
- dest_cb -= s->uvlinesize * (block_h - 1); |
|
3599 |
- dest_cr -= s->uvlinesize * (block_h - 1); |
|
3596 |
+ dest_y -= h->linesize * 15; |
|
3597 |
+ dest_cb -= h->uvlinesize * (block_h - 1); |
|
3598 |
+ dest_cr -= h->uvlinesize * (block_h - 1); |
|
3600 | 3599 |
} |
3601 | 3600 |
} else { |
3602 |
- linesize = h->mb_linesize = s->linesize; |
|
3603 |
- uvlinesize = h->mb_uvlinesize = s->uvlinesize; |
|
3601 |
+ linesize = h->mb_linesize = h->linesize; |
|
3602 |
+ uvlinesize = h->mb_uvlinesize = h->uvlinesize; |
|
3604 | 3603 |
} |
3605 | 3604 |
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, |
3606 | 3605 |
uvlinesize, 0); |
3607 | 3606 |
if (fill_filter_caches(h, mb_type)) |
3608 | 3607 |
continue; |
3609 |
- h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]); |
|
3610 |
- h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]); |
|
3608 |
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]); |
|
3609 |
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]); |
|
3611 | 3610 |
|
3612 | 3611 |
if (FRAME_MBAFF) { |
3613 | 3612 |
ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, |
... | ... |
@@ -3619,20 +3937,19 @@ static void loop_filter(H264Context *h, int start_x, int end_x) |
3619 | 3619 |
} |
3620 | 3620 |
} |
3621 | 3621 |
h->slice_type = old_slice_type; |
3622 |
- s->mb_x = end_x; |
|
3623 |
- s->mb_y = end_mb_y - FRAME_MBAFF; |
|
3624 |
- h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); |
|
3625 |
- h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); |
|
3622 |
+ h->mb_x = end_x; |
|
3623 |
+ h->mb_y = end_mb_y - FRAME_MBAFF; |
|
3624 |
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); |
|
3625 |
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); |
|
3626 | 3626 |
} |
3627 | 3627 |
|
3628 | 3628 |
static void predict_field_decoding_flag(H264Context *h) |
3629 | 3629 |
{ |
3630 |
- MpegEncContext *const s = &h->s; |
|
3631 |
- const int mb_xy = s->mb_x + s->mb_y * s->mb_stride; |
|
3630 |
+ const int mb_xy = h->mb_x + h->mb_y * h->mb_stride; |
|
3632 | 3631 |
int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ? |
3633 |
- s->current_picture.f.mb_type[mb_xy - 1] : |
|
3634 |
- (h->slice_table[mb_xy - s->mb_stride] == h->slice_num) ? |
|
3635 |
- s->current_picture.f.mb_type[mb_xy - s->mb_stride] : 0; |
|
3632 |
+ h->cur_pic.f.mb_type[mb_xy - 1] : |
|
3633 |
+ (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ? |
|
3634 |
+ h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0; |
|
3636 | 3635 |
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0; |
3637 | 3636 |
} |
3638 | 3637 |
|
... | ... |
@@ -3641,9 +3958,8 @@ static void predict_field_decoding_flag(H264Context *h) |
3641 | 3641 |
*/ |
3642 | 3642 |
static void decode_finish_row(H264Context *h) |
3643 | 3643 |
{ |
3644 |
- MpegEncContext *const s = &h->s; |
|
3645 |
- int top = 16 * (s->mb_y >> FIELD_PICTURE); |
|
3646 |
- int pic_height = 16 * s->mb_height >> FIELD_PICTURE; |
|
3644 |
+ int top = 16 * (h->mb_y >> FIELD_PICTURE); |
|
3645 |
+ int pic_height = 16 * h->mb_height >> FIELD_PICTURE; |
|
3647 | 3646 |
int height = 16 << FRAME_MBAFF; |
3648 | 3647 |
int deblock_border = (16 + 4) << FRAME_MBAFF; |
3649 | 3648 |
|
... | ... |
@@ -3662,19 +3978,19 @@ static void decode_finish_row(H264Context *h) |
3662 | 3662 |
top = 0; |
3663 | 3663 |
} |
3664 | 3664 |
|
3665 |
- ff_mpeg_draw_horiz_band(s, top, height); |
|
3665 |
+ ff_h264_draw_horiz_band(h, top, height); |
|
3666 | 3666 |
|
3667 |
- if (s->droppable) |
|
3667 |
+ if (h->droppable) |
|
3668 | 3668 |
return; |
3669 | 3669 |
|
3670 |
- ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1, |
|
3671 |
- s->picture_structure == PICT_BOTTOM_FIELD); |
|
3670 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1, |
|
3671 |
+ h->picture_structure == PICT_BOTTOM_FIELD); |
|
3672 | 3672 |
} |
3673 | 3673 |
|
3674 | 3674 |
static void er_add_slice(H264Context *h, int startx, int starty, |
3675 | 3675 |
int endx, int endy, int status) |
3676 | 3676 |
{ |
3677 |
- ERContext *er = &h->s.er; |
|
3677 |
+ ERContext *er = &h->er; |
|
3678 | 3678 |
|
3679 | 3679 |
er->ref_count = h->ref_count[0]; |
3680 | 3680 |
ff_er_add_slice(er, startx, starty, endx, endy, status); |
... | ... |
@@ -3683,24 +3999,23 @@ static void er_add_slice(H264Context *h, int startx, int starty, |
3683 | 3683 |
static int decode_slice(struct AVCodecContext *avctx, void *arg) |
3684 | 3684 |
{ |
3685 | 3685 |
H264Context *h = *(void **)arg; |
3686 |
- MpegEncContext *const s = &h->s; |
|
3687 |
- int lf_x_start = s->mb_x; |
|
3686 |
+ int lf_x_start = h->mb_x; |
|
3688 | 3687 |
|
3689 |
- s->mb_skip_run = -1; |
|
3688 |
+ h->mb_skip_run = -1; |
|
3690 | 3689 |
|
3691 |
- h->is_complex = FRAME_MBAFF || s->picture_structure != PICT_FRAME || |
|
3692 |
- s->codec_id != AV_CODEC_ID_H264 || |
|
3693 |
- (CONFIG_GRAY && (s->flags & CODEC_FLAG_GRAY)); |
|
3690 |
+ h->is_complex = FRAME_MBAFF || h->picture_structure != PICT_FRAME || |
|
3691 |
+ avctx->codec_id != AV_CODEC_ID_H264 || |
|
3692 |
+ (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY)); |
|
3694 | 3693 |
|
3695 | 3694 |
if (h->pps.cabac) { |
3696 | 3695 |
/* realign */ |
3697 |
- align_get_bits(&s->gb); |
|
3696 |
+ align_get_bits(&h->gb); |
|
3698 | 3697 |
|
3699 | 3698 |
/* init cabac */ |
3700 | 3699 |
ff_init_cabac_states(&h->cabac); |
3701 | 3700 |
ff_init_cabac_decoder(&h->cabac, |
3702 |
- s->gb.buffer + get_bits_count(&s->gb) / 8, |
|
3703 |
- (get_bits_left(&s->gb) + 7) / 8); |
|
3701 |
+ h->gb.buffer + get_bits_count(&h->gb) / 8, |
|
3702 |
+ (get_bits_left(&h->gb) + 7) / 8); |
|
3704 | 3703 |
|
3705 | 3704 |
ff_h264_init_cabac_states(h); |
3706 | 3705 |
|
... | ... |
@@ -3715,53 +4030,53 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) |
3715 | 3715 |
|
3716 | 3716 |
// FIXME optimal? or let mb_decode decode 16x32 ? |
3717 | 3717 |
if (ret >= 0 && FRAME_MBAFF) { |
3718 |
- s->mb_y++; |
|
3718 |
+ h->mb_y++; |
|
3719 | 3719 |
|
3720 | 3720 |
ret = ff_h264_decode_mb_cabac(h); |
3721 | 3721 |
|
3722 | 3722 |
if (ret >= 0) |
3723 | 3723 |
ff_h264_hl_decode_mb(h); |
3724 |
- s->mb_y--; |
|
3724 |
+ h->mb_y--; |
|
3725 | 3725 |
} |
3726 | 3726 |
eos = get_cabac_terminate(&h->cabac); |
3727 | 3727 |
|
3728 |
- if ((s->workaround_bugs & FF_BUG_TRUNCATED) && |
|
3728 |
+ if ((h->workaround_bugs & FF_BUG_TRUNCATED) && |
|
3729 | 3729 |
h->cabac.bytestream > h->cabac.bytestream_end + 2) { |
3730 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, |
|
3731 |
- s->mb_y, ER_MB_END); |
|
3732 |
- if (s->mb_x >= lf_x_start) |
|
3733 |
- loop_filter(h, lf_x_start, s->mb_x + 1); |
|
3730 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, |
|
3731 |
+ h->mb_y, ER_MB_END); |
|
3732 |
+ if (h->mb_x >= lf_x_start) |
|
3733 |
+ loop_filter(h, lf_x_start, h->mb_x + 1); |
|
3734 | 3734 |
return 0; |
3735 | 3735 |
} |
3736 | 3736 |
if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) { |
3737 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
3737 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3738 | 3738 |
"error while decoding MB %d %d, bytestream (%td)\n", |
3739 |
- s->mb_x, s->mb_y, |
|
3739 |
+ h->mb_x, h->mb_y, |
|
3740 | 3740 |
h->cabac.bytestream_end - h->cabac.bytestream); |
3741 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x, |
|
3742 |
- s->mb_y, ER_MB_ERROR); |
|
3741 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, |
|
3742 |
+ h->mb_y, ER_MB_ERROR); |
|
3743 | 3743 |
return -1; |
3744 | 3744 |
} |
3745 | 3745 |
|
3746 |
- if (++s->mb_x >= s->mb_width) { |
|
3747 |
- loop_filter(h, lf_x_start, s->mb_x); |
|
3748 |
- s->mb_x = lf_x_start = 0; |
|
3746 |
+ if (++h->mb_x >= h->mb_width) { |
|
3747 |
+ loop_filter(h, lf_x_start, h->mb_x); |
|
3748 |
+ h->mb_x = lf_x_start = 0; |
|
3749 | 3749 |
decode_finish_row(h); |
3750 |
- ++s->mb_y; |
|
3750 |
+ ++h->mb_y; |
|
3751 | 3751 |
if (FIELD_OR_MBAFF_PICTURE) { |
3752 |
- ++s->mb_y; |
|
3753 |
- if (FRAME_MBAFF && s->mb_y < s->mb_height) |
|
3752 |
+ ++h->mb_y; |
|
3753 |
+ if (FRAME_MBAFF && h->mb_y < h->mb_height) |
|
3754 | 3754 |
predict_field_decoding_flag(h); |
3755 | 3755 |
} |
3756 | 3756 |
} |
3757 | 3757 |
|
3758 |
- if (eos || s->mb_y >= s->mb_height) { |
|
3759 |
- tprintf(s->avctx, "slice end %d %d\n", |
|
3760 |
- get_bits_count(&s->gb), s->gb.size_in_bits); |
|
3761 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, |
|
3762 |
- s->mb_y, ER_MB_END); |
|
3763 |
- if (s->mb_x > lf_x_start) |
|
3764 |
- loop_filter(h, lf_x_start, s->mb_x); |
|
3758 |
+ if (eos || h->mb_y >= h->mb_height) { |
|
3759 |
+ tprintf(h->avctx, "slice end %d %d\n", |
|
3760 |
+ get_bits_count(&h->gb), h->gb.size_in_bits); |
|
3761 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, |
|
3762 |
+ h->mb_y, ER_MB_END); |
|
3763 |
+ if (h->mb_x > lf_x_start) |
|
3764 |
+ loop_filter(h, lf_x_start, h->mb_x); |
|
3765 | 3765 |
return 0; |
3766 | 3766 |
} |
3767 | 3767 |
} |
... | ... |
@@ -3774,45 +4089,45 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) |
3774 | 3774 |
|
3775 | 3775 |
// FIXME optimal? or let mb_decode decode 16x32 ? |
3776 | 3776 |
if (ret >= 0 && FRAME_MBAFF) { |
3777 |
- s->mb_y++; |
|
3777 |
+ h->mb_y++; |
|
3778 | 3778 |
ret = ff_h264_decode_mb_cavlc(h); |
3779 | 3779 |
|
3780 | 3780 |
if (ret >= 0) |
3781 | 3781 |
ff_h264_hl_decode_mb(h); |
3782 |
- s->mb_y--; |
|
3782 |
+ h->mb_y--; |
|
3783 | 3783 |
} |
3784 | 3784 |
|
3785 | 3785 |
if (ret < 0) { |
3786 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
3787 |
- "error while decoding MB %d %d\n", s->mb_x, s->mb_y); |
|
3788 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x, |
|
3789 |
- s->mb_y, ER_MB_ERROR); |
|
3786 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3787 |
+ "error while decoding MB %d %d\n", h->mb_x, h->mb_y); |
|
3788 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, |
|
3789 |
+ h->mb_y, ER_MB_ERROR); |
|
3790 | 3790 |
return -1; |
3791 | 3791 |
} |
3792 | 3792 |
|
3793 |
- if (++s->mb_x >= s->mb_width) { |
|
3794 |
- loop_filter(h, lf_x_start, s->mb_x); |
|
3795 |
- s->mb_x = lf_x_start = 0; |
|
3793 |
+ if (++h->mb_x >= h->mb_width) { |
|
3794 |
+ loop_filter(h, lf_x_start, h->mb_x); |
|
3795 |
+ h->mb_x = lf_x_start = 0; |
|
3796 | 3796 |
decode_finish_row(h); |
3797 |
- ++s->mb_y; |
|
3797 |
+ ++h->mb_y; |
|
3798 | 3798 |
if (FIELD_OR_MBAFF_PICTURE) { |
3799 |
- ++s->mb_y; |
|
3800 |
- if (FRAME_MBAFF && s->mb_y < s->mb_height) |
|
3799 |
+ ++h->mb_y; |
|
3800 |
+ if (FRAME_MBAFF && h->mb_y < h->mb_height) |
|
3801 | 3801 |
predict_field_decoding_flag(h); |
3802 | 3802 |
} |
3803 |
- if (s->mb_y >= s->mb_height) { |
|
3804 |
- tprintf(s->avctx, "slice end %d %d\n", |
|
3805 |
- get_bits_count(&s->gb), s->gb.size_in_bits); |
|
3803 |
+ if (h->mb_y >= h->mb_height) { |
|
3804 |
+ tprintf(h->avctx, "slice end %d %d\n", |
|
3805 |
+ get_bits_count(&h->gb), h->gb.size_in_bits); |
|
3806 | 3806 |
|
3807 |
- if (get_bits_left(&s->gb) == 0) { |
|
3808 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, |
|
3809 |
- s->mb_x - 1, s->mb_y, |
|
3807 |
+ if (get_bits_left(&h->gb) == 0) { |
|
3808 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, |
|
3809 |
+ h->mb_x - 1, h->mb_y, |
|
3810 | 3810 |
ER_MB_END); |
3811 | 3811 |
|
3812 | 3812 |
return 0; |
3813 | 3813 |
} else { |
3814 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, |
|
3815 |
- s->mb_x - 1, s->mb_y, |
|
3814 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, |
|
3815 |
+ h->mb_x - 1, h->mb_y, |
|
3816 | 3816 |
ER_MB_END); |
3817 | 3817 |
|
3818 | 3818 |
return -1; |
... | ... |
@@ -3820,20 +4135,20 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) |
3820 | 3820 |
} |
3821 | 3821 |
} |
3822 | 3822 |
|
3823 |
- if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0) { |
|
3824 |
- tprintf(s->avctx, "slice end %d %d\n", |
|
3825 |
- get_bits_count(&s->gb), s->gb.size_in_bits); |
|
3826 |
- if (get_bits_left(&s->gb) == 0) { |
|
3827 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, |
|
3828 |
- s->mb_x - 1, s->mb_y, |
|
3823 |
+ if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) { |
|
3824 |
+ tprintf(h->avctx, "slice end %d %d\n", |
|
3825 |
+ get_bits_count(&h->gb), h->gb.size_in_bits); |
|
3826 |
+ if (get_bits_left(&h->gb) == 0) { |
|
3827 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, |
|
3828 |
+ h->mb_x - 1, h->mb_y, |
|
3829 | 3829 |
ER_MB_END); |
3830 |
- if (s->mb_x > lf_x_start) |
|
3831 |
- loop_filter(h, lf_x_start, s->mb_x); |
|
3830 |
+ if (h->mb_x > lf_x_start) |
|
3831 |
+ loop_filter(h, lf_x_start, h->mb_x); |
|
3832 | 3832 |
|
3833 | 3833 |
return 0; |
3834 | 3834 |
} else { |
3835 |
- er_add_slice(h, s->resync_mb_x, s->resync_mb_y, s->mb_x, |
|
3836 |
- s->mb_y, ER_MB_ERROR); |
|
3835 |
+ er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, |
|
3836 |
+ h->mb_y, ER_MB_ERROR); |
|
3837 | 3837 |
|
3838 | 3838 |
return -1; |
3839 | 3839 |
} |
... | ... |
@@ -3850,21 +4165,19 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) |
3850 | 3850 |
*/ |
3851 | 3851 |
static int execute_decode_slices(H264Context *h, int context_count) |
3852 | 3852 |
{ |
3853 |
- MpegEncContext *const s = &h->s; |
|
3854 |
- AVCodecContext *const avctx = s->avctx; |
|
3853 |
+ AVCodecContext *const avctx = h->avctx; |
|
3855 | 3854 |
H264Context *hx; |
3856 | 3855 |
int i; |
3857 | 3856 |
|
3858 |
- if (s->avctx->hwaccel || |
|
3859 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
3857 |
+ if (h->avctx->hwaccel || |
|
3858 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
3860 | 3859 |
return 0; |
3861 | 3860 |
if (context_count == 1) { |
3862 | 3861 |
return decode_slice(avctx, &h); |
3863 | 3862 |
} else { |
3864 | 3863 |
for (i = 1; i < context_count; i++) { |
3865 | 3864 |
hx = h->thread_context[i]; |
3866 |
- hx->s.err_recognition = avctx->err_recognition; |
|
3867 |
- hx->s.er.error_count = 0; |
|
3865 |
+ hx->er.error_count = 0; |
|
3868 | 3866 |
} |
3869 | 3867 |
|
3870 | 3868 |
avctx->execute(avctx, decode_slice, h->thread_context, |
... | ... |
@@ -3872,12 +4185,12 @@ static int execute_decode_slices(H264Context *h, int context_count) |
3872 | 3872 |
|
3873 | 3873 |
/* pull back stuff from slices to master context */ |
3874 | 3874 |
hx = h->thread_context[context_count - 1]; |
3875 |
- s->mb_x = hx->s.mb_x; |
|
3876 |
- s->mb_y = hx->s.mb_y; |
|
3877 |
- s->droppable = hx->s.droppable; |
|
3878 |
- s->picture_structure = hx->s.picture_structure; |
|
3875 |
+ h->mb_x = hx->mb_x; |
|
3876 |
+ h->mb_y = hx->mb_y; |
|
3877 |
+ h->droppable = hx->droppable; |
|
3878 |
+ h->picture_structure = hx->picture_structure; |
|
3879 | 3879 |
for (i = 1; i < context_count; i++) |
3880 |
- h->s.er.error_count += h->thread_context[i]->s.er.error_count; |
|
3880 |
+ h->er.error_count += h->thread_context[i]->er.error_count; |
|
3881 | 3881 |
} |
3882 | 3882 |
|
3883 | 3883 |
return 0; |
... | ... |
@@ -3886,8 +4199,7 @@ static int execute_decode_slices(H264Context *h, int context_count) |
3886 | 3886 |
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
3887 | 3887 |
int parse_extradata) |
3888 | 3888 |
{ |
3889 |
- MpegEncContext *const s = &h->s; |
|
3890 |
- AVCodecContext *const avctx = s->avctx; |
|
3889 |
+ AVCodecContext *const avctx = h->avctx; |
|
3891 | 3890 |
H264Context *hx; ///< thread context |
3892 | 3891 |
int buf_index; |
3893 | 3892 |
int context_count; |
... | ... |
@@ -3896,11 +4208,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
3896 | 3896 |
int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts |
3897 | 3897 |
int nal_index; |
3898 | 3898 |
|
3899 |
- h->max_contexts = s->slice_context_count; |
|
3900 |
- if (!(s->flags2 & CODEC_FLAG2_CHUNKS)) { |
|
3899 |
+ h->max_contexts = h->slice_context_count; |
|
3900 |
+ if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) { |
|
3901 | 3901 |
h->current_slice = 0; |
3902 |
- if (!s->first_field) |
|
3903 |
- s->current_picture_ptr = NULL; |
|
3902 |
+ if (!h->first_field) |
|
3903 |
+ h->cur_pic_ptr = NULL; |
|
3904 | 3904 |
ff_h264_reset_sei(h); |
3905 | 3905 |
} |
3906 | 3906 |
|
... | ... |
@@ -3924,7 +4236,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
3924 | 3924 |
for (i = 0; i < h->nal_length_size; i++) |
3925 | 3925 |
nalsize = (nalsize << 8) | buf[buf_index++]; |
3926 | 3926 |
if (nalsize <= 0 || nalsize > buf_size - buf_index) { |
3927 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
3927 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
3928 | 3928 |
"AVC: nal size %d\n", nalsize); |
3929 | 3929 |
break; |
3930 | 3930 |
} |
... | ... |
@@ -3957,25 +4269,25 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
3957 | 3957 |
goto end; |
3958 | 3958 |
} |
3959 | 3959 |
i = buf_index + consumed; |
3960 |
- if ((s->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc && |
|
3960 |
+ if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc && |
|
3961 | 3961 |
buf[i] == 0x00 && buf[i + 1] == 0x00 && |
3962 | 3962 |
buf[i + 2] == 0x01 && buf[i + 3] == 0xE0) |
3963 |
- s->workaround_bugs |= FF_BUG_TRUNCATED; |
|
3963 |
+ h->workaround_bugs |= FF_BUG_TRUNCATED; |
|
3964 | 3964 |
|
3965 |
- if (!(s->workaround_bugs & FF_BUG_TRUNCATED)) |
|
3965 |
+ if (!(h->workaround_bugs & FF_BUG_TRUNCATED)) |
|
3966 | 3966 |
while (ptr[dst_length - 1] == 0 && dst_length > 0) |
3967 | 3967 |
dst_length--; |
3968 | 3968 |
bit_length = !dst_length ? 0 |
3969 | 3969 |
: (8 * dst_length - |
3970 | 3970 |
decode_rbsp_trailing(h, ptr + dst_length - 1)); |
3971 | 3971 |
|
3972 |
- if (s->avctx->debug & FF_DEBUG_STARTCODE) |
|
3973 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
3972 |
+ if (h->avctx->debug & FF_DEBUG_STARTCODE) |
|
3973 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
3974 | 3974 |
"NAL %d at %d/%d length %d\n", |
3975 | 3975 |
hx->nal_unit_type, buf_index, buf_size, dst_length); |
3976 | 3976 |
|
3977 | 3977 |
if (h->is_avc && (nalsize != consumed) && nalsize) |
3978 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
3978 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
3979 | 3979 |
"AVC: Consumed only %d bytes instead of %d\n", |
3980 | 3980 |
consumed, nalsize); |
3981 | 3981 |
|
... | ... |
@@ -3995,8 +4307,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, |
3995 | 3995 |
case NAL_DPA: |
3996 | 3996 |
case NAL_IDR_SLICE: |
3997 | 3997 |
case NAL_SLICE: |
3998 |
- init_get_bits(&hx->s.gb, ptr, bit_length); |
|
3999 |
- if (!get_ue_golomb(&hx->s.gb)) |
|
3998 |
+ init_get_bits(&hx->gb, ptr, bit_length); |
|
3999 |
+ if (!get_ue_golomb(&hx->gb)) |
|
4000 | 4000 |
nals_needed = nal_index; |
4001 | 4001 |
} |
4002 | 4002 |
continue; |
... | ... |
@@ -4011,7 +4323,7 @@ again: |
4011 | 4011 |
* parsing. Decoding slices is not possible in codec init |
4012 | 4012 |
* with frame-mt */ |
4013 | 4013 |
if (parse_extradata && HAVE_THREADS && |
4014 |
- (s->avctx->active_thread_type & FF_THREAD_FRAME) && |
|
4014 |
+ (h->avctx->active_thread_type & FF_THREAD_FRAME) && |
|
4015 | 4015 |
(hx->nal_unit_type != NAL_PPS && |
4016 | 4016 |
hx->nal_unit_type != NAL_SPS)) { |
4017 | 4017 |
av_log(avctx, AV_LOG_INFO, "Ignoring NAL unit %d during " |
... | ... |
@@ -4022,35 +4334,35 @@ again: |
4022 | 4022 |
switch (hx->nal_unit_type) { |
4023 | 4023 |
case NAL_IDR_SLICE: |
4024 | 4024 |
if (h->nal_unit_type != NAL_IDR_SLICE) { |
4025 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
4025 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
4026 | 4026 |
"Invalid mix of idr and non-idr slices\n"); |
4027 | 4027 |
buf_index = -1; |
4028 | 4028 |
goto end; |
4029 | 4029 |
} |
4030 | 4030 |
idr(h); // FIXME ensure we don't lose some frames if there is reordering |
4031 | 4031 |
case NAL_SLICE: |
4032 |
- init_get_bits(&hx->s.gb, ptr, bit_length); |
|
4032 |
+ init_get_bits(&hx->gb, ptr, bit_length); |
|
4033 | 4033 |
hx->intra_gb_ptr = |
4034 |
- hx->inter_gb_ptr = &hx->s.gb; |
|
4035 |
- hx->s.data_partitioning = 0; |
|
4034 |
+ hx->inter_gb_ptr = &hx->gb; |
|
4035 |
+ hx->data_partitioning = 0; |
|
4036 | 4036 |
|
4037 | 4037 |
if ((err = decode_slice_header(hx, h))) |
4038 | 4038 |
break; |
4039 | 4039 |
|
4040 |
- s->current_picture_ptr->f.key_frame |= |
|
4040 |
+ h->cur_pic_ptr->f.key_frame |= |
|
4041 | 4041 |
(hx->nal_unit_type == NAL_IDR_SLICE) || |
4042 | 4042 |
(h->sei_recovery_frame_cnt >= 0); |
4043 | 4043 |
|
4044 | 4044 |
if (h->current_slice == 1) { |
4045 |
- if (!(s->flags2 & CODEC_FLAG2_CHUNKS)) |
|
4045 |
+ if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) |
|
4046 | 4046 |
decode_postinit(h, nal_index >= nals_needed); |
4047 | 4047 |
|
4048 |
- if (s->avctx->hwaccel && |
|
4049 |
- s->avctx->hwaccel->start_frame(s->avctx, NULL, 0) < 0) |
|
4048 |
+ if (h->avctx->hwaccel && |
|
4049 |
+ h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0) |
|
4050 | 4050 |
return -1; |
4051 | 4051 |
if (CONFIG_H264_VDPAU_DECODER && |
4052 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
4053 |
- ff_vdpau_h264_picture_start(s); |
|
4052 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
4053 |
+ ff_vdpau_h264_picture_start(h); |
|
4054 | 4054 |
} |
4055 | 4055 |
|
4056 | 4056 |
if (hx->redundant_pic_count == 0 && |
... | ... |
@@ -4067,26 +4379,26 @@ again: |
4067 | 4067 |
consumed) < 0) |
4068 | 4068 |
return -1; |
4069 | 4069 |
} else if (CONFIG_H264_VDPAU_DECODER && |
4070 |
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { |
|
4070 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { |
|
4071 | 4071 |
static const uint8_t start_code[] = { |
4072 | 4072 |
0x00, 0x00, 0x01 }; |
4073 |
- ff_vdpau_add_data_chunk(s, start_code, |
|
4073 |
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], start_code, |
|
4074 | 4074 |
sizeof(start_code)); |
4075 |
- ff_vdpau_add_data_chunk(s, &buf[buf_index - consumed], |
|
4075 |
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], &buf[buf_index - consumed], |
|
4076 | 4076 |
consumed); |
4077 | 4077 |
} else |
4078 | 4078 |
context_count++; |
4079 | 4079 |
} |
4080 | 4080 |
break; |
4081 | 4081 |
case NAL_DPA: |
4082 |
- init_get_bits(&hx->s.gb, ptr, bit_length); |
|
4082 |
+ init_get_bits(&hx->gb, ptr, bit_length); |
|
4083 | 4083 |
hx->intra_gb_ptr = |
4084 | 4084 |
hx->inter_gb_ptr = NULL; |
4085 | 4085 |
|
4086 | 4086 |
if ((err = decode_slice_header(hx, h)) < 0) |
4087 | 4087 |
break; |
4088 | 4088 |
|
4089 |
- hx->s.data_partitioning = 1; |
|
4089 |
+ hx->data_partitioning = 1; |
|
4090 | 4090 |
break; |
4091 | 4091 |
case NAL_DPB: |
4092 | 4092 |
init_get_bits(&hx->intra_gb, ptr, bit_length); |
... | ... |
@@ -4098,9 +4410,8 @@ again: |
4098 | 4098 |
|
4099 | 4099 |
if (hx->redundant_pic_count == 0 && |
4100 | 4100 |
hx->intra_gb_ptr && |
4101 |
- hx->s.data_partitioning && |
|
4102 |
- s->current_picture_ptr && |
|
4103 |
- s->context_initialized && |
|
4101 |
+ hx->data_partitioning && |
|
4102 |
+ h->cur_pic_ptr && h->context_initialized && |
|
4104 | 4103 |
(avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && |
4105 | 4104 |
(avctx->skip_frame < AVDISCARD_BIDIR || |
4106 | 4105 |
hx->slice_type_nos != AV_PICTURE_TYPE_B) && |
... | ... |
@@ -4110,16 +4421,16 @@ again: |
4110 | 4110 |
context_count++; |
4111 | 4111 |
break; |
4112 | 4112 |
case NAL_SEI: |
4113 |
- init_get_bits(&s->gb, ptr, bit_length); |
|
4113 |
+ init_get_bits(&h->gb, ptr, bit_length); |
|
4114 | 4114 |
ff_h264_decode_sei(h); |
4115 | 4115 |
break; |
4116 | 4116 |
case NAL_SPS: |
4117 |
- init_get_bits(&s->gb, ptr, bit_length); |
|
4117 |
+ init_get_bits(&h->gb, ptr, bit_length); |
|
4118 | 4118 |
if (ff_h264_decode_seq_parameter_set(h) < 0 && |
4119 | 4119 |
h->is_avc && (nalsize != consumed) && nalsize) { |
4120 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
4120 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
4121 | 4121 |
"SPS decoding failure, trying again with the complete NAL\n"); |
4122 |
- init_get_bits(&s->gb, buf + buf_index + 1 - consumed, |
|
4122 |
+ init_get_bits(&h->gb, buf + buf_index + 1 - consumed, |
|
4123 | 4123 |
8 * (nalsize - 1)); |
4124 | 4124 |
ff_h264_decode_seq_parameter_set(h); |
4125 | 4125 |
} |
... | ... |
@@ -4130,7 +4441,7 @@ again: |
4130 | 4130 |
} |
4131 | 4131 |
break; |
4132 | 4132 |
case NAL_PPS: |
4133 |
- init_get_bits(&s->gb, ptr, bit_length); |
|
4133 |
+ init_get_bits(&h->gb, ptr, bit_length); |
|
4134 | 4134 |
ff_h264_decode_picture_parameter_set(h, bit_length); |
4135 | 4135 |
break; |
4136 | 4136 |
case NAL_AUD: |
... | ... |
@@ -4153,7 +4464,7 @@ again: |
4153 | 4153 |
} |
4154 | 4154 |
|
4155 | 4155 |
if (err < 0) |
4156 |
- av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n"); |
|
4156 |
+ av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n"); |
|
4157 | 4157 |
else if (err == 1) { |
4158 | 4158 |
/* Slice could not be decoded in parallel mode, copy down |
4159 | 4159 |
* NAL unit stuff to context 0 and restart. Note that |
... | ... |
@@ -4171,10 +4482,10 @@ again: |
4171 | 4171 |
|
4172 | 4172 |
end: |
4173 | 4173 |
/* clean up */ |
4174 |
- if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s && |
|
4175 |
- !s->droppable) { |
|
4176 |
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, |
|
4177 |
- s->picture_structure == PICT_BOTTOM_FIELD); |
|
4174 |
+ if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h && |
|
4175 |
+ !h->droppable) { |
|
4176 |
+ ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, |
|
4177 |
+ h->picture_structure == PICT_BOTTOM_FIELD); |
|
4178 | 4178 |
} |
4179 | 4179 |
|
4180 | 4180 |
return buf_index; |
... | ... |
@@ -4183,7 +4494,7 @@ end: |
4183 | 4183 |
/** |
4184 | 4184 |
* Return the number of bytes consumed for building the current frame. |
4185 | 4185 |
*/ |
4186 |
-static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size) |
|
4186 |
+static int get_consumed_bytes(int pos, int buf_size) |
|
4187 | 4187 |
{ |
4188 | 4188 |
if (pos == 0) |
4189 | 4189 |
pos = 1; // avoid infinite loops (i doubt that is needed but ...) |
... | ... |
@@ -4199,12 +4510,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, |
4199 | 4199 |
const uint8_t *buf = avpkt->data; |
4200 | 4200 |
int buf_size = avpkt->size; |
4201 | 4201 |
H264Context *h = avctx->priv_data; |
4202 |
- MpegEncContext *s = &h->s; |
|
4203 | 4202 |
AVFrame *pict = data; |
4204 | 4203 |
int buf_index = 0; |
4205 | 4204 |
|
4206 |
- s->flags = avctx->flags; |
|
4207 |
- s->flags2 = avctx->flags2; |
|
4205 |
+ h->flags = avctx->flags; |
|
4208 | 4206 |
|
4209 | 4207 |
/* end of stream, output what is still in the buffers */ |
4210 | 4208 |
out: |
... | ... |
@@ -4212,7 +4521,7 @@ out: |
4212 | 4212 |
Picture *out; |
4213 | 4213 |
int i, out_idx; |
4214 | 4214 |
|
4215 |
- s->current_picture_ptr = NULL; |
|
4215 |
+ h->cur_pic_ptr = NULL; |
|
4216 | 4216 |
|
4217 | 4217 |
// FIXME factorize this with the output code below |
4218 | 4218 |
out = h->delayed_pic[0]; |
... | ... |
@@ -4242,25 +4551,24 @@ out: |
4242 | 4242 |
if (buf_index < 0) |
4243 | 4243 |
return -1; |
4244 | 4244 |
|
4245 |
- if (!s->current_picture_ptr && h->nal_unit_type == NAL_END_SEQUENCE) { |
|
4245 |
+ if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) { |
|
4246 | 4246 |
buf_size = 0; |
4247 | 4247 |
goto out; |
4248 | 4248 |
} |
4249 | 4249 |
|
4250 |
- if (!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr) { |
|
4250 |
+ if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) { |
|
4251 | 4251 |
if (avctx->skip_frame >= AVDISCARD_NONREF) |
4252 | 4252 |
return 0; |
4253 | 4253 |
av_log(avctx, AV_LOG_ERROR, "no frame!\n"); |
4254 | 4254 |
return -1; |
4255 | 4255 |
} |
4256 | 4256 |
|
4257 |
- if (!(s->flags2 & CODEC_FLAG2_CHUNKS) || |
|
4258 |
- (s->mb_y >= s->mb_height && s->mb_height)) { |
|
4259 |
- if (s->flags2 & CODEC_FLAG2_CHUNKS) |
|
4257 |
+ if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) || |
|
4258 |
+ (h->mb_y >= h->mb_height && h->mb_height)) { |
|
4259 |
+ if (avctx->flags2 & CODEC_FLAG2_CHUNKS) |
|
4260 | 4260 |
decode_postinit(h, 1); |
4261 | 4261 |
|
4262 | 4262 |
field_end(h, 0); |
4263 |
- h->context_reinitialized = 0; |
|
4264 | 4263 |
|
4265 | 4264 |
if (!h->next_output_pic) { |
4266 | 4265 |
/* Wait for second field. */ |
... | ... |
@@ -4272,9 +4580,8 @@ out: |
4272 | 4272 |
} |
4273 | 4273 |
|
4274 | 4274 |
assert(pict->data[0] || !*got_frame); |
4275 |
- ff_print_debug_info(s, pict); |
|
4276 | 4275 |
|
4277 |
- return get_consumed_bytes(s, buf_index, buf_size); |
|
4276 |
+ return get_consumed_bytes(buf_index, buf_size); |
|
4278 | 4277 |
} |
4279 | 4278 |
|
4280 | 4279 |
av_cold void ff_h264_free_context(H264Context *h) |
... | ... |
@@ -4293,13 +4600,16 @@ av_cold void ff_h264_free_context(H264Context *h) |
4293 | 4293 |
static av_cold int h264_decode_end(AVCodecContext *avctx) |
4294 | 4294 |
{ |
4295 | 4295 |
H264Context *h = avctx->priv_data; |
4296 |
- MpegEncContext *s = &h->s; |
|
4296 |
+ int i; |
|
4297 | 4297 |
|
4298 | 4298 |
ff_h264_free_context(h); |
4299 | 4299 |
|
4300 |
- ff_MPV_common_end(s); |
|
4301 |
- |
|
4302 |
- // memset(h, 0, sizeof(H264Context)); |
|
4300 |
+ if (h->DPB && !h->avctx->internal->is_copy) { |
|
4301 |
+ for (i = 0; i < h->picture_count; i++) { |
|
4302 |
+ free_picture(h, &h->DPB[i]); |
|
4303 |
+ } |
|
4304 |
+ } |
|
4305 |
+ av_freep(&h->DPB); |
|
4303 | 4306 |
|
4304 | 4307 |
return 0; |
4305 | 4308 |
} |
... | ... |
@@ -30,6 +30,7 @@ |
30 | 30 |
|
31 | 31 |
#include "libavutil/intreadwrite.h" |
32 | 32 |
#include "cabac.h" |
33 |
+#include "get_bits.h" |
|
33 | 34 |
#include "mpegvideo.h" |
34 | 35 |
#include "h264chroma.h" |
35 | 36 |
#include "h264dsp.h" |
... | ... |
@@ -60,7 +61,7 @@ |
60 | 60 |
#define MB_MBAFF h->mb_mbaff |
61 | 61 |
#define MB_FIELD h->mb_field_decoding_flag |
62 | 62 |
#define FRAME_MBAFF h->mb_aff_frame |
63 |
-#define FIELD_PICTURE (s->picture_structure != PICT_FRAME) |
|
63 |
+#define FIELD_PICTURE (h->picture_structure != PICT_FRAME) |
|
64 | 64 |
#define LEFT_MBS 2 |
65 | 65 |
#define LTOP 0 |
66 | 66 |
#define LBOT 1 |
... | ... |
@@ -250,15 +251,42 @@ typedef struct MMCO { |
250 | 250 |
* H264Context |
251 | 251 |
*/ |
252 | 252 |
typedef struct H264Context { |
253 |
- MpegEncContext s; |
|
253 |
+ AVCodecContext *avctx; |
|
254 |
+ DSPContext dsp; |
|
255 |
+ VideoDSPContext vdsp; |
|
254 | 256 |
H264DSPContext h264dsp; |
255 | 257 |
H264ChromaContext h264chroma; |
256 | 258 |
H264QpelContext h264qpel; |
259 |
+ MotionEstContext me; |
|
260 |
+ ParseContext parse_context; |
|
261 |
+ GetBitContext gb; |
|
262 |
+ ERContext er; |
|
263 |
+ |
|
264 |
+ Picture *DPB; |
|
265 |
+ Picture *cur_pic_ptr; |
|
266 |
+ Picture cur_pic; |
|
267 |
+ int picture_count; |
|
268 |
+ int picture_range_start, picture_range_end; |
|
269 |
+ |
|
257 | 270 |
int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264 |
258 | 271 |
int chroma_qp[2]; // QPc |
259 | 272 |
|
260 | 273 |
int qp_thresh; ///< QP threshold to skip loopfilter |
261 | 274 |
|
275 |
+ int width, height; |
|
276 |
+ int linesize, uvlinesize; |
|
277 |
+ int chroma_x_shift, chroma_y_shift; |
|
278 |
+ |
|
279 |
+ int qscale; |
|
280 |
+ int droppable; |
|
281 |
+ int data_partitioning; |
|
282 |
+ int coded_picture_number; |
|
283 |
+ int low_delay; |
|
284 |
+ |
|
285 |
+ int context_initialized; |
|
286 |
+ int flags; |
|
287 |
+ int workaround_bugs; |
|
288 |
+ |
|
262 | 289 |
int prev_mb_skipped; |
263 | 290 |
int next_mb_skipped; |
264 | 291 |
|
... | ... |
@@ -348,6 +376,8 @@ typedef struct H264Context { |
348 | 348 |
int mb_aff_frame; |
349 | 349 |
int mb_field_decoding_flag; |
350 | 350 |
int mb_mbaff; ///< mb_aff_frame && mb_field_decoding_flag |
351 |
+ int picture_structure; |
|
352 |
+ int first_field; |
|
351 | 353 |
|
352 | 354 |
DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4]; |
353 | 355 |
|
... | ... |
@@ -424,6 +454,13 @@ typedef struct H264Context { |
424 | 424 |
|
425 | 425 |
int x264_build; |
426 | 426 |
|
427 |
+ int mb_x, mb_y; |
|
428 |
+ int resync_mb_x; |
|
429 |
+ int resync_mb_y; |
|
430 |
+ int mb_skip_run; |
|
431 |
+ int mb_height, mb_width; |
|
432 |
+ int mb_stride; |
|
433 |
+ int mb_num; |
|
427 | 434 |
int mb_xy; |
428 | 435 |
|
429 | 436 |
int is_complex; |
... | ... |
@@ -448,7 +485,8 @@ typedef struct H264Context { |
448 | 448 |
int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4) |
449 | 449 |
int got_first; ///< this flag is != 0 if we've parsed a frame |
450 | 450 |
|
451 |
- int context_reinitialized; |
|
451 |
+ int bit_depth_luma; ///< luma bit depth from sps to detect changes |
|
452 |
+ int chroma_format_idc; ///< chroma format from sps to detect changes |
|
452 | 453 |
|
453 | 454 |
SPS *sps_buffers[MAX_SPS_COUNT]; |
454 | 455 |
PPS *pps_buffers[MAX_PPS_COUNT]; |
... | ... |
@@ -521,12 +559,16 @@ typedef struct H264Context { |
521 | 521 |
*/ |
522 | 522 |
int max_contexts; |
523 | 523 |
|
524 |
+ int slice_context_count; |
|
525 |
+ |
|
524 | 526 |
/** |
525 | 527 |
* 1 if the single thread fallback warning has already been |
526 | 528 |
* displayed, 0 otherwise. |
527 | 529 |
*/ |
528 | 530 |
int single_decode_warning; |
529 | 531 |
|
532 |
+ enum AVPictureType pict_type; |
|
533 |
+ |
|
530 | 534 |
int last_slice_type; |
531 | 535 |
/** @} */ |
532 | 536 |
|
... | ... |
@@ -578,6 +620,8 @@ typedef struct H264Context { |
578 | 578 |
|
579 | 579 |
int cur_chroma_format_idc; |
580 | 580 |
uint8_t *bipred_scratchpad; |
581 |
+ uint8_t *edge_emu_buffer; |
|
582 |
+ int16_t *dc_val_base; |
|
581 | 583 |
} H264Context; |
582 | 584 |
|
583 | 585 |
extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM + 1]; ///< One chroma qp table for each supported bit depth (8, 9, 10). |
... | ... |
@@ -786,7 +830,7 @@ static av_always_inline int pred_intra_mode(H264Context *h, int n) |
786 | 786 |
const int top = h->intra4x4_pred_mode_cache[index8 - 8]; |
787 | 787 |
const int min = FFMIN(left, top); |
788 | 788 |
|
789 |
- tprintf(h->s.avctx, "mode:%d %d min:%d\n", left, top, min); |
|
789 |
+ tprintf(h->avctx, "mode:%d %d min:%d\n", left, top, min); |
|
790 | 790 |
|
791 | 791 |
if (min < 0) |
792 | 792 |
return DC_PRED; |
... | ... |
@@ -820,7 +864,7 @@ static av_always_inline void write_back_non_zero_count(H264Context *h) |
820 | 820 |
AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]); |
821 | 821 |
AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]); |
822 | 822 |
|
823 |
- if (!h->s.chroma_y_shift) { |
|
823 |
+ if (!h->chroma_y_shift) { |
|
824 | 824 |
AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]); |
825 | 825 |
AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]); |
826 | 826 |
AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]); |
... | ... |
@@ -829,12 +873,11 @@ static av_always_inline void write_back_non_zero_count(H264Context *h) |
829 | 829 |
} |
830 | 830 |
|
831 | 831 |
static av_always_inline void write_back_motion_list(H264Context *h, |
832 |
- MpegEncContext *const s, |
|
833 | 832 |
int b_stride, |
834 | 833 |
int b_xy, int b8_xy, |
835 | 834 |
int mb_type, int list) |
836 | 835 |
{ |
837 |
- int16_t(*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy]; |
|
836 |
+ int16_t(*mv_dst)[2] = &h->cur_pic.f.motion_val[list][b_xy]; |
|
838 | 837 |
int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]]; |
839 | 838 |
AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0); |
840 | 839 |
AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1); |
... | ... |
@@ -855,7 +898,7 @@ static av_always_inline void write_back_motion_list(H264Context *h, |
855 | 855 |
} |
856 | 856 |
|
857 | 857 |
{ |
858 |
- int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy]; |
|
858 |
+ int8_t *ref_index = &h->cur_pic.f.ref_index[list][b8_xy]; |
|
859 | 859 |
int8_t *ref_cache = h->ref_cache[list]; |
860 | 860 |
ref_index[0 + 0 * 2] = ref_cache[scan8[0]]; |
861 | 861 |
ref_index[1 + 0 * 2] = ref_cache[scan8[4]]; |
... | ... |
@@ -866,19 +909,18 @@ static av_always_inline void write_back_motion_list(H264Context *h, |
866 | 866 |
|
867 | 867 |
static av_always_inline void write_back_motion(H264Context *h, int mb_type) |
868 | 868 |
{ |
869 |
- MpegEncContext *const s = &h->s; |
|
870 | 869 |
const int b_stride = h->b_stride; |
871 |
- const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride; // try mb2b(8)_xy |
|
870 |
+ const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; // try mb2b(8)_xy |
|
872 | 871 |
const int b8_xy = 4 * h->mb_xy; |
873 | 872 |
|
874 | 873 |
if (USES_LIST(mb_type, 0)) { |
875 |
- write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0); |
|
874 |
+ write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 0); |
|
876 | 875 |
} else { |
877 |
- fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy], |
|
876 |
+ fill_rectangle(&h->cur_pic.f.ref_index[0][b8_xy], |
|
878 | 877 |
2, 2, 2, (uint8_t)LIST_NOT_USED, 1); |
879 | 878 |
} |
880 | 879 |
if (USES_LIST(mb_type, 1)) |
881 |
- write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1); |
|
880 |
+ write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 1); |
|
882 | 881 |
|
883 | 882 |
if (h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC) { |
884 | 883 |
if (IS_8X8(mb_type)) { |
... | ... |
@@ -902,4 +944,6 @@ static av_always_inline int get_dct8x8_allowed(H264Context *h) |
902 | 902 |
0x0001000100010001ULL)); |
903 | 903 |
} |
904 | 904 |
|
905 |
+void ff_h264_draw_horiz_band(H264Context *h, int y, int height); |
|
906 |
+ |
|
905 | 907 |
#endif /* AVCODEC_H264_H */ |
... | ... |
@@ -1260,10 +1260,9 @@ static const int8_t cabac_context_init_PB[3][1024][2] = |
1260 | 1260 |
}; |
1261 | 1261 |
|
1262 | 1262 |
void ff_h264_init_cabac_states(H264Context *h) { |
1263 |
- MpegEncContext * const s = &h->s; |
|
1264 | 1263 |
int i; |
1265 | 1264 |
const int8_t (*tab)[2]; |
1266 |
- const int slice_qp = av_clip(s->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51); |
|
1265 |
+ const int slice_qp = av_clip(h->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51); |
|
1267 | 1266 |
|
1268 | 1267 |
if( h->slice_type_nos == AV_PICTURE_TYPE_I ) tab = cabac_context_init_I; |
1269 | 1268 |
else tab = cabac_context_init_PB[h->cabac_init_idc]; |
... | ... |
@@ -1281,13 +1280,12 @@ void ff_h264_init_cabac_states(H264Context *h) { |
1281 | 1281 |
} |
1282 | 1282 |
|
1283 | 1283 |
static int decode_cabac_field_decoding_flag(H264Context *h) { |
1284 |
- MpegEncContext * const s = &h->s; |
|
1285 |
- const long mbb_xy = h->mb_xy - 2L*s->mb_stride; |
|
1284 |
+ const long mbb_xy = h->mb_xy - 2L*h->mb_stride; |
|
1286 | 1285 |
|
1287 | 1286 |
unsigned long ctx = 0; |
1288 | 1287 |
|
1289 |
- ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num); |
|
1290 |
- ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num); |
|
1288 |
+ ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num); |
|
1289 |
+ ctx += (h->cur_pic.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num); |
|
1291 | 1290 |
|
1292 | 1291 |
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] ); |
1293 | 1292 |
} |
... | ... |
@@ -1323,34 +1321,33 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl |
1323 | 1323 |
} |
1324 | 1324 |
|
1325 | 1325 |
static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) { |
1326 |
- MpegEncContext * const s = &h->s; |
|
1327 | 1326 |
int mba_xy, mbb_xy; |
1328 | 1327 |
int ctx = 0; |
1329 | 1328 |
|
1330 | 1329 |
if(FRAME_MBAFF){ //FIXME merge with the stuff in fill_caches? |
1331 |
- int mb_xy = mb_x + (mb_y&~1)*s->mb_stride; |
|
1330 |
+ int mb_xy = mb_x + (mb_y&~1)*h->mb_stride; |
|
1332 | 1331 |
mba_xy = mb_xy - 1; |
1333 | 1332 |
if( (mb_y&1) |
1334 | 1333 |
&& h->slice_table[mba_xy] == h->slice_num |
1335 |
- && MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) ) |
|
1336 |
- mba_xy += s->mb_stride; |
|
1334 |
+ && MB_FIELD == !!IS_INTERLACED( h->cur_pic.f.mb_type[mba_xy] ) ) |
|
1335 |
+ mba_xy += h->mb_stride; |
|
1337 | 1336 |
if( MB_FIELD ){ |
1338 |
- mbb_xy = mb_xy - s->mb_stride; |
|
1337 |
+ mbb_xy = mb_xy - h->mb_stride; |
|
1339 | 1338 |
if( !(mb_y&1) |
1340 | 1339 |
&& h->slice_table[mbb_xy] == h->slice_num |
1341 |
- && IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) ) |
|
1342 |
- mbb_xy -= s->mb_stride; |
|
1340 |
+ && IS_INTERLACED( h->cur_pic.f.mb_type[mbb_xy] ) ) |
|
1341 |
+ mbb_xy -= h->mb_stride; |
|
1343 | 1342 |
}else |
1344 |
- mbb_xy = mb_x + (mb_y-1)*s->mb_stride; |
|
1343 |
+ mbb_xy = mb_x + (mb_y-1)*h->mb_stride; |
|
1345 | 1344 |
}else{ |
1346 | 1345 |
int mb_xy = h->mb_xy; |
1347 | 1346 |
mba_xy = mb_xy - 1; |
1348 |
- mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE); |
|
1347 |
+ mbb_xy = mb_xy - (h->mb_stride << FIELD_PICTURE); |
|
1349 | 1348 |
} |
1350 | 1349 |
|
1351 |
- if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] )) |
|
1350 |
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mba_xy] )) |
|
1352 | 1351 |
ctx++; |
1353 |
- if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] )) |
|
1352 |
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mbb_xy] )) |
|
1354 | 1353 |
ctx++; |
1355 | 1354 |
|
1356 | 1355 |
if( h->slice_type_nos == AV_PICTURE_TYPE_B ) |
... | ... |
@@ -1507,7 +1504,7 @@ static int decode_cabac_mb_mvd( H264Context *h, int ctxbase, int amvd, int *mvda |
1507 | 1507 |
mvd += 1 << k; |
1508 | 1508 |
k++; |
1509 | 1509 |
if(k>24){ |
1510 |
- av_log(h->s.avctx, AV_LOG_ERROR, "overflow in decode_cabac_mb_mvd\n"); |
|
1510 |
+ av_log(h->avctx, AV_LOG_ERROR, "overflow in decode_cabac_mb_mvd\n"); |
|
1511 | 1511 |
return INT_MIN; |
1512 | 1512 |
} |
1513 | 1513 |
} |
... | ... |
@@ -1832,8 +1829,7 @@ static av_always_inline void decode_cabac_luma_residual( H264Context *h, const u |
1832 | 1832 |
static const uint8_t ctx_cat[4][3] = {{0,6,10},{1,7,11},{2,8,12},{5,9,13}}; |
1833 | 1833 |
const uint32_t *qmul; |
1834 | 1834 |
int i8x8, i4x4; |
1835 |
- MpegEncContext * const s = &h->s; |
|
1836 |
- int qscale = p == 0 ? s->qscale : h->chroma_qp[p-1]; |
|
1835 |
+ int qscale = p == 0 ? h->qscale : h->chroma_qp[p-1]; |
|
1837 | 1836 |
if( IS_INTRA16x16( mb_type ) ) { |
1838 | 1837 |
AV_ZERO128(h->mb_luma_dc[p]+0); |
1839 | 1838 |
AV_ZERO128(h->mb_luma_dc[p]+8); |
... | ... |
@@ -1879,28 +1875,27 @@ static av_always_inline void decode_cabac_luma_residual( H264Context *h, const u |
1879 | 1879 |
* @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR if an error is noticed |
1880 | 1880 |
*/ |
1881 | 1881 |
int ff_h264_decode_mb_cabac(H264Context *h) { |
1882 |
- MpegEncContext * const s = &h->s; |
|
1883 | 1882 |
int mb_xy; |
1884 | 1883 |
int mb_type, partition_count, cbp = 0; |
1885 | 1884 |
int dct8x8_allowed= h->pps.transform_8x8_mode; |
1886 | 1885 |
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; |
1887 | 1886 |
const int pixel_shift = h->pixel_shift; |
1888 | 1887 |
|
1889 |
- mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; |
|
1888 |
+ mb_xy = h->mb_xy = h->mb_x + h->mb_y*h->mb_stride; |
|
1890 | 1889 |
|
1891 |
- tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); |
|
1890 |
+ tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y); |
|
1892 | 1891 |
if( h->slice_type_nos != AV_PICTURE_TYPE_I ) { |
1893 | 1892 |
int skip; |
1894 | 1893 |
/* a skipped mb needs the aff flag from the following mb */ |
1895 |
- if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped ) |
|
1894 |
+ if( FRAME_MBAFF && (h->mb_y&1)==1 && h->prev_mb_skipped ) |
|
1896 | 1895 |
skip = h->next_mb_skipped; |
1897 | 1896 |
else |
1898 |
- skip = decode_cabac_mb_skip( h, s->mb_x, s->mb_y ); |
|
1897 |
+ skip = decode_cabac_mb_skip( h, h->mb_x, h->mb_y ); |
|
1899 | 1898 |
/* read skip flags */ |
1900 | 1899 |
if( skip ) { |
1901 |
- if( FRAME_MBAFF && (s->mb_y&1)==0 ){ |
|
1902 |
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP; |
|
1903 |
- h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 ); |
|
1900 |
+ if( FRAME_MBAFF && (h->mb_y&1)==0 ){ |
|
1901 |
+ h->cur_pic.f.mb_type[mb_xy] = MB_TYPE_SKIP; |
|
1902 |
+ h->next_mb_skipped = decode_cabac_mb_skip( h, h->mb_x, h->mb_y+1 ); |
|
1904 | 1903 |
if(!h->next_mb_skipped) |
1905 | 1904 |
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); |
1906 | 1905 |
} |
... | ... |
@@ -1916,7 +1911,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) { |
1916 | 1916 |
} |
1917 | 1917 |
} |
1918 | 1918 |
if(FRAME_MBAFF){ |
1919 |
- if( (s->mb_y&1) == 0 ) |
|
1919 |
+ if( (h->mb_y&1) == 0 ) |
|
1920 | 1920 |
h->mb_mbaff = |
1921 | 1921 |
h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); |
1922 | 1922 |
} |
... | ... |
@@ -2017,10 +2012,10 @@ decode_intra_mb: |
2017 | 2017 |
h->cbp_table[mb_xy] = 0xf7ef; |
2018 | 2018 |
h->chroma_pred_mode_table[mb_xy] = 0; |
2019 | 2019 |
// In deblocking, the quantizer is 0 |
2020 |
- s->current_picture.f.qscale_table[mb_xy] = 0; |
|
2020 |
+ h->cur_pic.f.qscale_table[mb_xy] = 0; |
|
2021 | 2021 |
// All coeffs are present |
2022 | 2022 |
memset(h->non_zero_count[mb_xy], 16, 48); |
2023 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
2023 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
2024 | 2024 |
h->last_qscale_diff = 0; |
2025 | 2025 |
return 0; |
2026 | 2026 |
} |
... | ... |
@@ -2042,7 +2037,7 @@ decode_intra_mb: |
2042 | 2042 |
int pred = pred_intra_mode( h, i ); |
2043 | 2043 |
h->intra4x4_pred_mode_cache[ scan8[i] ] = decode_cabac_mb_intra4x4_pred_mode( h, pred ); |
2044 | 2044 |
|
2045 |
- av_dlog(s->avctx, "i4x4 pred=%d mode=%d\n", pred, |
|
2045 |
+ av_dlog(h->avctx, "i4x4 pred=%d mode=%d\n", pred, |
|
2046 | 2046 |
h->intra4x4_pred_mode_cache[scan8[i]]); |
2047 | 2047 |
} |
2048 | 2048 |
} |
... | ... |
@@ -2097,7 +2092,7 @@ decode_intra_mb: |
2097 | 2097 |
if (rc > 1) { |
2098 | 2098 |
ref[list][i] = decode_cabac_mb_ref( h, list, 4*i ); |
2099 | 2099 |
if (ref[list][i] >= (unsigned) rc) { |
2100 |
- av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref[list][i], rc); |
|
2100 |
+ av_log(h->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref[list][i], rc); |
|
2101 | 2101 |
return -1; |
2102 | 2102 |
} |
2103 | 2103 |
}else |
... | ... |
@@ -2132,7 +2127,7 @@ decode_intra_mb: |
2132 | 2132 |
uint8_t (* mvd_cache)[2]= &h->mvd_cache[list][ scan8[index] ]; |
2133 | 2133 |
pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my); |
2134 | 2134 |
DECODE_CABAC_MB_MVD( h, list, index) |
2135 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
2135 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
2136 | 2136 |
|
2137 | 2137 |
if(IS_SUB_8X8(sub_mb_type)){ |
2138 | 2138 |
mv_cache[ 1 ][0]= |
... | ... |
@@ -2183,7 +2178,7 @@ decode_intra_mb: |
2183 | 2183 |
if (rc > 1) { |
2184 | 2184 |
ref= decode_cabac_mb_ref(h, list, 0); |
2185 | 2185 |
if (ref >= (unsigned) rc) { |
2186 |
- av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2186 |
+ av_log(h->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2187 | 2187 |
return -1; |
2188 | 2188 |
} |
2189 | 2189 |
}else |
... | ... |
@@ -2196,7 +2191,7 @@ decode_intra_mb: |
2196 | 2196 |
int mx,my,mpx,mpy; |
2197 | 2197 |
pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my); |
2198 | 2198 |
DECODE_CABAC_MB_MVD( h, list, 0) |
2199 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
2199 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
2200 | 2200 |
|
2201 | 2201 |
fill_rectangle(h->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack8to16(mpx,mpy), 2); |
2202 | 2202 |
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); |
... | ... |
@@ -2211,7 +2206,7 @@ decode_intra_mb: |
2211 | 2211 |
if (rc > 1) { |
2212 | 2212 |
ref= decode_cabac_mb_ref( h, list, 8*i ); |
2213 | 2213 |
if (ref >= (unsigned) rc) { |
2214 |
- av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2214 |
+ av_log(h->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2215 | 2215 |
return -1; |
2216 | 2216 |
} |
2217 | 2217 |
}else |
... | ... |
@@ -2227,7 +2222,7 @@ decode_intra_mb: |
2227 | 2227 |
int mx,my,mpx,mpy; |
2228 | 2228 |
pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my); |
2229 | 2229 |
DECODE_CABAC_MB_MVD( h, list, 8*i) |
2230 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
2230 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
2231 | 2231 |
|
2232 | 2232 |
fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack8to16(mpx,mpy), 2); |
2233 | 2233 |
fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4); |
... | ... |
@@ -2246,7 +2241,7 @@ decode_intra_mb: |
2246 | 2246 |
if (rc > 1) { |
2247 | 2247 |
ref= decode_cabac_mb_ref( h, list, 4*i ); |
2248 | 2248 |
if (ref >= (unsigned) rc) { |
2249 |
- av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2249 |
+ av_log(h->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, rc); |
|
2250 | 2250 |
return -1; |
2251 | 2251 |
} |
2252 | 2252 |
}else |
... | ... |
@@ -2263,7 +2258,7 @@ decode_intra_mb: |
2263 | 2263 |
pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); |
2264 | 2264 |
DECODE_CABAC_MB_MVD( h, list, 4*i) |
2265 | 2265 |
|
2266 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
2266 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
2267 | 2267 |
fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack8to16(mpx,mpy), 2); |
2268 | 2268 |
fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4); |
2269 | 2269 |
}else{ |
... | ... |
@@ -2314,18 +2309,18 @@ decode_intra_mb: |
2314 | 2314 |
AV_WN32A(&nnz_cache[4+8*10], top_empty); |
2315 | 2315 |
} |
2316 | 2316 |
} |
2317 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
2317 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
2318 | 2318 |
|
2319 | 2319 |
if( cbp || IS_INTRA16x16( mb_type ) ) { |
2320 | 2320 |
const uint8_t *scan, *scan8x8; |
2321 | 2321 |
const uint32_t *qmul; |
2322 | 2322 |
|
2323 | 2323 |
if(IS_INTERLACED(mb_type)){ |
2324 |
- scan8x8= s->qscale ? h->field_scan8x8 : h->field_scan8x8_q0; |
|
2325 |
- scan= s->qscale ? h->field_scan : h->field_scan_q0; |
|
2324 |
+ scan8x8= h->qscale ? h->field_scan8x8 : h->field_scan8x8_q0; |
|
2325 |
+ scan= h->qscale ? h->field_scan : h->field_scan_q0; |
|
2326 | 2326 |
}else{ |
2327 |
- scan8x8= s->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0; |
|
2328 |
- scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0; |
|
2327 |
+ scan8x8= h->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0; |
|
2328 |
+ scan= h->qscale ? h->zigzag_scan : h->zigzag_scan_q0; |
|
2329 | 2329 |
} |
2330 | 2330 |
|
2331 | 2331 |
// decode_cabac_mb_dqp |
... | ... |
@@ -2338,7 +2333,7 @@ decode_intra_mb: |
2338 | 2338 |
ctx= 3; |
2339 | 2339 |
val++; |
2340 | 2340 |
if(val > 2*max_qp){ //prevent infinite loop |
2341 |
- av_log(h->s.avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", s->mb_x, s->mb_y); |
|
2341 |
+ av_log(h->avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", h->mb_x, h->mb_y); |
|
2342 | 2342 |
return -1; |
2343 | 2343 |
} |
2344 | 2344 |
} |
... | ... |
@@ -2348,13 +2343,13 @@ decode_intra_mb: |
2348 | 2348 |
else |
2349 | 2349 |
val= -((val + 1)>>1); |
2350 | 2350 |
h->last_qscale_diff = val; |
2351 |
- s->qscale += val; |
|
2352 |
- if(((unsigned)s->qscale) > max_qp){ |
|
2353 |
- if(s->qscale<0) s->qscale+= max_qp+1; |
|
2354 |
- else s->qscale-= max_qp+1; |
|
2351 |
+ h->qscale += val; |
|
2352 |
+ if(((unsigned)h->qscale) > max_qp){ |
|
2353 |
+ if(h->qscale<0) h->qscale+= max_qp+1; |
|
2354 |
+ else h->qscale-= max_qp+1; |
|
2355 | 2355 |
} |
2356 |
- h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); |
|
2357 |
- h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); |
|
2356 |
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); |
|
2357 |
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); |
|
2358 | 2358 |
}else |
2359 | 2359 |
h->last_qscale_diff=0; |
2360 | 2360 |
|
... | ... |
@@ -2416,7 +2411,7 @@ decode_intra_mb: |
2416 | 2416 |
h->last_qscale_diff = 0; |
2417 | 2417 |
} |
2418 | 2418 |
|
2419 |
- s->current_picture.f.qscale_table[mb_xy] = s->qscale; |
|
2419 |
+ h->cur_pic.f.qscale_table[mb_xy] = h->qscale; |
|
2420 | 2420 |
write_back_non_zero_count(h); |
2421 | 2421 |
|
2422 | 2422 |
return 0; |
... | ... |
@@ -292,7 +292,7 @@ static inline int pred_non_zero_count(H264Context *h, int n){ |
292 | 292 |
|
293 | 293 |
if(i<64) i= (i+1)>>1; |
294 | 294 |
|
295 |
- tprintf(h->s.avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31); |
|
295 |
+ tprintf(h->avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31); |
|
296 | 296 |
|
297 | 297 |
return i&31; |
298 | 298 |
} |
... | ... |
@@ -443,7 +443,6 @@ static inline int get_level_prefix(GetBitContext *gb){ |
443 | 443 |
* @return <0 if an error occurred |
444 | 444 |
*/ |
445 | 445 |
static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff){ |
446 |
- MpegEncContext * const s = &h->s; |
|
447 | 446 |
static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; |
448 | 447 |
int level[16]; |
449 | 448 |
int zeros_left, coeff_token, total_coeff, i, trailing_ones, run_before; |
... | ... |
@@ -474,12 +473,12 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in |
474 | 474 |
if(total_coeff==0) |
475 | 475 |
return 0; |
476 | 476 |
if(total_coeff > (unsigned)max_coeff) { |
477 |
- av_log(h->s.avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", s->mb_x, s->mb_y, total_coeff); |
|
477 |
+ av_log(h->avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", h->mb_x, h->mb_y, total_coeff); |
|
478 | 478 |
return -1; |
479 | 479 |
} |
480 | 480 |
|
481 | 481 |
trailing_ones= coeff_token&3; |
482 |
- tprintf(h->s.avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff); |
|
482 |
+ tprintf(h->avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff); |
|
483 | 483 |
assert(total_coeff<=16); |
484 | 484 |
|
485 | 485 |
i = show_bits(gb, 3); |
... | ... |
@@ -515,7 +514,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in |
515 | 515 |
level_code= 30 + get_bits(gb, prefix-3); //part |
516 | 516 |
if(prefix>=16){ |
517 | 517 |
if(prefix > 25+3){ |
518 |
- av_log(h->s.avctx, AV_LOG_ERROR, "Invalid level prefix\n"); |
|
518 |
+ av_log(h->avctx, AV_LOG_ERROR, "Invalid level prefix\n"); |
|
519 | 519 |
return -1; |
520 | 520 |
} |
521 | 521 |
level_code += (1<<(prefix-3))-4096; |
... | ... |
@@ -611,8 +610,8 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in |
611 | 611 |
} |
612 | 612 |
|
613 | 613 |
if (zeros_left < 0) { |
614 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
615 |
- "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y); |
|
614 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
615 |
+ "negative number of zero coeffs at %d %d\n", h->mb_x, h->mb_y); |
|
616 | 616 |
return AVERROR_INVALIDDATA; |
617 | 617 |
} |
618 | 618 |
|
... | ... |
@@ -627,8 +626,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in |
627 | 627 |
|
628 | 628 |
static av_always_inline int decode_luma_residual(H264Context *h, GetBitContext *gb, const uint8_t *scan, const uint8_t *scan8x8, int pixel_shift, int mb_type, int cbp, int p){ |
629 | 629 |
int i4x4, i8x8; |
630 |
- MpegEncContext * const s = &h->s; |
|
631 |
- int qscale = p == 0 ? s->qscale : h->chroma_qp[p-1]; |
|
630 |
+ int qscale = p == 0 ? h->qscale : h->chroma_qp[p-1]; |
|
632 | 631 |
if(IS_INTRA16x16(mb_type)){ |
633 | 632 |
AV_ZERO128(h->mb_luma_dc[p]+0); |
634 | 633 |
AV_ZERO128(h->mb_luma_dc[p]+8); |
... | ... |
@@ -693,7 +691,6 @@ static av_always_inline int decode_luma_residual(H264Context *h, GetBitContext * |
693 | 693 |
} |
694 | 694 |
|
695 | 695 |
int ff_h264_decode_mb_cavlc(H264Context *h){ |
696 |
- MpegEncContext * const s = &h->s; |
|
697 | 696 |
int mb_xy; |
698 | 697 |
int partition_count; |
699 | 698 |
unsigned int mb_type, cbp; |
... | ... |
@@ -701,32 +698,32 @@ int ff_h264_decode_mb_cavlc(H264Context *h){ |
701 | 701 |
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; |
702 | 702 |
const int pixel_shift = h->pixel_shift; |
703 | 703 |
|
704 |
- mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; |
|
704 |
+ mb_xy = h->mb_xy = h->mb_x + h->mb_y*h->mb_stride; |
|
705 | 705 |
|
706 |
- tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); |
|
706 |
+ tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y); |
|
707 | 707 |
cbp = 0; /* avoid warning. FIXME: find a solution without slowing |
708 | 708 |
down the code */ |
709 | 709 |
if(h->slice_type_nos != AV_PICTURE_TYPE_I){ |
710 |
- if(s->mb_skip_run==-1) |
|
711 |
- s->mb_skip_run= get_ue_golomb(&s->gb); |
|
710 |
+ if(h->mb_skip_run==-1) |
|
711 |
+ h->mb_skip_run= get_ue_golomb(&h->gb); |
|
712 | 712 |
|
713 |
- if (s->mb_skip_run--) { |
|
714 |
- if(FRAME_MBAFF && (s->mb_y&1) == 0){ |
|
715 |
- if(s->mb_skip_run==0) |
|
716 |
- h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); |
|
713 |
+ if (h->mb_skip_run--) { |
|
714 |
+ if(FRAME_MBAFF && (h->mb_y&1) == 0){ |
|
715 |
+ if(h->mb_skip_run==0) |
|
716 |
+ h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&h->gb); |
|
717 | 717 |
} |
718 | 718 |
decode_mb_skip(h); |
719 | 719 |
return 0; |
720 | 720 |
} |
721 | 721 |
} |
722 | 722 |
if(FRAME_MBAFF){ |
723 |
- if( (s->mb_y&1) == 0 ) |
|
724 |
- h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); |
|
723 |
+ if( (h->mb_y&1) == 0 ) |
|
724 |
+ h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&h->gb); |
|
725 | 725 |
} |
726 | 726 |
|
727 | 727 |
h->prev_mb_skipped= 0; |
728 | 728 |
|
729 |
- mb_type= get_ue_golomb(&s->gb); |
|
729 |
+ mb_type= get_ue_golomb(&h->gb); |
|
730 | 730 |
if(h->slice_type_nos == AV_PICTURE_TYPE_B){ |
731 | 731 |
if(mb_type < 23){ |
732 | 732 |
partition_count= b_mb_type_info[mb_type].partition_count; |
... | ... |
@@ -749,7 +746,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){ |
749 | 749 |
mb_type--; |
750 | 750 |
decode_intra_mb: |
751 | 751 |
if(mb_type > 25){ |
752 |
- av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), s->mb_x, s->mb_y); |
|
752 |
+ av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), h->mb_x, h->mb_y); |
|
753 | 753 |
return -1; |
754 | 754 |
} |
755 | 755 |
partition_count=0; |
... | ... |
@@ -769,19 +766,19 @@ decode_intra_mb: |
769 | 769 |
h->sps.bit_depth_luma >> 3; |
770 | 770 |
|
771 | 771 |
// We assume these blocks are very rare so we do not optimize it. |
772 |
- align_get_bits(&s->gb); |
|
772 |
+ align_get_bits(&h->gb); |
|
773 | 773 |
|
774 | 774 |
// The pixels are stored in the same order as levels in h->mb array. |
775 | 775 |
for(x=0; x < mb_size; x++){ |
776 |
- ((uint8_t*)h->mb)[x]= get_bits(&s->gb, 8); |
|
776 |
+ ((uint8_t*)h->mb)[x]= get_bits(&h->gb, 8); |
|
777 | 777 |
} |
778 | 778 |
|
779 | 779 |
// In deblocking, the quantizer is 0 |
780 |
- s->current_picture.f.qscale_table[mb_xy] = 0; |
|
780 |
+ h->cur_pic.f.qscale_table[mb_xy] = 0; |
|
781 | 781 |
// All coeffs are present |
782 | 782 |
memset(h->non_zero_count[mb_xy], 16, 48); |
783 | 783 |
|
784 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
784 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
785 | 785 |
return 0; |
786 | 786 |
} |
787 | 787 |
|
... | ... |
@@ -795,7 +792,7 @@ decode_intra_mb: |
795 | 795 |
if(IS_INTRA4x4(mb_type)){ |
796 | 796 |
int i; |
797 | 797 |
int di = 1; |
798 |
- if(dct8x8_allowed && get_bits1(&s->gb)){ |
|
798 |
+ if(dct8x8_allowed && get_bits1(&h->gb)){ |
|
799 | 799 |
mb_type |= MB_TYPE_8x8DCT; |
800 | 800 |
di = 4; |
801 | 801 |
} |
... | ... |
@@ -804,8 +801,8 @@ decode_intra_mb: |
804 | 804 |
for(i=0; i<16; i+=di){ |
805 | 805 |
int mode= pred_intra_mode(h, i); |
806 | 806 |
|
807 |
- if(!get_bits1(&s->gb)){ |
|
808 |
- const int rem_mode= get_bits(&s->gb, 3); |
|
807 |
+ if(!get_bits1(&h->gb)){ |
|
808 |
+ const int rem_mode= get_bits(&h->gb, 3); |
|
809 | 809 |
mode = rem_mode + (rem_mode >= mode); |
810 | 810 |
} |
811 | 811 |
|
... | ... |
@@ -823,7 +820,7 @@ decode_intra_mb: |
823 | 823 |
return -1; |
824 | 824 |
} |
825 | 825 |
if(decode_chroma){ |
826 |
- pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb), 1); |
|
826 |
+ pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&h->gb), 1); |
|
827 | 827 |
if(pred_mode < 0) |
828 | 828 |
return -1; |
829 | 829 |
h->chroma_pred_mode= pred_mode; |
... | ... |
@@ -835,9 +832,9 @@ decode_intra_mb: |
835 | 835 |
|
836 | 836 |
if(h->slice_type_nos == AV_PICTURE_TYPE_B){ |
837 | 837 |
for(i=0; i<4; i++){ |
838 |
- h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); |
|
838 |
+ h->sub_mb_type[i]= get_ue_golomb_31(&h->gb); |
|
839 | 839 |
if(h->sub_mb_type[i] >=13){ |
840 |
- av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); |
|
840 |
+ av_log(h->avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], h->mb_x, h->mb_y); |
|
841 | 841 |
return -1; |
842 | 842 |
} |
843 | 843 |
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; |
... | ... |
@@ -853,9 +850,9 @@ decode_intra_mb: |
853 | 853 |
}else{ |
854 | 854 |
assert(h->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ? |
855 | 855 |
for(i=0; i<4; i++){ |
856 |
- h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); |
|
856 |
+ h->sub_mb_type[i]= get_ue_golomb_31(&h->gb); |
|
857 | 857 |
if(h->sub_mb_type[i] >=4){ |
858 |
- av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); |
|
858 |
+ av_log(h->avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], h->mb_x, h->mb_y); |
|
859 | 859 |
return -1; |
860 | 860 |
} |
861 | 861 |
sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; |
... | ... |
@@ -872,11 +869,11 @@ decode_intra_mb: |
872 | 872 |
if(ref_count == 1){ |
873 | 873 |
tmp= 0; |
874 | 874 |
}else if(ref_count == 2){ |
875 |
- tmp= get_bits1(&s->gb)^1; |
|
875 |
+ tmp= get_bits1(&h->gb)^1; |
|
876 | 876 |
}else{ |
877 |
- tmp= get_ue_golomb_31(&s->gb); |
|
877 |
+ tmp= get_ue_golomb_31(&h->gb); |
|
878 | 878 |
if(tmp>=ref_count){ |
879 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp); |
|
879 |
+ av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp); |
|
880 | 880 |
return -1; |
881 | 881 |
} |
882 | 882 |
} |
... | ... |
@@ -908,9 +905,9 @@ decode_intra_mb: |
908 | 908 |
const int index= 4*i + block_width*j; |
909 | 909 |
int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ]; |
910 | 910 |
pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my); |
911 |
- mx += get_se_golomb(&s->gb); |
|
912 |
- my += get_se_golomb(&s->gb); |
|
913 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
911 |
+ mx += get_se_golomb(&h->gb); |
|
912 |
+ my += get_se_golomb(&h->gb); |
|
913 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
914 | 914 |
|
915 | 915 |
if(IS_SUB_8X8(sub_mb_type)){ |
916 | 916 |
mv_cache[ 1 ][0]= |
... | ... |
@@ -948,11 +945,11 @@ decode_intra_mb: |
948 | 948 |
if (rc == 1) { |
949 | 949 |
val= 0; |
950 | 950 |
} else if (rc == 2) { |
951 |
- val= get_bits1(&s->gb)^1; |
|
951 |
+ val= get_bits1(&h->gb)^1; |
|
952 | 952 |
}else{ |
953 |
- val= get_ue_golomb_31(&s->gb); |
|
953 |
+ val= get_ue_golomb_31(&h->gb); |
|
954 | 954 |
if (val >= rc) { |
955 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
955 |
+ av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
956 | 956 |
return -1; |
957 | 957 |
} |
958 | 958 |
} |
... | ... |
@@ -962,9 +959,9 @@ decode_intra_mb: |
962 | 962 |
for(list=0; list<h->list_count; list++){ |
963 | 963 |
if(IS_DIR(mb_type, 0, list)){ |
964 | 964 |
pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my); |
965 |
- mx += get_se_golomb(&s->gb); |
|
966 |
- my += get_se_golomb(&s->gb); |
|
967 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
965 |
+ mx += get_se_golomb(&h->gb); |
|
966 |
+ my += get_se_golomb(&h->gb); |
|
967 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
968 | 968 |
|
969 | 969 |
fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); |
970 | 970 |
} |
... | ... |
@@ -979,11 +976,11 @@ decode_intra_mb: |
979 | 979 |
if (rc == 1) { |
980 | 980 |
val= 0; |
981 | 981 |
} else if (rc == 2) { |
982 |
- val= get_bits1(&s->gb)^1; |
|
982 |
+ val= get_bits1(&h->gb)^1; |
|
983 | 983 |
}else{ |
984 |
- val= get_ue_golomb_31(&s->gb); |
|
984 |
+ val= get_ue_golomb_31(&h->gb); |
|
985 | 985 |
if (val >= rc) { |
986 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
986 |
+ av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
987 | 987 |
return -1; |
988 | 988 |
} |
989 | 989 |
} |
... | ... |
@@ -997,9 +994,9 @@ decode_intra_mb: |
997 | 997 |
unsigned int val; |
998 | 998 |
if(IS_DIR(mb_type, i, list)){ |
999 | 999 |
pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my); |
1000 |
- mx += get_se_golomb(&s->gb); |
|
1001 |
- my += get_se_golomb(&s->gb); |
|
1002 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
1000 |
+ mx += get_se_golomb(&h->gb); |
|
1001 |
+ my += get_se_golomb(&h->gb); |
|
1002 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
1003 | 1003 |
|
1004 | 1004 |
val= pack16to32(mx,my); |
1005 | 1005 |
}else |
... | ... |
@@ -1017,11 +1014,11 @@ decode_intra_mb: |
1017 | 1017 |
if (rc == 1) { |
1018 | 1018 |
val= 0; |
1019 | 1019 |
} else if (rc == 2) { |
1020 |
- val= get_bits1(&s->gb)^1; |
|
1020 |
+ val= get_bits1(&h->gb)^1; |
|
1021 | 1021 |
}else{ |
1022 |
- val= get_ue_golomb_31(&s->gb); |
|
1022 |
+ val= get_ue_golomb_31(&h->gb); |
|
1023 | 1023 |
if (val >= rc) { |
1024 |
- av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
1024 |
+ av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val); |
|
1025 | 1025 |
return -1; |
1026 | 1026 |
} |
1027 | 1027 |
} |
... | ... |
@@ -1035,9 +1032,9 @@ decode_intra_mb: |
1035 | 1035 |
unsigned int val; |
1036 | 1036 |
if(IS_DIR(mb_type, i, list)){ |
1037 | 1037 |
pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); |
1038 |
- mx += get_se_golomb(&s->gb); |
|
1039 |
- my += get_se_golomb(&s->gb); |
|
1040 |
- tprintf(s->avctx, "final mv:%d %d\n", mx, my); |
|
1038 |
+ mx += get_se_golomb(&h->gb); |
|
1039 |
+ my += get_se_golomb(&h->gb); |
|
1040 |
+ tprintf(h->avctx, "final mv:%d %d\n", mx, my); |
|
1041 | 1041 |
|
1042 | 1042 |
val= pack16to32(mx,my); |
1043 | 1043 |
}else |
... | ... |
@@ -1052,18 +1049,18 @@ decode_intra_mb: |
1052 | 1052 |
write_back_motion(h, mb_type); |
1053 | 1053 |
|
1054 | 1054 |
if(!IS_INTRA16x16(mb_type)){ |
1055 |
- cbp= get_ue_golomb(&s->gb); |
|
1055 |
+ cbp= get_ue_golomb(&h->gb); |
|
1056 | 1056 |
|
1057 | 1057 |
if(decode_chroma){ |
1058 | 1058 |
if(cbp > 47){ |
1059 |
- av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y); |
|
1059 |
+ av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, h->mb_x, h->mb_y); |
|
1060 | 1060 |
return -1; |
1061 | 1061 |
} |
1062 | 1062 |
if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp]; |
1063 | 1063 |
else cbp= golomb_to_inter_cbp [cbp]; |
1064 | 1064 |
}else{ |
1065 | 1065 |
if(cbp > 15){ |
1066 |
- av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y); |
|
1066 |
+ av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, h->mb_x, h->mb_y); |
|
1067 | 1067 |
return -1; |
1068 | 1068 |
} |
1069 | 1069 |
if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp]; |
... | ... |
@@ -1072,11 +1069,11 @@ decode_intra_mb: |
1072 | 1072 |
} |
1073 | 1073 |
|
1074 | 1074 |
if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){ |
1075 |
- mb_type |= MB_TYPE_8x8DCT*get_bits1(&s->gb); |
|
1075 |
+ mb_type |= MB_TYPE_8x8DCT*get_bits1(&h->gb); |
|
1076 | 1076 |
} |
1077 | 1077 |
h->cbp= |
1078 | 1078 |
h->cbp_table[mb_xy]= cbp; |
1079 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
1079 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
1080 | 1080 |
|
1081 | 1081 |
if(cbp || IS_INTRA16x16(mb_type)){ |
1082 | 1082 |
int i4x4, i8x8, chroma_idx; |
... | ... |
@@ -1087,28 +1084,28 @@ decode_intra_mb: |
1087 | 1087 |
const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8); |
1088 | 1088 |
|
1089 | 1089 |
if(IS_INTERLACED(mb_type)){ |
1090 |
- scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0; |
|
1091 |
- scan= s->qscale ? h->field_scan : h->field_scan_q0; |
|
1090 |
+ scan8x8= h->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0; |
|
1091 |
+ scan= h->qscale ? h->field_scan : h->field_scan_q0; |
|
1092 | 1092 |
}else{ |
1093 |
- scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0; |
|
1094 |
- scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0; |
|
1093 |
+ scan8x8= h->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0; |
|
1094 |
+ scan= h->qscale ? h->zigzag_scan : h->zigzag_scan_q0; |
|
1095 | 1095 |
} |
1096 | 1096 |
|
1097 |
- dquant= get_se_golomb(&s->gb); |
|
1097 |
+ dquant= get_se_golomb(&h->gb); |
|
1098 | 1098 |
|
1099 |
- s->qscale += dquant; |
|
1099 |
+ h->qscale += dquant; |
|
1100 | 1100 |
|
1101 |
- if(((unsigned)s->qscale) > max_qp){ |
|
1102 |
- if(s->qscale<0) s->qscale+= max_qp+1; |
|
1103 |
- else s->qscale-= max_qp+1; |
|
1104 |
- if(((unsigned)s->qscale) > max_qp){ |
|
1105 |
- av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y); |
|
1101 |
+ if(((unsigned)h->qscale) > max_qp){ |
|
1102 |
+ if(h->qscale<0) h->qscale+= max_qp+1; |
|
1103 |
+ else h->qscale-= max_qp+1; |
|
1104 |
+ if(((unsigned)h->qscale) > max_qp){ |
|
1105 |
+ av_log(h->avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, h->mb_x, h->mb_y); |
|
1106 | 1106 |
return -1; |
1107 | 1107 |
} |
1108 | 1108 |
} |
1109 | 1109 |
|
1110 |
- h->chroma_qp[0]= get_chroma_qp(h, 0, s->qscale); |
|
1111 |
- h->chroma_qp[1]= get_chroma_qp(h, 1, s->qscale); |
|
1110 |
+ h->chroma_qp[0]= get_chroma_qp(h, 0, h->qscale); |
|
1111 |
+ h->chroma_qp[1]= get_chroma_qp(h, 1, h->qscale); |
|
1112 | 1112 |
|
1113 | 1113 |
if( (ret = decode_luma_residual(h, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ){ |
1114 | 1114 |
return -1; |
... | ... |
@@ -1176,7 +1173,7 @@ decode_intra_mb: |
1176 | 1176 |
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); |
1177 | 1177 |
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); |
1178 | 1178 |
} |
1179 |
- s->current_picture.f.qscale_table[mb_xy] = s->qscale; |
|
1179 |
+ h->cur_pic.f.qscale_table[mb_xy] = h->qscale; |
|
1180 | 1180 |
write_back_non_zero_count(h); |
1181 | 1181 |
|
1182 | 1182 |
return 0; |
... | ... |
@@ -50,14 +50,13 @@ static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){ |
50 | 50 |
} |
51 | 51 |
|
52 | 52 |
void ff_h264_direct_dist_scale_factor(H264Context * const h){ |
53 |
- MpegEncContext * const s = &h->s; |
|
54 |
- const int poc = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ]; |
|
53 |
+ const int poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]; |
|
55 | 54 |
const int poc1 = h->ref_list[1][0].poc; |
56 | 55 |
int i, field; |
57 | 56 |
|
58 | 57 |
if (FRAME_MBAFF) |
59 | 58 |
for (field = 0; field < 2; field++){ |
60 |
- const int poc = h->s.current_picture_ptr->field_poc[field]; |
|
59 |
+ const int poc = h->cur_pic_ptr->field_poc[field]; |
|
61 | 60 |
const int poc1 = h->ref_list[1][0].field_poc[field]; |
62 | 61 |
for (i = 0; i < 2 * h->ref_count[0]; i++) |
63 | 62 |
h->dist_scale_factor_field[field][i^field] = |
... | ... |
@@ -70,12 +69,11 @@ void ff_h264_direct_dist_scale_factor(H264Context * const h){ |
70 | 70 |
} |
71 | 71 |
|
72 | 72 |
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){ |
73 |
- MpegEncContext * const s = &h->s; |
|
74 | 73 |
Picture * const ref1 = &h->ref_list[1][0]; |
75 | 74 |
int j, old_ref, rfield; |
76 | 75 |
int start= mbafi ? 16 : 0; |
77 | 76 |
int end = mbafi ? 16+2*h->ref_count[0] : h->ref_count[0]; |
78 |
- int interl= mbafi || s->picture_structure != PICT_FRAME; |
|
77 |
+ int interl= mbafi || h->picture_structure != PICT_FRAME; |
|
79 | 78 |
|
80 | 79 |
/* bogus; fills in for missing frames */ |
81 | 80 |
memset(map[list], 0, sizeof(map[list])); |
... | ... |
@@ -104,11 +102,10 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, |
104 | 104 |
} |
105 | 105 |
|
106 | 106 |
void ff_h264_direct_ref_list_init(H264Context * const h){ |
107 |
- MpegEncContext * const s = &h->s; |
|
108 | 107 |
Picture * const ref1 = &h->ref_list[1][0]; |
109 |
- Picture * const cur = s->current_picture_ptr; |
|
108 |
+ Picture * const cur = h->cur_pic_ptr; |
|
110 | 109 |
int list, j, field; |
111 |
- int sidx= (s->picture_structure&1)^1; |
|
110 |
+ int sidx= (h->picture_structure&1)^1; |
|
112 | 111 |
int ref1sidx = (ref1->f.reference&1)^1; |
113 | 112 |
|
114 | 113 |
for(list=0; list<2; list++){ |
... | ... |
@@ -117,7 +114,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){ |
117 | 117 |
cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3); |
118 | 118 |
} |
119 | 119 |
|
120 |
- if(s->picture_structure == PICT_FRAME){ |
|
120 |
+ if(h->picture_structure == PICT_FRAME){ |
|
121 | 121 |
memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0])); |
122 | 122 |
memcpy(cur->ref_poc [1], cur->ref_poc [0], sizeof(cur->ref_poc [0])); |
123 | 123 |
} |
... | ... |
@@ -125,12 +122,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){ |
125 | 125 |
cur->mbaff= FRAME_MBAFF; |
126 | 126 |
|
127 | 127 |
h->col_fieldoff= 0; |
128 |
- if(s->picture_structure == PICT_FRAME){ |
|
129 |
- int cur_poc = s->current_picture_ptr->poc; |
|
128 |
+ if(h->picture_structure == PICT_FRAME){ |
|
129 |
+ int cur_poc = h->cur_pic_ptr->poc; |
|
130 | 130 |
int *col_poc = h->ref_list[1]->field_poc; |
131 | 131 |
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc)); |
132 | 132 |
ref1sidx=sidx= h->col_parity; |
133 |
- } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity |
|
133 |
+ } else if (!(h->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity |
|
134 | 134 |
h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3; |
135 | 135 |
} |
136 | 136 |
|
... | ... |
@@ -149,9 +146,9 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y |
149 | 149 |
{ |
150 | 150 |
int ref_field = ref->f.reference - 1; |
151 | 151 |
int ref_field_picture = ref->field_picture; |
152 |
- int ref_height = 16*h->s.mb_height >> ref_field_picture; |
|
152 |
+ int ref_height = 16*h->mb_height >> ref_field_picture; |
|
153 | 153 |
|
154 |
- if(!HAVE_THREADS || !(h->s.avctx->active_thread_type&FF_THREAD_FRAME)) |
|
154 |
+ if(!HAVE_THREADS || !(h->avctx->active_thread_type&FF_THREAD_FRAME)) |
|
155 | 155 |
return; |
156 | 156 |
|
157 | 157 |
//FIXME it can be safe to access mb stuff |
... | ... |
@@ -163,10 +160,9 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y |
163 | 163 |
} |
164 | 164 |
|
165 | 165 |
static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ |
166 |
- MpegEncContext * const s = &h->s; |
|
167 | 166 |
int b8_stride = 2; |
168 | 167 |
int b4_stride = h->b_stride; |
169 |
- int mb_xy = h->mb_xy, mb_y = s->mb_y; |
|
168 |
+ int mb_xy = h->mb_xy, mb_y = h->mb_y; |
|
170 | 169 |
int mb_type_col[2]; |
171 | 170 |
const int16_t (*l1mv0)[2], (*l1mv1)[2]; |
172 | 171 |
const int8_t *l1ref0, *l1ref1; |
... | ... |
@@ -179,7 +175,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ |
179 | 179 |
|
180 | 180 |
assert(h->ref_list[1][0].f.reference & 3); |
181 | 181 |
|
182 |
- await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type)); |
|
182 |
+ await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type)); |
|
183 | 183 |
|
184 | 184 |
#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM) |
185 | 185 |
|
... | ... |
@@ -241,21 +237,21 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){ |
241 | 241 |
|
242 | 242 |
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL |
243 | 243 |
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL |
244 |
- mb_y = (s->mb_y&~1) + h->col_parity; |
|
245 |
- mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride; |
|
244 |
+ mb_y = (h->mb_y&~1) + h->col_parity; |
|
245 |
+ mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride; |
|
246 | 246 |
b8_stride = 0; |
247 | 247 |
}else{ |
248 | 248 |
mb_y += h->col_fieldoff; |
249 |
- mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity |
|
249 |
+ mb_xy += h->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity |
|
250 | 250 |
} |
251 | 251 |
goto single_col; |
252 | 252 |
}else{ // AFL/AFR/FR/FL -> AFR/FR |
253 | 253 |
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR |
254 |
- mb_y = s->mb_y&~1; |
|
255 |
- mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride; |
|
254 |
+ mb_y = h->mb_y&~1; |
|
255 |
+ mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride; |
|
256 | 256 |
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy]; |
257 |
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride]; |
|
258 |
- b8_stride = 2+4*s->mb_stride; |
|
257 |
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride]; |
|
258 |
+ b8_stride = 2+4*h->mb_stride; |
|
259 | 259 |
b4_stride *= 6; |
260 | 260 |
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) { |
261 | 261 |
mb_type_col[0] &= ~MB_TYPE_INTERLACED; |
... | ... |
@@ -298,7 +294,7 @@ single_col: |
298 | 298 |
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy]; |
299 | 299 |
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy]; |
300 | 300 |
if(!b8_stride){ |
301 |
- if(s->mb_y&1){ |
|
301 |
+ if(h->mb_y&1){ |
|
302 | 302 |
l1ref0 += 2; |
303 | 303 |
l1ref1 += 2; |
304 | 304 |
l1mv0 += 2*b4_stride; |
... | ... |
@@ -414,10 +410,9 @@ single_col: |
414 | 414 |
} |
415 | 415 |
|
416 | 416 |
static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ |
417 |
- MpegEncContext * const s = &h->s; |
|
418 | 417 |
int b8_stride = 2; |
419 | 418 |
int b4_stride = h->b_stride; |
420 |
- int mb_xy = h->mb_xy, mb_y = s->mb_y; |
|
419 |
+ int mb_xy = h->mb_xy, mb_y = h->mb_y; |
|
421 | 420 |
int mb_type_col[2]; |
422 | 421 |
const int16_t (*l1mv0)[2], (*l1mv1)[2]; |
423 | 422 |
const int8_t *l1ref0, *l1ref1; |
... | ... |
@@ -427,25 +422,25 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ |
427 | 427 |
|
428 | 428 |
assert(h->ref_list[1][0].f.reference & 3); |
429 | 429 |
|
430 |
- await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type)); |
|
430 |
+ await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type)); |
|
431 | 431 |
|
432 | 432 |
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL |
433 | 433 |
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL |
434 |
- mb_y = (s->mb_y&~1) + h->col_parity; |
|
435 |
- mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride; |
|
434 |
+ mb_y = (h->mb_y&~1) + h->col_parity; |
|
435 |
+ mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride; |
|
436 | 436 |
b8_stride = 0; |
437 | 437 |
}else{ |
438 | 438 |
mb_y += h->col_fieldoff; |
439 |
- mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity |
|
439 |
+ mb_xy += h->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity |
|
440 | 440 |
} |
441 | 441 |
goto single_col; |
442 | 442 |
}else{ // AFL/AFR/FR/FL -> AFR/FR |
443 | 443 |
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR |
444 |
- mb_y = s->mb_y&~1; |
|
445 |
- mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride; |
|
444 |
+ mb_y = h->mb_y&~1; |
|
445 |
+ mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride; |
|
446 | 446 |
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy]; |
447 |
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride]; |
|
448 |
- b8_stride = 2+4*s->mb_stride; |
|
447 |
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride]; |
|
448 |
+ b8_stride = 2+4*h->mb_stride; |
|
449 | 449 |
b4_stride *= 6; |
450 | 450 |
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) { |
451 | 451 |
mb_type_col[0] &= ~MB_TYPE_INTERLACED; |
... | ... |
@@ -489,7 +484,7 @@ single_col: |
489 | 489 |
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy]; |
490 | 490 |
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy]; |
491 | 491 |
if(!b8_stride){ |
492 |
- if(s->mb_y&1){ |
|
492 |
+ if(h->mb_y&1){ |
|
493 | 493 |
l1ref0 += 2; |
494 | 494 |
l1ref1 += 2; |
495 | 495 |
l1mv0 += 2*b4_stride; |
... | ... |
@@ -503,9 +498,9 @@ single_col: |
503 | 503 |
int ref_offset; |
504 | 504 |
|
505 | 505 |
if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){ |
506 |
- map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0]; |
|
507 |
- map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1]; |
|
508 |
- dist_scale_factor =h->dist_scale_factor_field[s->mb_y&1]; |
|
506 |
+ map_col_to_list0[0] = h->map_col_to_list0_field[h->mb_y&1][0]; |
|
507 |
+ map_col_to_list0[1] = h->map_col_to_list0_field[h->mb_y&1][1]; |
|
508 |
+ dist_scale_factor =h->dist_scale_factor_field[h->mb_y&1]; |
|
509 | 509 |
} |
510 | 510 |
ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0 |
511 | 511 |
|
... | ... |
@@ -244,8 +244,7 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h, |
244 | 244 |
unsigned int uvlinesize, |
245 | 245 |
int pixel_shift) |
246 | 246 |
{ |
247 |
- MpegEncContext * const s = &h->s; |
|
248 |
- int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)); |
|
247 |
+ int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); |
|
249 | 248 |
int chroma444 = CHROMA444; |
250 | 249 |
int chroma422 = CHROMA422; |
251 | 250 |
|
... | ... |
@@ -257,10 +256,10 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h, |
257 | 257 |
int a = h->slice_alpha_c0_offset - qp_bd_offset; |
258 | 258 |
int b = h->slice_beta_offset - qp_bd_offset; |
259 | 259 |
|
260 |
- int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
261 |
- int qp = s->current_picture.f.qscale_table[mb_xy]; |
|
262 |
- int qp0 = s->current_picture.f.qscale_table[mb_xy - 1]; |
|
263 |
- int qp1 = s->current_picture.f.qscale_table[h->top_mb_xy]; |
|
260 |
+ int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
261 |
+ int qp = h->cur_pic.f.qscale_table[mb_xy]; |
|
262 |
+ int qp0 = h->cur_pic.f.qscale_table[mb_xy - 1]; |
|
263 |
+ int qp1 = h->cur_pic.f.qscale_table[h->top_mb_xy]; |
|
264 | 264 |
int qpc = get_chroma_qp( h, 0, qp ); |
265 | 265 |
int qpc0 = get_chroma_qp( h, 0, qp0 ); |
266 | 266 |
int qpc1 = get_chroma_qp( h, 0, qp1 ); |
... | ... |
@@ -465,7 +464,6 @@ static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){ |
465 | 465 |
} |
466 | 466 |
|
467 | 467 |
static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int dir) { |
468 |
- MpegEncContext * const s = &h->s; |
|
469 | 468 |
int edge; |
470 | 469 |
int chroma_qp_avg[2]; |
471 | 470 |
int chroma444 = CHROMA444; |
... | ... |
@@ -493,16 +491,16 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
493 | 493 |
// |
494 | 494 |
unsigned int tmp_linesize = 2 * linesize; |
495 | 495 |
unsigned int tmp_uvlinesize = 2 * uvlinesize; |
496 |
- int mbn_xy = mb_xy - 2 * s->mb_stride; |
|
496 |
+ int mbn_xy = mb_xy - 2 * h->mb_stride; |
|
497 | 497 |
int j; |
498 | 498 |
|
499 |
- for(j=0; j<2; j++, mbn_xy += s->mb_stride){ |
|
499 |
+ for(j=0; j<2; j++, mbn_xy += h->mb_stride){ |
|
500 | 500 |
DECLARE_ALIGNED(8, int16_t, bS)[4]; |
501 | 501 |
int qp; |
502 |
- if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) { |
|
502 |
+ if (IS_INTRA(mb_type | h->cur_pic.f.mb_type[mbn_xy])) { |
|
503 | 503 |
AV_WN64A(bS, 0x0003000300030003ULL); |
504 | 504 |
} else { |
505 |
- if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) { |
|
505 |
+ if (!CABAC && IS_8x8DCT(h->cur_pic.f.mb_type[mbn_xy])) { |
|
506 | 506 |
bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]); |
507 | 507 |
bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]); |
508 | 508 |
bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]); |
... | ... |
@@ -517,12 +515,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
517 | 517 |
} |
518 | 518 |
// Do not use s->qscale as luma quantizer because it has not the same |
519 | 519 |
// value in IPCM macroblocks. |
520 |
- qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1; |
|
521 |
- tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); |
|
522 |
- { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } |
|
520 |
+ qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1; |
|
521 |
+ tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); |
|
522 |
+ { int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); } |
|
523 | 523 |
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 ); |
524 |
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; |
|
525 |
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; |
|
524 |
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1; |
|
525 |
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1; |
|
526 | 526 |
if (chroma) { |
527 | 527 |
if (chroma444) { |
528 | 528 |
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0); |
... | ... |
@@ -540,7 +538,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
540 | 540 |
if( IS_INTRA(mb_type|mbm_type)) { |
541 | 541 |
AV_WN64A(bS, 0x0003000300030003ULL); |
542 | 542 |
if ( (!IS_INTERLACED(mb_type|mbm_type)) |
543 |
- || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) |
|
543 |
+ || ((FRAME_MBAFF || (h->picture_structure != PICT_FRAME)) && (dir == 0)) |
|
544 | 544 |
) |
545 | 545 |
AV_WN64A(bS, 0x0004000400040004ULL); |
546 | 546 |
} else { |
... | ... |
@@ -582,10 +580,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
582 | 582 |
// Do not use s->qscale as luma quantizer because it has not the same |
583 | 583 |
// value in IPCM macroblocks. |
584 | 584 |
if(bS[0]+bS[1]+bS[2]+bS[3]){ |
585 |
- qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1; |
|
586 |
- tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); |
|
587 |
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; |
|
588 |
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; |
|
585 |
+ qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1; |
|
586 |
+ tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); |
|
587 |
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1; |
|
588 |
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1; |
|
589 | 589 |
if( dir == 0 ) { |
590 | 590 |
filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 ); |
591 | 591 |
if (chroma) { |
... | ... |
@@ -665,8 +663,8 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
665 | 665 |
/* Filter edge */ |
666 | 666 |
// Do not use s->qscale as luma quantizer because it has not the same |
667 | 667 |
// value in IPCM macroblocks. |
668 |
- qp = s->current_picture.f.qscale_table[mb_xy]; |
|
669 |
- tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); |
|
668 |
+ qp = h->cur_pic.f.qscale_table[mb_xy]; |
|
669 |
+ tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); |
|
670 | 670 |
if( dir == 0 ) { |
671 | 671 |
filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 ); |
672 | 672 |
if (chroma) { |
... | ... |
@@ -703,13 +701,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u |
703 | 703 |
} |
704 | 704 |
|
705 | 705 |
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { |
706 |
- MpegEncContext * const s = &h->s; |
|
707 |
- const int mb_xy= mb_x + mb_y*s->mb_stride; |
|
708 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
706 |
+ const int mb_xy= mb_x + mb_y*h->mb_stride; |
|
707 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
709 | 708 |
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; |
710 | 709 |
int first_vertical_edge_done = 0; |
711 | 710 |
av_unused int dir; |
712 |
- int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)); |
|
711 |
+ int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); |
|
713 | 712 |
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); |
714 | 713 |
int a = h->slice_alpha_c0_offset - qp_bd_offset; |
715 | 714 |
int b = h->slice_beta_offset - qp_bd_offset; |
... | ... |
@@ -761,9 +758,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint |
761 | 761 |
} |
762 | 762 |
} |
763 | 763 |
|
764 |
- mb_qp = s->current_picture.f.qscale_table[mb_xy]; |
|
765 |
- mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]]; |
|
766 |
- mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]]; |
|
764 |
+ mb_qp = h->cur_pic.f.qscale_table[mb_xy]; |
|
765 |
+ mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]]; |
|
766 |
+ mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]]; |
|
767 | 767 |
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1; |
768 | 768 |
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) + |
769 | 769 |
get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1; |
... | ... |
@@ -776,8 +773,8 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint |
776 | 776 |
get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1; |
777 | 777 |
|
778 | 778 |
/* Filter edge */ |
779 |
- tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); |
|
780 |
- { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } |
|
779 |
+ tprintf(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); |
|
780 |
+ { int i; for (i = 0; i < 8; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); } |
|
781 | 781 |
if(MB_FIELD){ |
782 | 782 |
filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0], a, b, 1 ); |
783 | 783 |
filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1], a, b, 1 ); |
... | ... |
@@ -40,39 +40,38 @@ |
40 | 40 |
|
41 | 41 |
static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
42 | 42 |
{ |
43 |
- MpegEncContext *const s = &h->s; |
|
44 |
- const int mb_x = s->mb_x; |
|
45 |
- const int mb_y = s->mb_y; |
|
43 |
+ const int mb_x = h->mb_x; |
|
44 |
+ const int mb_y = h->mb_y; |
|
46 | 45 |
const int mb_xy = h->mb_xy; |
47 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
46 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
48 | 47 |
uint8_t *dest_y, *dest_cb, *dest_cr; |
49 | 48 |
int linesize, uvlinesize /*dct_offset*/; |
50 | 49 |
int i, j; |
51 | 50 |
int *block_offset = &h->block_offset[0]; |
52 |
- const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass); |
|
51 |
+ const int transform_bypass = !SIMPLE && (h->qscale == 0 && h->sps.transform_bypass); |
|
53 | 52 |
/* is_h264 should always be true if SVQ3 is disabled. */ |
54 |
- const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE || s->codec_id == AV_CODEC_ID_H264; |
|
53 |
+ const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE || h->avctx->codec_id == AV_CODEC_ID_H264; |
|
55 | 54 |
void (*idct_add)(uint8_t *dst, int16_t *block, int stride); |
56 |
- const int block_h = 16 >> s->chroma_y_shift; |
|
55 |
+ const int block_h = 16 >> h->chroma_y_shift; |
|
57 | 56 |
const int chroma422 = CHROMA422; |
58 | 57 |
|
59 |
- dest_y = s->current_picture.f.data[0] + ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16; |
|
60 |
- dest_cb = s->current_picture.f.data[1] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h; |
|
61 |
- dest_cr = s->current_picture.f.data[2] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h; |
|
58 |
+ dest_y = h->cur_pic.f.data[0] + ((mb_x << PIXEL_SHIFT) + mb_y * h->linesize) * 16; |
|
59 |
+ dest_cb = h->cur_pic.f.data[1] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h; |
|
60 |
+ dest_cr = h->cur_pic.f.data[2] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h; |
|
62 | 61 |
|
63 |
- s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * s->linesize + (64 << PIXEL_SHIFT), s->linesize, 4); |
|
64 |
- s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * s->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2); |
|
62 |
+ h->vdsp.prefetch(dest_y + (h->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT), h->linesize, 4); |
|
63 |
+ h->vdsp.prefetch(dest_cb + (h->mb_x & 7) * h->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2); |
|
65 | 64 |
|
66 | 65 |
h->list_counts[mb_xy] = h->list_count; |
67 | 66 |
|
68 | 67 |
if (!SIMPLE && MB_FIELD) { |
69 |
- linesize = h->mb_linesize = s->linesize * 2; |
|
70 |
- uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; |
|
68 |
+ linesize = h->mb_linesize = h->linesize * 2; |
|
69 |
+ uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2; |
|
71 | 70 |
block_offset = &h->block_offset[48]; |
72 | 71 |
if (mb_y & 1) { // FIXME move out of this function? |
73 |
- dest_y -= s->linesize * 15; |
|
74 |
- dest_cb -= s->uvlinesize * (block_h - 1); |
|
75 |
- dest_cr -= s->uvlinesize * (block_h - 1); |
|
72 |
+ dest_y -= h->linesize * 15; |
|
73 |
+ dest_cb -= h->uvlinesize * (block_h - 1); |
|
74 |
+ dest_cr -= h->uvlinesize * (block_h - 1); |
|
76 | 75 |
} |
77 | 76 |
if (FRAME_MBAFF) { |
78 | 77 |
int list; |
... | ... |
@@ -81,20 +80,20 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
81 | 81 |
continue; |
82 | 82 |
if (IS_16X16(mb_type)) { |
83 | 83 |
int8_t *ref = &h->ref_cache[list][scan8[0]]; |
84 |
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1); |
|
84 |
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (h->mb_y & 1), 1); |
|
85 | 85 |
} else { |
86 | 86 |
for (i = 0; i < 16; i += 4) { |
87 | 87 |
int ref = h->ref_cache[list][scan8[i]]; |
88 | 88 |
if (ref >= 0) |
89 | 89 |
fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, |
90 |
- 8, (16 + ref) ^ (s->mb_y & 1), 1); |
|
90 |
+ 8, (16 + ref) ^ (h->mb_y & 1), 1); |
|
91 | 91 |
} |
92 | 92 |
} |
93 | 93 |
} |
94 | 94 |
} |
95 | 95 |
} else { |
96 |
- linesize = h->mb_linesize = s->linesize; |
|
97 |
- uvlinesize = h->mb_uvlinesize = s->uvlinesize; |
|
96 |
+ linesize = h->mb_linesize = h->linesize; |
|
97 |
+ uvlinesize = h->mb_uvlinesize = h->uvlinesize; |
|
98 | 98 |
// dct_offset = s->linesize * 16; |
99 | 99 |
} |
100 | 100 |
|
... | ... |
@@ -111,7 +110,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
111 | 111 |
for (j = 0; j < 16; j++) |
112 | 112 |
tmp_y[j] = get_bits(&gb, bit_depth); |
113 | 113 |
} |
114 |
- if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
114 |
+ if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
115 | 115 |
if (!h->sps.chroma_format_idc) { |
116 | 116 |
for (i = 0; i < block_h; i++) { |
117 | 117 |
uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize); |
... | ... |
@@ -139,7 +138,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
139 | 139 |
} else { |
140 | 140 |
for (i = 0; i < 16; i++) |
141 | 141 |
memcpy(dest_y + i * linesize, (uint8_t *)h->mb + i * 16, 16); |
142 |
- if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
142 |
+ if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
143 | 143 |
if (!h->sps.chroma_format_idc) { |
144 | 144 |
for (i = 0; i < block_h; i++) { |
145 | 145 |
memset(dest_cb + i * uvlinesize, 128, 8); |
... | ... |
@@ -161,7 +160,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
161 | 161 |
xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, |
162 | 162 |
uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT); |
163 | 163 |
|
164 |
- if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { |
|
164 |
+ if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { |
|
165 | 165 |
h->hpc.pred8x8[h->chroma_pred_mode](dest_cb, uvlinesize); |
166 | 166 |
h->hpc.pred8x8[h->chroma_pred_mode](dest_cr, uvlinesize); |
167 | 167 |
} |
... | ... |
@@ -176,14 +175,14 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
176 | 176 |
} else if (is_h264) { |
177 | 177 |
if (chroma422) { |
178 | 178 |
FUNC(hl_motion_422)(h, dest_y, dest_cb, dest_cr, |
179 |
- s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
180 |
- s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
179 |
+ h->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
180 |
+ h->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
181 | 181 |
h->h264dsp.weight_h264_pixels_tab, |
182 | 182 |
h->h264dsp.biweight_h264_pixels_tab); |
183 | 183 |
} else { |
184 | 184 |
FUNC(hl_motion_420)(h, dest_y, dest_cb, dest_cr, |
185 |
- s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
186 |
- s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
185 |
+ h->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
186 |
+ h->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
187 | 187 |
h->h264dsp.weight_h264_pixels_tab, |
188 | 188 |
h->h264dsp.biweight_h264_pixels_tab); |
189 | 189 |
} |
... | ... |
@@ -192,7 +191,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
192 | 192 |
hl_decode_mb_idct_luma(h, mb_type, is_h264, SIMPLE, transform_bypass, |
193 | 193 |
PIXEL_SHIFT, block_offset, linesize, dest_y, 0); |
194 | 194 |
|
195 |
- if ((SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) && |
|
195 |
+ if ((SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) && |
|
196 | 196 |
(h->cbp & 0x30)) { |
197 | 197 |
uint8_t *dest[2] = { dest_cb, dest_cr }; |
198 | 198 |
if (transform_bypass) { |
... | ... |
@@ -208,7 +207,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
208 | 208 |
h->mb + (16 * 16 * 2 << PIXEL_SHIFT), |
209 | 209 |
uvlinesize); |
210 | 210 |
} else { |
211 |
- idct_add = s->dsp.add_pixels4; |
|
211 |
+ idct_add = h->dsp.add_pixels4; |
|
212 | 212 |
for (j = 1; j < 3; j++) { |
213 | 213 |
for (i = j * 16; i < j * 16 + 4; i++) |
214 | 214 |
if (h->non_zero_count_cache[scan8[i]] || |
... | ... |
@@ -256,7 +255,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
256 | 256 |
uint8_t *const ptr = dest[j - 1] + block_offset[i]; |
257 | 257 |
ff_svq3_add_idct_c(ptr, h->mb + i * 16, |
258 | 258 |
uvlinesize, |
259 |
- ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2); |
|
259 |
+ ff_h264_chroma_qp[0][h->qscale + 12] - 12, 2); |
|
260 | 260 |
} |
261 | 261 |
} |
262 | 262 |
} |
... | ... |
@@ -264,8 +263,8 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
264 | 264 |
} |
265 | 265 |
} |
266 | 266 |
if (h->cbp || IS_INTRA(mb_type)) { |
267 |
- s->dsp.clear_blocks(h->mb); |
|
268 |
- s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT)); |
|
267 |
+ h->dsp.clear_blocks(h->mb); |
|
268 |
+ h->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT)); |
|
269 | 269 |
} |
270 | 270 |
} |
271 | 271 |
|
... | ... |
@@ -277,33 +276,32 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h) |
277 | 277 |
|
278 | 278 |
static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h) |
279 | 279 |
{ |
280 |
- MpegEncContext *const s = &h->s; |
|
281 |
- const int mb_x = s->mb_x; |
|
282 |
- const int mb_y = s->mb_y; |
|
280 |
+ const int mb_x = h->mb_x; |
|
281 |
+ const int mb_y = h->mb_y; |
|
283 | 282 |
const int mb_xy = h->mb_xy; |
284 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
283 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
285 | 284 |
uint8_t *dest[3]; |
286 | 285 |
int linesize; |
287 | 286 |
int i, j, p; |
288 | 287 |
int *block_offset = &h->block_offset[0]; |
289 |
- const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass); |
|
290 |
- const int plane_count = (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) ? 3 : 1; |
|
288 |
+ const int transform_bypass = !SIMPLE && (h->qscale == 0 && h->sps.transform_bypass); |
|
289 |
+ const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) ? 3 : 1; |
|
291 | 290 |
|
292 | 291 |
for (p = 0; p < plane_count; p++) { |
293 |
- dest[p] = s->current_picture.f.data[p] + |
|
294 |
- ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16; |
|
295 |
- s->vdsp.prefetch(dest[p] + (s->mb_x & 3) * 4 * s->linesize + (64 << PIXEL_SHIFT), |
|
296 |
- s->linesize, 4); |
|
292 |
+ dest[p] = h->cur_pic.f.data[p] + |
|
293 |
+ ((mb_x << PIXEL_SHIFT) + mb_y * h->linesize) * 16; |
|
294 |
+ h->vdsp.prefetch(dest[p] + (h->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT), |
|
295 |
+ h->linesize, 4); |
|
297 | 296 |
} |
298 | 297 |
|
299 | 298 |
h->list_counts[mb_xy] = h->list_count; |
300 | 299 |
|
301 | 300 |
if (!SIMPLE && MB_FIELD) { |
302 |
- linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize * 2; |
|
301 |
+ linesize = h->mb_linesize = h->mb_uvlinesize = h->linesize * 2; |
|
303 | 302 |
block_offset = &h->block_offset[48]; |
304 | 303 |
if (mb_y & 1) // FIXME move out of this function? |
305 | 304 |
for (p = 0; p < 3; p++) |
306 |
- dest[p] -= s->linesize * 15; |
|
305 |
+ dest[p] -= h->linesize * 15; |
|
307 | 306 |
if (FRAME_MBAFF) { |
308 | 307 |
int list; |
309 | 308 |
for (list = 0; list < h->list_count; list++) { |
... | ... |
@@ -311,19 +309,19 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h) |
311 | 311 |
continue; |
312 | 312 |
if (IS_16X16(mb_type)) { |
313 | 313 |
int8_t *ref = &h->ref_cache[list][scan8[0]]; |
314 |
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1); |
|
314 |
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (h->mb_y & 1), 1); |
|
315 | 315 |
} else { |
316 | 316 |
for (i = 0; i < 16; i += 4) { |
317 | 317 |
int ref = h->ref_cache[list][scan8[i]]; |
318 | 318 |
if (ref >= 0) |
319 | 319 |
fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, |
320 |
- 8, (16 + ref) ^ (s->mb_y & 1), 1); |
|
320 |
+ 8, (16 + ref) ^ (h->mb_y & 1), 1); |
|
321 | 321 |
} |
322 | 322 |
} |
323 | 323 |
} |
324 | 324 |
} |
325 | 325 |
} else { |
326 |
- linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize; |
|
326 |
+ linesize = h->mb_linesize = h->mb_uvlinesize = h->linesize; |
|
327 | 327 |
} |
328 | 328 |
|
329 | 329 |
if (!SIMPLE && IS_INTRA_PCM(mb_type)) { |
... | ... |
@@ -360,8 +358,8 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h) |
360 | 360 |
linesize, 0, 1, SIMPLE, PIXEL_SHIFT); |
361 | 361 |
} else { |
362 | 362 |
FUNC(hl_motion_444)(h, dest[0], dest[1], dest[2], |
363 |
- s->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
364 |
- s->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
363 |
+ h->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, |
|
364 |
+ h->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, |
|
365 | 365 |
h->h264dsp.weight_h264_pixels_tab, |
366 | 366 |
h->h264dsp.biweight_h264_pixels_tab); |
367 | 367 |
} |
... | ... |
@@ -372,8 +370,8 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h) |
372 | 372 |
dest[p], p); |
373 | 373 |
} |
374 | 374 |
if (h->cbp || IS_INTRA(mb_type)) { |
375 |
- s->dsp.clear_blocks(h->mb); |
|
376 |
- s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT)); |
|
375 |
+ h->dsp.clear_blocks(h->mb); |
|
376 |
+ h->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT)); |
|
377 | 377 |
} |
378 | 378 |
} |
379 | 379 |
|
... | ... |
@@ -46,7 +46,7 @@ static void mc_part(H264Context *h, int n, int square, |
46 | 46 |
int list0, int list1) |
47 | 47 |
{ |
48 | 48 |
if ((h->use_weight == 2 && list0 && list1 && |
49 |
- (h->implicit_weight[h->ref_cache[0][scan8[n]]][h->ref_cache[1][scan8[n]]][h->s.mb_y & 1] != 32)) || |
|
49 |
+ (h->implicit_weight[h->ref_cache[0][scan8[n]]][h->ref_cache[1][scan8[n]]][h->mb_y & 1] != 32)) || |
|
50 | 50 |
h->use_weight == 1) |
51 | 51 |
mc_part_weighted(h, n, square, height, delta, dest_y, dest_cb, dest_cr, |
52 | 52 |
x_offset, y_offset, qpix_put, chroma_put, |
... | ... |
@@ -67,13 +67,12 @@ static void MCFUNC(hl_motion)(H264Context *h, uint8_t *dest_y, |
67 | 67 |
h264_weight_func *weight_op, |
68 | 68 |
h264_biweight_func *weight_avg) |
69 | 69 |
{ |
70 |
- MpegEncContext *const s = &h->s; |
|
71 | 70 |
const int mb_xy = h->mb_xy; |
72 |
- const int mb_type = s->current_picture.f.mb_type[mb_xy]; |
|
71 |
+ const int mb_type = h->cur_pic.f.mb_type[mb_xy]; |
|
73 | 72 |
|
74 | 73 |
assert(IS_INTER(mb_type)); |
75 | 74 |
|
76 |
- if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) |
|
75 |
+ if (HAVE_THREADS && (h->avctx->active_thread_type & FF_THREAD_FRAME)) |
|
77 | 76 |
await_references(h); |
78 | 77 |
prefetch_motion(h, 0, PIXEL_SHIFT, CHROMA_IDC); |
79 | 78 |
|
... | ... |
@@ -39,32 +39,31 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, |
39 | 39 |
int i, int list, int part_width) |
40 | 40 |
{ |
41 | 41 |
const int topright_ref = h->ref_cache[list][i - 8 + part_width]; |
42 |
- MpegEncContext *s = &h->s; |
|
43 | 42 |
|
44 | 43 |
/* there is no consistent mapping of mvs to neighboring locations that will |
45 | 44 |
* make mbaff happy, so we can't move all this logic to fill_caches */ |
46 | 45 |
if (FRAME_MBAFF) { |
47 | 46 |
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4) \ |
48 | 47 |
const int xy = XY, y4 = Y4; \ |
49 |
- const int mb_type = mb_types[xy + (y4 >> 2) * s->mb_stride]; \ |
|
48 |
+ const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride]; \ |
|
50 | 49 |
if (!USES_LIST(mb_type, list)) \ |
51 | 50 |
return LIST_NOT_USED; \ |
52 |
- mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \ |
|
51 |
+ mv = h->cur_pic_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \ |
|
53 | 52 |
h->mv_cache[list][scan8[0] - 2][0] = mv[0]; \ |
54 | 53 |
h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP; \ |
55 |
- return s->current_picture_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP; |
|
54 |
+ return h->cur_pic_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP; |
|
56 | 55 |
|
57 | 56 |
if (topright_ref == PART_NOT_AVAILABLE |
58 | 57 |
&& i >= scan8[0] + 8 && (i & 7) == 4 |
59 | 58 |
&& h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) { |
60 |
- const uint32_t *mb_types = s->current_picture_ptr->f.mb_type; |
|
59 |
+ const uint32_t *mb_types = h->cur_pic_ptr->f.mb_type; |
|
61 | 60 |
const int16_t *mv; |
62 | 61 |
AV_ZERO32(h->mv_cache[list][scan8[0] - 2]); |
63 | 62 |
*C = h->mv_cache[list][scan8[0] - 2]; |
64 | 63 |
|
65 | 64 |
if (!MB_FIELD && IS_INTERLACED(h->left_type[0])) { |
66 |
- SET_DIAG_MV(* 2, >> 1, h->left_mb_xy[0] + s->mb_stride, |
|
67 |
- (s->mb_y & 1) * 2 + (i >> 5)); |
|
65 |
+ SET_DIAG_MV(* 2, >> 1, h->left_mb_xy[0] + h->mb_stride, |
|
66 |
+ (h->mb_y & 1) * 2 + (i >> 5)); |
|
68 | 67 |
} |
69 | 68 |
if (MB_FIELD && !IS_INTERLACED(h->left_type[0])) { |
70 | 69 |
// left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK. |
... | ... |
@@ -78,7 +77,7 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, |
78 | 78 |
*C = h->mv_cache[list][i - 8 + part_width]; |
79 | 79 |
return topright_ref; |
80 | 80 |
} else { |
81 |
- tprintf(s->avctx, "topright MV not available\n"); |
|
81 |
+ tprintf(h->avctx, "topright MV not available\n"); |
|
82 | 82 |
|
83 | 83 |
*C = h->mv_cache[list][i - 8 - 1]; |
84 | 84 |
return h->ref_cache[list][i - 8 - 1]; |
... | ... |
@@ -116,7 +115,7 @@ static av_always_inline void pred_motion(H264Context *const h, int n, |
116 | 116 |
|
117 | 117 |
diagonal_ref = fetch_diagonal_mv(h, &C, index8, list, part_width); |
118 | 118 |
match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref); |
119 |
- tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count); |
|
119 |
+ tprintf(h->avctx, "pred_motion match_count=%d\n", match_count); |
|
120 | 120 |
if (match_count > 1) { //most common |
121 | 121 |
*mx = mid_pred(A[0], B[0], C[0]); |
122 | 122 |
*my = mid_pred(A[1], B[1], C[1]); |
... | ... |
@@ -143,10 +142,10 @@ static av_always_inline void pred_motion(H264Context *const h, int n, |
143 | 143 |
} |
144 | 144 |
} |
145 | 145 |
|
146 |
- tprintf(h->s.avctx, |
|
146 |
+ tprintf(h->avctx, |
|
147 | 147 |
"pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", |
148 | 148 |
top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, |
149 |
- A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list); |
|
149 |
+ A[0], A[1], ref, *mx, *my, h->mb_x, h->mb_y, n, list); |
|
150 | 150 |
} |
151 | 151 |
|
152 | 152 |
/** |
... | ... |
@@ -163,8 +162,8 @@ static av_always_inline void pred_16x8_motion(H264Context *const h, |
163 | 163 |
const int top_ref = h->ref_cache[list][scan8[0] - 8]; |
164 | 164 |
const int16_t *const B = h->mv_cache[list][scan8[0] - 8]; |
165 | 165 |
|
166 |
- tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
167 |
- top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list); |
|
166 |
+ tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
167 |
+ top_ref, B[0], B[1], h->mb_x, h->mb_y, n, list); |
|
168 | 168 |
|
169 | 169 |
if (top_ref == ref) { |
170 | 170 |
*mx = B[0]; |
... | ... |
@@ -175,8 +174,8 @@ static av_always_inline void pred_16x8_motion(H264Context *const h, |
175 | 175 |
const int left_ref = h->ref_cache[list][scan8[8] - 1]; |
176 | 176 |
const int16_t *const A = h->mv_cache[list][scan8[8] - 1]; |
177 | 177 |
|
178 |
- tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
179 |
- left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); |
|
178 |
+ tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
179 |
+ left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list); |
|
180 | 180 |
|
181 | 181 |
if (left_ref == ref) { |
182 | 182 |
*mx = A[0]; |
... | ... |
@@ -203,8 +202,8 @@ static av_always_inline void pred_8x16_motion(H264Context *const h, |
203 | 203 |
const int left_ref = h->ref_cache[list][scan8[0] - 1]; |
204 | 204 |
const int16_t *const A = h->mv_cache[list][scan8[0] - 1]; |
205 | 205 |
|
206 |
- tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
207 |
- left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); |
|
206 |
+ tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
207 |
+ left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list); |
|
208 | 208 |
|
209 | 209 |
if (left_ref == ref) { |
210 | 210 |
*mx = A[0]; |
... | ... |
@@ -217,8 +216,8 @@ static av_always_inline void pred_8x16_motion(H264Context *const h, |
217 | 217 |
|
218 | 218 |
diagonal_ref = fetch_diagonal_mv(h, &C, scan8[4], list, 2); |
219 | 219 |
|
220 |
- tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
221 |
- diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list); |
|
220 |
+ tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", |
|
221 |
+ diagonal_ref, C[0], C[1], h->mb_x, h->mb_y, n, list); |
|
222 | 222 |
|
223 | 223 |
if (diagonal_ref == ref) { |
224 | 224 |
*mx = C[0]; |
... | ... |
@@ -254,9 +253,8 @@ static av_always_inline void pred_pskip_motion(H264Context *const h) |
254 | 254 |
{ |
255 | 255 |
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 }; |
256 | 256 |
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2]; |
257 |
- MpegEncContext *const s = &h->s; |
|
258 |
- int8_t *ref = s->current_picture.f.ref_index[0]; |
|
259 |
- int16_t(*mv)[2] = s->current_picture.f.motion_val[0]; |
|
257 |
+ int8_t *ref = h->cur_pic.f.ref_index[0]; |
|
258 |
+ int16_t(*mv)[2] = h->cur_pic.f.motion_val[0]; |
|
260 | 259 |
int top_ref, left_ref, diagonal_ref, match_count, mx, my; |
261 | 260 |
const int16_t *A, *B, *C; |
262 | 261 |
int b_stride = h->b_stride; |
... | ... |
@@ -294,8 +292,8 @@ static av_always_inline void pred_pskip_motion(H264Context *const h) |
294 | 294 |
goto zeromv; |
295 | 295 |
} |
296 | 296 |
|
297 |
- tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", |
|
298 |
- top_ref, left_ref, h->s.mb_x, h->s.mb_y); |
|
297 |
+ tprintf(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", |
|
298 |
+ top_ref, left_ref, h->mb_x, h->mb_y); |
|
299 | 299 |
|
300 | 300 |
if (USES_LIST(h->topright_type, 0)) { |
301 | 301 |
diagonal_ref = ref[4 * h->topright_mb_xy + 2]; |
... | ... |
@@ -321,7 +319,7 @@ static av_always_inline void pred_pskip_motion(H264Context *const h) |
321 | 321 |
} |
322 | 322 |
|
323 | 323 |
match_count = !diagonal_ref + !top_ref + !left_ref; |
324 |
- tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count); |
|
324 |
+ tprintf(h->avctx, "pred_pskip_motion match_count=%d\n", match_count); |
|
325 | 325 |
if (match_count > 1) { |
326 | 326 |
mx = mid_pred(A[0], B[0], C[0]); |
327 | 327 |
my = mid_pred(A[1], B[1], C[1]); |
... | ... |
@@ -351,7 +349,6 @@ zeromv: |
351 | 351 |
|
352 | 352 |
static void fill_decode_neighbors(H264Context *h, int mb_type) |
353 | 353 |
{ |
354 |
- MpegEncContext *const s = &h->s; |
|
355 | 354 |
const int mb_xy = h->mb_xy; |
356 | 355 |
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS]; |
357 | 356 |
static const uint8_t left_block_options[4][32] = { |
... | ... |
@@ -363,7 +360,7 @@ static void fill_decode_neighbors(H264Context *h, int mb_type) |
363 | 363 |
|
364 | 364 |
h->topleft_partition = -1; |
365 | 365 |
|
366 |
- top_xy = mb_xy - (s->mb_stride << MB_FIELD); |
|
366 |
+ top_xy = mb_xy - (h->mb_stride << MB_FIELD); |
|
367 | 367 |
|
368 | 368 |
/* Wow, what a mess, why didn't they simplify the interlacing & intra |
369 | 369 |
* stuff, I can't imagine that these complex rules are worth it. */ |
... | ... |
@@ -373,16 +370,16 @@ static void fill_decode_neighbors(H264Context *h, int mb_type) |
373 | 373 |
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1; |
374 | 374 |
h->left_block = left_block_options[0]; |
375 | 375 |
if (FRAME_MBAFF) { |
376 |
- const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]); |
|
376 |
+ const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]); |
|
377 | 377 |
const int curr_mb_field_flag = IS_INTERLACED(mb_type); |
378 |
- if (s->mb_y & 1) { |
|
378 |
+ if (h->mb_y & 1) { |
|
379 | 379 |
if (left_mb_field_flag != curr_mb_field_flag) { |
380 |
- left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1; |
|
380 |
+ left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1; |
|
381 | 381 |
if (curr_mb_field_flag) { |
382 |
- left_xy[LBOT] += s->mb_stride; |
|
382 |
+ left_xy[LBOT] += h->mb_stride; |
|
383 | 383 |
h->left_block = left_block_options[3]; |
384 | 384 |
} else { |
385 |
- topleft_xy += s->mb_stride; |
|
385 |
+ topleft_xy += h->mb_stride; |
|
386 | 386 |
/* take top left mv from the middle of the mb, as opposed |
387 | 387 |
* to all other modes which use the bottom right partition */ |
388 | 388 |
h->topleft_partition = 0; |
... | ... |
@@ -391,13 +388,13 @@ static void fill_decode_neighbors(H264Context *h, int mb_type) |
391 | 391 |
} |
392 | 392 |
} else { |
393 | 393 |
if (curr_mb_field_flag) { |
394 |
- topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1); |
|
395 |
- topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1); |
|
396 |
- top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1); |
|
394 |
+ topleft_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy - 1] >> 7) & 1) - 1); |
|
395 |
+ topright_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy + 1] >> 7) & 1) - 1); |
|
396 |
+ top_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1); |
|
397 | 397 |
} |
398 | 398 |
if (left_mb_field_flag != curr_mb_field_flag) { |
399 | 399 |
if (curr_mb_field_flag) { |
400 |
- left_xy[LBOT] += s->mb_stride; |
|
400 |
+ left_xy[LBOT] += h->mb_stride; |
|
401 | 401 |
h->left_block = left_block_options[3]; |
402 | 402 |
} else { |
403 | 403 |
h->left_block = left_block_options[2]; |
... | ... |
@@ -413,11 +410,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type) |
413 | 413 |
h->left_mb_xy[LBOT] = left_xy[LBOT]; |
414 | 414 |
//FIXME do we need all in the context? |
415 | 415 |
|
416 |
- h->topleft_type = s->current_picture.f.mb_type[topleft_xy]; |
|
417 |
- h->top_type = s->current_picture.f.mb_type[top_xy]; |
|
418 |
- h->topright_type = s->current_picture.f.mb_type[topright_xy]; |
|
419 |
- h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]]; |
|
420 |
- h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]]; |
|
416 |
+ h->topleft_type = h->cur_pic.f.mb_type[topleft_xy]; |
|
417 |
+ h->top_type = h->cur_pic.f.mb_type[top_xy]; |
|
418 |
+ h->topright_type = h->cur_pic.f.mb_type[topright_xy]; |
|
419 |
+ h->left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]]; |
|
420 |
+ h->left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]]; |
|
421 | 421 |
|
422 | 422 |
if (FMO) { |
423 | 423 |
if (h->slice_table[topleft_xy] != h->slice_num) |
... | ... |
@@ -441,7 +438,6 @@ static void fill_decode_neighbors(H264Context *h, int mb_type) |
441 | 441 |
|
442 | 442 |
static void fill_decode_caches(H264Context *h, int mb_type) |
443 | 443 |
{ |
444 |
- MpegEncContext *const s = &h->s; |
|
445 | 444 |
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS]; |
446 | 445 |
int topleft_type, top_type, topright_type, left_type[LEFT_MBS]; |
447 | 446 |
const uint8_t *left_block = h->left_block; |
... | ... |
@@ -484,7 +480,7 @@ static void fill_decode_caches(H264Context *h, int mb_type) |
484 | 484 |
h->left_samples_available &= 0xFF5F; |
485 | 485 |
} |
486 | 486 |
} else { |
487 |
- int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride]; |
|
487 |
+ int left_typei = h->cur_pic.f.mb_type[left_xy[LTOP] + h->mb_stride]; |
|
488 | 488 |
|
489 | 489 |
assert(left_xy[LTOP] == left_xy[LBOT]); |
490 | 490 |
if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) { |
... | ... |
@@ -541,7 +537,7 @@ static void fill_decode_caches(H264Context *h, int mb_type) |
541 | 541 |
if (top_type) { |
542 | 542 |
nnz = h->non_zero_count[top_xy]; |
543 | 543 |
AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]); |
544 |
- if (!s->chroma_y_shift) { |
|
544 |
+ if (!h->chroma_y_shift) { |
|
545 | 545 |
AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]); |
546 | 546 |
AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]); |
547 | 547 |
} else { |
... | ... |
@@ -606,9 +602,9 @@ static void fill_decode_caches(H264Context *h, int mb_type) |
606 | 606 |
int b_stride = h->b_stride; |
607 | 607 |
for (list = 0; list < h->list_count; list++) { |
608 | 608 |
int8_t *ref_cache = &h->ref_cache[list][scan8[0]]; |
609 |
- int8_t *ref = s->current_picture.f.ref_index[list]; |
|
609 |
+ int8_t *ref = h->cur_pic.f.ref_index[list]; |
|
610 | 610 |
int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]]; |
611 |
- int16_t(*mv)[2] = s->current_picture.f.motion_val[list]; |
|
611 |
+ int16_t(*mv)[2] = h->cur_pic.f.motion_val[list]; |
|
612 | 612 |
if (!USES_LIST(mb_type, list)) |
613 | 613 |
continue; |
614 | 614 |
assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)); |
... | ... |
@@ -800,7 +796,6 @@ static void fill_decode_caches(H264Context *h, int mb_type) |
800 | 800 |
*/ |
801 | 801 |
static void av_unused decode_mb_skip(H264Context *h) |
802 | 802 |
{ |
803 |
- MpegEncContext *const s = &h->s; |
|
804 | 803 |
const int mb_xy = h->mb_xy; |
805 | 804 |
int mb_type = 0; |
806 | 805 |
|
... | ... |
@@ -826,10 +821,10 @@ static void av_unused decode_mb_skip(H264Context *h) |
826 | 826 |
} |
827 | 827 |
|
828 | 828 |
write_back_motion(h, mb_type); |
829 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
830 |
- s->current_picture.f.qscale_table[mb_xy] = s->qscale; |
|
831 |
- h->slice_table[mb_xy] = h->slice_num; |
|
832 |
- h->prev_mb_skipped = 1; |
|
829 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
830 |
+ h->cur_pic.f.qscale_table[mb_xy] = h->qscale; |
|
831 |
+ h->slice_table[mb_xy] = h->slice_num; |
|
832 |
+ h->prev_mb_skipped = 1; |
|
833 | 833 |
} |
834 | 834 |
|
835 | 835 |
#endif /* AVCODEC_H264_MVPRED_H */ |
... | ... |
@@ -36,7 +36,7 @@ static int ff_h264_find_frame_end(H264Context *h, const uint8_t *buf, int buf_si |
36 | 36 |
{ |
37 | 37 |
int i; |
38 | 38 |
uint32_t state; |
39 |
- ParseContext *pc = &(h->s.parse_context); |
|
39 |
+ ParseContext *pc = &h->parse_context; |
|
40 | 40 |
// mb_addr= pc->mb_addr - 1; |
41 | 41 |
state= pc->state; |
42 | 42 |
if(state>13) |
... | ... |
@@ -119,7 +119,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, |
119 | 119 |
s->pict_type = AV_PICTURE_TYPE_I; |
120 | 120 |
s->key_frame = 0; |
121 | 121 |
|
122 |
- h->s.avctx= avctx; |
|
122 |
+ h->avctx= avctx; |
|
123 | 123 |
h->sei_recovery_frame_cnt = -1; |
124 | 124 |
h->sei_dpb_output_delay = 0; |
125 | 125 |
h->sei_cpb_removal_delay = -1; |
... | ... |
@@ -147,13 +147,13 @@ static inline int parse_nal_units(AVCodecParserContext *s, |
147 | 147 |
if (ptr==NULL || dst_length < 0) |
148 | 148 |
break; |
149 | 149 |
|
150 |
- init_get_bits(&h->s.gb, ptr, 8*dst_length); |
|
150 |
+ init_get_bits(&h->gb, ptr, 8*dst_length); |
|
151 | 151 |
switch(h->nal_unit_type) { |
152 | 152 |
case NAL_SPS: |
153 | 153 |
ff_h264_decode_seq_parameter_set(h); |
154 | 154 |
break; |
155 | 155 |
case NAL_PPS: |
156 |
- ff_h264_decode_picture_parameter_set(h, h->s.gb.size_in_bits); |
|
156 |
+ ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits); |
|
157 | 157 |
break; |
158 | 158 |
case NAL_SEI: |
159 | 159 |
ff_h264_decode_sei(h); |
... | ... |
@@ -162,40 +162,40 @@ static inline int parse_nal_units(AVCodecParserContext *s, |
162 | 162 |
s->key_frame = 1; |
163 | 163 |
/* fall through */ |
164 | 164 |
case NAL_SLICE: |
165 |
- get_ue_golomb(&h->s.gb); // skip first_mb_in_slice |
|
166 |
- slice_type = get_ue_golomb_31(&h->s.gb); |
|
165 |
+ get_ue_golomb(&h->gb); // skip first_mb_in_slice |
|
166 |
+ slice_type = get_ue_golomb_31(&h->gb); |
|
167 | 167 |
s->pict_type = golomb_to_pict_type[slice_type % 5]; |
168 | 168 |
if (h->sei_recovery_frame_cnt >= 0) { |
169 | 169 |
/* key frame, since recovery_frame_cnt is set */ |
170 | 170 |
s->key_frame = 1; |
171 | 171 |
} |
172 |
- pps_id= get_ue_golomb(&h->s.gb); |
|
172 |
+ pps_id= get_ue_golomb(&h->gb); |
|
173 | 173 |
if(pps_id>=MAX_PPS_COUNT) { |
174 |
- av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n"); |
|
174 |
+ av_log(h->avctx, AV_LOG_ERROR, "pps_id out of range\n"); |
|
175 | 175 |
return -1; |
176 | 176 |
} |
177 | 177 |
if(!h->pps_buffers[pps_id]) { |
178 |
- av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS referenced\n"); |
|
178 |
+ av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS referenced\n"); |
|
179 | 179 |
return -1; |
180 | 180 |
} |
181 | 181 |
h->pps= *h->pps_buffers[pps_id]; |
182 | 182 |
if(!h->sps_buffers[h->pps.sps_id]) { |
183 |
- av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS referenced\n"); |
|
183 |
+ av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS referenced\n"); |
|
184 | 184 |
return -1; |
185 | 185 |
} |
186 | 186 |
h->sps = *h->sps_buffers[h->pps.sps_id]; |
187 |
- h->frame_num = get_bits(&h->s.gb, h->sps.log2_max_frame_num); |
|
187 |
+ h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); |
|
188 | 188 |
|
189 | 189 |
avctx->profile = ff_h264_get_profile(&h->sps); |
190 | 190 |
avctx->level = h->sps.level_idc; |
191 | 191 |
|
192 | 192 |
if(h->sps.frame_mbs_only_flag){ |
193 |
- h->s.picture_structure= PICT_FRAME; |
|
193 |
+ h->picture_structure= PICT_FRAME; |
|
194 | 194 |
}else{ |
195 |
- if(get_bits1(&h->s.gb)) { //field_pic_flag |
|
196 |
- h->s.picture_structure= PICT_TOP_FIELD + get_bits1(&h->s.gb); //bottom_field_flag |
|
195 |
+ if(get_bits1(&h->gb)) { //field_pic_flag |
|
196 |
+ h->picture_structure= PICT_TOP_FIELD + get_bits1(&h->gb); //bottom_field_flag |
|
197 | 197 |
} else { |
198 |
- h->s.picture_structure= PICT_FRAME; |
|
198 |
+ h->picture_structure= PICT_FRAME; |
|
199 | 199 |
} |
200 | 200 |
} |
201 | 201 |
|
... | ... |
@@ -221,11 +221,11 @@ static inline int parse_nal_units(AVCodecParserContext *s, |
221 | 221 |
s->repeat_pict = 5; |
222 | 222 |
break; |
223 | 223 |
default: |
224 |
- s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 1 : 0; |
|
224 |
+ s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0; |
|
225 | 225 |
break; |
226 | 226 |
} |
227 | 227 |
} else { |
228 |
- s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 1 : 0; |
|
228 |
+ s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0; |
|
229 | 229 |
} |
230 | 230 |
|
231 | 231 |
return 0; /* no need to evaluate the rest */ |
... | ... |
@@ -233,7 +233,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, |
233 | 233 |
buf += consumed; |
234 | 234 |
} |
235 | 235 |
/* didn't find a picture! */ |
236 |
- av_log(h->s.avctx, AV_LOG_ERROR, "missing picture in access unit\n"); |
|
236 |
+ av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit\n"); |
|
237 | 237 |
return -1; |
238 | 238 |
} |
239 | 239 |
|
... | ... |
@@ -243,20 +243,20 @@ static int h264_parse(AVCodecParserContext *s, |
243 | 243 |
const uint8_t *buf, int buf_size) |
244 | 244 |
{ |
245 | 245 |
H264Context *h = s->priv_data; |
246 |
- ParseContext *pc = &h->s.parse_context; |
|
246 |
+ ParseContext *pc = &h->parse_context; |
|
247 | 247 |
int next; |
248 | 248 |
|
249 | 249 |
if (!h->got_first) { |
250 | 250 |
h->got_first = 1; |
251 | 251 |
if (avctx->extradata_size) { |
252 |
- h->s.avctx = avctx; |
|
252 |
+ h->avctx = avctx; |
|
253 | 253 |
// must be done like in the decoder. |
254 | 254 |
// otherwise opening the parser, creating extradata, |
255 | 255 |
// and then closing and opening again |
256 | 256 |
// will cause has_b_frames to be always set. |
257 | 257 |
// NB: estimate_timings_from_pts behaves exactly like this. |
258 | 258 |
if (!avctx->has_b_frames) |
259 |
- h->s.low_delay = 1; |
|
259 |
+ h->low_delay = 1; |
|
260 | 260 |
ff_h264_decode_extradata(h); |
261 | 261 |
} |
262 | 262 |
} |
... | ... |
@@ -326,7 +326,7 @@ static int h264_split(AVCodecContext *avctx, |
326 | 326 |
static void close(AVCodecParserContext *s) |
327 | 327 |
{ |
328 | 328 |
H264Context *h = s->priv_data; |
329 |
- ParseContext *pc = &h->s.parse_context; |
|
329 |
+ ParseContext *pc = &h->parse_context; |
|
330 | 330 |
|
331 | 331 |
av_free(pc->buffer); |
332 | 332 |
ff_h264_free_context(h); |
... | ... |
@@ -336,7 +336,7 @@ static int init(AVCodecParserContext *s) |
336 | 336 |
{ |
337 | 337 |
H264Context *h = s->priv_data; |
338 | 338 |
h->thread_context[0] = h; |
339 |
- h->s.slice_context_count = 1; |
|
339 |
+ h->slice_context_count = 1; |
|
340 | 340 |
return 0; |
341 | 341 |
} |
342 | 342 |
|
... | ... |
@@ -121,46 +121,44 @@ static const uint8_t default_scaling8[2][64]={ |
121 | 121 |
}}; |
122 | 122 |
|
123 | 123 |
static inline int decode_hrd_parameters(H264Context *h, SPS *sps){ |
124 |
- MpegEncContext * const s = &h->s; |
|
125 | 124 |
int cpb_count, i; |
126 |
- cpb_count = get_ue_golomb_31(&s->gb) + 1; |
|
125 |
+ cpb_count = get_ue_golomb_31(&h->gb) + 1; |
|
127 | 126 |
|
128 | 127 |
if(cpb_count > 32U){ |
129 |
- av_log(h->s.avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); |
|
128 |
+ av_log(h->avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); |
|
130 | 129 |
return -1; |
131 | 130 |
} |
132 | 131 |
|
133 |
- get_bits(&s->gb, 4); /* bit_rate_scale */ |
|
134 |
- get_bits(&s->gb, 4); /* cpb_size_scale */ |
|
132 |
+ get_bits(&h->gb, 4); /* bit_rate_scale */ |
|
133 |
+ get_bits(&h->gb, 4); /* cpb_size_scale */ |
|
135 | 134 |
for(i=0; i<cpb_count; i++){ |
136 |
- get_ue_golomb_long(&s->gb); /* bit_rate_value_minus1 */ |
|
137 |
- get_ue_golomb_long(&s->gb); /* cpb_size_value_minus1 */ |
|
138 |
- get_bits1(&s->gb); /* cbr_flag */ |
|
135 |
+ get_ue_golomb_long(&h->gb); /* bit_rate_value_minus1 */ |
|
136 |
+ get_ue_golomb_long(&h->gb); /* cpb_size_value_minus1 */ |
|
137 |
+ get_bits1(&h->gb); /* cbr_flag */ |
|
139 | 138 |
} |
140 |
- sps->initial_cpb_removal_delay_length = get_bits(&s->gb, 5) + 1; |
|
141 |
- sps->cpb_removal_delay_length = get_bits(&s->gb, 5) + 1; |
|
142 |
- sps->dpb_output_delay_length = get_bits(&s->gb, 5) + 1; |
|
143 |
- sps->time_offset_length = get_bits(&s->gb, 5); |
|
139 |
+ sps->initial_cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; |
|
140 |
+ sps->cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; |
|
141 |
+ sps->dpb_output_delay_length = get_bits(&h->gb, 5) + 1; |
|
142 |
+ sps->time_offset_length = get_bits(&h->gb, 5); |
|
144 | 143 |
sps->cpb_cnt = cpb_count; |
145 | 144 |
return 0; |
146 | 145 |
} |
147 | 146 |
|
148 | 147 |
static inline int decode_vui_parameters(H264Context *h, SPS *sps){ |
149 |
- MpegEncContext * const s = &h->s; |
|
150 | 148 |
int aspect_ratio_info_present_flag; |
151 | 149 |
unsigned int aspect_ratio_idc; |
152 | 150 |
|
153 |
- aspect_ratio_info_present_flag= get_bits1(&s->gb); |
|
151 |
+ aspect_ratio_info_present_flag= get_bits1(&h->gb); |
|
154 | 152 |
|
155 | 153 |
if( aspect_ratio_info_present_flag ) { |
156 |
- aspect_ratio_idc= get_bits(&s->gb, 8); |
|
154 |
+ aspect_ratio_idc= get_bits(&h->gb, 8); |
|
157 | 155 |
if( aspect_ratio_idc == EXTENDED_SAR ) { |
158 |
- sps->sar.num= get_bits(&s->gb, 16); |
|
159 |
- sps->sar.den= get_bits(&s->gb, 16); |
|
156 |
+ sps->sar.num= get_bits(&h->gb, 16); |
|
157 |
+ sps->sar.den= get_bits(&h->gb, 16); |
|
160 | 158 |
}else if(aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)){ |
161 | 159 |
sps->sar= pixel_aspect[aspect_ratio_idc]; |
162 | 160 |
}else{ |
163 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); |
|
161 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); |
|
164 | 162 |
return -1; |
165 | 163 |
} |
166 | 164 |
}else{ |
... | ... |
@@ -169,20 +167,20 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){ |
169 | 169 |
} |
170 | 170 |
// s->avctx->aspect_ratio= sar_width*s->width / (float)(s->height*sar_height); |
171 | 171 |
|
172 |
- if(get_bits1(&s->gb)){ /* overscan_info_present_flag */ |
|
173 |
- get_bits1(&s->gb); /* overscan_appropriate_flag */ |
|
172 |
+ if(get_bits1(&h->gb)){ /* overscan_info_present_flag */ |
|
173 |
+ get_bits1(&h->gb); /* overscan_appropriate_flag */ |
|
174 | 174 |
} |
175 | 175 |
|
176 |
- sps->video_signal_type_present_flag = get_bits1(&s->gb); |
|
176 |
+ sps->video_signal_type_present_flag = get_bits1(&h->gb); |
|
177 | 177 |
if(sps->video_signal_type_present_flag){ |
178 |
- get_bits(&s->gb, 3); /* video_format */ |
|
179 |
- sps->full_range = get_bits1(&s->gb); /* video_full_range_flag */ |
|
178 |
+ get_bits(&h->gb, 3); /* video_format */ |
|
179 |
+ sps->full_range = get_bits1(&h->gb); /* video_full_range_flag */ |
|
180 | 180 |
|
181 |
- sps->colour_description_present_flag = get_bits1(&s->gb); |
|
181 |
+ sps->colour_description_present_flag = get_bits1(&h->gb); |
|
182 | 182 |
if(sps->colour_description_present_flag){ |
183 |
- sps->color_primaries = get_bits(&s->gb, 8); /* colour_primaries */ |
|
184 |
- sps->color_trc = get_bits(&s->gb, 8); /* transfer_characteristics */ |
|
185 |
- sps->colorspace = get_bits(&s->gb, 8); /* matrix_coefficients */ |
|
183 |
+ sps->color_primaries = get_bits(&h->gb, 8); /* colour_primaries */ |
|
184 |
+ sps->color_trc = get_bits(&h->gb, 8); /* transfer_characteristics */ |
|
185 |
+ sps->colorspace = get_bits(&h->gb, 8); /* matrix_coefficients */ |
|
186 | 186 |
if (sps->color_primaries >= AVCOL_PRI_NB) |
187 | 187 |
sps->color_primaries = AVCOL_PRI_UNSPECIFIED; |
188 | 188 |
if (sps->color_trc >= AVCOL_TRC_NB) |
... | ... |
@@ -192,56 +190,56 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){ |
192 | 192 |
} |
193 | 193 |
} |
194 | 194 |
|
195 |
- if(get_bits1(&s->gb)){ /* chroma_location_info_present_flag */ |
|
196 |
- s->avctx->chroma_sample_location = get_ue_golomb(&s->gb)+1; /* chroma_sample_location_type_top_field */ |
|
197 |
- get_ue_golomb(&s->gb); /* chroma_sample_location_type_bottom_field */ |
|
195 |
+ if(get_bits1(&h->gb)){ /* chroma_location_info_present_flag */ |
|
196 |
+ h->avctx->chroma_sample_location = get_ue_golomb(&h->gb)+1; /* chroma_sample_location_type_top_field */ |
|
197 |
+ get_ue_golomb(&h->gb); /* chroma_sample_location_type_bottom_field */ |
|
198 | 198 |
} |
199 | 199 |
|
200 |
- sps->timing_info_present_flag = get_bits1(&s->gb); |
|
200 |
+ sps->timing_info_present_flag = get_bits1(&h->gb); |
|
201 | 201 |
if(sps->timing_info_present_flag){ |
202 |
- sps->num_units_in_tick = get_bits_long(&s->gb, 32); |
|
203 |
- sps->time_scale = get_bits_long(&s->gb, 32); |
|
202 |
+ sps->num_units_in_tick = get_bits_long(&h->gb, 32); |
|
203 |
+ sps->time_scale = get_bits_long(&h->gb, 32); |
|
204 | 204 |
if(!sps->num_units_in_tick || !sps->time_scale){ |
205 |
- av_log(h->s.avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%d/%d)\n", sps->time_scale, sps->num_units_in_tick); |
|
205 |
+ av_log(h->avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%d/%d)\n", sps->time_scale, sps->num_units_in_tick); |
|
206 | 206 |
return -1; |
207 | 207 |
} |
208 |
- sps->fixed_frame_rate_flag = get_bits1(&s->gb); |
|
208 |
+ sps->fixed_frame_rate_flag = get_bits1(&h->gb); |
|
209 | 209 |
} |
210 | 210 |
|
211 |
- sps->nal_hrd_parameters_present_flag = get_bits1(&s->gb); |
|
211 |
+ sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb); |
|
212 | 212 |
if(sps->nal_hrd_parameters_present_flag) |
213 | 213 |
if(decode_hrd_parameters(h, sps) < 0) |
214 | 214 |
return -1; |
215 |
- sps->vcl_hrd_parameters_present_flag = get_bits1(&s->gb); |
|
215 |
+ sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb); |
|
216 | 216 |
if(sps->vcl_hrd_parameters_present_flag) |
217 | 217 |
if(decode_hrd_parameters(h, sps) < 0) |
218 | 218 |
return -1; |
219 | 219 |
if(sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) |
220 |
- get_bits1(&s->gb); /* low_delay_hrd_flag */ |
|
221 |
- sps->pic_struct_present_flag = get_bits1(&s->gb); |
|
220 |
+ get_bits1(&h->gb); /* low_delay_hrd_flag */ |
|
221 |
+ sps->pic_struct_present_flag = get_bits1(&h->gb); |
|
222 | 222 |
|
223 |
- sps->bitstream_restriction_flag = get_bits1(&s->gb); |
|
223 |
+ sps->bitstream_restriction_flag = get_bits1(&h->gb); |
|
224 | 224 |
if(sps->bitstream_restriction_flag){ |
225 |
- get_bits1(&s->gb); /* motion_vectors_over_pic_boundaries_flag */ |
|
226 |
- get_ue_golomb(&s->gb); /* max_bytes_per_pic_denom */ |
|
227 |
- get_ue_golomb(&s->gb); /* max_bits_per_mb_denom */ |
|
228 |
- get_ue_golomb(&s->gb); /* log2_max_mv_length_horizontal */ |
|
229 |
- get_ue_golomb(&s->gb); /* log2_max_mv_length_vertical */ |
|
230 |
- sps->num_reorder_frames= get_ue_golomb(&s->gb); |
|
231 |
- get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/ |
|
232 |
- |
|
233 |
- if (get_bits_left(&s->gb) < 0) { |
|
225 |
+ get_bits1(&h->gb); /* motion_vectors_over_pic_boundaries_flag */ |
|
226 |
+ get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */ |
|
227 |
+ get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */ |
|
228 |
+ get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */ |
|
229 |
+ get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */ |
|
230 |
+ sps->num_reorder_frames= get_ue_golomb(&h->gb); |
|
231 |
+ get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/ |
|
232 |
+ |
|
233 |
+ if (get_bits_left(&h->gb) < 0) { |
|
234 | 234 |
sps->num_reorder_frames=0; |
235 | 235 |
sps->bitstream_restriction_flag= 0; |
236 | 236 |
} |
237 | 237 |
|
238 | 238 |
if(sps->num_reorder_frames > 16U /*max_dec_frame_buffering || max_dec_frame_buffering > 16*/){ |
239 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal num_reorder_frames %d\n", sps->num_reorder_frames); |
|
239 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal num_reorder_frames %d\n", sps->num_reorder_frames); |
|
240 | 240 |
return -1; |
241 | 241 |
} |
242 | 242 |
} |
243 |
- if (get_bits_left(&s->gb) < 0) { |
|
244 |
- av_log(h->s.avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", -get_bits_left(&s->gb)); |
|
243 |
+ if (get_bits_left(&h->gb) < 0) { |
|
244 |
+ av_log(h->avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", -get_bits_left(&h->gb)); |
|
245 | 245 |
return AVERROR_INVALIDDATA; |
246 | 246 |
} |
247 | 247 |
|
... | ... |
@@ -250,15 +248,14 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){ |
250 | 250 |
|
251 | 251 |
static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, |
252 | 252 |
const uint8_t *jvt_list, const uint8_t *fallback_list){ |
253 |
- MpegEncContext * const s = &h->s; |
|
254 | 253 |
int i, last = 8, next = 8; |
255 | 254 |
const uint8_t *scan = size == 16 ? zigzag_scan : ff_zigzag_direct; |
256 |
- if(!get_bits1(&s->gb)) /* matrix not written, we use the predicted one */ |
|
255 |
+ if(!get_bits1(&h->gb)) /* matrix not written, we use the predicted one */ |
|
257 | 256 |
memcpy(factors, fallback_list, size*sizeof(uint8_t)); |
258 | 257 |
else |
259 | 258 |
for(i=0;i<size;i++){ |
260 | 259 |
if(next) |
261 |
- next = (last + get_se_golomb(&s->gb)) & 0xff; |
|
260 |
+ next = (last + get_se_golomb(&h->gb)) & 0xff; |
|
262 | 261 |
if(!i && !next){ /* matrix not written, we use the preset one */ |
263 | 262 |
memcpy(factors, jvt_list, size*sizeof(uint8_t)); |
264 | 263 |
break; |
... | ... |
@@ -269,7 +266,6 @@ static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, |
269 | 269 |
|
270 | 270 |
static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_sps, |
271 | 271 |
uint8_t (*scaling_matrix4)[16], uint8_t (*scaling_matrix8)[64]){ |
272 |
- MpegEncContext * const s = &h->s; |
|
273 | 272 |
int fallback_sps = !is_sps && sps->scaling_matrix_present; |
274 | 273 |
const uint8_t *fallback[4] = { |
275 | 274 |
fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0], |
... | ... |
@@ -277,7 +273,7 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_s |
277 | 277 |
fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], |
278 | 278 |
fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1] |
279 | 279 |
}; |
280 |
- if(get_bits1(&s->gb)){ |
|
280 |
+ if(get_bits1(&h->gb)){ |
|
281 | 281 |
sps->scaling_matrix_present |= is_sps; |
282 | 282 |
decode_scaling_list(h,scaling_matrix4[0],16,default_scaling4[0],fallback[0]); // Intra, Y |
283 | 283 |
decode_scaling_list(h,scaling_matrix4[1],16,default_scaling4[0],scaling_matrix4[0]); // Intra, Cr |
... | ... |
@@ -301,23 +297,22 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_s |
301 | 301 |
} |
302 | 302 |
|
303 | 303 |
int ff_h264_decode_seq_parameter_set(H264Context *h){ |
304 |
- MpegEncContext * const s = &h->s; |
|
305 | 304 |
int profile_idc, level_idc, constraint_set_flags = 0; |
306 | 305 |
unsigned int sps_id; |
307 | 306 |
int i, log2_max_frame_num_minus4; |
308 | 307 |
SPS *sps; |
309 | 308 |
|
310 |
- profile_idc= get_bits(&s->gb, 8); |
|
311 |
- constraint_set_flags |= get_bits1(&s->gb) << 0; //constraint_set0_flag |
|
312 |
- constraint_set_flags |= get_bits1(&s->gb) << 1; //constraint_set1_flag |
|
313 |
- constraint_set_flags |= get_bits1(&s->gb) << 2; //constraint_set2_flag |
|
314 |
- constraint_set_flags |= get_bits1(&s->gb) << 3; //constraint_set3_flag |
|
315 |
- get_bits(&s->gb, 4); // reserved |
|
316 |
- level_idc= get_bits(&s->gb, 8); |
|
317 |
- sps_id= get_ue_golomb_31(&s->gb); |
|
309 |
+ profile_idc= get_bits(&h->gb, 8); |
|
310 |
+ constraint_set_flags |= get_bits1(&h->gb) << 0; //constraint_set0_flag |
|
311 |
+ constraint_set_flags |= get_bits1(&h->gb) << 1; //constraint_set1_flag |
|
312 |
+ constraint_set_flags |= get_bits1(&h->gb) << 2; //constraint_set2_flag |
|
313 |
+ constraint_set_flags |= get_bits1(&h->gb) << 3; //constraint_set3_flag |
|
314 |
+ get_bits(&h->gb, 4); // reserved |
|
315 |
+ level_idc= get_bits(&h->gb, 8); |
|
316 |
+ sps_id= get_ue_golomb_31(&h->gb); |
|
318 | 317 |
|
319 | 318 |
if(sps_id >= MAX_SPS_COUNT) { |
320 |
- av_log(h->s.avctx, AV_LOG_ERROR, "sps_id (%d) out of range\n", sps_id); |
|
319 |
+ av_log(h->avctx, AV_LOG_ERROR, "sps_id (%d) out of range\n", sps_id); |
|
321 | 320 |
return -1; |
322 | 321 |
} |
323 | 322 |
sps= av_mallocz(sizeof(SPS)); |
... | ... |
@@ -338,16 +333,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ |
338 | 338 |
sps->profile_idc == 44 || sps->profile_idc == 83 || |
339 | 339 |
sps->profile_idc == 86 || sps->profile_idc == 118 || |
340 | 340 |
sps->profile_idc == 128 || sps->profile_idc == 144) { |
341 |
- sps->chroma_format_idc= get_ue_golomb_31(&s->gb); |
|
341 |
+ sps->chroma_format_idc= get_ue_golomb_31(&h->gb); |
|
342 | 342 |
if(sps->chroma_format_idc > 3) { |
343 |
- av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc (%u) out of range\n", sps->chroma_format_idc); |
|
343 |
+ av_log(h->avctx, AV_LOG_ERROR, "chroma_format_idc (%u) out of range\n", sps->chroma_format_idc); |
|
344 | 344 |
goto fail; |
345 | 345 |
} else if(sps->chroma_format_idc == 3) { |
346 |
- sps->residual_color_transform_flag = get_bits1(&s->gb); |
|
346 |
+ sps->residual_color_transform_flag = get_bits1(&h->gb); |
|
347 | 347 |
} |
348 |
- sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8; |
|
349 |
- sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8; |
|
350 |
- sps->transform_bypass = get_bits1(&s->gb); |
|
348 |
+ sps->bit_depth_luma = get_ue_golomb(&h->gb) + 8; |
|
349 |
+ sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8; |
|
350 |
+ sps->transform_bypass = get_bits1(&h->gb); |
|
351 | 351 |
decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); |
352 | 352 |
}else{ |
353 | 353 |
sps->chroma_format_idc= 1; |
... | ... |
@@ -355,78 +350,78 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ |
355 | 355 |
sps->bit_depth_chroma = 8; |
356 | 356 |
} |
357 | 357 |
|
358 |
- log2_max_frame_num_minus4 = get_ue_golomb(&s->gb); |
|
358 |
+ log2_max_frame_num_minus4 = get_ue_golomb(&h->gb); |
|
359 | 359 |
if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || |
360 | 360 |
log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { |
361 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
361 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
362 | 362 |
"log2_max_frame_num_minus4 out of range (0-12): %d\n", |
363 | 363 |
log2_max_frame_num_minus4); |
364 | 364 |
goto fail; |
365 | 365 |
} |
366 | 366 |
sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; |
367 | 367 |
|
368 |
- sps->poc_type= get_ue_golomb_31(&s->gb); |
|
368 |
+ sps->poc_type= get_ue_golomb_31(&h->gb); |
|
369 | 369 |
|
370 | 370 |
if(sps->poc_type == 0){ //FIXME #define |
371 |
- sps->log2_max_poc_lsb= get_ue_golomb(&s->gb) + 4; |
|
371 |
+ sps->log2_max_poc_lsb= get_ue_golomb(&h->gb) + 4; |
|
372 | 372 |
} else if(sps->poc_type == 1){//FIXME #define |
373 |
- sps->delta_pic_order_always_zero_flag= get_bits1(&s->gb); |
|
374 |
- sps->offset_for_non_ref_pic= get_se_golomb(&s->gb); |
|
375 |
- sps->offset_for_top_to_bottom_field= get_se_golomb(&s->gb); |
|
376 |
- sps->poc_cycle_length = get_ue_golomb(&s->gb); |
|
373 |
+ sps->delta_pic_order_always_zero_flag= get_bits1(&h->gb); |
|
374 |
+ sps->offset_for_non_ref_pic= get_se_golomb(&h->gb); |
|
375 |
+ sps->offset_for_top_to_bottom_field= get_se_golomb(&h->gb); |
|
376 |
+ sps->poc_cycle_length = get_ue_golomb(&h->gb); |
|
377 | 377 |
|
378 | 378 |
if((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)){ |
379 |
- av_log(h->s.avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", sps->poc_cycle_length); |
|
379 |
+ av_log(h->avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", sps->poc_cycle_length); |
|
380 | 380 |
goto fail; |
381 | 381 |
} |
382 | 382 |
|
383 | 383 |
for(i=0; i<sps->poc_cycle_length; i++) |
384 |
- sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb); |
|
384 |
+ sps->offset_for_ref_frame[i]= get_se_golomb(&h->gb); |
|
385 | 385 |
}else if(sps->poc_type != 2){ |
386 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); |
|
386 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); |
|
387 | 387 |
goto fail; |
388 | 388 |
} |
389 | 389 |
|
390 |
- sps->ref_frame_count= get_ue_golomb_31(&s->gb); |
|
390 |
+ sps->ref_frame_count= get_ue_golomb_31(&h->gb); |
|
391 | 391 |
if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count >= 32U){ |
392 |
- av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n"); |
|
392 |
+ av_log(h->avctx, AV_LOG_ERROR, "too many reference frames\n"); |
|
393 | 393 |
goto fail; |
394 | 394 |
} |
395 |
- sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb); |
|
396 |
- sps->mb_width = get_ue_golomb(&s->gb) + 1; |
|
397 |
- sps->mb_height= get_ue_golomb(&s->gb) + 1; |
|
395 |
+ sps->gaps_in_frame_num_allowed_flag= get_bits1(&h->gb); |
|
396 |
+ sps->mb_width = get_ue_golomb(&h->gb) + 1; |
|
397 |
+ sps->mb_height= get_ue_golomb(&h->gb) + 1; |
|
398 | 398 |
if((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 || |
399 |
- av_image_check_size(16*sps->mb_width, 16*sps->mb_height, 0, h->s.avctx)){ |
|
400 |
- av_log(h->s.avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); |
|
399 |
+ av_image_check_size(16*sps->mb_width, 16*sps->mb_height, 0, h->avctx)){ |
|
400 |
+ av_log(h->avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); |
|
401 | 401 |
goto fail; |
402 | 402 |
} |
403 | 403 |
|
404 |
- sps->frame_mbs_only_flag= get_bits1(&s->gb); |
|
404 |
+ sps->frame_mbs_only_flag= get_bits1(&h->gb); |
|
405 | 405 |
if(!sps->frame_mbs_only_flag) |
406 |
- sps->mb_aff= get_bits1(&s->gb); |
|
406 |
+ sps->mb_aff= get_bits1(&h->gb); |
|
407 | 407 |
else |
408 | 408 |
sps->mb_aff= 0; |
409 | 409 |
|
410 |
- sps->direct_8x8_inference_flag= get_bits1(&s->gb); |
|
410 |
+ sps->direct_8x8_inference_flag= get_bits1(&h->gb); |
|
411 | 411 |
if(!sps->frame_mbs_only_flag && !sps->direct_8x8_inference_flag){ |
412 |
- av_log(h->s.avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); |
|
412 |
+ av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); |
|
413 | 413 |
goto fail; |
414 | 414 |
} |
415 | 415 |
|
416 | 416 |
#ifndef ALLOW_INTERLACE |
417 | 417 |
if(sps->mb_aff) |
418 |
- av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); |
|
418 |
+ av_log(h->avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); |
|
419 | 419 |
#endif |
420 |
- sps->crop= get_bits1(&s->gb); |
|
420 |
+ sps->crop= get_bits1(&h->gb); |
|
421 | 421 |
if(sps->crop){ |
422 | 422 |
int crop_vertical_limit = sps->chroma_format_idc & 2 ? 16 : 8; |
423 | 423 |
int crop_horizontal_limit = sps->chroma_format_idc == 3 ? 16 : 8; |
424 |
- sps->crop_left = get_ue_golomb(&s->gb); |
|
425 |
- sps->crop_right = get_ue_golomb(&s->gb); |
|
426 |
- sps->crop_top = get_ue_golomb(&s->gb); |
|
427 |
- sps->crop_bottom= get_ue_golomb(&s->gb); |
|
428 |
- if (h->s.avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) { |
|
429 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
424 |
+ sps->crop_left = get_ue_golomb(&h->gb); |
|
425 |
+ sps->crop_right = get_ue_golomb(&h->gb); |
|
426 |
+ sps->crop_top = get_ue_golomb(&h->gb); |
|
427 |
+ sps->crop_bottom= get_ue_golomb(&h->gb); |
|
428 |
+ if (h->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) { |
|
429 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
430 | 430 |
"discarding sps cropping, " |
431 | 431 |
"original values are l:%u r:%u t:%u b:%u\n", |
432 | 432 |
sps->crop_left, |
... | ... |
@@ -440,10 +435,10 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ |
440 | 440 |
sps->crop_bottom = 0; |
441 | 441 |
} |
442 | 442 |
if(sps->crop_left || sps->crop_top){ |
443 |
- av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); |
|
443 |
+ av_log(h->avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); |
|
444 | 444 |
} |
445 | 445 |
if(sps->crop_right >= crop_horizontal_limit || sps->crop_bottom >= crop_vertical_limit){ |
446 |
- av_log(h->s.avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n"); |
|
446 |
+ av_log(h->avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n"); |
|
447 | 447 |
} |
448 | 448 |
}else{ |
449 | 449 |
sps->crop_left = |
... | ... |
@@ -452,7 +447,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ |
452 | 452 |
sps->crop_bottom= 0; |
453 | 453 |
} |
454 | 454 |
|
455 |
- sps->vui_parameters_present_flag= get_bits1(&s->gb); |
|
455 |
+ sps->vui_parameters_present_flag= get_bits1(&h->gb); |
|
456 | 456 |
if( sps->vui_parameters_present_flag ) |
457 | 457 |
if (decode_vui_parameters(h, sps) < 0) |
458 | 458 |
goto fail; |
... | ... |
@@ -460,9 +455,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){ |
460 | 460 |
if(!sps->sar.den) |
461 | 461 |
sps->sar.den= 1; |
462 | 462 |
|
463 |
- if(s->avctx->debug&FF_DEBUG_PICT_INFO){ |
|
463 |
+ if(h->avctx->debug&FF_DEBUG_PICT_INFO){ |
|
464 | 464 |
static const char csp[4][5] = { "Gray", "420", "422", "444" }; |
465 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n", |
|
465 |
+ av_log(h->avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n", |
|
466 | 466 |
sps_id, sps->profile_idc, sps->level_idc, |
467 | 467 |
sps->poc_type, |
468 | 468 |
sps->ref_frame_count, |
... | ... |
@@ -500,35 +495,34 @@ build_qp_table(PPS *pps, int t, int index, const int depth) |
500 | 500 |
} |
501 | 501 |
|
502 | 502 |
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){ |
503 |
- MpegEncContext * const s = &h->s; |
|
504 |
- unsigned int pps_id= get_ue_golomb(&s->gb); |
|
503 |
+ unsigned int pps_id= get_ue_golomb(&h->gb); |
|
505 | 504 |
PPS *pps; |
506 | 505 |
const int qp_bd_offset = 6*(h->sps.bit_depth_luma-8); |
507 | 506 |
int bits_left; |
508 | 507 |
|
509 | 508 |
if(pps_id >= MAX_PPS_COUNT) { |
510 |
- av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id); |
|
509 |
+ av_log(h->avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id); |
|
511 | 510 |
return -1; |
512 | 511 |
} else if (h->sps.bit_depth_luma > 10) { |
513 |
- av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma); |
|
512 |
+ av_log(h->avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma); |
|
514 | 513 |
return AVERROR_PATCHWELCOME; |
515 | 514 |
} |
516 | 515 |
|
517 | 516 |
pps= av_mallocz(sizeof(PPS)); |
518 | 517 |
if(pps == NULL) |
519 | 518 |
return -1; |
520 |
- pps->sps_id= get_ue_golomb_31(&s->gb); |
|
519 |
+ pps->sps_id= get_ue_golomb_31(&h->gb); |
|
521 | 520 |
if((unsigned)pps->sps_id>=MAX_SPS_COUNT || h->sps_buffers[pps->sps_id] == NULL){ |
522 |
- av_log(h->s.avctx, AV_LOG_ERROR, "sps_id out of range\n"); |
|
521 |
+ av_log(h->avctx, AV_LOG_ERROR, "sps_id out of range\n"); |
|
523 | 522 |
goto fail; |
524 | 523 |
} |
525 | 524 |
|
526 |
- pps->cabac= get_bits1(&s->gb); |
|
527 |
- pps->pic_order_present= get_bits1(&s->gb); |
|
528 |
- pps->slice_group_count= get_ue_golomb(&s->gb) + 1; |
|
525 |
+ pps->cabac= get_bits1(&h->gb); |
|
526 |
+ pps->pic_order_present= get_bits1(&h->gb); |
|
527 |
+ pps->slice_group_count= get_ue_golomb(&h->gb) + 1; |
|
529 | 528 |
if(pps->slice_group_count > 1 ){ |
530 |
- pps->mb_slice_group_map_type= get_ue_golomb(&s->gb); |
|
531 |
- av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n"); |
|
529 |
+ pps->mb_slice_group_map_type= get_ue_golomb(&h->gb); |
|
530 |
+ av_log(h->avctx, AV_LOG_ERROR, "FMO not supported\n"); |
|
532 | 531 |
switch(pps->mb_slice_group_map_type){ |
533 | 532 |
case 0: |
534 | 533 |
#if 0 |
... | ... |
@@ -563,33 +557,33 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){ |
563 | 563 |
break; |
564 | 564 |
} |
565 | 565 |
} |
566 |
- pps->ref_count[0]= get_ue_golomb(&s->gb) + 1; |
|
567 |
- pps->ref_count[1]= get_ue_golomb(&s->gb) + 1; |
|
566 |
+ pps->ref_count[0]= get_ue_golomb(&h->gb) + 1; |
|
567 |
+ pps->ref_count[1]= get_ue_golomb(&h->gb) + 1; |
|
568 | 568 |
if(pps->ref_count[0]-1 > 32-1 || pps->ref_count[1]-1 > 32-1){ |
569 |
- av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); |
|
569 |
+ av_log(h->avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); |
|
570 | 570 |
goto fail; |
571 | 571 |
} |
572 | 572 |
|
573 |
- pps->weighted_pred= get_bits1(&s->gb); |
|
574 |
- pps->weighted_bipred_idc= get_bits(&s->gb, 2); |
|
575 |
- pps->init_qp= get_se_golomb(&s->gb) + 26 + qp_bd_offset; |
|
576 |
- pps->init_qs= get_se_golomb(&s->gb) + 26 + qp_bd_offset; |
|
577 |
- pps->chroma_qp_index_offset[0]= get_se_golomb(&s->gb); |
|
578 |
- pps->deblocking_filter_parameters_present= get_bits1(&s->gb); |
|
579 |
- pps->constrained_intra_pred= get_bits1(&s->gb); |
|
580 |
- pps->redundant_pic_cnt_present = get_bits1(&s->gb); |
|
573 |
+ pps->weighted_pred= get_bits1(&h->gb); |
|
574 |
+ pps->weighted_bipred_idc= get_bits(&h->gb, 2); |
|
575 |
+ pps->init_qp= get_se_golomb(&h->gb) + 26 + qp_bd_offset; |
|
576 |
+ pps->init_qs= get_se_golomb(&h->gb) + 26 + qp_bd_offset; |
|
577 |
+ pps->chroma_qp_index_offset[0]= get_se_golomb(&h->gb); |
|
578 |
+ pps->deblocking_filter_parameters_present= get_bits1(&h->gb); |
|
579 |
+ pps->constrained_intra_pred= get_bits1(&h->gb); |
|
580 |
+ pps->redundant_pic_cnt_present = get_bits1(&h->gb); |
|
581 | 581 |
|
582 | 582 |
pps->transform_8x8_mode= 0; |
583 | 583 |
h->dequant_coeff_pps= -1; //contents of sps/pps can change even if id doesn't, so reinit |
584 | 584 |
memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, sizeof(pps->scaling_matrix4)); |
585 | 585 |
memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, sizeof(pps->scaling_matrix8)); |
586 | 586 |
|
587 |
- bits_left = bit_length - get_bits_count(&s->gb); |
|
587 |
+ bits_left = bit_length - get_bits_count(&h->gb); |
|
588 | 588 |
if (bits_left && (bits_left > 8 || |
589 |
- show_bits(&s->gb, bits_left) != 1 << (bits_left - 1))) { |
|
590 |
- pps->transform_8x8_mode= get_bits1(&s->gb); |
|
589 |
+ show_bits(&h->gb, bits_left) != 1 << (bits_left - 1))) { |
|
590 |
+ pps->transform_8x8_mode= get_bits1(&h->gb); |
|
591 | 591 |
decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, pps->scaling_matrix4, pps->scaling_matrix8); |
592 |
- pps->chroma_qp_index_offset[1]= get_se_golomb(&s->gb); //second_chroma_qp_index_offset |
|
592 |
+ pps->chroma_qp_index_offset[1]= get_se_golomb(&h->gb); //second_chroma_qp_index_offset |
|
593 | 593 |
} else { |
594 | 594 |
pps->chroma_qp_index_offset[1]= pps->chroma_qp_index_offset[0]; |
595 | 595 |
} |
... | ... |
@@ -599,8 +593,8 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){ |
599 | 599 |
if(pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) |
600 | 600 |
pps->chroma_qp_diff= 1; |
601 | 601 |
|
602 |
- if(s->avctx->debug&FF_DEBUG_PICT_INFO){ |
|
603 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d/%d %s %s %s %s\n", |
|
602 |
+ if(h->avctx->debug&FF_DEBUG_PICT_INFO){ |
|
603 |
+ av_log(h->avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d/%d %s %s %s %s\n", |
|
604 | 604 |
pps_id, pps->sps_id, |
605 | 605 |
pps->cabac ? "CABAC" : "CAVLC", |
606 | 606 |
pps->slice_group_count, |
... | ... |
@@ -106,7 +106,6 @@ static int add_sorted(Picture **sorted, Picture **src, int len, int limit, int d |
106 | 106 |
} |
107 | 107 |
|
108 | 108 |
int ff_h264_fill_default_ref_list(H264Context *h){ |
109 |
- MpegEncContext * const s = &h->s; |
|
110 | 109 |
int i, len; |
111 | 110 |
|
112 | 111 |
if(h->slice_type_nos==AV_PICTURE_TYPE_B){ |
... | ... |
@@ -115,16 +114,16 @@ int ff_h264_fill_default_ref_list(H264Context *h){ |
115 | 115 |
int lens[2]; |
116 | 116 |
|
117 | 117 |
if(FIELD_PICTURE) |
118 |
- cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ]; |
|
118 |
+ cur_poc= h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]; |
|
119 | 119 |
else |
120 |
- cur_poc= s->current_picture_ptr->poc; |
|
120 |
+ cur_poc= h->cur_pic_ptr->poc; |
|
121 | 121 |
|
122 | 122 |
for(list= 0; list<2; list++){ |
123 | 123 |
len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list); |
124 | 124 |
len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list); |
125 | 125 |
assert(len<=32); |
126 |
- len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure); |
|
127 |
- len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure); |
|
126 |
+ len= build_def_list(h->default_ref_list[list] , sorted , len, 0, h->picture_structure); |
|
127 |
+ len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, h->picture_structure); |
|
128 | 128 |
assert(len<=32); |
129 | 129 |
|
130 | 130 |
if(len < h->ref_count[list]) |
... | ... |
@@ -138,19 +137,19 @@ int ff_h264_fill_default_ref_list(H264Context *h){ |
138 | 138 |
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]); |
139 | 139 |
} |
140 | 140 |
}else{ |
141 |
- len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure); |
|
142 |
- len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure); |
|
141 |
+ len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, h->picture_structure); |
|
142 |
+ len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, h->picture_structure); |
|
143 | 143 |
assert(len <= 32); |
144 | 144 |
if(len < h->ref_count[0]) |
145 | 145 |
memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len)); |
146 | 146 |
} |
147 | 147 |
#ifdef TRACE |
148 | 148 |
for (i=0; i<h->ref_count[0]; i++) { |
149 |
- tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].f.data[0]); |
|
149 |
+ tprintf(h->avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].f.data[0]); |
|
150 | 150 |
} |
151 | 151 |
if(h->slice_type_nos==AV_PICTURE_TYPE_B){ |
152 | 152 |
for (i=0; i<h->ref_count[1]; i++) { |
153 |
- tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].f.data[0]); |
|
153 |
+ tprintf(h->avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].f.data[0]); |
|
154 | 154 |
} |
155 | 155 |
} |
156 | 156 |
#endif |
... | ... |
@@ -171,9 +170,7 @@ static void print_long_term(H264Context *h); |
171 | 171 |
* described by pic_num |
172 | 172 |
*/ |
173 | 173 |
static int pic_num_extract(H264Context *h, int pic_num, int *structure){ |
174 |
- MpegEncContext * const s = &h->s; |
|
175 |
- |
|
176 |
- *structure = s->picture_structure; |
|
174 |
+ *structure = h->picture_structure; |
|
177 | 175 |
if(FIELD_PICTURE){ |
178 | 176 |
if (!(pic_num & 1)) |
179 | 177 |
/* opposite field */ |
... | ... |
@@ -185,7 +182,6 @@ static int pic_num_extract(H264Context *h, int pic_num, int *structure){ |
185 | 185 |
} |
186 | 186 |
|
187 | 187 |
int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
188 |
- MpegEncContext * const s = &h->s; |
|
189 | 188 |
int list, index, pic_structure; |
190 | 189 |
|
191 | 190 |
print_short_term(h); |
... | ... |
@@ -194,11 +190,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
194 | 194 |
for(list=0; list<h->list_count; list++){ |
195 | 195 |
memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]); |
196 | 196 |
|
197 |
- if(get_bits1(&s->gb)){ |
|
197 |
+ if(get_bits1(&h->gb)){ |
|
198 | 198 |
int pred= h->curr_pic_num; |
199 | 199 |
|
200 | 200 |
for(index=0; ; index++){ |
201 |
- unsigned int reordering_of_pic_nums_idc= get_ue_golomb_31(&s->gb); |
|
201 |
+ unsigned int reordering_of_pic_nums_idc= get_ue_golomb_31(&h->gb); |
|
202 | 202 |
unsigned int pic_id; |
203 | 203 |
int i; |
204 | 204 |
Picture *ref = NULL; |
... | ... |
@@ -207,17 +203,17 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
207 | 207 |
break; |
208 | 208 |
|
209 | 209 |
if(index >= h->ref_count[list]){ |
210 |
- av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n"); |
|
210 |
+ av_log(h->avctx, AV_LOG_ERROR, "reference count overflow\n"); |
|
211 | 211 |
return -1; |
212 | 212 |
} |
213 | 213 |
|
214 | 214 |
if(reordering_of_pic_nums_idc<3){ |
215 | 215 |
if(reordering_of_pic_nums_idc<2){ |
216 |
- const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1; |
|
216 |
+ const unsigned int abs_diff_pic_num= get_ue_golomb(&h->gb) + 1; |
|
217 | 217 |
int frame_num; |
218 | 218 |
|
219 | 219 |
if(abs_diff_pic_num > h->max_pic_num){ |
220 |
- av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); |
|
220 |
+ av_log(h->avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); |
|
221 | 221 |
return -1; |
222 | 222 |
} |
223 | 223 |
|
... | ... |
@@ -241,12 +237,12 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
241 | 241 |
ref->pic_id= pred; |
242 | 242 |
}else{ |
243 | 243 |
int long_idx; |
244 |
- pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx |
|
244 |
+ pic_id= get_ue_golomb(&h->gb); //long_term_pic_idx |
|
245 | 245 |
|
246 | 246 |
long_idx= pic_num_extract(h, pic_id, &pic_structure); |
247 | 247 |
|
248 | 248 |
if(long_idx>31){ |
249 |
- av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n"); |
|
249 |
+ av_log(h->avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n"); |
|
250 | 250 |
return -1; |
251 | 251 |
} |
252 | 252 |
ref = h->long_ref[long_idx]; |
... | ... |
@@ -261,7 +257,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
261 | 261 |
} |
262 | 262 |
|
263 | 263 |
if (i < 0) { |
264 |
- av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); |
|
264 |
+ av_log(h->avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); |
|
265 | 265 |
memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME |
266 | 266 |
} else { |
267 | 267 |
for(i=index; i+1<h->ref_count[list]; i++){ |
... | ... |
@@ -277,7 +273,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
277 | 277 |
} |
278 | 278 |
} |
279 | 279 |
}else{ |
280 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n"); |
|
280 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n"); |
|
281 | 281 |
return -1; |
282 | 282 |
} |
283 | 283 |
} |
... | ... |
@@ -286,7 +282,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ |
286 | 286 |
for(list=0; list<h->list_count; list++){ |
287 | 287 |
for(index= 0; index < h->ref_count[list]; index++){ |
288 | 288 |
if (!h->ref_list[list][index].f.data[0]) { |
289 |
- av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n"); |
|
289 |
+ av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture\n"); |
|
290 | 290 |
if (h->default_ref_list[list][0].f.data[0]) |
291 | 291 |
h->ref_list[list][index]= h->default_ref_list[list][0]; |
292 | 292 |
else |
... | ... |
@@ -359,13 +355,12 @@ static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){ |
359 | 359 |
* frame number is found |
360 | 360 |
*/ |
361 | 361 |
static Picture * find_short(H264Context *h, int frame_num, int *idx){ |
362 |
- MpegEncContext * const s = &h->s; |
|
363 | 362 |
int i; |
364 | 363 |
|
365 | 364 |
for(i=0; i<h->short_ref_count; i++){ |
366 | 365 |
Picture *pic= h->short_ref[i]; |
367 |
- if(s->avctx->debug&FF_DEBUG_MMCO) |
|
368 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic); |
|
366 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) |
|
367 |
+ av_log(h->avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic); |
|
369 | 368 |
if(pic->frame_num == frame_num) { |
370 | 369 |
*idx = i; |
371 | 370 |
return pic; |
... | ... |
@@ -392,12 +387,11 @@ static void remove_short_at_index(H264Context *h, int i){ |
392 | 392 |
* @return the removed picture or NULL if an error occurs |
393 | 393 |
*/ |
394 | 394 |
static Picture * remove_short(H264Context *h, int frame_num, int ref_mask){ |
395 |
- MpegEncContext * const s = &h->s; |
|
396 | 395 |
Picture *pic; |
397 | 396 |
int i; |
398 | 397 |
|
399 |
- if(s->avctx->debug&FF_DEBUG_MMCO) |
|
400 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count); |
|
398 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) |
|
399 |
+ av_log(h->avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count); |
|
401 | 400 |
|
402 | 401 |
pic = find_short(h, frame_num, &i); |
403 | 402 |
if (pic){ |
... | ... |
@@ -449,11 +443,11 @@ void ff_h264_remove_all_refs(H264Context *h){ |
449 | 449 |
*/ |
450 | 450 |
static void print_short_term(H264Context *h) { |
451 | 451 |
uint32_t i; |
452 |
- if(h->s.avctx->debug&FF_DEBUG_MMCO) { |
|
453 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n"); |
|
452 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) { |
|
453 |
+ av_log(h->avctx, AV_LOG_DEBUG, "short term list:\n"); |
|
454 | 454 |
for(i=0; i<h->short_ref_count; i++){ |
455 | 455 |
Picture *pic= h->short_ref[i]; |
456 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", |
|
456 |
+ av_log(h->avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", |
|
457 | 457 |
i, pic->frame_num, pic->poc, pic->f.data[0]); |
458 | 458 |
} |
459 | 459 |
} |
... | ... |
@@ -464,12 +458,12 @@ static void print_short_term(H264Context *h) { |
464 | 464 |
*/ |
465 | 465 |
static void print_long_term(H264Context *h) { |
466 | 466 |
uint32_t i; |
467 |
- if(h->s.avctx->debug&FF_DEBUG_MMCO) { |
|
468 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n"); |
|
467 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) { |
|
468 |
+ av_log(h->avctx, AV_LOG_DEBUG, "long term list:\n"); |
|
469 | 469 |
for(i = 0; i < 16; i++){ |
470 | 470 |
Picture *pic= h->long_ref[i]; |
471 | 471 |
if (pic) { |
472 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", |
|
472 |
+ av_log(h->avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", |
|
473 | 473 |
i, pic->frame_num, pic->poc, pic->f.data[0]); |
474 | 474 |
} |
475 | 475 |
} |
... | ... |
@@ -490,7 +484,6 @@ static int check_opcodes(MMCO *mmco1, MMCO *mmco2, int n_mmcos) |
490 | 490 |
|
491 | 491 |
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) |
492 | 492 |
{ |
493 |
- MpegEncContext * const s = &h->s; |
|
494 | 493 |
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp; |
495 | 494 |
int mmco_index = 0, i; |
496 | 495 |
|
... | ... |
@@ -498,8 +491,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) |
498 | 498 |
|
499 | 499 |
if (h->short_ref_count && |
500 | 500 |
h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && |
501 |
- !(FIELD_PICTURE && !s->first_field && |
|
502 |
- s->current_picture_ptr->f.reference)) { |
|
501 |
+ !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->f.reference)) { |
|
503 | 502 |
mmco[0].opcode = MMCO_SHORT2UNUSED; |
504 | 503 |
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; |
505 | 504 |
mmco_index = 1; |
... | ... |
@@ -516,7 +508,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) |
516 | 516 |
} else if (!first_slice && mmco_index >= 0 && |
517 | 517 |
(mmco_index != h->mmco_index || |
518 | 518 |
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) { |
519 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
519 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
520 | 520 |
"Inconsistent MMCO state between slices [%d, %d, %d]\n", |
521 | 521 |
mmco_index, h->mmco_index, i); |
522 | 522 |
return AVERROR_INVALIDDATA; |
... | ... |
@@ -525,18 +517,17 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) |
525 | 525 |
} |
526 | 526 |
|
527 | 527 |
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
528 |
- MpegEncContext * const s = &h->s; |
|
529 | 528 |
int i, av_uninit(j); |
530 | 529 |
int current_ref_assigned=0, err=0; |
531 | 530 |
Picture *av_uninit(pic); |
532 | 531 |
|
533 |
- if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0) |
|
534 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n"); |
|
532 |
+ if((h->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0) |
|
533 |
+ av_log(h->avctx, AV_LOG_DEBUG, "no mmco here\n"); |
|
535 | 534 |
|
536 | 535 |
for(i=0; i<mmco_count; i++){ |
537 | 536 |
int av_uninit(structure), av_uninit(frame_num); |
538 |
- if(s->avctx->debug&FF_DEBUG_MMCO) |
|
539 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg); |
|
537 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) |
|
538 |
+ av_log(h->avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg); |
|
540 | 539 |
|
541 | 540 |
if( mmco[i].opcode == MMCO_SHORT2UNUSED |
542 | 541 |
|| mmco[i].opcode == MMCO_SHORT2LONG){ |
... | ... |
@@ -545,7 +536,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
545 | 545 |
if(!pic){ |
546 | 546 |
if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg] |
547 | 547 |
|| h->long_ref[mmco[i].long_arg]->frame_num != frame_num) { |
548 |
- av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n"); |
|
548 |
+ av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n"); |
|
549 | 549 |
err = AVERROR_INVALIDDATA; |
550 | 550 |
} |
551 | 551 |
continue; |
... | ... |
@@ -554,8 +545,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
554 | 554 |
|
555 | 555 |
switch(mmco[i].opcode){ |
556 | 556 |
case MMCO_SHORT2UNUSED: |
557 |
- if(s->avctx->debug&FF_DEBUG_MMCO) |
|
558 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count); |
|
557 |
+ if(h->avctx->debug&FF_DEBUG_MMCO) |
|
558 |
+ av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count); |
|
559 | 559 |
remove_short(h, frame_num, structure ^ PICT_FRAME); |
560 | 560 |
break; |
561 | 561 |
case MMCO_SHORT2LONG: |
... | ... |
@@ -574,8 +565,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
574 | 574 |
pic = h->long_ref[j]; |
575 | 575 |
if (pic) { |
576 | 576 |
remove_long(h, j, structure ^ PICT_FRAME); |
577 |
- } else if(s->avctx->debug&FF_DEBUG_MMCO) |
|
578 |
- av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref long failure\n"); |
|
577 |
+ } else if(h->avctx->debug&FF_DEBUG_MMCO) |
|
578 |
+ av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref long failure\n"); |
|
579 | 579 |
break; |
580 | 580 |
case MMCO_LONG: |
581 | 581 |
// Comment below left from previous code as it is an interresting note. |
... | ... |
@@ -586,15 +577,15 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
586 | 586 |
* and mark this field valid. |
587 | 587 |
*/ |
588 | 588 |
|
589 |
- if (h->long_ref[mmco[i].long_arg] != s->current_picture_ptr) { |
|
589 |
+ if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) { |
|
590 | 590 |
remove_long(h, mmco[i].long_arg, 0); |
591 | 591 |
|
592 |
- h->long_ref[ mmco[i].long_arg ]= s->current_picture_ptr; |
|
592 |
+ h->long_ref[ mmco[i].long_arg ]= h->cur_pic_ptr; |
|
593 | 593 |
h->long_ref[ mmco[i].long_arg ]->long_ref=1; |
594 | 594 |
h->long_ref_count++; |
595 | 595 |
} |
596 | 596 |
|
597 |
- s->current_picture_ptr->f.reference |= s->picture_structure; |
|
597 |
+ h->cur_pic_ptr->f.reference |= h->picture_structure; |
|
598 | 598 |
current_ref_assigned=1; |
599 | 599 |
break; |
600 | 600 |
case MMCO_SET_MAX_LONG: |
... | ... |
@@ -612,9 +603,9 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
612 | 612 |
remove_long(h, j, 0); |
613 | 613 |
} |
614 | 614 |
h->frame_num= |
615 |
- s->current_picture_ptr->frame_num= 0; |
|
615 |
+ h->cur_pic_ptr->frame_num= 0; |
|
616 | 616 |
h->mmco_reset = 1; |
617 |
- s->current_picture_ptr->mmco_reset=1; |
|
617 |
+ h->cur_pic_ptr->mmco_reset=1; |
|
618 | 618 |
break; |
619 | 619 |
default: assert(0); |
620 | 620 |
} |
... | ... |
@@ -627,39 +618,39 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
627 | 627 |
* in long_ref; trying to put it on the short list here is an |
628 | 628 |
* error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3). |
629 | 629 |
*/ |
630 |
- if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) { |
|
630 |
+ if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) { |
|
631 | 631 |
/* Just mark the second field valid */ |
632 |
- s->current_picture_ptr->f.reference = PICT_FRAME; |
|
633 |
- } else if (s->current_picture_ptr->long_ref) { |
|
634 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference " |
|
635 |
- "assignment for second field " |
|
636 |
- "in complementary field pair " |
|
637 |
- "(first field is long term)\n"); |
|
632 |
+ h->cur_pic_ptr->f.reference = PICT_FRAME; |
|
633 |
+ } else if (h->cur_pic_ptr->long_ref) { |
|
634 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference " |
|
635 |
+ "assignment for second field " |
|
636 |
+ "in complementary field pair " |
|
637 |
+ "(first field is long term)\n"); |
|
638 | 638 |
err = AVERROR_INVALIDDATA; |
639 | 639 |
} else { |
640 |
- pic= remove_short(h, s->current_picture_ptr->frame_num, 0); |
|
640 |
+ pic= remove_short(h, h->cur_pic_ptr->frame_num, 0); |
|
641 | 641 |
if(pic){ |
642 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n"); |
|
642 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n"); |
|
643 | 643 |
err = AVERROR_INVALIDDATA; |
644 | 644 |
} |
645 | 645 |
|
646 | 646 |
if(h->short_ref_count) |
647 | 647 |
memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*)); |
648 | 648 |
|
649 |
- h->short_ref[0]= s->current_picture_ptr; |
|
649 |
+ h->short_ref[0]= h->cur_pic_ptr; |
|
650 | 650 |
h->short_ref_count++; |
651 |
- s->current_picture_ptr->f.reference |= s->picture_structure; |
|
651 |
+ h->cur_pic_ptr->f.reference |= h->picture_structure; |
|
652 | 652 |
} |
653 | 653 |
} |
654 | 654 |
|
655 | 655 |
if (h->long_ref_count + h->short_ref_count - |
656 |
- (h->short_ref[0] == s->current_picture_ptr) > h->sps.ref_frame_count){ |
|
656 |
+ (h->short_ref[0] == h->cur_pic_ptr) > h->sps.ref_frame_count){ |
|
657 | 657 |
|
658 | 658 |
/* We have too many reference frames, probably due to corrupted |
659 | 659 |
* stream. Need to discard one frame. Prevents overrun of the |
660 | 660 |
* short_ref and long_ref buffers. |
661 | 661 |
*/ |
662 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
662 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
663 | 663 |
"number of reference frames (%d+%d) exceeds max (%d; probably " |
664 | 664 |
"corrupt input), discarding one\n", |
665 | 665 |
h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count); |
... | ... |
@@ -680,19 +671,18 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ |
680 | 680 |
|
681 | 681 |
print_short_term(h); |
682 | 682 |
print_long_term(h); |
683 |
- return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0; |
|
683 |
+ return (h->avctx->err_recognition & AV_EF_EXPLODE) ? err : 0; |
|
684 | 684 |
} |
685 | 685 |
|
686 | 686 |
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, |
687 | 687 |
int first_slice) |
688 | 688 |
{ |
689 |
- MpegEncContext * const s = &h->s; |
|
690 | 689 |
int i, ret; |
691 | 690 |
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp; |
692 | 691 |
int mmco_index = 0; |
693 | 692 |
|
694 | 693 |
if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields |
695 |
- s->broken_link = get_bits1(gb) - 1; |
|
694 |
+ skip_bits1(gb); // broken_link |
|
696 | 695 |
if (get_bits1(gb)){ |
697 | 696 |
mmco[0].opcode = MMCO_LONG; |
698 | 697 |
mmco[0].long_arg = 0; |
... | ... |
@@ -725,7 +715,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, |
725 | 725 |
(long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && |
726 | 726 |
long_arg == 16) && |
727 | 727 |
!(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){ |
728 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
728 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
729 | 729 |
"illegal long ref in memory management control " |
730 | 730 |
"operation %d\n", opcode); |
731 | 731 |
return -1; |
... | ... |
@@ -734,7 +724,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, |
734 | 734 |
} |
735 | 735 |
|
736 | 736 |
if (opcode > (unsigned) MMCO_LONG){ |
737 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
737 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
738 | 738 |
"illegal memory management control operation %d\n", |
739 | 739 |
opcode); |
740 | 740 |
return -1; |
... | ... |
@@ -746,7 +736,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, |
746 | 746 |
} else { |
747 | 747 |
if (first_slice) { |
748 | 748 |
ret = ff_generate_sliding_window_mmcos(h, first_slice); |
749 |
- if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE) |
|
749 |
+ if (ret < 0 && h->avctx->err_recognition & AV_EF_EXPLODE) |
|
750 | 750 |
return ret; |
751 | 751 |
} |
752 | 752 |
mmco_index = -1; |
... | ... |
@@ -758,7 +748,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, |
758 | 758 |
} else if (!first_slice && mmco_index >= 0 && |
759 | 759 |
(mmco_index != h->mmco_index || |
760 | 760 |
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) { |
761 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
761 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
762 | 762 |
"Inconsistent MMCO state between slices [%d, %d, %d]\n", |
763 | 763 |
mmco_index, h->mmco_index, i); |
764 | 764 |
return AVERROR_INVALIDDATA; |
... | ... |
@@ -45,14 +45,13 @@ void ff_h264_reset_sei(H264Context *h) { |
45 | 45 |
} |
46 | 46 |
|
47 | 47 |
static int decode_picture_timing(H264Context *h){ |
48 |
- MpegEncContext * const s = &h->s; |
|
49 | 48 |
if(h->sps.nal_hrd_parameters_present_flag || h->sps.vcl_hrd_parameters_present_flag){ |
50 |
- h->sei_cpb_removal_delay = get_bits(&s->gb, h->sps.cpb_removal_delay_length); |
|
51 |
- h->sei_dpb_output_delay = get_bits(&s->gb, h->sps.dpb_output_delay_length); |
|
49 |
+ h->sei_cpb_removal_delay = get_bits(&h->gb, h->sps.cpb_removal_delay_length); |
|
50 |
+ h->sei_dpb_output_delay = get_bits(&h->gb, h->sps.dpb_output_delay_length); |
|
52 | 51 |
} |
53 | 52 |
if(h->sps.pic_struct_present_flag){ |
54 | 53 |
unsigned int i, num_clock_ts; |
55 |
- h->sei_pic_struct = get_bits(&s->gb, 4); |
|
54 |
+ h->sei_pic_struct = get_bits(&h->gb, 4); |
|
56 | 55 |
h->sei_ct_type = 0; |
57 | 56 |
|
58 | 57 |
if (h->sei_pic_struct > SEI_PIC_STRUCT_FRAME_TRIPLING) |
... | ... |
@@ -61,42 +60,41 @@ static int decode_picture_timing(H264Context *h){ |
61 | 61 |
num_clock_ts = sei_num_clock_ts_table[h->sei_pic_struct]; |
62 | 62 |
|
63 | 63 |
for (i = 0 ; i < num_clock_ts ; i++){ |
64 |
- if(get_bits(&s->gb, 1)){ /* clock_timestamp_flag */ |
|
64 |
+ if(get_bits(&h->gb, 1)){ /* clock_timestamp_flag */ |
|
65 | 65 |
unsigned int full_timestamp_flag; |
66 |
- h->sei_ct_type |= 1<<get_bits(&s->gb, 2); |
|
67 |
- skip_bits(&s->gb, 1); /* nuit_field_based_flag */ |
|
68 |
- skip_bits(&s->gb, 5); /* counting_type */ |
|
69 |
- full_timestamp_flag = get_bits(&s->gb, 1); |
|
70 |
- skip_bits(&s->gb, 1); /* discontinuity_flag */ |
|
71 |
- skip_bits(&s->gb, 1); /* cnt_dropped_flag */ |
|
72 |
- skip_bits(&s->gb, 8); /* n_frames */ |
|
66 |
+ h->sei_ct_type |= 1<<get_bits(&h->gb, 2); |
|
67 |
+ skip_bits(&h->gb, 1); /* nuit_field_based_flag */ |
|
68 |
+ skip_bits(&h->gb, 5); /* counting_type */ |
|
69 |
+ full_timestamp_flag = get_bits(&h->gb, 1); |
|
70 |
+ skip_bits(&h->gb, 1); /* discontinuity_flag */ |
|
71 |
+ skip_bits(&h->gb, 1); /* cnt_dropped_flag */ |
|
72 |
+ skip_bits(&h->gb, 8); /* n_frames */ |
|
73 | 73 |
if(full_timestamp_flag){ |
74 |
- skip_bits(&s->gb, 6); /* seconds_value 0..59 */ |
|
75 |
- skip_bits(&s->gb, 6); /* minutes_value 0..59 */ |
|
76 |
- skip_bits(&s->gb, 5); /* hours_value 0..23 */ |
|
74 |
+ skip_bits(&h->gb, 6); /* seconds_value 0..59 */ |
|
75 |
+ skip_bits(&h->gb, 6); /* minutes_value 0..59 */ |
|
76 |
+ skip_bits(&h->gb, 5); /* hours_value 0..23 */ |
|
77 | 77 |
}else{ |
78 |
- if(get_bits(&s->gb, 1)){ /* seconds_flag */ |
|
79 |
- skip_bits(&s->gb, 6); /* seconds_value range 0..59 */ |
|
80 |
- if(get_bits(&s->gb, 1)){ /* minutes_flag */ |
|
81 |
- skip_bits(&s->gb, 6); /* minutes_value 0..59 */ |
|
82 |
- if(get_bits(&s->gb, 1)) /* hours_flag */ |
|
83 |
- skip_bits(&s->gb, 5); /* hours_value 0..23 */ |
|
78 |
+ if(get_bits(&h->gb, 1)){ /* seconds_flag */ |
|
79 |
+ skip_bits(&h->gb, 6); /* seconds_value range 0..59 */ |
|
80 |
+ if(get_bits(&h->gb, 1)){ /* minutes_flag */ |
|
81 |
+ skip_bits(&h->gb, 6); /* minutes_value 0..59 */ |
|
82 |
+ if(get_bits(&h->gb, 1)) /* hours_flag */ |
|
83 |
+ skip_bits(&h->gb, 5); /* hours_value 0..23 */ |
|
84 | 84 |
} |
85 | 85 |
} |
86 | 86 |
} |
87 | 87 |
if(h->sps.time_offset_length > 0) |
88 |
- skip_bits(&s->gb, h->sps.time_offset_length); /* time_offset */ |
|
88 |
+ skip_bits(&h->gb, h->sps.time_offset_length); /* time_offset */ |
|
89 | 89 |
} |
90 | 90 |
} |
91 | 91 |
|
92 |
- if(s->avctx->debug & FF_DEBUG_PICT_INFO) |
|
93 |
- av_log(s->avctx, AV_LOG_DEBUG, "ct_type:%X pic_struct:%d\n", h->sei_ct_type, h->sei_pic_struct); |
|
92 |
+ if(h->avctx->debug & FF_DEBUG_PICT_INFO) |
|
93 |
+ av_log(h->avctx, AV_LOG_DEBUG, "ct_type:%X pic_struct:%d\n", h->sei_ct_type, h->sei_pic_struct); |
|
94 | 94 |
} |
95 | 95 |
return 0; |
96 | 96 |
} |
97 | 97 |
|
98 | 98 |
static int decode_unregistered_user_data(H264Context *h, int size){ |
99 |
- MpegEncContext * const s = &h->s; |
|
100 | 99 |
uint8_t user_data[16+256]; |
101 | 100 |
int e, build, i; |
102 | 101 |
|
... | ... |
@@ -104,7 +102,7 @@ static int decode_unregistered_user_data(H264Context *h, int size){ |
104 | 104 |
return -1; |
105 | 105 |
|
106 | 106 |
for(i=0; i<sizeof(user_data)-1 && i<size; i++){ |
107 |
- user_data[i]= get_bits(&s->gb, 8); |
|
107 |
+ user_data[i]= get_bits(&h->gb, 8); |
|
108 | 108 |
} |
109 | 109 |
|
110 | 110 |
user_data[i]= 0; |
... | ... |
@@ -112,33 +110,30 @@ static int decode_unregistered_user_data(H264Context *h, int size){ |
112 | 112 |
if(e==1 && build>0) |
113 | 113 |
h->x264_build= build; |
114 | 114 |
|
115 |
- if(s->avctx->debug & FF_DEBUG_BUGS) |
|
116 |
- av_log(s->avctx, AV_LOG_DEBUG, "user data:\"%s\"\n", user_data+16); |
|
115 |
+ if(h->avctx->debug & FF_DEBUG_BUGS) |
|
116 |
+ av_log(h->avctx, AV_LOG_DEBUG, "user data:\"%s\"\n", user_data+16); |
|
117 | 117 |
|
118 | 118 |
for(; i<size; i++) |
119 |
- skip_bits(&s->gb, 8); |
|
119 |
+ skip_bits(&h->gb, 8); |
|
120 | 120 |
|
121 | 121 |
return 0; |
122 | 122 |
} |
123 | 123 |
|
124 | 124 |
static int decode_recovery_point(H264Context *h){ |
125 |
- MpegEncContext * const s = &h->s; |
|
126 |
- |
|
127 |
- h->sei_recovery_frame_cnt = get_ue_golomb(&s->gb); |
|
128 |
- skip_bits(&s->gb, 4); /* 1b exact_match_flag, 1b broken_link_flag, 2b changing_slice_group_idc */ |
|
125 |
+ h->sei_recovery_frame_cnt = get_ue_golomb(&h->gb); |
|
126 |
+ skip_bits(&h->gb, 4); /* 1b exact_match_flag, 1b broken_link_flag, 2b changing_slice_group_idc */ |
|
129 | 127 |
|
130 | 128 |
return 0; |
131 | 129 |
} |
132 | 130 |
|
133 | 131 |
static int decode_buffering_period(H264Context *h){ |
134 |
- MpegEncContext * const s = &h->s; |
|
135 | 132 |
unsigned int sps_id; |
136 | 133 |
int sched_sel_idx; |
137 | 134 |
SPS *sps; |
138 | 135 |
|
139 |
- sps_id = get_ue_golomb_31(&s->gb); |
|
136 |
+ sps_id = get_ue_golomb_31(&h->gb); |
|
140 | 137 |
if(sps_id > 31 || !h->sps_buffers[sps_id]) { |
141 |
- av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS %d referenced in buffering period\n", sps_id); |
|
138 |
+ av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %d referenced in buffering period\n", sps_id); |
|
142 | 139 |
return -1; |
143 | 140 |
} |
144 | 141 |
sps = h->sps_buffers[sps_id]; |
... | ... |
@@ -146,14 +141,14 @@ static int decode_buffering_period(H264Context *h){ |
146 | 146 |
// NOTE: This is really so duplicated in the standard... See H.264, D.1.1 |
147 | 147 |
if (sps->nal_hrd_parameters_present_flag) { |
148 | 148 |
for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) { |
149 |
- h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&s->gb, sps->initial_cpb_removal_delay_length); |
|
150 |
- skip_bits(&s->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset |
|
149 |
+ h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&h->gb, sps->initial_cpb_removal_delay_length); |
|
150 |
+ skip_bits(&h->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset |
|
151 | 151 |
} |
152 | 152 |
} |
153 | 153 |
if (sps->vcl_hrd_parameters_present_flag) { |
154 | 154 |
for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) { |
155 |
- h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&s->gb, sps->initial_cpb_removal_delay_length); |
|
156 |
- skip_bits(&s->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset |
|
155 |
+ h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&h->gb, sps->initial_cpb_removal_delay_length); |
|
156 |
+ skip_bits(&h->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset |
|
157 | 157 |
} |
158 | 158 |
} |
159 | 159 |
|
... | ... |
@@ -162,20 +157,18 @@ static int decode_buffering_period(H264Context *h){ |
162 | 162 |
} |
163 | 163 |
|
164 | 164 |
int ff_h264_decode_sei(H264Context *h){ |
165 |
- MpegEncContext * const s = &h->s; |
|
166 |
- |
|
167 |
- while (get_bits_left(&s->gb) > 16) { |
|
165 |
+ while (get_bits_left(&h->gb) > 16) { |
|
168 | 166 |
int size, type; |
169 | 167 |
|
170 | 168 |
type=0; |
171 | 169 |
do{ |
172 |
- type+= show_bits(&s->gb, 8); |
|
173 |
- }while(get_bits(&s->gb, 8) == 255); |
|
170 |
+ type+= show_bits(&h->gb, 8); |
|
171 |
+ }while(get_bits(&h->gb, 8) == 255); |
|
174 | 172 |
|
175 | 173 |
size=0; |
176 | 174 |
do{ |
177 |
- size+= show_bits(&s->gb, 8); |
|
178 |
- }while(get_bits(&s->gb, 8) == 255); |
|
175 |
+ size+= show_bits(&h->gb, 8); |
|
176 |
+ }while(get_bits(&h->gb, 8) == 255); |
|
179 | 177 |
|
180 | 178 |
switch(type){ |
181 | 179 |
case SEI_TYPE_PIC_TIMING: // Picture timing SEI |
... | ... |
@@ -195,11 +188,11 @@ int ff_h264_decode_sei(H264Context *h){ |
195 | 195 |
return -1; |
196 | 196 |
break; |
197 | 197 |
default: |
198 |
- skip_bits(&s->gb, 8*size); |
|
198 |
+ skip_bits(&h->gb, 8*size); |
|
199 | 199 |
} |
200 | 200 |
|
201 | 201 |
//FIXME check bits here |
202 |
- align_get_bits(&s->gb); |
|
202 |
+ align_get_bits(&h->gb); |
|
203 | 203 |
} |
204 | 204 |
|
205 | 205 |
return 0; |
... | ... |
@@ -399,8 +399,6 @@ static void pred8x8_tm_vp8_c(uint8_t *src, ptrdiff_t stride) |
399 | 399 |
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, |
400 | 400 |
const int chroma_format_idc) |
401 | 401 |
{ |
402 |
-// MpegEncContext * const s = &h->s; |
|
403 |
- |
|
404 | 402 |
#undef FUNC |
405 | 403 |
#undef FUNCC |
406 | 404 |
#define FUNC(a, depth) a ## _ ## depth |
... | ... |
@@ -141,7 +141,7 @@ typedef struct Picture{ |
141 | 141 |
uint16_t *mc_mb_var; ///< Table for motion compensated MB variances |
142 | 142 |
uint8_t *mb_mean; ///< Table for MB luminance |
143 | 143 |
int b_frame_score; /* */ |
144 |
- struct MpegEncContext *owner2; ///< pointer to the MpegEncContext that allocated this picture |
|
144 |
+ void *owner2; ///< pointer to the context that allocated this picture |
|
145 | 145 |
int needs_realloc; ///< Picture needs to be reallocated (eg due to a frame size change) |
146 | 146 |
} Picture; |
147 | 147 |
|
... | ... |
@@ -66,11 +66,19 @@ |
66 | 66 |
|
67 | 67 |
typedef struct { |
68 | 68 |
H264Context h; |
69 |
+ Picture *cur_pic; |
|
70 |
+ Picture *next_pic; |
|
71 |
+ Picture *last_pic; |
|
69 | 72 |
int halfpel_flag; |
70 | 73 |
int thirdpel_flag; |
71 | 74 |
int unknown_flag; |
72 | 75 |
int next_slice_index; |
73 | 76 |
uint32_t watermark_key; |
77 |
+ int adaptive_quant; |
|
78 |
+ int next_p_frame_damaged; |
|
79 |
+ int h_edge_pos; |
|
80 |
+ int v_edge_pos; |
|
81 |
+ int last_frame_output; |
|
74 | 82 |
} SVQ3Context; |
75 | 83 |
|
76 | 84 |
#define FULLPEL_MODE 1 |
... | ... |
@@ -267,12 +275,13 @@ static inline int svq3_decode_block(GetBitContext *gb, int16_t *block, |
267 | 267 |
return 0; |
268 | 268 |
} |
269 | 269 |
|
270 |
-static inline void svq3_mc_dir_part(MpegEncContext *s, |
|
270 |
+static inline void svq3_mc_dir_part(SVQ3Context *s, |
|
271 | 271 |
int x, int y, int width, int height, |
272 | 272 |
int mx, int my, int dxy, |
273 | 273 |
int thirdpel, int dir, int avg) |
274 | 274 |
{ |
275 |
- const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture; |
|
275 |
+ H264Context *h = &s->h; |
|
276 |
+ const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic; |
|
276 | 277 |
uint8_t *src, *dest; |
277 | 278 |
int i, emu = 0; |
278 | 279 |
int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2 |
... | ... |
@@ -282,7 +291,7 @@ static inline void svq3_mc_dir_part(MpegEncContext *s, |
282 | 282 |
|
283 | 283 |
if (mx < 0 || mx >= s->h_edge_pos - width - 1 || |
284 | 284 |
my < 0 || my >= s->v_edge_pos - height - 1) { |
285 |
- if ((s->flags & CODEC_FLAG_EMU_EDGE)) |
|
285 |
+ if ((h->flags & CODEC_FLAG_EMU_EDGE)) |
|
286 | 286 |
emu = 1; |
287 | 287 |
|
288 | 288 |
mx = av_clip(mx, -16, s->h_edge_pos - width + 15); |
... | ... |
@@ -290,25 +299,25 @@ static inline void svq3_mc_dir_part(MpegEncContext *s, |
290 | 290 |
} |
291 | 291 |
|
292 | 292 |
/* form component predictions */ |
293 |
- dest = s->current_picture.f.data[0] + x + y * s->linesize; |
|
294 |
- src = pic->f.data[0] + mx + my * s->linesize; |
|
293 |
+ dest = h->cur_pic.f.data[0] + x + y * h->linesize; |
|
294 |
+ src = pic->f.data[0] + mx + my * h->linesize; |
|
295 | 295 |
|
296 | 296 |
if (emu) { |
297 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, |
|
297 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize, |
|
298 | 298 |
width + 1, height + 1, |
299 | 299 |
mx, my, s->h_edge_pos, s->v_edge_pos); |
300 |
- src = s->edge_emu_buffer; |
|
300 |
+ src = h->edge_emu_buffer; |
|
301 | 301 |
} |
302 | 302 |
if (thirdpel) |
303 |
- (avg ? s->dsp.avg_tpel_pixels_tab |
|
304 |
- : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, |
|
303 |
+ (avg ? h->dsp.avg_tpel_pixels_tab |
|
304 |
+ : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize, |
|
305 | 305 |
width, height); |
306 | 306 |
else |
307 |
- (avg ? s->dsp.avg_pixels_tab |
|
308 |
- : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, |
|
307 |
+ (avg ? h->dsp.avg_pixels_tab |
|
308 |
+ : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize, |
|
309 | 309 |
height); |
310 | 310 |
|
311 |
- if (!(s->flags & CODEC_FLAG_GRAY)) { |
|
311 |
+ if (!(h->flags & CODEC_FLAG_GRAY)) { |
|
312 | 312 |
mx = mx + (mx < (int) x) >> 1; |
313 | 313 |
my = my + (my < (int) y) >> 1; |
314 | 314 |
width = width >> 1; |
... | ... |
@@ -316,35 +325,35 @@ static inline void svq3_mc_dir_part(MpegEncContext *s, |
316 | 316 |
blocksize++; |
317 | 317 |
|
318 | 318 |
for (i = 1; i < 3; i++) { |
319 |
- dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize; |
|
320 |
- src = pic->f.data[i] + mx + my * s->uvlinesize; |
|
319 |
+ dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize; |
|
320 |
+ src = pic->f.data[i] + mx + my * h->uvlinesize; |
|
321 | 321 |
|
322 | 322 |
if (emu) { |
323 |
- s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, |
|
323 |
+ h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize, |
|
324 | 324 |
width + 1, height + 1, |
325 | 325 |
mx, my, (s->h_edge_pos >> 1), |
326 | 326 |
s->v_edge_pos >> 1); |
327 |
- src = s->edge_emu_buffer; |
|
327 |
+ src = h->edge_emu_buffer; |
|
328 | 328 |
} |
329 | 329 |
if (thirdpel) |
330 |
- (avg ? s->dsp.avg_tpel_pixels_tab |
|
331 |
- : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, |
|
332 |
- s->uvlinesize, |
|
330 |
+ (avg ? h->dsp.avg_tpel_pixels_tab |
|
331 |
+ : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, |
|
332 |
+ h->uvlinesize, |
|
333 | 333 |
width, height); |
334 | 334 |
else |
335 |
- (avg ? s->dsp.avg_pixels_tab |
|
336 |
- : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, |
|
337 |
- s->uvlinesize, |
|
335 |
+ (avg ? h->dsp.avg_pixels_tab |
|
336 |
+ : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src, |
|
337 |
+ h->uvlinesize, |
|
338 | 338 |
height); |
339 | 339 |
} |
340 | 340 |
} |
341 | 341 |
} |
342 | 342 |
|
343 |
-static inline int svq3_mc_dir(H264Context *h, int size, int mode, |
|
343 |
+static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode, |
|
344 | 344 |
int dir, int avg) |
345 | 345 |
{ |
346 | 346 |
int i, j, k, mx, my, dx, dy, x, y; |
347 |
- MpegEncContext *const s = (MpegEncContext *)h; |
|
347 |
+ H264Context *h = &s->h; |
|
348 | 348 |
const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1); |
349 | 349 |
const int part_height = 16 >> ((unsigned)(size + 1) / 3); |
350 | 350 |
const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0; |
... | ... |
@@ -353,19 +362,19 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, |
353 | 353 |
|
354 | 354 |
for (i = 0; i < 16; i += part_height) |
355 | 355 |
for (j = 0; j < 16; j += part_width) { |
356 |
- const int b_xy = (4 * s->mb_x + (j >> 2)) + |
|
357 |
- (4 * s->mb_y + (i >> 2)) * h->b_stride; |
|
356 |
+ const int b_xy = (4 * h->mb_x + (j >> 2)) + |
|
357 |
+ (4 * h->mb_y + (i >> 2)) * h->b_stride; |
|
358 | 358 |
int dxy; |
359 |
- x = 16 * s->mb_x + j; |
|
360 |
- y = 16 * s->mb_y + i; |
|
359 |
+ x = 16 * h->mb_x + j; |
|
360 |
+ y = 16 * h->mb_y + i; |
|
361 | 361 |
k = (j >> 2 & 1) + (i >> 1 & 2) + |
362 | 362 |
(j >> 1 & 4) + (i & 8); |
363 | 363 |
|
364 | 364 |
if (mode != PREDICT_MODE) { |
365 | 365 |
pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my); |
366 | 366 |
} else { |
367 |
- mx = s->next_picture.f.motion_val[0][b_xy][0] << 1; |
|
368 |
- my = s->next_picture.f.motion_val[0][b_xy][1] << 1; |
|
367 |
+ mx = s->next_pic->f.motion_val[0][b_xy][0] << 1; |
|
368 |
+ my = s->next_pic->f.motion_val[0][b_xy][1] << 1; |
|
369 | 369 |
|
370 | 370 |
if (dir == 0) { |
371 | 371 |
mx = mx * h->frame_num_offset / |
... | ... |
@@ -388,11 +397,11 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, |
388 | 388 |
if (mode == PREDICT_MODE) { |
389 | 389 |
dx = dy = 0; |
390 | 390 |
} else { |
391 |
- dy = svq3_get_se_golomb(&s->gb); |
|
392 |
- dx = svq3_get_se_golomb(&s->gb); |
|
391 |
+ dy = svq3_get_se_golomb(&h->gb); |
|
392 |
+ dx = svq3_get_se_golomb(&h->gb); |
|
393 | 393 |
|
394 | 394 |
if (dx == INVALID_VLC || dy == INVALID_VLC) { |
395 |
- av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n"); |
|
395 |
+ av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n"); |
|
396 | 396 |
return -1; |
397 | 397 |
} |
398 | 398 |
} |
... | ... |
@@ -446,7 +455,7 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, |
446 | 446 |
} |
447 | 447 |
|
448 | 448 |
/* write back motion vectors */ |
449 |
- fill_rectangle(s->current_picture.f.motion_val[dir][b_xy], |
|
449 |
+ fill_rectangle(h->cur_pic.f.motion_val[dir][b_xy], |
|
450 | 450 |
part_width >> 2, part_height >> 2, h->b_stride, |
451 | 451 |
pack16to32(mx, my), 4); |
452 | 452 |
} |
... | ... |
@@ -454,46 +463,45 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, |
454 | 454 |
return 0; |
455 | 455 |
} |
456 | 456 |
|
457 |
-static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
|
457 |
+static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type) |
|
458 | 458 |
{ |
459 |
- H264Context *h = &svq3->h; |
|
459 |
+ H264Context *h = &s->h; |
|
460 | 460 |
int i, j, k, m, dir, mode; |
461 | 461 |
int cbp = 0; |
462 | 462 |
uint32_t vlc; |
463 | 463 |
int8_t *top, *left; |
464 |
- MpegEncContext *const s = (MpegEncContext *)h; |
|
465 | 464 |
const int mb_xy = h->mb_xy; |
466 |
- const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride; |
|
465 |
+ const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; |
|
467 | 466 |
|
468 |
- h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; |
|
469 |
- h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF; |
|
467 |
+ h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF; |
|
468 |
+ h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF; |
|
470 | 469 |
h->topright_samples_available = 0xFFFF; |
471 | 470 |
|
472 | 471 |
if (mb_type == 0) { /* SKIP */ |
473 |
- if (s->pict_type == AV_PICTURE_TYPE_P || |
|
474 |
- s->next_picture.f.mb_type[mb_xy] == -1) { |
|
475 |
- svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16, |
|
472 |
+ if (h->pict_type == AV_PICTURE_TYPE_P || |
|
473 |
+ s->next_pic->f.mb_type[mb_xy] == -1) { |
|
474 |
+ svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16, |
|
476 | 475 |
0, 0, 0, 0, 0, 0); |
477 | 476 |
|
478 |
- if (s->pict_type == AV_PICTURE_TYPE_B) |
|
479 |
- svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16, |
|
477 |
+ if (h->pict_type == AV_PICTURE_TYPE_B) |
|
478 |
+ svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16, |
|
480 | 479 |
0, 0, 0, 0, 1, 1); |
481 | 480 |
|
482 | 481 |
mb_type = MB_TYPE_SKIP; |
483 | 482 |
} else { |
484 |
- mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6); |
|
485 |
- if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0) |
|
483 |
+ mb_type = FFMIN(s->next_pic->f.mb_type[mb_xy], 6); |
|
484 |
+ if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0) |
|
486 | 485 |
return -1; |
487 |
- if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0) |
|
486 |
+ if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0) |
|
488 | 487 |
return -1; |
489 | 488 |
|
490 | 489 |
mb_type = MB_TYPE_16x16; |
491 | 490 |
} |
492 | 491 |
} else if (mb_type < 8) { /* INTER */ |
493 |
- if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1(&s->gb)) |
|
492 |
+ if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb)) |
|
494 | 493 |
mode = THIRDPEL_MODE; |
495 |
- else if (svq3->halfpel_flag && |
|
496 |
- svq3->thirdpel_flag == !get_bits1(&s->gb)) |
|
494 |
+ else if (s->halfpel_flag && |
|
495 |
+ s->thirdpel_flag == !get_bits1(&h->gb)) |
|
497 | 496 |
mode = HALFPEL_MODE; |
498 | 497 |
else |
499 | 498 |
mode = FULLPEL_MODE; |
... | ... |
@@ -508,62 +516,62 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
508 | 508 |
*/ |
509 | 509 |
|
510 | 510 |
for (m = 0; m < 2; m++) { |
511 |
- if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) { |
|
511 |
+ if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) { |
|
512 | 512 |
for (i = 0; i < 4; i++) |
513 | 513 |
AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8], |
514 |
- s->current_picture.f.motion_val[m][b_xy - 1 + i * h->b_stride]); |
|
514 |
+ h->cur_pic.f.motion_val[m][b_xy - 1 + i * h->b_stride]); |
|
515 | 515 |
} else { |
516 | 516 |
for (i = 0; i < 4; i++) |
517 | 517 |
AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]); |
518 | 518 |
} |
519 |
- if (s->mb_y > 0) { |
|
519 |
+ if (h->mb_y > 0) { |
|
520 | 520 |
memcpy(h->mv_cache[m][scan8[0] - 1 * 8], |
521 |
- s->current_picture.f.motion_val[m][b_xy - h->b_stride], |
|
521 |
+ h->cur_pic.f.motion_val[m][b_xy - h->b_stride], |
|
522 | 522 |
4 * 2 * sizeof(int16_t)); |
523 | 523 |
memset(&h->ref_cache[m][scan8[0] - 1 * 8], |
524 |
- (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4); |
|
524 |
+ (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4); |
|
525 | 525 |
|
526 |
- if (s->mb_x < s->mb_width - 1) { |
|
526 |
+ if (h->mb_x < h->mb_width - 1) { |
|
527 | 527 |
AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8], |
528 |
- s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]); |
|
528 |
+ h->cur_pic.f.motion_val[m][b_xy - h->b_stride + 4]); |
|
529 | 529 |
h->ref_cache[m][scan8[0] + 4 - 1 * 8] = |
530 |
- (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 || |
|
531 |
- h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1; |
|
530 |
+ (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 || |
|
531 |
+ h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1; |
|
532 | 532 |
} else |
533 | 533 |
h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE; |
534 |
- if (s->mb_x > 0) { |
|
534 |
+ if (h->mb_x > 0) { |
|
535 | 535 |
AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8], |
536 |
- s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]); |
|
536 |
+ h->cur_pic.f.motion_val[m][b_xy - h->b_stride - 1]); |
|
537 | 537 |
h->ref_cache[m][scan8[0] - 1 - 1 * 8] = |
538 |
- (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1; |
|
538 |
+ (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1; |
|
539 | 539 |
} else |
540 | 540 |
h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE; |
541 | 541 |
} else |
542 | 542 |
memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1], |
543 | 543 |
PART_NOT_AVAILABLE, 8); |
544 | 544 |
|
545 |
- if (s->pict_type != AV_PICTURE_TYPE_B) |
|
545 |
+ if (h->pict_type != AV_PICTURE_TYPE_B) |
|
546 | 546 |
break; |
547 | 547 |
} |
548 | 548 |
|
549 | 549 |
/* decode motion vector(s) and form prediction(s) */ |
550 |
- if (s->pict_type == AV_PICTURE_TYPE_P) { |
|
551 |
- if (svq3_mc_dir(h, mb_type - 1, mode, 0, 0) < 0) |
|
550 |
+ if (h->pict_type == AV_PICTURE_TYPE_P) { |
|
551 |
+ if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0) |
|
552 | 552 |
return -1; |
553 | 553 |
} else { /* AV_PICTURE_TYPE_B */ |
554 | 554 |
if (mb_type != 2) |
555 |
- if (svq3_mc_dir(h, 0, mode, 0, 0) < 0) |
|
555 |
+ if (svq3_mc_dir(s, 0, mode, 0, 0) < 0) |
|
556 | 556 |
return -1; |
557 | 557 |
else |
558 | 558 |
for (i = 0; i < 4; i++) |
559 |
- memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride], |
|
559 |
+ memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride], |
|
560 | 560 |
0, 4 * 2 * sizeof(int16_t)); |
561 | 561 |
if (mb_type != 1) |
562 |
- if (svq3_mc_dir(h, 0, mode, 1, mb_type == 3) < 0) |
|
562 |
+ if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0) |
|
563 | 563 |
return -1; |
564 | 564 |
else |
565 | 565 |
for (i = 0; i < 4; i++) |
566 |
- memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride], |
|
566 |
+ memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride], |
|
567 | 567 |
0, 4 * 2 * sizeof(int16_t)); |
568 | 568 |
} |
569 | 569 |
|
... | ... |
@@ -572,17 +580,17 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
572 | 572 |
memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t)); |
573 | 573 |
|
574 | 574 |
if (mb_type == 8) { |
575 |
- if (s->mb_x > 0) { |
|
575 |
+ if (h->mb_x > 0) { |
|
576 | 576 |
for (i = 0; i < 4; i++) |
577 | 577 |
h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i]; |
578 | 578 |
if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) |
579 | 579 |
h->left_samples_available = 0x5F5F; |
580 | 580 |
} |
581 |
- if (s->mb_y > 0) { |
|
582 |
- h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 0]; |
|
583 |
- h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 1]; |
|
584 |
- h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 2]; |
|
585 |
- h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 3]; |
|
581 |
+ if (h->mb_y > 0) { |
|
582 |
+ h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0]; |
|
583 |
+ h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1]; |
|
584 |
+ h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2]; |
|
585 |
+ h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3]; |
|
586 | 586 |
|
587 | 587 |
if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1) |
588 | 588 |
h->top_samples_available = 0x33FF; |
... | ... |
@@ -590,10 +598,10 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
590 | 590 |
|
591 | 591 |
/* decode prediction codes for luma blocks */ |
592 | 592 |
for (i = 0; i < 16; i += 2) { |
593 |
- vlc = svq3_get_ue_golomb(&s->gb); |
|
593 |
+ vlc = svq3_get_ue_golomb(&h->gb); |
|
594 | 594 |
|
595 | 595 |
if (vlc >= 25) { |
596 |
- av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc); |
|
596 |
+ av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc); |
|
597 | 597 |
return -1; |
598 | 598 |
} |
599 | 599 |
|
... | ... |
@@ -604,7 +612,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
604 | 604 |
left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]]; |
605 | 605 |
|
606 | 606 |
if (left[1] == -1 || left[2] == -1) { |
607 |
- av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n"); |
|
607 |
+ av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n"); |
|
608 | 608 |
return -1; |
609 | 609 |
} |
610 | 610 |
} |
... | ... |
@@ -618,8 +626,8 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
618 | 618 |
if (mb_type == 8) { |
619 | 619 |
ff_h264_check_intra4x4_pred_mode(h); |
620 | 620 |
|
621 |
- h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; |
|
622 |
- h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF; |
|
621 |
+ h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF; |
|
622 |
+ h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF; |
|
623 | 623 |
} else { |
624 | 624 |
for (i = 0; i < 4; i++) |
625 | 625 |
memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4); |
... | ... |
@@ -634,7 +642,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
634 | 634 |
dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1; |
635 | 635 |
|
636 | 636 |
if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) { |
637 |
- av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n"); |
|
637 |
+ av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n"); |
|
638 | 638 |
return -1; |
639 | 639 |
} |
640 | 640 |
|
... | ... |
@@ -642,29 +650,29 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
642 | 642 |
mb_type = MB_TYPE_INTRA16x16; |
643 | 643 |
} |
644 | 644 |
|
645 |
- if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) { |
|
645 |
+ if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) { |
|
646 | 646 |
for (i = 0; i < 4; i++) |
647 |
- memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride], |
|
647 |
+ memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride], |
|
648 | 648 |
0, 4 * 2 * sizeof(int16_t)); |
649 |
- if (s->pict_type == AV_PICTURE_TYPE_B) { |
|
649 |
+ if (h->pict_type == AV_PICTURE_TYPE_B) { |
|
650 | 650 |
for (i = 0; i < 4; i++) |
651 |
- memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride], |
|
651 |
+ memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride], |
|
652 | 652 |
0, 4 * 2 * sizeof(int16_t)); |
653 | 653 |
} |
654 | 654 |
} |
655 | 655 |
if (!IS_INTRA4x4(mb_type)) { |
656 | 656 |
memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8); |
657 | 657 |
} |
658 |
- if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) { |
|
658 |
+ if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) { |
|
659 | 659 |
memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t)); |
660 |
- s->dsp.clear_blocks(h->mb + 0); |
|
661 |
- s->dsp.clear_blocks(h->mb + 384); |
|
660 |
+ h->dsp.clear_blocks(h->mb + 0); |
|
661 |
+ h->dsp.clear_blocks(h->mb + 384); |
|
662 | 662 |
} |
663 | 663 |
|
664 | 664 |
if (!IS_INTRA16x16(mb_type) && |
665 |
- (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) { |
|
666 |
- if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48) { |
|
667 |
- av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc); |
|
665 |
+ (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) { |
|
666 |
+ if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) { |
|
667 |
+ av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc); |
|
668 | 668 |
return -1; |
669 | 669 |
} |
670 | 670 |
|
... | ... |
@@ -672,19 +680,19 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
672 | 672 |
: golomb_to_inter_cbp[vlc]; |
673 | 673 |
} |
674 | 674 |
if (IS_INTRA16x16(mb_type) || |
675 |
- (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) { |
|
676 |
- s->qscale += svq3_get_se_golomb(&s->gb); |
|
675 |
+ (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) { |
|
676 |
+ h->qscale += svq3_get_se_golomb(&h->gb); |
|
677 | 677 |
|
678 |
- if (s->qscale > 31u) { |
|
679 |
- av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale); |
|
678 |
+ if (h->qscale > 31u) { |
|
679 |
+ av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale); |
|
680 | 680 |
return -1; |
681 | 681 |
} |
682 | 682 |
} |
683 | 683 |
if (IS_INTRA16x16(mb_type)) { |
684 | 684 |
AV_ZERO128(h->mb_luma_dc[0] + 0); |
685 | 685 |
AV_ZERO128(h->mb_luma_dc[0] + 8); |
686 |
- if (svq3_decode_block(&s->gb, h->mb_luma_dc[0], 0, 1)) { |
|
687 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
686 |
+ if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) { |
|
687 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
688 | 688 |
"error while decoding intra luma dc\n"); |
689 | 689 |
return -1; |
690 | 690 |
} |
... | ... |
@@ -692,7 +700,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
692 | 692 |
|
693 | 693 |
if (cbp) { |
694 | 694 |
const int index = IS_INTRA16x16(mb_type) ? 1 : 0; |
695 |
- const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1); |
|
695 |
+ const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1); |
|
696 | 696 |
|
697 | 697 |
for (i = 0; i < 4; i++) |
698 | 698 |
if ((cbp & (1 << i))) { |
... | ... |
@@ -702,8 +710,8 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
702 | 702 |
: (4 * i + j); |
703 | 703 |
h->non_zero_count_cache[scan8[k]] = 1; |
704 | 704 |
|
705 |
- if (svq3_decode_block(&s->gb, &h->mb[16 * k], index, type)) { |
|
706 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
705 |
+ if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) { |
|
706 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
707 | 707 |
"error while decoding block\n"); |
708 | 708 |
return -1; |
709 | 709 |
} |
... | ... |
@@ -712,8 +720,8 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
712 | 712 |
|
713 | 713 |
if ((cbp & 0x30)) { |
714 | 714 |
for (i = 1; i < 3; ++i) |
715 |
- if (svq3_decode_block(&s->gb, &h->mb[16 * 16 * i], 0, 3)) { |
|
716 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
715 |
+ if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) { |
|
716 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
717 | 717 |
"error while decoding chroma dc block\n"); |
718 | 718 |
return -1; |
719 | 719 |
} |
... | ... |
@@ -724,8 +732,8 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
724 | 724 |
k = 16 * i + j; |
725 | 725 |
h->non_zero_count_cache[scan8[k]] = 1; |
726 | 726 |
|
727 |
- if (svq3_decode_block(&s->gb, &h->mb[16 * k], 1, 1)) { |
|
728 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
727 |
+ if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) { |
|
728 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
729 | 729 |
"error while decoding chroma ac block\n"); |
730 | 730 |
return -1; |
731 | 731 |
} |
... | ... |
@@ -736,7 +744,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
736 | 736 |
} |
737 | 737 |
|
738 | 738 |
h->cbp = cbp; |
739 |
- s->current_picture.f.mb_type[mb_xy] = mb_type; |
|
739 |
+ h->cur_pic.f.mb_type[mb_xy] = mb_type; |
|
740 | 740 |
|
741 | 741 |
if (IS_INTRA(mb_type)) |
742 | 742 |
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1); |
... | ... |
@@ -746,14 +754,13 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) |
746 | 746 |
|
747 | 747 |
static int svq3_decode_slice_header(AVCodecContext *avctx) |
748 | 748 |
{ |
749 |
- SVQ3Context *svq3 = avctx->priv_data; |
|
750 |
- H264Context *h = &svq3->h; |
|
751 |
- MpegEncContext *s = &h->s; |
|
749 |
+ SVQ3Context *s = avctx->priv_data; |
|
750 |
+ H264Context *h = &s->h; |
|
752 | 751 |
const int mb_xy = h->mb_xy; |
753 | 752 |
int i, header; |
754 | 753 |
unsigned slice_id; |
755 | 754 |
|
756 |
- header = get_bits(&s->gb, 8); |
|
755 |
+ header = get_bits(&h->gb, 8); |
|
757 | 756 |
|
758 | 757 |
if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) { |
759 | 758 |
/* TODO: what? */ |
... | ... |
@@ -762,75 +769,75 @@ static int svq3_decode_slice_header(AVCodecContext *avctx) |
762 | 762 |
} else { |
763 | 763 |
int length = header >> 5 & 3; |
764 | 764 |
|
765 |
- svq3->next_slice_index = get_bits_count(&s->gb) + |
|
766 |
- 8 * show_bits(&s->gb, 8 * length) + |
|
767 |
- 8 * length; |
|
765 |
+ s->next_slice_index = get_bits_count(&h->gb) + |
|
766 |
+ 8 * show_bits(&h->gb, 8 * length) + |
|
767 |
+ 8 * length; |
|
768 | 768 |
|
769 |
- if (svq3->next_slice_index > s->gb.size_in_bits) { |
|
769 |
+ if (s->next_slice_index > h->gb.size_in_bits) { |
|
770 | 770 |
av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n"); |
771 | 771 |
return -1; |
772 | 772 |
} |
773 | 773 |
|
774 |
- s->gb.size_in_bits = svq3->next_slice_index - 8 * (length - 1); |
|
775 |
- skip_bits(&s->gb, 8); |
|
774 |
+ h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1); |
|
775 |
+ skip_bits(&h->gb, 8); |
|
776 | 776 |
|
777 |
- if (svq3->watermark_key) { |
|
778 |
- uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1]); |
|
779 |
- AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1], |
|
780 |
- header ^ svq3->watermark_key); |
|
777 |
+ if (s->watermark_key) { |
|
778 |
+ uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]); |
|
779 |
+ AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1], |
|
780 |
+ header ^ s->watermark_key); |
|
781 | 781 |
} |
782 | 782 |
if (length > 0) { |
783 |
- memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3], |
|
784 |
- &s->gb.buffer[s->gb.size_in_bits >> 3], length - 1); |
|
783 |
+ memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3], |
|
784 |
+ &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1); |
|
785 | 785 |
} |
786 |
- skip_bits_long(&s->gb, 0); |
|
786 |
+ skip_bits_long(&h->gb, 0); |
|
787 | 787 |
} |
788 | 788 |
|
789 |
- if ((slice_id = svq3_get_ue_golomb(&s->gb)) >= 3) { |
|
790 |
- av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id); |
|
789 |
+ if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) { |
|
790 |
+ av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id); |
|
791 | 791 |
return -1; |
792 | 792 |
} |
793 | 793 |
|
794 | 794 |
h->slice_type = golomb_to_pict_type[slice_id]; |
795 | 795 |
|
796 | 796 |
if ((header & 0x9F) == 2) { |
797 |
- i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1)); |
|
798 |
- s->mb_skip_run = get_bits(&s->gb, i) - |
|
799 |
- (s->mb_y * s->mb_width + s->mb_x); |
|
797 |
+ i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1)); |
|
798 |
+ h->mb_skip_run = get_bits(&h->gb, i) - |
|
799 |
+ (h->mb_y * h->mb_width + h->mb_x); |
|
800 | 800 |
} else { |
801 |
- skip_bits1(&s->gb); |
|
802 |
- s->mb_skip_run = 0; |
|
801 |
+ skip_bits1(&h->gb); |
|
802 |
+ h->mb_skip_run = 0; |
|
803 | 803 |
} |
804 | 804 |
|
805 |
- h->slice_num = get_bits(&s->gb, 8); |
|
806 |
- s->qscale = get_bits(&s->gb, 5); |
|
807 |
- s->adaptive_quant = get_bits1(&s->gb); |
|
805 |
+ h->slice_num = get_bits(&h->gb, 8); |
|
806 |
+ h->qscale = get_bits(&h->gb, 5); |
|
807 |
+ s->adaptive_quant = get_bits1(&h->gb); |
|
808 | 808 |
|
809 | 809 |
/* unknown fields */ |
810 |
- skip_bits1(&s->gb); |
|
810 |
+ skip_bits1(&h->gb); |
|
811 | 811 |
|
812 |
- if (svq3->unknown_flag) |
|
813 |
- skip_bits1(&s->gb); |
|
812 |
+ if (s->unknown_flag) |
|
813 |
+ skip_bits1(&h->gb); |
|
814 | 814 |
|
815 |
- skip_bits1(&s->gb); |
|
816 |
- skip_bits(&s->gb, 2); |
|
815 |
+ skip_bits1(&h->gb); |
|
816 |
+ skip_bits(&h->gb, 2); |
|
817 | 817 |
|
818 |
- while (get_bits1(&s->gb)) |
|
819 |
- skip_bits(&s->gb, 8); |
|
818 |
+ while (get_bits1(&h->gb)) |
|
819 |
+ skip_bits(&h->gb, 8); |
|
820 | 820 |
|
821 | 821 |
/* reset intra predictors and invalidate motion vector references */ |
822 |
- if (s->mb_x > 0) { |
|
822 |
+ if (h->mb_x > 0) { |
|
823 | 823 |
memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3, |
824 | 824 |
-1, 4 * sizeof(int8_t)); |
825 |
- memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_x], |
|
826 |
- -1, 8 * sizeof(int8_t) * s->mb_x); |
|
825 |
+ memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x], |
|
826 |
+ -1, 8 * sizeof(int8_t) * h->mb_x); |
|
827 | 827 |
} |
828 |
- if (s->mb_y > 0) { |
|
829 |
- memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_stride], |
|
830 |
- -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x)); |
|
828 |
+ if (h->mb_y > 0) { |
|
829 |
+ memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride], |
|
830 |
+ -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x)); |
|
831 | 831 |
|
832 |
- if (s->mb_x > 0) |
|
833 |
- h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1; |
|
832 |
+ if (h->mb_x > 0) |
|
833 |
+ h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1; |
|
834 | 834 |
} |
835 | 835 |
|
836 | 836 |
return 0; |
... | ... |
@@ -838,29 +845,38 @@ static int svq3_decode_slice_header(AVCodecContext *avctx) |
838 | 838 |
|
839 | 839 |
static av_cold int svq3_decode_init(AVCodecContext *avctx) |
840 | 840 |
{ |
841 |
- SVQ3Context *svq3 = avctx->priv_data; |
|
842 |
- H264Context *h = &svq3->h; |
|
843 |
- MpegEncContext *s = &h->s; |
|
841 |
+ SVQ3Context *s = avctx->priv_data; |
|
842 |
+ H264Context *h = &s->h; |
|
844 | 843 |
int m; |
845 | 844 |
unsigned char *extradata; |
846 | 845 |
unsigned char *extradata_end; |
847 | 846 |
unsigned int size; |
848 | 847 |
int marker_found = 0; |
849 | 848 |
|
849 |
+ s->cur_pic = av_mallocz(sizeof(*s->cur_pic)); |
|
850 |
+ s->last_pic = av_mallocz(sizeof(*s->last_pic)); |
|
851 |
+ s->next_pic = av_mallocz(sizeof(*s->next_pic)); |
|
852 |
+ if (!s->next_pic || !s->last_pic || !s->cur_pic) { |
|
853 |
+ av_freep(&s->cur_pic); |
|
854 |
+ av_freep(&s->last_pic); |
|
855 |
+ av_freep(&s->next_pic); |
|
856 |
+ return AVERROR(ENOMEM); |
|
857 |
+ } |
|
858 |
+ |
|
850 | 859 |
if (ff_h264_decode_init(avctx) < 0) |
851 | 860 |
return -1; |
852 | 861 |
|
853 |
- s->flags = avctx->flags; |
|
854 |
- s->flags2 = avctx->flags2; |
|
855 |
- s->unrestricted_mv = 1; |
|
862 |
+ h->flags = avctx->flags; |
|
856 | 863 |
h->is_complex = 1; |
864 |
+ h->picture_structure = PICT_FRAME; |
|
857 | 865 |
avctx->pix_fmt = avctx->codec->pix_fmts[0]; |
858 | 866 |
|
859 | 867 |
h->chroma_qp[0] = h->chroma_qp[1] = 4; |
868 |
+ h->chroma_x_shift = h->chroma_y_shift = 1; |
|
860 | 869 |
|
861 |
- svq3->halfpel_flag = 1; |
|
862 |
- svq3->thirdpel_flag = 1; |
|
863 |
- svq3->unknown_flag = 0; |
|
870 |
+ s->halfpel_flag = 1; |
|
871 |
+ s->thirdpel_flag = 1; |
|
872 |
+ s->unknown_flag = 0; |
|
864 | 873 |
|
865 | 874 |
/* prowl for the "SEQH" marker in the extradata */ |
866 | 875 |
extradata = (unsigned char *)avctx->extradata; |
... | ... |
@@ -922,8 +938,8 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
922 | 922 |
break; |
923 | 923 |
} |
924 | 924 |
|
925 |
- svq3->halfpel_flag = get_bits1(&gb); |
|
926 |
- svq3->thirdpel_flag = get_bits1(&gb); |
|
925 |
+ s->halfpel_flag = get_bits1(&gb); |
|
926 |
+ s->thirdpel_flag = get_bits1(&gb); |
|
927 | 927 |
|
928 | 928 |
/* unknown fields */ |
929 | 929 |
skip_bits1(&gb); |
... | ... |
@@ -931,7 +947,7 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
931 | 931 |
skip_bits1(&gb); |
932 | 932 |
skip_bits1(&gb); |
933 | 933 |
|
934 |
- s->low_delay = get_bits1(&gb); |
|
934 |
+ h->low_delay = get_bits1(&gb); |
|
935 | 935 |
|
936 | 936 |
/* unknown field */ |
937 | 937 |
skip_bits1(&gb); |
... | ... |
@@ -939,9 +955,9 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
939 | 939 |
while (get_bits1(&gb)) |
940 | 940 |
skip_bits(&gb, 8); |
941 | 941 |
|
942 |
- svq3->unknown_flag = get_bits1(&gb); |
|
943 |
- avctx->has_b_frames = !s->low_delay; |
|
944 |
- if (svq3->unknown_flag) { |
|
942 |
+ s->unknown_flag = get_bits1(&gb); |
|
943 |
+ avctx->has_b_frames = !h->low_delay; |
|
944 |
+ if (s->unknown_flag) { |
|
945 | 945 |
#if CONFIG_ZLIB |
946 | 946 |
unsigned watermark_width = svq3_get_ue_golomb(&gb); |
947 | 947 |
unsigned watermark_height = svq3_get_ue_golomb(&gb); |
... | ... |
@@ -970,11 +986,10 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
970 | 970 |
av_free(buf); |
971 | 971 |
return -1; |
972 | 972 |
} |
973 |
- svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0); |
|
974 |
- svq3->watermark_key = svq3->watermark_key << 16 | |
|
975 |
- svq3->watermark_key; |
|
973 |
+ s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0); |
|
974 |
+ s->watermark_key = s->watermark_key << 16 | s->watermark_key; |
|
976 | 975 |
av_log(avctx, AV_LOG_DEBUG, |
977 |
- "watermark key %#x\n", svq3->watermark_key); |
|
976 |
+ "watermark key %#x\n", s->watermark_key); |
|
978 | 977 |
av_free(buf); |
979 | 978 |
#else |
980 | 979 |
av_log(avctx, AV_LOG_ERROR, |
... | ... |
@@ -984,13 +999,15 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
984 | 984 |
} |
985 | 985 |
} |
986 | 986 |
|
987 |
- s->width = avctx->width; |
|
988 |
- s->height = avctx->height; |
|
989 |
- |
|
990 |
- if (ff_MPV_common_init(s) < 0) |
|
991 |
- return -1; |
|
992 |
- |
|
993 |
- h->b_stride = 4 * s->mb_width; |
|
987 |
+ h->width = avctx->width; |
|
988 |
+ h->height = avctx->height; |
|
989 |
+ h->mb_width = (h->width + 15) / 16; |
|
990 |
+ h->mb_height = (h->height + 15) / 16; |
|
991 |
+ h->mb_stride = h->mb_width + 1; |
|
992 |
+ h->mb_num = h->mb_width * h->mb_height; |
|
993 |
+ h->b_stride = 4 * h->mb_width; |
|
994 |
+ s->h_edge_pos = h->mb_width * 16; |
|
995 |
+ s->v_edge_pos = h->mb_height * 16; |
|
994 | 996 |
|
995 | 997 |
if (ff_h264_alloc_tables(h) < 0) { |
996 | 998 |
av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n"); |
... | ... |
@@ -1000,73 +1017,153 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) |
1000 | 1000 |
return 0; |
1001 | 1001 |
} |
1002 | 1002 |
|
1003 |
+static int get_buffer(AVCodecContext *avctx, Picture *pic) |
|
1004 |
+{ |
|
1005 |
+ SVQ3Context *s = avctx->priv_data; |
|
1006 |
+ H264Context *h = &s->h; |
|
1007 |
+ const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1; |
|
1008 |
+ const int mb_array_size = h->mb_stride * h->mb_height; |
|
1009 |
+ const int b4_stride = h->mb_width * 4 + 1; |
|
1010 |
+ const int b4_array_size = b4_stride * h->mb_height * 4; |
|
1011 |
+ int ret; |
|
1012 |
+ |
|
1013 |
+ if (!pic->motion_val_base[0]) { |
|
1014 |
+ int i; |
|
1015 |
+ |
|
1016 |
+ pic->mb_type_base = av_mallocz((big_mb_num + h->mb_stride) * sizeof(uint32_t)); |
|
1017 |
+ if (!pic->mb_type_base) |
|
1018 |
+ return AVERROR(ENOMEM); |
|
1019 |
+ pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1; |
|
1020 |
+ |
|
1021 |
+ for (i = 0; i < 2; i++) { |
|
1022 |
+ pic->motion_val_base[i] = av_mallocz(2 * (b4_array_size + 4) * sizeof(int16_t)); |
|
1023 |
+ pic->f.ref_index[i] = av_mallocz(4 * mb_array_size); |
|
1024 |
+ if (!pic->motion_val_base[i] || !pic->f.ref_index[i]) |
|
1025 |
+ return AVERROR(ENOMEM); |
|
1026 |
+ |
|
1027 |
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4; |
|
1028 |
+ } |
|
1029 |
+ } |
|
1030 |
+ pic->f.motion_subsample_log2 = 2; |
|
1031 |
+ pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B); |
|
1032 |
+ |
|
1033 |
+ ret = ff_get_buffer(avctx, &pic->f); |
|
1034 |
+ |
|
1035 |
+ h->linesize = pic->f.linesize[0]; |
|
1036 |
+ h->uvlinesize = pic->f.linesize[1]; |
|
1037 |
+ |
|
1038 |
+ return ret; |
|
1039 |
+} |
|
1040 |
+ |
|
1003 | 1041 |
static int svq3_decode_frame(AVCodecContext *avctx, void *data, |
1004 | 1042 |
int *got_frame, AVPacket *avpkt) |
1005 | 1043 |
{ |
1006 | 1044 |
const uint8_t *buf = avpkt->data; |
1007 |
- SVQ3Context *svq3 = avctx->priv_data; |
|
1008 |
- H264Context *h = &svq3->h; |
|
1009 |
- MpegEncContext *s = &h->s; |
|
1045 |
+ SVQ3Context *s = avctx->priv_data; |
|
1046 |
+ H264Context *h = &s->h; |
|
1010 | 1047 |
int buf_size = avpkt->size; |
1011 |
- int m; |
|
1048 |
+ int ret, m, i; |
|
1012 | 1049 |
|
1013 | 1050 |
/* special case for last picture */ |
1014 | 1051 |
if (buf_size == 0) { |
1015 |
- if (s->next_picture_ptr && !s->low_delay) { |
|
1016 |
- *(AVFrame *) data = s->next_picture.f; |
|
1017 |
- s->next_picture_ptr = NULL; |
|
1052 |
+ if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) { |
|
1053 |
+ *(AVFrame *) data = s->next_pic->f; |
|
1054 |
+ s->last_frame_output = 1; |
|
1018 | 1055 |
*got_frame = 1; |
1019 | 1056 |
} |
1020 | 1057 |
return 0; |
1021 | 1058 |
} |
1022 | 1059 |
|
1023 |
- init_get_bits(&s->gb, buf, 8 * buf_size); |
|
1060 |
+ init_get_bits(&h->gb, buf, 8 * buf_size); |
|
1024 | 1061 |
|
1025 |
- s->mb_x = s->mb_y = h->mb_xy = 0; |
|
1062 |
+ h->mb_x = h->mb_y = h->mb_xy = 0; |
|
1026 | 1063 |
|
1027 | 1064 |
if (svq3_decode_slice_header(avctx)) |
1028 | 1065 |
return -1; |
1029 | 1066 |
|
1030 |
- s->pict_type = h->slice_type; |
|
1031 |
- s->picture_number = h->slice_num; |
|
1067 |
+ h->pict_type = h->slice_type; |
|
1032 | 1068 |
|
1033 |
- if (avctx->debug & FF_DEBUG_PICT_INFO) |
|
1034 |
- av_log(h->s.avctx, AV_LOG_DEBUG, |
|
1035 |
- "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n", |
|
1036 |
- av_get_picture_type_char(s->pict_type), |
|
1037 |
- svq3->halfpel_flag, svq3->thirdpel_flag, |
|
1038 |
- s->adaptive_quant, s->qscale, h->slice_num); |
|
1069 |
+ if (h->pict_type != AV_PICTURE_TYPE_B) |
|
1070 |
+ FFSWAP(Picture*, s->next_pic, s->last_pic); |
|
1071 |
+ |
|
1072 |
+ if (s->cur_pic->f.data[0]) |
|
1073 |
+ avctx->release_buffer(avctx, &s->cur_pic->f); |
|
1039 | 1074 |
|
1040 | 1075 |
/* for skipping the frame */ |
1041 |
- s->current_picture.f.pict_type = s->pict_type; |
|
1042 |
- s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I); |
|
1076 |
+ s->cur_pic->f.pict_type = h->pict_type; |
|
1077 |
+ s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I); |
|
1043 | 1078 |
|
1044 |
- /* Skip B-frames if we do not have reference frames. */ |
|
1045 |
- if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B) |
|
1046 |
- return 0; |
|
1047 |
- if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B || |
|
1048 |
- avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I || |
|
1079 |
+ ret = get_buffer(avctx, s->cur_pic); |
|
1080 |
+ if (ret < 0) |
|
1081 |
+ return ret; |
|
1082 |
+ |
|
1083 |
+ h->cur_pic_ptr = s->cur_pic; |
|
1084 |
+ h->cur_pic = *s->cur_pic; |
|
1085 |
+ |
|
1086 |
+ for (i = 0; i < 16; i++) { |
|
1087 |
+ h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1088 |
+ h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3); |
|
1089 |
+ } |
|
1090 |
+ for (i = 0; i < 16; i++) { |
|
1091 |
+ h->block_offset[16 + i] = |
|
1092 |
+ h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1093 |
+ h->block_offset[48 + 16 + i] = |
|
1094 |
+ h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); |
|
1095 |
+ } |
|
1096 |
+ |
|
1097 |
+ if (h->pict_type != AV_PICTURE_TYPE_I) { |
|
1098 |
+ if (!s->last_pic->f.data[0]) { |
|
1099 |
+ av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n"); |
|
1100 |
+ ret = get_buffer(avctx, s->last_pic); |
|
1101 |
+ if (ret < 0) |
|
1102 |
+ return ret; |
|
1103 |
+ memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]); |
|
1104 |
+ memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) * |
|
1105 |
+ s->last_pic->f.linesize[1]); |
|
1106 |
+ memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) * |
|
1107 |
+ s->last_pic->f.linesize[2]); |
|
1108 |
+ } |
|
1109 |
+ |
|
1110 |
+ if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) { |
|
1111 |
+ av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n"); |
|
1112 |
+ ret = get_buffer(avctx, s->next_pic); |
|
1113 |
+ if (ret < 0) |
|
1114 |
+ return ret; |
|
1115 |
+ memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]); |
|
1116 |
+ memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) * |
|
1117 |
+ s->next_pic->f.linesize[1]); |
|
1118 |
+ memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) * |
|
1119 |
+ s->next_pic->f.linesize[2]); |
|
1120 |
+ } |
|
1121 |
+ } |
|
1122 |
+ |
|
1123 |
+ if (avctx->debug & FF_DEBUG_PICT_INFO) |
|
1124 |
+ av_log(h->avctx, AV_LOG_DEBUG, |
|
1125 |
+ "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n", |
|
1126 |
+ av_get_picture_type_char(h->pict_type), |
|
1127 |
+ s->halfpel_flag, s->thirdpel_flag, |
|
1128 |
+ s->adaptive_quant, h->qscale, h->slice_num); |
|
1129 |
+ |
|
1130 |
+ if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B || |
|
1131 |
+ avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I || |
|
1049 | 1132 |
avctx->skip_frame >= AVDISCARD_ALL) |
1050 | 1133 |
return 0; |
1051 | 1134 |
|
1052 | 1135 |
if (s->next_p_frame_damaged) { |
1053 |
- if (s->pict_type == AV_PICTURE_TYPE_B) |
|
1136 |
+ if (h->pict_type == AV_PICTURE_TYPE_B) |
|
1054 | 1137 |
return 0; |
1055 | 1138 |
else |
1056 | 1139 |
s->next_p_frame_damaged = 0; |
1057 | 1140 |
} |
1058 | 1141 |
|
1059 |
- if (ff_h264_frame_start(h) < 0) |
|
1060 |
- return -1; |
|
1061 |
- |
|
1062 |
- if (s->pict_type == AV_PICTURE_TYPE_B) { |
|
1142 |
+ if (h->pict_type == AV_PICTURE_TYPE_B) { |
|
1063 | 1143 |
h->frame_num_offset = h->slice_num - h->prev_frame_num; |
1064 | 1144 |
|
1065 | 1145 |
if (h->frame_num_offset < 0) |
1066 | 1146 |
h->frame_num_offset += 256; |
1067 | 1147 |
if (h->frame_num_offset == 0 || |
1068 | 1148 |
h->frame_num_offset >= h->prev_frame_num_offset) { |
1069 |
- av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n"); |
|
1149 |
+ av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n"); |
|
1070 | 1150 |
return -1; |
1071 | 1151 |
} |
1072 | 1152 |
} else { |
... | ... |
@@ -1089,16 +1186,16 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data, |
1089 | 1089 |
} |
1090 | 1090 |
} |
1091 | 1091 |
|
1092 |
- for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
|
1093 |
- for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { |
|
1092 |
+ for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) { |
|
1093 |
+ for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) { |
|
1094 | 1094 |
unsigned mb_type; |
1095 |
- h->mb_xy = s->mb_x + s->mb_y * s->mb_stride; |
|
1095 |
+ h->mb_xy = h->mb_x + h->mb_y * h->mb_stride; |
|
1096 | 1096 |
|
1097 |
- if ((get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits && |
|
1098 |
- ((get_bits_count(&s->gb) & 7) == 0 || |
|
1099 |
- show_bits(&s->gb, -get_bits_count(&s->gb) & 7) == 0)) { |
|
1100 |
- skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb)); |
|
1101 |
- s->gb.size_in_bits = 8 * buf_size; |
|
1097 |
+ if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits && |
|
1098 |
+ ((get_bits_count(&h->gb) & 7) == 0 || |
|
1099 |
+ show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) { |
|
1100 |
+ skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb)); |
|
1101 |
+ h->gb.size_in_bits = 8 * buf_size; |
|
1102 | 1102 |
|
1103 | 1103 |
if (svq3_decode_slice_header(avctx)) |
1104 | 1104 |
return -1; |
... | ... |
@@ -1106,52 +1203,71 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data, |
1106 | 1106 |
/* TODO: support s->mb_skip_run */ |
1107 | 1107 |
} |
1108 | 1108 |
|
1109 |
- mb_type = svq3_get_ue_golomb(&s->gb); |
|
1109 |
+ mb_type = svq3_get_ue_golomb(&h->gb); |
|
1110 | 1110 |
|
1111 |
- if (s->pict_type == AV_PICTURE_TYPE_I) |
|
1111 |
+ if (h->pict_type == AV_PICTURE_TYPE_I) |
|
1112 | 1112 |
mb_type += 8; |
1113 |
- else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) |
|
1113 |
+ else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) |
|
1114 | 1114 |
mb_type += 4; |
1115 |
- if (mb_type > 33 || svq3_decode_mb(svq3, mb_type)) { |
|
1116 |
- av_log(h->s.avctx, AV_LOG_ERROR, |
|
1117 |
- "error while decoding MB %d %d\n", s->mb_x, s->mb_y); |
|
1115 |
+ if (mb_type > 33 || svq3_decode_mb(s, mb_type)) { |
|
1116 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
1117 |
+ "error while decoding MB %d %d\n", h->mb_x, h->mb_y); |
|
1118 | 1118 |
return -1; |
1119 | 1119 |
} |
1120 | 1120 |
|
1121 | 1121 |
if (mb_type != 0) |
1122 | 1122 |
ff_h264_hl_decode_mb(h); |
1123 | 1123 |
|
1124 |
- if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) |
|
1125 |
- s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = |
|
1126 |
- (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1; |
|
1124 |
+ if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay) |
|
1125 |
+ h->cur_pic.f.mb_type[h->mb_x + h->mb_y * h->mb_stride] = |
|
1126 |
+ (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1; |
|
1127 | 1127 |
} |
1128 | 1128 |
|
1129 |
- ff_mpeg_draw_horiz_band(s, 16 * s->mb_y, 16); |
|
1129 |
+ ff_draw_horiz_band(avctx, &h->dsp, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL, |
|
1130 |
+ 16 * h->mb_y, 16, h->picture_structure, 0, 1, |
|
1131 |
+ h->low_delay, h->mb_height * 16, h->mb_width * 16); |
|
1130 | 1132 |
} |
1131 | 1133 |
|
1132 |
- ff_MPV_frame_end(s); |
|
1133 |
- |
|
1134 |
- if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) |
|
1135 |
- *(AVFrame *)data = s->current_picture.f; |
|
1134 |
+ if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay) |
|
1135 |
+ *(AVFrame *)data = s->cur_pic->f; |
|
1136 | 1136 |
else |
1137 |
- *(AVFrame *)data = s->last_picture.f; |
|
1137 |
+ *(AVFrame *)data = s->last_pic->f; |
|
1138 | 1138 |
|
1139 | 1139 |
/* Do not output the last pic after seeking. */ |
1140 |
- if (s->last_picture_ptr || s->low_delay) |
|
1140 |
+ if (s->last_pic->f.data[0] || h->low_delay) |
|
1141 | 1141 |
*got_frame = 1; |
1142 | 1142 |
|
1143 |
+ if (h->pict_type != AV_PICTURE_TYPE_B) { |
|
1144 |
+ FFSWAP(Picture*, s->cur_pic, s->next_pic); |
|
1145 |
+ } |
|
1146 |
+ |
|
1143 | 1147 |
return buf_size; |
1144 | 1148 |
} |
1145 | 1149 |
|
1150 |
+static void free_picture(AVCodecContext *avctx, Picture *pic) |
|
1151 |
+{ |
|
1152 |
+ int i; |
|
1153 |
+ for (i = 0; i < 2; i++) { |
|
1154 |
+ av_freep(&pic->motion_val_base[i]); |
|
1155 |
+ av_freep(&pic->f.ref_index[i]); |
|
1156 |
+ } |
|
1157 |
+ av_freep(&pic->mb_type_base); |
|
1158 |
+ |
|
1159 |
+ if (pic->f.data[0]) |
|
1160 |
+ avctx->release_buffer(avctx, &pic->f); |
|
1161 |
+ av_freep(&pic); |
|
1162 |
+} |
|
1163 |
+ |
|
1146 | 1164 |
static int svq3_decode_end(AVCodecContext *avctx) |
1147 | 1165 |
{ |
1148 |
- SVQ3Context *svq3 = avctx->priv_data; |
|
1149 |
- H264Context *h = &svq3->h; |
|
1150 |
- MpegEncContext *s = &h->s; |
|
1166 |
+ SVQ3Context *s = avctx->priv_data; |
|
1167 |
+ H264Context *h = &s->h; |
|
1151 | 1168 |
|
1152 |
- ff_h264_free_context(h); |
|
1169 |
+ free_picture(avctx, s->cur_pic); |
|
1170 |
+ free_picture(avctx, s->next_pic); |
|
1171 |
+ free_picture(avctx, s->last_pic); |
|
1153 | 1172 |
|
1154 |
- ff_MPV_common_end(s); |
|
1173 |
+ ff_h264_free_context(h); |
|
1155 | 1174 |
|
1156 | 1175 |
return 0; |
1157 | 1176 |
} |
... | ... |
@@ -21,6 +21,7 @@ |
21 | 21 |
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | 22 |
*/ |
23 | 23 |
|
24 |
+#include "h264.h" |
|
24 | 25 |
#include "vaapi_internal.h" |
25 | 26 |
|
26 | 27 |
/** |
... | ... |
@@ -40,7 +41,7 @@ static void destroy_buffers(VADisplay display, VABufferID *buffers, unsigned int |
40 | 40 |
} |
41 | 41 |
} |
42 | 42 |
|
43 |
-static int render_picture(struct vaapi_context *vactx, VASurfaceID surface) |
|
43 |
+int ff_vaapi_render_picture(struct vaapi_context *vactx, VASurfaceID surface) |
|
44 | 44 |
{ |
45 | 45 |
VABufferID va_buffers[3]; |
46 | 46 |
unsigned int n_va_buffers = 0; |
... | ... |
@@ -77,7 +78,7 @@ static int render_picture(struct vaapi_context *vactx, VASurfaceID surface) |
77 | 77 |
return 0; |
78 | 78 |
} |
79 | 79 |
|
80 |
-static int commit_slices(struct vaapi_context *vactx) |
|
80 |
+int ff_vaapi_commit_slices(struct vaapi_context *vactx) |
|
81 | 81 |
{ |
82 | 82 |
VABufferID *slice_buf_ids; |
83 | 83 |
VABufferID slice_param_buf_id, slice_data_buf_id; |
... | ... |
@@ -152,7 +153,7 @@ VASliceParameterBufferBase *ff_vaapi_alloc_slice(struct vaapi_context *vactx, co |
152 | 152 |
if (!vactx->slice_data) |
153 | 153 |
vactx->slice_data = buffer; |
154 | 154 |
if (vactx->slice_data + vactx->slice_data_size != buffer) { |
155 |
- if (commit_slices(vactx) < 0) |
|
155 |
+ if (ff_vaapi_commit_slices(vactx) < 0) |
|
156 | 156 |
return NULL; |
157 | 157 |
vactx->slice_data = buffer; |
158 | 158 |
} |
... | ... |
@@ -175,23 +176,12 @@ VASliceParameterBufferBase *ff_vaapi_alloc_slice(struct vaapi_context *vactx, co |
175 | 175 |
return slice_param; |
176 | 176 |
} |
177 | 177 |
|
178 |
-int ff_vaapi_common_end_frame(MpegEncContext *s) |
|
178 |
+void ff_vaapi_common_end_frame(AVCodecContext *avctx) |
|
179 | 179 |
{ |
180 |
- struct vaapi_context * const vactx = s->avctx->hwaccel_context; |
|
181 |
- int ret = -1; |
|
180 |
+ struct vaapi_context * const vactx = avctx->hwaccel_context; |
|
182 | 181 |
|
183 |
- av_dlog(s->avctx, "ff_vaapi_common_end_frame()\n"); |
|
182 |
+ av_dlog(avctx, "ff_vaapi_common_end_frame()\n"); |
|
184 | 183 |
|
185 |
- if (commit_slices(vactx) < 0) |
|
186 |
- goto done; |
|
187 |
- if (vactx->n_slice_buf_ids > 0) { |
|
188 |
- if (render_picture(vactx, ff_vaapi_get_surface_id(s->current_picture_ptr)) < 0) |
|
189 |
- goto done; |
|
190 |
- ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
191 |
- } |
|
192 |
- ret = 0; |
|
193 |
- |
|
194 |
-done: |
|
195 | 184 |
destroy_buffers(vactx->display, &vactx->pic_param_buf_id, 1); |
196 | 185 |
destroy_buffers(vactx->display, &vactx->iq_matrix_buf_id, 1); |
197 | 186 |
destroy_buffers(vactx->display, &vactx->bitplane_buf_id, 1); |
... | ... |
@@ -202,6 +192,27 @@ done: |
202 | 202 |
vactx->slice_buf_ids_alloc = 0; |
203 | 203 |
vactx->slice_count = 0; |
204 | 204 |
vactx->slice_params_alloc = 0; |
205 |
+} |
|
206 |
+ |
|
207 |
+int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx) |
|
208 |
+{ |
|
209 |
+ struct vaapi_context * const vactx = avctx->hwaccel_context; |
|
210 |
+ MpegEncContext *s = avctx->priv_data; |
|
211 |
+ int ret; |
|
212 |
+ |
|
213 |
+ ret = ff_vaapi_commit_slices(vactx); |
|
214 |
+ if (ret < 0) |
|
215 |
+ goto finish; |
|
216 |
+ |
|
217 |
+ ret = ff_vaapi_render_picture(vactx, |
|
218 |
+ ff_vaapi_get_surface_id(s->current_picture_ptr)); |
|
219 |
+ if (ret < 0) |
|
220 |
+ goto finish; |
|
221 |
+ |
|
222 |
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
223 |
+ |
|
224 |
+finish: |
|
225 |
+ ff_vaapi_common_end_frame(avctx->priv_data); |
|
205 | 226 |
return ret; |
206 | 227 |
} |
207 | 228 |
|
... | ... |
@@ -224,7 +224,6 @@ static int start_frame(AVCodecContext *avctx, |
224 | 224 |
av_unused uint32_t size) |
225 | 225 |
{ |
226 | 226 |
H264Context * const h = avctx->priv_data; |
227 |
- MpegEncContext * const s = &h->s; |
|
228 | 227 |
struct vaapi_context * const vactx = avctx->hwaccel_context; |
229 | 228 |
VAPictureParameterBufferH264 *pic_param; |
230 | 229 |
VAIQMatrixBufferH264 *iq_matrix; |
... | ... |
@@ -237,11 +236,11 @@ static int start_frame(AVCodecContext *avctx, |
237 | 237 |
pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferH264)); |
238 | 238 |
if (!pic_param) |
239 | 239 |
return -1; |
240 |
- fill_vaapi_pic(&pic_param->CurrPic, s->current_picture_ptr, s->picture_structure); |
|
240 |
+ fill_vaapi_pic(&pic_param->CurrPic, h->cur_pic_ptr, h->picture_structure); |
|
241 | 241 |
if (fill_vaapi_ReferenceFrames(pic_param, h) < 0) |
242 | 242 |
return -1; |
243 |
- pic_param->picture_width_in_mbs_minus1 = s->mb_width - 1; |
|
244 |
- pic_param->picture_height_in_mbs_minus1 = s->mb_height - 1; |
|
243 |
+ pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1; |
|
244 |
+ pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1; |
|
245 | 245 |
pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8; |
246 | 246 |
pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8; |
247 | 247 |
pic_param->num_ref_frames = h->sps.ref_frame_count; |
... | ... |
@@ -269,7 +268,7 @@ static int start_frame(AVCodecContext *avctx, |
269 | 269 |
pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred; |
270 | 270 |
pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc; |
271 | 271 |
pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode; |
272 |
- pic_param->pic_fields.bits.field_pic_flag = s->picture_structure != PICT_FRAME; |
|
272 |
+ pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME; |
|
273 | 273 |
pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred; |
274 | 274 |
pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present; |
275 | 275 |
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; |
... | ... |
@@ -289,10 +288,24 @@ static int start_frame(AVCodecContext *avctx, |
289 | 289 |
/** End a hardware decoding based frame. */ |
290 | 290 |
static int end_frame(AVCodecContext *avctx) |
291 | 291 |
{ |
292 |
+ struct vaapi_context * const vactx = avctx->hwaccel_context; |
|
292 | 293 |
H264Context * const h = avctx->priv_data; |
294 |
+ int ret; |
|
293 | 295 |
|
294 | 296 |
av_dlog(avctx, "end_frame()\n"); |
295 |
- return ff_vaapi_common_end_frame(&h->s); |
|
297 |
+ ret = ff_vaapi_commit_slices(vactx); |
|
298 |
+ if (ret < 0) |
|
299 |
+ goto finish; |
|
300 |
+ |
|
301 |
+ ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->cur_pic_ptr)); |
|
302 |
+ if (ret < 0) |
|
303 |
+ goto finish; |
|
304 |
+ |
|
305 |
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height); |
|
306 |
+ |
|
307 |
+finish: |
|
308 |
+ ff_vaapi_common_end_frame(avctx); |
|
309 |
+ return ret; |
|
296 | 310 |
} |
297 | 311 |
|
298 | 312 |
/** Decode the given H.264 slice with VA API. */ |
... | ... |
@@ -301,7 +314,6 @@ static int decode_slice(AVCodecContext *avctx, |
301 | 301 |
uint32_t size) |
302 | 302 |
{ |
303 | 303 |
H264Context * const h = avctx->priv_data; |
304 |
- MpegEncContext * const s = &h->s; |
|
305 | 304 |
VASliceParameterBufferH264 *slice_param; |
306 | 305 |
|
307 | 306 |
av_dlog(avctx, "decode_slice(): buffer %p, size %d\n", buffer, size); |
... | ... |
@@ -310,14 +322,14 @@ static int decode_slice(AVCodecContext *avctx, |
310 | 310 |
slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size); |
311 | 311 |
if (!slice_param) |
312 | 312 |
return -1; |
313 |
- slice_param->slice_data_bit_offset = get_bits_count(&h->s.gb) + 8; /* bit buffer started beyond nal_unit_type */ |
|
314 |
- slice_param->first_mb_in_slice = (s->mb_y >> FIELD_OR_MBAFF_PICTURE) * s->mb_width + s->mb_x; |
|
313 |
+ slice_param->slice_data_bit_offset = get_bits_count(&h->gb) + 8; /* bit buffer started beyond nal_unit_type */ |
|
314 |
+ slice_param->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE) * h->mb_width + h->mb_x; |
|
315 | 315 |
slice_param->slice_type = ff_h264_get_slice_type(h); |
316 | 316 |
slice_param->direct_spatial_mv_pred_flag = h->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0; |
317 | 317 |
slice_param->num_ref_idx_l0_active_minus1 = h->list_count > 0 ? h->ref_count[0] - 1 : 0; |
318 | 318 |
slice_param->num_ref_idx_l1_active_minus1 = h->list_count > 1 ? h->ref_count[1] - 1 : 0; |
319 | 319 |
slice_param->cabac_init_idc = h->cabac_init_idc; |
320 |
- slice_param->slice_qp_delta = s->qscale - h->pps.init_qp; |
|
320 |
+ slice_param->slice_qp_delta = h->qscale - h->pps.init_qp; |
|
321 | 321 |
slice_param->disable_deblocking_filter_idc = h->deblocking_filter < 2 ? !h->deblocking_filter : h->deblocking_filter; |
322 | 322 |
slice_param->slice_alpha_c0_offset_div2 = h->slice_alpha_c0_offset / 2 - 26; |
323 | 323 |
slice_param->slice_beta_offset_div2 = h->slice_beta_offset / 2 - 26; |
... | ... |
@@ -42,7 +42,7 @@ static inline VASurfaceID ff_vaapi_get_surface_id(Picture *pic) |
42 | 42 |
} |
43 | 43 |
|
44 | 44 |
/** Common AVHWAccel.end_frame() implementation */ |
45 |
-int ff_vaapi_common_end_frame(MpegEncContext *s); |
|
45 |
+void ff_vaapi_common_end_frame(AVCodecContext *avctx); |
|
46 | 46 |
|
47 | 47 |
/** Allocate a new picture parameter buffer */ |
48 | 48 |
void *ff_vaapi_alloc_pic_param(struct vaapi_context *vactx, unsigned int size); |
... | ... |
@@ -63,6 +63,10 @@ uint8_t *ff_vaapi_alloc_bitplane(struct vaapi_context *vactx, uint32_t size); |
63 | 63 |
*/ |
64 | 64 |
VASliceParameterBufferBase *ff_vaapi_alloc_slice(struct vaapi_context *vactx, const uint8_t *buffer, uint32_t size); |
65 | 65 |
|
66 |
+int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx); |
|
67 |
+int ff_vaapi_commit_slices(struct vaapi_context *vactx); |
|
68 |
+int ff_vaapi_render_picture(struct vaapi_context *vactx, VASurfaceID surface); |
|
69 |
+ |
|
66 | 70 |
/* @} */ |
67 | 71 |
|
68 | 72 |
#endif /* AVCODEC_VAAPI_INTERNAL_H */ |
... | ... |
@@ -99,11 +99,6 @@ static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_ |
99 | 99 |
return 0; |
100 | 100 |
} |
101 | 101 |
|
102 |
-static int vaapi_mpeg2_end_frame(AVCodecContext *avctx) |
|
103 |
-{ |
|
104 |
- return ff_vaapi_common_end_frame(avctx->priv_data); |
|
105 |
-} |
|
106 |
- |
|
107 | 102 |
static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) |
108 | 103 |
{ |
109 | 104 |
MpegEncContext * const s = avctx->priv_data; |
... | ... |
@@ -144,6 +139,6 @@ AVHWAccel ff_mpeg2_vaapi_hwaccel = { |
144 | 144 |
.id = AV_CODEC_ID_MPEG2VIDEO, |
145 | 145 |
.pix_fmt = AV_PIX_FMT_VAAPI_VLD, |
146 | 146 |
.start_frame = vaapi_mpeg2_start_frame, |
147 |
- .end_frame = vaapi_mpeg2_end_frame, |
|
147 |
+ .end_frame = ff_vaapi_mpeg_end_frame, |
|
148 | 148 |
.decode_slice = vaapi_mpeg2_decode_slice, |
149 | 149 |
}; |
... | ... |
@@ -115,11 +115,6 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_ |
115 | 115 |
return 0; |
116 | 116 |
} |
117 | 117 |
|
118 |
-static int vaapi_mpeg4_end_frame(AVCodecContext *avctx) |
|
119 |
-{ |
|
120 |
- return ff_vaapi_common_end_frame(avctx->priv_data); |
|
121 |
-} |
|
122 |
- |
|
123 | 118 |
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) |
124 | 119 |
{ |
125 | 120 |
MpegEncContext * const s = avctx->priv_data; |
... | ... |
@@ -156,7 +151,7 @@ AVHWAccel ff_mpeg4_vaapi_hwaccel = { |
156 | 156 |
.id = AV_CODEC_ID_MPEG4, |
157 | 157 |
.pix_fmt = AV_PIX_FMT_VAAPI_VLD, |
158 | 158 |
.start_frame = vaapi_mpeg4_start_frame, |
159 |
- .end_frame = vaapi_mpeg4_end_frame, |
|
159 |
+ .end_frame = ff_vaapi_mpeg_end_frame, |
|
160 | 160 |
.decode_slice = vaapi_mpeg4_decode_slice, |
161 | 161 |
}; |
162 | 162 |
#endif |
... | ... |
@@ -168,7 +163,7 @@ AVHWAccel ff_h263_vaapi_hwaccel = { |
168 | 168 |
.id = AV_CODEC_ID_H263, |
169 | 169 |
.pix_fmt = AV_PIX_FMT_VAAPI_VLD, |
170 | 170 |
.start_frame = vaapi_mpeg4_start_frame, |
171 |
- .end_frame = vaapi_mpeg4_end_frame, |
|
171 |
+ .end_frame = ff_vaapi_mpeg_end_frame, |
|
172 | 172 |
.decode_slice = vaapi_mpeg4_decode_slice, |
173 | 173 |
}; |
174 | 174 |
#endif |
... | ... |
@@ -310,13 +310,6 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t |
310 | 310 |
return 0; |
311 | 311 |
} |
312 | 312 |
|
313 |
-static int vaapi_vc1_end_frame(AVCodecContext *avctx) |
|
314 |
-{ |
|
315 |
- VC1Context * const v = avctx->priv_data; |
|
316 |
- |
|
317 |
- return ff_vaapi_common_end_frame(&v->s); |
|
318 |
-} |
|
319 |
- |
|
320 | 313 |
static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) |
321 | 314 |
{ |
322 | 315 |
VC1Context * const v = avctx->priv_data; |
... | ... |
@@ -347,7 +340,7 @@ AVHWAccel ff_wmv3_vaapi_hwaccel = { |
347 | 347 |
.id = AV_CODEC_ID_WMV3, |
348 | 348 |
.pix_fmt = AV_PIX_FMT_VAAPI_VLD, |
349 | 349 |
.start_frame = vaapi_vc1_start_frame, |
350 |
- .end_frame = vaapi_vc1_end_frame, |
|
350 |
+ .end_frame = ff_vaapi_mpeg_end_frame, |
|
351 | 351 |
.decode_slice = vaapi_vc1_decode_slice, |
352 | 352 |
}; |
353 | 353 |
#endif |
... | ... |
@@ -358,6 +351,6 @@ AVHWAccel ff_vc1_vaapi_hwaccel = { |
358 | 358 |
.id = AV_CODEC_ID_VC1, |
359 | 359 |
.pix_fmt = AV_PIX_FMT_VAAPI_VLD, |
360 | 360 |
.start_frame = vaapi_vc1_start_frame, |
361 |
- .end_frame = vaapi_vc1_end_frame, |
|
361 |
+ .end_frame = ff_vaapi_mpeg_end_frame, |
|
362 | 362 |
.decode_slice = vaapi_vc1_decode_slice, |
363 | 363 |
}; |
... | ... |
@@ -241,7 +241,7 @@ static int end_frame(AVCodecContext *avctx) |
241 | 241 |
{ |
242 | 242 |
H264Context *h = avctx->priv_data; |
243 | 243 |
struct vda_context *vda_ctx = avctx->hwaccel_context; |
244 |
- AVFrame *frame = &h->s.current_picture_ptr->f; |
|
244 |
+ AVFrame *frame = &h->cur_pic_ptr->f; |
|
245 | 245 |
int status; |
246 | 246 |
|
247 | 247 |
if (!vda_ctx->decoder || !vda_ctx->priv_bitstream) |
... | ... |
@@ -48,20 +48,18 @@ int ff_vdpau_common_start_frame(AVCodecContext *avctx, |
48 | 48 |
return 0; |
49 | 49 |
} |
50 | 50 |
|
51 |
-int ff_vdpau_common_end_frame(AVCodecContext *avctx) |
|
51 |
+int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx) |
|
52 | 52 |
{ |
53 |
- MpegEncContext * const s = avctx->priv_data; |
|
54 | 53 |
AVVDPAUContext *hwctx = avctx->hwaccel_context; |
54 |
+ MpegEncContext *s = avctx->priv_data; |
|
55 |
+ VdpVideoSurface surf = ff_vdpau_get_surface_id(s->current_picture_ptr); |
|
55 | 56 |
|
56 |
- if (hwctx->bitstream_buffers_used) { |
|
57 |
- VdpVideoSurface surf = ff_vdpau_get_surface_id(s->current_picture_ptr); |
|
57 |
+ hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info, |
|
58 |
+ hwctx->bitstream_buffers_used, hwctx->bitstream_buffers); |
|
58 | 59 |
|
59 |
- hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info, |
|
60 |
- hwctx->bitstream_buffers_used, hwctx->bitstream_buffers); |
|
60 |
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
61 |
+ hwctx->bitstream_buffers_used = 0; |
|
61 | 62 |
|
62 |
- ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
63 |
- hwctx->bitstream_buffers_used = 0; |
|
64 |
- } |
|
65 | 63 |
return 0; |
66 | 64 |
} |
67 | 65 |
|
... | ... |
@@ -87,15 +85,14 @@ int ff_vdpau_add_buffer(AVCodecContext *avctx, |
87 | 87 |
|
88 | 88 |
/* Obsolete non-hwaccel VDPAU support below... */ |
89 | 89 |
|
90 |
-void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) |
|
90 |
+void ff_vdpau_h264_set_reference_frames(H264Context *h) |
|
91 | 91 |
{ |
92 |
- H264Context *h = s->avctx->priv_data; |
|
93 | 92 |
struct vdpau_render_state *render, *render_ref; |
94 | 93 |
VdpReferenceFrameH264 *rf, *rf2; |
95 | 94 |
Picture *pic; |
96 | 95 |
int i, list, pic_frame_idx; |
97 | 96 |
|
98 |
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; |
|
97 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
99 | 98 |
assert(render); |
100 | 99 |
|
101 | 100 |
rf = &render->info.h264.referenceFrames[0]; |
... | ... |
@@ -156,12 +153,9 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) |
156 | 156 |
} |
157 | 157 |
} |
158 | 158 |
|
159 |
-void ff_vdpau_add_data_chunk(MpegEncContext *s, |
|
160 |
- const uint8_t *buf, int buf_size) |
|
159 |
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size) |
|
161 | 160 |
{ |
162 |
- struct vdpau_render_state *render; |
|
163 |
- |
|
164 |
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; |
|
161 |
+ struct vdpau_render_state *render = (struct vdpau_render_state*)data; |
|
165 | 162 |
assert(render); |
166 | 163 |
|
167 | 164 |
render->bitstream_buffers= av_fast_realloc( |
... | ... |
@@ -176,17 +170,16 @@ void ff_vdpau_add_data_chunk(MpegEncContext *s, |
176 | 176 |
render->bitstream_buffers_used++; |
177 | 177 |
} |
178 | 178 |
|
179 |
-void ff_vdpau_h264_picture_start(MpegEncContext *s) |
|
179 |
+void ff_vdpau_h264_picture_start(H264Context *h) |
|
180 | 180 |
{ |
181 |
- H264Context *h = s->avctx->priv_data; |
|
182 | 181 |
struct vdpau_render_state *render; |
183 | 182 |
int i; |
184 | 183 |
|
185 |
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; |
|
184 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
186 | 185 |
assert(render); |
187 | 186 |
|
188 | 187 |
for (i = 0; i < 2; ++i) { |
189 |
- int foc = s->current_picture_ptr->field_poc[i]; |
|
188 |
+ int foc = h->cur_pic_ptr->field_poc[i]; |
|
190 | 189 |
if (foc == INT_MAX) |
191 | 190 |
foc = 0; |
192 | 191 |
render->info.h264.field_order_cnt[i] = foc; |
... | ... |
@@ -195,21 +188,20 @@ void ff_vdpau_h264_picture_start(MpegEncContext *s) |
195 | 195 |
render->info.h264.frame_num = h->frame_num; |
196 | 196 |
} |
197 | 197 |
|
198 |
-void ff_vdpau_h264_picture_complete(MpegEncContext *s) |
|
198 |
+void ff_vdpau_h264_picture_complete(H264Context *h) |
|
199 | 199 |
{ |
200 |
- H264Context *h = s->avctx->priv_data; |
|
201 | 200 |
struct vdpau_render_state *render; |
202 | 201 |
|
203 |
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; |
|
202 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
204 | 203 |
assert(render); |
205 | 204 |
|
206 | 205 |
render->info.h264.slice_count = h->slice_num; |
207 | 206 |
if (render->info.h264.slice_count < 1) |
208 | 207 |
return; |
209 | 208 |
|
210 |
- render->info.h264.is_reference = (s->current_picture_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE; |
|
211 |
- render->info.h264.field_pic_flag = s->picture_structure != PICT_FRAME; |
|
212 |
- render->info.h264.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD; |
|
209 |
+ render->info.h264.is_reference = (h->cur_pic_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE; |
|
210 |
+ render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME; |
|
211 |
+ render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; |
|
213 | 212 |
render->info.h264.num_ref_frames = h->sps.ref_frame_count; |
214 | 213 |
render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag; |
215 | 214 |
render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; |
... | ... |
@@ -235,7 +227,7 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s) |
235 | 235 |
memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0])); |
236 | 236 |
memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0])); |
237 | 237 |
|
238 |
- ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
238 |
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height); |
|
239 | 239 |
render->bitstream_buffers_used = 0; |
240 | 240 |
} |
241 | 241 |
|
... | ... |
@@ -287,7 +279,7 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, |
287 | 287 |
render->info.mpeg.forward_reference = last->surface; |
288 | 288 |
} |
289 | 289 |
|
290 |
- ff_vdpau_add_data_chunk(s, buf, buf_size); |
|
290 |
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); |
|
291 | 291 |
|
292 | 292 |
render->info.mpeg.slice_count = slice_count; |
293 | 293 |
|
... | ... |
@@ -357,7 +349,7 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, |
357 | 357 |
render->info.vc1.forward_reference = last->surface; |
358 | 358 |
} |
359 | 359 |
|
360 |
- ff_vdpau_add_data_chunk(s, buf, buf_size); |
|
360 |
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); |
|
361 | 361 |
|
362 | 362 |
render->info.vc1.slice_count = 1; |
363 | 363 |
|
... | ... |
@@ -413,7 +405,7 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, |
413 | 413 |
render->info.mpeg4.forward_reference = last->surface; |
414 | 414 |
} |
415 | 415 |
|
416 |
- ff_vdpau_add_data_chunk(s, buf, buf_size); |
|
416 |
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); |
|
417 | 417 |
|
418 | 418 |
ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
419 | 419 |
render->bitstream_buffers_used = 0; |
... | ... |
@@ -119,9 +119,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx, |
119 | 119 |
{ |
120 | 120 |
H264Context * const h = avctx->priv_data; |
121 | 121 |
AVVDPAUContext *hwctx = avctx->hwaccel_context; |
122 |
- MpegEncContext * const s = &h->s; |
|
123 | 122 |
VdpPictureInfoH264 *info = &hwctx->info.h264; |
124 |
- Picture *pic = s->current_picture_ptr; |
|
123 |
+ Picture *pic = h->cur_pic_ptr; |
|
125 | 124 |
|
126 | 125 |
/* init VdpPictureInfoH264 */ |
127 | 126 |
info->slice_count = 0; |
... | ... |
@@ -129,8 +128,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx, |
129 | 129 |
info->field_order_cnt[1] = h264_foc(pic->field_poc[1]); |
130 | 130 |
info->is_reference = h->nal_ref_idc != 0; |
131 | 131 |
info->frame_num = h->frame_num; |
132 |
- info->field_pic_flag = s->picture_structure != PICT_FRAME; |
|
133 |
- info->bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD; |
|
132 |
+ info->field_pic_flag = h->picture_structure != PICT_FRAME; |
|
133 |
+ info->bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; |
|
134 | 134 |
info->num_ref_frames = h->sps.ref_frame_count; |
135 | 135 |
info->mb_adaptive_frame_field_flag = h->sps.mb_aff && !info->field_pic_flag; |
136 | 136 |
info->constrained_intra_pred_flag = h->pps.constrained_intra_pred; |
... | ... |
@@ -185,12 +184,27 @@ static int vdpau_h264_decode_slice(AVCodecContext *avctx, |
185 | 185 |
return 0; |
186 | 186 |
} |
187 | 187 |
|
188 |
+static int vdpau_h264_end_frame(AVCodecContext *avctx) |
|
189 |
+{ |
|
190 |
+ AVVDPAUContext *hwctx = avctx->hwaccel_context; |
|
191 |
+ H264Context *h = avctx->priv_data; |
|
192 |
+ VdpVideoSurface surf = ff_vdpau_get_surface_id(h->cur_pic_ptr); |
|
193 |
+ |
|
194 |
+ hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info, |
|
195 |
+ hwctx->bitstream_buffers_used, hwctx->bitstream_buffers); |
|
196 |
+ |
|
197 |
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height); |
|
198 |
+ hwctx->bitstream_buffers_used = 0; |
|
199 |
+ |
|
200 |
+ return 0; |
|
201 |
+} |
|
202 |
+ |
|
188 | 203 |
AVHWAccel ff_h264_vdpau_hwaccel = { |
189 | 204 |
.name = "h264_vdpau", |
190 | 205 |
.type = AVMEDIA_TYPE_VIDEO, |
191 | 206 |
.id = AV_CODEC_ID_H264, |
192 | 207 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
193 | 208 |
.start_frame = vdpau_h264_start_frame, |
194 |
- .end_frame = ff_vdpau_common_end_frame, |
|
209 |
+ .end_frame = vdpau_h264_end_frame, |
|
195 | 210 |
.decode_slice = vdpau_h264_decode_slice, |
196 | 211 |
}; |
... | ... |
@@ -25,6 +25,7 @@ |
25 | 25 |
#define AVCODEC_VDPAU_INTERNAL_H |
26 | 26 |
|
27 | 27 |
#include <stdint.h> |
28 |
+#include "h264.h" |
|
28 | 29 |
#include "mpegvideo.h" |
29 | 30 |
|
30 | 31 |
/** Extract VdpVideoSurface from a Picture */ |
... | ... |
@@ -35,20 +36,20 @@ static inline uintptr_t ff_vdpau_get_surface_id(Picture *pic) |
35 | 35 |
|
36 | 36 |
int ff_vdpau_common_start_frame(AVCodecContext *avctx, |
37 | 37 |
const uint8_t *buffer, uint32_t size); |
38 |
-int ff_vdpau_common_end_frame(AVCodecContext *avctx); |
|
38 |
+int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx); |
|
39 | 39 |
int ff_vdpau_add_buffer(AVCodecContext *avctx, |
40 | 40 |
const uint8_t *buf, uint32_t buf_size); |
41 | 41 |
|
42 | 42 |
|
43 |
-void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf, |
|
43 |
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, |
|
44 | 44 |
int buf_size); |
45 | 45 |
|
46 | 46 |
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, |
47 | 47 |
int buf_size, int slice_count); |
48 | 48 |
|
49 |
-void ff_vdpau_h264_picture_start(MpegEncContext *s); |
|
50 |
-void ff_vdpau_h264_set_reference_frames(MpegEncContext *s); |
|
51 |
-void ff_vdpau_h264_picture_complete(MpegEncContext *s); |
|
49 |
+void ff_vdpau_h264_picture_start(H264Context *h); |
|
50 |
+void ff_vdpau_h264_set_reference_frames(H264Context *h); |
|
51 |
+void ff_vdpau_h264_picture_complete(H264Context *h); |
|
52 | 52 |
|
53 | 53 |
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, |
54 | 54 |
int buf_size); |
... | ... |
@@ -98,7 +98,7 @@ AVHWAccel ff_mpeg1_vdpau_hwaccel = { |
98 | 98 |
.id = AV_CODEC_ID_MPEG1VIDEO, |
99 | 99 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
100 | 100 |
.start_frame = vdpau_mpeg_start_frame, |
101 |
- .end_frame = ff_vdpau_common_end_frame, |
|
101 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
102 | 102 |
.decode_slice = vdpau_mpeg_decode_slice, |
103 | 103 |
}; |
104 | 104 |
#endif |
... | ... |
@@ -110,7 +110,7 @@ AVHWAccel ff_mpeg2_vdpau_hwaccel = { |
110 | 110 |
.id = AV_CODEC_ID_MPEG2VIDEO, |
111 | 111 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
112 | 112 |
.start_frame = vdpau_mpeg_start_frame, |
113 |
- .end_frame = ff_vdpau_common_end_frame, |
|
113 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
114 | 114 |
.decode_slice = vdpau_mpeg_decode_slice, |
115 | 115 |
}; |
116 | 116 |
#endif |
... | ... |
@@ -92,7 +92,7 @@ AVHWAccel ff_h263_vdpau_hwaccel = { |
92 | 92 |
.id = AV_CODEC_ID_H263, |
93 | 93 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
94 | 94 |
.start_frame = vdpau_mpeg4_start_frame, |
95 |
- .end_frame = ff_vdpau_common_end_frame, |
|
95 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
96 | 96 |
.decode_slice = vdpau_mpeg4_decode_slice, |
97 | 97 |
}; |
98 | 98 |
#endif |
... | ... |
@@ -104,7 +104,7 @@ AVHWAccel ff_mpeg4_vdpau_hwaccel = { |
104 | 104 |
.id = AV_CODEC_ID_MPEG4, |
105 | 105 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
106 | 106 |
.start_frame = vdpau_mpeg4_start_frame, |
107 |
- .end_frame = ff_vdpau_common_end_frame, |
|
107 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
108 | 108 |
.decode_slice = vdpau_mpeg4_decode_slice, |
109 | 109 |
}; |
110 | 110 |
#endif |
... | ... |
@@ -112,7 +112,7 @@ AVHWAccel ff_wmv3_vdpau_hwaccel = { |
112 | 112 |
.id = AV_CODEC_ID_WMV3, |
113 | 113 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
114 | 114 |
.start_frame = vdpau_vc1_start_frame, |
115 |
- .end_frame = ff_vdpau_common_end_frame, |
|
115 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
116 | 116 |
.decode_slice = vdpau_vc1_decode_slice, |
117 | 117 |
}; |
118 | 118 |
#endif |
... | ... |
@@ -123,6 +123,6 @@ AVHWAccel ff_vc1_vdpau_hwaccel = { |
123 | 123 |
.id = AV_CODEC_ID_VC1, |
124 | 124 |
.pix_fmt = AV_PIX_FMT_VDPAU, |
125 | 125 |
.start_frame = vdpau_vc1_start_frame, |
126 |
- .end_frame = ff_vdpau_common_end_frame, |
|
126 |
+ .end_frame = ff_vdpau_mpeg_end_frame, |
|
127 | 127 |
.decode_slice = vdpau_vc1_decode_slice, |
128 | 128 |
}; |