@@ -90,7 +90,7 @@ AVCodec ff_flv_encoder = {
     .id             = CODEC_ID_FLV1,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
@@ -327,7 +327,7 @@ AVCodec ff_h261_encoder = {
     .id             = CODEC_ID_H261,
     .priv_data_size = sizeof(H261Context),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("H.261"),
@@ -451,7 +451,7 @@ AVCodec ff_mjpeg_encoder = {
     .id             = CODEC_ID_MJPEG,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
@@ -954,7 +954,7 @@ AVCodec ff_mpeg1video_encoder = {
     .id             = CODEC_ID_MPEG1VIDEO,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .supported_framerates= avpriv_frame_rate_tab+1,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
@@ -969,7 +969,7 @@ AVCodec ff_mpeg2video_encoder = {
     .id             = CODEC_ID_MPEG2VIDEO,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .supported_framerates= avpriv_frame_rate_tab+1,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
@@ -1336,7 +1336,7 @@ AVCodec ff_mpeg4_encoder = {
     .id             = CODEC_ID_MPEG4,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
@@ -261,6 +261,14 @@ typedef struct MpegEncContext {
      * offsets used in asm. */

     int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video()
+    /**
+     * pts difference between the first and second input frame, used for
+     * calculating dts of the first frame when there's a delay */
+    int64_t dts_delta;
+    /**
+     * reordered pts to be used as dts for the next output frame when there's
+     * a delay */
+    int64_t reordered_pts;

     /** bit output */
     PutBitContext pb;
@@ -694,7 +702,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
 void ff_MPV_frame_end(MpegEncContext *s);
 int ff_MPV_encode_init(AVCodecContext *avctx);
 int ff_MPV_encode_end(AVCodecContext *avctx);
-int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                          const AVFrame *frame, int *got_packet);
 void ff_MPV_common_init_mmx(MpegEncContext *s);
 void ff_MPV_common_init_axp(MpegEncContext *s);
 void ff_MPV_common_init_mmi(MpegEncContext *s);
@@ -909,6 +909,9 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
                        "last=%"PRId64"\n", pts, s->user_specified_pts);
                 return -1;
             }
+
+            if (!s->low_delay && pic_arg->display_picture_number == 1)
+                s->dts_delta = time - last;
         }
         s->user_specified_pts = pts;
     } else {
@@ -1374,20 +1377,23 @@ no_output_pic:
     return 0;
 }

-int ff_MPV_encode_picture(AVCodecContext *avctx,
-                          unsigned char *buf, int buf_size, void *data)
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                          const AVFrame *pic_arg, int *got_packet)
 {
     MpegEncContext *s = avctx->priv_data;
-    AVFrame *pic_arg = data;
-    int i, stuffing_count;
+    int i, stuffing_count, ret;
     int context_count = s->slice_context_count;

+    if (!pkt->data &&
+        (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
+        return ret;
+
     for (i = 0; i < context_count; i++) {
         int start_y = s->thread_context[i]->start_mb_y;
         int end_y   = s->thread_context[i]-> end_mb_y;
         int h       = s->mb_height;
-        uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
-        uint8_t *end   = buf + (size_t)(((int64_t) buf_size) * end_y / h);
+        uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
+        uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

         init_put_bits(&s->thread_context[i]->pb, start, end - start);
     }
@@ -1547,13 +1553,27 @@ vbv_retry:
         }
         s->total_bits += s->frame_bits;
         avctx->frame_bits = s->frame_bits;
+
+        pkt->pts = s->current_picture.f.pts;
+        if (!s->low_delay) {
+            if (!s->current_picture.f.coded_picture_number)
+                pkt->dts = pkt->pts - s->dts_delta;
+            else
+                pkt->dts = s->reordered_pts;
+            s->reordered_pts = s->input_picture[0]->f.pts;
+        } else
+            pkt->dts = pkt->pts;
+        if (s->current_picture.f.key_frame)
+            pkt->flags |= AV_PKT_FLAG_KEY;
     } else {
         assert((put_bits_ptr(&s->pb) == s->pb.buf));
         s->frame_bits = 0;
     }
     assert((s->frame_bits & 7) == 0);

-    return s->frame_bits / 8;
+    pkt->size   = s->frame_bits / 8;
+    *got_packet = !!pkt->size;
+    return 0;
 }

 static inline void dct_single_coeff_elimination(MpegEncContext *s,
@@ -4042,7 +4062,7 @@ AVCodec ff_h263_encoder = {
     .id             = CODEC_ID_H263,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
@@ -4069,7 +4089,7 @@ AVCodec ff_h263p_encoder = {
     .id             = CODEC_ID_H263P,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .capabilities = CODEC_CAP_SLICE_THREADS,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
@@ -4083,7 +4103,7 @@ AVCodec ff_msmpeg4v2_encoder = {
     .id             = CODEC_ID_MSMPEG4V2,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
@@ -4095,7 +4115,7 @@ AVCodec ff_msmpeg4v3_encoder = {
     .id             = CODEC_ID_MSMPEG4V3,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
@@ -4107,7 +4127,7 @@ AVCodec ff_wmv1_encoder = {
     .id             = CODEC_ID_WMV1,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
@@ -62,7 +62,7 @@ AVCodec ff_rv10_encoder = {
     .id             = CODEC_ID_RV10,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
@@ -63,7 +63,7 @@ AVCodec ff_rv20_encoder = {
     .id             = CODEC_ID_RV20,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
@@ -217,7 +217,7 @@ AVCodec ff_wmv2_encoder = {
     .id             = CODEC_ID_WMV2,
     .priv_data_size = sizeof(Wmv2Context),
     .init           = wmv2_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),