This reverts commit bf36dc50ea448999c8f8c7a35f6139a7040f6275, reversing
changes made to b7fc2693c70fe72936e4ce124c802ac23857c476.
Conflicts:
libavcodec/h264.c
Keeping support for the old VDPAU API has been requested by our VDPAU maintainer
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
... | ... |
@@ -1937,8 +1937,14 @@ h264_vda_decoder_deps="vda" |
1937 | 1937 |
h264_vda_decoder_select="h264_decoder" |
1938 | 1938 |
h264_vda_hwaccel_deps="vda" |
1939 | 1939 |
h264_vda_hwaccel_select="h264_decoder" |
1940 |
+h264_vdpau_decoder_deps="vdpau" |
|
1941 |
+h264_vdpau_decoder_select="h264_decoder" |
|
1940 | 1942 |
h264_vdpau_hwaccel_deps="vdpau" |
1941 | 1943 |
h264_vdpau_hwaccel_select="h264_decoder" |
1944 |
+mpeg_vdpau_decoder_deps="vdpau" |
|
1945 |
+mpeg_vdpau_decoder_select="mpeg2video_decoder" |
|
1946 |
+mpeg1_vdpau_decoder_deps="vdpau" |
|
1947 |
+mpeg1_vdpau_decoder_select="mpeg1video_decoder" |
|
1942 | 1948 |
mpeg1_vdpau_hwaccel_deps="vdpau" |
1943 | 1949 |
mpeg1_vdpau_hwaccel_select="mpeg1video_decoder" |
1944 | 1950 |
mpeg2_crystalhd_decoder_select="crystalhd" |
... | ... |
@@ -1951,6 +1957,8 @@ mpeg2_vdpau_hwaccel_select="mpeg2video_decoder" |
1951 | 1951 |
mpeg4_crystalhd_decoder_select="crystalhd" |
1952 | 1952 |
mpeg4_vaapi_hwaccel_deps="vaapi" |
1953 | 1953 |
mpeg4_vaapi_hwaccel_select="mpeg4_decoder" |
1954 |
+mpeg4_vdpau_decoder_deps="vdpau" |
|
1955 |
+mpeg4_vdpau_decoder_select="mpeg4_decoder" |
|
1954 | 1956 |
mpeg4_vdpau_hwaccel_deps="vdpau" |
1955 | 1957 |
mpeg4_vdpau_hwaccel_select="mpeg4_decoder" |
1956 | 1958 |
msmpeg4_crystalhd_decoder_select="crystalhd" |
... | ... |
@@ -1959,11 +1967,14 @@ vc1_dxva2_hwaccel_deps="dxva2" |
1959 | 1959 |
vc1_dxva2_hwaccel_select="vc1_decoder" |
1960 | 1960 |
vc1_vaapi_hwaccel_deps="vaapi" |
1961 | 1961 |
vc1_vaapi_hwaccel_select="vc1_decoder" |
1962 |
+vc1_vdpau_decoder_deps="vdpau" |
|
1963 |
+vc1_vdpau_decoder_select="vc1_decoder" |
|
1962 | 1964 |
vc1_vdpau_hwaccel_deps="vdpau" |
1963 | 1965 |
vc1_vdpau_hwaccel_select="vc1_decoder" |
1964 | 1966 |
wmv3_crystalhd_decoder_select="crystalhd" |
1965 | 1967 |
wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel" |
1966 | 1968 |
wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel" |
1969 |
+wmv3_vdpau_decoder_select="vc1_vdpau_decoder" |
|
1967 | 1970 |
wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel" |
1968 | 1971 |
|
1969 | 1972 |
# parsers |
... | ... |
@@ -163,6 +163,7 @@ void avcodec_register_all(void) |
163 | 163 |
REGISTER_DECODER(H264, h264); |
164 | 164 |
REGISTER_DECODER(H264_CRYSTALHD, h264_crystalhd); |
165 | 165 |
REGISTER_DECODER(H264_VDA, h264_vda); |
166 |
+ REGISTER_DECODER(H264_VDPAU, h264_vdpau); |
|
166 | 167 |
REGISTER_ENCDEC (HUFFYUV, huffyuv); |
167 | 168 |
REGISTER_DECODER(IDCIN, idcin); |
168 | 169 |
REGISTER_DECODER(IFF_BYTERUN1, iff_byterun1); |
... | ... |
@@ -191,7 +192,10 @@ void avcodec_register_all(void) |
191 | 191 |
REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video); |
192 | 192 |
REGISTER_ENCDEC (MPEG4, mpeg4); |
193 | 193 |
REGISTER_DECODER(MPEG4_CRYSTALHD, mpeg4_crystalhd); |
194 |
+ REGISTER_DECODER(MPEG4_VDPAU, mpeg4_vdpau); |
|
194 | 195 |
REGISTER_DECODER(MPEGVIDEO, mpegvideo); |
196 |
+ REGISTER_DECODER(MPEG_VDPAU, mpeg_vdpau); |
|
197 |
+ REGISTER_DECODER(MPEG1_VDPAU, mpeg1_vdpau); |
|
195 | 198 |
REGISTER_DECODER(MPEG2_CRYSTALHD, mpeg2_crystalhd); |
196 | 199 |
REGISTER_DECODER(MSA1, msa1); |
197 | 200 |
REGISTER_DECODER(MSMPEG4_CRYSTALHD, msmpeg4_crystalhd); |
... | ... |
@@ -270,6 +274,7 @@ void avcodec_register_all(void) |
270 | 270 |
REGISTER_DECODER(VBLE, vble); |
271 | 271 |
REGISTER_DECODER(VC1, vc1); |
272 | 272 |
REGISTER_DECODER(VC1_CRYSTALHD, vc1_crystalhd); |
273 |
+ REGISTER_DECODER(VC1_VDPAU, vc1_vdpau); |
|
273 | 274 |
REGISTER_DECODER(VC1IMAGE, vc1image); |
274 | 275 |
REGISTER_DECODER(VCR1, vcr1); |
275 | 276 |
REGISTER_DECODER(VMDVIDEO, vmdvideo); |
... | ... |
@@ -286,6 +291,7 @@ void avcodec_register_all(void) |
286 | 286 |
REGISTER_ENCDEC (WMV2, wmv2); |
287 | 287 |
REGISTER_DECODER(WMV3, wmv3); |
288 | 288 |
REGISTER_DECODER(WMV3_CRYSTALHD, wmv3_crystalhd); |
289 |
+ REGISTER_DECODER(WMV3_VDPAU, wmv3_vdpau); |
|
289 | 290 |
REGISTER_DECODER(WMV3IMAGE, wmv3image); |
290 | 291 |
REGISTER_DECODER(WNV1, wnv1); |
291 | 292 |
REGISTER_DECODER(XAN_WC3, xan_wc3); |
... | ... |
@@ -854,6 +854,7 @@ void ff_er_frame_end(ERContext *s) |
854 | 854 |
if (!s->avctx->err_recognition || s->error_count == 0 || |
855 | 855 |
s->avctx->lowres || |
856 | 856 |
s->avctx->hwaccel || |
857 |
+ s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || |
|
857 | 858 |
!s->cur_pic || s->cur_pic->field_picture || |
858 | 859 |
s->error_count == 3 * s->mb_width * |
859 | 860 |
(s->avctx->skip_top + s->avctx->skip_bottom)) { |
... | ... |
@@ -36,6 +36,7 @@ |
36 | 36 |
#include "h263_parser.h" |
37 | 37 |
#include "mpeg4video_parser.h" |
38 | 38 |
#include "msmpeg4.h" |
39 |
+#include "vdpau_internal.h" |
|
39 | 40 |
#include "thread.h" |
40 | 41 |
#include "flv.h" |
41 | 42 |
#include "mpeg4video.h" |
... | ... |
@@ -651,6 +652,11 @@ retry: |
651 | 651 |
if (!s->divx_packed && !avctx->hwaccel) |
652 | 652 |
ff_thread_finish_setup(avctx); |
653 | 653 |
|
654 |
+ if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) { |
|
655 |
+ ff_vdpau_mpeg4_decode_picture(s, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); |
|
656 |
+ goto frame_end; |
|
657 |
+ } |
|
658 |
+ |
|
654 | 659 |
if (avctx->hwaccel) { |
655 | 660 |
if ((ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer)) < 0) |
656 | 661 |
return ret; |
... | ... |
@@ -695,6 +701,7 @@ retry: |
695 | 695 |
} |
696 | 696 |
|
697 | 697 |
av_assert1(s->bitstream_buffer_size==0); |
698 |
+frame_end: |
|
698 | 699 |
/* divx 5.01+ bitstream reorder stuff */ |
699 | 700 |
if(s->codec_id==AV_CODEC_ID_MPEG4 && s->divx_packed){ |
700 | 701 |
int current_pos= s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb)>>3); |
... | ... |
@@ -46,6 +46,7 @@ |
46 | 46 |
#include "rectangle.h" |
47 | 47 |
#include "svq3.h" |
48 | 48 |
#include "thread.h" |
49 |
+#include "vdpau_internal.h" |
|
49 | 50 |
|
50 | 51 |
// #undef NDEBUG |
51 | 52 |
#include <assert.h> |
... | ... |
@@ -2827,6 +2828,10 @@ static int field_end(H264Context *h, int in_setup) |
2827 | 2827 |
int err = 0; |
2828 | 2828 |
h->mb_y = 0; |
2829 | 2829 |
|
2830 |
+ if (CONFIG_H264_VDPAU_DECODER && |
|
2831 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2832 |
+ ff_vdpau_h264_set_reference_frames(h); |
|
2833 |
+ |
|
2830 | 2834 |
if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { |
2831 | 2835 |
if (!h->droppable) { |
2832 | 2836 |
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); |
... | ... |
@@ -2844,6 +2849,10 @@ static int field_end(H264Context *h, int in_setup) |
2844 | 2844 |
"hardware accelerator failed to decode picture\n"); |
2845 | 2845 |
} |
2846 | 2846 |
|
2847 |
+ if (CONFIG_H264_VDPAU_DECODER && |
|
2848 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
2849 |
+ ff_vdpau_h264_picture_complete(h); |
|
2850 |
+ |
|
2847 | 2851 |
/* |
2848 | 2852 |
* FIXME: Error handling code does not seem to support interlaced |
2849 | 2853 |
* when slices span multiple rows |
... | ... |
@@ -2949,6 +2958,13 @@ static int h264_set_parameter_from_sps(H264Context *h) |
2949 | 2949 |
|
2950 | 2950 |
if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || |
2951 | 2951 |
h->cur_chroma_format_idc != h->sps.chroma_format_idc) { |
2952 |
+ if (h->avctx->codec && |
|
2953 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU && |
|
2954 |
+ (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { |
|
2955 |
+ av_log(h->avctx, AV_LOG_ERROR, |
|
2956 |
+ "VDPAU decoding does not support video colorspace.\n"); |
|
2957 |
+ return AVERROR_INVALIDDATA; |
|
2958 |
+ } |
|
2952 | 2959 |
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 && |
2953 | 2960 |
h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13) { |
2954 | 2961 |
h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; |
... | ... |
@@ -4474,7 +4490,8 @@ static int execute_decode_slices(H264Context *h, int context_count) |
4474 | 4474 |
H264Context *hx; |
4475 | 4475 |
int i; |
4476 | 4476 |
|
4477 |
- if (h->avctx->hwaccel) |
|
4477 |
+ if (h->avctx->hwaccel || |
|
4478 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
4478 | 4479 |
return 0; |
4479 | 4480 |
if (context_count == 1) { |
4480 | 4481 |
return decode_slice(avctx, &h); |
... | ... |
@@ -4723,6 +4740,9 @@ again: |
4723 | 4723 |
if (h->avctx->hwaccel && |
4724 | 4724 |
(ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0) |
4725 | 4725 |
return ret; |
4726 |
+ if (CONFIG_H264_VDPAU_DECODER && |
|
4727 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) |
|
4728 |
+ ff_vdpau_h264_picture_start(h); |
|
4726 | 4729 |
} |
4727 | 4730 |
|
4728 | 4731 |
if (hx->redundant_pic_count == 0 && |
... | ... |
@@ -4739,6 +4759,14 @@ again: |
4739 | 4739 |
consumed); |
4740 | 4740 |
if (ret < 0) |
4741 | 4741 |
return ret; |
4742 |
+ } else if (CONFIG_H264_VDPAU_DECODER && |
|
4743 |
+ h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { |
|
4744 |
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], |
|
4745 |
+ start_code, |
|
4746 |
+ sizeof(start_code)); |
|
4747 |
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], |
|
4748 |
+ &buf[buf_index - consumed], |
|
4749 |
+ consumed); |
|
4742 | 4750 |
} else |
4743 | 4751 |
context_count++; |
4744 | 4752 |
} |
... | ... |
@@ -5050,6 +5078,12 @@ static const AVClass h264_class = { |
5050 | 5050 |
.version = LIBAVUTIL_VERSION_INT, |
5051 | 5051 |
}; |
5052 | 5052 |
|
5053 |
+static const AVClass h264_vdpau_class = { |
|
5054 |
+ .class_name = "H264 VDPAU Decoder", |
|
5055 |
+ .item_name = av_default_item_name, |
|
5056 |
+ .option = h264_options, |
|
5057 |
+ .version = LIBAVUTIL_VERSION_INT, |
|
5058 |
+}; |
|
5053 | 5059 |
|
5054 | 5060 |
AVCodec ff_h264_decoder = { |
5055 | 5061 |
.name = "h264", |
... | ... |
@@ -5070,3 +5104,21 @@ AVCodec ff_h264_decoder = { |
5070 | 5070 |
.priv_class = &h264_class, |
5071 | 5071 |
}; |
5072 | 5072 |
|
5073 |
+#if CONFIG_H264_VDPAU_DECODER |
|
5074 |
+AVCodec ff_h264_vdpau_decoder = { |
|
5075 |
+ .name = "h264_vdpau", |
|
5076 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
5077 |
+ .id = AV_CODEC_ID_H264, |
|
5078 |
+ .priv_data_size = sizeof(H264Context), |
|
5079 |
+ .init = ff_h264_decode_init, |
|
5080 |
+ .close = h264_decode_end, |
|
5081 |
+ .decode = decode_frame, |
|
5082 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, |
|
5083 |
+ .flush = flush_dpb, |
|
5084 |
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"), |
|
5085 |
+ .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264, |
|
5086 |
+ AV_PIX_FMT_NONE}, |
|
5087 |
+ .profiles = NULL_IF_CONFIG_SMALL(profiles), |
|
5088 |
+ .priv_class = &h264_vdpau_class, |
|
5089 |
+}; |
|
5090 |
+#endif |
... | ... |
@@ -35,6 +35,7 @@ |
35 | 35 |
#include "mpeg12.h" |
36 | 36 |
#include "mpeg12data.h" |
37 | 37 |
#include "bytestream.h" |
38 |
+#include "vdpau_internal.h" |
|
38 | 39 |
#include "xvmc_internal.h" |
39 | 40 |
#include "thread.h" |
40 | 41 |
|
... | ... |
@@ -1152,6 +1153,10 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { |
1152 | 1152 |
AV_PIX_FMT_NONE |
1153 | 1153 |
}; |
1154 | 1154 |
|
1155 |
+static inline int uses_vdpau(AVCodecContext *avctx) { |
|
1156 |
+ return avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG2; |
|
1157 |
+} |
|
1158 |
+ |
|
1155 | 1159 |
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) |
1156 | 1160 |
{ |
1157 | 1161 |
Mpeg1Context *s1 = avctx->priv_data; |
... | ... |
@@ -1178,7 +1183,7 @@ static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx) |
1178 | 1178 |
avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); |
1179 | 1179 |
// until then pix_fmt may be changed right after codec init |
1180 | 1180 |
if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || |
1181 |
- avctx->hwaccel) |
|
1181 |
+ avctx->hwaccel || uses_vdpau(avctx)) |
|
1182 | 1182 |
if (avctx->idct_algo == FF_IDCT_AUTO) |
1183 | 1183 |
avctx->idct_algo = FF_IDCT_SIMPLE; |
1184 | 1184 |
} |
... | ... |
@@ -2165,6 +2170,10 @@ static int decode_chunks(AVCodecContext *avctx, |
2165 | 2165 |
s2->er.error_count += s2->thread_context[i]->er.error_count; |
2166 | 2166 |
} |
2167 | 2167 |
|
2168 |
+ if ((CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER) |
|
2169 |
+ && uses_vdpau(avctx)) |
|
2170 |
+ ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count); |
|
2171 |
+ |
|
2168 | 2172 |
ret = slice_end(avctx, picture); |
2169 | 2173 |
if (ret < 0) |
2170 | 2174 |
return ret; |
... | ... |
@@ -2392,6 +2401,11 @@ static int decode_chunks(AVCodecContext *avctx, |
2392 | 2392 |
return AVERROR_INVALIDDATA; |
2393 | 2393 |
} |
2394 | 2394 |
|
2395 |
+ if (uses_vdpau(avctx)) { |
|
2396 |
+ s->slice_count++; |
|
2397 |
+ break; |
|
2398 |
+ } |
|
2399 |
+ |
|
2395 | 2400 |
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && |
2396 | 2401 |
!avctx->hwaccel) { |
2397 | 2402 |
int threshold = (s2->mb_height * s->slice_count + |
... | ... |
@@ -2608,3 +2622,35 @@ AVCodec ff_mpeg_xvmc_decoder = { |
2608 | 2608 |
}; |
2609 | 2609 |
|
2610 | 2610 |
#endif |
2611 |
+ |
|
2612 |
+#if CONFIG_MPEG_VDPAU_DECODER |
|
2613 |
+AVCodec ff_mpeg_vdpau_decoder = { |
|
2614 |
+ .name = "mpegvideo_vdpau", |
|
2615 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
2616 |
+ .id = AV_CODEC_ID_MPEG2VIDEO, |
|
2617 |
+ .priv_data_size = sizeof(Mpeg1Context), |
|
2618 |
+ .init = mpeg_decode_init, |
|
2619 |
+ .close = mpeg_decode_end, |
|
2620 |
+ .decode = mpeg_decode_frame, |
|
2621 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | |
|
2622 |
+ CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, |
|
2623 |
+ .flush = flush, |
|
2624 |
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"), |
|
2625 |
+}; |
|
2626 |
+#endif |
|
2627 |
+ |
|
2628 |
+#if CONFIG_MPEG1_VDPAU_DECODER |
|
2629 |
+AVCodec ff_mpeg1_vdpau_decoder = { |
|
2630 |
+ .name = "mpeg1video_vdpau", |
|
2631 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
2632 |
+ .id = AV_CODEC_ID_MPEG1VIDEO, |
|
2633 |
+ .priv_data_size = sizeof(Mpeg1Context), |
|
2634 |
+ .init = mpeg_decode_init, |
|
2635 |
+ .close = mpeg_decode_end, |
|
2636 |
+ .decode = mpeg_decode_frame, |
|
2637 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | |
|
2638 |
+ CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, |
|
2639 |
+ .flush = flush, |
|
2640 |
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"), |
|
2641 |
+}; |
|
2642 |
+#endif |
... | ... |
@@ -2384,3 +2384,21 @@ AVCodec ff_mpeg4_decoder = { |
2384 | 2384 |
.priv_class = &mpeg4_class, |
2385 | 2385 |
}; |
2386 | 2386 |
|
2387 |
+ |
|
2388 |
+#if CONFIG_MPEG4_VDPAU_DECODER |
|
2389 |
+AVCodec ff_mpeg4_vdpau_decoder = { |
|
2390 |
+ .name = "mpeg4_vdpau", |
|
2391 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
2392 |
+ .id = AV_CODEC_ID_MPEG4, |
|
2393 |
+ .priv_data_size = sizeof(MpegEncContext), |
|
2394 |
+ .init = decode_init, |
|
2395 |
+ .close = ff_h263_decode_end, |
|
2396 |
+ .decode = ff_h263_decode_frame, |
|
2397 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | |
|
2398 |
+ CODEC_CAP_HWACCEL_VDPAU, |
|
2399 |
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"), |
|
2400 |
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_MPEG4, |
|
2401 |
+ AV_PIX_FMT_NONE }, |
|
2402 |
+ .priv_class = &mpeg4_vdpau_class, |
|
2403 |
+}; |
|
2404 |
+#endif |
... | ... |
@@ -1722,6 +1722,7 @@ void ff_MPV_frame_end(MpegEncContext *s) |
1722 | 1722 |
ff_xvmc_field_end(s); |
1723 | 1723 |
} else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) && |
1724 | 1724 |
!s->avctx->hwaccel && |
1725 |
+ !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && |
|
1725 | 1726 |
s->unrestricted_mv && |
1726 | 1727 |
s->current_picture.reference && |
1727 | 1728 |
!s->intra_only && |
... | ... |
@@ -2925,6 +2926,7 @@ void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, |
2925 | 2925 |
} |
2926 | 2926 |
|
2927 | 2927 |
if (!avctx->hwaccel && |
2928 |
+ !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && |
|
2928 | 2929 |
draw_edges && |
2929 | 2930 |
cur->reference && |
2930 | 2931 |
!(avctx->flags & CODEC_FLAG_EMU_EDGE)) { |
... | ... |
@@ -38,6 +38,7 @@ |
38 | 38 |
#include "msmpeg4data.h" |
39 | 39 |
#include "unary.h" |
40 | 40 |
#include "mathops.h" |
41 |
+#include "vdpau_internal.h" |
|
41 | 42 |
#include "libavutil/avassert.h" |
42 | 43 |
|
43 | 44 |
#undef NDEBUG |
... | ... |
@@ -5778,6 +5779,13 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, |
5778 | 5778 |
return buf_size; |
5779 | 5779 |
} |
5780 | 5780 |
|
5781 |
+ if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) { |
|
5782 |
+ if (v->profile < PROFILE_ADVANCED) |
|
5783 |
+ avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3; |
|
5784 |
+ else |
|
5785 |
+ avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1; |
|
5786 |
+ } |
|
5787 |
+ |
|
5781 | 5788 |
//for advanced profile we may need to parse and unescape data |
5782 | 5789 |
if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { |
5783 | 5790 |
int buf_size2 = 0; |
... | ... |
@@ -5796,7 +5804,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, |
5796 | 5796 |
if (size <= 0) continue; |
5797 | 5797 |
switch (AV_RB32(start)) { |
5798 | 5798 |
case VC1_CODE_FRAME: |
5799 |
- if (avctx->hwaccel) |
|
5799 |
+ if (avctx->hwaccel || |
|
5800 |
+ s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) |
|
5800 | 5801 |
buf_start = start; |
5801 | 5802 |
buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); |
5802 | 5803 |
break; |
... | ... |
@@ -5999,7 +6008,15 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, |
5999 | 5999 |
s->me.qpel_put = s->dsp.put_qpel_pixels_tab; |
6000 | 6000 |
s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; |
6001 | 6001 |
|
6002 |
- if (avctx->hwaccel) { |
|
6002 |
+ if ((CONFIG_VC1_VDPAU_DECODER) |
|
6003 |
+ &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) { |
|
6004 |
+ if (v->field_mode && buf_start_second_field) { |
|
6005 |
+ ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start); |
|
6006 |
+ ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field); |
|
6007 |
+ } else { |
|
6008 |
+ ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); |
|
6009 |
+ } |
|
6010 |
+ } else if (avctx->hwaccel) { |
|
6003 | 6011 |
if (v->field_mode && buf_start_second_field) { |
6004 | 6012 |
// decode first field |
6005 | 6013 |
s->picture_structure = PICT_BOTTOM_FIELD - v->tff; |
... | ... |
@@ -6232,6 +6249,38 @@ AVCodec ff_wmv3_decoder = { |
6232 | 6232 |
}; |
6233 | 6233 |
#endif |
6234 | 6234 |
|
6235 |
+#if CONFIG_WMV3_VDPAU_DECODER |
|
6236 |
+AVCodec ff_wmv3_vdpau_decoder = { |
|
6237 |
+ .name = "wmv3_vdpau", |
|
6238 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
6239 |
+ .id = AV_CODEC_ID_WMV3, |
|
6240 |
+ .priv_data_size = sizeof(VC1Context), |
|
6241 |
+ .init = vc1_decode_init, |
|
6242 |
+ .close = ff_vc1_decode_end, |
|
6243 |
+ .decode = vc1_decode_frame, |
|
6244 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, |
|
6245 |
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"), |
|
6246 |
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE }, |
|
6247 |
+ .profiles = NULL_IF_CONFIG_SMALL(profiles) |
|
6248 |
+}; |
|
6249 |
+#endif |
|
6250 |
+ |
|
6251 |
+#if CONFIG_VC1_VDPAU_DECODER |
|
6252 |
+AVCodec ff_vc1_vdpau_decoder = { |
|
6253 |
+ .name = "vc1_vdpau", |
|
6254 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
6255 |
+ .id = AV_CODEC_ID_VC1, |
|
6256 |
+ .priv_data_size = sizeof(VC1Context), |
|
6257 |
+ .init = vc1_decode_init, |
|
6258 |
+ .close = ff_vc1_decode_end, |
|
6259 |
+ .decode = vc1_decode_frame, |
|
6260 |
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, |
|
6261 |
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"), |
|
6262 |
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE }, |
|
6263 |
+ .profiles = NULL_IF_CONFIG_SMALL(profiles) |
|
6264 |
+}; |
|
6265 |
+#endif |
|
6266 |
+ |
|
6235 | 6267 |
#if CONFIG_WMV3IMAGE_DECODER |
6236 | 6268 |
AVCodec ff_wmv3image_decoder = { |
6237 | 6269 |
.name = "wmv3image", |
... | ... |
@@ -101,4 +101,341 @@ int ff_vdpau_add_buffer(Picture *pic, const uint8_t *buf, uint32_t size) |
101 | 101 |
buffers->bitstream_bytes = size; |
102 | 102 |
return 0; |
103 | 103 |
} |
104 |
+ |
|
105 |
+/* Obsolete non-hwaccel VDPAU support below... */ |
|
106 |
+ |
|
107 |
+void ff_vdpau_h264_set_reference_frames(H264Context *h) |
|
108 |
+{ |
|
109 |
+ struct vdpau_render_state *render, *render_ref; |
|
110 |
+ VdpReferenceFrameH264 *rf, *rf2; |
|
111 |
+ Picture *pic; |
|
112 |
+ int i, list, pic_frame_idx; |
|
113 |
+ |
|
114 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
115 |
+ assert(render); |
|
116 |
+ |
|
117 |
+ rf = &render->info.h264.referenceFrames[0]; |
|
118 |
+#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames) |
|
119 |
+ |
|
120 |
+ for (list = 0; list < 2; ++list) { |
|
121 |
+ Picture **lp = list ? h->long_ref : h->short_ref; |
|
122 |
+ int ls = list ? 16 : h->short_ref_count; |
|
123 |
+ |
|
124 |
+ for (i = 0; i < ls; ++i) { |
|
125 |
+ pic = lp[i]; |
|
126 |
+ if (!pic || !pic->reference) |
|
127 |
+ continue; |
|
128 |
+ pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num; |
|
129 |
+ |
|
130 |
+ render_ref = (struct vdpau_render_state *)pic->f.data[0]; |
|
131 |
+ assert(render_ref); |
|
132 |
+ |
|
133 |
+ rf2 = &render->info.h264.referenceFrames[0]; |
|
134 |
+ while (rf2 != rf) { |
|
135 |
+ if ( |
|
136 |
+ (rf2->surface == render_ref->surface) |
|
137 |
+ && (rf2->is_long_term == pic->long_ref) |
|
138 |
+ && (rf2->frame_idx == pic_frame_idx) |
|
139 |
+ ) |
|
140 |
+ break; |
|
141 |
+ ++rf2; |
|
142 |
+ } |
|
143 |
+ if (rf2 != rf) { |
|
144 |
+ rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; |
|
145 |
+ rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; |
|
146 |
+ continue; |
|
147 |
+ } |
|
148 |
+ |
|
149 |
+ if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT]) |
|
150 |
+ continue; |
|
151 |
+ |
|
152 |
+ rf->surface = render_ref->surface; |
|
153 |
+ rf->is_long_term = pic->long_ref; |
|
154 |
+ rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; |
|
155 |
+ rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; |
|
156 |
+ rf->field_order_cnt[0] = pic->field_poc[0]; |
|
157 |
+ rf->field_order_cnt[1] = pic->field_poc[1]; |
|
158 |
+ rf->frame_idx = pic_frame_idx; |
|
159 |
+ |
|
160 |
+ ++rf; |
|
161 |
+ } |
|
162 |
+ } |
|
163 |
+ |
|
164 |
+ for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) { |
|
165 |
+ rf->surface = VDP_INVALID_HANDLE; |
|
166 |
+ rf->is_long_term = 0; |
|
167 |
+ rf->top_is_reference = 0; |
|
168 |
+ rf->bottom_is_reference = 0; |
|
169 |
+ rf->field_order_cnt[0] = 0; |
|
170 |
+ rf->field_order_cnt[1] = 0; |
|
171 |
+ rf->frame_idx = 0; |
|
172 |
+ } |
|
173 |
+} |
|
174 |
+ |
|
175 |
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size) |
|
176 |
+{ |
|
177 |
+ struct vdpau_render_state *render = (struct vdpau_render_state*)data; |
|
178 |
+ assert(render); |
|
179 |
+ |
|
180 |
+ render->bitstream_buffers= av_fast_realloc( |
|
181 |
+ render->bitstream_buffers, |
|
182 |
+ &render->bitstream_buffers_allocated, |
|
183 |
+ sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1) |
|
184 |
+ ); |
|
185 |
+ |
|
186 |
+ render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION; |
|
187 |
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf; |
|
188 |
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size; |
|
189 |
+ render->bitstream_buffers_used++; |
|
190 |
+} |
|
191 |
+ |
|
192 |
+#if CONFIG_H264_VDPAU_DECODER |
|
193 |
+void ff_vdpau_h264_picture_start(H264Context *h) |
|
194 |
+{ |
|
195 |
+ struct vdpau_render_state *render; |
|
196 |
+ int i; |
|
197 |
+ |
|
198 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
199 |
+ assert(render); |
|
200 |
+ |
|
201 |
+ for (i = 0; i < 2; ++i) { |
|
202 |
+ int foc = h->cur_pic_ptr->field_poc[i]; |
|
203 |
+ if (foc == INT_MAX) |
|
204 |
+ foc = 0; |
|
205 |
+ render->info.h264.field_order_cnt[i] = foc; |
|
206 |
+ } |
|
207 |
+ |
|
208 |
+ render->info.h264.frame_num = h->frame_num; |
|
209 |
+} |
|
210 |
+ |
|
211 |
+void ff_vdpau_h264_picture_complete(H264Context *h) |
|
212 |
+{ |
|
213 |
+ struct vdpau_render_state *render; |
|
214 |
+ |
|
215 |
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; |
|
216 |
+ assert(render); |
|
217 |
+ |
|
218 |
+ render->info.h264.slice_count = h->slice_num; |
|
219 |
+ if (render->info.h264.slice_count < 1) |
|
220 |
+ return; |
|
221 |
+ |
|
222 |
+ render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE; |
|
223 |
+ render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME; |
|
224 |
+ render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; |
|
225 |
+ render->info.h264.num_ref_frames = h->sps.ref_frame_count; |
|
226 |
+ render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag; |
|
227 |
+ render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; |
|
228 |
+ render->info.h264.weighted_pred_flag = h->pps.weighted_pred; |
|
229 |
+ render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc; |
|
230 |
+ render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; |
|
231 |
+ render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode; |
|
232 |
+ render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; |
|
233 |
+ render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; |
|
234 |
+ render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26; |
|
235 |
+ render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; |
|
236 |
+ render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; |
|
237 |
+ render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; |
|
238 |
+ render->info.h264.pic_order_cnt_type = h->sps.poc_type; |
|
239 |
+ render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4; |
|
240 |
+ render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; |
|
241 |
+ render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; |
|
242 |
+ render->info.h264.entropy_coding_mode_flag = h->pps.cabac; |
|
243 |
+ render->info.h264.pic_order_present_flag = h->pps.pic_order_present; |
|
244 |
+ render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; |
|
245 |
+ render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; |
|
246 |
+ memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4)); |
|
247 |
+ memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0])); |
|
248 |
+ memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0])); |
|
249 |
+ |
|
250 |
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height); |
|
251 |
+ render->bitstream_buffers_used = 0; |
|
252 |
+} |
|
253 |
+#endif /* CONFIG_H264_VDPAU_DECODER */ |
|
254 |
+ |
|
255 |
+#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER |
|
256 |
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, |
|
257 |
+ int buf_size, int slice_count) |
|
258 |
+{ |
|
259 |
+ struct vdpau_render_state *render, *last, *next; |
|
260 |
+ int i; |
|
261 |
+ |
|
262 |
+ if (!s->current_picture_ptr) return; |
|
263 |
+ |
|
264 |
+ render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; |
|
265 |
+ assert(render); |
|
266 |
+ |
|
267 |
+ /* fill VdpPictureInfoMPEG1Or2 struct */ |
|
268 |
+ render->info.mpeg.picture_structure = s->picture_structure; |
|
269 |
+ render->info.mpeg.picture_coding_type = s->pict_type; |
|
270 |
+ render->info.mpeg.intra_dc_precision = s->intra_dc_precision; |
|
271 |
+ render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct; |
|
272 |
+ render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors; |
|
273 |
+ render->info.mpeg.intra_vlc_format = s->intra_vlc_format; |
|
274 |
+ render->info.mpeg.alternate_scan = s->alternate_scan; |
|
275 |
+ render->info.mpeg.q_scale_type = s->q_scale_type; |
|
276 |
+ render->info.mpeg.top_field_first = s->top_field_first; |
|
277 |
+ render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2 |
|
278 |
+ render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2 |
|
279 |
+ render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert. |
|
280 |
+ render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1]; |
|
281 |
+ render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0]; |
|
282 |
+ render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1]; |
|
283 |
+ for (i = 0; i < 64; ++i) { |
|
284 |
+ render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i]; |
|
285 |
+ render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i]; |
|
286 |
+ } |
|
287 |
+ |
|
288 |
+ render->info.mpeg.forward_reference = VDP_INVALID_HANDLE; |
|
289 |
+ render->info.mpeg.backward_reference = VDP_INVALID_HANDLE; |
|
290 |
+ |
|
291 |
+ switch(s->pict_type){ |
|
292 |
+ case AV_PICTURE_TYPE_B: |
|
293 |
+ next = (struct vdpau_render_state *)s->next_picture.f.data[0]; |
|
294 |
+ assert(next); |
|
295 |
+ render->info.mpeg.backward_reference = next->surface; |
|
296 |
+ // no return here, going to set forward prediction |
|
297 |
+ case AV_PICTURE_TYPE_P: |
|
298 |
+ last = (struct vdpau_render_state *)s->last_picture.f.data[0]; |
|
299 |
+ if (!last) // FIXME: Does this test make sense? |
|
300 |
+ last = render; // predict second field from the first |
|
301 |
+ render->info.mpeg.forward_reference = last->surface; |
|
302 |
+ } |
|
303 |
+ |
|
304 |
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); |
|
305 |
+ |
|
306 |
+ render->info.mpeg.slice_count = slice_count; |
|
307 |
+ |
|
308 |
+ if (slice_count) |
|
309 |
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); |
|
310 |
+ render->bitstream_buffers_used = 0; |
|
311 |
+} |
|
312 |
+#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */ |
|
313 |
+ |
|
314 |
#if CONFIG_VC1_VDPAU_DECODER
/**
 * Fill the VDPAU VC-1 picture-info structure for the current picture and
 * hand the bitstream data to the VDPAU renderer.
 *
 * @param s        MPEG decoding context (embedded in the VC1Context held in
 *                 s->avctx->priv_data)
 * @param buf      pointer to the picture's bitstream data
 * @param buf_size size of @p buf in bytes
 *
 * Side effects: appends @p buf to the render state's bitstream buffer list,
 * triggers ff_mpeg_draw_horiz_band() (which invokes the VDPAU render
 * callback), then resets bitstream_buffers_used for the next picture.
 */
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size)
{
    VC1Context *v = s->avctx->priv_data;
    struct vdpau_render_state *render, *last, *next;

    render = (struct vdpau_render_state *)s->current_picture.f.data[0];
    assert(render);

    /* fill LvPictureInfoVC1 struct */
    render->info.vc1.frame_coding_mode  = v->fcm ? v->fcm + 1 : 0;
    render->info.vc1.postprocflag       = v->postprocflag;
    render->info.vc1.pulldown           = v->broadcast;
    render->info.vc1.interlace          = v->interlace;
    render->info.vc1.tfcntrflag         = v->tfcntrflag;
    render->info.vc1.finterpflag        = v->finterpflag;
    render->info.vc1.psf                = v->psf;
    render->info.vc1.dquant             = v->dquant;
    render->info.vc1.panscan_flag       = v->panscanflag;
    render->info.vc1.refdist_flag       = v->refdist_flag;
    render->info.vc1.quantizer          = v->quantizer_mode;
    render->info.vc1.extended_mv        = v->extended_mv;
    render->info.vc1.extended_dmv       = v->extended_dmv;
    render->info.vc1.overlap            = v->overlap;
    render->info.vc1.vstransform       = v->vstransform;
    render->info.vc1.loopfilter         = v->s.loop_filter;
    render->info.vc1.fastuvmc           = v->fastuvmc;
    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
    render->info.vc1.range_mapy         = v->range_mapy;
    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
    render->info.vc1.range_mapuv        = v->range_mapuv;
    /* Specific to simple/main profile only */
    render->info.vc1.multires           = v->multires;
    render->info.vc1.syncmarker         = v->s.resync_marker;
    /* two range-reduction bits: sequence-level flag | frame-level flag */
    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
    render->info.vc1.maxbframes         = v->s.max_b_frames;

    render->info.vc1.deblockEnable      = v->postprocflag & 1;
    render->info.vc1.pquant             = v->pq;

    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;

    if (v->bi_type)
        render->info.vc1.picture_type = 4;
    else
        /* maps I/P/B (1/2/3) to VDPAU's 0/1/3 picture-type encoding */
        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.vc1.backward_reference = next->surface;
        // no break here, going to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.vc1.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    render->info.vc1.slice_count = 1;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}
#endif /* CONFIG_VC1_VDPAU_DECODER */
|
384 |
+ |
|
385 |
#if CONFIG_MPEG4_VDPAU_DECODER
/**
 * Populate the VDPAU MPEG-4 Part 2 picture-info structure for the current
 * picture and pass the bitstream on to the VDPAU renderer.
 *
 * @param s        MPEG decoding context of the MPEG-4 decoder
 * @param buf      pointer to the picture's bitstream data
 * @param buf_size size of @p buf in bytes
 */
void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr)
        return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    render->info.mpeg4.trd[0]                            = s->pp_time;
    render->info.mpeg4.trb[0]                            = s->pb_time;
    render->info.mpeg4.trd[1]                            = s->pp_field_time >> 1;
    render->info.mpeg4.trb[1]                            = s->pb_field_time >> 1;
    render->info.mpeg4.vop_time_increment_resolution     = s->avctx->time_base.den;
    render->info.mpeg4.vop_coding_type                   = 0;
    render->info.mpeg4.vop_fcode_forward                 = s->f_code;
    render->info.mpeg4.vop_fcode_backward                = s->b_code;
    render->info.mpeg4.resync_marker_disable             = !s->resync_marker;
    render->info.mpeg4.interlaced                        = !s->progressive_sequence;
    render->info.mpeg4.quant_type                        = s->mpeg_quant;
    render->info.mpeg4.quarter_sample                    = s->quarter_sample;
    render->info.mpeg4.short_video_header                = s->avctx->codec->id == AV_CODEC_ID_H263;
    render->info.mpeg4.rounding_control                  = s->no_rounding;
    render->info.mpeg4.alternate_vertical_scan_flag      = s->alternate_scan;
    render->info.mpeg4.top_field_first                   = s->top_field_first;

    /* copy both 8x8 quantizer matrices */
    for (i = 0; i < 64; ++i) {
        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }

    render->info.mpeg4.forward_reference                 = VDP_INVALID_HANDLE;
    render->info.mpeg4.backward_reference                = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.mpeg4.backward_reference            = next->surface;
        render->info.mpeg4.vop_coding_type               = 2;
        // no break here, going to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        assert(last);
        render->info.mpeg4.forward_reference             = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used                       = 0;
}
#endif /* CONFIG_MPEG4_VDPAU_DECODER */
|
440 |
+ |
|
104 | 441 |
/* @}*/ |
... | ... |
@@ -72,4 +72,21 @@ int ff_vdpau_common_start_frame(Picture *pic, |
72 | 72 |
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx); |
73 | 73 |
int ff_vdpau_add_buffer(Picture *pic, const uint8_t *buf, uint32_t buf_size); |
74 | 74 |
|
75 |
+ |
|
76 |
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, |
|
77 |
+ int buf_size); |
|
78 |
+ |
|
79 |
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, |
|
80 |
+ int buf_size, int slice_count); |
|
81 |
+ |
|
82 |
+void ff_vdpau_h264_picture_start(H264Context *h); |
|
83 |
+void ff_vdpau_h264_set_reference_frames(H264Context *h); |
|
84 |
+void ff_vdpau_h264_picture_complete(H264Context *h); |
|
85 |
+ |
|
86 |
+void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, |
|
87 |
+ int buf_size); |
|
88 |
+ |
|
89 |
+void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, |
|
90 |
+ int buf_size); |
|
91 |
+ |
|
75 | 92 |
#endif /* AVCODEC_VDPAU_INTERNAL_H */ |