With the introduction of HEVC 444 support, we technically have two
codecs that can handle 444 - HEVC and MJPEG. In the case of MJPEG,
it can decode, but can only output one of the semi-planar formats.
That means we need additional logic to decide whether to use a
444 output format or not.
... | ... |
@@ -298,7 +298,7 @@ int ff_nvdec_decode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "Unsupported chroma format\n");
         return AVERROR(ENOSYS);
     }
-    chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
+    chroma_444 = ctx->supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
 
     if (!avctx->hw_frames_ctx) {
         ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_CUDA);
... | ... |
@@ -587,7 +587,8 @@ static AVBufferRef *nvdec_alloc_dummy(int size)
 
 int ff_nvdec_frame_params(AVCodecContext *avctx,
                           AVBufferRef *hw_frames_ctx,
-                          int dpb_size)
+                          int dpb_size,
+                          int supports_444)
 {
     AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
     const AVPixFmtDescriptor *sw_desc;
... | ... |
@@ -608,7 +609,7 @@ int ff_nvdec_frame_params(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_VERBOSE, "Unsupported chroma format\n");
         return AVERROR(EINVAL);
     }
-    chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
+    chroma_444 = supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
 
     frames_ctx->format = AV_PIX_FMT_CUDA;
     frames_ctx->width = (avctx->coded_width + 1) & ~1;
... | ... |
@@ -61,6 +61,8 @@ typedef struct NVDECContext {
     unsigned *slice_offsets;
     int nb_slices;
     unsigned int slice_offsets_allocated;
+
+    int supports_444;
 } NVDECContext;
 
 int ff_nvdec_decode_init(AVCodecContext *avctx);
... | ... |
@@ -72,7 +74,8 @@ int ff_nvdec_simple_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
                                  uint32_t size);
 int ff_nvdec_frame_params(AVCodecContext *avctx,
                           AVBufferRef *hw_frames_ctx,
-                          int dpb_size);
+                          int dpb_size,
+                          int supports_444);
 int ff_nvdec_get_ref_idx(AVFrame *frame);
 
 #endif /* AVCODEC_NVDEC_H */
... | ... |
@@ -166,7 +166,7 @@ static int nvdec_h264_frame_params(AVCodecContext *avctx,
 {
     const H264Context *h = avctx->priv_data;
     const SPS *sps = h->ps.sps;
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames, 0);
 }
 
 const AVHWAccel ff_h264_nvdec_hwaccel = {
... | ... |
@@ -299,7 +299,13 @@ static int nvdec_hevc_frame_params(AVCodecContext *avctx,
 {
     const HEVCContext *s = avctx->priv_data;
     const HEVCSPS *sps = s->ps.sps;
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1, 1);
+}
+
+static int nvdec_hevc_decode_init(AVCodecContext *avctx) {
+    NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
+    ctx->supports_444 = 1;
+    return ff_nvdec_decode_init(avctx);
 }
 
 const AVHWAccel ff_hevc_nvdec_hwaccel = {
... | ... |
@@ -311,7 +317,7 @@ const AVHWAccel ff_hevc_nvdec_hwaccel = {
     .end_frame = ff_nvdec_end_frame,
     .decode_slice = nvdec_hevc_decode_slice,
     .frame_params = nvdec_hevc_frame_params,
-    .init = ff_nvdec_decode_init,
+    .init = nvdec_hevc_decode_init,
     .uninit = ff_nvdec_decode_uninit,
     .priv_data_size = sizeof(NVDECContext),
 };
... | ... |
@@ -66,7 +66,7 @@ static int nvdec_mjpeg_frame_params(AVCodecContext *avctx,
                                     AVBufferRef *hw_frames_ctx)
 {
     // Only need storage for the current frame
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1, 0);
 }
 
 #if CONFIG_MJPEG_NVDEC_HWACCEL
... | ... |
@@ -87,7 +87,7 @@ static int nvdec_mpeg12_frame_params(AVCodecContext *avctx,
                                      AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 #if CONFIG_MPEG2_NVDEC_HWACCEL
... | ... |
@@ -103,7 +103,7 @@ static int nvdec_mpeg4_frame_params(AVCodecContext *avctx,
                                     AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 const AVHWAccel ff_mpeg4_nvdec_hwaccel = {
... | ... |
@@ -107,7 +107,7 @@ static int nvdec_vc1_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 const AVHWAccel ff_vc1_nvdec_hwaccel = {
... | ... |
@@ -87,7 +87,7 @@ static int nvdec_vp8_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // VP8 uses a fixed size pool of 3 possible reference frames
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3, 0);
 }
 
 AVHWAccel ff_vp8_nvdec_hwaccel = {
... | ... |
@@ -166,7 +166,7 @@ static int nvdec_vp9_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // VP9 uses a fixed size pool of 8 possible reference frames
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8, 0);
 }
 
 const AVHWAccel ff_vp9_nvdec_hwaccel = {