This adds a new API, which allows the API user to query the required
AVHWFramesContext parameters. This also reduces code duplication across
the hwaccels by introducing ff_decode_get_hw_frames_ctx(), which uses
the new API function. It takes care of initializing the hw_frames_ctx
if needed, and does additional error handling and API usage checking.
Support for VDA and Cuvid is missing.
Signed-off-by: Anton Khirnov <anton@khirnov.net>
... | ... |
@@ -13,6 +13,9 @@ libavutil: 2017-03-23 |
13 | 13 |
|
14 | 14 |
API changes, most recent first: |
15 | 15 |
|
16 |
+2017-xx-xx - xxxxxxx - lavc 58.5.0 - avcodec.h |
|
17 |
+ Add avcodec_get_hw_frames_parameters(). |
|
18 |
+ |
|
16 | 19 |
2017-xx-xx - xxxxxxx - lavu 56.6.0 - pixdesc.h |
17 | 20 |
Add av_color_range_from_name(), av_color_primaries_from_name(), |
18 | 21 |
av_color_transfer_from_name(), av_color_space_from_name(), and |
... | ... |
@@ -2990,6 +2990,16 @@ typedef struct AVHWAccel { |
2990 | 2990 |
* Internal hwaccel capabilities. |
2991 | 2991 |
*/ |
2992 | 2992 |
int caps_internal; |
2993 |
+ |
|
2994 |
+ /** |
|
2995 |
+ * Fill the given hw_frames context with current codec parameters. Called |
|
2996 |
+ * from get_format. Refer to avcodec_get_hw_frames_parameters() for |
|
2997 |
+ * details. |
|
2998 |
+ * |
|
2999 |
+ * This CAN be called before AVHWAccel.init is called, and you must assume |
|
3000 |
+ * that avctx->hwaccel_priv_data is invalid. |
|
3001 |
+ */ |
|
3002 |
+ int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); |
|
2993 | 3003 |
} AVHWAccel; |
2994 | 3004 |
|
2995 | 3005 |
/** |
... | ... |
@@ -3984,6 +3994,109 @@ int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); |
3984 | 3984 |
*/ |
3985 | 3985 |
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); |
3986 | 3986 |
|
3987 |
+/** |
|
3988 |
+ * Create and return an AVHWFramesContext with values adequate for hardware |
|
3989 |
+ * decoding. This is meant to get called from the get_format callback, and is |
|
3990 |
+ * a helper for preparing an AVHWFramesContext for AVCodecContext.hw_frames_ctx. |
|
3991 |
+ * This API is for decoding with certain hardware acceleration modes/APIs only. |
|
3992 |
+ * |
|
3993 |
+ * The returned AVHWFramesContext is not initialized. The caller must do this |
|
3994 |
+ * with av_hwframe_ctx_init(). |
|
3995 |
+ * |
|
3996 |
+ * Calling this function is not a requirement, but makes it simpler to avoid |
|
3997 |
+ * codec or hardware API specific details when manually allocating frames. |
|
3998 |
+ * |
|
3999 |
+ * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx, |
|
4000 |
+ * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes |
|
4001 |
+ * it unnecessary to call this function or having to care about |
|
4002 |
+ * AVHWFramesContext initialization at all. |
|
4003 |
+ * |
|
4004 |
+ * There are a number of requirements for calling this function: |
|
4005 |
+ * |
|
4006 |
+ * - It must be called from get_format with the same avctx parameter that was |
|
4007 |
+ * passed to get_format. Calling it outside of get_format is not allowed, and |
|
4008 |
+ * can trigger undefined behavior. |
|
4009 |
+ * - The function is not always supported (see description of return values). |
|
4010 |
+ * Even if this function returns successfully, hwaccel initialization could |
|
4011 |
+ * fail later. (The degree to which implementations check whether the stream |
|
4012 |
+ * is actually supported varies. Some do this check only after the user's |
|
4013 |
+ * get_format callback returns.) |
|
4014 |
+ * - The hw_pix_fmt must be one of the choices suggested by get_format. If the |
|
4015 |
+ * user decides to use an AVHWFramesContext prepared with this API function, |
|
4016 |
+ * the user must return the same hw_pix_fmt from get_format. |
|
4017 |
+ * - The device_ref passed to this function must support the given hw_pix_fmt. |
|
4018 |
+ * - After calling this API function, it is the user's responsibility to |
|
4019 |
+ * initialize the AVHWFramesContext (returned by the out_frames_ref parameter), |
|
4020 |
+ * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be done |
|
4021 |
+ * before returning from get_format (this is implied by the normal |
|
4022 |
+ * AVCodecContext.hw_frames_ctx API rules). |
|
4023 |
+ * - The AVHWFramesContext parameters may change every time get_format is |
|
4024 |
+ * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So |
|
4025 |
+ * you are inherently required to go through this process again on every |
|
4026 |
+ * get_format call. |
|
4027 |
+ * - It is perfectly possible to call this function without actually using |
|
4028 |
+ * the resulting AVHWFramesContext. One use-case might be trying to reuse a |
|
4029 |
+ * previously initialized AVHWFramesContext, and calling this API function |
|
4030 |
+ * only to test whether the required frame parameters have changed. |
|
4031 |
+ * - Fields that use dynamically allocated values of any kind must not be set |
|
4032 |
+ * by the user unless setting them is explicitly allowed by the documentation. |
|
4033 |
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque, |
|
4034 |
+ * the new free callback must call the potentially set previous free callback. |
|
4035 |
+ * This API call may set any dynamically allocated fields, including the free |
|
4036 |
+ * callback. |
|
4037 |
+ * |
|
4038 |
+ * The function will set at least the following fields on AVHWFramesContext |
|
4039 |
+ * (potentially more, depending on hwaccel API): |
|
4040 |
+ * |
|
4041 |
+ * - All fields set by av_hwframe_ctx_alloc(). |
|
4042 |
+ * - Set the format field to hw_pix_fmt. |
|
4043 |
+ * - Set the sw_format field to the most suited and most versatile format. (An |
|
4044 |
+ * implication is that this will prefer generic formats over opaque formats |
|
4045 |
+ * with arbitrary restrictions, if possible.) |
|
4046 |
+ * - Set the width/height fields to the coded frame size, rounded up to the |
|
4047 |
+ * API-specific minimum alignment. |
|
4048 |
+ * - Only _if_ the hwaccel requires a pre-allocated pool: set the initial_pool_size |
|
4049 |
+ * field to the number of maximum reference surfaces possible with the codec, |
|
4050 |
+ * plus 1 surface for the user to work with (meaning the user can safely reference |
|
4051 |
+ * at most 1 decoded surface at a time), plus additional buffering introduced |
|
4052 |
+ * by frame threading. If the hwaccel does not require pre-allocation, the |
|
4053 |
+ * field is left to 0, and the decoder will allocate new surfaces on demand |
|
4054 |
+ * during decoding. |
|
4055 |
+ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying |
|
4056 |
+ * hardware API. |
|
4057 |
+ * |
|
4058 |
+ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(), but |
|
4059 |
+ * with basic frame parameters set. |
|
4060 |
+ * |
|
4061 |
+ * The function is stateless, and does not change the AVCodecContext or the |
|
4062 |
+ * device_ref AVHWDeviceContext. |
|
4063 |
+ * |
|
4064 |
+ * @param avctx The context which is currently calling get_format, and which |
|
4065 |
+ * implicitly contains all state needed for filling the returned |
|
4066 |
+ * AVHWFramesContext properly. |
|
4067 |
+ * @param device_ref A reference to the AVHWDeviceContext describing the device |
|
4068 |
+ * which will be used by the hardware decoder. |
|
4069 |
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format. |
|
4070 |
+ * @param out_frames_ref On success, set to a reference to an _uninitialized_ |
|
4071 |
+ * AVHWFramesContext, created from the given device_ref. |
|
4072 |
+ * Fields will be set to values required for decoding. |
|
4073 |
+ * Not changed if an error is returned. |
|
4074 |
+ * @return zero on success, a negative value on error. The following error codes |
|
4075 |
+ * have special semantics: |
|
4076 |
+ * AVERROR(ENOENT): the decoder does not support this functionality. Setup |
|
4077 |
+ * is always manual, or it is a decoder which does not |
|
4078 |
+ * support setting AVCodecContext.hw_frames_ctx at all, |
|
4079 |
+ * or it is a software format. |
|
4080 |
+ * AVERROR(EINVAL): it is known that hardware decoding is not supported for |
|
4081 |
+ * this configuration, or the device_ref is not supported |
|
4082 |
+ * for the hwaccel referenced by hw_pix_fmt. |
|
4083 |
+ */ |
|
4084 |
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, |
|
4085 |
+ AVBufferRef *device_ref, |
|
4086 |
+ enum AVPixelFormat hw_pix_fmt, |
|
4087 |
+ AVBufferRef **out_frames_ref); |
|
4088 |
+ |
|
4089 |
+ |
|
3987 | 4090 |
|
3988 | 4091 |
/** |
3989 | 4092 |
* @defgroup lavc_parsing Frame parsing |
... | ... |
@@ -669,6 +669,88 @@ static AVHWAccel *find_hwaccel(enum AVCodecID codec_id, |
669 | 669 |
return NULL; |
670 | 670 |
} |
671 | 671 |
|
672 |
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, |
|
673 |
+ enum AVHWDeviceType dev_type) |
|
674 |
+{ |
|
675 |
+ AVHWDeviceContext *device_ctx; |
|
676 |
+ AVHWFramesContext *frames_ctx; |
|
677 |
+ int ret; |
|
678 |
+ |
|
679 |
+ if (!avctx->hwaccel) |
|
680 |
+ return AVERROR(ENOSYS); |
|
681 |
+ |
|
682 |
+ if (avctx->hw_frames_ctx) |
|
683 |
+ return 0; |
|
684 |
+ if (!avctx->hw_device_ctx) { |
|
685 |
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is " |
|
686 |
+ "required for hardware accelerated decoding.\n"); |
|
687 |
+ return AVERROR(EINVAL); |
|
688 |
+ } |
|
689 |
+ |
|
690 |
+ device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data; |
|
691 |
+ if (device_ctx->type != dev_type) { |
|
692 |
+ av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware " |
|
693 |
+ "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type), |
|
694 |
+ av_hwdevice_get_type_name(device_ctx->type)); |
|
695 |
+ return AVERROR(EINVAL); |
|
696 |
+ } |
|
697 |
+ |
|
698 |
+ ret = avcodec_get_hw_frames_parameters(avctx, |
|
699 |
+ avctx->hw_device_ctx, |
|
700 |
+ avctx->hwaccel->pix_fmt, |
|
701 |
+ avctx->hw_frames_ctx); |
|
702 |
+ if (ret < 0) { |
|
703 |
+ av_buffer_unref(&avctx->hw_frames_ctx); |
|
704 |
+ return ret; |
|
705 |
+ } |
|
706 |
+ |
|
707 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
708 |
+ |
|
709 |
+ |
|
710 |
+ if (frames_ctx->initial_pool_size) { |
|
711 |
+ // We guarantee 4 base work surfaces. The function above guarantees 1 |
|
712 |
+ // (the absolute minimum), so add the missing count. |
|
713 |
+ frames_ctx->initial_pool_size += 3; |
|
714 |
+ |
|
715 |
+ // Add an additional surface per thread is frame threading is enabled. |
|
716 |
+ if (avctx->active_thread_type & FF_THREAD_FRAME) |
|
717 |
+ frames_ctx->initial_pool_size += avctx->thread_count; |
|
718 |
+ } |
|
719 |
+ |
|
720 |
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
721 |
+ if (ret < 0) { |
|
722 |
+ av_buffer_unref(&avctx->hw_frames_ctx); |
|
723 |
+ return ret; |
|
724 |
+ } |
|
725 |
+ |
|
726 |
+ return 0; |
|
727 |
+} |
|
728 |
+ |
|
729 |
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, |
|
730 |
+ AVBufferRef *device_ref, |
|
731 |
+ enum AVPixelFormat hw_pix_fmt, |
|
732 |
+ AVBufferRef **out_frames_ref) |
|
733 |
+{ |
|
734 |
+ AVBufferRef *frames_ref = NULL; |
|
735 |
+ AVHWAccel *hwa = find_hwaccel(avctx->codec_id, hw_pix_fmt); |
|
736 |
+ int ret; |
|
737 |
+ |
|
738 |
+ if (!hwa || !hwa->frame_params) |
|
739 |
+ return AVERROR(ENOENT); |
|
740 |
+ |
|
741 |
+ frames_ref = av_hwframe_ctx_alloc(device_ref); |
|
742 |
+ if (!frames_ref) |
|
743 |
+ return AVERROR(ENOMEM); |
|
744 |
+ |
|
745 |
+ ret = hwa->frame_params(avctx, frames_ref); |
|
746 |
+ if (ret >= 0) { |
|
747 |
+ *out_frames_ref = frames_ref; |
|
748 |
+ } else { |
|
749 |
+ av_buffer_unref(&frames_ref); |
|
750 |
+ } |
|
751 |
+ return ret; |
|
752 |
+} |
|
753 |
+ |
|
672 | 754 |
static int setup_hwaccel(AVCodecContext *avctx, |
673 | 755 |
const enum AVPixelFormat fmt, |
674 | 756 |
const char *name) |
... | ... |
@@ -23,6 +23,7 @@ |
23 | 23 |
|
24 | 24 |
#include "libavutil/buffer.h" |
25 | 25 |
#include "libavutil/frame.h" |
26 |
+#include "libavutil/hwcontext.h" |
|
26 | 27 |
|
27 | 28 |
#include "avcodec.h" |
28 | 29 |
|
... | ... |
@@ -70,4 +71,12 @@ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt); |
70 | 70 |
|
71 | 71 |
void ff_decode_bsfs_uninit(AVCodecContext *avctx); |
72 | 72 |
|
73 |
+/** |
|
74 |
+ * Make sure avctx.hw_frames_ctx is set. If it's not set, the function will |
|
75 |
+ * try to allocate it from hw_device_ctx. If that is not possible, an error |
|
76 |
+ * message is printed, and an error code is returned. |
|
77 |
+ */ |
|
78 |
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, |
|
79 |
+ enum AVHWDeviceType dev_type); |
|
80 |
+ |
|
73 | 81 |
#endif /* AVCODEC_DECODE_H */ |
... | ... |
@@ -29,6 +29,7 @@ |
29 | 29 |
#include "libavutil/time.h" |
30 | 30 |
|
31 | 31 |
#include "avcodec.h" |
32 |
+#include "decode.h" |
|
32 | 33 |
#include "dxva2_internal.h" |
33 | 34 |
|
34 | 35 |
/* define all the GUIDs used directly here, |
... | ... |
@@ -572,14 +573,20 @@ static void ff_dxva2_unlock(AVCodecContext *avctx) |
572 | 572 |
#endif |
573 | 573 |
} |
574 | 574 |
|
575 |
-// This must work before the decoder is created. |
|
576 |
-// This somehow needs to be exported to the user. |
|
577 |
-static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frames_ctx) |
|
575 |
+int ff_dxva2_common_frame_params(AVCodecContext *avctx, |
|
576 |
+ AVBufferRef *hw_frames_ctx) |
|
578 | 577 |
{ |
579 |
- FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); |
|
578 |
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ctx->data; |
|
579 |
+ AVHWDeviceContext *device_ctx = frames_ctx->device_ctx; |
|
580 | 580 |
int surface_alignment, num_surfaces; |
581 | 581 |
|
582 |
- frames_ctx->format = sctx->pix_fmt; |
|
582 |
+ if (device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) { |
|
583 |
+ frames_ctx->format = AV_PIX_FMT_DXVA2_VLD; |
|
584 |
+ } else if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) { |
|
585 |
+ frames_ctx->format = AV_PIX_FMT_D3D11; |
|
586 |
+ } else { |
|
587 |
+ return AVERROR(EINVAL); |
|
588 |
+ } |
|
583 | 589 |
|
584 | 590 |
/* decoding MPEG-2 requires additional alignment on some Intel GPUs, |
585 | 591 |
but it causes issues for H.264 on certain AMD GPUs..... */ |
... | ... |
@@ -592,8 +599,8 @@ static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frame |
592 | 592 |
else |
593 | 593 |
surface_alignment = 16; |
594 | 594 |
|
595 |
- /* 4 base work surfaces */ |
|
596 |
- num_surfaces = 4; |
|
595 |
+ /* 1 base work surface */ |
|
596 |
+ num_surfaces = 1; |
|
597 | 597 |
|
598 | 598 |
/* add surfaces based on number of possible refs */ |
599 | 599 |
if (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_HEVC) |
... | ... |
@@ -627,12 +634,16 @@ static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frame |
627 | 627 |
frames_hwctx->BindFlags |= D3D11_BIND_DECODER; |
628 | 628 |
} |
629 | 629 |
#endif |
630 |
+ |
|
631 |
+ return 0; |
|
630 | 632 |
} |
631 | 633 |
|
632 | 634 |
int ff_dxva2_decode_init(AVCodecContext *avctx) |
633 | 635 |
{ |
634 | 636 |
FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); |
635 |
- AVHWFramesContext *frames_ctx = NULL; |
|
637 |
+ AVHWFramesContext *frames_ctx; |
|
638 |
+ enum AVHWDeviceType dev_type = avctx->hwaccel->pix_fmt == AV_PIX_FMT_DXVA2_VLD |
|
639 |
+ ? AV_HWDEVICE_TYPE_DXVA2 : AV_HWDEVICE_TYPE_D3D11VA; |
|
636 | 640 |
int ret = 0; |
637 | 641 |
|
638 | 642 |
// Old API. |
... | ... |
@@ -642,32 +653,14 @@ int ff_dxva2_decode_init(AVCodecContext *avctx) |
642 | 642 |
// (avctx->pix_fmt is not updated yet at this point) |
643 | 643 |
sctx->pix_fmt = avctx->hwaccel->pix_fmt; |
644 | 644 |
|
645 |
- if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) { |
|
646 |
- av_log(avctx, AV_LOG_ERROR, "Either a hw_frames_ctx or a hw_device_ctx needs to be set for hardware decoding.\n"); |
|
647 |
- return AVERROR(EINVAL); |
|
648 |
- } |
|
649 |
- |
|
650 |
- if (avctx->hw_frames_ctx) { |
|
651 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
652 |
- } else { |
|
653 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
654 |
- if (!avctx->hw_frames_ctx) |
|
655 |
- return AVERROR(ENOMEM); |
|
656 |
- |
|
657 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
658 |
- |
|
659 |
- dxva_adjust_hwframes(avctx, frames_ctx); |
|
660 |
- |
|
661 |
- ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
662 |
- if (ret < 0) |
|
663 |
- goto fail; |
|
664 |
- } |
|
645 |
+ ret = ff_decode_get_hw_frames_ctx(avctx, dev_type); |
|
646 |
+ if (ret < 0) |
|
647 |
+ return ret; |
|
665 | 648 |
|
649 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
666 | 650 |
sctx->device_ctx = frames_ctx->device_ctx; |
667 | 651 |
|
668 |
- if (frames_ctx->format != sctx->pix_fmt || |
|
669 |
- !((sctx->pix_fmt == AV_PIX_FMT_D3D11 && CONFIG_D3D11VA) || |
|
670 |
- (sctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD && CONFIG_DXVA2))) { |
|
652 |
+ if (frames_ctx->format != sctx->pix_fmt) { |
|
671 | 653 |
av_log(avctx, AV_LOG_ERROR, "Invalid pixfmt for hwaccel!\n"); |
672 | 654 |
ret = AVERROR(EINVAL); |
673 | 655 |
goto fail; |
... | ... |
@@ -523,6 +523,7 @@ AVHWAccel ff_h264_dxva2_hwaccel = { |
523 | 523 |
.start_frame = dxva2_h264_start_frame, |
524 | 524 |
.decode_slice = dxva2_h264_decode_slice, |
525 | 525 |
.end_frame = dxva2_h264_end_frame, |
526 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
526 | 527 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
527 | 528 |
.priv_data_size = sizeof(FFDXVASharedContext), |
528 | 529 |
}; |
... | ... |
@@ -539,6 +540,7 @@ AVHWAccel ff_h264_d3d11va_hwaccel = { |
539 | 539 |
.start_frame = dxva2_h264_start_frame, |
540 | 540 |
.decode_slice = dxva2_h264_decode_slice, |
541 | 541 |
.end_frame = dxva2_h264_end_frame, |
542 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
542 | 543 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
543 | 544 |
.priv_data_size = sizeof(FFDXVASharedContext), |
544 | 545 |
}; |
... | ... |
@@ -555,6 +557,7 @@ AVHWAccel ff_h264_d3d11va2_hwaccel = { |
555 | 555 |
.start_frame = dxva2_h264_start_frame, |
556 | 556 |
.decode_slice = dxva2_h264_decode_slice, |
557 | 557 |
.end_frame = dxva2_h264_end_frame, |
558 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
558 | 559 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
559 | 560 |
.priv_data_size = sizeof(FFDXVASharedContext), |
560 | 561 |
}; |
... | ... |
@@ -432,6 +432,7 @@ AVHWAccel ff_hevc_dxva2_hwaccel = { |
432 | 432 |
.start_frame = dxva2_hevc_start_frame, |
433 | 433 |
.decode_slice = dxva2_hevc_decode_slice, |
434 | 434 |
.end_frame = dxva2_hevc_end_frame, |
435 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
435 | 436 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
436 | 437 |
.priv_data_size = sizeof(FFDXVASharedContext), |
437 | 438 |
}; |
... | ... |
@@ -448,6 +449,7 @@ AVHWAccel ff_hevc_d3d11va_hwaccel = { |
448 | 448 |
.start_frame = dxva2_hevc_start_frame, |
449 | 449 |
.decode_slice = dxva2_hevc_decode_slice, |
450 | 450 |
.end_frame = dxva2_hevc_end_frame, |
451 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
451 | 452 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
452 | 453 |
.priv_data_size = sizeof(FFDXVASharedContext), |
453 | 454 |
}; |
... | ... |
@@ -464,6 +466,7 @@ AVHWAccel ff_hevc_d3d11va2_hwaccel = { |
464 | 464 |
.start_frame = dxva2_hevc_start_frame, |
465 | 465 |
.decode_slice = dxva2_hevc_decode_slice, |
466 | 466 |
.end_frame = dxva2_hevc_end_frame, |
467 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
467 | 468 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
468 | 469 |
.priv_data_size = sizeof(FFDXVASharedContext), |
469 | 470 |
}; |
... | ... |
@@ -156,6 +156,9 @@ int ff_dxva2_decode_init(AVCodecContext *avctx); |
156 | 156 |
|
157 | 157 |
int ff_dxva2_decode_uninit(AVCodecContext *avctx); |
158 | 158 |
|
159 |
+int ff_dxva2_common_frame_params(AVCodecContext *avctx, |
|
160 |
+ AVBufferRef *hw_frames_ctx); |
|
161 |
+ |
|
159 | 162 |
int ff_dxva2_is_d3d11(const AVCodecContext *avctx); |
160 | 163 |
|
161 | 164 |
#endif /* AVCODEC_DXVA2_INTERNAL_H */ |
... | ... |
@@ -328,6 +328,7 @@ AVHWAccel ff_mpeg2_dxva2_hwaccel = { |
328 | 328 |
.start_frame = dxva2_mpeg2_start_frame, |
329 | 329 |
.decode_slice = dxva2_mpeg2_decode_slice, |
330 | 330 |
.end_frame = dxva2_mpeg2_end_frame, |
331 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
331 | 332 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
332 | 333 |
.priv_data_size = sizeof(FFDXVASharedContext), |
333 | 334 |
}; |
... | ... |
@@ -344,6 +345,7 @@ AVHWAccel ff_mpeg2_d3d11va_hwaccel = { |
344 | 344 |
.start_frame = dxva2_mpeg2_start_frame, |
345 | 345 |
.decode_slice = dxva2_mpeg2_decode_slice, |
346 | 346 |
.end_frame = dxva2_mpeg2_end_frame, |
347 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
347 | 348 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
348 | 349 |
.priv_data_size = sizeof(FFDXVASharedContext), |
349 | 350 |
}; |
... | ... |
@@ -360,6 +362,7 @@ AVHWAccel ff_mpeg2_d3d11va2_hwaccel = { |
360 | 360 |
.start_frame = dxva2_mpeg2_start_frame, |
361 | 361 |
.decode_slice = dxva2_mpeg2_decode_slice, |
362 | 362 |
.end_frame = dxva2_mpeg2_end_frame, |
363 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
363 | 364 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
364 | 365 |
.priv_data_size = sizeof(FFDXVASharedContext), |
365 | 366 |
}; |
... | ... |
@@ -328,6 +328,7 @@ AVHWAccel ff_wmv3_dxva2_hwaccel = { |
328 | 328 |
.start_frame = dxva2_vc1_start_frame, |
329 | 329 |
.decode_slice = dxva2_vc1_decode_slice, |
330 | 330 |
.end_frame = dxva2_vc1_end_frame, |
331 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
331 | 332 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
332 | 333 |
.priv_data_size = sizeof(FFDXVASharedContext), |
333 | 334 |
}; |
... | ... |
@@ -344,6 +345,7 @@ AVHWAccel ff_vc1_dxva2_hwaccel = { |
344 | 344 |
.start_frame = dxva2_vc1_start_frame, |
345 | 345 |
.decode_slice = dxva2_vc1_decode_slice, |
346 | 346 |
.end_frame = dxva2_vc1_end_frame, |
347 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
347 | 348 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
348 | 349 |
.priv_data_size = sizeof(FFDXVASharedContext), |
349 | 350 |
}; |
... | ... |
@@ -360,6 +362,7 @@ AVHWAccel ff_wmv3_d3d11va_hwaccel = { |
360 | 360 |
.start_frame = dxva2_vc1_start_frame, |
361 | 361 |
.decode_slice = dxva2_vc1_decode_slice, |
362 | 362 |
.end_frame = dxva2_vc1_end_frame, |
363 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
363 | 364 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
364 | 365 |
.priv_data_size = sizeof(FFDXVASharedContext), |
365 | 366 |
}; |
... | ... |
@@ -376,6 +379,7 @@ AVHWAccel ff_wmv3_d3d11va2_hwaccel = { |
376 | 376 |
.start_frame = dxva2_vc1_start_frame, |
377 | 377 |
.decode_slice = dxva2_vc1_decode_slice, |
378 | 378 |
.end_frame = dxva2_vc1_end_frame, |
379 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
379 | 380 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
380 | 381 |
.priv_data_size = sizeof(FFDXVASharedContext), |
381 | 382 |
}; |
... | ... |
@@ -392,6 +396,7 @@ AVHWAccel ff_vc1_d3d11va_hwaccel = { |
392 | 392 |
.start_frame = dxva2_vc1_start_frame, |
393 | 393 |
.decode_slice = dxva2_vc1_decode_slice, |
394 | 394 |
.end_frame = dxva2_vc1_end_frame, |
395 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
395 | 396 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
396 | 397 |
.priv_data_size = sizeof(FFDXVASharedContext), |
397 | 398 |
}; |
... | ... |
@@ -21,6 +21,7 @@ |
21 | 21 |
#include "libavutil/pixdesc.h" |
22 | 22 |
|
23 | 23 |
#include "avcodec.h" |
24 |
+#include "decode.h" |
|
24 | 25 |
#include "internal.h" |
25 | 26 |
#include "vaapi_decode.h" |
26 | 27 |
|
... | ... |
@@ -270,10 +271,15 @@ static const struct { |
270 | 270 |
#undef MAP |
271 | 271 |
}; |
272 | 272 |
|
273 |
-static int vaapi_decode_make_config(AVCodecContext *avctx) |
|
273 |
+/* |
|
274 |
+ * Set *va_config and the frames_ref fields from the current codec parameters |
|
275 |
+ * in avctx. |
|
276 |
+ */ |
|
277 |
+static int vaapi_decode_make_config(AVCodecContext *avctx, |
|
278 |
+ AVBufferRef *device_ref, |
|
279 |
+ VAConfigID *va_config, |
|
280 |
+ AVBufferRef *frames_ref) |
|
274 | 281 |
{ |
275 |
- VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
|
276 |
- |
|
277 | 282 |
AVVAAPIHWConfig *hwconfig = NULL; |
278 | 283 |
AVHWFramesConstraints *constraints = NULL; |
279 | 284 |
VAStatus vas; |
... | ... |
@@ -283,13 +289,16 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
283 | 283 |
int profile_count, exact_match, alt_profile; |
284 | 284 |
const AVPixFmtDescriptor *sw_desc, *desc; |
285 | 285 |
|
286 |
+ AVHWDeviceContext *device = (AVHWDeviceContext*)device_ref->data; |
|
287 |
+ AVVAAPIDeviceContext *hwctx = device->hwctx; |
|
288 |
+ |
|
286 | 289 |
codec_desc = avcodec_descriptor_get(avctx->codec_id); |
287 | 290 |
if (!codec_desc) { |
288 | 291 |
err = AVERROR(EINVAL); |
289 | 292 |
goto fail; |
290 | 293 |
} |
291 | 294 |
|
292 |
- profile_count = vaMaxNumProfiles(ctx->hwctx->display); |
|
295 |
+ profile_count = vaMaxNumProfiles(hwctx->display); |
|
293 | 296 |
profile_list = av_malloc_array(profile_count, |
294 | 297 |
sizeof(VAProfile)); |
295 | 298 |
if (!profile_list) { |
... | ... |
@@ -297,7 +306,7 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
297 | 297 |
goto fail; |
298 | 298 |
} |
299 | 299 |
|
300 |
- vas = vaQueryConfigProfiles(ctx->hwctx->display, |
|
300 |
+ vas = vaQueryConfigProfiles(hwctx->display, |
|
301 | 301 |
profile_list, &profile_count); |
302 | 302 |
if (vas != VA_STATUS_SUCCESS) { |
303 | 303 |
av_log(avctx, AV_LOG_ERROR, "Failed to query profiles: " |
... | ... |
@@ -355,12 +364,9 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
355 | 355 |
} |
356 | 356 |
} |
357 | 357 |
|
358 |
- ctx->va_profile = profile; |
|
359 |
- ctx->va_entrypoint = VAEntrypointVLD; |
|
360 |
- |
|
361 |
- vas = vaCreateConfig(ctx->hwctx->display, ctx->va_profile, |
|
362 |
- ctx->va_entrypoint, NULL, 0, |
|
363 |
- &ctx->va_config); |
|
358 |
+ vas = vaCreateConfig(hwctx->display, profile, |
|
359 |
+ VAEntrypointVLD, NULL, 0, |
|
360 |
+ va_config); |
|
364 | 361 |
if (vas != VA_STATUS_SUCCESS) { |
365 | 362 |
av_log(avctx, AV_LOG_ERROR, "Failed to create decode " |
366 | 363 |
"configuration: %d (%s).\n", vas, vaErrorStr(vas)); |
... | ... |
@@ -368,20 +374,15 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
368 | 368 |
goto fail; |
369 | 369 |
} |
370 | 370 |
|
371 |
- hwconfig = av_hwdevice_hwconfig_alloc(avctx->hw_device_ctx ? |
|
372 |
- avctx->hw_device_ctx : |
|
373 |
- ctx->frames->device_ref); |
|
371 |
+ hwconfig = av_hwdevice_hwconfig_alloc(device_ref); |
|
374 | 372 |
if (!hwconfig) { |
375 | 373 |
err = AVERROR(ENOMEM); |
376 | 374 |
goto fail; |
377 | 375 |
} |
378 |
- hwconfig->config_id = ctx->va_config; |
|
376 |
+ hwconfig->config_id = *va_config; |
|
379 | 377 |
|
380 | 378 |
constraints = |
381 |
- av_hwdevice_get_hwframe_constraints(avctx->hw_device_ctx ? |
|
382 |
- avctx->hw_device_ctx : |
|
383 |
- ctx->frames->device_ref, |
|
384 |
- hwconfig); |
|
379 |
+ av_hwdevice_get_hwframe_constraints(device_ref, hwconfig); |
|
385 | 380 |
if (!constraints) { |
386 | 381 |
err = AVERROR(ENOMEM); |
387 | 382 |
goto fail; |
... | ... |
@@ -407,48 +408,52 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
407 | 407 |
goto fail; |
408 | 408 |
} |
409 | 409 |
|
410 |
- // Find the first format in the list which matches the expected |
|
411 |
- // bit depth and subsampling. If none are found (this can happen |
|
412 |
- // when 10-bit streams are decoded to 8-bit surfaces, for example) |
|
413 |
- // then just take the first format on the list. |
|
414 |
- ctx->surface_format = constraints->valid_sw_formats[0]; |
|
415 |
- sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); |
|
416 |
- for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { |
|
417 |
- desc = av_pix_fmt_desc_get(constraints->valid_sw_formats[i]); |
|
418 |
- if (desc->nb_components != sw_desc->nb_components || |
|
419 |
- desc->log2_chroma_w != sw_desc->log2_chroma_w || |
|
420 |
- desc->log2_chroma_h != sw_desc->log2_chroma_h) |
|
421 |
- continue; |
|
422 |
- for (j = 0; j < desc->nb_components; j++) { |
|
423 |
- if (desc->comp[j].depth != sw_desc->comp[j].depth) |
|
424 |
- break; |
|
410 |
+ if (frames_ref) { |
|
411 |
+ AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data; |
|
412 |
+ |
|
413 |
+ frames->format = AV_PIX_FMT_VAAPI; |
|
414 |
+ frames->width = avctx->coded_width; |
|
415 |
+ frames->height = avctx->coded_height; |
|
416 |
+ |
|
417 |
+ // Find the first format in the list which matches the expected |
|
418 |
+ // bit depth and subsampling. If none are found (this can happen |
|
419 |
+ // when 10-bit streams are decoded to 8-bit surfaces, for example) |
|
420 |
+ // then just take the first format on the list. |
|
421 |
+ frames->sw_format = constraints->valid_sw_formats[0]; |
|
422 |
+ sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); |
|
423 |
+ for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { |
|
424 |
+ desc = av_pix_fmt_desc_get(constraints->valid_sw_formats[i]); |
|
425 |
+ if (desc->nb_components != sw_desc->nb_components || |
|
426 |
+ desc->log2_chroma_w != sw_desc->log2_chroma_w || |
|
427 |
+ desc->log2_chroma_h != sw_desc->log2_chroma_h) |
|
428 |
+ continue; |
|
429 |
+ for (j = 0; j < desc->nb_components; j++) { |
|
430 |
+ if (desc->comp[j].depth != sw_desc->comp[j].depth) |
|
431 |
+ break; |
|
432 |
+ } |
|
433 |
+ if (j < desc->nb_components) |
|
434 |
+ continue; |
|
435 |
+ frames->sw_format = constraints->valid_sw_formats[i]; |
|
436 |
+ break; |
|
425 | 437 |
} |
426 |
- if (j < desc->nb_components) |
|
427 |
- continue; |
|
428 |
- ctx->surface_format = constraints->valid_sw_formats[i]; |
|
429 |
- break; |
|
430 |
- } |
|
431 | 438 |
|
432 |
- // Start with at least four surfaces. |
|
433 |
- ctx->surface_count = 4; |
|
434 |
- // Add per-codec number of surfaces used for storing reference frames. |
|
435 |
- switch (avctx->codec_id) { |
|
436 |
- case AV_CODEC_ID_H264: |
|
437 |
- case AV_CODEC_ID_HEVC: |
|
438 |
- ctx->surface_count += 16; |
|
439 |
- break; |
|
440 |
- case AV_CODEC_ID_VP9: |
|
441 |
- ctx->surface_count += 8; |
|
442 |
- break; |
|
443 |
- case AV_CODEC_ID_VP8: |
|
444 |
- ctx->surface_count += 3; |
|
445 |
- break; |
|
446 |
- default: |
|
447 |
- ctx->surface_count += 2; |
|
439 |
+ frames->initial_pool_size = 1; |
|
440 |
+ // Add per-codec number of surfaces used for storing reference frames. |
|
441 |
+ switch (avctx->codec_id) { |
|
442 |
+ case AV_CODEC_ID_H264: |
|
443 |
+ case AV_CODEC_ID_HEVC: |
|
444 |
+ frames->initial_pool_size += 16; |
|
445 |
+ break; |
|
446 |
+ case AV_CODEC_ID_VP9: |
|
447 |
+ frames->initial_pool_size += 8; |
|
448 |
+ break; |
|
449 |
+ case AV_CODEC_ID_VP8: |
|
450 |
+ frames->initial_pool_size += 3; |
|
451 |
+ break; |
|
452 |
+ default: |
|
453 |
+ frames->initial_pool_size += 2; |
|
454 |
+ } |
|
448 | 455 |
} |
449 |
- // Add an additional surface per thread is frame threading is enabled. |
|
450 |
- if (avctx->active_thread_type & FF_THREAD_FRAME) |
|
451 |
- ctx->surface_count += avctx->thread_count; |
|
452 | 456 |
|
453 | 457 |
av_hwframe_constraints_free(&constraints); |
454 | 458 |
av_freep(&hwconfig); |
... | ... |
@@ -458,14 +463,38 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
458 | 458 |
fail: |
459 | 459 |
av_hwframe_constraints_free(&constraints); |
460 | 460 |
av_freep(&hwconfig); |
461 |
- if (ctx->va_config != VA_INVALID_ID) { |
|
462 |
- vaDestroyConfig(ctx->hwctx->display, ctx->va_config); |
|
463 |
- ctx->va_config = VA_INVALID_ID; |
|
461 |
+ if (*va_config != VA_INVALID_ID) { |
|
462 |
+ vaDestroyConfig(hwctx->display, *va_config); |
|
463 |
+ *va_config = VA_INVALID_ID; |
|
464 | 464 |
} |
465 | 465 |
av_freep(&profile_list); |
466 | 466 |
return err; |
467 | 467 |
} |
468 | 468 |
|
469 |
+int ff_vaapi_common_frame_params(AVCodecContext *avctx, |
|
470 |
+ AVBufferRef *hw_frames_ctx) |
|
471 |
+{ |
|
472 |
+ AVHWFramesContext *hw_frames = (AVHWFramesContext *)hw_frames_ctx->data; |
|
473 |
+ AVHWDeviceContext *device_ctx = hw_frames->device_ctx; |
|
474 |
+ AVVAAPIDeviceContext *hwctx; |
|
475 |
+ VAConfigID va_config = VA_INVALID_ID; |
|
476 |
+ int err; |
|
477 |
+ |
|
478 |
+ if (device_ctx->type != AV_HWDEVICE_TYPE_VAAPI) |
|
479 |
+ return AVERROR(EINVAL); |
|
480 |
+ hwctx = device_ctx->hwctx; |
|
481 |
+ |
|
482 |
+ err = vaapi_decode_make_config(avctx, hw_frames->device_ref, &va_config, |
|
483 |
+ hw_frames_ctx); |
|
484 |
+ if (err) |
|
485 |
+ return err; |
|
486 |
+ |
|
487 |
+ if (va_config != VA_INVALID_ID) |
|
488 |
+ vaDestroyConfig(hwctx->display, va_config); |
|
489 |
+ |
|
490 |
+ return 0; |
|
491 |
+} |
|
492 |
+ |
|
469 | 493 |
int ff_vaapi_decode_init(AVCodecContext *avctx) |
470 | 494 |
{ |
471 | 495 |
VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
... | ... |
@@ -502,36 +531,8 @@ int ff_vaapi_decode_init(AVCodecContext *avctx) |
502 | 502 |
ctx->hwctx->driver_quirks = |
503 | 503 |
AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS; |
504 | 504 |
|
505 |
- } else |
|
506 |
-#endif |
|
507 |
- if (avctx->hw_frames_ctx) { |
|
508 |
- // This structure has a shorter lifetime than the enclosing |
|
509 |
- // AVCodecContext, so we inherit the references from there |
|
510 |
- // and do not need to make separate ones. |
|
511 |
- |
|
512 |
- ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
513 |
- ctx->hwfc = ctx->frames->hwctx; |
|
514 |
- ctx->device = ctx->frames->device_ctx; |
|
515 |
- ctx->hwctx = ctx->device->hwctx; |
|
516 |
- |
|
517 |
- } else if (avctx->hw_device_ctx) { |
|
518 |
- ctx->device = (AVHWDeviceContext*)avctx->hw_device_ctx->data; |
|
519 |
- ctx->hwctx = ctx->device->hwctx; |
|
520 |
- |
|
521 |
- if (ctx->device->type != AV_HWDEVICE_TYPE_VAAPI) { |
|
522 |
- av_log(avctx, AV_LOG_ERROR, "Device supplied for VAAPI " |
|
523 |
- "decoding must be a VAAPI device (not %d).\n", |
|
524 |
- ctx->device->type); |
|
525 |
- err = AVERROR(EINVAL); |
|
526 |
- goto fail; |
|
527 |
- } |
|
528 |
- |
|
529 |
- } else { |
|
530 |
- av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context " |
|
531 |
- "is required for VAAPI decoding.\n"); |
|
532 |
- err = AVERROR(EINVAL); |
|
533 |
- goto fail; |
|
534 | 505 |
} |
506 |
+#endif |
|
535 | 507 |
|
536 | 508 |
#if FF_API_VAAPI_CONTEXT |
537 | 509 |
if (ctx->have_old_context) { |
... | ... |
@@ -543,34 +544,19 @@ int ff_vaapi_decode_init(AVCodecContext *avctx) |
543 | 543 |
} else { |
544 | 544 |
#endif |
545 | 545 |
|
546 |
- err = vaapi_decode_make_config(avctx); |
|
547 |
- if (err) |
|
546 |
+ err = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VAAPI); |
|
547 |
+ if (err < 0) |
|
548 | 548 |
goto fail; |
549 | 549 |
|
550 |
- if (!avctx->hw_frames_ctx) { |
|
551 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
552 |
- if (!avctx->hw_frames_ctx) { |
|
553 |
- err = AVERROR(ENOMEM); |
|
554 |
- goto fail; |
|
555 |
- } |
|
556 |
- ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
557 |
- |
|
558 |
- ctx->frames->format = AV_PIX_FMT_VAAPI; |
|
559 |
- ctx->frames->width = avctx->coded_width; |
|
560 |
- ctx->frames->height = avctx->coded_height; |
|
561 |
- |
|
562 |
- ctx->frames->sw_format = ctx->surface_format; |
|
563 |
- ctx->frames->initial_pool_size = ctx->surface_count; |
|
550 |
+ ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
551 |
+ ctx->hwfc = ctx->frames->hwctx; |
|
552 |
+ ctx->device = ctx->frames->device_ctx; |
|
553 |
+ ctx->hwctx = ctx->device->hwctx; |
|
564 | 554 |
|
565 |
- err = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
566 |
- if (err < 0) { |
|
567 |
- av_log(avctx, AV_LOG_ERROR, "Failed to initialise internal " |
|
568 |
- "frames context: %d.\n", err); |
|
569 |
- goto fail; |
|
570 |
- } |
|
571 |
- |
|
572 |
- ctx->hwfc = ctx->frames->hwctx; |
|
573 |
- } |
|
555 |
+ err = vaapi_decode_make_config(avctx, ctx->frames->device_ref, |
|
556 |
+ &ctx->va_config, avctx->hw_frames_ctx); |
|
557 |
+ if (err) |
|
558 |
+ goto fail; |
|
574 | 559 |
|
575 | 560 |
vas = vaCreateContext(ctx->hwctx->display, ctx->va_config, |
576 | 561 |
avctx->coded_width, avctx->coded_height, |
... | ... |
@@ -53,8 +53,6 @@ typedef struct VAAPIDecodePicture { |
53 | 53 |
} VAAPIDecodePicture; |
54 | 54 |
|
55 | 55 |
typedef struct VAAPIDecodeContext { |
56 |
- VAProfile va_profile; |
|
57 |
- VAEntrypoint va_entrypoint; |
|
58 | 56 |
VAConfigID va_config; |
59 | 57 |
VAContextID va_context; |
60 | 58 |
|
... | ... |
@@ -96,4 +94,7 @@ int ff_vaapi_decode_cancel(AVCodecContext *avctx, |
96 | 96 |
int ff_vaapi_decode_init(AVCodecContext *avctx); |
97 | 97 |
int ff_vaapi_decode_uninit(AVCodecContext *avctx); |
98 | 98 |
|
99 |
+int ff_vaapi_common_frame_params(AVCodecContext *avctx, |
|
100 |
+ AVBufferRef *hw_frames_ctx); |
|
101 |
+ |
|
99 | 102 |
#endif /* AVCODEC_VAAPI_DECODE_H */ |
... | ... |
@@ -399,6 +399,7 @@ AVHWAccel ff_h264_vaapi_hwaccel = { |
399 | 399 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
400 | 400 |
.init = &ff_vaapi_decode_init, |
401 | 401 |
.uninit = &ff_vaapi_decode_uninit, |
402 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
402 | 403 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
403 | 404 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
404 | 405 |
}; |
... | ... |
@@ -434,6 +434,7 @@ AVHWAccel ff_hevc_vaapi_hwaccel = { |
434 | 434 |
.frame_priv_data_size = sizeof(VAAPIDecodePictureHEVC), |
435 | 435 |
.init = ff_vaapi_decode_init, |
436 | 436 |
.uninit = ff_vaapi_decode_uninit, |
437 |
+ .frame_params = ff_vaapi_common_frame_params, |
|
437 | 438 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
438 | 439 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
439 | 440 |
}; |
... | ... |
@@ -184,6 +184,7 @@ AVHWAccel ff_mpeg2_vaapi_hwaccel = { |
184 | 184 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
185 | 185 |
.init = &ff_vaapi_decode_init, |
186 | 186 |
.uninit = &ff_vaapi_decode_uninit, |
187 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
187 | 188 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
188 | 189 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
189 | 190 |
}; |
... | ... |
@@ -200,6 +200,7 @@ AVHWAccel ff_mpeg4_vaapi_hwaccel = { |
200 | 200 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
201 | 201 |
.init = &ff_vaapi_decode_init, |
202 | 202 |
.uninit = &ff_vaapi_decode_uninit, |
203 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
203 | 204 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
204 | 205 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
205 | 206 |
}; |
... | ... |
@@ -217,6 +218,7 @@ AVHWAccel ff_h263_vaapi_hwaccel = { |
217 | 217 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
218 | 218 |
.init = &ff_vaapi_decode_init, |
219 | 219 |
.uninit = &ff_vaapi_decode_uninit, |
220 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
220 | 221 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
221 | 222 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
222 | 223 |
}; |
... | ... |
@@ -399,6 +399,7 @@ AVHWAccel ff_wmv3_vaapi_hwaccel = { |
399 | 399 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
400 | 400 |
.init = &ff_vaapi_decode_init, |
401 | 401 |
.uninit = &ff_vaapi_decode_uninit, |
402 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
402 | 403 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
403 | 404 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
404 | 405 |
}; |
... | ... |
@@ -415,6 +416,7 @@ AVHWAccel ff_vc1_vaapi_hwaccel = { |
415 | 415 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
416 | 416 |
.init = &ff_vaapi_decode_init, |
417 | 417 |
.uninit = &ff_vaapi_decode_uninit, |
418 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
418 | 419 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
419 | 420 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
420 | 421 |
}; |
... | ... |
@@ -232,5 +232,6 @@ AVHWAccel ff_vp8_vaapi_hwaccel = { |
232 | 232 |
.init = &ff_vaapi_decode_init, |
233 | 233 |
.uninit = &ff_vaapi_decode_uninit, |
234 | 234 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
235 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
235 | 236 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
236 | 237 |
}; |
... | ... |
@@ -24,6 +24,7 @@ |
24 | 24 |
#include <limits.h> |
25 | 25 |
|
26 | 26 |
#include "avcodec.h" |
27 |
+#include "decode.h" |
|
27 | 28 |
#include "internal.h" |
28 | 29 |
#include "h264dec.h" |
29 | 30 |
#include "vc1.h" |
... | ... |
@@ -100,6 +101,25 @@ int av_vdpau_get_surface_parameters(AVCodecContext *avctx, |
100 | 100 |
return 0; |
101 | 101 |
} |
102 | 102 |
|
103 |
+int ff_vdpau_common_frame_params(AVCodecContext *avctx, |
|
104 |
+ AVBufferRef *hw_frames_ctx) |
|
105 |
+{ |
|
106 |
+ AVHWFramesContext *hw_frames = (AVHWFramesContext*)hw_frames_ctx->data; |
|
107 |
+ VdpChromaType type; |
|
108 |
+ uint32_t width; |
|
109 |
+ uint32_t height; |
|
110 |
+ |
|
111 |
+ if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height)) |
|
112 |
+ return AVERROR(EINVAL); |
|
113 |
+ |
|
114 |
+ hw_frames->format = AV_PIX_FMT_VDPAU; |
|
115 |
+ hw_frames->sw_format = avctx->sw_pix_fmt; |
|
116 |
+ hw_frames->width = width; |
|
117 |
+ hw_frames->height = height; |
|
118 |
+ |
|
119 |
+ return 0; |
|
120 |
+} |
|
121 |
+ |
|
103 | 122 |
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
104 | 123 |
int level) |
105 | 124 |
{ |
... | ... |
@@ -115,6 +135,7 @@ int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
115 | 115 |
VdpChromaType type; |
116 | 116 |
uint32_t width; |
117 | 117 |
uint32_t height; |
118 |
+ int ret; |
|
118 | 119 |
|
119 | 120 |
vdctx->width = UINT32_MAX; |
120 | 121 |
vdctx->height = UINT32_MAX; |
... | ... |
@@ -142,41 +163,14 @@ int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
142 | 142 |
type != VDP_CHROMA_TYPE_420) |
143 | 143 |
return AVERROR(ENOSYS); |
144 | 144 |
} else { |
145 |
- AVHWFramesContext *frames_ctx = NULL; |
|
145 |
+ AVHWFramesContext *frames_ctx; |
|
146 | 146 |
AVVDPAUDeviceContext *dev_ctx; |
147 | 147 |
|
148 |
- // We assume the hw_frames_ctx always survives until ff_vdpau_common_uninit |
|
149 |
- // is called. This holds true as the user is not allowed to touch |
|
150 |
- // hw_device_ctx, or hw_frames_ctx after get_format (and ff_get_format |
|
151 |
- // itself also uninits before unreffing hw_frames_ctx). |
|
152 |
- if (avctx->hw_frames_ctx) { |
|
153 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
154 |
- } else if (avctx->hw_device_ctx) { |
|
155 |
- int ret; |
|
156 |
- |
|
157 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
158 |
- if (!avctx->hw_frames_ctx) |
|
159 |
- return AVERROR(ENOMEM); |
|
160 |
- |
|
161 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
162 |
- frames_ctx->format = AV_PIX_FMT_VDPAU; |
|
163 |
- frames_ctx->sw_format = avctx->sw_pix_fmt; |
|
164 |
- frames_ctx->width = avctx->coded_width; |
|
165 |
- frames_ctx->height = avctx->coded_height; |
|
166 |
- |
|
167 |
- ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
168 |
- if (ret < 0) { |
|
169 |
- av_buffer_unref(&avctx->hw_frames_ctx); |
|
170 |
- return ret; |
|
171 |
- } |
|
172 |
- } |
|
173 |
- |
|
174 |
- if (!frames_ctx) { |
|
175 |
- av_log(avctx, AV_LOG_ERROR, "A hardware frames context is " |
|
176 |
- "required for VDPAU decoding.\n"); |
|
177 |
- return AVERROR(EINVAL); |
|
178 |
- } |
|
148 |
+ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VDPAU); |
|
149 |
+ if (ret < 0) |
|
150 |
+ return ret; |
|
179 | 151 |
|
152 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
180 | 153 |
dev_ctx = frames_ctx->device_ctx->hwctx; |
181 | 154 |
|
182 | 155 |
vdctx->device = dev_ctx->device; |
... | ... |
@@ -273,6 +273,7 @@ AVHWAccel ff_h264_vdpau_hwaccel = { |
273 | 273 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
274 | 274 |
.init = vdpau_h264_init, |
275 | 275 |
.uninit = ff_vdpau_common_uninit, |
276 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
276 | 277 |
.priv_data_size = sizeof(VDPAUContext), |
277 | 278 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
278 | 279 |
}; |
... | ... |
@@ -424,6 +424,7 @@ AVHWAccel ff_hevc_vdpau_hwaccel = { |
424 | 424 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
425 | 425 |
.init = vdpau_hevc_init, |
426 | 426 |
.uninit = ff_vdpau_common_uninit, |
427 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
427 | 428 |
.priv_data_size = sizeof(VDPAUContext), |
428 | 429 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
429 | 430 |
}; |
... | ... |
@@ -119,5 +119,7 @@ int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, |
119 | 119 |
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx); |
120 | 120 |
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic, const uint8_t *buf, |
121 | 121 |
uint32_t buf_size); |
122 |
+int ff_vdpau_common_frame_params(AVCodecContext *avctx, |
|
123 |
+ AVBufferRef *hw_frames_ctx); |
|
122 | 124 |
|
123 | 125 |
#endif /* AVCODEC_VDPAU_INTERNAL_H */ |
... | ... |
@@ -149,6 +149,7 @@ AVHWAccel ff_mpeg2_vdpau_hwaccel = { |
149 | 149 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
150 | 150 |
.init = vdpau_mpeg2_init, |
151 | 151 |
.uninit = ff_vdpau_common_uninit, |
152 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
152 | 153 |
.priv_data_size = sizeof(VDPAUContext), |
153 | 154 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
154 | 155 |
}; |
... | ... |
@@ -118,6 +118,7 @@ AVHWAccel ff_mpeg4_vdpau_hwaccel = { |
118 | 118 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
119 | 119 |
.init = vdpau_mpeg4_init, |
120 | 120 |
.uninit = ff_vdpau_common_uninit, |
121 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
121 | 122 |
.priv_data_size = sizeof(VDPAUContext), |
122 | 123 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
123 | 124 |
}; |
... | ... |
@@ -143,6 +143,7 @@ AVHWAccel ff_wmv3_vdpau_hwaccel = { |
143 | 143 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
144 | 144 |
.init = vdpau_vc1_init, |
145 | 145 |
.uninit = ff_vdpau_common_uninit, |
146 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
146 | 147 |
.priv_data_size = sizeof(VDPAUContext), |
147 | 148 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
148 | 149 |
}; |
... | ... |
@@ -159,6 +160,7 @@ AVHWAccel ff_vc1_vdpau_hwaccel = { |
159 | 159 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
160 | 160 |
.init = vdpau_vc1_init, |
161 | 161 |
.uninit = ff_vdpau_common_uninit, |
162 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
162 | 163 |
.priv_data_size = sizeof(VDPAUContext), |
163 | 164 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
164 | 165 |
}; |
... | ... |
@@ -28,7 +28,7 @@ |
28 | 28 |
#include "libavutil/version.h" |
29 | 29 |
|
30 | 30 |
#define LIBAVCODEC_VERSION_MAJOR 58 |
31 |
-#define LIBAVCODEC_VERSION_MINOR 4 |
|
31 |
+#define LIBAVCODEC_VERSION_MINOR 5 |
|
32 | 32 |
#define LIBAVCODEC_VERSION_MICRO 0 |
33 | 33 |
|
34 | 34 |
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ |