* commit 'b46a77f19ddc4b2b5fa3187835ceb602a5244e24':
lavc: external hardware frame pool initialization
Includes the fix from e724bdfffbd3c27aac53d1f32f20f105f37caef0
Merged-by: James Almer <jamrial@gmail.com>
| ... | ... |
@@ -15,6 +15,9 @@ libavutil: 2017-10-21 |
| 15 | 15 |
|
| 16 | 16 |
API changes, most recent first: |
| 17 | 17 |
|
| 18 |
+2017-xx-xx - xxxxxxx - lavc 58.3.100 - avcodec.h |
|
| 19 |
+ Add avcodec_get_hw_frames_parameters(). |
|
| 20 |
+ |
|
| 18 | 21 |
-------- 8< --------- FFmpeg 3.4 was cut here -------- 8< --------- |
| 19 | 22 |
|
| 20 | 23 |
2017-09-28 - b6cf66ae1c - lavc 57.106.104 - avcodec.h |
| ... | ... |
@@ -3534,6 +3534,16 @@ typedef struct AVHWAccel {
|
| 3534 | 3534 |
int caps_internal; |
| 3535 | 3535 |
|
| 3536 | 3536 |
/** |
| 3537 |
+ * Fill the given hw_frames context with current codec parameters. Called |
|
| 3538 |
+ * from get_format. Refer to avcodec_get_hw_frames_parameters() for |
|
| 3539 |
+ * details. |
|
| 3540 |
+ * |
|
| 3541 |
+ * This CAN be called before AVHWAccel.init is called, and you must assume |
|
| 3542 |
+ * that avctx->hwaccel_priv_data is invalid. |
|
| 3543 |
+ */ |
|
| 3544 |
+ int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); |
|
| 3545 |
+ |
|
| 3546 |
+ /** |
|
| 3537 | 3547 |
* Some hwaccels are ambiguous if only the id and pix_fmt fields are used. |
| 3538 | 3548 |
* If non-NULL, the associated AVCodec must have |
| 3539 | 3549 |
* FF_CODEC_CAP_HWACCEL_REQUIRE_CLASS set. |
| ... | ... |
@@ -4674,6 +4684,109 @@ int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); |
| 4674 | 4674 |
*/ |
| 4675 | 4675 |
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); |
| 4676 | 4676 |
|
| 4677 |
+/** |
|
| 4678 |
+ * Create and return an AVHWFramesContext with values adequate for hardware |
|
| 4679 |
+ * decoding. This is meant to get called from the get_format callback, and is |
|
| 4680 |
+ * a helper for preparing an AVHWFramesContext for AVCodecContext.hw_frames_ctx. |
|
| 4681 |
+ * This API is for decoding with certain hardware acceleration modes/APIs only. |
|
| 4682 |
+ * |
|
| 4683 |
+ * The returned AVHWFramesContext is not initialized. The caller must do this |
|
| 4684 |
+ * with av_hwframe_ctx_init(). |
|
| 4685 |
+ * |
|
| 4686 |
+ * Calling this function is not a requirement, but makes it simpler to avoid |
|
| 4687 |
+ * codec or hardware API specific details when manually allocating frames. |
|
| 4688 |
+ * |
|
| 4689 |
+ * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx, |
|
| 4690 |
+ * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes |
|
| 4691 |
+ * it unnecessary to call this function or to care about |
|
| 4692 |
+ * AVHWFramesContext initialization at all. |
|
| 4693 |
+ * |
|
| 4694 |
+ * There are a number of requirements for calling this function: |
|
| 4695 |
+ * |
|
| 4696 |
+ * - It must be called from get_format with the same avctx parameter that was |
|
| 4697 |
+ * passed to get_format. Calling it outside of get_format is not allowed, and |
|
| 4698 |
+ * can trigger undefined behavior. |
|
| 4699 |
+ * - The function is not always supported (see description of return values). |
|
| 4700 |
+ * Even if this function returns successfully, hwaccel initialization could |
|
| 4701 |
+ * fail later. (The degree to which implementations check whether the stream |
|
| 4702 |
+ * is actually supported varies. Some do this check only after the user's |
|
| 4703 |
+ * get_format callback returns.) |
|
| 4704 |
+ * - The hw_pix_fmt must be one of the choices suggested by get_format. If the |
|
| 4705 |
+ * user decides to use an AVHWFramesContext prepared with this API function, |
|
| 4706 |
+ * the user must return the same hw_pix_fmt from get_format. |
|
| 4707 |
+ * - The device_ref passed to this function must support the given hw_pix_fmt. |
|
| 4708 |
+ * - After calling this API function, it is the user's responsibility to |
|
| 4709 |
+ * initialize the AVHWFramesContext (returned by the out_frames_ref parameter), |
|
| 4710 |
+ * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be done |
|
| 4711 |
+ * before returning from get_format (this is implied by the normal |
|
| 4712 |
+ * AVCodecContext.hw_frames_ctx API rules). |
|
| 4713 |
+ * - The AVHWFramesContext parameters may change every time get_format is |
|
| 4714 |
+ * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So |
|
| 4715 |
+ * you are inherently required to go through this process again on every |
|
| 4716 |
+ * get_format call. |
|
| 4717 |
+ * - It is perfectly possible to call this function without actually using |
|
| 4718 |
+ * the resulting AVHWFramesContext. One use-case might be trying to reuse a |
|
| 4719 |
+ * previously initialized AVHWFramesContext, and calling this API function |
|
| 4720 |
+ * only to test whether the required frame parameters have changed. |
|
| 4721 |
+ * - Fields that use dynamically allocated values of any kind must not be set |
|
| 4722 |
+ * by the user unless setting them is explicitly allowed by the documentation. |
|
| 4723 |
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque, |
|
| 4724 |
+ * the new free callback must call the potentially set previous free callback. |
|
| 4725 |
+ * This API call may set any dynamically allocated fields, including the free |
|
| 4726 |
+ * callback. |
|
| 4727 |
+ * |
|
| 4728 |
+ * The function will set at least the following fields on AVHWFramesContext |
|
| 4729 |
+ * (potentially more, depending on hwaccel API): |
|
| 4730 |
+ * |
|
| 4731 |
+ * - All fields set by av_hwframe_ctx_alloc(). |
|
| 4732 |
+ * - Set the format field to hw_pix_fmt. |
|
| 4733 |
+ * - Set the sw_format field to the most suited and most versatile format. (An |
|
| 4734 |
+ * implication is that this will prefer generic formats over opaque formats |
|
| 4735 |
+ * with arbitrary restrictions, if possible.) |
|
| 4736 |
+ * - Set the width/height fields to the coded frame size, rounded up to the |
|
| 4737 |
+ * API-specific minimum alignment. |
|
| 4738 |
+ * - Only _if_ the hwaccel requires a pre-allocated pool: set the initial_pool_size |
|
| 4739 |
+ * field to the number of maximum reference surfaces possible with the codec, |
|
| 4740 |
+ * plus 1 surface for the user to work (meaning the user can safely reference |
|
| 4741 |
+ * at most 1 decoded surface at a time), plus additional buffering introduced |
|
| 4742 |
+ * by frame threading. If the hwaccel does not require pre-allocation, the |
|
| 4743 |
+ * field is left to 0, and the decoder will allocate new surfaces on demand |
|
| 4744 |
+ * during decoding. |
|
| 4745 |
+ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying |
|
| 4746 |
+ * hardware API. |
|
| 4747 |
+ * |
|
| 4748 |
+ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(), but |
|
| 4749 |
+ * with basic frame parameters set. |
|
| 4750 |
+ * |
|
| 4751 |
+ * The function is stateless, and does not change the AVCodecContext or the |
|
| 4752 |
+ * device_ref AVHWDeviceContext. |
|
| 4753 |
+ * |
|
| 4754 |
+ * @param avctx The context which is currently calling get_format, and which |
|
| 4755 |
+ * implicitly contains all state needed for filling the returned |
|
| 4756 |
+ * AVHWFramesContext properly. |
|
| 4757 |
+ * @param device_ref A reference to the AVHWDeviceContext describing the device |
|
| 4758 |
+ * which will be used by the hardware decoder. |
|
| 4759 |
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format. |
|
| 4760 |
+ * @param out_frames_ref On success, set to a reference to an _uninitialized_ |
|
| 4761 |
+ * AVHWFramesContext, created from the given device_ref. |
|
| 4762 |
+ * Fields will be set to values required for decoding. |
|
| 4763 |
+ * Not changed if an error is returned. |
|
| 4764 |
+ * @return zero on success, a negative value on error. The following error codes |
|
| 4765 |
+ * have special semantics: |
|
| 4766 |
+ * AVERROR(ENOENT): the decoder does not support this functionality. Setup |
|
| 4767 |
+ * is always manual, or it is a decoder which does not |
|
| 4768 |
+ * support setting AVCodecContext.hw_frames_ctx at all, |
|
| 4769 |
+ * or it is a software format. |
|
| 4770 |
+ * AVERROR(EINVAL): it is known that hardware decoding is not supported for |
|
| 4771 |
+ * this configuration, or the device_ref is not supported |
|
| 4772 |
+ * for the hwaccel referenced by hw_pix_fmt. |
|
| 4773 |
+ */ |
|
| 4774 |
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, |
|
| 4775 |
+ AVBufferRef *device_ref, |
|
| 4776 |
+ enum AVPixelFormat hw_pix_fmt, |
|
| 4777 |
+ AVBufferRef **out_frames_ref); |
|
| 4778 |
+ |
|
| 4779 |
+ |
|
| 4677 | 4780 |
|
| 4678 | 4781 |
/** |
| 4679 | 4782 |
* @defgroup lavc_parsing Frame parsing |
| ... | ... |
@@ -1106,6 +1106,86 @@ static AVHWAccel *find_hwaccel(AVCodecContext *avctx, |
| 1106 | 1106 |
return NULL; |
| 1107 | 1107 |
} |
| 1108 | 1108 |
|
| 1109 |
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, |
|
| 1110 |
+ enum AVHWDeviceType dev_type) |
|
| 1111 |
+{
|
|
| 1112 |
+ AVHWDeviceContext *device_ctx; |
|
| 1113 |
+ AVHWFramesContext *frames_ctx; |
|
| 1114 |
+ int ret; |
|
| 1115 |
+ |
|
| 1116 |
+ if (!avctx->hwaccel) |
|
| 1117 |
+ return AVERROR(ENOSYS); |
|
| 1118 |
+ |
|
| 1119 |
+ if (avctx->hw_frames_ctx) |
|
| 1120 |
+ return 0; |
|
| 1121 |
+ if (!avctx->hw_device_ctx) {
|
|
| 1122 |
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is " |
|
| 1123 |
+ "required for hardware accelerated decoding.\n"); |
|
| 1124 |
+ return AVERROR(EINVAL); |
|
| 1125 |
+ } |
|
| 1126 |
+ |
|
| 1127 |
+ device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data; |
|
| 1128 |
+ if (device_ctx->type != dev_type) {
|
|
| 1129 |
+ av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware " |
|
| 1130 |
+ "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type), |
|
| 1131 |
+ av_hwdevice_get_type_name(device_ctx->type)); |
|
| 1132 |
+ return AVERROR(EINVAL); |
|
| 1133 |
+ } |
|
| 1134 |
+ |
|
| 1135 |
+ ret = avcodec_get_hw_frames_parameters(avctx, |
|
| 1136 |
+ avctx->hw_device_ctx, |
|
| 1137 |
+ avctx->hwaccel->pix_fmt, |
|
| 1138 |
+ &avctx->hw_frames_ctx); |
|
| 1139 |
+ if (ret < 0) |
|
| 1140 |
+ return ret; |
|
| 1141 |
+ |
|
| 1142 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 1143 |
+ |
|
| 1144 |
+ |
|
| 1145 |
+ if (frames_ctx->initial_pool_size) {
|
|
| 1146 |
+ // We guarantee 4 base work surfaces. The function above guarantees 1 |
|
| 1147 |
+ // (the absolute minimum), so add the missing count. |
|
| 1148 |
+ frames_ctx->initial_pool_size += 3; |
|
| 1149 |
+ |
|
| 1150 |
+ // Add an additional surface per thread if frame threading is enabled. |
|
| 1151 |
+ if (avctx->active_thread_type & FF_THREAD_FRAME) |
|
| 1152 |
+ frames_ctx->initial_pool_size += avctx->thread_count; |
|
| 1153 |
+ } |
|
| 1154 |
+ |
|
| 1155 |
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
| 1156 |
+ if (ret < 0) {
|
|
| 1157 |
+ av_buffer_unref(&avctx->hw_frames_ctx); |
|
| 1158 |
+ return ret; |
|
| 1159 |
+ } |
|
| 1160 |
+ |
|
| 1161 |
+ return 0; |
|
| 1162 |
+} |
|
| 1163 |
+ |
|
| 1164 |
+int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, |
|
| 1165 |
+ AVBufferRef *device_ref, |
|
| 1166 |
+ enum AVPixelFormat hw_pix_fmt, |
|
| 1167 |
+ AVBufferRef **out_frames_ref) |
|
| 1168 |
+{
|
|
| 1169 |
+ AVBufferRef *frames_ref = NULL; |
|
| 1170 |
+ AVHWAccel *hwa = find_hwaccel(avctx, hw_pix_fmt); |
|
| 1171 |
+ int ret; |
|
| 1172 |
+ |
|
| 1173 |
+ if (!hwa || !hwa->frame_params) |
|
| 1174 |
+ return AVERROR(ENOENT); |
|
| 1175 |
+ |
|
| 1176 |
+ frames_ref = av_hwframe_ctx_alloc(device_ref); |
|
| 1177 |
+ if (!frames_ref) |
|
| 1178 |
+ return AVERROR(ENOMEM); |
|
| 1179 |
+ |
|
| 1180 |
+ ret = hwa->frame_params(avctx, frames_ref); |
|
| 1181 |
+ if (ret >= 0) {
|
|
| 1182 |
+ *out_frames_ref = frames_ref; |
|
| 1183 |
+ } else {
|
|
| 1184 |
+ av_buffer_unref(&frames_ref); |
|
| 1185 |
+ } |
|
| 1186 |
+ return ret; |
|
| 1187 |
+} |
|
| 1188 |
+ |
|
| 1109 | 1189 |
static int setup_hwaccel(AVCodecContext *avctx, |
| 1110 | 1190 |
const enum AVPixelFormat fmt, |
| 1111 | 1191 |
const char *name) |
| ... | ... |
@@ -24,6 +24,10 @@ |
| 24 | 24 |
#include "libavutil/buffer.h" |
| 25 | 25 |
#include "libavutil/frame.h" |
| 26 | 26 |
|
| 27 |
+#include "libavutil/buffer.h" |
|
| 28 |
+#include "libavutil/frame.h" |
|
| 29 |
+#include "libavutil/hwcontext.h" |
|
| 30 |
+ |
|
| 27 | 31 |
#include "avcodec.h" |
| 28 | 32 |
|
| 29 | 33 |
/** |
| ... | ... |
@@ -65,6 +69,14 @@ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt); |
| 65 | 65 |
|
| 66 | 66 |
void ff_decode_bsfs_uninit(AVCodecContext *avctx); |
| 67 | 67 |
|
| 68 |
+/** |
|
| 69 |
+ * Make sure avctx.hw_frames_ctx is set. If it's not set, the function will |
|
| 70 |
+ * try to allocate it from hw_device_ctx. If that is not possible, an error |
|
| 71 |
+ * message is printed, and an error code is returned. |
|
| 72 |
+ */ |
|
| 73 |
+int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, |
|
| 74 |
+ enum AVHWDeviceType dev_type); |
|
| 75 |
+ |
|
| 68 | 76 |
int ff_attach_decode_data(AVFrame *frame); |
| 69 | 77 |
|
| 70 | 78 |
#endif /* AVCODEC_DECODE_H */ |
| ... | ... |
@@ -29,6 +29,7 @@ |
| 29 | 29 |
#include "libavutil/time.h" |
| 30 | 30 |
|
| 31 | 31 |
#include "avcodec.h" |
| 32 |
+#include "decode.h" |
|
| 32 | 33 |
#include "dxva2_internal.h" |
| 33 | 34 |
|
| 34 | 35 |
/* define all the GUIDs used directly here, |
| ... | ... |
@@ -576,14 +577,20 @@ static void ff_dxva2_unlock(AVCodecContext *avctx) |
| 576 | 576 |
#endif |
| 577 | 577 |
} |
| 578 | 578 |
|
| 579 |
-// This must work before the decoder is created. |
|
| 580 |
-// This somehow needs to be exported to the user. |
|
| 581 |
-static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frames_ctx) |
|
| 579 |
+int ff_dxva2_common_frame_params(AVCodecContext *avctx, |
|
| 580 |
+ AVBufferRef *hw_frames_ctx) |
|
| 582 | 581 |
{
|
| 583 |
- FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); |
|
| 582 |
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ctx->data; |
|
| 583 |
+ AVHWDeviceContext *device_ctx = frames_ctx->device_ctx; |
|
| 584 | 584 |
int surface_alignment, num_surfaces; |
| 585 | 585 |
|
| 586 |
- frames_ctx->format = sctx->pix_fmt; |
|
| 586 |
+ if (device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
|
|
| 587 |
+ frames_ctx->format = AV_PIX_FMT_DXVA2_VLD; |
|
| 588 |
+ } else if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
|
|
| 589 |
+ frames_ctx->format = AV_PIX_FMT_D3D11; |
|
| 590 |
+ } else {
|
|
| 591 |
+ return AVERROR(EINVAL); |
|
| 592 |
+ } |
|
| 587 | 593 |
|
| 588 | 594 |
/* decoding MPEG-2 requires additional alignment on some Intel GPUs, |
| 589 | 595 |
but it causes issues for H.264 on certain AMD GPUs..... */ |
| ... | ... |
@@ -596,8 +603,8 @@ static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frame |
| 596 | 596 |
else |
| 597 | 597 |
surface_alignment = 16; |
| 598 | 598 |
|
| 599 |
- /* 4 base work surfaces */ |
|
| 600 |
- num_surfaces = 4; |
|
| 599 |
+ /* 1 base work surface */ |
|
| 600 |
+ num_surfaces = 1; |
|
| 601 | 601 |
|
| 602 | 602 |
/* add surfaces based on number of possible refs */ |
| 603 | 603 |
if (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_HEVC) |
| ... | ... |
@@ -633,12 +640,16 @@ static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frame |
| 633 | 633 |
frames_hwctx->BindFlags |= D3D11_BIND_DECODER; |
| 634 | 634 |
} |
| 635 | 635 |
#endif |
| 636 |
+ |
|
| 637 |
+ return 0; |
|
| 636 | 638 |
} |
| 637 | 639 |
|
| 638 | 640 |
int ff_dxva2_decode_init(AVCodecContext *avctx) |
| 639 | 641 |
{
|
| 640 | 642 |
FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); |
| 641 |
- AVHWFramesContext *frames_ctx = NULL; |
|
| 643 |
+ AVHWFramesContext *frames_ctx; |
|
| 644 |
+ enum AVHWDeviceType dev_type = avctx->hwaccel->pix_fmt == AV_PIX_FMT_DXVA2_VLD |
|
| 645 |
+ ? AV_HWDEVICE_TYPE_DXVA2 : AV_HWDEVICE_TYPE_D3D11VA; |
|
| 642 | 646 |
int ret = 0; |
| 643 | 647 |
|
| 644 | 648 |
// Old API. |
| ... | ... |
@@ -648,32 +659,14 @@ int ff_dxva2_decode_init(AVCodecContext *avctx) |
| 648 | 648 |
// (avctx->pix_fmt is not updated yet at this point) |
| 649 | 649 |
sctx->pix_fmt = avctx->hwaccel->pix_fmt; |
| 650 | 650 |
|
| 651 |
- if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
|
|
| 652 |
- av_log(avctx, AV_LOG_ERROR, "Either a hw_frames_ctx or a hw_device_ctx needs to be set for hardware decoding.\n"); |
|
| 653 |
- return AVERROR(EINVAL); |
|
| 654 |
- } |
|
| 655 |
- |
|
| 656 |
- if (avctx->hw_frames_ctx) {
|
|
| 657 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 658 |
- } else {
|
|
| 659 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
| 660 |
- if (!avctx->hw_frames_ctx) |
|
| 661 |
- return AVERROR(ENOMEM); |
|
| 662 |
- |
|
| 663 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 664 |
- |
|
| 665 |
- dxva_adjust_hwframes(avctx, frames_ctx); |
|
| 666 |
- |
|
| 667 |
- ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
| 668 |
- if (ret < 0) |
|
| 669 |
- goto fail; |
|
| 670 |
- } |
|
| 651 |
+ ret = ff_decode_get_hw_frames_ctx(avctx, dev_type); |
|
| 652 |
+ if (ret < 0) |
|
| 653 |
+ return ret; |
|
| 671 | 654 |
|
| 655 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 672 | 656 |
sctx->device_ctx = frames_ctx->device_ctx; |
| 673 | 657 |
|
| 674 |
- if (frames_ctx->format != sctx->pix_fmt || |
|
| 675 |
- !((sctx->pix_fmt == AV_PIX_FMT_D3D11 && CONFIG_D3D11VA) || |
|
| 676 |
- (sctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD && CONFIG_DXVA2))) {
|
|
| 658 |
+ if (frames_ctx->format != sctx->pix_fmt) {
|
|
| 677 | 659 |
av_log(avctx, AV_LOG_ERROR, "Invalid pixfmt for hwaccel!\n"); |
| 678 | 660 |
ret = AVERROR(EINVAL); |
| 679 | 661 |
goto fail; |
| ... | ... |
@@ -528,6 +528,7 @@ AVHWAccel ff_h264_dxva2_hwaccel = {
|
| 528 | 528 |
.start_frame = dxva2_h264_start_frame, |
| 529 | 529 |
.decode_slice = dxva2_h264_decode_slice, |
| 530 | 530 |
.end_frame = dxva2_h264_end_frame, |
| 531 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 531 | 532 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 532 | 533 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 533 | 534 |
}; |
| ... | ... |
@@ -544,6 +545,7 @@ AVHWAccel ff_h264_d3d11va_hwaccel = {
|
| 544 | 544 |
.start_frame = dxva2_h264_start_frame, |
| 545 | 545 |
.decode_slice = dxva2_h264_decode_slice, |
| 546 | 546 |
.end_frame = dxva2_h264_end_frame, |
| 547 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 547 | 548 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 548 | 549 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 549 | 550 |
}; |
| ... | ... |
@@ -560,6 +562,7 @@ AVHWAccel ff_h264_d3d11va2_hwaccel = {
|
| 560 | 560 |
.start_frame = dxva2_h264_start_frame, |
| 561 | 561 |
.decode_slice = dxva2_h264_decode_slice, |
| 562 | 562 |
.end_frame = dxva2_h264_end_frame, |
| 563 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 563 | 564 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 564 | 565 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 565 | 566 |
}; |
| ... | ... |
@@ -432,6 +432,7 @@ AVHWAccel ff_hevc_dxva2_hwaccel = {
|
| 432 | 432 |
.start_frame = dxva2_hevc_start_frame, |
| 433 | 433 |
.decode_slice = dxva2_hevc_decode_slice, |
| 434 | 434 |
.end_frame = dxva2_hevc_end_frame, |
| 435 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 435 | 436 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
| 436 | 437 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 437 | 438 |
}; |
| ... | ... |
@@ -448,6 +449,7 @@ AVHWAccel ff_hevc_d3d11va_hwaccel = {
|
| 448 | 448 |
.start_frame = dxva2_hevc_start_frame, |
| 449 | 449 |
.decode_slice = dxva2_hevc_decode_slice, |
| 450 | 450 |
.end_frame = dxva2_hevc_end_frame, |
| 451 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 451 | 452 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
| 452 | 453 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 453 | 454 |
}; |
| ... | ... |
@@ -464,6 +466,7 @@ AVHWAccel ff_hevc_d3d11va2_hwaccel = {
|
| 464 | 464 |
.start_frame = dxva2_hevc_start_frame, |
| 465 | 465 |
.decode_slice = dxva2_hevc_decode_slice, |
| 466 | 466 |
.end_frame = dxva2_hevc_end_frame, |
| 467 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 467 | 468 |
.frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context), |
| 468 | 469 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 469 | 470 |
}; |
| ... | ... |
@@ -156,6 +156,9 @@ int ff_dxva2_decode_init(AVCodecContext *avctx); |
| 156 | 156 |
|
| 157 | 157 |
int ff_dxva2_decode_uninit(AVCodecContext *avctx); |
| 158 | 158 |
|
| 159 |
+int ff_dxva2_common_frame_params(AVCodecContext *avctx, |
|
| 160 |
+ AVBufferRef *hw_frames_ctx); |
|
| 161 |
+ |
|
| 159 | 162 |
int ff_dxva2_is_d3d11(const AVCodecContext *avctx); |
| 160 | 163 |
|
| 161 | 164 |
#endif /* AVCODEC_DXVA2_INTERNAL_H */ |
| ... | ... |
@@ -327,6 +327,7 @@ AVHWAccel ff_mpeg2_dxva2_hwaccel = {
|
| 327 | 327 |
.start_frame = dxva2_mpeg2_start_frame, |
| 328 | 328 |
.decode_slice = dxva2_mpeg2_decode_slice, |
| 329 | 329 |
.end_frame = dxva2_mpeg2_end_frame, |
| 330 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 330 | 331 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 331 | 332 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 332 | 333 |
}; |
| ... | ... |
@@ -343,6 +344,7 @@ AVHWAccel ff_mpeg2_d3d11va_hwaccel = {
|
| 343 | 343 |
.start_frame = dxva2_mpeg2_start_frame, |
| 344 | 344 |
.decode_slice = dxva2_mpeg2_decode_slice, |
| 345 | 345 |
.end_frame = dxva2_mpeg2_end_frame, |
| 346 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 346 | 347 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 347 | 348 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 348 | 349 |
}; |
| ... | ... |
@@ -359,6 +361,7 @@ AVHWAccel ff_mpeg2_d3d11va2_hwaccel = {
|
| 359 | 359 |
.start_frame = dxva2_mpeg2_start_frame, |
| 360 | 360 |
.decode_slice = dxva2_mpeg2_decode_slice, |
| 361 | 361 |
.end_frame = dxva2_mpeg2_end_frame, |
| 362 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 362 | 363 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 363 | 364 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 364 | 365 |
}; |
| ... | ... |
@@ -388,6 +388,7 @@ AVHWAccel ff_wmv3_dxva2_hwaccel = {
|
| 388 | 388 |
.start_frame = dxva2_vc1_start_frame, |
| 389 | 389 |
.decode_slice = dxva2_vc1_decode_slice, |
| 390 | 390 |
.end_frame = dxva2_vc1_end_frame, |
| 391 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 391 | 392 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 392 | 393 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 393 | 394 |
}; |
| ... | ... |
@@ -404,6 +405,7 @@ AVHWAccel ff_vc1_dxva2_hwaccel = {
|
| 404 | 404 |
.start_frame = dxva2_vc1_start_frame, |
| 405 | 405 |
.decode_slice = dxva2_vc1_decode_slice, |
| 406 | 406 |
.end_frame = dxva2_vc1_end_frame, |
| 407 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 407 | 408 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 408 | 409 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 409 | 410 |
}; |
| ... | ... |
@@ -420,6 +422,7 @@ AVHWAccel ff_wmv3_d3d11va_hwaccel = {
|
| 420 | 420 |
.start_frame = dxva2_vc1_start_frame, |
| 421 | 421 |
.decode_slice = dxva2_vc1_decode_slice, |
| 422 | 422 |
.end_frame = dxva2_vc1_end_frame, |
| 423 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 423 | 424 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 424 | 425 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 425 | 426 |
}; |
| ... | ... |
@@ -436,6 +439,7 @@ AVHWAccel ff_wmv3_d3d11va2_hwaccel = {
|
| 436 | 436 |
.start_frame = dxva2_vc1_start_frame, |
| 437 | 437 |
.decode_slice = dxva2_vc1_decode_slice, |
| 438 | 438 |
.end_frame = dxva2_vc1_end_frame, |
| 439 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 439 | 440 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 440 | 441 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 441 | 442 |
}; |
| ... | ... |
@@ -452,6 +456,7 @@ AVHWAccel ff_vc1_d3d11va_hwaccel = {
|
| 452 | 452 |
.start_frame = dxva2_vc1_start_frame, |
| 453 | 453 |
.decode_slice = dxva2_vc1_decode_slice, |
| 454 | 454 |
.end_frame = dxva2_vc1_end_frame, |
| 455 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 455 | 456 |
.frame_priv_data_size = sizeof(struct dxva2_picture_context), |
| 456 | 457 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 457 | 458 |
}; |
| ... | ... |
@@ -319,6 +319,7 @@ AVHWAccel ff_vp9_dxva2_hwaccel = {
|
| 319 | 319 |
.start_frame = dxva2_vp9_start_frame, |
| 320 | 320 |
.decode_slice = dxva2_vp9_decode_slice, |
| 321 | 321 |
.end_frame = dxva2_vp9_end_frame, |
| 322 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 322 | 323 |
.frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context), |
| 323 | 324 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 324 | 325 |
}; |
| ... | ... |
@@ -335,6 +336,7 @@ AVHWAccel ff_vp9_d3d11va_hwaccel = {
|
| 335 | 335 |
.start_frame = dxva2_vp9_start_frame, |
| 336 | 336 |
.decode_slice = dxva2_vp9_decode_slice, |
| 337 | 337 |
.end_frame = dxva2_vp9_end_frame, |
| 338 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 338 | 339 |
.frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context), |
| 339 | 340 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 340 | 341 |
}; |
| ... | ... |
@@ -351,6 +353,7 @@ AVHWAccel ff_vp9_d3d11va2_hwaccel = {
|
| 351 | 351 |
.start_frame = dxva2_vp9_start_frame, |
| 352 | 352 |
.decode_slice = dxva2_vp9_decode_slice, |
| 353 | 353 |
.end_frame = dxva2_vp9_end_frame, |
| 354 |
+ .frame_params = ff_dxva2_common_frame_params, |
|
| 354 | 355 |
.frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context), |
| 355 | 356 |
.priv_data_size = sizeof(FFDXVASharedContext), |
| 356 | 357 |
}; |
| ... | ... |
@@ -21,6 +21,7 @@ |
| 21 | 21 |
#include "libavutil/pixdesc.h" |
| 22 | 22 |
|
| 23 | 23 |
#include "avcodec.h" |
| 24 |
+#include "decode.h" |
|
| 24 | 25 |
#include "internal.h" |
| 25 | 26 |
#include "vaapi_decode.h" |
| 26 | 27 |
|
| ... | ... |
@@ -272,10 +273,15 @@ static const struct {
|
| 272 | 272 |
#undef MAP |
| 273 | 273 |
}; |
| 274 | 274 |
|
| 275 |
-static int vaapi_decode_make_config(AVCodecContext *avctx) |
|
| 275 |
+/* |
|
| 276 |
+ * Set *va_config and the frames_ref fields from the current codec parameters |
|
| 277 |
+ * in avctx. |
|
| 278 |
+ */ |
|
| 279 |
+static int vaapi_decode_make_config(AVCodecContext *avctx, |
|
| 280 |
+ AVBufferRef *device_ref, |
|
| 281 |
+ VAConfigID *va_config, |
|
| 282 |
+ AVBufferRef *frames_ref) |
|
| 276 | 283 |
{
|
| 277 |
- VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
|
| 278 |
- |
|
| 279 | 284 |
AVVAAPIHWConfig *hwconfig = NULL; |
| 280 | 285 |
AVHWFramesConstraints *constraints = NULL; |
| 281 | 286 |
VAStatus vas; |
| ... | ... |
@@ -285,13 +291,16 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 285 | 285 |
int profile_count, exact_match, alt_profile; |
| 286 | 286 |
const AVPixFmtDescriptor *sw_desc, *desc; |
| 287 | 287 |
|
| 288 |
+ AVHWDeviceContext *device = (AVHWDeviceContext*)device_ref->data; |
|
| 289 |
+ AVVAAPIDeviceContext *hwctx = device->hwctx; |
|
| 290 |
+ |
|
| 288 | 291 |
codec_desc = avcodec_descriptor_get(avctx->codec_id); |
| 289 | 292 |
if (!codec_desc) {
|
| 290 | 293 |
err = AVERROR(EINVAL); |
| 291 | 294 |
goto fail; |
| 292 | 295 |
} |
| 293 | 296 |
|
| 294 |
- profile_count = vaMaxNumProfiles(ctx->hwctx->display); |
|
| 297 |
+ profile_count = vaMaxNumProfiles(hwctx->display); |
|
| 295 | 298 |
profile_list = av_malloc_array(profile_count, |
| 296 | 299 |
sizeof(VAProfile)); |
| 297 | 300 |
if (!profile_list) {
|
| ... | ... |
@@ -299,7 +308,7 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 299 | 299 |
goto fail; |
| 300 | 300 |
} |
| 301 | 301 |
|
| 302 |
- vas = vaQueryConfigProfiles(ctx->hwctx->display, |
|
| 302 |
+ vas = vaQueryConfigProfiles(hwctx->display, |
|
| 303 | 303 |
profile_list, &profile_count); |
| 304 | 304 |
if (vas != VA_STATUS_SUCCESS) {
|
| 305 | 305 |
av_log(avctx, AV_LOG_ERROR, "Failed to query profiles: " |
| ... | ... |
@@ -358,12 +367,9 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 358 | 358 |
} |
| 359 | 359 |
} |
| 360 | 360 |
|
| 361 |
- ctx->va_profile = profile; |
|
| 362 |
- ctx->va_entrypoint = VAEntrypointVLD; |
|
| 363 |
- |
|
| 364 |
- vas = vaCreateConfig(ctx->hwctx->display, ctx->va_profile, |
|
| 365 |
- ctx->va_entrypoint, NULL, 0, |
|
| 366 |
- &ctx->va_config); |
|
| 361 |
+ vas = vaCreateConfig(hwctx->display, profile, |
|
| 362 |
+ VAEntrypointVLD, NULL, 0, |
|
| 363 |
+ va_config); |
|
| 367 | 364 |
if (vas != VA_STATUS_SUCCESS) {
|
| 368 | 365 |
av_log(avctx, AV_LOG_ERROR, "Failed to create decode " |
| 369 | 366 |
"configuration: %d (%s).\n", vas, vaErrorStr(vas)); |
| ... | ... |
@@ -371,20 +377,15 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 371 | 371 |
goto fail; |
| 372 | 372 |
} |
| 373 | 373 |
|
| 374 |
- hwconfig = av_hwdevice_hwconfig_alloc(avctx->hw_device_ctx ? |
|
| 375 |
- avctx->hw_device_ctx : |
|
| 376 |
- ctx->frames->device_ref); |
|
| 374 |
+ hwconfig = av_hwdevice_hwconfig_alloc(device_ref); |
|
| 377 | 375 |
if (!hwconfig) {
|
| 378 | 376 |
err = AVERROR(ENOMEM); |
| 379 | 377 |
goto fail; |
| 380 | 378 |
} |
| 381 |
- hwconfig->config_id = ctx->va_config; |
|
| 379 |
+ hwconfig->config_id = *va_config; |
|
| 382 | 380 |
|
| 383 | 381 |
constraints = |
| 384 |
- av_hwdevice_get_hwframe_constraints(avctx->hw_device_ctx ? |
|
| 385 |
- avctx->hw_device_ctx : |
|
| 386 |
- ctx->frames->device_ref, |
|
| 387 |
- hwconfig); |
|
| 382 |
+ av_hwdevice_get_hwframe_constraints(device_ref, hwconfig); |
|
| 388 | 383 |
if (!constraints) {
|
| 389 | 384 |
err = AVERROR(ENOMEM); |
| 390 | 385 |
goto fail; |
| ... | ... |
@@ -410,48 +411,52 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 410 | 410 |
goto fail; |
| 411 | 411 |
} |
| 412 | 412 |
|
| 413 |
- // Find the first format in the list which matches the expected |
|
| 414 |
- // bit depth and subsampling. If none are found (this can happen |
|
| 415 |
- // when 10-bit streams are decoded to 8-bit surfaces, for example) |
|
| 416 |
- // then just take the first format on the list. |
|
| 417 |
- ctx->surface_format = constraints->valid_sw_formats[0]; |
|
| 418 |
- sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); |
|
| 419 |
- for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
|
|
| 420 |
- desc = av_pix_fmt_desc_get(constraints->valid_sw_formats[i]); |
|
| 421 |
- if (desc->nb_components != sw_desc->nb_components || |
|
| 422 |
- desc->log2_chroma_w != sw_desc->log2_chroma_w || |
|
| 423 |
- desc->log2_chroma_h != sw_desc->log2_chroma_h) |
|
| 424 |
- continue; |
|
| 425 |
- for (j = 0; j < desc->nb_components; j++) {
|
|
| 426 |
- if (desc->comp[j].depth != sw_desc->comp[j].depth) |
|
| 427 |
- break; |
|
| 413 |
+ if (frames_ref) {
|
|
| 414 |
+ AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data; |
|
| 415 |
+ |
|
| 416 |
+ frames->format = AV_PIX_FMT_VAAPI; |
|
| 417 |
+ frames->width = avctx->coded_width; |
|
| 418 |
+ frames->height = avctx->coded_height; |
|
| 419 |
+ |
|
| 420 |
+ // Find the first format in the list which matches the expected |
|
| 421 |
+ // bit depth and subsampling. If none are found (this can happen |
|
| 422 |
+ // when 10-bit streams are decoded to 8-bit surfaces, for example) |
|
| 423 |
+ // then just take the first format on the list. |
|
| 424 |
+ frames->sw_format = constraints->valid_sw_formats[0]; |
|
| 425 |
+ sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); |
|
| 426 |
+ for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
|
|
| 427 |
+ desc = av_pix_fmt_desc_get(constraints->valid_sw_formats[i]); |
|
| 428 |
+ if (desc->nb_components != sw_desc->nb_components || |
|
| 429 |
+ desc->log2_chroma_w != sw_desc->log2_chroma_w || |
|
| 430 |
+ desc->log2_chroma_h != sw_desc->log2_chroma_h) |
|
| 431 |
+ continue; |
|
| 432 |
+ for (j = 0; j < desc->nb_components; j++) {
|
|
| 433 |
+ if (desc->comp[j].depth != sw_desc->comp[j].depth) |
|
| 434 |
+ break; |
|
| 435 |
+ } |
|
| 436 |
+ if (j < desc->nb_components) |
|
| 437 |
+ continue; |
|
| 438 |
+ frames->sw_format = constraints->valid_sw_formats[i]; |
|
| 439 |
+ break; |
|
| 428 | 440 |
} |
| 429 |
- if (j < desc->nb_components) |
|
| 430 |
- continue; |
|
| 431 |
- ctx->surface_format = constraints->valid_sw_formats[i]; |
|
| 432 |
- break; |
|
| 433 |
- } |
|
| 434 | 441 |
|
| 435 |
- // Start with at least four surfaces. |
|
| 436 |
- ctx->surface_count = 4; |
|
| 437 |
- // Add per-codec number of surfaces used for storing reference frames. |
|
| 438 |
- switch (avctx->codec_id) {
|
|
| 439 |
- case AV_CODEC_ID_H264: |
|
| 440 |
- case AV_CODEC_ID_HEVC: |
|
| 441 |
- ctx->surface_count += 16; |
|
| 442 |
- break; |
|
| 443 |
- case AV_CODEC_ID_VP9: |
|
| 444 |
- ctx->surface_count += 8; |
|
| 445 |
- break; |
|
| 446 |
- case AV_CODEC_ID_VP8: |
|
| 447 |
- ctx->surface_count += 3; |
|
| 448 |
- break; |
|
| 449 |
- default: |
|
| 450 |
- ctx->surface_count += 2; |
|
| 442 |
+ frames->initial_pool_size = 1; |
|
| 443 |
+ // Add per-codec number of surfaces used for storing reference frames. |
|
| 444 |
+ switch (avctx->codec_id) {
|
|
| 445 |
+ case AV_CODEC_ID_H264: |
|
| 446 |
+ case AV_CODEC_ID_HEVC: |
|
| 447 |
+ frames->initial_pool_size += 16; |
|
| 448 |
+ break; |
|
| 449 |
+ case AV_CODEC_ID_VP9: |
|
| 450 |
+ frames->initial_pool_size += 8; |
|
| 451 |
+ break; |
|
| 452 |
+ case AV_CODEC_ID_VP8: |
|
| 453 |
+ frames->initial_pool_size += 3; |
|
| 454 |
+ break; |
|
| 455 |
+ default: |
|
| 456 |
+ frames->initial_pool_size += 2; |
|
| 457 |
+ } |
|
| 451 | 458 |
} |
| 452 |
- // Add an additional surface per thread is frame threading is enabled. |
|
| 453 |
- if (avctx->active_thread_type & FF_THREAD_FRAME) |
|
| 454 |
- ctx->surface_count += avctx->thread_count; |
|
| 455 | 459 |
|
| 456 | 460 |
av_hwframe_constraints_free(&constraints); |
| 457 | 461 |
av_freep(&hwconfig); |
| ... | ... |
@@ -461,14 +466,38 @@ static int vaapi_decode_make_config(AVCodecContext *avctx) |
| 461 | 461 |
fail: |
| 462 | 462 |
av_hwframe_constraints_free(&constraints); |
| 463 | 463 |
av_freep(&hwconfig); |
| 464 |
- if (ctx->va_config != VA_INVALID_ID) {
|
|
| 465 |
- vaDestroyConfig(ctx->hwctx->display, ctx->va_config); |
|
| 466 |
- ctx->va_config = VA_INVALID_ID; |
|
| 464 |
+ if (*va_config != VA_INVALID_ID) {
|
|
| 465 |
+ vaDestroyConfig(hwctx->display, *va_config); |
|
| 466 |
+ *va_config = VA_INVALID_ID; |
|
| 467 | 467 |
} |
| 468 | 468 |
av_freep(&profile_list); |
| 469 | 469 |
return err; |
| 470 | 470 |
} |
| 471 | 471 |
|
| 472 |
+int ff_vaapi_common_frame_params(AVCodecContext *avctx, |
|
| 473 |
+ AVBufferRef *hw_frames_ctx) |
|
| 474 |
+{
|
|
| 475 |
+ AVHWFramesContext *hw_frames = (AVHWFramesContext *)hw_frames_ctx->data; |
|
| 476 |
+ AVHWDeviceContext *device_ctx = hw_frames->device_ctx; |
|
| 477 |
+ AVVAAPIDeviceContext *hwctx; |
|
| 478 |
+ VAConfigID va_config = VA_INVALID_ID; |
|
| 479 |
+ int err; |
|
| 480 |
+ |
|
| 481 |
+ if (device_ctx->type != AV_HWDEVICE_TYPE_VAAPI) |
|
| 482 |
+ return AVERROR(EINVAL); |
|
| 483 |
+ hwctx = device_ctx->hwctx; |
|
| 484 |
+ |
|
| 485 |
+ err = vaapi_decode_make_config(avctx, hw_frames->device_ref, &va_config, |
|
| 486 |
+ hw_frames_ctx); |
|
| 487 |
+ if (err) |
|
| 488 |
+ return err; |
|
| 489 |
+ |
|
| 490 |
+ if (va_config != VA_INVALID_ID) |
|
| 491 |
+ vaDestroyConfig(hwctx->display, va_config); |
|
| 492 |
+ |
|
| 493 |
+ return 0; |
|
| 494 |
+} |
|
| 495 |
+ |
|
| 472 | 496 |
int ff_vaapi_decode_init(AVCodecContext *avctx) |
| 473 | 497 |
{
|
| 474 | 498 |
VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
| ... | ... |
@@ -505,36 +534,8 @@ int ff_vaapi_decode_init(AVCodecContext *avctx) |
| 505 | 505 |
ctx->hwctx->driver_quirks = |
| 506 | 506 |
AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS; |
| 507 | 507 |
|
| 508 |
- } else |
|
| 509 |
-#endif |
|
| 510 |
- if (avctx->hw_frames_ctx) {
|
|
| 511 |
- // This structure has a shorter lifetime than the enclosing |
|
| 512 |
- // AVCodecContext, so we inherit the references from there |
|
| 513 |
- // and do not need to make separate ones. |
|
| 514 |
- |
|
| 515 |
- ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 516 |
- ctx->hwfc = ctx->frames->hwctx; |
|
| 517 |
- ctx->device = ctx->frames->device_ctx; |
|
| 518 |
- ctx->hwctx = ctx->device->hwctx; |
|
| 519 |
- |
|
| 520 |
- } else if (avctx->hw_device_ctx) {
|
|
| 521 |
- ctx->device = (AVHWDeviceContext*)avctx->hw_device_ctx->data; |
|
| 522 |
- ctx->hwctx = ctx->device->hwctx; |
|
| 523 |
- |
|
| 524 |
- if (ctx->device->type != AV_HWDEVICE_TYPE_VAAPI) {
|
|
| 525 |
- av_log(avctx, AV_LOG_ERROR, "Device supplied for VAAPI " |
|
| 526 |
- "decoding must be a VAAPI device (not %d).\n", |
|
| 527 |
- ctx->device->type); |
|
| 528 |
- err = AVERROR(EINVAL); |
|
| 529 |
- goto fail; |
|
| 530 |
- } |
|
| 531 |
- |
|
| 532 |
- } else {
|
|
| 533 |
- av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context " |
|
| 534 |
- "is required for VAAPI decoding.\n"); |
|
| 535 |
- err = AVERROR(EINVAL); |
|
| 536 |
- goto fail; |
|
| 537 | 508 |
} |
| 509 |
+#endif |
|
| 538 | 510 |
|
| 539 | 511 |
#if FF_API_STRUCT_VAAPI_CONTEXT |
| 540 | 512 |
if (ctx->have_old_context) {
|
| ... | ... |
@@ -546,34 +547,19 @@ int ff_vaapi_decode_init(AVCodecContext *avctx) |
| 546 | 546 |
} else {
|
| 547 | 547 |
#endif |
| 548 | 548 |
|
| 549 |
- err = vaapi_decode_make_config(avctx); |
|
| 550 |
- if (err) |
|
| 549 |
+ err = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VAAPI); |
|
| 550 |
+ if (err < 0) |
|
| 551 | 551 |
goto fail; |
| 552 | 552 |
|
| 553 |
- if (!avctx->hw_frames_ctx) {
|
|
| 554 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
| 555 |
- if (!avctx->hw_frames_ctx) {
|
|
| 556 |
- err = AVERROR(ENOMEM); |
|
| 557 |
- goto fail; |
|
| 558 |
- } |
|
| 559 |
- ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 560 |
- |
|
| 561 |
- ctx->frames->format = AV_PIX_FMT_VAAPI; |
|
| 562 |
- ctx->frames->width = avctx->coded_width; |
|
| 563 |
- ctx->frames->height = avctx->coded_height; |
|
| 564 |
- |
|
| 565 |
- ctx->frames->sw_format = ctx->surface_format; |
|
| 566 |
- ctx->frames->initial_pool_size = ctx->surface_count; |
|
| 553 |
+ ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 554 |
+ ctx->hwfc = ctx->frames->hwctx; |
|
| 555 |
+ ctx->device = ctx->frames->device_ctx; |
|
| 556 |
+ ctx->hwctx = ctx->device->hwctx; |
|
| 567 | 557 |
|
| 568 |
- err = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
| 569 |
- if (err < 0) {
|
|
| 570 |
- av_log(avctx, AV_LOG_ERROR, "Failed to initialise internal " |
|
| 571 |
- "frames context: %d.\n", err); |
|
| 572 |
- goto fail; |
|
| 573 |
- } |
|
| 574 |
- |
|
| 575 |
- ctx->hwfc = ctx->frames->hwctx; |
|
| 576 |
- } |
|
| 558 |
+ err = vaapi_decode_make_config(avctx, ctx->frames->device_ref, |
|
| 559 |
+ &ctx->va_config, avctx->hw_frames_ctx); |
|
| 560 |
+ if (err) |
|
| 561 |
+ goto fail; |
|
| 577 | 562 |
|
| 578 | 563 |
vas = vaCreateContext(ctx->hwctx->display, ctx->va_config, |
| 579 | 564 |
avctx->coded_width, avctx->coded_height, |
| ... | ... |
@@ -53,8 +53,6 @@ typedef struct VAAPIDecodePicture {
|
| 53 | 53 |
} VAAPIDecodePicture; |
| 54 | 54 |
|
| 55 | 55 |
typedef struct VAAPIDecodeContext {
|
| 56 |
- VAProfile va_profile; |
|
| 57 |
- VAEntrypoint va_entrypoint; |
|
| 58 | 56 |
VAConfigID va_config; |
| 59 | 57 |
VAContextID va_context; |
| 60 | 58 |
|
| ... | ... |
@@ -98,4 +96,7 @@ int ff_vaapi_decode_cancel(AVCodecContext *avctx, |
| 98 | 98 |
int ff_vaapi_decode_init(AVCodecContext *avctx); |
| 99 | 99 |
int ff_vaapi_decode_uninit(AVCodecContext *avctx); |
| 100 | 100 |
|
| 101 |
+int ff_vaapi_common_frame_params(AVCodecContext *avctx, |
|
| 102 |
+ AVBufferRef *hw_frames_ctx); |
|
| 103 |
+ |
|
| 101 | 104 |
#endif /* AVCODEC_VAAPI_DECODE_H */ |
| ... | ... |
@@ -399,6 +399,7 @@ AVHWAccel ff_h264_vaapi_hwaccel = {
|
| 399 | 399 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 400 | 400 |
.init = &ff_vaapi_decode_init, |
| 401 | 401 |
.uninit = &ff_vaapi_decode_uninit, |
| 402 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 402 | 403 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 403 | 404 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 404 | 405 |
}; |
| ... | ... |
@@ -434,6 +434,7 @@ AVHWAccel ff_hevc_vaapi_hwaccel = {
|
| 434 | 434 |
.frame_priv_data_size = sizeof(VAAPIDecodePictureHEVC), |
| 435 | 435 |
.init = ff_vaapi_decode_init, |
| 436 | 436 |
.uninit = ff_vaapi_decode_uninit, |
| 437 |
+ .frame_params = ff_vaapi_common_frame_params, |
|
| 437 | 438 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 438 | 439 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 439 | 440 |
}; |
| ... | ... |
@@ -183,6 +183,7 @@ AVHWAccel ff_mpeg2_vaapi_hwaccel = {
|
| 183 | 183 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 184 | 184 |
.init = &ff_vaapi_decode_init, |
| 185 | 185 |
.uninit = &ff_vaapi_decode_uninit, |
| 186 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 186 | 187 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 187 | 188 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 188 | 189 |
}; |
| ... | ... |
@@ -189,6 +189,7 @@ AVHWAccel ff_mpeg4_vaapi_hwaccel = {
|
| 189 | 189 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 190 | 190 |
.init = &ff_vaapi_decode_init, |
| 191 | 191 |
.uninit = &ff_vaapi_decode_uninit, |
| 192 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 192 | 193 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 193 | 194 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 194 | 195 |
}; |
| ... | ... |
@@ -206,6 +207,7 @@ AVHWAccel ff_h263_vaapi_hwaccel = {
|
| 206 | 206 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 207 | 207 |
.init = &ff_vaapi_decode_init, |
| 208 | 208 |
.uninit = &ff_vaapi_decode_uninit, |
| 209 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 209 | 210 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 210 | 211 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 211 | 212 |
}; |
| ... | ... |
@@ -399,6 +399,7 @@ AVHWAccel ff_wmv3_vaapi_hwaccel = {
|
| 399 | 399 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 400 | 400 |
.init = &ff_vaapi_decode_init, |
| 401 | 401 |
.uninit = &ff_vaapi_decode_uninit, |
| 402 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 402 | 403 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 403 | 404 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 404 | 405 |
}; |
| ... | ... |
@@ -415,6 +416,7 @@ AVHWAccel ff_vc1_vaapi_hwaccel = {
|
| 415 | 415 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 416 | 416 |
.init = &ff_vaapi_decode_init, |
| 417 | 417 |
.uninit = &ff_vaapi_decode_uninit, |
| 418 |
+ .frame_params = &ff_vaapi_common_frame_params, |
|
| 418 | 419 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 419 | 420 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 420 | 421 |
}; |
| ... | ... |
@@ -179,6 +179,7 @@ AVHWAccel ff_vp9_vaapi_hwaccel = {
|
| 179 | 179 |
.frame_priv_data_size = sizeof(VAAPIDecodePicture), |
| 180 | 180 |
.init = ff_vaapi_decode_init, |
| 181 | 181 |
.uninit = ff_vaapi_decode_uninit, |
| 182 |
+ .frame_params = ff_vaapi_common_frame_params, |
|
| 182 | 183 |
.priv_data_size = sizeof(VAAPIDecodeContext), |
| 183 | 184 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 184 | 185 |
}; |
| ... | ... |
@@ -24,6 +24,7 @@ |
| 24 | 24 |
#include <limits.h> |
| 25 | 25 |
|
| 26 | 26 |
#include "avcodec.h" |
| 27 |
+#include "decode.h" |
|
| 27 | 28 |
#include "internal.h" |
| 28 | 29 |
#include "h264dec.h" |
| 29 | 30 |
#include "vc1.h" |
| ... | ... |
@@ -110,6 +111,25 @@ int av_vdpau_get_surface_parameters(AVCodecContext *avctx, |
| 110 | 110 |
return 0; |
| 111 | 111 |
} |
| 112 | 112 |
|
| 113 |
+int ff_vdpau_common_frame_params(AVCodecContext *avctx, |
|
| 114 |
+ AVBufferRef *hw_frames_ctx) |
|
| 115 |
+{
|
|
| 116 |
+ AVHWFramesContext *hw_frames = (AVHWFramesContext*)hw_frames_ctx->data; |
|
| 117 |
+ VdpChromaType type; |
|
| 118 |
+ uint32_t width; |
|
| 119 |
+ uint32_t height; |
|
| 120 |
+ |
|
| 121 |
+ if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height)) |
|
| 122 |
+ return AVERROR(EINVAL); |
|
| 123 |
+ |
|
| 124 |
+ hw_frames->format = AV_PIX_FMT_VDPAU; |
|
| 125 |
+ hw_frames->sw_format = avctx->sw_pix_fmt; |
|
| 126 |
+ hw_frames->width = width; |
|
| 127 |
+ hw_frames->height = height; |
|
| 128 |
+ |
|
| 129 |
+ return 0; |
|
| 130 |
+} |
|
| 131 |
+ |
|
| 113 | 132 |
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
| 114 | 133 |
int level) |
| 115 | 134 |
{
|
| ... | ... |
@@ -127,6 +147,7 @@ int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
| 127 | 127 |
VdpChromaType type; |
| 128 | 128 |
uint32_t width; |
| 129 | 129 |
uint32_t height; |
| 130 |
+ int ret; |
|
| 130 | 131 |
|
| 131 | 132 |
vdctx->width = UINT32_MAX; |
| 132 | 133 |
vdctx->height = UINT32_MAX; |
| ... | ... |
@@ -154,41 +175,14 @@ int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, |
| 154 | 154 |
type != VDP_CHROMA_TYPE_420) |
| 155 | 155 |
return AVERROR(ENOSYS); |
| 156 | 156 |
} else {
|
| 157 |
- AVHWFramesContext *frames_ctx = NULL; |
|
| 157 |
+ AVHWFramesContext *frames_ctx; |
|
| 158 | 158 |
AVVDPAUDeviceContext *dev_ctx; |
| 159 | 159 |
|
| 160 |
- // We assume the hw_frames_ctx always survives until ff_vdpau_common_uninit |
|
| 161 |
- // is called. This holds true as the user is not allowed to touch |
|
| 162 |
- // hw_device_ctx, or hw_frames_ctx after get_format (and ff_get_format |
|
| 163 |
- // itself also uninits before unreffing hw_frames_ctx). |
|
| 164 |
- if (avctx->hw_frames_ctx) {
|
|
| 165 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 166 |
- } else if (avctx->hw_device_ctx) {
|
|
| 167 |
- int ret; |
|
| 168 |
- |
|
| 169 |
- avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); |
|
| 170 |
- if (!avctx->hw_frames_ctx) |
|
| 171 |
- return AVERROR(ENOMEM); |
|
| 172 |
- |
|
| 173 |
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 174 |
- frames_ctx->format = AV_PIX_FMT_VDPAU; |
|
| 175 |
- frames_ctx->sw_format = avctx->sw_pix_fmt; |
|
| 176 |
- frames_ctx->width = avctx->coded_width; |
|
| 177 |
- frames_ctx->height = avctx->coded_height; |
|
| 178 |
- |
|
| 179 |
- ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
|
| 180 |
- if (ret < 0) {
|
|
| 181 |
- av_buffer_unref(&avctx->hw_frames_ctx); |
|
| 182 |
- return ret; |
|
| 183 |
- } |
|
| 184 |
- } |
|
| 185 |
- |
|
| 186 |
- if (!frames_ctx) {
|
|
| 187 |
- av_log(avctx, AV_LOG_ERROR, "A hardware frames context is " |
|
| 188 |
- "required for VDPAU decoding.\n"); |
|
| 189 |
- return AVERROR(EINVAL); |
|
| 190 |
- } |
|
| 160 |
+ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VDPAU); |
|
| 161 |
+ if (ret < 0) |
|
| 162 |
+ return ret; |
|
| 191 | 163 |
|
| 164 |
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
|
| 192 | 165 |
dev_ctx = frames_ctx->device_ctx->hwctx; |
| 193 | 166 |
|
| 194 | 167 |
vdctx->device = dev_ctx->device; |
| ... | ... |
@@ -273,6 +273,7 @@ AVHWAccel ff_h264_vdpau_hwaccel = {
|
| 273 | 273 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 274 | 274 |
.init = vdpau_h264_init, |
| 275 | 275 |
.uninit = ff_vdpau_common_uninit, |
| 276 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 276 | 277 |
.priv_data_size = sizeof(VDPAUContext), |
| 277 | 278 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 278 | 279 |
}; |
| ... | ... |
@@ -424,6 +424,7 @@ AVHWAccel ff_hevc_vdpau_hwaccel = {
|
| 424 | 424 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 425 | 425 |
.init = vdpau_hevc_init, |
| 426 | 426 |
.uninit = ff_vdpau_common_uninit, |
| 427 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 427 | 428 |
.priv_data_size = sizeof(VDPAUContext), |
| 428 | 429 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 429 | 430 |
}; |
| ... | ... |
@@ -119,5 +119,7 @@ int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, |
| 119 | 119 |
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx); |
| 120 | 120 |
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic, const uint8_t *buf, |
| 121 | 121 |
uint32_t buf_size); |
| 122 |
+int ff_vdpau_common_frame_params(AVCodecContext *avctx, |
|
| 123 |
+ AVBufferRef *hw_frames_ctx); |
|
| 122 | 124 |
|
| 123 | 125 |
#endif /* AVCODEC_VDPAU_INTERNAL_H */ |
| ... | ... |
@@ -149,6 +149,7 @@ AVHWAccel ff_mpeg2_vdpau_hwaccel = {
|
| 149 | 149 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 150 | 150 |
.init = vdpau_mpeg2_init, |
| 151 | 151 |
.uninit = ff_vdpau_common_uninit, |
| 152 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 152 | 153 |
.priv_data_size = sizeof(VDPAUContext), |
| 153 | 154 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 154 | 155 |
}; |
| ... | ... |
@@ -121,6 +121,7 @@ AVHWAccel ff_mpeg4_vdpau_hwaccel = {
|
| 121 | 121 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 122 | 122 |
.init = vdpau_mpeg4_init, |
| 123 | 123 |
.uninit = ff_vdpau_common_uninit, |
| 124 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 124 | 125 |
.priv_data_size = sizeof(VDPAUContext), |
| 125 | 126 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 126 | 127 |
}; |
| ... | ... |
@@ -147,6 +147,7 @@ AVHWAccel ff_wmv3_vdpau_hwaccel = {
|
| 147 | 147 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 148 | 148 |
.init = vdpau_vc1_init, |
| 149 | 149 |
.uninit = ff_vdpau_common_uninit, |
| 150 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 150 | 151 |
.priv_data_size = sizeof(VDPAUContext), |
| 151 | 152 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 152 | 153 |
}; |
| ... | ... |
@@ -163,6 +164,7 @@ AVHWAccel ff_vc1_vdpau_hwaccel = {
|
| 163 | 163 |
.frame_priv_data_size = sizeof(struct vdpau_picture_context), |
| 164 | 164 |
.init = vdpau_vc1_init, |
| 165 | 165 |
.uninit = ff_vdpau_common_uninit, |
| 166 |
+ .frame_params = ff_vdpau_common_frame_params, |
|
| 166 | 167 |
.priv_data_size = sizeof(VDPAUContext), |
| 167 | 168 |
.caps_internal = HWACCEL_CAP_ASYNC_SAFE, |
| 168 | 169 |
}; |
| ... | ... |
@@ -28,7 +28,7 @@ |
| 28 | 28 |
#include "libavutil/version.h" |
| 29 | 29 |
|
| 30 | 30 |
#define LIBAVCODEC_VERSION_MAJOR 58 |
| 31 |
-#define LIBAVCODEC_VERSION_MINOR 2 |
|
| 31 |
+#define LIBAVCODEC_VERSION_MINOR 3 |
|
| 32 | 32 |
#define LIBAVCODEC_VERSION_MICRO 100 |
| 33 | 33 |
|
| 34 | 34 |
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ |