Diffstat (limited to 'libhb')
 libhb/common.c        |   7
 libhb/common.h        |  46
 libhb/decavcodec.c    | 111
 libhb/decmpeg2.c      |  13
 libhb/enc_qsv.c       |  35
 libhb/qsv_common.c    |  19
 libhb/qsv_common.h    |   1
 libhb/qsv_filter.c    |  14
 libhb/qsv_filter_pp.c |   8
 libhb/scan.c          |   4
 libhb/sync.c          |  16
 11 files changed, 144 insertions(+), 130 deletions(-)
diff --git a/libhb/common.c b/libhb/common.c
index 66f3b0440..18dd9fc53 100644
--- a/libhb/common.c
+++ b/libhb/common.c
@@ -2885,9 +2885,10 @@ static void job_setup( hb_job_t * job, hb_title_t * title )
     job->metadata = hb_metadata_copy( title->metadata );

 #ifdef USE_QSV
-    job->qsv_enc_info.is_init_done = 0;
-    job->qsv_decode = title->qsv_decode_support;
-    job->qsv_async_depth = AV_QSV_ASYNC_DEPTH_DEFAULT;
+    job->qsv.enc_info.is_init_done = 0;
+    job->qsv.async_depth = AV_QSV_ASYNC_DEPTH_DEFAULT;
+    job->qsv.decode = !!(title->video_decode_support &
+                         HB_DECODE_SUPPORT_QSV);
 #endif
 }
diff --git a/libhb/common.h b/libhb/common.h
index 38c97e487..e1985e87a 100644
--- a/libhb/common.h
+++ b/libhb/common.h
@@ -59,6 +59,10 @@
 #define MAX( a, b ) ( (a) > (b) ? (a) : (b) )
 #endif

+#ifndef HB_DEBUG_ASSERT
+#define HB_DEBUG_ASSERT(x, y) { if ((x)) { hb_error("ASSERT: %s", y); exit(1); } }
+#endif
+
 #define EVEN( a ) ( (a) + ( (a) & 1 ) )
 #define MULTIPLE_16( a ) ( 16 * ( ( (a) + 8 ) / 16 ) )
 #define MULTIPLE_MOD( a, b ) ((b==1)?a:( b * ( ( (a) + (b / 2) - 1) / b ) ))
@@ -106,11 +110,6 @@ typedef struct hb_lock_s hb_lock_t;
 #include "libavutil/channel_layout.h"

 #ifdef USE_QSV
-
-#ifndef DEBUG_ASSERT
-#define DEBUG_ASSERT(x,y) { if ((x)) { hb_error("ASSERT: %s", y); exit(1); } }
-#endif
-
 #include "libavcodec/qsv.h"
 #endif

@@ -513,19 +512,23 @@ struct hb_job_s
                                  // initially (for frame accurate positioning
                                  // to non-I frames).
 #ifdef USE_QSV
-    av_qsv_context *qsv;
-    int qsv_decode;
-    int qsv_async_depth;
-    // shared encoding parameters
-    // initialized by the QSV encoder, then used upstream (e.g. by filters) to
-    // configure their output so that it corresponds to what the encoder expects
+    // QSV-specific settings
     struct
     {
-        int pic_struct;
-        int align_width;
-        int align_height;
-        int is_init_done;
-    } qsv_enc_info;
+        int decode;
+        int async_depth;
+        av_qsv_context *ctx;
+        // shared encoding parameters
+        // initialized by the QSV encoder, then used upstream (e.g. by filters)
+        // to configure their output so that it matches what the encoder expects
+        struct
+        {
+            int pic_struct;
+            int align_width;
+            int align_height;
+            int is_init_done;
+        } enc_info;
+    } qsv;
 #endif

 #ifdef __LIBHB__
@@ -860,9 +863,10 @@ struct hb_title_s
     char *container_name;
     int data_rate;

-#ifdef USE_QSV
-    int qsv_decode_support;
-#endif
+    // additional supported video decoders (e.g. HW-accelerated implementations)
+    int video_decode_support;
+#define HB_DECODE_SUPPORT_SW  0x01 // software (libavcodec or mpeg2dec)
+#define HB_DECODE_SUPPORT_QSV 0x02 // Intel Quick Sync Video

     hb_metadata_t *metadata;
@@ -960,9 +964,7 @@ typedef struct hb_work_info_s
         int color_prim;
         int color_transfer;
         int color_matrix;
-#ifdef USE_QSV
-        int qsv_decode_support;
-#endif
+        int video_decode_support;
     };
     struct
     {    // info only valid for audio decoders
diff --git a/libhb/decavcodec.c b/libhb/decavcodec.c
index 2d7548d56..0e760b8db 100644
--- a/libhb/decavcodec.c
+++ b/libhb/decavcodec.c
@@ -106,14 +106,19 @@ struct hb_work_private_s
     int wait_for_keyframe;

     hb_audio_resample_t *resample;
+
 #ifdef USE_QSV
-    av_qsv_config qsv_config;
-    int qsv_decode;
-    const char *qsv_codec_name;
+    // QSV-specific settings
+    struct
+    {
+        int decode;
+        av_qsv_config config;
+        const char *codec_name;
 #define USE_QSV_PTS_WORKAROUND // work around out-of-order output timestamps
 #ifdef USE_QSV_PTS_WORKAROUND
-    hb_list_t *qsv_pts_list;
+        hb_list_t *pts_list;
 #endif
+    } qsv;
 #endif
 };
@@ -352,7 +357,15 @@ static void closePrivData( hb_work_private_t ** ppv )
         if ( pv->context && pv->context->codec )
         {
 #ifdef USE_QSV
-            if (!pv->qsv_decode)
+            /*
+             * FIXME: knowingly leaked.
+             *
+             * If we're using our Libav QSV wrapper, qsv_decode_end() will call
+             * MFXClose() on the QSV session. Even if decoding is complete, we
+             * still need that session for QSV filtering and/or encoding, so we
+             * we can't close the context here until we implement a proper fix.
+             */
+            if (!pv->qsv.decode)
 #endif
             {
                 hb_avcodec_close(pv->context);
@@ -369,16 +382,16 @@ static void closePrivData( hb_work_private_t ** ppv )
         }
         hb_audio_resample_free(pv->resample);
 #ifdef USE_QSV_PTS_WORKAROUND
-        if (pv->qsv_decode && pv->qsv_pts_list != NULL)
+        if (pv->qsv.decode && pv->qsv.pts_list != NULL)
         {
-            while (hb_list_count(pv->qsv_pts_list) > 0)
+            while (hb_list_count(pv->qsv.pts_list) > 0)
             {
-                int64_t *item = hb_list_item(pv->qsv_pts_list, 0);
-                hb_list_rem(pv->qsv_pts_list, item);
+                int64_t *item = hb_list_item(pv->qsv.pts_list, 0);
+                hb_list_rem(pv->qsv.pts_list, item);
                 free(item);
             }
-            hb_list_close(&pv->qsv_pts_list);
+            hb_list_close(&pv->qsv.pts_list);
         }
 #endif
         free( pv );
@@ -680,8 +693,8 @@ static hb_buffer_t *copy_frame( hb_work_private_t *pv, AVFrame *frame )

 #ifdef USE_QSV
     // no need to copy the frame data when decoding with QSV to opaque memory
-    if (pv->qsv_decode &&
-        pv->qsv_config.io_pattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY)
+    if (pv->qsv.decode &&
+        pv->qsv.config.io_pattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY)
     {
         buf->qsv_details.qsv_atom = frame->data[2];
         return buf;
@@ -898,9 +911,9 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
      * We work around it by saving the input timestamps (in chronological order)
      * and restoring them after decoding.
      */
-    if (pv->qsv_decode && avp.data != NULL)
+    if (pv->qsv.decode && avp.data != NULL)
     {
-        hb_av_add_new_pts(pv->qsv_pts_list, avp.pts);
+        hb_av_add_new_pts(pv->qsv.pts_list, avp.pts);
     }
 #endif
@@ -910,19 +923,19 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
     }

 #ifdef USE_QSV
-    if (pv->qsv_decode && pv->job->qsv == NULL && pv->video_codec_opened > 0)
+    if (pv->qsv.decode && pv->job->qsv.ctx == NULL && pv->video_codec_opened > 0)
     {
         // this is quite late, but we can't be certain that the QSV context is
         // available until after we call avcodec_decode_video2() at least once
-        pv->job->qsv = pv->context->priv_data;
+        pv->job->qsv.ctx = pv->context->priv_data;
     }
 #endif

 #ifdef USE_QSV_PTS_WORKAROUND
-    if (pv->qsv_decode && got_picture)
+    if (pv->qsv.decode && got_picture)
     {
         // we got a decoded frame, restore the lowest available PTS
-        frame.pkt_pts = hb_av_pop_next_pts(pv->qsv_pts_list);
+        frame.pkt_pts = hb_av_pop_next_pts(pv->qsv.pts_list);
     }
 #endif
@@ -1132,7 +1145,7 @@ static void decodeVideo( hb_work_object_t *w, uint8_t *data, int size, int seque
             continue;
         }
 #ifdef USE_QSV
-        if (pv->qsv_decode)
+        if (pv->qsv.decode)
         {
             // flush a second time
             while (decodeFrame(w, NULL, 0, sequence, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0))
@@ -1187,23 +1200,23 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
     if (hb_qsv_decode_is_enabled(job))
     {
         // setup the QSV configuration
-        pv->qsv_config.io_pattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
-        pv->qsv_config.impl_requested = hb_qsv_impl_get_preferred();
-        pv->qsv_config.async_depth = job->qsv_async_depth;
-        pv->qsv_config.sync_need = 0;
-        pv->qsv_config.usage_threaded = 1;
-        pv->qsv_config.additional_buffers = 64; // FIFO_LARGE
+        pv->qsv.config.io_pattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+        pv->qsv.config.impl_requested = hb_qsv_impl_get_preferred();
+        pv->qsv.config.async_depth = job->qsv.async_depth;
+        pv->qsv.config.sync_need = 0;
+        pv->qsv.config.usage_threaded = 1;
+        pv->qsv.config.additional_buffers = 64; // FIFO_LARGE
         if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_LOOKAHEAD)
         {
             // more surfaces may be needed for the lookahead
-            pv->qsv_config.additional_buffers = 160;
+            pv->qsv.config.additional_buffers = 160;
         }
-        pv->qsv_codec_name = hb_qsv_decode_get_codec_name(w->codec_param);
-        pv->qsv_decode = 1;
+        pv->qsv.codec_name = hb_qsv_decode_get_codec_name(w->codec_param);
+        pv->qsv.decode = 1;
     }
     else
     {
-        pv->qsv_decode = 0;
+        pv->qsv.decode = 0;
     }
 #endif
@@ -1219,9 +1232,9 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
         AVCodec *codec = NULL;
 #ifdef USE_QSV
-        if (pv->qsv_decode)
+        if (pv->qsv.decode)
         {
-            codec = avcodec_find_decoder_by_name(pv->qsv_codec_name);
+            codec = avcodec_find_decoder_by_name(pv->qsv.codec_name);
         }
         else
 #endif
@@ -1241,13 +1254,13 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
         pv->context->error_concealment = FF_EC_GUESS_MVS|FF_EC_DEBLOCK;

 #ifdef USE_QSV
-        if (pv->qsv_decode)
+        if (pv->qsv.decode)
         {
 #ifdef USE_QSV_PTS_WORKAROUND
-            pv->qsv_pts_list = hb_list_init();
+            pv->qsv.pts_list = hb_list_init();
 #endif
             // set the QSV configuration before opening the decoder
-            pv->context->hwaccel_context = &pv->qsv_config;
+            pv->context->hwaccel_context = &pv->qsv.config;
         }
 #endif
@@ -1270,9 +1283,9 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
         AVCodec *codec = NULL;
 #ifdef USE_QSV
-        if (pv->qsv_decode)
+        if (pv->qsv.decode)
         {
-            codec = avcodec_find_decoder_by_name(pv->qsv_codec_name);
+            codec = avcodec_find_decoder_by_name(pv->qsv.codec_name);
         }
         else
 #endif
@@ -1410,9 +1423,9 @@ static int decavcodecvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
             AVCodec *codec = NULL;
 #ifdef USE_QSV
-            if (pv->qsv_decode)
+            if (pv->qsv.decode)
             {
-                codec = avcodec_find_decoder_by_name(pv->qsv_codec_name);
+                codec = avcodec_find_decoder_by_name(pv->qsv.codec_name);
             }
             else
 #endif
@@ -1445,13 +1458,13 @@ static int decavcodecvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
             }

 #ifdef USE_QSV
-            if (pv->qsv_decode)
+            if (pv->qsv.decode)
             {
 #ifdef USE_QSV_PTS_WORKAROUND
-                pv->qsv_pts_list = hb_list_init();
+                pv->qsv.pts_list = hb_list_init();
 #endif
                 // set the QSV configuration before opening the decoder
-                pv->context->hwaccel_context = &pv->qsv_config;
+                pv->context->hwaccel_context = &pv->qsv.config;
             }
 #endif
@@ -1668,10 +1681,22 @@ static int decavcodecvInfo( hb_work_object_t *w, hb_work_info_t *info )
         }
     }

+    info->video_decode_support = HB_DECODE_SUPPORT_SW;
+    switch (pv->context->codec_id)
+    {
+        case AV_CODEC_ID_H264:
+            if (pv->context->pix_fmt == AV_PIX_FMT_YUV420P ||
+                pv->context->pix_fmt == AV_PIX_FMT_YUVJ420P)
+            {
 #ifdef USE_QSV
-    info->qsv_decode_support = hb_qsv_decode_is_supported(pv->context->codec_id,
-                                                          pv->context->pix_fmt);
+                info->video_decode_support |= HB_DECODE_SUPPORT_QSV;
 #endif
+            }
+            break;
+
+        default:
+            break;
+    }

     return 1;
 }
diff --git a/libhb/decmpeg2.c b/libhb/decmpeg2.c
index 2a186295d..9d6b8fe42 100644
--- a/libhb/decmpeg2.c
+++ b/libhb/decmpeg2.c
@@ -906,14 +906,11 @@ static int decmpeg2Info( hb_work_object_t *w, hb_work_info_t *info )
                      (m->info->display_picture->flags & PROGRESSIVE) &&
                      (m->height == 480 ) ) ? 1126125 : m->rate;

-    info->bitrate = m->info->sequence->byte_rate * 8;
-    info->profile = m->info->sequence->profile_level_id >> 4;
-    info->level = m->info->sequence->profile_level_id & 0xf;
-    info->name = "mpeg2";
-
-#ifdef USE_QSV
-    info->qsv_decode_support = 0;
-#endif
+    info->name = "mpeg2";
+    info->video_decode_support = HB_DECODE_SUPPORT_SW;
+    info->bitrate = m->info->sequence->byte_rate * 8;
+    info->profile = m->info->sequence->profile_level_id >> 4;
+    info->level = m->info->sequence->profile_level_id & 0xf;

     if( pv->libmpeg2->info->sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION )
     {
diff --git a/libhb/enc_qsv.c b/libhb/enc_qsv.c
index 66825baab..ee52b7e9b 100644
--- a/libhb/enc_qsv.c
+++ b/libhb/enc_qsv.c
@@ -169,7 +169,7 @@ int qsv_enc_init(av_qsv_context *qsv, hb_work_private_t *pv)
             hb_error("qsv_enc_init: decode enabled but no context!");
             return 3;
         }
-        job->qsv = qsv = av_mallocz(sizeof(av_qsv_context));
+        job->qsv.ctx = qsv = av_mallocz(sizeof(av_qsv_context));
     }

     av_qsv_space *qsv_encode = qsv->enc_space;
@@ -388,7 +388,7 @@ int encqsvInit(hb_work_object_t *w, hb_job_t *job)
     }

     // set AsyncDepth to match that of decode and VPP
-    pv->param.videoParam->AsyncDepth = job->qsv_async_depth;
+    pv->param.videoParam->AsyncDepth = job->qsv.async_depth;

     // enable and set colorimetry (video signal information)
     pv->param.videoSignalInfo.ColourDescriptionPresent = 1;
@@ -490,15 +490,15 @@ int encqsvInit(hb_work_object_t *w, hb_job_t *job)
     // some encoding parameters are used by filters to configure their output
     if (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
     {
-        job->qsv_enc_info.align_height = AV_QSV_ALIGN32(job->height);
+        job->qsv.enc_info.align_height = AV_QSV_ALIGN32(job->height);
     }
     else
     {
-        job->qsv_enc_info.align_height = AV_QSV_ALIGN16(job->height);
+        job->qsv.enc_info.align_height = AV_QSV_ALIGN16(job->height);
     }
-    job->qsv_enc_info.align_width = AV_QSV_ALIGN16(job->width);
-    job->qsv_enc_info.pic_struct = pv->param.videoParam->mfx.FrameInfo.PicStruct;
-    job->qsv_enc_info.is_init_done = 1;
+    job->qsv.enc_info.align_width = AV_QSV_ALIGN16(job->width);
+    job->qsv.enc_info.pic_struct = pv->param.videoParam->mfx.FrameInfo.PicStruct;
+    job->qsv.enc_info.is_init_done = 1;

     // encode to H.264 and set FrameInfo
     pv->param.videoParam->mfx.CodecId = MFX_CODEC_AVC;
@@ -514,9 +514,9 @@ int encqsvInit(hb_work_object_t *w, hb_job_t *job)
     pv->param.videoParam->mfx.FrameInfo.CropY = 0;
     pv->param.videoParam->mfx.FrameInfo.CropW = job->width;
     pv->param.videoParam->mfx.FrameInfo.CropH = job->height;
-    pv->param.videoParam->mfx.FrameInfo.PicStruct = job->qsv_enc_info.pic_struct;
-    pv->param.videoParam->mfx.FrameInfo.Width = job->qsv_enc_info.align_width;
-    pv->param.videoParam->mfx.FrameInfo.Height = job->qsv_enc_info.align_height;
+    pv->param.videoParam->mfx.FrameInfo.PicStruct = job->qsv.enc_info.pic_struct;
+    pv->param.videoParam->mfx.FrameInfo.Width = job->qsv.enc_info.align_width;
+    pv->param.videoParam->mfx.FrameInfo.Height = job->qsv.enc_info.align_height;

     // set H.264 profile and level
     if (job->h264_profile != NULL && job->h264_profile[0] != '\0' &&
@@ -1015,11 +1015,12 @@ void encqsvClose( hb_work_object_t * w )
     hb_log( "enc_qsv done: frames: %u in, %u out", pv->frames_in, pv->frames_out );

-    // if system memory ( encode only ) additional free(s) for surfaces
-    if( pv && pv->job && pv->job->qsv &&
-        pv->job->qsv->is_context_active ){
+    // if using system memory (encode-only), free allocated surfaces too
+    if (pv != NULL && pv->job != NULL && pv->job->qsv.ctx != NULL &&
+        pv->job->qsv.ctx->is_context_active)
+    {

-        av_qsv_context *qsv = pv->job->qsv;
+        av_qsv_context *qsv = pv->job->qsv.ctx;

         if(qsv && qsv->enc_space){
         av_qsv_space* qsv_encode = qsv->enc_space;
@@ -1102,7 +1103,7 @@ int encqsvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
     hb_work_private_t * pv = w->private_data;
     hb_job_t * job = pv->job;
     hb_buffer_t * in = *buf_in, *buf;
-    av_qsv_context *qsv = job->qsv;
+    av_qsv_context *qsv = job->qsv.ctx;
     av_qsv_space* qsv_encode;
     hb_buffer_t *last_buf = NULL;
     mfxStatus sts = MFX_ERR_NONE;
@@ -1112,7 +1113,7 @@ int encqsvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
         while(1){
             int ret = qsv_enc_init(qsv, pv);
-            qsv = job->qsv;
+            qsv = job->qsv.ctx;
             qsv_encode = qsv->enc_space;
             if(ret >= 2)
                 av_qsv_sleep(1);
@@ -1310,7 +1311,7 @@ int encqsvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                 ff_qsv_atomic_dec(&qsv_encode->p_syncp[sync_idx]->in_use);

                 if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
-                    DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+                    HB_DEBUG_ASSERT(1, "The bitstream buffer size is insufficient.");

                 break;
             }
diff --git a/libhb/qsv_common.c b/libhb/qsv_common.c
index 949176f0a..3ea07df18 100644
--- a/libhb/qsv_common.c
+++ b/libhb/qsv_common.c
@@ -176,22 +176,9 @@ const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id)

 int hb_qsv_decode_is_enabled(hb_job_t *job)
 {
-    return ((job != NULL && job->title->qsv_decode_support && job->qsv_decode) &&
-            (job->vcodec & HB_VCODEC_QSV_MASK));
-}
-
-int hb_qsv_decode_is_supported(enum AVCodecID codec_id,
-                               enum AVPixelFormat pix_fmt)
-{
-    switch (codec_id)
-    {
-        case AV_CODEC_ID_H264:
-            return (pix_fmt == AV_PIX_FMT_YUV420P ||
-                    pix_fmt == AV_PIX_FMT_YUVJ420P);
-
-        default:
-            return 0;
-    }
+    return ((job != NULL && job->qsv.decode) &&
+            (job->vcodec & HB_VCODEC_QSV_MASK) &&
+            (job->title->video_decode_support & HB_DECODE_SUPPORT_QSV));
 }

 int hb_qsv_codingoption_xlat(int val)
diff --git a/libhb/qsv_common.h b/libhb/qsv_common.h
index 418180432..d27e7241a 100644
--- a/libhb/qsv_common.h
+++ b/libhb/qsv_common.h
@@ -51,7 +51,6 @@ void hb_qsv_info_print();
 /* Intel Quick Sync Video DECODE utilities */
 const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id);
 int hb_qsv_decode_is_enabled(hb_job_t *job);
-int hb_qsv_decode_is_supported(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt);

 /* Media SDK parameters handling */
 enum
diff --git a/libhb/qsv_filter.c b/libhb/qsv_filter.c
index 3de9d6254..20786d343 100644
--- a/libhb/qsv_filter.c
+++ b/libhb/qsv_filter.c
@@ -108,7 +108,7 @@ static int filter_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
     if(!qsv->dec_space || !qsv->dec_space->is_init_done) return 2;

     // we need to know final output settings before we can properly configure
-    if (!pv->job->qsv_enc_info.is_init_done)
+    if (!pv->job->qsv.enc_info.is_init_done)
     {
         return 2;
     }
@@ -168,12 +168,12 @@ static int filter_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
     qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD = pv->job->vrate_base;
     qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
     qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
-    qsv_vpp->m_mfxVideoParam.vpp.Out.Width = pv->job->qsv_enc_info.align_width;
-    qsv_vpp->m_mfxVideoParam.vpp.Out.Height = pv->job->qsv_enc_info.align_height;
+    qsv_vpp->m_mfxVideoParam.vpp.Out.Width = pv->job->qsv.enc_info.align_width;
+    qsv_vpp->m_mfxVideoParam.vpp.Out.Height = pv->job->qsv.enc_info.align_height;

     qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;

-    qsv_vpp->m_mfxVideoParam.AsyncDepth = pv->job->qsv_async_depth;
+    qsv_vpp->m_mfxVideoParam.AsyncDepth = pv->job->qsv.async_depth;

     memset(&qsv_vpp->request, 0, sizeof(mfxFrameAllocRequest)*2);
@@ -415,7 +415,7 @@ static void hb_qsv_filter_close( hb_filter_object_t * filter )
         return;
     }

-    av_qsv_context* qsv = pv->job->qsv;
+    av_qsv_context* qsv = pv->job->qsv.ctx;
     if(qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0){

         // closing local stuff
@@ -543,7 +543,7 @@ int process_frame(av_qsv_list* received_item, av_qsv_context* qsv, hb_filter_pri
                 ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);

                 if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
-                    DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+                    HB_DEBUG_ASSERT(1, "The bitstream buffer size is insufficient.");

                 break;
             }
@@ -561,7 +561,7 @@ static int hb_qsv_filter_work( hb_filter_object_t * filter,
     hb_buffer_t * out = *buf_out;
     int sts = 0;

-    av_qsv_context* qsv = pv->job->qsv;
+    av_qsv_context* qsv = pv->job->qsv.ctx;

     if ( !pv )
     {
diff --git a/libhb/qsv_filter_pp.c b/libhb/qsv_filter_pp.c
index 1aef1eb80..ec625d664 100644
--- a/libhb/qsv_filter_pp.c
+++ b/libhb/qsv_filter_pp.c
@@ -418,7 +418,7 @@ int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t
                 ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);

                 if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
-                    DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+                    HB_DEBUG_ASSERT(1, "The bitstream buffer size is insufficient.");

                 break;
             }
@@ -434,7 +434,7 @@ static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
     hb_buffer_t * out = *buf_out;
     int sts = 0;

-    av_qsv_context* qsv = pv->job->qsv;
+    av_qsv_context* qsv = pv->job->qsv.ctx;

     if(!in->qsv_details.filter_details)
         in->qsv_details.filter_details = pv;
@@ -489,7 +489,7 @@ static void hb_qsv_filter_pre_close( hb_filter_object_t * filter ){
     sws_freeContext(pv->sws_context_to_nv12);
     sws_freeContext(pv->sws_context_from_nv12);

-    av_qsv_context* qsv = pv->job->qsv;
+    av_qsv_context* qsv = pv->job->qsv.ctx;
     if(qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0 ){

         if(pv->qsv_user && qsv->mfx_session){
@@ -560,7 +560,7 @@ static int hb_qsv_filter_post_work( hb_filter_object_t * filter,
         return HB_FILTER_DONE;
     }

-    av_qsv_context* qsv = pv->job->qsv;
+    av_qsv_context* qsv = pv->job->qsv.ctx;

     pv = in->qsv_details.filter_details;
     if (!pv)
diff --git a/libhb/scan.c b/libhb/scan.c
index 507662f36..ec35eb916 100644
--- a/libhb/scan.c
+++ b/libhb/scan.c
@@ -865,9 +865,7 @@ skip_preview:
             title->color_transfer = vid_info.color_transfer;
             title->color_matrix = vid_info.color_matrix;

-#ifdef USE_QSV
-            title->qsv_decode_support = vid_info.qsv_decode_support;
-#endif
+            title->video_decode_support = vid_info.video_decode_support;

             // compute the aspect ratio based on the storage dimensions and the
             // pixel aspect ratio (if supplied) or just storage dimensions if no PAR.
diff --git a/libhb/sync.c b/libhb/sync.c
index 8134688b3..c14d96166 100644
--- a/libhb/sync.c
+++ b/libhb/sync.c
@@ -530,12 +530,13 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
 #ifdef USE_QSV
             // reclaim QSV resources before dropping the buffer
             // when decoding without QSV, the QSV atom will be NULL
-            if (job != NULL && job->qsv != NULL && next->qsv_details.qsv_atom != NULL)
+            if (job != NULL && job->qsv.ctx != NULL &&
+                next->qsv_details.qsv_atom != NULL)
             {
                 av_qsv_stage *stage = av_qsv_get_last_stage(next->qsv_details.qsv_atom);
                 if (stage != NULL)
                 {
-                    av_qsv_wait_on_sync(job->qsv, stage);
+                    av_qsv_wait_on_sync(job->qsv.ctx, stage);
                     if (stage->out.sync->in_use > 0)
                     {
                         ff_qsv_atomic_dec(&stage->out.sync->in_use);
@@ -545,7 +546,8 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                         ff_qsv_atomic_dec(&stage->out.p_surface->Data.Locked);
                     }
                 }
-                av_qsv_flush_stages(job->qsv->pipes, &next->qsv_details.qsv_atom);
+                av_qsv_flush_stages(job->qsv.ctx->pipes,
+                                    &next->qsv_details.qsv_atom);
             }
 #endif
             hb_buffer_close( &next );
@@ -745,12 +747,13 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
 #ifdef USE_QSV
             // reclaim QSV resources before dropping the buffer
             // when decoding without QSV, the QSV atom will be NULL
-            if (job != NULL && job->qsv != NULL && next->qsv_details.qsv_atom != NULL)
+            if (job != NULL && job->qsv.ctx != NULL &&
+                next->qsv_details.qsv_atom != NULL)
             {
                 av_qsv_stage *stage = av_qsv_get_last_stage(next->qsv_details.qsv_atom);
                 if (stage != NULL)
                 {
-                    av_qsv_wait_on_sync(job->qsv, stage);
+                    av_qsv_wait_on_sync(job->qsv.ctx, stage);
                     if (stage->out.sync->in_use > 0)
                     {
                         ff_qsv_atomic_dec(&stage->out.sync->in_use);
@@ -760,7 +763,8 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                         ff_qsv_atomic_dec(&stage->out.p_surface->Data.Locked);
                     }
                 }
-                av_qsv_flush_stages(job->qsv->pipes, &next->qsv_details.qsv_atom);
+                av_qsv_flush_stages(job->qsv.ctx->pipes,
+                                    &next->qsv_details.qsv_atom);
             }
 #endif
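
Not part of the patch: a minimal sketch of how other libhb code is expected to consult the reorganized fields after this change. The helper function example_qsv_decode_setup and its standalone structure are illustrative assumptions; only the field, macro, and function names (job->qsv.*, title->video_decode_support, HB_DECODE_SUPPORT_QSV, hb_qsv_decode_is_enabled) are taken from the diff above.

#include "hb.h"         /* hb_job_t, hb_title_t, HB_DECODE_SUPPORT_* (via common.h) */
#include "qsv_common.h" /* hb_qsv_decode_is_enabled() */

#ifdef USE_QSV
/* Hypothetical helper, for illustration only. */
static void example_qsv_decode_setup(hb_job_t *job)
{
    /* The scanner now records decoder capabilities as a bitmask on the title;
     * QSV decoding is used only when the title advertises HB_DECODE_SUPPORT_QSV
     * and the job enables it (see hb_qsv_decode_is_enabled() in qsv_common.c). */
    if (hb_qsv_decode_is_enabled(job))
    {
        /* The shared QSV context formerly at job->qsv now lives at job->qsv.ctx;
         * it may still be NULL until the decoder or qsv_enc_init() allocates it. */
        av_qsv_context *ctx = job->qsv.ctx;

        /* Filters wait for the encoder to publish its output parameters
         * (job->qsv.enc_info.is_init_done) before configuring their own. */
        if (job->qsv.enc_info.is_init_done)
        {
            int width  = job->qsv.enc_info.align_width;
            int height = job->qsv.enc_info.align_height;
            (void)ctx; (void)width; (void)height;
        }
    }
}
#endif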