author     Rodeo <[email protected]>    2013-08-22 20:34:44 +0000
committer  Rodeo <[email protected]>    2013-08-22 20:34:44 +0000
commit     3326f988806a5decae025727784a19c8cc223833 (patch)
tree       cd75adb1975d223d7a0fd43a31030a78939e73d3 /libhb
parent     d41905d539046445e1b81499ff7bd04d170c91d4 (diff)
Big merge, QSV to trunk: part 2 (new files).
git-svn-id: svn://svn.handbrake.fr/HandBrake/trunk@5738 b64f7644-9d1e-0410-96f1-a4d463321fa5
Diffstat (limited to 'libhb')
-rw-r--r--  libhb/enc_qsv.c        1543
-rw-r--r--  libhb/enc_qsv.h          38
-rw-r--r--  libhb/h264_common.h      17
-rw-r--r--  libhb/qsv_common.c      780
-rw-r--r--  libhb/qsv_common.h      119
-rw-r--r--  libhb/qsv_filter.c      648
-rw-r--r--  libhb/qsv_filter.h       35
-rw-r--r--  libhb/qsv_filter_pp.c   916
-rw-r--r--  libhb/qsv_filter_pp.h   114
-rw-r--r--  libhb/qsv_memory.c      120
-rw-r--r--  libhb/qsv_memory.h       55
11 files changed, 4385 insertions, 0 deletions
diff --git a/libhb/enc_qsv.c b/libhb/enc_qsv.c
new file mode 100644
index 000000000..c1c832a81
--- /dev/null
+++ b/libhb/enc_qsv.c
@@ -0,0 +1,1543 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#include "hb.h"
+#include "enc_qsv.h"
+#include "qsv_common.h"
+#include "qsv_memory.h"
+#include "h264_common.h"
+
+int encqsvInit( hb_work_object_t *, hb_job_t * );
+int encqsvWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
+void encqsvClose( hb_work_object_t * );
+
+hb_work_object_t hb_encqsv =
+{
+ WORK_ENCQSV,
+ "H.264/AVC encoder (Intel QSV)",
+ encqsvInit,
+ encqsvWork,
+ encqsvClose
+};
+
+struct hb_work_private_s
+{
+ hb_job_t *job;
+ uint32_t frames_in;
+ uint32_t frames_out;
+ int64_t last_start;
+
+ hb_qsv_param_t param;
+ av_qsv_space enc_space;
+
+ mfxEncodeCtrl force_keyframe;
+ struct
+ {
+ int index;
+ int64_t start;
+ } next_chapter;
+
+#define BFRM_DELAY_MAX 16
+ // for DTS generation (when MSDK API < 1.6 or VFR)
+ int bfrm_delay;
+ int bfrm_workaround;
+ int64_t init_pts[BFRM_DELAY_MAX + 1];
+ hb_list_t *list_dts;
+
+ int async_depth;
+ int max_async_depth;
+
+ // if encode-only, system memory used
+ int is_sys_mem;
+ struct SwsContext *sws_context_to_nv12;
+
+ // whether to expect input from VPP or from QSV decode
+ int is_vpp_present;
+
+ // whether the encoder is initialized
+ int init_done;
+
+ hb_list_t *delayed_processing;
+};
+
+// for DTS generation (when MSDK API < 1.6 or VFR)
+static void hb_qsv_add_new_dts(hb_list_t *list, int64_t new_dts)
+{
+ if (list != NULL)
+ {
+ int64_t *item = malloc(sizeof(int64_t));
+ if (item != NULL)
+ {
+ *item = new_dts;
+ hb_list_add(list, item);
+ }
+ }
+}
+static int64_t hb_qsv_pop_next_dts(hb_list_t *list)
+{
+ int64_t next_dts = INT64_MIN;
+ if (list != NULL && hb_list_count(list) > 0)
+ {
+ int64_t *item = hb_list_item(list, 0);
+ if (item != NULL)
+ {
+ next_dts = *item;
+ hb_list_rem(list, item);
+ free(item);
+ }
+ }
+ return next_dts;
+}
+
+static const char* qsv_h264_profile_xlat(int profile)
+{
+ switch (profile)
+ {
+ case MFX_PROFILE_AVC_CONSTRAINED_BASELINE:
+ return "Constrained Baseline";
+ case MFX_PROFILE_AVC_BASELINE:
+ return "Baseline";
+ case MFX_PROFILE_AVC_EXTENDED:
+ return "Extended";
+ case MFX_PROFILE_AVC_MAIN:
+ return "Main";
+ case MFX_PROFILE_AVC_CONSTRAINED_HIGH:
+ return "Constrained High";
+ case MFX_PROFILE_AVC_PROGRESSIVE_HIGH:
+ return "Progressive High";
+ case MFX_PROFILE_AVC_HIGH:
+ return "High";
+ case MFX_PROFILE_UNKNOWN:
+ default:
+ return NULL;
+ }
+}
+
+static const char* qsv_h264_level_xlat(int level)
+{
+ int i;
+ for (i = 0; hb_h264_level_names[i] != NULL; i++)
+ {
+ if (hb_h264_level_values[i] == level)
+ {
+ return hb_h264_level_names[i];
+ }
+ }
+ return NULL;
+}
+
+int qsv_enc_init(av_qsv_context *qsv, hb_work_private_t *pv)
+{
+ int i = 0;
+ mfxStatus sts;
+ hb_job_t *job = pv->job;
+
+ if (pv->init_done)
+ {
+ return 0;
+ }
+
+ if (qsv == NULL)
+ {
+ if (!pv->is_sys_mem)
+ {
+ hb_error("qsv_enc_init: decode enabled but no context!");
+ return 3;
+ }
+ job->qsv = qsv = av_mallocz(sizeof(av_qsv_context));
+ }
+
+ av_qsv_space *qsv_encode = qsv->enc_space;
+ if (qsv_encode == NULL)
+ {
+        // encode-only (no QSV decode)
+ if (pv->is_sys_mem)
+ {
+ // no need to use additional sync as encode only -> single thread
+ // XXX: this zeroes the session handle, so call it before MFXInit
+ av_qsv_add_context_usage(qsv, 0);
+
+ // initialize the session
+ qsv->impl = MFX_IMPL_AUTO_ANY;
+ qsv->ver.Major = AV_QSV_MSDK_VERSION_MAJOR;
+ qsv->ver.Minor = AV_QSV_MSDK_VERSION_MINOR;
+ sts = MFXInit(qsv->impl, &qsv->ver, &qsv->mfx_session);
+ if (sts != MFX_ERR_NONE)
+ {
+ hb_error("qsv_enc_init: MFXInit failed (%d)", sts);
+ *job->die = 1;
+ return -1;
+ }
+ }
+ qsv->enc_space = qsv_encode = &pv->enc_space;
+ }
+
+ if (!pv->is_sys_mem)
+ {
+ if (!pv->is_vpp_present && job->list_filter != NULL)
+ {
+ for (i = 0; i < hb_list_count(job->list_filter); i++)
+ {
+ hb_filter_object_t *filter = hb_list_item(job->list_filter, i);
+ if (filter->id == HB_FILTER_QSV_PRE ||
+ filter->id == HB_FILTER_QSV_POST ||
+ filter->id == HB_FILTER_QSV)
+ {
+ pv->is_vpp_present = 1;
+ break;
+ }
+ }
+ }
+
+ if (pv->is_vpp_present)
+ {
+ if (qsv->vpp_space == NULL)
+ {
+ return 2;
+ }
+ for (i = 0; i < av_qsv_list_count(qsv->vpp_space); i++)
+ {
+ av_qsv_space *vpp = av_qsv_list_item(qsv->vpp_space, i);
+ if (!vpp->is_init_done)
+ {
+ return 2;
+ }
+ }
+ }
+
+ av_qsv_space *dec_space = qsv->dec_space;
+ if (dec_space == NULL || !dec_space->is_init_done)
+ {
+ return 2;
+ }
+ }
+ else
+ {
+ pv->sws_context_to_nv12 = hb_sws_get_context(job->width, job->height,
+ AV_PIX_FMT_YUV420P,
+ job->width, job->height,
+ AV_PIX_FMT_NV12,
+ SWS_LANCZOS|SWS_ACCURATE_RND);
+ }
+
+ // allocate tasks
+ qsv_encode->p_buf_max_size = AV_QSV_BUF_SIZE_DEFAULT;
+ qsv_encode->tasks = av_qsv_list_init(HAVE_THREADS);
+ for (i = 0; i < pv->max_async_depth; i++)
+ {
+ av_qsv_task *task = av_mallocz(sizeof(av_qsv_task));
+ task->bs = av_mallocz(sizeof(mfxBitstream));
+ task->bs->Data = av_mallocz(sizeof(uint8_t) * qsv_encode->p_buf_max_size);
+ task->bs->MaxLength = qsv_encode->p_buf_max_size;
+ task->bs->DataLength = 0;
+ task->bs->DataOffset = 0;
+ av_qsv_list_add(qsv_encode->tasks, task);
+ }
+
+ // setup surface allocation
+ qsv_encode->m_mfxVideoParam.IOPattern = (pv->is_sys_mem ?
+ MFX_IOPATTERN_IN_SYSTEM_MEMORY :
+ MFX_IOPATTERN_IN_OPAQUE_MEMORY);
+ memset(&qsv_encode->request, 0, sizeof(mfxFrameAllocRequest) * 2);
+ sts = MFXVideoENCODE_QueryIOSurf(qsv->mfx_session,
+ &qsv_encode->m_mfxVideoParam,
+ &qsv_encode->request);
+ if (sts < MFX_ERR_NONE) // ignore warnings
+ {
+ hb_error("qsv_enc_init: MFXVideoENCODE_QueryIOSurf failed (%d)", sts);
+ *job->die = 1;
+ return -1;
+ }
+
+ // allocate surfaces
+ if (pv->is_sys_mem)
+ {
+ qsv_encode->surface_num = FFMIN(qsv_encode->request[0].NumFrameSuggested +
+ pv->max_async_depth, AV_QSV_SURFACE_NUM);
+ if (qsv_encode->surface_num <= 0)
+ {
+ qsv_encode->surface_num = AV_QSV_SURFACE_NUM;
+ }
+ for (i = 0; i < qsv_encode->surface_num; i++)
+ {
+ qsv_encode->p_surfaces[i] = av_mallocz(sizeof(mfxFrameSurface1));
+ AV_QSV_CHECK_POINTER(qsv_encode->p_surfaces[i], MFX_ERR_MEMORY_ALLOC);
+ memcpy(&(qsv_encode->p_surfaces[i]->Info),
+ &(qsv_encode->request[0].Info), sizeof(mfxFrameInfo));
+ }
+ }
+ else
+ {
+ av_qsv_space *in_space = qsv->dec_space;
+ if (pv->is_vpp_present)
+ {
+ // we get our input from VPP instead
+ in_space = av_qsv_list_item(qsv->vpp_space,
+ av_qsv_list_count(qsv->vpp_space) - 1);
+ }
+ // introduced in API 1.3
+ memset(&qsv_encode->ext_opaque_alloc, 0, sizeof(mfxExtOpaqueSurfaceAlloc));
+ qsv_encode->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
+ qsv_encode->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
+ qsv_encode->ext_opaque_alloc.In.Surfaces = in_space->p_surfaces;
+ qsv_encode->ext_opaque_alloc.In.NumSurface = in_space->surface_num;
+ qsv_encode->ext_opaque_alloc.In.Type = qsv_encode->request[0].Type;
+ qsv_encode->m_mfxVideoParam.ExtParam[qsv_encode->m_mfxVideoParam.NumExtParam++] = (mfxExtBuffer*)&qsv_encode->ext_opaque_alloc;
+ }
+
+ // allocate sync points
+ qsv_encode->sync_num = (qsv_encode->surface_num ?
+ FFMIN(qsv_encode->surface_num, AV_QSV_SYNC_NUM) :
+ AV_QSV_SYNC_NUM);
+ for (i = 0; i < qsv_encode->sync_num; i++)
+ {
+ qsv_encode->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
+ AV_QSV_CHECK_POINTER(qsv_encode->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
+ qsv_encode->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
+ AV_QSV_CHECK_POINTER(qsv_encode->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
+ }
+
+ // initialize the encoder
+ sts = MFXVideoENCODE_Init(qsv->mfx_session, &qsv_encode->m_mfxVideoParam);
+ if (sts < MFX_ERR_NONE) // ignore warnings
+ {
+ hb_error("qsv_enc_init: MFXVideoENCODE_Init failed (%d)", sts);
+ *job->die = 1;
+ return -1;
+ }
+ qsv_encode->is_init_done = 1;
+
+ pv->init_done = 1;
+ return 0;
+}
+
+/***********************************************************************
+ * encqsvInit
+ ***********************************************************************
+ *
+ **********************************************************************/
+int encqsvInit(hb_work_object_t *w, hb_job_t *job)
+{
+ hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
+ w->private_data = pv;
+
+ pv->job = job;
+ pv->is_sys_mem = !hb_qsv_decode_is_enabled(job);
+ pv->delayed_processing = hb_list_init();
+ pv->last_start = INT64_MIN;
+ pv->frames_in = 0;
+ pv->frames_out = 0;
+ pv->init_done = 0;
+ pv->is_vpp_present = 0;
+
+ // set up a re-usable mfxEncodeCtrl to force keyframes (e.g. for chapters)
+ pv->force_keyframe.QP = 0;
+ pv->force_keyframe.FrameType = MFX_FRAMETYPE_I|MFX_FRAMETYPE_IDR|MFX_FRAMETYPE_REF;
+ pv->force_keyframe.NumExtParam = 0;
+ pv->force_keyframe.NumPayload = 0;
+ pv->force_keyframe.ExtParam = NULL;
+ pv->force_keyframe.Payload = NULL;
+
+ pv->next_chapter.index = 0;
+ pv->next_chapter.start = INT64_MIN;
+
+ // default encoding parameters
+ if (hb_qsv_param_default(&pv->param, &pv->enc_space.m_mfxVideoParam))
+ {
+ hb_error("encqsvInit: hb_qsv_param_default failed");
+ return -1;
+ }
+
+ // set AsyncDepth to match that of decode and VPP
+ pv->param.videoParam->AsyncDepth = job->qsv_async_depth;
+
+ // enable and set colorimetry (video signal information)
+ pv->param.videoSignalInfo.ColourDescriptionPresent = 1;
+ switch (job->color_matrix_code)
+ {
+ case 4:
+ // custom
+ pv->param.videoSignalInfo.ColourPrimaries = job->color_prim;
+ pv->param.videoSignalInfo.TransferCharacteristics = job->color_transfer;
+ pv->param.videoSignalInfo.MatrixCoefficients = job->color_matrix;
+ break;
+ case 3:
+ // ITU BT.709 HD content
+ pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_BT709;
+ pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
+ pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_BT709;
+ break;
+ case 2:
+ // ITU BT.601 DVD or SD TV content (PAL)
+ pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_EBUTECH;
+ pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
+ pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_SMPTE170M;
+ break;
+ case 1:
+ // ITU BT.601 DVD or SD TV content (NTSC)
+ pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_SMPTEC;
+ pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
+ pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_SMPTE170M;
+ break;
+ default:
+ // detected during scan
+ pv->param.videoSignalInfo.ColourPrimaries = job->title->color_prim;
+ pv->param.videoSignalInfo.TransferCharacteristics = job->title->color_transfer;
+ pv->param.videoSignalInfo.MatrixCoefficients = job->title->color_matrix;
+ break;
+ }
+
+ // parse user-specified advanced options, if present
+ if (job->advanced_opts != NULL && job->advanced_opts[0] != '\0')
+ {
+ hb_dict_t *options_list;
+ hb_dict_entry_t *option = NULL;
+ options_list = hb_encopts_to_dict(job->advanced_opts, job->vcodec);
+ while ((option = hb_dict_next(options_list, option)) != NULL)
+ {
+ switch (hb_qsv_param_parse(&pv->param,
+ option->key, option->value, job->vcodec))
+ {
+ case HB_QSV_PARAM_OK:
+ break;
+
+ case HB_QSV_PARAM_BAD_NAME:
+ hb_log("encqsvInit: hb_qsv_param_parse: bad key %s",
+ option->key);
+ break;
+ case HB_QSV_PARAM_BAD_VALUE:
+ hb_log("encqsvInit: hb_qsv_param_parse: bad value %s for key %s",
+ option->value, option->key);
+ break;
+ case HB_QSV_PARAM_UNSUPPORTED:
+ hb_log("encqsvInit: hb_qsv_param_parse: unsupported option %s",
+ option->key);
+ break;
+
+ case HB_QSV_PARAM_ERROR:
+ default:
+ hb_log("encqsvInit: hb_qsv_param_parse: unknown error");
+ break;
+ }
+ }
+ hb_dict_free(&options_list);
+ }
+
+ // reload colorimetry in case values were set in advanced_opts
+ if (pv->param.videoSignalInfo.ColourDescriptionPresent)
+ {
+ job->color_matrix_code = 4;
+ job->color_prim = pv->param.videoSignalInfo.ColourPrimaries;
+ job->color_transfer = pv->param.videoSignalInfo.TransferCharacteristics;
+ job->color_matrix = pv->param.videoSignalInfo.MatrixCoefficients;
+ }
+ else
+ {
+ job->color_matrix_code = 0;
+ job->color_prim = HB_COLR_PRI_UNDEF;
+ job->color_transfer = HB_COLR_TRA_UNDEF;
+ job->color_matrix = HB_COLR_MAT_UNDEF;
+ }
+
+ // sanitize values that may exceed the Media SDK variable size
+ int64_t vrate, vrate_base;
+ int64_t par_width, par_height;
+ hb_limit_rational64(&vrate, &vrate_base,
+ job->vrate, job->vrate_base, UINT32_MAX);
+ hb_limit_rational64(&par_width, &par_height,
+ job->anamorphic.par_width,
+ job->anamorphic.par_height, UINT16_MAX);
+
+ // some encoding parameters are used by filters to configure their output
+ if (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
+ {
+ job->qsv_enc_info.align_height = AV_QSV_ALIGN32(job->height);
+ }
+ else
+ {
+ job->qsv_enc_info.align_height = AV_QSV_ALIGN16(job->height);
+ }
+ job->qsv_enc_info.align_width = AV_QSV_ALIGN16(job->width);
+ job->qsv_enc_info.pic_struct = pv->param.videoParam->mfx.FrameInfo.PicStruct;
+ job->qsv_enc_info.is_init_done = 1;
+
+ // encode to H.264 and set FrameInfo
+ pv->param.videoParam->mfx.CodecId = MFX_CODEC_AVC;
+ pv->param.videoParam->mfx.CodecLevel = MFX_LEVEL_UNKNOWN;
+ pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_UNKNOWN;
+ pv->param.videoParam->mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
+ pv->param.videoParam->mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+ pv->param.videoParam->mfx.FrameInfo.FrameRateExtN = vrate;
+ pv->param.videoParam->mfx.FrameInfo.FrameRateExtD = vrate_base;
+ pv->param.videoParam->mfx.FrameInfo.AspectRatioW = par_width;
+ pv->param.videoParam->mfx.FrameInfo.AspectRatioH = par_height;
+ pv->param.videoParam->mfx.FrameInfo.CropX = 0;
+ pv->param.videoParam->mfx.FrameInfo.CropY = 0;
+ pv->param.videoParam->mfx.FrameInfo.CropW = job->width;
+ pv->param.videoParam->mfx.FrameInfo.CropH = job->height;
+ pv->param.videoParam->mfx.FrameInfo.PicStruct = job->qsv_enc_info.pic_struct;
+ pv->param.videoParam->mfx.FrameInfo.Width = job->qsv_enc_info.align_width;
+ pv->param.videoParam->mfx.FrameInfo.Height = job->qsv_enc_info.align_height;
+
+ // set H.264 profile and level
+ if (job->h264_profile != NULL && job->h264_profile[0] != '\0' &&
+ strcasecmp(job->h264_profile, "auto"))
+ {
+ if (!strcasecmp(job->h264_profile, "baseline"))
+ {
+ pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_BASELINE;
+ }
+ else if (!strcasecmp(job->h264_profile, "main"))
+ {
+ pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_MAIN;
+ }
+ else if (!strcasecmp(job->h264_profile, "high"))
+ {
+ pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_HIGH;
+ }
+ else
+ {
+ hb_error("encqsvInit: bad profile %s", job->h264_profile);
+ return -1;
+ }
+ }
+ if (job->h264_level != NULL && job->h264_level[0] != '\0' &&
+ strcasecmp(job->h264_level, "auto"))
+ {
+ int err;
+ int i = hb_qsv_atoindex(hb_h264_level_names, job->h264_level, &err);
+ if (err || i >= (sizeof(hb_h264_level_values) /
+ sizeof(hb_h264_level_values[0])))
+ {
+ hb_error("encqsvInit: bad level %s", job->h264_level);
+ return -1;
+ }
+ else if (hb_qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
+ {
+ pv->param.videoParam->mfx.CodecLevel = HB_QSV_CLIP3(MFX_LEVEL_AVC_1,
+ MFX_LEVEL_AVC_52,
+ hb_h264_level_values[i]);
+ }
+ else
+ {
+ // Media SDK API < 1.6, MFX_LEVEL_AVC_52 unsupported
+ pv->param.videoParam->mfx.CodecLevel = HB_QSV_CLIP3(MFX_LEVEL_AVC_1,
+ MFX_LEVEL_AVC_51,
+ hb_h264_level_values[i]);
+ }
+ }
+
+ // interlaced encoding is not always possible
+ if (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
+ {
+ if (pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_CONSTRAINED_BASELINE ||
+ pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_BASELINE ||
+ pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_PROGRESSIVE_HIGH)
+ {
+ hb_error("encqsvInit: profile %s doesn't support interlaced encoding",
+ qsv_h264_profile_xlat(pv->param.videoParam->mfx.CodecProfile));
+ return -1;
+ }
+ if ((pv->param.videoParam->mfx.CodecLevel >= MFX_LEVEL_AVC_1b &&
+ pv->param.videoParam->mfx.CodecLevel <= MFX_LEVEL_AVC_2) ||
+ (pv->param.videoParam->mfx.CodecLevel >= MFX_LEVEL_AVC_42))
+ {
+ hb_error("encqsvInit: level %s doesn't support interlaced encoding",
+ qsv_h264_level_xlat(pv->param.videoParam->mfx.CodecLevel));
+ return -1;
+ }
+ }
+
+    // set rate control parameters
+ if (job->vquality >= 0)
+ {
+ // introduced in API 1.1
+ pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_CQP;
+ pv->param.videoParam->mfx.QPI = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[0]);
+ pv->param.videoParam->mfx.QPP = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[1]);
+ pv->param.videoParam->mfx.QPB = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[2]);
+ }
+ else if (job->vbitrate > 0)
+ {
+ // sanitize lookahead
+ if (!(hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_LOOKAHEAD))
+ {
+ // lookahead not supported
+ pv->param.rc.lookahead = 0;
+ }
+ else if (pv->param.rc.lookahead > 0 &&
+ pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
+ {
+ // user force-enabled lookahead but we can't use it
+ hb_log("encqsvInit: MFX_RATECONTROL_LA not used (LookAhead is progressive-only)");
+ pv->param.rc.lookahead = 0;
+ }
+ else if (pv->param.rc.lookahead < 0)
+ {
+ if (pv->param.rc.vbv_max_bitrate > 0 ||
+ pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
+ {
+ // lookahead doesn't support VBV or interlaced encoding
+ pv->param.rc.lookahead = 0;
+ }
+ else
+ {
+ // set automatically based on target usage
+ pv->param.rc.lookahead = (pv->param.videoParam->mfx.TargetUsage <= MFX_TARGETUSAGE_2);
+ }
+ }
+ else
+ {
+ // user force-enabled or force-disabled lookahead
+ pv->param.rc.lookahead = !!pv->param.rc.lookahead;
+ }
+ if (pv->param.rc.lookahead)
+ {
+ // introduced in API 1.7
+ pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_LA;
+ pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
+ if (pv->param.rc.vbv_max_bitrate > 0)
+ {
+ hb_log("encqsvInit: MFX_RATECONTROL_LA, ignoring VBV");
+ }
+ }
+ else if (job->vbitrate == pv->param.rc.vbv_max_bitrate)
+ {
+ // introduced in API 1.0
+ pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_CBR;
+ pv->param.videoParam->mfx.MaxKbps = job->vbitrate;
+ pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
+ pv->param.videoParam->mfx.BufferSizeInKB = (pv->param.rc.vbv_buffer_size / 8);
+            // only set BufferSizeInKB and InitialDelayInKB if bufsize is set
+ // else Media SDK will pick some good values for us automatically
+ if (pv->param.rc.vbv_buffer_size > 0)
+ {
+ if (pv->param.rc.vbv_buffer_init > 1.0)
+ {
+ pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_init / 8);
+ }
+ else
+ {
+ pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_size *
+ pv->param.rc.vbv_buffer_init / 8);
+ }
+ pv->param.videoParam->mfx.BufferSizeInKB = (pv->param.rc.vbv_buffer_size / 8);
+ }
+ }
+ else if (pv->param.rc.vbv_max_bitrate > 0)
+ {
+ // introduced in API 1.0
+ pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_VBR;
+ pv->param.videoParam->mfx.MaxKbps = pv->param.rc.vbv_max_bitrate;
+ pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
+            // only set BufferSizeInKB and InitialDelayInKB if bufsize is set
+ // else Media SDK will pick some good values for us automatically
+ if (pv->param.rc.vbv_buffer_size > 0)
+ {
+ if (pv->param.rc.vbv_buffer_init > 1.0)
+ {
+ pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_init / 8);
+ }
+ else
+ {
+ pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_size *
+ pv->param.rc.vbv_buffer_init / 8);
+ }
+ pv->param.videoParam->mfx.BufferSizeInKB = (pv->param.rc.vbv_buffer_size / 8);
+ }
+ }
+ else
+ {
+ // introduced in API 1.3
+ // Media SDK will set Accuracy and Convergence for us automatically
+ pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_AVBR;
+ pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
+ }
+ }
+ else
+ {
+ hb_error("encqsvInit: invalid rate control (%d, %d)",
+ job->vquality, job->vbitrate);
+ return -1;
+ }
+
+ // set the keyframe interval
+ if (pv->param.gop.gop_pic_size < 0)
+ {
+ int rate = (int)((double)job->vrate / (double)job->vrate_base + 0.5);
+ if (pv->param.videoParam->mfx.RateControlMethod == MFX_RATECONTROL_CQP)
+ {
+ // ensure B-pyramid is enabled for CQP on Haswell
+ pv->param.gop.gop_pic_size = 32;
+ }
+ else
+ {
+ // set the keyframe interval based on the framerate
+ pv->param.gop.gop_pic_size = 5 * rate + 1;
+ }
+ }
+ pv->param.videoParam->mfx.GopPicSize = pv->param.gop.gop_pic_size;
+
+ // sanitize some settings that affect memory consumption
+ if (!pv->is_sys_mem)
+ {
+ // limit these to avoid running out of resources (causes hang)
+ pv->param.videoParam->mfx.GopRefDist = FFMIN(pv->param.videoParam->mfx.GopRefDist,
+ pv->param.rc.lookahead ? 8 : 16);
+ pv->param.codingOption2.LookAheadDepth = FFMIN(pv->param.codingOption2.LookAheadDepth,
+ pv->param.rc.lookahead ? 48 - pv->param.videoParam->mfx.GopRefDist : 0);
+ }
+ else
+ {
+ // encode-only is a bit less sensitive to memory issues
+ pv->param.videoParam->mfx.GopRefDist = FFMIN(pv->param.videoParam->mfx.GopRefDist, 16);
+ pv->param.codingOption2.LookAheadDepth = FFMIN(pv->param.codingOption2.LookAheadDepth,
+ pv->param.rc.lookahead ? 60 : 0);
+ }
+
+ /*
+ * init a dummy encode-only session to get the SPS/PPS
+ * and the final output settings sanitized by Media SDK
+ * this is fine since the actual encode will use the same
+ * values for all parameters relevant to the H.264 bitstream
+ */
+ mfxIMPL impl;
+ mfxStatus err;
+ mfxVersion version;
+ mfxVideoParam videoParam;
+ mfxExtBuffer* ExtParamArray[3];
+ mfxSession session = (mfxSession)0;
+ mfxExtCodingOption option1_buf, *option1 = &option1_buf;
+ mfxExtCodingOption2 option2_buf, *option2 = &option2_buf;
+ mfxExtCodingOptionSPSPPS sps_pps_buf, *sps_pps = &sps_pps_buf;
+ impl = MFX_IMPL_AUTO_ANY|MFX_IMPL_VIA_ANY;
+ version.Major = HB_QSV_MINVERSION_MAJOR;
+ version.Minor = HB_QSV_MINVERSION_MINOR;
+ err = MFXInit(impl, &version, &session);
+ if (err != MFX_ERR_NONE)
+ {
+ hb_error("encqsvInit: MFXInit failed (%d)", err);
+ return -1;
+ }
+ err = MFXVideoENCODE_Init(session, pv->param.videoParam);
+ if (err < MFX_ERR_NONE) // ignore warnings
+ {
+ hb_error("encqsvInit: MFXVideoENCODE_Init failed (%d)", err);
+ MFXClose(session);
+ return -1;
+ }
+ memset(&videoParam, 0, sizeof(mfxVideoParam));
+ videoParam.ExtParam = ExtParamArray;
+ videoParam.NumExtParam = 0;
+ // introduced in API 1.3
+ memset(sps_pps, 0, sizeof(mfxExtCodingOptionSPSPPS));
+ sps_pps->Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS;
+ sps_pps->Header.BufferSz = sizeof(mfxExtCodingOptionSPSPPS);
+ sps_pps->SPSId = 0;
+ sps_pps->SPSBuffer = w->config->h264.sps;
+ sps_pps->SPSBufSize = sizeof(w->config->h264.sps);
+ sps_pps->PPSId = 0;
+ sps_pps->PPSBuffer = w->config->h264.pps;
+ sps_pps->PPSBufSize = sizeof(w->config->h264.pps);
+ videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)sps_pps;
+ // introduced in API 1.0
+ memset(option1, 0, sizeof(mfxExtCodingOption));
+ option1->Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
+ option1->Header.BufferSz = sizeof(mfxExtCodingOption);
+ videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)option1;
+ // introduced in API 1.6
+ memset(option2, 0, sizeof(mfxExtCodingOption2));
+ option2->Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
+ option2->Header.BufferSz = sizeof(mfxExtCodingOption2);
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
+ {
+ // attach to get the final output mfxExtCodingOption2 settings
+ videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)option2;
+ }
+ err = MFXVideoENCODE_GetVideoParam(session, &videoParam);
+ if (err == MFX_ERR_NONE)
+ {
+ // remove 32-bit NAL prefix (0x00 0x00 0x00 0x01)
+ w->config->h264.sps_length = sps_pps->SPSBufSize - 4;
+ memmove(w->config->h264.sps, w->config->h264.sps + 4,
+ w->config->h264.sps_length);
+ w->config->h264.pps_length = sps_pps->PPSBufSize - 4;
+ memmove(w->config->h264.pps, w->config->h264.pps + 4,
+ w->config->h264.pps_length);
+ }
+ else
+ {
+ hb_error("encqsvInit: MFXVideoENCODE_GetVideoParam failed (%d)", err);
+ MFXVideoENCODE_Close(session);
+ MFXClose (session);
+ return -1;
+ }
+
+ // log implementation details before closing this session
+ if (pv->is_sys_mem)
+ {
+ hb_log("encqsvInit: using encode-only path");
+ }
+ if ((MFXQueryIMPL (session, &impl) == MFX_ERR_NONE) &&
+ (MFXQueryVersion(session, &version) == MFX_ERR_NONE))
+ {
+ hb_log("encqsvInit: using %s implementation (%"PRIu16".%"PRIu16")",
+ impl == MFX_IMPL_SOFTWARE ? "software" : "hardware",
+ version.Major, version.Minor);
+ }
+ MFXVideoENCODE_Close(session);
+ MFXClose (session);
+
+ // log main output settings
+ hb_log("encqsvInit: TargetUsage %"PRIu16" AsyncDepth %"PRIu16"",
+ videoParam.mfx.TargetUsage, videoParam.AsyncDepth);
+ hb_log("encqsvInit: GopRefDist %"PRIu16" GopPicSize %"PRIu16" NumRefFrame %"PRIu16"",
+ videoParam.mfx.GopRefDist, videoParam.mfx.GopPicSize, videoParam.mfx.NumRefFrame);
+ if (videoParam.mfx.RateControlMethod == MFX_RATECONTROL_CQP)
+ {
+ char qpi[7], qpp[9], qpb[9];
+ snprintf(qpi, sizeof(qpi), "QPI %"PRIu16"", videoParam.mfx.QPI);
+ snprintf(qpp, sizeof(qpp), " QPP %"PRIu16"", videoParam.mfx.QPP);
+ snprintf(qpb, sizeof(qpb), " QPB %"PRIu16"", videoParam.mfx.QPB);
+ hb_log("encqsvInit: RateControlMethod CQP with %s%s%s", qpi,
+ videoParam.mfx.GopPicSize > 1 ? qpp : "",
+ videoParam.mfx.GopRefDist > 1 ? qpb : "");
+ }
+ else
+ {
+ switch (videoParam.mfx.RateControlMethod)
+ {
+ case MFX_RATECONTROL_AVBR:
+ hb_log("encqsvInit: RateControlMethod AVBR TargetKbps %"PRIu16"",
+ videoParam.mfx.TargetKbps);
+ break;
+ case MFX_RATECONTROL_LA:
+ hb_log("encqsvInit: RateControlMethod LA TargetKbps %"PRIu16" LookAheadDepth %"PRIu16"",
+ videoParam.mfx.TargetKbps, option2->LookAheadDepth);
+ break;
+ case MFX_RATECONTROL_CBR:
+ case MFX_RATECONTROL_VBR:
+ hb_log("encqsvInit: RateControlMethod %s TargetKbps %"PRIu16" MaxKbps %"PRIu16" BufferSizeInKB %"PRIu16" InitialDelayInKB %"PRIu16"",
+ videoParam.mfx.RateControlMethod == MFX_RATECONTROL_CBR ? "CBR" : "VBR",
+ videoParam.mfx.TargetKbps, videoParam.mfx.MaxKbps,
+ videoParam.mfx.BufferSizeInKB, videoParam.mfx.InitialDelayInKB);
+ break;
+ default:
+ hb_log("encqsvInit: invalid rate control method %"PRIu16"",
+ videoParam.mfx.RateControlMethod);
+ return -1;
+ }
+ }
+ switch (videoParam.mfx.FrameInfo.PicStruct)
+ {
+ case MFX_PICSTRUCT_PROGRESSIVE:
+ hb_log("encqsvInit: PicStruct progressive");
+ break;
+ case MFX_PICSTRUCT_FIELD_TFF:
+ hb_log("encqsvInit: PicStruct top field first");
+ break;
+ case MFX_PICSTRUCT_FIELD_BFF:
+ hb_log("encqsvInit: PicStruct bottom field first");
+ break;
+ default:
+ hb_error("encqsvInit: invalid PicStruct value 0x%"PRIx16"",
+ videoParam.mfx.FrameInfo.PicStruct);
+ return -1;
+ }
+ const char *cavlc, *rdopt;
+ switch (option1->CAVLC)
+ {
+ case MFX_CODINGOPTION_ON:
+ cavlc = "on";
+ break;
+ case MFX_CODINGOPTION_OFF:
+ cavlc = "off";
+ break;
+ default:
+ hb_error("encqsvInit: invalid CAVLC value %"PRIu16"",
+ option1->CAVLC);
+ return -1;
+ }
+ switch (option1->RateDistortionOpt)
+ {
+ case MFX_CODINGOPTION_ON:
+ rdopt = "on";
+ break;
+ case MFX_CODINGOPTION_OFF:
+ rdopt = "off";
+ break;
+ default:
+ hb_error("encqsvInit: invalid RateDistortionOpt value %"PRIu16"",
+ option1->RateDistortionOpt);
+ return -1;
+ }
+ hb_log("encqsvInit: CAVLC %s RateDistortionOpt %s", cavlc, rdopt);
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_BRC)
+ {
+ const char *mbbrc, *extbrc;
+ switch (option2->MBBRC)
+ {
+ case MFX_CODINGOPTION_ON:
+ mbbrc = "on";
+ break;
+ case MFX_CODINGOPTION_OFF:
+ mbbrc = "off";
+ break;
+ case MFX_CODINGOPTION_ADAPTIVE:
+ mbbrc = "adaptive";
+ break;
+ case MFX_CODINGOPTION_UNKNOWN:
+ mbbrc = "unknown (auto)";
+ break;
+ default:
+ hb_error("encqsvInit: invalid MBBRC value %"PRIu16"",
+ option2->MBBRC);
+ return -1;
+ }
+ switch (option2->ExtBRC)
+ {
+ case MFX_CODINGOPTION_ON:
+ extbrc = "on";
+ break;
+ case MFX_CODINGOPTION_OFF:
+ extbrc = "off";
+ break;
+ case MFX_CODINGOPTION_ADAPTIVE:
+ extbrc = "adaptive";
+ break;
+ case MFX_CODINGOPTION_UNKNOWN:
+ extbrc = "unknown (auto)";
+ break;
+ default:
+ hb_error("encqsvInit: invalid ExtBRC value %"PRIu16"",
+ option2->ExtBRC);
+ return -1;
+ }
+ hb_log("encqsvInit: MBBRC %s ExtBRC %s", mbbrc, extbrc);
+ }
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_TRELLIS)
+ {
+ switch (option2->Trellis)
+ {
+ case MFX_TRELLIS_OFF:
+ hb_log("encqsvInit: Trellis off");
+ break;
+ case MFX_TRELLIS_UNKNOWN:
+ hb_log("encqsvInit: Trellis unknown (auto)");
+ break;
+ default:
+ hb_log("encqsvInit: Trellis on (%s%s%s)",
+ option2->Trellis & MFX_TRELLIS_I ? "I" : "",
+ option2->Trellis & MFX_TRELLIS_P ? "P" : "",
+ option2->Trellis & MFX_TRELLIS_B ? "B" : "");
+ break;
+ }
+ }
+ hb_log("encqsvInit: H.264 profile %s @ level %s",
+ qsv_h264_profile_xlat(videoParam.mfx.CodecProfile),
+ qsv_h264_level_xlat (videoParam.mfx.CodecLevel));
+
+ // AsyncDepth has now been set and/or modified by Media SDK
+ pv->max_async_depth = videoParam.AsyncDepth;
+ pv->async_depth = 0;
+
+ // check whether B-frames are used
+ switch (videoParam.mfx.CodecProfile)
+ {
+ case MFX_PROFILE_AVC_BASELINE:
+ case MFX_PROFILE_AVC_CONSTRAINED_HIGH:
+ case MFX_PROFILE_AVC_CONSTRAINED_BASELINE:
+ pv->bfrm_delay = 0;
+ break;
+ default:
+ pv->bfrm_delay = 1;
+ break;
+ }
+ // sanitize
+ pv->bfrm_delay = FFMIN(pv->bfrm_delay, videoParam.mfx.GopRefDist - 1);
+ pv->bfrm_delay = FFMIN(pv->bfrm_delay, videoParam.mfx.GopPicSize - 2);
+ pv->bfrm_delay = FFMAX(pv->bfrm_delay, 0);
+ // let the muxer know whether to expect B-frames or not
+ job->areBframes = !!pv->bfrm_delay;
+ // check whether we need to generate DTS ourselves (MSDK API < 1.6 or VFR)
+ pv->bfrm_workaround = job->cfr != 1 || !(hb_qsv_info->capabilities &
+ HB_QSV_CAP_MSDK_API_1_6);
+ if (pv->bfrm_delay && pv->bfrm_workaround)
+ {
+ pv->bfrm_workaround = 1;
+ pv->list_dts = hb_list_init();
+ }
+ else
+ {
+ pv->bfrm_workaround = 0;
+ pv->list_dts = NULL;
+ }
+
+ return 0;
+}
+
+void encqsvClose( hb_work_object_t * w )
+{
+ int i = 0;
+ hb_work_private_t * pv = w->private_data;
+
+ hb_log( "enc_qsv done: frames: %u in, %u out", pv->frames_in, pv->frames_out );
+
+    // if using system memory (encode-only), additionally free the surfaces
+ if( pv && pv->job && pv->job->qsv &&
+ pv->job->qsv->is_context_active ){
+
+ av_qsv_context *qsv = pv->job->qsv;
+
+ if(qsv && qsv->enc_space){
+ av_qsv_space* qsv_encode = qsv->enc_space;
+ if(qsv_encode->is_init_done){
+ if(pv->is_sys_mem){
+ if( qsv_encode && qsv_encode->surface_num > 0)
+ for (i = 0; i < qsv_encode->surface_num; i++){
+ if( qsv_encode->p_surfaces[i]->Data.Y){
+ free(qsv_encode->p_surfaces[i]->Data.Y);
+ qsv_encode->p_surfaces[i]->Data.Y = 0;
+ }
+ if( qsv_encode->p_surfaces[i]->Data.VU){
+ free(qsv_encode->p_surfaces[i]->Data.VU);
+ qsv_encode->p_surfaces[i]->Data.VU = 0;
+ }
+ if(qsv_encode->p_surfaces[i])
+ av_freep(qsv_encode->p_surfaces[i]);
+ }
+ qsv_encode->surface_num = 0;
+
+ sws_freeContext(pv->sws_context_to_nv12);
+ }
+
+ for (i = av_qsv_list_count(qsv_encode->tasks); i > 1; i--){
+ av_qsv_task* task = av_qsv_list_item(qsv_encode->tasks,i-1);
+ if(task && task->bs){
+ av_freep(&task->bs->Data);
+ av_freep(&task->bs);
+ av_qsv_list_rem(qsv_encode->tasks,task);
+ }
+ }
+ av_qsv_list_close(&qsv_encode->tasks);
+
+ for (i = 0; i < qsv_encode->surface_num; i++){
+ av_freep(&qsv_encode->p_surfaces[i]);
+ }
+ qsv_encode->surface_num = 0;
+
+ for (i = 0; i < qsv_encode->sync_num; i++){
+ av_freep(&qsv_encode->p_syncp[i]->p_sync);
+ av_freep(&qsv_encode->p_syncp[i]);
+ }
+ qsv_encode->sync_num = 0;
+
+ qsv_encode->is_init_done = 0;
+ }
+ }
+
+ if(qsv){
+            // closing the common stuff
+ av_qsv_context_clean(qsv);
+
+ if(pv->is_sys_mem){
+ av_freep(&qsv);
+ }
+ }
+ }
+
+ if (pv != NULL)
+ {
+ if (pv->list_dts != NULL)
+ {
+ while (hb_list_count(pv->list_dts) > 0)
+ {
+ int64_t *item = hb_list_item(pv->list_dts, 0);
+ hb_list_rem(pv->list_dts, item);
+ free(item);
+ }
+ hb_list_close(&pv->list_dts);
+ }
+ }
+
+ free( pv );
+ w->private_data = NULL;
+}
+
+int encqsvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
+{
+ hb_work_private_t * pv = w->private_data;
+ hb_job_t * job = pv->job;
+ hb_buffer_t * in = *buf_in, *buf;
+ av_qsv_context *qsv = job->qsv;
+ av_qsv_space* qsv_encode;
+ hb_buffer_t *last_buf = NULL;
+ mfxStatus sts = MFX_ERR_NONE;
+ int is_end = 0;
+ av_qsv_list* received_item = 0;
+ av_qsv_stage* stage = 0;
+
+ while(1){
+ int ret = qsv_enc_init(qsv, pv);
+ qsv = job->qsv;
+ qsv_encode = qsv->enc_space;
+ if(ret >= 2)
+ av_qsv_sleep(1);
+ else
+ break;
+ }
+ *buf_out = NULL;
+
+ if( in->size <= 0 )
+ {
+        // end of stream: still need to flush the delayed frames
+ *buf_in = NULL;
+ is_end = 1;
+ }
+
+    // input comes from decode; whenever we're called, there is something to process
+ while (1)
+ {
+ {
+ mfxEncodeCtrl *work_control = NULL;
+ mfxFrameSurface1 *work_surface = NULL;
+
+ if (!is_end)
+ {
+ if (pv->is_sys_mem)
+ {
+ int surface_idx = av_qsv_get_free_surface(qsv_encode, qsv,
+ &qsv_encode->request[0].Info, QSV_PART_ANY);
+ work_surface = qsv_encode->p_surfaces[surface_idx];
+
+ if (work_surface->Data.Y == NULL)
+ {
+                    // NV12: 4:2:0 chroma, 12 bits per pixel
+ work_surface->Data.Pitch = pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.Width;
+ work_surface->Data.Y = calloc(1,
+ pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.Width *
+ pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.Height);
+ work_surface->Data.VU = calloc(1,
+ pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.Width *
+ pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.Height / 2);
+ }
+ qsv_yuv420_to_nv12(pv->sws_context_to_nv12, work_surface, in);
+ }
+ else
+ {
+ received_item = in->qsv_details.qsv_atom;
+ stage = av_qsv_get_last_stage(received_item);
+ work_surface = stage->out.p_surface;
+
+ // don't let qsv->dts_seq grow needlessly
+ av_qsv_dts_pop(qsv);
+ }
+
+ work_surface->Data.TimeStamp = in->s.start;
+
+ /*
+ * Debugging code to check that the upstream modules have generated
+ * a continuous, self-consistent frame stream.
+ */
+ int64_t start = work_surface->Data.TimeStamp;
+ if (pv->last_start > start)
+ {
+ hb_log("encqsvWork: input continuity error, last start %"PRId64" start %"PRId64"",
+ pv->last_start, start);
+ }
+ pv->last_start = start;
+
+ // for DTS generation (when MSDK API < 1.6 or VFR)
+ if (pv->bfrm_delay && pv->bfrm_workaround)
+ {
+ if (pv->frames_in <= BFRM_DELAY_MAX)
+ {
+ pv->init_pts[pv->frames_in] = work_surface->Data.TimeStamp;
+ }
+ if (pv->frames_in)
+ {
+ hb_qsv_add_new_dts(pv->list_dts,
+ work_surface->Data.TimeStamp);
+ }
+ }
+
+ /*
+ * Chapters have to start with a keyframe so request that this
+ * frame be coded as IDR. Since there may be several frames
+ * buffered in the encoder, remember the timestamp so when this
+ * frame finally pops out of the encoder we'll mark its buffer
+ * as the start of a chapter.
+ */
+ if (in->s.new_chap > 0 && job->chapter_markers)
+ {
+ if (!pv->next_chapter.index)
+ {
+ pv->next_chapter.start = work_surface->Data.TimeStamp;
+ pv->next_chapter.index = in->s.new_chap;
+ work_control = &pv->force_keyframe;
+ }
+ else
+ {
+ // however unlikely, this can happen in theory
+ hb_log("encqsvWork: got chapter %d before we could write chapter %d, dropping marker",
+ in->s.new_chap, pv->next_chapter.index);
+ }
+ // don't let 'work_loop' put a chapter mark on the wrong buffer
+ in->s.new_chap = 0;
+ }
+
+ /*
+ * If interlaced encoding is requested during encoder initialization,
+ * but the input mfxFrameSurface1 is flagged as progressive here,
+ * the output bitstream will be progressive (according to MediaInfo).
+ *
+ * Assume the user knows what he's doing (say he is e.g. encoding a
+ * progressive-flagged source using interlaced compression - he may
+ * well have a good reason to do so; mis-flagged sources do exist).
+ */
+ work_surface->Info.PicStruct = pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.PicStruct;
+ }
+ else{
+ work_surface = NULL;
+ received_item = NULL;
+ }
+ int sync_idx = av_qsv_get_free_sync( qsv_encode, qsv );
+ if (sync_idx == -1)
+ {
+ hb_error("qsv: Not enough resources allocated for QSV encode");
+ return 0;
+ }
+ av_qsv_task *task = av_qsv_list_item( qsv_encode->tasks, pv->async_depth );
+
+ for (;;)
+ {
+                // Encode a frame asynchronously (returns immediately)
+ sts = MFXVideoENCODE_EncodeFrameAsync(qsv->mfx_session,
+ work_control, work_surface, task->bs,
+ qsv_encode->p_syncp[sync_idx]->p_sync);
+
+ if (MFX_ERR_MORE_DATA == sts || (MFX_ERR_NONE <= sts && MFX_WRN_DEVICE_BUSY != sts))
+ if (work_surface && !pv->is_sys_mem)
+ ff_qsv_atomic_dec(&work_surface->Data.Locked);
+
+ if( MFX_ERR_MORE_DATA == sts ){
+ ff_qsv_atomic_dec(&qsv_encode->p_syncp[sync_idx]->in_use);
+ if(work_surface && received_item)
+ hb_list_add(pv->delayed_processing, received_item);
+ break;
+ }
+
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ if (MFX_ERR_NONE <= sts /*&& !syncpE*/) // repeat the call if warning and no output
+ {
+ if (MFX_WRN_DEVICE_BUSY == sts){
+ av_qsv_sleep(10); // wait if device is busy
+ continue;
+ }
+
+ av_qsv_stage* new_stage = av_qsv_stage_init();
+ new_stage->type = AV_QSV_ENCODE;
+ new_stage->in.p_surface = work_surface;
+ new_stage->out.sync = qsv_encode->p_syncp[sync_idx];
+
+ new_stage->out.p_bs = task->bs;//qsv_encode->bs;
+ task->stage = new_stage;
+
+ pv->async_depth++;
+
+ if(received_item){
+ av_qsv_add_stagee( &received_item, new_stage,HAVE_THREADS );
+ }
+ else{
+                    // flushing at the end of the stream
+ int pipe_idx = av_qsv_list_add( qsv->pipes, av_qsv_list_init(HAVE_THREADS) );
+ av_qsv_list* list_item = av_qsv_list_item( qsv->pipes, pipe_idx );
+ av_qsv_add_stagee( &list_item, new_stage,HAVE_THREADS );
+ }
+
+ int i = 0;
+ for(i=hb_list_count(pv->delayed_processing); i > 0;i--){
+ hb_list_t *item = hb_list_item(pv->delayed_processing,i-1);
+ if(item){
+ hb_list_rem(pv->delayed_processing,item);
+ av_qsv_flush_stages(qsv->pipes, &item);
+ }
+ }
+
+ break;
+ }
+
+ ff_qsv_atomic_dec(&qsv_encode->p_syncp[sync_idx]->in_use);
+
+ if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
+ DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+
+ break;
+ }
+ }
+
+ buf = NULL;
+
+ do{
+
+ if(pv->async_depth==0) break;
+
+        // stay within Media SDK's async depth limit, or flush everything at the end of the stream
+ if( (pv->async_depth >= pv->max_async_depth) || is_end ){
+
+ pv->async_depth--;
+
+ av_qsv_task *task = av_qsv_list_item( qsv_encode->tasks, 0 );
+ av_qsv_stage* stage = task->stage;
+ av_qsv_list* this_pipe = av_qsv_pipe_by_stage(qsv->pipes,stage);
+ sts = MFX_ERR_NONE;
+
+            // this is the only place we need to wait for an operation to complete, hence SyncOperation;
+            // after this step we continue working with the bitstream (muxing, etc.)
+ av_qsv_wait_on_sync( qsv,stage );
+
+ if(task->bs->DataLength>0){
+ av_qsv_flush_stages( qsv->pipes, &this_pipe );
+
+ // see nal_encode
+ buf = hb_video_buffer_init( job->width, job->height );
+ buf->size = 0;
+ buf->s.frametype = 0;
+
+                    // mapping of FrameType(s)
+ if(task->bs->FrameType & MFX_FRAMETYPE_IDR ) buf->s.frametype = HB_FRAME_IDR;
+ else
+ if(task->bs->FrameType & MFX_FRAMETYPE_I ) buf->s.frametype = HB_FRAME_I;
+ else
+ if(task->bs->FrameType & MFX_FRAMETYPE_P ) buf->s.frametype = HB_FRAME_P;
+ else
+ if(task->bs->FrameType & MFX_FRAMETYPE_B ) buf->s.frametype = HB_FRAME_B;
+
+ if(task->bs->FrameType & MFX_FRAMETYPE_REF ) buf->s.flags = HB_FRAME_REF;
+
+ parse_nalus(task->bs->Data + task->bs->DataOffset,task->bs->DataLength, buf, pv->frames_out);
+
+ if ( last_buf == NULL )
+ *buf_out = buf;
+ else
+ last_buf->next = buf;
+ last_buf = buf;
+
+                    // simple for now, but check Media SDK's TimeStampCalc later
+ int64_t duration = ((double)pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD /
+ (double)pv->enc_space.m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN) * 90000.;
+
+ // start -> PTS
+ // renderOffset -> DTS
+ buf->s.start = buf->s.renderOffset = task->bs->TimeStamp;
+ buf->s.stop = buf->s.start + duration;
+ buf->s.duration = duration;
+ if (pv->bfrm_delay)
+ {
+ if (!pv->bfrm_workaround)
+ {
+ buf->s.renderOffset = task->bs->DecodeTimeStamp;
+ }
+ else
+ {
+ // MSDK API < 1.6 or VFR, so generate our own DTS
+ if ((pv->frames_out == 0) &&
+ (hb_qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6) &&
+ (hb_qsv_info->capabilities & HB_QSV_CAP_H264_BPYRAMID))
+ {
+ // with B-pyramid, the delay may be more than 1 frame,
+ // so compute the actual delay based on the initial DTS
+ // provided by MSDK; also, account for rounding errors
+ // (e.g. 24000/1001 fps @ 90kHz -> 3753.75 ticks/frame)
+ pv->bfrm_delay = ((task->bs->TimeStamp -
+ task->bs->DecodeTimeStamp +
+ (duration / 2)) / duration);
+ pv->bfrm_delay = FFMAX(pv->bfrm_delay, 1);
+ pv->bfrm_delay = FFMIN(pv->bfrm_delay, BFRM_DELAY_MAX);
+ }
+ /*
+ * Generate VFR-compatible output DTS based on input PTS.
+ *
+ * Depends on the B-frame delay:
+ *
+ * 0: ipts0, ipts1, ipts2...
+ * 1: ipts0 - ipts1, ipts1 - ipts1, ipts1, ipts2...
+ * 2: ipts0 - ipts2, ipts1 - ipts2, ipts2 - ipts2, ipts1...
+ * ...and so on.
+ */
+ if (pv->frames_out <= pv->bfrm_delay)
+ {
+ buf->s.renderOffset = (pv->init_pts[pv->frames_out] -
+ pv->init_pts[pv->bfrm_delay]);
+ }
+ else
+ {
+ buf->s.renderOffset = hb_qsv_pop_next_dts(pv->list_dts);
+ }
+ }
+
+ /*
+ * In the MP4 container, DT(0) = STTS(0) = 0.
+ *
+ * Which gives us:
+ * CT(0) = CTTS(0) + STTS(0) = CTTS(0) = PTS(0) - DTS(0)
+ * When DTS(0) < PTS(0), we then have:
+ * CT(0) > 0 for video, but not audio (breaks A/V sync).
+ *
+ * This is typically solved by writing an edit list shifting
+ * video samples by the initial delay, PTS(0) - DTS(0).
+ *
+ * See:
+ * ISO/IEC 14496-12:2008(E), ISO base media file format
+ * - 8.6.1.2 Decoding Time to Sample Box
+ */
+ if (w->config->h264.init_delay == 0 && buf->s.renderOffset < 0)
+ {
+ w->config->h264.init_delay = -buf->s.renderOffset;
+ }
+ }
+
+ /*
+ * If we have a chapter marker pending and this frame's
+ * presentation time stamp is at or after the marker's time stamp,
+ * use this as the chapter start.
+ */
+ if (pv->next_chapter.index && buf->s.frametype == HB_FRAME_IDR &&
+ pv->next_chapter.start <= buf->s.start)
+ {
+ buf->s.new_chap = pv->next_chapter.index;
+ pv->next_chapter.index = 0;
+ }
+
+ // shift for fifo
+ if(pv->async_depth){
+ av_qsv_list_rem(qsv_encode->tasks,task);
+ av_qsv_list_add(qsv_encode->tasks,task);
+ }
+
+ task->bs->DataLength = 0;
+ task->bs->DataOffset = 0;
+ task->bs->MaxLength = qsv_encode->p_buf_max_size;
+ task->stage = 0;
+ pv->frames_out++;
+ }
+ }
+ }while(is_end);
+
+
+ if(is_end){
+ if( !buf && MFX_ERR_MORE_DATA == sts )
+ break;
+
+ }
+ else
+ break;
+
+ }
+
+ if(!is_end)
+ ++pv->frames_in;
+
+ if(is_end){
+ *buf_in = NULL;
+ if(last_buf){
+ last_buf->next = in;
+ }
+ else
+ *buf_out = in;
+ return HB_WORK_DONE;
+ }
+ else{
+ return HB_WORK_OK;
+ }
+}
+
+int nal_find_start_code(uint8_t** pb, size_t* size){
+ if ((int) *size < 4 )
+ return 0;
+
+    // find the start code inserted by Media SDK; see ff_prefix_code[]
+ while ((4 <= *size) &&
+ ((0 != (*pb)[0]) ||
+ (0 != (*pb)[1]) ||
+ (1 != (*pb)[2]) ))
+ {
+ *pb += 1;
+ *size -= 1;
+ }
+
+ if (4 <= *size)
+ return (((*pb)[0] << 24) | ((*pb)[1] << 16) | ((*pb)[2] << 8) | ((*pb)[3]));
+
+ return 0;
+}
+
+void parse_nalus(uint8_t *nal_inits, size_t length, hb_buffer_t *buf, uint32_t frame_num){
+ uint8_t *offset = nal_inits;
+ size_t size = length;
+
+ if( nal_find_start_code(&offset,&size) == 0 )
+ size = 0;
+
+ while( size > 0 ){
+
+ uint8_t* current_nal = offset + sizeof(ff_prefix_code)-1;
+ uint8_t *next_offset = offset + sizeof(ff_prefix_code);
+ size_t next_size = size - sizeof(ff_prefix_code);
+ size_t current_size = next_size;
+ if( nal_find_start_code(&next_offset,&next_size) == 0 ){
+ size = 0;
+ current_size += 1;
+ }
+ else{
+ current_size -= next_size;
+ if( next_offset > 0 && *(next_offset-1) != 0 )
+ current_size += 1;
+ }
+ {
+ char size_position[4] = {0,0,0,0};
+            size_position[0] = (current_size >> 24) & 0xFF;
+ size_position[1] = (current_size >> 16) & 0xFF;
+ size_position[2] = (current_size >> 8) & 0xFF;
+ size_position[3] = current_size & 0xFF;
+
+ memcpy(buf->data + buf->size,&size_position ,sizeof(size_position));
+ buf->size += sizeof(size_position);
+
+ memcpy(buf->data + buf->size,current_nal ,current_size);
+ buf->size += current_size;
+ }
+
+ if(size){
+ size = next_size;
+ offset = next_offset;
+ }
+ }
+}
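The DTS workaround above (the init_pts / list_dts logic in encqsvWork()) boils down to delaying the input PTS sequence by bfrm_delay frames: the first bfrm_delay + 1 output DTS values are the corresponding input PTS values shifted back by init_pts[bfrm_delay], and every later DTS is simply the input PTS from bfrm_delay frames earlier. A minimal standalone sketch of that pattern, with a hypothetical delay and fabricated timestamps (not part of the patch):

    #include <stdio.h>
    #include <inttypes.h>

    #define DELAY 2 // hypothetical bfrm_delay

    int main(void)
    {
        // fabricated input PTS values at 90 kHz, constant 3003-tick frame duration
        int64_t ipts[8] = { 0, 3003, 6006, 9009, 12012, 15015, 18018, 21021 };

        for (int n = 0; n < 8; n++)
        {
            int64_t dts;
            if (n <= DELAY)
            {
                // same as init_pts[frames_out] - init_pts[bfrm_delay]
                dts = ipts[n] - ipts[DELAY];
            }
            else
            {
                // same as popping the next entry off list_dts
                dts = ipts[n - DELAY];
            }
            printf("frame %d: PTS %6" PRId64 "  DTS %6" PRId64 "\n",
                   n, ipts[n], dts);
        }
        return 0;
    }

For DELAY = 2 this reproduces the "ipts0 - ipts2, ipts1 - ipts2, ipts2 - ipts2, ipts1..." sequence documented in the source comment above.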
diff --git a/libhb/enc_qsv.h b/libhb/enc_qsv.h
new file mode 100644
index 000000000..9d27347cd
--- /dev/null
+++ b/libhb/enc_qsv.h
@@ -0,0 +1,38 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#ifndef ENC_QSV_H
+#define ENC_QSV_H
+
+#include "hb.h"
+#include "qsv_common.h"
+
+int nal_find_start_code(uint8_t** pb, size_t* size);
+void parse_nalus( uint8_t *nal_inits, size_t length, hb_buffer_t *buf, uint32_t frame_num);
+
+#endif // ENC_QSV_H
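nal_find_start_code() and parse_nalus(), declared above, rewrite the encoder's Annex B output (NAL units separated by 00 00 01 start codes) into 4-byte big-endian length-prefixed NAL units for the muxer. A simplified, self-contained sketch of that conversion; it only handles three-byte start codes, and the function and buffer names are illustrative rather than HandBrake API:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    // out must hold at least in_size plus one extra byte per NAL unit
    // (a 4-byte length prefix replaces each 3-byte start code)
    static size_t annexb_to_length_prefixed(const uint8_t *in, size_t in_size,
                                            uint8_t *out)
    {
        size_t i = 0, out_size = 0;

        while (i + 3 <= in_size)
        {
            // look for the next 00 00 01 start code
            if (in[i] != 0 || in[i + 1] != 0 || in[i + 2] != 1)
            {
                i++;
                continue;
            }
            size_t start = i + 3, end = start;
            // the NAL payload ends at the next start code or at end of buffer
            while (end + 3 <= in_size &&
                   !(in[end] == 0 && in[end + 1] == 0 && in[end + 2] == 1))
            {
                end++;
            }
            if (end + 3 > in_size)
            {
                end = in_size;
            }
            size_t nal_size = end - start;
            out[out_size++] = (nal_size >> 24) & 0xFF;
            out[out_size++] = (nal_size >> 16) & 0xFF;
            out[out_size++] = (nal_size >>  8) & 0xFF;
            out[out_size++] =  nal_size        & 0xFF;
            memcpy(out + out_size, in + start, nal_size);
            out_size += nal_size;
            i = end;
        }
        return out_size;
    }

    int main(void)
    {
        // fabricated Annex B data: a 5-byte NAL followed by a 3-byte NAL
        const uint8_t annexb[] = { 0,0,1, 0x65,1,2,3,4, 0,0,1, 0x41,5,6 };
        uint8_t out[sizeof(annexb) + 8];
        size_t n = annexb_to_length_prefixed(annexb, sizeof(annexb), out);
        printf("%zu Annex B bytes -> %zu length-prefixed bytes\n",
               sizeof(annexb), n);
        return 0;
    }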
diff --git a/libhb/h264_common.h b/libhb/h264_common.h
new file mode 100644
index 000000000..febe1965f
--- /dev/null
+++ b/libhb/h264_common.h
@@ -0,0 +1,17 @@
+/* h264_common.h
+
+ Copyright (c) 2003-2012 HandBrake Team
+ This file is part of the HandBrake source code
+ Homepage: <http://handbrake.fr/>.
+ It may be used under the terms of the GNU General Public License v2.
+ For full terms see the file COPYING or visit http://www.gnu.org/licenses/gpl-2.0.html
+ */
+
+#ifndef HB_H264_COMMON_H
+#define HB_H264_COMMON_H
+
+static const char * const hb_h264_profile_names[] = { "auto", "high", "main", "baseline", NULL, };
+static const char * const hb_h264_level_names[] = { "auto", "1.0", "1b", "1.1", "1.2", "1.3", "2.0", "2.1", "2.2", "3.0", "3.1", "3.2", "4.0", "4.1", "4.2", "5.0", "5.1", "5.2", NULL, };
+static const int hb_h264_level_values[] = { -1, 10, 9, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51, 52, 0, };
+
+#endif //HB_H264_COMMON_H
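hb_h264_level_names[] and hb_h264_level_values[] above are parallel arrays: enc_qsv.c resolves the user's level string to an index with hb_qsv_atoindex() and reads the numeric value at the same index, which it then assigns to mfx.CodecLevel. A tiny standalone sketch of that lookup, using an abbreviated copy of the tables:

    #include <stdio.h>
    #include <strings.h> // strcasecmp

    // abbreviated copies of the parallel tables in h264_common.h
    static const char * const level_names[]  = { "auto", "1.0", "1b", "3.1", "4.1", "5.2", NULL };
    static const int          level_values[] = {     -1,    10,    9,    31,    41,    52,    0 };

    static int level_name_to_value(const char *name)
    {
        for (int i = 0; level_names[i] != NULL; i++)
        {
            if (!strcasecmp(level_names[i], name))
            {
                return level_values[i];
            }
        }
        return 0; // not found, same convention as the trailing 0 above
    }

    int main(void)
    {
        printf("4.1 -> %d, 1b -> %d\n",
               level_name_to_value("4.1"), level_name_to_value("1b"));
        return 0;
    }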
diff --git a/libhb/qsv_common.c b/libhb/qsv_common.c
new file mode 100644
index 000000000..7cc0bb70c
--- /dev/null
+++ b/libhb/qsv_common.c
@@ -0,0 +1,780 @@
+/* qsv_common.c
+ *
+ * Copyright (c) 2003-2013 HandBrake Team
+ * This file is part of the HandBrake source code.
+ * Homepage: <http://handbrake.fr/>.
+ * It may be used under the terms of the GNU General Public License v2.
+ * For full terms see the file COPYING or visit http://www.gnu.org/licenses/gpl-2.0.html
+ */
+
+#include "hb.h"
+#include "ports.h"
+#include "common.h"
+#include "hb_dict.h"
+#include "qsv_common.h"
+#include "h264_common.h"
+
+// for x264_vidformat_names etc.
+#include "x264.h"
+
+// avoids a warning
+#include "libavutil/cpu.h"
+extern void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx);
+
+// make the Intel QSV information available to the UIs
+hb_qsv_info_t *hb_qsv_info = NULL;
+
+// availability and versions
+static mfxVersion qsv_hardware_version;
+static mfxVersion qsv_software_version;
+static mfxVersion qsv_minimum_version;
+static int qsv_hardware_available = 0;
+static int qsv_software_available = 0;
+
+// check available Intel Media SDK version against a minimum
+#define HB_CHECK_MFX_VERSION(MFX_VERSION, MAJOR, MINOR) \
+ (MFX_VERSION.Major == MAJOR && MFX_VERSION.Minor >= MINOR)
+
+int hb_qsv_available()
+{
+ return hb_qsv_info != NULL && (qsv_hardware_available ||
+ qsv_software_available);
+}
+
+int hb_qsv_info_init()
+{
+ static int init_done = 0;
+ if (init_done)
+ return (hb_qsv_info == NULL);
+ init_done = 1;
+
+ hb_qsv_info = calloc(1, sizeof(*hb_qsv_info));
+ if (hb_qsv_info == NULL)
+ {
+ hb_error("hb_qsv_info_init: alloc failure");
+ return -1;
+ }
+
+ mfxSession session;
+ qsv_minimum_version.Major = HB_QSV_MINVERSION_MAJOR;
+ qsv_minimum_version.Minor = HB_QSV_MINVERSION_MINOR;
+
+ // check for software fallback
+ if (MFXInit(MFX_IMPL_SOFTWARE,
+ &qsv_minimum_version, &session) == MFX_ERR_NONE)
+ {
+ qsv_software_available = 1;
+ // our minimum is supported, but query the actual version
+ MFXQueryVersion(session, &qsv_software_version);
+ MFXClose(session);
+ }
+
+ // check for actual hardware support
+ if (MFXInit(MFX_IMPL_HARDWARE_ANY|MFX_IMPL_VIA_ANY,
+ &qsv_minimum_version, &session) == MFX_ERR_NONE)
+ {
+ qsv_hardware_available = 1;
+ // our minimum is supported, but query the actual version
+ MFXQueryVersion(session, &qsv_hardware_version);
+ MFXClose(session);
+ }
+
+ // check for version-specific or hardware-specific capabilities
+ // we only use software as a fallback, so check hardware first
+ if (qsv_hardware_available)
+ {
+ if (HB_CHECK_MFX_VERSION(qsv_hardware_version, 1, 6))
+ {
+ hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_BRC;
+ hb_qsv_info->capabilities |= HB_QSV_CAP_MSDK_API_1_6;
+ }
+ if (hb_get_cpu_platform() == HB_CPU_PLATFORM_INTEL_HSW)
+ {
+ if (HB_CHECK_MFX_VERSION(qsv_hardware_version, 1, 7))
+ {
+ hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_TRELLIS;
+ hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_LOOKAHEAD;
+ }
+ hb_qsv_info->capabilities |= HB_QSV_CAP_H264_BPYRAMID;
+ }
+ }
+ else if (qsv_software_available)
+ {
+ if (HB_CHECK_MFX_VERSION(qsv_software_version, 1, 6))
+ {
+ hb_qsv_info->capabilities |= HB_QSV_CAP_MSDK_API_1_6;
+ hb_qsv_info->capabilities |= HB_QSV_CAP_H264_BPYRAMID;
+ }
+ }
+
+ // note: we pass a pointer to MFXInit but it never gets modified
+ // let's make sure of it just to be safe though
+ if (qsv_minimum_version.Major != HB_QSV_MINVERSION_MAJOR ||
+ qsv_minimum_version.Minor != HB_QSV_MINVERSION_MINOR)
+ {
+ hb_error("hb_qsv_info_init: minimum version (%d.%d) was modified",
+ qsv_minimum_version.Major,
+ qsv_minimum_version.Minor);
+ }
+
+ // success
+ return 0;
+}
+
+// we don't need it beyond this point
+#undef HB_CHECK_MFX_VERSION
+
+void hb_qsv_info_print()
+{
+ if (hb_qsv_info == NULL)
+ return;
+
+ // is QSV available?
+ hb_log("Intel Quick Sync Video support: %s",
+ hb_qsv_available() ? "yes": "no");
+
+ // if we have Quick Sync Video support, also print the details
+ if (hb_qsv_available())
+ {
+ if (qsv_hardware_available)
+ {
+ hb_log(" - Intel Media SDK hardware: API %d.%d (minimum: %d.%d)",
+ qsv_hardware_version.Major,
+ qsv_hardware_version.Minor,
+ qsv_minimum_version.Major,
+ qsv_minimum_version.Minor);
+ }
+ if (qsv_software_available)
+ {
+ hb_log(" - Intel Media SDK software: API %d.%d (minimum: %d.%d)",
+ qsv_software_version.Major,
+ qsv_software_version.Minor,
+ qsv_minimum_version.Major,
+ qsv_minimum_version.Minor);
+ }
+ }
+}
+
+const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id)
+{
+ switch (codec_id)
+ {
+ case AV_CODEC_ID_H264:
+ return "h264_qsv";
+
+ default:
+ return NULL;
+ }
+}
+
+int hb_qsv_decode_is_enabled(hb_job_t *job)
+{
+ return ((job != NULL && job->title->qsv_decode_support && job->qsv_decode) &&
+ (job->vcodec & HB_VCODEC_QSV_MASK));
+}
+
+int hb_qsv_decode_is_supported(enum AVCodecID codec_id,
+ enum AVPixelFormat pix_fmt)
+{
+ switch (codec_id)
+ {
+ case AV_CODEC_ID_H264:
+ return (pix_fmt == AV_PIX_FMT_YUV420P ||
+ pix_fmt == AV_PIX_FMT_YUVJ420P);
+
+ default:
+ return 0;
+ }
+}
+
+int hb_qsv_codingoption_xlat(int val)
+{
+ switch (HB_QSV_CLIP3(-1, 2, val))
+ {
+ case 0:
+ return MFX_CODINGOPTION_OFF;
+ case 1:
+ case 2: // MFX_CODINGOPTION_ADAPTIVE, reserved
+ return MFX_CODINGOPTION_ON;
+ case -1:
+ default:
+ return MFX_CODINGOPTION_UNKNOWN;
+ }
+}
+
+int hb_qsv_trellisvalue_xlat(int val)
+{
+ switch (HB_QSV_CLIP3(-1, 3, val))
+ {
+ case 0:
+ return MFX_TRELLIS_OFF;
+ case 1: // I-frames only
+ return MFX_TRELLIS_I;
+ case 2: // I- and P-frames
+ return MFX_TRELLIS_I|MFX_TRELLIS_P;
+ case 3: // all frames
+ return MFX_TRELLIS_I|MFX_TRELLIS_P|MFX_TRELLIS_B;
+ case -1:
+ default:
+ return MFX_TRELLIS_UNKNOWN;
+ }
+}
+
+int hb_qsv_atoindex(const char* const *arr, const char *str, int *err)
+{
+ int i;
+ for (i = 0; arr[i] != NULL; i++)
+ {
+ if (!strcasecmp(arr[i], str))
+ {
+ break;
+ }
+ }
+ *err = (arr[i] == NULL);
+ return i;
+}
+
+// adapted from libx264
+int hb_qsv_atobool(const char *str, int *err)
+{
+ if (!strcasecmp(str, "1") ||
+ !strcasecmp(str, "yes") ||
+ !strcasecmp(str, "true"))
+ {
+ return 1;
+ }
+ if (!strcasecmp(str, "0") ||
+ !strcasecmp(str, "no") ||
+ !strcasecmp(str, "false"))
+ {
+ return 0;
+ }
+ *err = 1;
+ return 0;
+}
+
+// adapted from libx264
+int hb_qsv_atoi(const char *str, int *err)
+{
+ char *end;
+ int v = strtol(str, &end, 0);
+ if (end == str || end[0] != '\0')
+ {
+ *err = 1;
+ }
+ return v;
+}
+
+// adapted from libx264
+float hb_qsv_atof(const char *str, int *err)
+{
+ char *end;
+ float v = strtod(str, &end);
+ if (end == str || end[0] != '\0')
+ {
+ *err = 1;
+ }
+ return v;
+}
+
+int hb_qsv_param_parse(hb_qsv_param_t *param,
+ const char *key, const char *value, int vcodec)
+{
+ float fvalue;
+ int ivalue, error = 0;
+ if (param == NULL)
+ {
+ return HB_QSV_PARAM_ERROR;
+ }
+ if (value == NULL || value[0] == '\0')
+ {
+ value = "true";
+ }
+ else if (value[0] == '=')
+ {
+ value++;
+ }
+ if (key == NULL || key[0] == '\0')
+ {
+ return HB_QSV_PARAM_BAD_NAME;
+ }
+ else if (!strncasecmp(key, "no-", 3))
+ {
+ key += 3;
+ value = hb_qsv_atobool(value, &error) ? "false" : "true";
+ if (error)
+ {
+ return HB_QSV_PARAM_BAD_VALUE;
+ }
+ }
+ if (!strcasecmp(key, "target-usage") ||
+ !strcasecmp(key, "tu"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->videoParam->mfx.TargetUsage = HB_QSV_CLIP3(MFX_TARGETUSAGE_1,
+ MFX_TARGETUSAGE_7,
+ ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "num-ref-frame") ||
+ !strcasecmp(key, "ref"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->videoParam->mfx.NumRefFrame = HB_QSV_CLIP3(0, 16, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "gop-ref-dist"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->videoParam->mfx.GopRefDist = HB_QSV_CLIP3(0, 32, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "gop-pic-size") ||
+ !strcasecmp(key, "keyint"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->gop.gop_pic_size = HB_QSV_CLIP3(-1, UINT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "scenecut"))
+ {
+ ivalue = hb_qsv_atobool(value, &error);
+ if (!error)
+ {
+ if (!ivalue)
+ {
+ param->videoParam->mfx.GopOptFlag |= MFX_GOP_STRICT;
+ }
+ else
+ {
+ param->videoParam->mfx.GopOptFlag &= ~MFX_GOP_STRICT;
+ }
+ }
+ }
+ else if (!strcasecmp(key, "cqp-offset-i"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->rc.cqp_offsets[0] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "cqp-offset-p"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->rc.cqp_offsets[1] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "cqp-offset-b"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->rc.cqp_offsets[2] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "vbv-init"))
+ {
+ fvalue = hb_qsv_atof(value, &error);
+ if (!error)
+ {
+ param->rc.vbv_buffer_init = HB_QSV_CLIP3(0, UINT16_MAX, fvalue);
+ }
+ }
+ else if (!strcasecmp(key, "vbv-bufsize"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->rc.vbv_buffer_size = HB_QSV_CLIP3(0, UINT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "vbv-maxrate"))
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->rc.vbv_max_bitrate = HB_QSV_CLIP3(0, UINT16_MAX, ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "cabac"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = !hb_qsv_atobool(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->codingOption.CAVLC = hb_qsv_codingoption_xlat(ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "rate-distorsion-opt") ||
+ !strcasecmp(key, "rdo"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atobool(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->codingOption.RateDistortionOpt = hb_qsv_codingoption_xlat(ivalue);
+ }
+ }
+ else if (!strcasecmp(key, "videoformat"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoindex(x264_vidformat_names, value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoSignalInfo.VideoFormat = ivalue;
+ }
+ }
+ else if (!strcasecmp(key, "fullrange"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoindex(x264_fullrange_names, value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoSignalInfo.VideoFullRange = ivalue;
+ }
+ }
+ else if (!strcasecmp(key, "colorprim"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoindex(x264_colorprim_names, value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoSignalInfo.ColourDescriptionPresent = 1;
+ param->videoSignalInfo.ColourPrimaries = ivalue;
+ }
+ }
+ else if (!strcasecmp(key, "transfer"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoindex(x264_transfer_names, value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoSignalInfo.ColourDescriptionPresent = 1;
+ param->videoSignalInfo.TransferCharacteristics = ivalue;
+ }
+ }
+ else if (!strcasecmp(key, "colormatrix"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoindex(x264_colmatrix_names, value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoSignalInfo.ColourDescriptionPresent = 1;
+ param->videoSignalInfo.MatrixCoefficients = ivalue;
+ }
+ }
+ else if (!strcasecmp(key, "tff") ||
+ !strcasecmp(key, "interlaced"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atobool(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoParam->mfx.FrameInfo.PicStruct = (ivalue ?
+ MFX_PICSTRUCT_FIELD_TFF :
+ MFX_PICSTRUCT_PROGRESSIVE);
+ }
+ }
+ else if (!strcasecmp(key, "bff"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atobool(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (!error)
+ {
+ param->videoParam->mfx.FrameInfo.PicStruct = (ivalue ?
+ MFX_PICSTRUCT_FIELD_BFF :
+ MFX_PICSTRUCT_PROGRESSIVE);
+ }
+ }
+ else if (!strcasecmp(key, "mbbrc"))
+ {
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_BRC)
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->codingOption2.MBBRC = hb_qsv_codingoption_xlat(ivalue);
+ }
+ }
+ else
+ {
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ }
+ else if (!strcasecmp(key, "extbrc"))
+ {
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_BRC)
+ {
+ ivalue = hb_qsv_atoi(value, &error);
+ if (!error)
+ {
+ param->codingOption2.ExtBRC = hb_qsv_codingoption_xlat(ivalue);
+ }
+ }
+ else
+ {
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ }
+ else if (!strcasecmp(key, "lookahead") ||
+ !strcasecmp(key, "la"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atobool(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_LOOKAHEAD)
+ {
+ if (!error)
+ {
+ param->rc.lookahead = ivalue;
+ }
+ }
+ else
+ {
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ }
+ else if (!strcasecmp(key, "lookahead-depth") ||
+ !strcasecmp(key, "la-depth"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoi(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_LOOKAHEAD)
+ {
+ if (!error)
+ {
+ param->codingOption2.LookAheadDepth = HB_QSV_CLIP3(10, 100,
+ ivalue);
+ }
+ }
+ else
+ {
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ }
+ else if (!strcasecmp(key, "trellis"))
+ {
+ switch (vcodec)
+ {
+ case HB_VCODEC_QSV_H264:
+ ivalue = hb_qsv_atoi(value, &error);
+ break;
+ default:
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_TRELLIS)
+ {
+ if (!error)
+ {
+ param->codingOption2.Trellis = hb_qsv_trellisvalue_xlat(ivalue);
+ }
+ }
+ else
+ {
+ return HB_QSV_PARAM_UNSUPPORTED;
+ }
+ }
+ else
+ {
+ /*
+ * TODO:
+ * - slice count control
+ * - open-gop
+ * - fake-interlaced (mfxExtCodingOption.FramePicture???)
+ * - intra-refresh
+ */
+ return HB_QSV_PARAM_BAD_NAME;
+ }
+ return error ? HB_QSV_PARAM_BAD_VALUE : HB_QSV_PARAM_OK;
+}
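+
+/*
+ * Usage sketch (illustrative; "param" is a hypothetical, already-initialized
+ * hb_qsv_param_t): each key/value pair from an advanced options string is
+ * passed in separately and the return code is checked:
+ *
+ *     switch (hb_qsv_param_parse(&param, "lookahead", "1", HB_VCODEC_QSV_H264))
+ *     {
+ *         case HB_QSV_PARAM_OK:          break;
+ *         case HB_QSV_PARAM_UNSUPPORTED: hb_log("unsupported option");       break;
+ *         case HB_QSV_PARAM_BAD_VALUE:   hb_log("bad value for the option"); break;
+ *         default:                       hb_log("invalid option");           break;
+ *     }
+ */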
+
+int hb_qsv_param_default(hb_qsv_param_t *param, mfxVideoParam *videoParam)
+{
+ if (param != NULL && videoParam != NULL)
+ {
+ // introduced in API 1.0
+ memset(&param->codingOption, 0, sizeof(mfxExtCodingOption));
+ param->codingOption.Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
+ param->codingOption.Header.BufferSz = sizeof(mfxExtCodingOption);
+ param->codingOption.MECostType = 0; // reserved, must be 0
+ param->codingOption.MESearchType = 0; // reserved, must be 0
+ param->codingOption.MVSearchWindow.x = 0; // reserved, must be 0
+ param->codingOption.MVSearchWindow.y = 0; // reserved, must be 0
+ param->codingOption.RefPicListReordering = 0; // reserved, must be 0
+ param->codingOption.IntraPredBlockSize = 0; // reserved, must be 0
+ param->codingOption.InterPredBlockSize = 0; // reserved, must be 0
+ param->codingOption.MVPrecision = 0; // reserved, must be 0
+ param->codingOption.EndOfSequence = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.RateDistortionOpt = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.CAVLC = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.ResetRefList = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.MaxDecFrameBuffering = 0; // unspecified
+ param->codingOption.AUDelimiter = MFX_CODINGOPTION_OFF;
+ param->codingOption.SingleSeiNalUnit = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.PicTimingSEI = MFX_CODINGOPTION_OFF;
+ param->codingOption.VuiNalHrdParameters = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.FramePicture = MFX_CODINGOPTION_UNKNOWN;
+ // introduced in API 1.3
+ param->codingOption.RefPicMarkRep = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.FieldOutput = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.NalHrdConformance = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.SingleSeiNalUnit = MFX_CODINGOPTION_UNKNOWN;
+ param->codingOption.VuiVclHrdParameters = MFX_CODINGOPTION_UNKNOWN;
+ // introduced in API 1.4
+ param->codingOption.ViewOutput = MFX_CODINGOPTION_UNKNOWN;
+ // introduced in API 1.6
+ param->codingOption.RecoveryPointSEI = MFX_CODINGOPTION_UNKNOWN;
+
+ // introduced in API 1.3
+ memset(&param->videoSignalInfo, 0, sizeof(mfxExtVideoSignalInfo));
+ param->videoSignalInfo.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
+ param->videoSignalInfo.Header.BufferSz = sizeof(mfxExtVideoSignalInfo);
+ param->videoSignalInfo.VideoFormat = 5; // undefined
+ param->videoSignalInfo.VideoFullRange = 0; // TV range
+ param->videoSignalInfo.ColourDescriptionPresent = 0; // don't write to bitstream
+ param->videoSignalInfo.ColourPrimaries = 2; // undefined
+ param->videoSignalInfo.TransferCharacteristics = 2; // undefined
+ param->videoSignalInfo.MatrixCoefficients = 2; // undefined
+
+ // introduced in API 1.6
+ memset(&param->codingOption2, 0, sizeof(mfxExtCodingOption2));
+ param->codingOption2.Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
+ param->codingOption2.Header.BufferSz = sizeof(mfxExtCodingOption2);
+ param->codingOption2.IntRefType = 0;
+ param->codingOption2.IntRefCycleSize = 2;
+ param->codingOption2.IntRefQPDelta = 0;
+ param->codingOption2.MaxFrameSize = 0;
+ param->codingOption2.BitrateLimit = MFX_CODINGOPTION_ON;
+ param->codingOption2.ExtBRC = MFX_CODINGOPTION_OFF;
+ param->codingOption2.MBBRC = MFX_CODINGOPTION_UNKNOWN;
+ // introduced in API 1.7
+ param->codingOption2.LookAheadDepth = 40;
+ param->codingOption2.Trellis = MFX_TRELLIS_UNKNOWN;
+
+ // GOP & rate control
+ param->gop.gop_pic_size = -1; // set automatically
+ param->gop.int_ref_cycle_size = -1; // set automatically
+ param->rc.lookahead = -1; // set automatically
+ param->rc.cqp_offsets[0] = 0;
+ param->rc.cqp_offsets[1] = 2;
+ param->rc.cqp_offsets[2] = 4;
+ param->rc.vbv_max_bitrate = 0;
+ param->rc.vbv_buffer_size = 0;
+ param->rc.vbv_buffer_init = .5;
+
+ // introduced in API 1.0
+ memset(videoParam, 0, sizeof(mfxVideoParam));
+ param->videoParam = videoParam;
+ param->videoParam->Protected = 0; // reserved, must be 0
+ param->videoParam->NumExtParam = 0;
+ param->videoParam->IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
+ param->videoParam->mfx.TargetUsage = MFX_TARGETUSAGE_2;
+ param->videoParam->mfx.GopOptFlag = MFX_GOP_CLOSED;
+ param->videoParam->mfx.NumThread = 0; // deprecated, must be 0
+ param->videoParam->mfx.EncodedOrder = 0; // input is in display order
+ param->videoParam->mfx.IdrInterval = 0; // all I-frames are IDR
+ param->videoParam->mfx.NumSlice = 0; // use Media SDK default
+ param->videoParam->mfx.NumRefFrame = 0; // use Media SDK default
+ param->videoParam->mfx.GopPicSize = 0; // use Media SDK default
+ param->videoParam->mfx.GopRefDist = 4; // power of 2, >= 4: B-pyramid
+ // introduced in API 1.1
+ param->videoParam->AsyncDepth = AV_QSV_ASYNC_DEPTH_DEFAULT;
+ // introduced in API 1.3
+ param->videoParam->mfx.BRCParamMultiplier = 0; // no multiplier
+
+ // FrameInfo: set by video encoder, except PicStruct
+ param->videoParam->mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
+
+ // attach supported mfxExtBuffer structures to the mfxVideoParam
+ param->videoParam->NumExtParam = 0;
+ param->videoParam->ExtParam = param->ExtParamArray;
+ param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption;
+ param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->videoSignalInfo;
+ if (hb_qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
+ {
+ param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption2;
+ }
+ }
+ else
+ {
+ hb_error("hb_qsv_param_default: invalid pointer(s)");
+ return -1;
+ }
+ return 0;
+}
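+
+/*
+ * Typical call order (sketch; the locals below are hypothetical):
+ *
+ *     hb_qsv_param_t param;
+ *     mfxVideoParam  videoParam;
+ *     if (hb_qsv_param_default(&param, &videoParam) == 0)
+ *     {
+ *         // defaults are set and the supported mfxExtBuffer structures are
+ *         // attached to videoParam; user overrides can now be applied
+ *         hb_qsv_param_parse(&param, "gop-ref-dist", "4", HB_VCODEC_QSV_H264);
+ *     }
+ */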
diff --git a/libhb/qsv_common.h b/libhb/qsv_common.h
new file mode 100644
index 000000000..cd85d33a2
--- /dev/null
+++ b/libhb/qsv_common.h
@@ -0,0 +1,119 @@
+/* qsv_common.h
+ *
+ * Copyright (c) 2003-2013 HandBrake Team
+ * This file is part of the HandBrake source code.
+ * Homepage: <http://handbrake.fr/>.
+ * It may be used under the terms of the GNU General Public License v2.
+ * For full terms see the file COPYING or visit http://www.gnu.org/licenses/gpl-2.0.html
+ */
+
+#ifndef HB_QSV_COMMON_H
+#define HB_QSV_COMMON_H
+
+#include "msdk/mfxvideo.h"
+#include "libavcodec/avcodec.h"
+
+/* Minimum Intel Media SDK version (currently 1.3, for Sandy Bridge support) */
+#define HB_QSV_MINVERSION_MAJOR AV_QSV_MSDK_VERSION_MAJOR
+#define HB_QSV_MINVERSION_MINOR AV_QSV_MSDK_VERSION_MINOR
+
+/*
+ * Get & store all available Intel Quick Sync information:
+ *
+ * - general availability
+ * - available implementations (hardware-accelerated, software fallback, etc.)
+ * - available codecs, filters, etc. for direct access (convenience)
+ * - supported API version
+ * - supported resolutions
+ */
+typedef struct hb_qsv_info_s
+{
+ // supported version-specific or hardware-specific capabilities
+ int capabilities;
+#define HB_QSV_CAP_H264_BPYRAMID (1 << 0) // H.264: reference B-frames
+#define HB_QSV_CAP_MSDK_API_1_6 (1 << 1) // Support for API 1.6 or later
+#define HB_QSV_CAP_OPTION2_BRC (1 << 2) // mfxExtCodingOption2: MBBRC/ExtBRC
+#define HB_QSV_CAP_OPTION2_LOOKAHEAD (1 << 3) // mfxExtCodingOption2: LookAhead
+#define HB_QSV_CAP_OPTION2_TRELLIS (1 << 4) // mfxExtCodingOption2: Trellis
+
+ // TODO: add available decoders, filters, encoders,
+ // maximum decode and encode resolution, etc.
+} hb_qsv_info_t;
+
+/* Global Intel QSV information for use by the UIs */
+extern hb_qsv_info_t *hb_qsv_info;
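+
+/*
+ * Capability bits are meant to be tested before the corresponding Media SDK
+ * feature is used, e.g. (sketch):
+ *
+ *     if (hb_qsv_available() &&
+ *         (hb_qsv_info->capabilities & HB_QSV_CAP_OPTION2_LOOKAHEAD))
+ *     {
+ *         // lookahead rate control can be enabled via mfxExtCodingOption2
+ *     }
+ */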
+
+/* Intel Quick Sync Video utilities */
+int hb_qsv_available();
+int hb_qsv_info_init();
+void hb_qsv_info_print();
+
+/* Intel Quick Sync Video DECODE utilities */
+const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id);
+int hb_qsv_decode_is_enabled(hb_job_t *job);
+int hb_qsv_decode_is_supported(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt);
+
+/* Media SDK parameters handling */
+enum
+{
+ HB_QSV_PARAM_OK,
+ HB_QSV_PARAM_ERROR,
+ HB_QSV_PARAM_BAD_NAME,
+ HB_QSV_PARAM_BAD_VALUE,
+ HB_QSV_PARAM_UNSUPPORTED,
+};
+
+typedef struct
+{
+ /*
+ * Supported mfxExtBuffer.BufferId values:
+ *
+ * MFX_EXTBUFF_AVC_REFLIST_CTRL
+ * MFX_EXTBUFF_AVC_TEMPORAL_LAYERS
+ * MFX_EXTBUFF_CODING_OPTION
+ * MFX_EXTBUFF_CODING_OPTION_SPSPPS
+ * MFX_EXTBUFF_CODING_OPTION2
+ * MFX_EXTBUFF_ENCODER_CAPABILITY
+ * MFX_EXTBUFF_ENCODER_RESET_OPTION
+ * MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION
+ * MFX_EXTBUFF_PICTURE_TIMING_SEI
+ * MFX_EXTBUFF_VIDEO_SIGNAL_INFO
+ *
+ * This should cover all encode-compatible extended
+ * buffers that can be attached to an mfxVideoParam.
+ */
+#define HB_QSV_ENC_NUM_EXT_PARAM_MAX 10
+ mfxExtBuffer* ExtParamArray[HB_QSV_ENC_NUM_EXT_PARAM_MAX];
+ mfxExtCodingOption codingOption;
+ mfxExtCodingOption2 codingOption2;
+ mfxExtVideoSignalInfo videoSignalInfo;
+ struct
+ {
+ int gop_pic_size;
+ int int_ref_cycle_size;
+ } gop;
+ struct
+ {
+ int lookahead;
+ int cqp_offsets[3];
+ int vbv_max_bitrate;
+ int vbv_buffer_size;
+ float vbv_buffer_init;
+ } rc;
+
+ // assigned via hb_qsv_param_default, may be shared with another structure
+ mfxVideoParam *videoParam;
+} hb_qsv_param_t;
+
+#define HB_QSV_CLIP3(min, max, val) ((val) < (min) ? (min) : (val) > (max) ? (max) : (val))
+int hb_qsv_codingoption_xlat(int val);
+int hb_qsv_trellisvalue_xlat(int val);
+int hb_qsv_atoindex(const char* const *arr, const char *str, int *err);
+int hb_qsv_atobool (const char *str, int *err);
+int hb_qsv_atoi (const char *str, int *err);
+float hb_qsv_atof (const char *str, int *err);
+
+int hb_qsv_param_default(hb_qsv_param_t *param, mfxVideoParam *videoParam);
+int hb_qsv_param_parse (hb_qsv_param_t *param, const char *key, const char *value, int vcodec);
+
+#endif
diff --git a/libhb/qsv_filter.c b/libhb/qsv_filter.c
new file mode 100644
index 000000000..3de9d6254
--- /dev/null
+++ b/libhb/qsv_filter.c
@@ -0,0 +1,648 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#include "hb.h"
+#include "hbffmpeg.h"
+#include "libavcodec/qsv.h"
+#include "qsv_filter.h"
+#include "enc_qsv.h"
+
+struct hb_filter_private_s
+{
+ hb_job_t *job;
+ hb_list_t *list;
+
+ int width_in;
+ int height_in;
+ int pix_fmt;
+ int pix_fmt_out;
+ int width_out;
+ int height_out;
+ int crop[4];
+ int deinterlace;
+ int is_frc_used;
+
+ av_qsv_space *vpp_space;
+
+ // FRC param(s)
+ mfxExtVPPFrameRateConversion frc_config;
+};
+
+static int hb_qsv_filter_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+
+static int hb_qsv_filter_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static int hb_qsv_filter_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info );
+
+static void hb_qsv_filter_close( hb_filter_object_t * filter );
+
+hb_filter_object_t hb_filter_qsv =
+{
+ .id = HB_FILTER_QSV,
+ .enforce_order = 1,
+ .name = "Quick Sync Video VPP",
+ .settings = NULL,
+ .init = hb_qsv_filter_init,
+ .work = hb_qsv_filter_work,
+ .close = hb_qsv_filter_close,
+ .info = hb_qsv_filter_info,
+};
+
+static int filter_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
+ mfxStatus sts;
+ int i=0;
+
+ if(!qsv) return 3;
+
+
+ if(!qsv->vpp_space){
+ qsv->vpp_space = av_qsv_list_init(HAVE_THREADS);
+ }
+ if(!pv->vpp_space){
+ for(i=0; i<av_qsv_list_count(qsv->vpp_space);i++){
+ av_qsv_space *qsv_vpp = av_qsv_list_item( qsv->vpp_space, i );
+ if(qsv_vpp->type == AV_QSV_VPP_DEFAULT){
+ pv->vpp_space = qsv_vpp;
+ break;
+ }
+ }
+ }
+
+ if(!pv->vpp_space){
+ pv->vpp_space = calloc( 1, sizeof( av_qsv_space ));
+ pv->vpp_space->type = AV_QSV_VPP_DEFAULT;
+ av_qsv_list_add( qsv->vpp_space, pv->vpp_space );
+ }
+ else
+ if(pv->vpp_space->is_init_done ) return 1;
+
+ if(!qsv->dec_space || !qsv->dec_space->is_init_done) return 2;
+
+ // we need to know final output settings before we can properly configure
+ if (!pv->job->qsv_enc_info.is_init_done)
+ {
+ return 2;
+ }
+
+ av_qsv_add_context_usage(qsv,HAVE_THREADS);
+
+    // for the required parameters, see "Appendix A: Configuration Parameter Constraints" in mediasdk-man.pdf
+    // for now, most values are taken from the decoder
+ {
+ av_qsv_space *qsv_vpp = pv->vpp_space;
+ AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
+
+ if (pv->deinterlace)
+ {
+ /*
+ * Input may be progressive, interlaced or even mixed, so init with
+ * MFX_PICSTRUCT_UNKNOWN and use per-frame field order information
+ * (mfxFrameSurface1.Info.PicStruct)
+ */
+ qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct = MFX_PICSTRUCT_UNKNOWN;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
+ }
+ else
+ {
+ /* Same PicStruct in/out: no filtering */
+ qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
+ }
+
+        // a valid frame rate is required for VPP initialization
+ if( qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN == 0 &&
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD == 0 ){
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN = pv->job->title->rate;
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD = pv->job->title->rate_base;
+ }
+
+ qsv_vpp->m_mfxVideoParam.vpp.In.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
+ qsv_vpp->m_mfxVideoParam.vpp.In.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropX = pv->crop[2];
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropY = pv->crop[0];
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropW = pv-> width_in - pv->crop[3] - pv->crop[2];
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropH = pv->height_in - pv->crop[1] - pv->crop[0];
+ qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN;
+ qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD;
+ qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
+ qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
+ qsv_vpp->m_mfxVideoParam.vpp.In.Width = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Width;
+ qsv_vpp->m_mfxVideoParam.vpp.In.Height = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Height;
+
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropX = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropY = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropW = pv->width_out;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropH = pv->height_out;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN = pv->job->vrate;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD = pv->job->vrate_base;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.Width = pv->job->qsv_enc_info.align_width;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.Height = pv->job->qsv_enc_info.align_height;
+
+ qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+
+ qsv_vpp->m_mfxVideoParam.AsyncDepth = pv->job->qsv_async_depth;
+
+ memset(&qsv_vpp->request, 0, sizeof(mfxFrameAllocRequest)*2);
+
+ sts = MFXVideoVPP_QueryIOSurf(qsv->mfx_session, &qsv_vpp->m_mfxVideoParam, qsv_vpp->request );
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ int num_surfaces_in = qsv_vpp->request[0].NumFrameSuggested;
+ int num_surfaces_out = qsv_vpp->request[1].NumFrameSuggested;
+
+ av_qsv_config *config = qsv->qsv_config;
+
+
+        // suggested counts, plus any extra buffers requested via the QSV config
+        qsv_vpp->surface_num = FFMIN(num_surfaces_in + num_surfaces_out + qsv_vpp->m_mfxVideoParam.AsyncDepth +
+                                     (config ? config->additional_buffers / 2 : 0), AV_QSV_SURFACE_NUM);
+ if(qsv_vpp->surface_num <= 0 )
+ qsv_vpp->surface_num = AV_QSV_SURFACE_NUM;
+
+ int i = 0;
+ for (i = 0; i < qsv_vpp->surface_num; i++){
+ qsv_vpp->p_surfaces[i] = av_mallocz( sizeof(mfxFrameSurface1) );
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_surfaces[i], MFX_ERR_MEMORY_ALLOC);
+ memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
+ }
+
+ qsv_vpp->sync_num = FFMIN( qsv_vpp->surface_num, AV_QSV_SYNC_NUM );
+
+ for (i = 0; i < qsv_vpp->sync_num; i++){
+ qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
+ qsv_vpp->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
+ }
+/*
+ about available VPP filters, see "Table 4 Configurable VPP filters", mediasdk-man.pdf
+ Hints (optional feature) IDs:
+ MFX_EXTBUFF_VPP_DENOISE // Remove noise
+ // Value of 0-100 (inclusive) indicates
+ // the level of noise to remove.
+ MFX_EXTBUFF_VPP_DETAIL // Enhance picture details/edges:
+ // 0-100 value (inclusive) to indicate
+ // the level of details to be enhanced.
+ MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION // Convert input frame rate to match the output, based on frame interpolation:
+ // MFX_FRCALGM_PRESERVE_TIMESTAMP,
+ // MFX_FRCALGM_DISTRIBUTED_TIMESTAMP,
+ // MFX_FRCALGM_FRAME_INTERPOLATION
+ MFX_EXTBUFF_VPP_IMAGE_STABILIZATION // Perform image stabilization
+ // Stabilization modes:
+ // MFX_IMAGESTAB_MODE_UPSCALE
+ // MFX_IMAGESTAB_MODE_BOXING
+ MFX_EXTBUFF_VPP_PICSTRUCT_DETECTION // Perform detection of picture structure:
+ // Detected picture structure - top field first, bottom field first, progressive or unknown
+ // if video processor cannot detect picture structure.
+ MFX_EXTBUFF_VPP_PROCAMP // Adjust the brightness, contrast, saturation, and hue settings
+
+ // Initialize extended buffer for frame processing
+ // - Process amplifier (ProcAmp) used to control brightness
+ // - mfxExtVPPDoUse: Define the processing algorithm to be used
+ // - mfxExtVPPProcAmp: ProcAmp configuration
+ // - mfxExtBuffer: Add extended buffers to VPP parameter configuration
+ mfxExtVPPDoUse extDoUse;
+ mfxU32 tabDoUseAlg[1];
+ extDoUse.Header.BufferId = MFX_EXTBUFF_VPP_DOUSE;
+ extDoUse.Header.BufferSz = sizeof(mfxExtVPPDoUse);
+ extDoUse.NumAlg = 1;
+ extDoUse.AlgList = tabDoUseAlg;
+ tabDoUseAlg[0] = MFX_EXTBUFF_VPP_PROCAMP;
+
+ mfxExtVPPProcAmp procampConfig;
+ procampConfig.Header.BufferId = MFX_EXTBUFF_VPP_PROCAMP;
+ procampConfig.Header.BufferSz = sizeof(mfxExtVPPProcAmp);
+ procampConfig.Hue = 0.0f; // Default
+ procampConfig.Saturation = 1.0f; // Default
+ procampConfig.Contrast = 1.0; // Default
+ procampConfig.Brightness = 40.0; // Adjust brightness
+
+ mfxExtBuffer* ExtBuffer[2];
+ ExtBuffer[0] = (mfxExtBuffer*)&extDoUse;
+ ExtBuffer[1] = (mfxExtBuffer*)&procampConfig;
+ VPPParams.NumExtParam = 2;
+ VPPParams.ExtParam = (mfxExtBuffer**)&ExtBuffer[0];
+*/
+ memset(&qsv_vpp->ext_opaque_alloc, 0, sizeof(qsv_vpp->ext_opaque_alloc));
+
+ if( (qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN / qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD ) !=
+ (qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN / qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD) )
+ {
+ pv->is_frc_used = 1;
+ }
+
+ qsv_vpp->m_mfxVideoParam.NumExtParam = qsv_vpp->p_ext_param_num = 1 + pv->is_frc_used;
+
+ qsv_vpp->p_ext_params = av_mallocz(sizeof(mfxExtBuffer *)*qsv_vpp->p_ext_param_num);
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_ext_params, MFX_ERR_MEMORY_ALLOC);
+
+ qsv_vpp->m_mfxVideoParam.ExtParam = qsv_vpp->p_ext_params;
+
+ qsv_vpp->ext_opaque_alloc.In.Surfaces = qsv->dec_space->p_surfaces;
+ qsv_vpp->ext_opaque_alloc.In.NumSurface = qsv->dec_space->surface_num;
+ qsv_vpp->ext_opaque_alloc.In.Type = qsv->dec_space->request[0].Type;
+
+ qsv_vpp->ext_opaque_alloc.Out.Surfaces = qsv_vpp->p_surfaces;
+ qsv_vpp->ext_opaque_alloc.Out.NumSurface = qsv_vpp->surface_num;
+ qsv_vpp->ext_opaque_alloc.Out.Type = qsv->dec_space->request[0].Type;
+
+ qsv_vpp->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
+ qsv_vpp->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
+ qsv_vpp->p_ext_params[0] = (mfxExtBuffer*)&qsv_vpp->ext_opaque_alloc;
+
+ if(pv->is_frc_used)
+ {
+ pv->frc_config.Header.BufferId = MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION;
+ pv->frc_config.Header.BufferSz = sizeof(mfxExtVPPFrameRateConversion);
+ pv->frc_config.Algorithm = MFX_FRCALGM_PRESERVE_TIMESTAMP;
+
+ qsv_vpp->p_ext_params[1] = (mfxExtBuffer*)&pv->frc_config;
+ }
+
+ sts = MFXVideoVPP_Init(qsv->mfx_session, &qsv_vpp->m_mfxVideoParam);
+
+ AV_QSV_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ qsv_vpp->is_init_done = 1;
+ }
+ return 0;
+}
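+
+/*
+ * The initialization above follows the standard Media SDK VPP pattern
+ * (sketch, error handling omitted; session and videoParam stand in for
+ * qsv->mfx_session and qsv_vpp->m_mfxVideoParam):
+ *
+ *     mfxFrameAllocRequest request[2];
+ *     MFXVideoVPP_QueryIOSurf(session, &videoParam, request); // suggested surface counts
+ *     // allocate request[0].NumFrameSuggested input and
+ *     // request[1].NumFrameSuggested output mfxFrameSurface1 structures
+ *     MFXVideoVPP_Init(session, &videoParam);                 // ready for RunFrameVPPAsync
+ */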
+
+static int hb_qsv_filter_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
+{
+
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
+
+ pv->list = hb_list_init();
+ // list of init params provided at work.c:~700
+ pv->width_in = init->width;
+ pv->height_in = init->height;
+ pv->width_out = init->width;
+ pv->height_out = init->height;
+ memcpy( pv->crop, init->crop, sizeof( int[4] ) );
+
+ if (filter->settings != NULL)
+ {
+ sscanf(filter->settings, "%d:%d:%d:%d:%d:%d_dei:%d",
+ &pv->width_out, &pv->height_out,
+ &pv->crop[0], &pv->crop[1], &pv->crop[2], &pv->crop[3],
+ &pv->deinterlace);
+ }
+
+ pv->job = init->job;
+
+    // deferred until more parameters are known:
+ // filter_init(pv->job->qsv, pv);
+
+    // frame rate is passed through unchanged
+ init->vrate = init->vrate;
+ init->vrate_base = init->vrate_base;
+
+ // framerate shaping not yet supported
+ init->cfr = 0;
+
+ init->pix_fmt = pv->pix_fmt;
+ init->width = pv->width_out;
+ init->height = pv->height_out;
+ memcpy( init->crop, pv->crop, sizeof( int[4] ) );
+
+ return 0;
+}
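+
+/*
+ * Example settings string (illustrative values): "1280:720:0:0:0:0_dei:1"
+ * requests scaling to 1280x720 with no cropping (top/bottom/left/right all 0)
+ * and deinterlacing enabled.
+ */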
+
+static int hb_qsv_filter_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info )
+{
+
+ hb_filter_private_t *pv = filter->private_data;
+ if (pv == NULL)
+ return -1;
+
+ sprintf(info->human_readable_desc,
+ "source: %d * %d, crop (%d/%d/%d/%d): %d * %d, scale: %d * %d",
+ pv->width_in, pv->height_in,
+ pv->crop[0], pv->crop[1], pv->crop[2], pv->crop[3],
+ pv->width_in - pv->crop[2] - pv->crop[3],
+ pv->height_in - pv->crop[0] - pv->crop[1],
+ pv->width_out, pv->height_out);
+
+ if (pv->deinterlace)
+ {
+ sprintf(info->human_readable_desc + strlen(info->human_readable_desc),
+ ", deinterlace");
+ }
+
+ return 0;
+}
+
+void qsv_filter_close( av_qsv_context* qsv, AV_QSV_STAGE_TYPE vpp_type ){
+ int i = 0;
+ av_qsv_space* vpp_space = 0;
+
+ if(qsv && qsv->is_context_active && qsv->vpp_space)
+ for(i=av_qsv_list_count( qsv->vpp_space);i>0;i--){
+
+ vpp_space = av_qsv_list_item( qsv->vpp_space, i-1 );
+ if( vpp_space->type == vpp_type && vpp_space->is_init_done){
+
+ hb_log( "qsv_filter[%s] done: max_surfaces: %u/%u , max_syncs: %u/%u", ((vpp_type == AV_QSV_VPP_DEFAULT)?"Default": "User") ,vpp_space->surface_num_max_used, vpp_space->surface_num, vpp_space->sync_num_max_used, vpp_space->sync_num );
+
+ for (i = 0; i < vpp_space->surface_num; i++){
+ av_freep(&vpp_space->p_surfaces[i]);
+ }
+ vpp_space->surface_num = 0;
+
+ if( vpp_space->p_ext_param_num || vpp_space->p_ext_params )
+ av_freep(&vpp_space->p_ext_params);
+ vpp_space->p_ext_param_num = 0;
+
+ for (i = 0; i < vpp_space->sync_num; i++){
+ av_freep(&vpp_space->p_syncp[i]->p_sync);
+ av_freep(&vpp_space->p_syncp[i]);
+ }
+ vpp_space->sync_num = 0;
+
+ av_qsv_list_rem(qsv->vpp_space,vpp_space);
+ if( av_qsv_list_count(qsv->vpp_space) == 0 )
+ av_qsv_list_close(&qsv->vpp_space);
+
+ vpp_space->is_init_done = 0;
+ break;
+ }
+ }
+}
+
+static void hb_qsv_filter_close( hb_filter_object_t * filter )
+{
+ int i = 0;
+ hb_filter_private_t * pv = filter->private_data;
+
+ if ( !pv )
+ {
+ return;
+ }
+
+ av_qsv_context* qsv = pv->job->qsv;
+ if(qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0){
+
+ // closing local stuff
+ qsv_filter_close(qsv,AV_QSV_VPP_DEFAULT);
+
+        // closing the common stuff
+ av_qsv_context_clean(qsv);
+ }
+ hb_list_close(&pv->list);
+ free( pv );
+ filter->private_data = NULL;
+}
+
+int process_frame(av_qsv_list* received_item, av_qsv_context* qsv, hb_filter_private_t * pv ){
+
+ // 1 if have results , 0 - otherwise
+ int ret = 1;
+
+ mfxStatus sts = MFX_ERR_NONE;
+ mfxFrameSurface1 *work_surface = NULL;
+ av_qsv_stage* stage = 0;
+
+ av_qsv_space *qsv_vpp = pv->vpp_space;
+
+ if(received_item){
+ stage = av_qsv_get_last_stage( received_item );
+ work_surface = stage->out.p_surface;
+ }
+
+ int sync_idx = av_qsv_get_free_sync(qsv_vpp, qsv);
+ int surface_idx = -1;
+
+ for(;;)
+ {
+ if (sync_idx == -1)
+ {
+ hb_error("qsv: Not enough resources allocated for QSV filter");
+ ret = 0;
+ break;
+ }
+ if( sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE )
+ surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv, &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
+ if (surface_idx == -1) {
+ hb_error("qsv: Not enough resources allocated for QSV filter");
+ ret = 0;
+ break;
+ }
+ if (work_surface) {
+ work_surface->Info.CropX = pv->crop[2];
+ work_surface->Info.CropY = pv->crop[0];
+ work_surface->Info.CropW = pv->width_in - pv->crop[3] - pv->crop[2];
+ work_surface->Info.CropH = pv->height_in - pv->crop[1] - pv->crop[0];
+ }
+
+        sts = MFXVideoVPP_RunFrameVPPAsync(qsv->mfx_session, work_surface,
+                                           qsv_vpp->p_surfaces[surface_idx],
+                                           NULL, qsv_vpp->p_syncp[sync_idx]->p_sync);
+
+ if( MFX_ERR_MORE_DATA == sts ){
+ if(!qsv_vpp->pending){
+ qsv_vpp->pending = av_qsv_list_init(0);
+ }
+
+            // no output yet: keep the input pending so its resources are not lost
+ av_qsv_list_add( qsv_vpp->pending, received_item);
+
+ ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
+
+ ret = 0;
+ break;
+ }
+
+ if( MFX_ERR_MORE_DATA == sts || (MFX_ERR_NONE <= sts && MFX_WRN_DEVICE_BUSY != sts)){
+ if (work_surface){
+ ff_qsv_atomic_dec(&work_surface->Data.Locked);
+ }
+ }
+
+ if( MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE <= sts){
+ if( MFX_ERR_MORE_SURFACE == sts )
+ continue;
+
+ if (qsv_vpp->p_surfaces[surface_idx] && MFX_WRN_DEVICE_BUSY != sts )
+ ff_qsv_atomic_inc(&qsv_vpp->p_surfaces[surface_idx]->Data.Locked);
+ }
+
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ if (MFX_ERR_NONE <= sts ) // repeat the call if warning and no output
+ {
+ if (MFX_WRN_DEVICE_BUSY == sts){
+ av_qsv_sleep(10); // wait if device is busy
+ continue;
+ }
+
+            // shouldn't happen here, but drain anyway
+ if(stage){
+ av_qsv_stage* new_stage = av_qsv_stage_init();
+
+ new_stage->type = AV_QSV_VPP_DEFAULT;
+ new_stage->in.p_surface = work_surface;
+ new_stage->out.p_surface = qsv_vpp->p_surfaces[surface_idx];
+ new_stage->out.sync = qsv_vpp->p_syncp[sync_idx];
+ av_qsv_add_stagee( &received_item, new_stage,HAVE_THREADS );
+
+ // add pending resources for the proper reclaim later
+ if( qsv_vpp->pending ){
+ if( av_qsv_list_count(qsv_vpp->pending)>0 ){
+ new_stage->pending = qsv_vpp->pending;
+ }
+ qsv_vpp->pending = 0;
+
+                    // release all pending surfaces by decrementing their lock counts
+ int i = 0;
+ for (i = av_qsv_list_count(new_stage->pending); i > 0; i--){
+ av_qsv_list *atom_list = av_qsv_list_item(new_stage->pending, i-1);
+ av_qsv_stage *stage = av_qsv_get_last_stage( atom_list );
+ mfxFrameSurface1 *work_surface = stage->out.p_surface;
+ if (work_surface)
+ ff_qsv_atomic_dec(&work_surface->Data.Locked);
+ }
+ }
+ }
+ break;
+ }
+
+ ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
+
+ if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
+ DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+
+ break;
+ }
+
+ return ret;
+}
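+
+/*
+ * The loop above follows the canonical asynchronous VPP submission pattern
+ * (sketch; in/out/syncp stand in for the surfaces and sync point used above):
+ *
+ *     for (;;)
+ *     {
+ *         sts = MFXVideoVPP_RunFrameVPPAsync(session, in, out, NULL, syncp);
+ *         if (sts == MFX_WRN_DEVICE_BUSY)  { av_qsv_sleep(10); continue; } // retry later
+ *         if (sts == MFX_ERR_MORE_DATA)    break;    // no output for this input yet
+ *         if (sts == MFX_ERR_MORE_SURFACE) continue; // grab another output surface
+ *         break; // MFX_ERR_NONE: output is ready once syncp has been synchronized
+ *     }
+ */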
+
+static int hb_qsv_filter_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
+{
+
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+ hb_buffer_t * out = *buf_out;
+ int sts = 0;
+
+    if ( !pv )
+    {
+        *buf_out = in;
+        *buf_in = NULL;
+        return HB_FILTER_OK;
+    }
+
+    av_qsv_context* qsv = pv->job->qsv;
+
+ while(1){
+ int ret = filter_init(qsv,pv);
+ if(ret >= 2)
+ av_qsv_sleep(1);
+ else
+ break;
+ }
+
+ *buf_in = NULL;
+
+ if ( in->size <= 0 )
+ {
+ while(1){
+ sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
+ if(sts)
+ hb_list_add(pv->list,in);
+ else
+ break;
+ }
+
+ hb_list_add( pv->list, in );
+ *buf_out = link_buf_list( pv );
+ return HB_FILTER_DONE;
+ }
+
+ sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
+
+ if(sts){
+ hb_list_add(pv->list,in);
+ }
+
+ if( hb_list_count(pv->list) ){
+ *buf_out = hb_list_item(pv->list,0);
+ out = *buf_out;
+ if(pv->is_frc_used && out)
+ {
+ mfxStatus sts = MFX_ERR_NONE;
+ if(out->qsv_details.qsv_atom){
+ av_qsv_stage* stage = av_qsv_get_last_stage( out->qsv_details.qsv_atom );
+ mfxFrameSurface1 *work_surface = stage->out.p_surface;
+
+ av_qsv_wait_on_sync( qsv,stage );
+
+ av_qsv_space *qsv_vpp = pv->vpp_space;
+ int64_t duration = ((double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD/(double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN ) * 90000.;
+ out->s.start = work_surface->Data.TimeStamp;
+ out->s.stop = work_surface->Data.TimeStamp + duration;
+ }
+ }
+ hb_list_rem(pv->list,*buf_out);
+ }
+ else
+ *buf_out = NULL;
+
+ return HB_FILTER_OK;
+}
+
+// see decavcodec.c
+hb_buffer_t *link_buf_list( hb_filter_private_t *pv )
+{
+ hb_buffer_t *head = hb_list_item( pv->list, 0 );
+
+ if ( head )
+ {
+ hb_list_rem( pv->list, head );
+ hb_buffer_t *last = head, *buf;
+ while ( ( buf = hb_list_item( pv->list, 0 ) ) != NULL )
+ {
+ hb_list_rem( pv->list, buf );
+ last->next = buf;
+ last = buf;
+ }
+ }
+ return head;
+}
+
diff --git a/libhb/qsv_filter.h b/libhb/qsv_filter.h
new file mode 100644
index 000000000..e55a85cdf
--- /dev/null
+++ b/libhb/qsv_filter.h
@@ -0,0 +1,35 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#ifndef QSV_FILTER_H
+#define QSV_FILTER_H
+
+hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
+void qsv_filter_close( av_qsv_context* qsv, AV_QSV_STAGE_TYPE vpp_type );
+
+#endif // QSV_FILTER_H
diff --git a/libhb/qsv_filter_pp.c b/libhb/qsv_filter_pp.c
new file mode 100644
index 000000000..1aef1eb80
--- /dev/null
+++ b/libhb/qsv_filter_pp.c
@@ -0,0 +1,916 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#include "hb.h"
+#include "hbffmpeg.h"
+#include "libavcodec/qsv.h"
+#include "qsv_filter_pp.h"
+#include "qsv_filter.h"
+#include "qsv_memory.h"
+
+
+static int hb_qsv_filter_pre_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+static int hb_qsv_filter_pre_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info );
+static void hb_qsv_filter_pre_close( hb_filter_object_t * filter );
+
+static int hb_qsv_filter_post_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+static int hb_qsv_filter_post_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+static int hb_qsv_filter_post_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info );
+static void hb_qsv_filter_post_close( hb_filter_object_t * filter );
+
+
+hb_filter_object_t hb_filter_qsv_pre =
+{
+ .id = HB_FILTER_QSV_PRE,
+ .enforce_order = 1,
+ .name = "Quick Sync Video user filter (pre)",
+ .settings = NULL,
+ .init = hb_qsv_filter_pre_init,
+ .work = hb_qsv_filter_pre_work,
+ .close = hb_qsv_filter_pre_close,
+ .info = hb_qsv_filter_pre_info,
+};
+
+hb_filter_object_t hb_filter_qsv_post =
+{
+ .id = HB_FILTER_QSV_POST,
+ .enforce_order = 1,
+ .name = "Quick Sync Video user filter (post)",
+ .settings = NULL,
+ .init = hb_qsv_filter_post_init,
+ .work = hb_qsv_filter_post_work,
+ .close = hb_qsv_filter_post_close,
+ .info = hb_qsv_filter_post_info,
+};
+
+
+static int filter_pre_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
+ mfxStatus sts = MFX_ERR_NONE;
+ int i=0;
+
+ if(!qsv) return 3;
+
+ av_qsv_space *prev_vpp = 0;
+
+ if(!qsv->vpp_space){
+ qsv->vpp_space = av_qsv_list_init(HAVE_THREADS);
+        // note: when the frame size does not change, the default VPP may not be used at all,
+        // which affects whether prev_vpp gets set below
+ }
+
+ if(!pv->vpp_space){
+ for(i=0; i<av_qsv_list_count(qsv->vpp_space);i++){
+ av_qsv_space *qsv_vpp = av_qsv_list_item( qsv->vpp_space, i );
+ if(qsv_vpp->type == AV_QSV_VPP_USER){
+ pv->vpp_space = qsv_vpp;
+ break;
+ }
+ else
+ if(qsv_vpp->type == AV_QSV_VPP_DEFAULT){
+ prev_vpp = qsv_vpp;
+ }
+
+ }
+ }
+
+ if(!pv->vpp_space){
+ pv->vpp_space = calloc( 1, sizeof( av_qsv_space ));
+ pv->vpp_space->type = AV_QSV_VPP_USER;
+ av_qsv_list_add( qsv->vpp_space, pv->vpp_space );
+ av_qsv_add_context_usage(qsv,HAVE_THREADS);
+ }
+ else
+ if(pv->vpp_space->is_init_done ) return 1;
+
+ if(!qsv->dec_space || !qsv->dec_space->is_init_done) return 2;
+
+ av_qsv_space *qsv_vpp = pv->vpp_space;
+
+ AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
+
+
+ if (prev_vpp)
+ {
+ memcpy( &qsv_vpp->m_mfxVideoParam.vpp, &prev_vpp->m_mfxVideoParam.vpp, sizeof(prev_vpp->m_mfxVideoParam.vpp));
+ }
+ else
+ {
+ AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
+
+        // a valid frame rate is required for VPP initialization
+ if( qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN == 0 &&
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD == 0 ){
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN = pv->job->title->rate;
+ qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD = pv->job->title->rate_base;
+ }
+
+ qsv_vpp->m_mfxVideoParam.vpp.In.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
+ qsv_vpp->m_mfxVideoParam.vpp.In.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropX = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX;
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropY = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY;
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropW = pv->job->title->width;
+ qsv_vpp->m_mfxVideoParam.vpp.In.CropH = pv->job->title->height;
+ qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
+ qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN;
+ qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD;
+ qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
+ qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
+ qsv_vpp->m_mfxVideoParam.vpp.In.Width = AV_QSV_ALIGN16(pv->job->title->width);
+ qsv_vpp->m_mfxVideoParam.vpp.In.Height = (MFX_PICSTRUCT_PROGRESSIVE == qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct)?
+ AV_QSV_ALIGN16(pv->job->title->height) : AV_QSV_ALIGN32(pv->job->title->height);
+
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropX = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropY = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropW = pv->job->title->width;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.CropH = pv->job->title->height;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
+ qsv_vpp->m_mfxVideoParam.vpp.Out.Width = AV_QSV_ALIGN16(pv->job->title->width);
+ qsv_vpp->m_mfxVideoParam.vpp.Out.Height = (MFX_PICSTRUCT_PROGRESSIVE == qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct)?
+ AV_QSV_ALIGN16(pv->job->title->height) : AV_QSV_ALIGN32(pv->job->title->height);
+
+ memset(&qsv_vpp->request, 0, sizeof(mfxFrameAllocRequest)*2);
+ }
+
+ qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+
+ qsv_vpp->surface_num = FFMIN(prev_vpp ? prev_vpp->surface_num : qsv->dec_space->surface_num/2, AV_QSV_SURFACE_NUM);
+
+ for(i = 0; i < qsv_vpp->surface_num; i++){
+ qsv_vpp->p_surfaces[i] = av_mallocz( sizeof(mfxFrameSurface1) );
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_surfaces[i], MFX_ERR_MEMORY_ALLOC);
+ memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
+ }
+
+ qsv_vpp->sync_num = FFMIN(prev_vpp ? prev_vpp->sync_num : qsv->dec_space->sync_num, AV_QSV_SYNC_NUM);
+ for (i = 0; i < qsv_vpp->sync_num; i++){
+ qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
+ qsv_vpp->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
+ }
+
+ memset(&qsv_vpp->ext_opaque_alloc, 0, sizeof(mfxExtOpaqueSurfaceAlloc));
+ qsv_vpp->m_mfxVideoParam.NumExtParam = qsv_vpp->p_ext_param_num = 1;
+
+ qsv_vpp->p_ext_params = av_mallocz(sizeof(mfxExtBuffer *)*qsv_vpp->p_ext_param_num);
+ AV_QSV_CHECK_POINTER(qsv_vpp->p_ext_params, MFX_ERR_MEMORY_ALLOC);
+
+ qsv_vpp->m_mfxVideoParam.ExtParam = qsv_vpp->p_ext_params;
+
+ qsv_vpp->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
+ qsv_vpp->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
+ qsv_vpp->p_ext_params[0] = (mfxExtBuffer*)&qsv_vpp->ext_opaque_alloc;
+
+ if(prev_vpp){
+ qsv_vpp->ext_opaque_alloc.In.Surfaces = prev_vpp->p_surfaces;
+ qsv_vpp->ext_opaque_alloc.In.NumSurface = prev_vpp->surface_num;
+ }
+ else{
+ qsv_vpp->ext_opaque_alloc.In.Surfaces = qsv->dec_space->p_surfaces;
+ qsv_vpp->ext_opaque_alloc.In.NumSurface = qsv->dec_space->surface_num;
+ }
+ qsv_vpp->ext_opaque_alloc.In.Type = qsv->dec_space->request[0].Type;
+
+ qsv_vpp->ext_opaque_alloc.Out.Surfaces = qsv_vpp->p_surfaces;
+ qsv_vpp->ext_opaque_alloc.Out.NumSurface = qsv_vpp->surface_num;
+ qsv_vpp->ext_opaque_alloc.Out.Type = qsv->dec_space->request[0].Type;
+
+ pv->qsv_user = hb_list_init();
+
+ qsv_filter_t *plugin = av_mallocz( sizeof(qsv_filter_t) );
+
+ plugin->pv = pv;
+ plugin->plug.pthis = plugin;
+ plugin->plug.PluginInit = qsv_PluginInit;
+ plugin->plug.PluginClose = qsv_PluginClose;
+ plugin->plug.GetPluginParam = qsv_GetPluginParam;
+ plugin->plug.Submit = qsv_Submit;
+ plugin->plug.Execute = qsv_Execute;
+ plugin->plug.FreeResources = qsv_FreeResources;
+
+ hb_list_add(pv->qsv_user,plugin);
+
+ sts=MFXVideoUSER_Register(qsv->mfx_session,0,&plugin->plug);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ plugin_init(plugin,&qsv_vpp->m_mfxVideoParam);
+
+ qsv_vpp->is_init_done = 1;
+
+ return 0;
+}
+
+static int hb_qsv_filter_pre_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info ){
+ hb_filter_private_t * pv = filter->private_data;
+ if( !pv )
+ return 0;
+
+ sprintf(info->human_readable_desc, "copy data to system memory");
+
+ return 0;
+}
+static int hb_qsv_filter_pre_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init ){
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
+ pv->job = init->job;
+
+ pv->pre.frame_go = 0;
+ pv->pre.frame_completed = hb_cond_init();
+ pv->pre.frame_completed_lock = hb_lock_init();
+
+ pv->post.frame_go = 0;
+ pv->post.frame_completed = hb_cond_init();
+ pv->post.frame_completed_lock = hb_lock_init();
+
+ pv->pre_busy.frame_go = 0;
+ pv->pre_busy.frame_completed = hb_cond_init();
+ pv->pre_busy.frame_completed_lock = hb_lock_init();
+
+ pv->post_busy.frame_go = 0;
+ pv->post_busy.frame_completed = hb_cond_init();
+ pv->post_busy.frame_completed_lock = hb_lock_init();
+
+ pv->list = hb_list_init();
+
+    // reminder of the two pixel formats involved:
+ // PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) , 3 planes: Y, U, V
+ // PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ pv->sws_context_from_nv12 = hb_sws_get_context(
+ pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
+ pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
+ SWS_LANCZOS|SWS_ACCURATE_RND);
+ pv->sws_context_to_nv12 = hb_sws_get_context(
+ pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
+ pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
+ SWS_LANCZOS|SWS_ACCURATE_RND);
+ return 0;
+}
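+
+/*
+ * The two contexts above convert between the NV12 surfaces used by Media SDK
+ * and HandBrake's planar YUV420P buffers, roughly as follows (sketch; the
+ * plane pointers and strides are hypothetical):
+ *
+ *     uint8_t *src_planes[2]  = { nv12_luma, nv12_chroma };
+ *     int      src_strides[2] = { nv12_pitch, nv12_pitch };
+ *     uint8_t *dst_planes[3]  = { yuv_y, yuv_u, yuv_v };
+ *     int      dst_strides[3] = { y_stride, uv_stride, uv_stride };
+ *     sws_scale(pv->sws_context_from_nv12, (const uint8_t* const*)src_planes,
+ *               src_strides, 0, pv->job->title->height, dst_planes, dst_strides);
+ */
+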
+int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t * pv ){
+
+ // 1 if have results , 0 otherwise
+ int ret = 1;
+
+ av_qsv_list* received_item = in->qsv_details.qsv_atom;
+
+ mfxStatus sts = MFX_ERR_NONE;
+ mfxFrameSurface1 *work_surface = NULL;
+ av_qsv_stage* stage = 0;
+
+ av_qsv_space *qsv_vpp = pv->vpp_space;
+
+ if (received_item)
+ {
+ stage = av_qsv_get_last_stage( received_item );
+ work_surface = stage->out.p_surface;
+ }
+
+ int sync_idx = av_qsv_get_free_sync(qsv_vpp, qsv);
+ int surface_idx = -1;
+
+ for (;;)
+ {
+ if (sync_idx == -1)
+ {
+ hb_error("qsv: Not enough resources allocated for the preprocessing filter");
+ ret = 0;
+ break;
+ }
+
+ if (sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE)
+ surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv, &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
+ if (surface_idx == -1) {
+ hb_error("qsv: Not enough resources allocated for the preprocessing filter");
+ ret = 0;
+ break;
+ }
+
+        sts = MFXVideoUSER_ProcessFrameAsync(qsv->mfx_session, &work_surface, 1,
+                                             &qsv_vpp->p_surfaces[surface_idx], 1,
+                                             qsv_vpp->p_syncp[sync_idx]->p_sync);
+
+ if (MFX_ERR_MORE_DATA == sts)
+ {
+ if (!qsv_vpp->pending)
+ {
+ qsv_vpp->pending = av_qsv_list_init(0);
+ }
+
+            // no output yet: keep the input pending so its resources are not lost
+ av_qsv_list_add( qsv_vpp->pending, received_item);
+
+ ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
+
+ ret = 0;
+ break;
+ }
+
+ if( MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE <= sts){
+ if( MFX_ERR_MORE_SURFACE == sts )
+ continue;
+
+ if (qsv_vpp->p_surfaces[surface_idx] && MFX_WRN_DEVICE_BUSY != sts )
+ ff_qsv_atomic_inc(&qsv_vpp->p_surfaces[surface_idx]->Data.Locked);
+ }
+
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ if (MFX_ERR_NONE <= sts ) // repeat the call if warning and no output
+ {
+ if (MFX_WRN_DEVICE_BUSY == sts){
+ hb_lock(pv->pre_busy.frame_completed_lock);
+ while(!pv->pre_busy.frame_go){
+ hb_cond_timedwait(pv->pre_busy.frame_completed,pv->pre_busy.frame_completed_lock,1000);
+ if(*pv->job->die)
+ break;
+ }
+ pv->pre_busy.frame_go = 0;
+ hb_unlock(pv->pre_busy.frame_completed_lock);
+
+ continue;
+ }
+ hb_lock(pv->pre.frame_completed_lock);
+ while(!pv->pre.frame_go){
+ hb_cond_timedwait(pv->pre.frame_completed,pv->pre.frame_completed_lock,1000);
+ if(*pv->job->die)
+ break;
+ }
+ pv->pre.frame_go = 0;
+ hb_unlock(pv->pre.frame_completed_lock);
+
+ in = pv->pre.out;
+
+ if (work_surface){
+ ff_qsv_atomic_dec(&work_surface->Data.Locked);
+ }
+
+            // insert now for later use; it remains locked until the data is actually ready
+ if(stage){
+ av_qsv_stage* new_stage = av_qsv_stage_init();
+
+ new_stage->type = AV_QSV_VPP_USER;
+ new_stage->in.p_surface = work_surface;
+ new_stage->out.p_surface = qsv_vpp->p_surfaces[surface_idx];
+ new_stage->out.sync = qsv_vpp->p_syncp[sync_idx];
+ av_qsv_add_stagee( &received_item, new_stage,HAVE_THREADS );
+
+ // add pending resources for the proper reclaim later
+ if( qsv_vpp->pending ){
+ if( av_qsv_list_count(qsv_vpp->pending)>0 ){
+ new_stage->pending = qsv_vpp->pending;
+ }
+ qsv_vpp->pending = 0;
+
+                    // release all pending surfaces by decrementing their lock counts
+ int i = 0;
+ for (i = av_qsv_list_count(new_stage->pending); i > 0; i--){
+ av_qsv_list *atom_list = av_qsv_list_item(new_stage->pending, i-1);
+ av_qsv_stage *stage = av_qsv_get_last_stage( atom_list );
+ mfxFrameSurface1 *work_surface = stage->out.p_surface;
+ if (work_surface)
+ ff_qsv_atomic_dec(&work_surface->Data.Locked);
+ }
+ }
+ }
+
+ break;
+ }
+
+ ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
+
+ if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
+ DEBUG_ASSERT( 1,"The bitstream buffer size is insufficient." );
+
+ break;
+ }
+
+ return ret;
+}
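+
+/*
+ * The handshake in pre_process_frame above is a simple condition-variable
+ * gate (sketch; lock/cond/go stand in for the pre or pre_busy members):
+ * wait until the other side raises frame_go, waking up periodically to
+ * honor job->die so a cancelled job does not hang.
+ *
+ *     hb_lock(lock);
+ *     while (!go)
+ *     {
+ *         hb_cond_timedwait(cond, lock, 1000);
+ *         if (*pv->job->die)
+ *             break;
+ *     }
+ *     go = 0;
+ *     hb_unlock(lock);
+ */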
+
+static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out ){
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+ hb_buffer_t * out = *buf_out;
+ int sts = 0;
+
+ av_qsv_context* qsv = pv->job->qsv;
+
+ if(!in->qsv_details.filter_details)
+ in->qsv_details.filter_details = pv;
+
+ if ( in->size <= 0 )
+ {
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
+ }
+
+ while(1){
+ int ret = filter_pre_init(qsv,pv);
+ if(ret >= 2)
+ av_qsv_sleep(1);
+ else
+ break;
+ }
+
+ pv->pre.in = in;
+ pv->pre.out = in;
+
+ sts = pre_process_frame(in, qsv, pv);
+
+ if(sts){
+ hb_list_add(pv->list,out);
+ }
+
+ if( hb_list_count(pv->list) ){
+ *buf_out = hb_list_item(pv->list,0);
+ hb_list_rem(pv->list,*buf_out);
+ *buf_in = NULL;
+ }
+ else{
+ *buf_in = NULL;
+ *buf_out = in;
+ }
+
+ return HB_FILTER_OK;
+}
+static void hb_qsv_filter_pre_close( hb_filter_object_t * filter ){
+ int i = 0;
+ mfxStatus sts = MFX_ERR_NONE;
+
+ hb_filter_private_t * pv = filter->private_data;
+
+ if ( !pv )
+ {
+ return;
+ }
+
+ sws_freeContext(pv->sws_context_to_nv12);
+ sws_freeContext(pv->sws_context_from_nv12);
+
+ av_qsv_context* qsv = pv->job->qsv;
+ if(qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0 ){
+ if(pv->qsv_user && qsv->mfx_session){
+
+ sts=MFXVideoUSER_Unregister(qsv->mfx_session,0);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ for(i=hb_list_count(pv->qsv_user);i>0;i--){
+ qsv_filter_t *plugin = hb_list_item(pv->qsv_user,i-1);
+ hb_list_rem(pv->qsv_user,plugin);
+ plugin_close(plugin);
+ }
+ hb_list_close(&pv->qsv_user);
+ }
+
+        // close this filter's own VPP_USER space
+ qsv_filter_close(qsv,AV_QSV_VPP_USER);
+
+        // close the common QSV context
+ av_qsv_context_clean(qsv);
+ }
+ hb_cond_close(&pv->pre.frame_completed);
+ hb_lock_close(&pv->pre.frame_completed_lock);
+
+ hb_cond_close(&pv->post.frame_completed);
+ hb_lock_close(&pv->post.frame_completed_lock);
+
+ hb_cond_close(&pv->pre_busy.frame_completed);
+ hb_lock_close(&pv->pre_busy.frame_completed_lock);
+
+ hb_cond_close(&pv->post_busy.frame_completed);
+ hb_lock_close(&pv->post_busy.frame_completed_lock);
+
+ hb_list_close( &pv->list );
+
+ free( pv );
+ filter->private_data = NULL;
+}
+
+
+static int hb_qsv_filter_post_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info ){
+ hb_filter_private_t * pv = filter->private_data;
+ if( !pv )
+ return 0;
+
+ sprintf(info->human_readable_desc, "copy data to opaque memory");
+
+ return 0;
+}
+static int hb_qsv_filter_post_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init ){
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
+ pv->job = init->job;
+ return 0;
+}
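+// filter "work" entry point for the post-processing stage: hands the CPU-filtered
+// buffer back to the plugin thread (post) and waits until it has been copied into
+// the output surface (post_busy)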
+static int hb_qsv_filter_post_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out ){
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+ hb_buffer_t * out = *buf_out;
+
+ if ( in->size <= 0 )
+ {
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
+ }
+
+ av_qsv_context* qsv = pv->job->qsv;
+ pv = in->qsv_details.filter_details;
+
+ if (!pv)
+ {
+ *buf_out = NULL;
+ *buf_in = NULL;
+ return HB_FILTER_OK;
+ }
+
+ while(1){
+ int ret = filter_pre_init(qsv,pv);
+ if(ret >= 2)
+ av_qsv_sleep(1);
+ else
+ break;
+ }
+
+ pv->post.in = in;
+ pv->post.out = out;
+
+ // signal: input is prepared, can start inserting data back into pipeline
+ hb_lock(pv->post.frame_completed_lock);
+ pv->post.frame_go = 1;
+ hb_cond_broadcast(pv->post.frame_completed);
+ hb_unlock(pv->post.frame_completed_lock);
+
+ // wait: on signal that data is ready
+ hb_lock(pv->post_busy.frame_completed_lock);
+ while(!pv->post_busy.frame_go){
+ hb_cond_timedwait(pv->post_busy.frame_completed,pv->post_busy.frame_completed_lock,1000);
+ if(*pv->job->die)
+ break;
+ }
+ pv->post_busy.frame_go = 0;
+ hb_unlock(pv->post_busy.frame_completed_lock);
+
+ if (pv->post.status == HB_FILTER_OK || pv->post.status == HB_FILTER_DONE)
+ {
+ *buf_out = in;
+ }
+ else
+ {
+ *buf_out = NULL;
+ pv->post.status = HB_FILTER_OK;
+ }
+ *buf_in = NULL;
+
+ return HB_FILTER_OK;
+}
+static void hb_qsv_filter_post_close( hb_filter_object_t * filter ){
+ hb_filter_private_t * pv = filter->private_data;
+
+ if ( !pv )
+ {
+ return;
+ }
+
+ free( pv );
+ filter->private_data = NULL;
+}
+
+
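+// mfxPlugin callbacks, invoked by Media SDK once the plugin has been registered with the session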
+mfxStatus MFX_CDECL qsv_PluginInit(mfxHDL pthis, mfxCoreInterface *core){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ if(core && pthis){
+ qsv_filter_t *plugin = pthis;
+ plugin->core = core;
+
+ plugin->pluginparam.MaxThreadNum = 1;
+ plugin->pluginparam.ThreadPolicy = MFX_THREADPOLICY_SERIAL;
+ }
+ else
+ sts = MFX_ERR_NULL_PTR;
+
+ return sts;
+}
+mfxStatus MFX_CDECL qsv_PluginClose (mfxHDL pthis){
+ mfxStatus sts = MFX_ERR_NONE;
+ return sts;
+}
+mfxStatus MFX_CDECL qsv_GetPluginParam(mfxHDL pthis, mfxPluginParam *par){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ if(pthis){
+ qsv_filter_t *plugin = pthis;
+ *par = plugin->pluginparam;
+ }
+ else
+ sts = MFX_ERR_NULL_PTR;
+ return sts;
+}
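+// MSDK plugin callback: resolve the opaque surfaces to real ones, reserve a free
+// task slot (or report DEVICE_BUSY), carry over timestamps, and hand the task back
+// to Media SDK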
+mfxStatus MFX_CDECL qsv_Submit(mfxHDL pthis, const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ qsv_filter_t *plugin = pthis;
+
+ mfxFrameSurface1 *surface_in = (mfxFrameSurface1 *)in[0];
+ mfxFrameSurface1 *surface_out = (mfxFrameSurface1 *)out[0];
+ mfxFrameSurface1 *real_surface_in = surface_in;
+ mfxFrameSurface1 *real_surface_out = surface_out;
+
+ sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_in, &real_surface_in);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
+
+ sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_out, &real_surface_out);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);
+
+ int task_idx = get_free_task(plugin->tasks);
+
+ if (task_idx == -1)
+ {
+ return MFX_WRN_DEVICE_BUSY;
+ }
+
+ plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_in->Data));
+ plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_out->Data));
+
+ // to preserve timing if other filters are used in-between
+ surface_out->Data.TimeStamp = surface_in->Data.TimeStamp;
+ surface_out->Data.FrameOrder = surface_in->Data.FrameOrder;
+
+ qsv_filter_task_t *current_task = hb_list_item(plugin->tasks,task_idx);
+ current_task->in = real_surface_in;
+ current_task->out = real_surface_out;
+ current_task->busy = 1;
+ current_task->pv = plugin->pv;
+
+ *task = (mfxThreadTask)current_task;
+
+ return sts;
+}
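+// MSDK plugin callback: run the task's processing function (process_filter) on a Media SDK thread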
+mfxStatus MFX_CDECL qsv_Execute(mfxHDL pthis, mfxThreadTask task, mfxU32 uid_p, mfxU32 uid_a){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ qsv_filter_task_t *current_task = (qsv_filter_task_t *)task;
+ qsv_filter_t *plugin = pthis;
+
+ sts = (current_task->processor.process)(current_task,0);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ sts = MFX_TASK_DONE;
+ return sts;
+}
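+// MSDK plugin callback: drop the surface references, free the task slot and wake up the submitter (pre_busy)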
+mfxStatus MFX_CDECL qsv_FreeResources(mfxHDL pthis, mfxThreadTask task, mfxStatus sts){
+
+ qsv_filter_t *plugin = pthis;
+ qsv_filter_task_t *current_task = (qsv_filter_task_t *)task;
+
+ plugin->core->DecreaseReference(plugin->core->pthis, &(current_task->in->Data));
+ plugin->core->DecreaseReference(plugin->core->pthis, &(current_task->out->Data));
+
+ current_task->busy = 0;
+
+ hb_lock(plugin->pv->pre_busy.frame_completed_lock);
+ plugin->pv->pre_busy.frame_go = 1;
+ hb_cond_broadcast(plugin->pv->pre_busy.frame_completed);
+ hb_unlock(plugin->pv->pre_busy.frame_completed_lock);
+
+ return MFX_ERR_NONE;
+}
+
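+// map the opaque surface chains described by MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION
+// and set up the plugin's single-entry task list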
+mfxStatus plugin_init(qsv_filter_t* plugin, mfxVideoParam *param){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ if(plugin->is_init_done) return sts;
+
+ plugin->videoparam = param;
+
+ mfxExtOpaqueSurfaceAlloc* plugin_opaque_alloc = NULL;
+
+ plugin_opaque_alloc = (mfxExtOpaqueSurfaceAlloc*) get_ext_buffer(plugin->videoparam->ExtParam,
+ plugin->videoparam->NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);
+
+ if(!plugin_opaque_alloc || !plugin_opaque_alloc->In.Surfaces || !plugin_opaque_alloc->Out.Surfaces)
+ return MFX_ERR_INVALID_VIDEO_PARAM;
+
+ sts = plugin->core->MapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->In.NumSurface,
+ plugin_opaque_alloc->In.Type, plugin_opaque_alloc->In.Surfaces);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+
+ sts = plugin->core->MapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->Out.NumSurface,
+ plugin_opaque_alloc->Out.Type, plugin_opaque_alloc->Out.Surfaces);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+
+ plugin->tasks = hb_list_init();
+ qsv_filter_task_t *task = calloc( 1, sizeof( qsv_filter_task_t ));
+
+ task->processor.process = process_filter;
+ task->processor.alloc = &plugin->core->FrameAllocator;
+ task->processor.core = plugin->core;
+
+ hb_list_add(plugin->tasks,task);
+
+ plugin->is_init_done = 1;
+
+ return sts;
+}
+
+mfxStatus plugin_close(qsv_filter_t* plugin){
+ int i = 0;
+ mfxStatus sts = MFX_ERR_NONE;
+
+ if(!plugin->is_init_done) return sts;
+
+ mfxExtOpaqueSurfaceAlloc* plugin_opaque_alloc = NULL;
+
+ plugin_opaque_alloc = (mfxExtOpaqueSurfaceAlloc*) get_ext_buffer(plugin->videoparam->ExtParam,
+ plugin->videoparam->NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);
+
+ if(!plugin_opaque_alloc || !plugin_opaque_alloc->In.Surfaces || !plugin_opaque_alloc->Out.Surfaces)
+ return MFX_ERR_INVALID_VIDEO_PARAM;
+
+ sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->In.NumSurface,
+ plugin_opaque_alloc->In.Type, plugin_opaque_alloc->In.Surfaces);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+
+ sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->Out.NumSurface,
+ plugin_opaque_alloc->Out.Type, plugin_opaque_alloc->Out.Surfaces);
+ AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
+
+ if(plugin->tasks){
+ for(i=hb_list_count(plugin->tasks);i>0;i--){
+ qsv_filter_task_t *task = hb_list_item(plugin->tasks,i-1);
+ hb_list_rem(plugin->tasks,task);
+ free(task);
+ }
+ hb_list_close(&plugin->tasks);
+ }
+
+ plugin->is_init_done = 0;
+
+ return sts;
+}
+
+mfxExtBuffer* get_ext_buffer(mfxExtBuffer** buffers, mfxU32 buffers_num, mfxU32 buffer_id){
+ int i = 0;
+ if(!buffers) return 0;
+ for(i=0;i<buffers_num;i++){
+ if(!buffers[i]) continue;
+ if(buffers[i]->BufferId == buffer_id)
+ return buffers[i];
+ }
+ return 0;
+}
+
+int get_free_task(hb_list_t* tasks){
+ int ret = -1;
+ int i = 0;
+ for(i=0;i<hb_list_count(tasks);i++){
+ qsv_filter_task_t* task = hb_list_item(tasks,i);
+ if(!task->busy){
+ ret = i;
+ break;
+ }
+ }
+ return ret;
+}
+
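+// lock_frame()/unlock_frame(): map and unmap a surface's data pointers through the
+// frame allocator; surfaces already backed by system memory are left untouched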
+mfxStatus lock_frame(mfxFrameAllocator *alloc,mfxFrameSurface1 *surface){
+ mfxStatus sts = MFX_ERR_NONE;
+ // prevent double lock
+ if (surface->Data.Y != 0 && surface->Data.MemId !=0){
+ return MFX_ERR_UNSUPPORTED;
+ }
+    // already backed by system memory, no allocator lock needed
+ if (surface->Data.Y != 0){
+ return MFX_ERR_NONE;
+ }
+ sts = alloc->Lock(alloc->pthis,surface->Data.MemId,&surface->Data);
+ return sts;
+}
+
+mfxStatus unlock_frame(mfxFrameAllocator *alloc,mfxFrameSurface1 *surface){
+ mfxStatus sts = MFX_ERR_NONE;
+    // system memory surface, never locked through the allocator
+ if (surface->Data.Y != 0 && surface->Data.MemId == 0){
+ return MFX_ERR_NONE;
+ }
+ // not locked
+ if (surface->Data.Y == 0){
+ return MFX_ERR_NONE;
+ }
+ sts = alloc->Unlock(alloc->pthis,surface->Data.MemId,&surface->Data);
+ return sts;
+}
+
+
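+// task body executed from qsv_Execute(): NV12 -> YUV420 into pv->pre.out, hand the
+// frame to the CPU filter chain, wait for the result in pv->post.in, then convert
+// it back (YUV420 -> NV12) into the output surface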
+mfxStatus process_filter(qsv_filter_task_t* task, void* params){
+ mfxStatus sts = MFX_ERR_NONE;
+
+ if (MFX_ERR_NONE != (sts = lock_frame(task->processor.alloc,task->in)))return sts;
+ if (MFX_ERR_NONE != (sts = lock_frame(task->processor.alloc,task->out)))
+ {
+ unlock_frame(task->processor.alloc,task->in);
+ return sts;
+ }
+
+ qsv_nv12_to_yuv420(task->pv->sws_context_from_nv12,task->pv->pre.out, task->in, task->processor.core);
+
+ // signal: input is prepared, converted from pipeline into internal buffer
+ hb_lock(task->pv->pre.frame_completed_lock);
+ task->pv->pre.frame_go = 1;
+ hb_cond_broadcast(task->pv->pre.frame_completed);
+ hb_unlock(task->pv->pre.frame_completed_lock);
+
+    // wait: until the downstream CPU filters have produced the processed frame (pv->post.in)
+ hb_lock(task->pv->post.frame_completed_lock);
+ while(!task->pv->post.frame_go){
+ hb_cond_timedwait(task->pv->post.frame_completed,task->pv->post.frame_completed_lock,1000);
+ if(*task->pv->job->die)
+ break;
+ }
+ task->pv->post.frame_go = 0;
+ hb_unlock(task->pv->post.frame_completed_lock);
+
+// disabled test pattern: draws black stripes across the middle of the luma plane
+#if 0
+ {
+ int i = 0;
+ char *cur_line;
+ char* luma = task->pv->post.in->plane[0].data;
+ int pitch = task->pv->post.in->plane[0].stride;
+ int h = task->pv->post.in->plane[0].height;
+ int w = task->pv->post.in->plane[0].width;
+ for (i = 0; i < h; i++){
+
+ cur_line = luma + i * pitch;
+ if(i>h/4 && i < 3*h/4 && i % 5 == 0 )
+ memset(cur_line, 0 , w );
+ }
+ }
+#endif
+
+ if(task->pv->post.in)
+ {
+ qsv_yuv420_to_nv12(task->pv->sws_context_to_nv12, task->out, task->pv->post.in);
+ }
+
+ // signal: output is prepared, converted from internal buffer into pipeline
+ hb_lock(task->pv->post_busy.frame_completed_lock);
+ task->pv->post_busy.frame_go = 1;
+ hb_cond_broadcast(task->pv->post_busy.frame_completed);
+ hb_unlock(task->pv->post_busy.frame_completed_lock);
+
+ unlock_frame(task->processor.alloc,task->in);
+ unlock_frame(task->processor.alloc,task->out);
+
+ return sts;
+}
diff --git a/libhb/qsv_filter_pp.h b/libhb/qsv_filter_pp.h
new file mode 100644
index 000000000..e70370321
--- /dev/null
+++ b/libhb/qsv_filter_pp.h
@@ -0,0 +1,114 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#ifndef QSV_FILTER_PP_H
+#define QSV_FILTER_PP_H
+
+#include "msdk/mfxplugin.h"
+
+extern hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
+
+struct qsv_filter_task_s;
+
+typedef struct{
+ mfxFrameAllocator *alloc;
+ mfxStatus (*process)(struct qsv_filter_task_s*,void*);
+ mfxCoreInterface *core;
+}qsv_filter_processor_t;
+
+typedef struct qsv_filter_task_s{
+ mfxFrameSurface1 *in;
+ mfxFrameSurface1 *out;
+ int busy;
+ hb_filter_private_t *pv;
+ qsv_filter_processor_t processor;
+} qsv_filter_task_t;
+
+typedef struct qsv_filter_private_s{
+
+ int is_init_done;
+
+ mfxCoreInterface *core;
+ mfxVideoParam *videoparam;
+ mfxPluginParam pluginparam;
+
+ hb_filter_private_t *pv;
+
+ mfxPlugin plug;
+ hb_list_t *tasks;
+} qsv_filter_t;
+
+typedef struct hb_qsv_sync_s{
+ int frame_go;
+ int status;
+ hb_cond_t *frame_completed;
+ hb_lock_t *frame_completed_lock;
+
+ hb_buffer_t *in;
+ hb_buffer_t *out;
+} hb_qsv_sync_t;
+
+typedef struct hb_filter_private_s
+{
+ hb_job_t *job;
+ hb_list_t *list;
+
+ hb_qsv_sync_t pre;
+ hb_qsv_sync_t pre_busy;
+
+ hb_qsv_sync_t post;
+ hb_qsv_sync_t post_busy;
+
+ av_qsv_space *vpp_space;
+ hb_list_t *qsv_user;
+
+ struct SwsContext* sws_context_to_nv12;
+ struct SwsContext* sws_context_from_nv12;
+} hb_filter_private_t_qsv;
+
+// methods to be called by Media SDK
+mfxStatus MFX_CDECL qsv_PluginInit(mfxHDL pthis, mfxCoreInterface *core);
+mfxStatus MFX_CDECL qsv_PluginClose (mfxHDL pthis);
+mfxStatus MFX_CDECL qsv_GetPluginParam(mfxHDL pthis, mfxPluginParam *par);
+mfxStatus MFX_CDECL qsv_Submit(mfxHDL pthis, const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task);
+mfxStatus MFX_CDECL qsv_Execute(mfxHDL pthis, mfxThreadTask task, mfxU32 uid_p, mfxU32 uid_a);
+mfxStatus MFX_CDECL qsv_FreeResources(mfxHDL pthis, mfxThreadTask task, mfxStatus sts);
+
+// methods to be called by us
+mfxStatus plugin_init(qsv_filter_t*,mfxVideoParam*);
+mfxStatus plugin_close(qsv_filter_t*);
+
+//internal functions
+mfxExtBuffer* get_ext_buffer(mfxExtBuffer**, mfxU32, mfxU32);
+int get_free_task(hb_list_t*);
+mfxStatus process_filter(qsv_filter_task_t*,void*);
+mfxStatus lock_frame(mfxFrameAllocator *,mfxFrameSurface1*);
+mfxStatus unlock_frame(mfxFrameAllocator *,mfxFrameSurface1*);
+
+
+#endif //QSV_FILTER_PP_H
diff --git a/libhb/qsv_memory.c b/libhb/qsv_memory.c
new file mode 100644
index 000000000..f04c77a76
--- /dev/null
+++ b/libhb/qsv_memory.c
@@ -0,0 +1,120 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#include "hb.h"
+#include "hbffmpeg.h"
+#include "qsv_memory.h"
+
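+// convert an NV12 mfxFrameSurface1 into a YUV 4:2:0 hb_buffer_t with swscale;
+// CopyFrame() is tried first to read the data out of video memory, with a direct
+// pointer fallback when it is unavailable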
+int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrameSurface1* src, mfxCoreInterface *core){
+ int ret = 0;
+ int i,j;
+
+ int in_pitch = src->Data.Pitch;
+ int w = AV_QSV_ALIGN16(src->Info.Width);
+ int h = (MFX_PICSTRUCT_PROGRESSIVE == src->Info.PicStruct) ? AV_QSV_ALIGN16(src->Info.Height) : AV_QSV_ALIGN32(src->Info.Height);
+ uint8_t *in_luma = 0;
+ uint8_t *in_chroma = 0;
+ static int copyframe_in_use = 1;
+
+
+ mfxStatus sts = MFX_ERR_NONE;
+    mfxFrameSurface1 accel_dst = { 0 }; // zero-init so fields not set below (e.g. Data.MemId) are well-defined for CopyFrame()
+
+ if (copyframe_in_use)
+ {
+ accel_dst.Info.FourCC = src->Info.FourCC;
+ accel_dst.Info.CropH = src->Info.CropH;
+ accel_dst.Info.CropW = src->Info.CropW;
+ accel_dst.Info.CropY = src->Info.CropY;
+ accel_dst.Info.CropX = src->Info.CropX;
+ accel_dst.Info.Width = w;
+ accel_dst.Info.Height = h;
+ accel_dst.Data.Pitch = src->Data.Pitch;
+ accel_dst.Data.Y = calloc( 1, in_pitch*h );
+ accel_dst.Data.VU = calloc( 1, in_pitch*h/2 );
+
+ sts = core->CopyFrame(core->pthis, &accel_dst, src);
+
+ if (sts < MFX_ERR_NONE)
+ {
+ free(accel_dst.Data.Y);
+ free(accel_dst.Data.VU);
+ copyframe_in_use = 0;
+ }
+ else
+ {
+ in_luma = accel_dst.Data.Y + accel_dst.Info.CropY * in_pitch + accel_dst.Info.CropX;
+ in_chroma = accel_dst.Data.VU + accel_dst.Info.CropY / 2 * in_pitch + accel_dst.Info.CropX;
+ }
+ }
+
+ if (!copyframe_in_use)
+ {
+ in_luma = src->Data.Y + src->Info.CropY * in_pitch + src->Info.CropX;
+ in_chroma = src->Data.VU + src->Info.CropY / 2 * in_pitch + src->Info.CropX;
+ }
+
+ hb_video_buffer_realloc( dst, w, h );
+
+ uint8_t *srcs[] = { in_luma, in_chroma };
+ int srcs_stride[] = { in_pitch, in_pitch };
+
+ uint8_t *dsts[] = { dst->plane[0].data, dst->plane[1].data, dst->plane[2].data };
+ int dsts_stride[] = { dst->plane[0].stride, dst->plane[1].stride, dst->plane[2].stride };
+
+ ret = sws_scale(sws_context, srcs, srcs_stride, 0, h, dsts, dsts_stride );
+
+ if (copyframe_in_use)
+ {
+ free(accel_dst.Data.Y);
+ free(accel_dst.Data.VU);
+ }
+
+ return ret;
+}
+
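+// convert a YUV 4:2:0 hb_buffer_t back into an NV12 mfxFrameSurface1 with swscale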
+int qsv_yuv420_to_nv12(struct SwsContext* sws_context,mfxFrameSurface1* dst, hb_buffer_t* src){
+ int ret = 0;
+
+ int w = src->plane[0].width;
+ int h = src->plane[0].height;
+
+ int out_pitch = dst->Data.Pitch;
+ uint8_t *out_luma = dst->Data.Y;
+ uint8_t *out_chroma = dst->Data.VU;
+
+ uint8_t *srcs[] = { src->plane[0].data, src->plane[1].data, src->plane[2].data };
+ int srcs_stride[] = { src->plane[0].stride, src->plane[1].stride, src->plane[2].stride };
+
+ uint8_t *dsts[] = { out_luma, out_chroma };
+ int dsts_stride[] = { out_pitch, out_pitch };
+
+ ret = sws_scale(sws_context, srcs, srcs_stride, 0, h, dsts, dsts_stride );
+
+ return ret;
+}
diff --git a/libhb/qsv_memory.h b/libhb/qsv_memory.h
new file mode 100644
index 000000000..2d0f51208
--- /dev/null
+++ b/libhb/qsv_memory.h
@@ -0,0 +1,55 @@
+/* ********************************************************************* *\
+
+Copyright (C) 2013 Intel Corporation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+- Neither the name of Intel Corporation nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+\* ********************************************************************* */
+
+#ifndef QSV_MEMORY_H
+#define QSV_MEMORY_H
+
+#include "libavcodec/qsv.h"
+#include "msdk/mfxplugin.h"
+
+typedef struct{
+
+ struct{
+        // NV12 "planes": Y and interleaved VU
+ uint8_t *data[2];
+ int strides[2];
+ } qsv_data;
+
+ struct{
+        // YUV 4:2:0 planes: Y, U, V
+ uint8_t *data[3];
+ int strides[3];
+ } data;
+ int width;
+ int height;
+} qsv_memory_copy_t;
+
+int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrameSurface1* src,mfxCoreInterface *core);
+int qsv_yuv420_to_nv12(struct SwsContext* sws_context,mfxFrameSurface1* dst, hb_buffer_t* src);
+
+#endif // QSV_MEMORY_H