| author | agalin89 <[email protected]> | 2020-07-29 16:31:51 +0100 |
|---|---|---|
| committer | Scott <[email protected]> | 2020-08-20 18:18:21 +0100 |
| commit | e321b2332098e38b5ab667f5451918e81c393c5e (patch) | |
| tree | ccfffc6689fcf3d95f5bc52d08577b8d1fc14370 /libhb | |
| parent | 65b784fcfcc40e5adac6375fc95466c0f2b062c7 (diff) | |
qsv: remove globals
Diffstat (limited to 'libhb')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | libhb/cropscale.c | 2 |
| -rw-r--r-- | libhb/decavcodec.c | 8 |
| -rw-r--r-- | libhb/enc_qsv.c | 4 |
| -rw-r--r-- | libhb/handbrake/qsv_common.h | 6 |
| -rw-r--r-- | libhb/handbrake/qsv_libav.h | 2 |
| -rw-r--r-- | libhb/hbavfilter.c | 2 |
| -rw-r--r-- | libhb/qsv_common.c | 82 |
7 files changed, 50 insertions, 56 deletions
diff --git a/libhb/cropscale.c b/libhb/cropscale.c
index de96d46af..6bc9f6d5a 100644
--- a/libhb/cropscale.c
+++ b/libhb/cropscale.c
@@ -97,7 +97,7 @@ static int crop_scale_init(hb_filter_object_t * filter, hb_filter_init_t * init)
         hb_dict_set_int(avsettings, "w", width);
         hb_dict_set_int(avsettings, "h", height);
         hb_dict_set(avfilter, "scale_qsv", avsettings);
-        int result = hb_create_ffmpeg_pool(width, height, AV_PIX_FMT_NV12, HB_QSV_POOL_SURFACE_SIZE, 0, &init->job->qsv.ctx->hb_vpp_qsv_frames_ctx->hw_frames_ctx);
+        int result = hb_create_ffmpeg_pool(init->job, width, height, AV_PIX_FMT_NV12, HB_QSV_POOL_SURFACE_SIZE, 0, &init->job->qsv.ctx->hb_vpp_qsv_frames_ctx->hw_frames_ctx);
         if (result < 0)
         {
             hb_error("hb_create_ffmpeg_pool vpp allocation failed");
diff --git a/libhb/decavcodec.c b/libhb/decavcodec.c
index 27b852a8a..f1722a92a 100644
--- a/libhb/decavcodec.c
+++ b/libhb/decavcodec.c
@@ -1421,7 +1421,6 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
                 return 1;
             }
         }
-        hb_qsv_update_frames_context(pv->job);
         if (!pv->job->qsv.ctx->dec_space)
         {
             pv->job->qsv.ctx->dec_space = av_mallocz(sizeof(hb_qsv_space));
@@ -1474,9 +1473,10 @@ static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
     if (pv->qsv.decode &&
         pv->qsv.config.io_pattern == MFX_IOPATTERN_OUT_VIDEO_MEMORY)
     {
-        // assign callbacks
-        pv->context->get_format = hb_qsv_get_format;
-        pv->context->get_buffer2 = hb_qsv_get_buffer;
+        // assign callbacks and job to have access to qsv context from ffmpeg
+        pv->context->get_format = hb_qsv_get_format;
+        pv->context->get_buffer2 = hb_qsv_get_buffer;
+        pv->context->opaque = pv->job;
         pv->context->hwaccel_context = 0;
     }
 #endif
diff --git a/libhb/enc_qsv.c b/libhb/enc_qsv.c
index a35df3c2b..49eeca6bc 100644
--- a/libhb/enc_qsv.c
+++ b/libhb/enc_qsv.c
@@ -42,8 +42,6 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "libavutil/hwcontext.h"
 #include <mfx/mfxvideo.h>
 
-extern AVBufferRef *hb_hw_device_ctx;
-
 /*
  * The frame info struct remembers information about each frame across calls to
  * the encoder. Since frames are uniquely identified by their timestamp, we use
@@ -766,7 +764,7 @@ int qsv_enc_init(hb_work_private_t *pv)
 
     mfxVersion ver;
     mfxIMPL impl;
 
-    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hb_hw_device_ctx->data;
+    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)qsv->hb_hw_device_ctx->data;
     AVQSVDeviceContext *device_hwctx = device_ctx->hwctx;
     mfxSession parent_session = device_hwctx->session;
diff --git a/libhb/handbrake/qsv_common.h b/libhb/handbrake/qsv_common.h
index 35024b6f2..dd6ffd3ee 100644
--- a/libhb/handbrake/qsv_common.h
+++ b/libhb/handbrake/qsv_common.h
@@ -226,11 +226,9 @@ int hb_qsv_is_enabled(hb_job_t *job);
 hb_qsv_context* hb_qsv_context_init();
 void hb_qsv_context_uninit(hb_job_t *job);
 int hb_qsv_sanitize_filter_list(hb_job_t *job);
-int hb_qsv_hw_frames_init(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx);
-int hb_create_ffmpeg_pool(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx);
+int hb_qsv_hw_frames_init(AVCodecContext *s);
+int hb_create_ffmpeg_pool(hb_job_t *job, int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx);
 int hb_qsv_hw_filters_are_enabled(hb_job_t *job);
-// TODO: After moving globals to pv->context->opaque = job remove hb_qsv_update_frames_context()
-void hb_qsv_update_frames_context(hb_job_t *job);
 int hb_qsv_full_path_is_enabled(hb_job_t *job);
 AVBufferRef *hb_qsv_create_mids(AVBufferRef *hw_frames_ref);
 hb_buffer_t* hb_qsv_copy_frame(hb_job_t *job, AVFrame *frame, int is_vpp);
diff --git a/libhb/handbrake/qsv_libav.h b/libhb/handbrake/qsv_libav.h
index 3a2fb5fb8..0fef06a36 100644
--- a/libhb/handbrake/qsv_libav.h
+++ b/libhb/handbrake/qsv_libav.h
@@ -334,6 +334,8 @@ typedef struct hb_qsv_context {
 
     int num_cpu_filters;
     int qsv_filters_are_enabled;
+    char *qsv_device;
+    AVBufferRef *hb_hw_device_ctx;
     HBQSVFramesContext *hb_dec_qsv_frames_ctx;
     HBQSVFramesContext *hb_vpp_qsv_frames_ctx;
 } hb_qsv_context;
diff --git a/libhb/hbavfilter.c b/libhb/hbavfilter.c
index cbb668169..a8e6ee304 100644
--- a/libhb/hbavfilter.c
+++ b/libhb/hbavfilter.c
@@ -128,7 +128,7 @@ hb_avfilter_graph_init(hb_value_t * settings, hb_filter_init_t * init)
                             init->vrate.num, init->vrate.den);
 
         AVBufferRef *hb_hw_frames_ctx = NULL;
-        result = hb_create_ffmpeg_pool(init->geometry.width, init->geometry.height, AV_PIX_FMT_NV12, 32, 0, &hb_hw_frames_ctx);
+        result = hb_create_ffmpeg_pool(graph->job, init->geometry.width, init->geometry.height, AV_PIX_FMT_NV12, 32, 0, &hb_hw_frames_ctx);
         if (result < 0)
         {
             hb_error("hb_create_ffmpeg_pool failed");
diff --git a/libhb/qsv_common.c b/libhb/qsv_common.c
index cb51c6763..a4eb3ea83 100644
--- a/libhb/qsv_common.c
+++ b/libhb/qsv_common.c
@@ -29,11 +29,6 @@
 #include "libavutil/hwcontext_qsv.h"
 #include "libavutil/hwcontext.h"
 
-// TODO: Moving globals to pv->context->opaque = job in decavcodecvInit where get_format is assigned and then retrieve the job where these are used in hb_qsv_get_format
-// (which calls hb_qsv_hw_frames_init which calls hb_create_ffmpeg_pool), then remove function hb_qsv_update_frames_context
-static HBQSVFramesContext *hb_dec_qsv_frames_ctx = NULL;
-static int qsv_filters_are_enabled = 0;
-
 // QSV info for each codec
 static hb_qsv_info_t *hb_qsv_info_avc = NULL;
 static hb_qsv_info_t *hb_qsv_info_hevc = NULL;
@@ -998,12 +993,6 @@ int hb_qsv_hw_filters_are_enabled(hb_job_t *job)
     return job && job->qsv.ctx && job->qsv.ctx->qsv_filters_are_enabled;
 }
 
-void hb_qsv_update_frames_context(hb_job_t *job)
-{
-    qsv_filters_are_enabled = job->qsv.ctx->qsv_filters_are_enabled;
-    hb_dec_qsv_frames_ctx = job->qsv.ctx->hb_dec_qsv_frames_ctx;
-}
-
 int hb_qsv_is_enabled(hb_job_t *job)
 {
     return hb_qsv_decode_is_enabled(job) || hb_qsv_info_get(job->vcodec);
@@ -2319,10 +2308,6 @@ void hb_qsv_force_workarounds()
 #undef FORCE_WORKAROUNDS
 }
 
-// TODO: Moving globals to pv->context->opaque = job in decavcodecvInit where get_format is assigned
-AVBufferRef *hb_hw_device_ctx = NULL;
-char *qsv_device = NULL;
-
 #if defined(_WIN32) || defined(__MINGW32__)
 // Direct X
 #define COBJMACROS
@@ -2714,9 +2699,9 @@ static int hb_qsv_allocate_dx11_encoder_pool(HBQSVFramesContext* hb_enc_qsv_fram
     return 0;
 }
 
-static int hb_qsv_get_dx_device(HBQSVFramesContext* hb_enc_qsv_frames_ctx)
+static int hb_qsv_get_dx_device(hb_job_t *job)
 {
-    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hb_hw_device_ctx->data;
+    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)job->qsv.ctx->hb_hw_device_ctx->data;
     AVQSVDeviceContext *device_hwctx = device_ctx->hwctx;
     mfxSession parent_session = device_hwctx->session;
 
@@ -2754,8 +2739,8 @@ static int hb_qsv_get_dx_device(HBQSVFramesContext* hb_enc_qsv_frames_ctx)
     if (device_manager_handle_type == MFX_HANDLE_D3D11_DEVICE)
     {
         ID3D11Device *device = (ID3D11Device *)device_manager_handle;
-        ID3D11Texture2D* input_texture = hb_enc_qsv_frames_ctx->input_texture;
-        err = hb_qsv_allocate_dx11_encoder_pool(hb_enc_qsv_frames_ctx, device, input_texture);
+        ID3D11Texture2D* input_texture = job->qsv.ctx->hb_dec_qsv_frames_ctx->input_texture;
+        err = hb_qsv_allocate_dx11_encoder_pool(job->qsv.ctx->hb_dec_qsv_frames_ctx, device, input_texture);
         if (err < 0)
         {
             hb_error("hb_qsv_get_dx_device: hb_qsv_allocate_dx11_encoder_pool failed");
@@ -2955,23 +2940,23 @@ void hb_qsv_uninit_enc(hb_job_t *job)
         ID3D11DeviceContext_Release(device_context);
         device_context = NULL;
     }
-    hb_hw_device_ctx = NULL;
-    qsv_device = NULL;
+    job->qsv.ctx->hb_hw_device_ctx = NULL;
+    job->qsv.ctx->qsv_device = NULL;
     device_manager_handle = NULL;
 }
 
-static int qsv_device_init()
+static int qsv_device_init(hb_job_t *job)
 {
     int err;
    AVDictionary *dict = NULL;
 
-    if (qsv_device) {
-        err = av_dict_set(&dict, "child_device", qsv_device, 0);
+    if (job->qsv.ctx->qsv_device) {
+        err = av_dict_set(&dict, "child_device", job->qsv.ctx->qsv_device, 0);
         if (err < 0)
             return err;
     }
 
-    if (!qsv_filters_are_enabled)
+    if (!job->qsv.ctx->qsv_filters_are_enabled)
     {
         err = av_dict_set(&dict, "child_device_type", "d3d11va", 0);
         err = av_dict_set(&dict, "vendor", "0x8086", 0);
@@ -2981,7 +2966,7 @@ static int qsv_device_init()
         err = av_dict_set(&dict, "child_device_type", "dxva2", 0);
     }
 
-    err = av_hwdevice_ctx_create(&hb_hw_device_ctx, AV_HWDEVICE_TYPE_QSV,
+    err = av_hwdevice_ctx_create(&job->qsv.ctx->hb_hw_device_ctx, AV_HWDEVICE_TYPE_QSV,
                                  0, dict, 0);
     if (err < 0) {
         hb_error("qsv_device_init: error creating a QSV device %d", err);
@@ -2995,7 +2980,7 @@ err_out:
     return err;
 }
 
-int hb_create_ffmpeg_pool(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
+int hb_create_ffmpeg_pool(hb_job_t *job, int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
 {
     AVHWFramesContext *frames_ctx;
     AVQSVFramesContext *frames_hwctx;
@@ -3004,14 +2989,14 @@ int hb_create_ffmpeg_pool(int coded_width, int coded_height, enum AVPixelFormat
 
     int ret;
 
-    if (!hb_hw_device_ctx) {
-        ret = qsv_device_init();
+    if (!job->qsv.ctx->hb_hw_device_ctx) {
+        ret = qsv_device_init(job);
         if (ret < 0)
             return ret;
     }
 
     av_buffer_unref(&hw_frames_ctx);
-    hw_frames_ctx = av_hwframe_ctx_alloc(hb_hw_device_ctx);
+    hw_frames_ctx = av_hwframe_ctx_alloc(job->qsv.ctx->hb_hw_device_ctx);
     if (!hw_frames_ctx)
         return AVERROR(ENOMEM);
 
@@ -3036,15 +3021,27 @@ int hb_create_ffmpeg_pool(int coded_width, int coded_height, enum AVPixelFormat
     return 0;
 }
 
-int hb_qsv_hw_frames_init(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
+int hb_qsv_hw_frames_init(AVCodecContext *s)
 {
     AVHWFramesContext *frames_ctx;
     AVQSVFramesContext *frames_hwctx;
     AVBufferRef *hw_frames_ctx;
-
     int ret;
 
-    ret = hb_create_ffmpeg_pool(coded_width, coded_height, sw_pix_fmt, HB_QSV_POOL_FFMPEG_SURFACE_SIZE, extra_hw_frames, out_hw_frames_ctx);
+    hb_job_t *job = s->opaque;
+    if (!job) {
+        hb_error("hb_qsv_hw_frames_init: job is NULL");
+        return -1;
+    }
+
+    HBQSVFramesContext *hb_dec_qsv_frames_ctx = job->qsv.ctx->hb_dec_qsv_frames_ctx;
+    int coded_width = s->coded_width;
+    int coded_height = s->coded_height;
+    enum AVPixelFormat sw_pix_fmt = s->sw_pix_fmt;
+    int extra_hw_frames = s->extra_hw_frames;
+    AVBufferRef **out_hw_frames_ctx = &s->hw_frames_ctx;
+
+    ret = hb_create_ffmpeg_pool(job, coded_width, coded_height, sw_pix_fmt, HB_QSV_POOL_FFMPEG_SURFACE_SIZE, extra_hw_frames, out_hw_frames_ctx);
     if (ret < 0) {
         hb_error("hb_qsv_hw_frames_init: hb_create_ffmpeg_pool decoder failed %d", ret);
         return ret;
@@ -3055,7 +3052,7 @@ int hb_qsv_hw_frames_init(int coded_width, int coded_height, enum AVPixelFormat
     frames_hwctx = frames_ctx->hwctx;
     hb_dec_qsv_frames_ctx->input_texture = frames_hwctx->texture;
 
-    ret = hb_create_ffmpeg_pool(coded_width, coded_height, sw_pix_fmt, HB_QSV_POOL_SURFACE_SIZE, extra_hw_frames, &hb_dec_qsv_frames_ctx->hw_frames_ctx);
+    ret = hb_create_ffmpeg_pool(job, coded_width, coded_height, sw_pix_fmt, HB_QSV_POOL_SURFACE_SIZE, extra_hw_frames, &hb_dec_qsv_frames_ctx->hw_frames_ctx);
     if (ret < 0) {
         hb_error("hb_qsv_hw_frames_init: hb_create_ffmpeg_pool qsv surface allocation failed %d", ret);
         return ret;
@@ -3070,7 +3067,7 @@ int hb_qsv_hw_frames_init(int coded_width, int coded_height, enum AVPixelFormat
     hb_dec_qsv_frames_ctx->nb_mids = frames_hwctx->nb_surfaces;
     memset(hb_dec_qsv_frames_ctx->pool, 0, hb_dec_qsv_frames_ctx->nb_mids * sizeof(hb_dec_qsv_frames_ctx->pool[0]));
 
-    ret = hb_qsv_get_dx_device(hb_dec_qsv_frames_ctx);
+    ret = hb_qsv_get_dx_device(job);
     if (ret < 0) {
         hb_error("qsv_init: hb_qsv_get_dx_device failed %d", ret);
         return ret;
@@ -3090,11 +3087,11 @@ enum AVPixelFormat hb_qsv_get_format(AVCodecContext *s, const enum AVPixelFormat
 {
     while (*pix_fmts != AV_PIX_FMT_NONE) {
         if (*pix_fmts == AV_PIX_FMT_QSV) {
-            int ret = hb_qsv_hw_frames_init(s->coded_width, s->coded_height, s->sw_pix_fmt, s->extra_hw_frames, &s->hw_frames_ctx);
-            if (ret < 0) {
-                hb_error("hb_qsv_get_format: QSV hwaccel initialization failed");
-                return AV_PIX_FMT_NONE;
-            }
+            int ret = hb_qsv_hw_frames_init(s);
+            if (ret < 0) {
+                hb_error("hb_qsv_get_format: QSV hwaccel initialization failed");
+                return AV_PIX_FMT_NONE;
+            }
             if (s->hw_frames_ctx) {
                 s->hw_frames_ctx = av_buffer_ref(s->hw_frames_ctx);
                 if (!s->hw_frames_ctx)
@@ -3191,7 +3188,6 @@ int hb_qsv_sanitize_filter_list(hb_job_t *job)
                 hb_error( "sanitize_qsv: HBQSVFramesContext vpp alloc failed" );
                 return 1;
             }
-            hb_qsv_update_frames_context(job);
         }
     }
     return 0;
@@ -3199,12 +3195,12 @@ int hb_qsv_sanitize_filter_list(hb_job_t *job)
 
 #else // other OS
 
-int hb_create_ffmpeg_pool(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
+int hb_create_ffmpeg_pool(hb_job_t *job, int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int pool_size, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
 {
     return -1;
 }
 
-int hb_qsv_hw_frames_init(int coded_width, int coded_height, enum AVPixelFormat sw_pix_fmt, int extra_hw_frames, AVBufferRef **out_hw_frames_ctx)
+int hb_qsv_hw_frames_init(AVCodecContext *s)
 {
     return -1;
 }