-rw-r--r--  libhb/avfilter.c   |  87
-rw-r--r--  libhb/decavcodec.c | 710
-rw-r--r--  libhb/hb.c         | 185
-rw-r--r--  libhb/hbffmpeg.c   | 276
-rw-r--r--  libhb/hbffmpeg.h   |   4
-rw-r--r--  libhb/internal.h   |   1
-rw-r--r--  libhb/stream.c     |   2
7 files changed, 740 insertions, 525 deletions
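Note: the heart of this change is that decavcodec.c now expresses pix_fmt conversion, scaling, and rotation as a libavfilter graph string, composed with the new hb_append_filter_string() helper added to avfilter.c below. A minimal sketch (not part of the commit; the helper name build_graph and the dimensions are made up for illustration) of how such a graph string is assembled:

/* Sketch only: compose the kind of graph string reinit_video_filters()
 * builds below. Width/height values here are hypothetical. */
#include "hb.h"        /* hb_strdup_printf() */
#include "internal.h"  /* hb_append_filter_string() */

static char * build_graph(int width, int height)
{
    char * graph_str = NULL, * filter_str;

    /* scale + pix_fmt conversion first, then rotation, mirroring the
     * order used in reinit_video_filters() */
    filter_str = hb_strdup_printf(
        "scale='w=%d:h=%d:flags=lanczos+accurate_rnd',"
        "format='pix_fmts=yuv420p'", width, height);
    graph_str = hb_append_filter_string(graph_str, filter_str);
    free(filter_str);

    /* hb_append_filter_string() inserts the separating comma itself */
    graph_str = hb_append_filter_string(graph_str, "transpose='dir=cclock'");

    return graph_str;  /* "scale=...,format=...,transpose='dir=cclock'" */
}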
diff --git a/libhb/avfilter.c b/libhb/avfilter.c
index c37950815..9729a2c9c 100644
--- a/libhb/avfilter.c
+++ b/libhb/avfilter.c
@@ -401,62 +401,6 @@ static void fill_frame(hb_filter_private_t * pv,
     frame->top_field_first = !!(buf->s.flags & PIC_FLAG_TOP_FIELD_FIRST);
 }
 
-static hb_buffer_t* avframe_to_buffer(hb_filter_private_t * pv, AVFrame *frame)
-{
-    hb_buffer_t * buf;
-
-    buf = hb_frame_buffer_init(frame->format, frame->width, frame->height);
-    if (buf == NULL)
-    {
-        return NULL;
-    }
-
-    int pp;
-    for (pp = 0; pp < 3; pp++)
-    {
-        int yy;
-        int width    = buf->plane[pp].width;
-        int stride   = buf->plane[pp].stride;
-        int height   = buf->plane[pp].height;
-        int linesize = frame->linesize[pp];
-        uint8_t * dst = buf->plane[pp].data;
-        uint8_t * src = frame->data[pp];
-
-        for (yy = 0; yy < height; yy++)
-        {
-            memcpy(dst, src, width);
-            dst += stride;
-            src += linesize;
-        }
-    }
-    buf->s.start = av_rescale_q(frame->pts, pv->out_time_base,
-                                (AVRational){1, 90000});
-    buf->s.duration = frame->reordered_opaque;
-
-    if (frame->top_field_first)
-    {
-        buf->s.flags |= PIC_FLAG_TOP_FIELD_FIRST;
-    }
-    if (!frame->interlaced_frame)
-    {
-        buf->s.flags |= PIC_FLAG_PROGRESSIVE_FRAME;
-    }
-    else
-    {
-        buf->s.combed = HB_COMB_HEAVY;
-    }
-    if (frame->repeat_pict == 1)
-    {
-        buf->s.flags |= PIC_FLAG_REPEAT_FIRST_FIELD;
-    }
-    if (frame->repeat_pict == 2)
-    {
-        buf->s.flags |= PIC_FLAG_REPEAT_FRAME;
-    }
-
-    return buf;
-}
-
 static hb_buffer_t* filterFrame( hb_filter_private_t * pv, hb_buffer_t * in )
 {
     int result;
@@ -473,7 +417,8 @@ static hb_buffer_t* filterFrame( hb_filter_private_t * pv, hb_buffer_t * in )
     result = av_buffersink_get_frame(pv->output, pv->frame);
     while (result >= 0)
     {
-        hb_buffer_t * buf = avframe_to_buffer(pv, pv->frame);
+        hb_buffer_t * buf = hb_avframe_to_video_buffer(pv->frame,
+                                                       pv->out_time_base);
         hb_buffer_list_append(&pv->list, buf);
 
         av_frame_unref(pv->frame);
@@ -795,3 +740,31 @@ void hb_avfilter_combine( hb_list_t * list )
     }
 }
 
+char * hb_append_filter_string(char * graph_str, char * filter_str)
+{
+    char * tmp;
+    int    size = 1, len = 0;
+
+    if (graph_str != NULL)
+    {
+        len   = strlen(graph_str);
+        size += len + 1;
+    }
+    if (filter_str != NULL)
+    {
+        size += strlen(filter_str);
+    }
+    tmp = realloc(graph_str, size);
+    if (tmp == NULL)
+    {
+        return graph_str;
+    }
+    graph_str = tmp;
+    if (len > 0)
+    {
+        graph_str[len++] = ',';
+    }
+    strcpy(&graph_str[len], filter_str);
+    return graph_str;
+}
+
diff --git a/libhb/decavcodec.c b/libhb/decavcodec.c
index d308a49d5..8e32064a6 100644
--- a/libhb/decavcodec.c
+++ b/libhb/decavcodec.c
@@ -40,6 +40,9 @@
 
 #include "hb.h"
 #include "hbffmpeg.h"
+#include "libavfilter/avfilter.h"
+#include "libavfilter/buffersrc.h"
+#include "libavfilter/buffersink.h"
 #include "lang.h"
 #include "audio_resample.h"
 
@@ -93,6 +96,18 @@ struct reordered_data_s
 #define REORDERED_HASH_SZ   (2 << 7)
 #define REORDERED_HASH_MASK (REORDERED_HASH_SZ - 1)
 
+struct video_filters_s
+{
+    AVFilterGraph   * graph;
+    AVFilterContext * last;
+    AVFilterContext * input;
+    AVFilterContext * output;
+
+    int               width;
+    int               height;
+    int               pix_fmt;
+};
+
 struct hb_work_private_s
 {
     hb_job_t             * job;
@@ -119,11 +134,7 @@ struct hb_work_private_s
     int64_t                sequence;
     int                    last_scr_sequence;
     int                    last_chapter;
-    struct SwsContext    * sws_context; // if we have to rescale or convert color space
-
-    int                    sws_width;
-    int                    sws_height;
-    int                    sws_pix_fmt;
+    struct video_filters_s video_filters;
 
     hb_audio_t           * audio;
     hb_audio_resample_t  * resample;
@@ -322,6 +333,25 @@ static int decavcodecaInit( hb_work_object_t * w, hb_job_t * job )
 ***********************************************************************
 *
 **********************************************************************/
+static void close_video_filters(hb_work_private_t *pv)
+{
+    if (pv->video_filters.input != NULL)
+    {
+        avfilter_free(pv->video_filters.input);
+        pv->video_filters.input = NULL;
+    }
+    if (pv->video_filters.output != NULL)
+    {
+        avfilter_free(pv->video_filters.output);
+        pv->video_filters.output = NULL;
+    }
+    if (pv->video_filters.graph != NULL)
+    {
+        avfilter_graph_free(&pv->video_filters.graph);
+    }
+    pv->video_filters.last = NULL;
+}
+
 static void closePrivData( hb_work_private_t ** ppv )
 {
     hb_work_private_t * pv = *ppv;
@@ -336,10 +366,7 @@ static void closePrivData( hb_work_private_t ** ppv )
                pv->context->codec->name, pv->nframes, pv->decode_errors);
     }
     av_frame_free(&pv->frame);
-    if ( pv->sws_context )
-    {
-        sws_freeContext( pv->sws_context );
-    }
+    close_video_filters(pv);
     if ( pv->parser )
     {
         av_parser_close(pv->parser);
@@ -835,22 +862,64 @@ reordered_hash_add(hb_work_private_t * pv, reordered_data_t * reordered)
  * General purpose video decoder using libavcodec
  */
 
-static uint8_t *copy_plane( uint8_t *dst, uint8_t* src, int dstride, int sstride,
-                            int h )
+// send cc_buf to the CC decoder(s)
+static void cc_send_to_decoder(hb_work_private_t *pv, hb_buffer_t *buf)
 {
-    if ( dstride == sstride )
+    if (buf == NULL)
+        return;
+
+    // if there's more than one decoder for the captions send a copy
+    // of the buffer to all.
+    hb_subtitle_t *subtitle;
+    int ii = 0, n = hb_list_count(pv->list_subtitle);
+    while (--n > 0)
     {
-        memcpy( dst, src, dstride * h );
-        return dst + dstride * h;
+        // make a copy of the buf then forward it to the decoder
+        hb_buffer_t *cpy = hb_buffer_dup(buf);
+
+        subtitle = hb_list_item(pv->list_subtitle, ii++);
+        hb_fifo_push(subtitle->fifo_in, cpy);
     }
-    int lbytes = dstride <= sstride? dstride : sstride;
-    while ( --h >= 0 )
+    subtitle = hb_list_item(pv->list_subtitle, ii);
+    hb_fifo_push( subtitle->fifo_in, buf );
+}
+
+static hb_buffer_t * cc_fill_buffer(hb_work_private_t *pv, uint8_t *cc, int size)
+{
+    int cc_count[4] = {0,};
+    int ii;
+    hb_buffer_t *buf = NULL;
+
+    for (ii = 0; ii < size; ii += 3)
+    {
+        if ((cc[ii] & 0x04) == 0)    // not valid
+            continue;
+        if ((cc[ii+1] & 0x7f) == 0 && (cc[ii+2] & 0x7f) == 0) // stuffing
+            continue;
+        int type = cc[ii] & 0x03;
+        cc_count[type]++;
+    }
+
+    // Only handles CC1 for now.
+    if (cc_count[0] > 0)
     {
-        memcpy( dst, src, lbytes );
-        src += sstride;
-        dst += dstride;
+        buf = hb_buffer_init(cc_count[0] * 2);
+        int jj = 0;
+        for (ii = 0; ii < size; ii += 3)
+        {
+            if ((cc[ii] & 0x04) == 0)    // not valid
+                continue;
+            if ((cc[ii+1] & 0x7f) == 0 && (cc[ii+2] & 0x7f) == 0) // stuffing
+                continue;
+            int type = cc[ii] & 0x03;
+            if (type == 0)
+            {
+                buf->data[jj++] = cc[ii+1];
+                buf->data[jj++] = cc[ii+2];
+            }
+        }
     }
-    return dst;
+    return buf;
 }
 
 // copy one video frame into an HB buf. If the frame isn't in our color space
@@ -858,25 +927,26 @@ static uint8_t *copy_plane( uint8_t *dst, uint8_t* src, int dstride, int sstride,
 // Otherwise just copy the bits.
 static hb_buffer_t *copy_frame( hb_work_private_t *pv )
 {
-    AVCodecContext *context = pv->context;
-    int w, h;
-    if ( ! pv->job )
+    reordered_data_t * reordered = NULL;
+    hb_buffer_t      * out;
+
+#ifdef USE_QSV
+    // no need to copy the frame data when decoding with QSV to opaque memory
+    if (pv->qsv.decode &&
+        pv->qsv.config.io_pattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY)
     {
-        // HandBrake's video pipeline uses yuv420 color. This means all
-        // dimensions must be even. So we must adjust the dimensions
-        // of incoming video if not even.
-        w = context->width & ~1;
-        h = context->height & ~1;
+        out = hb_frame_buffer_init(pv->frame->format, pv->frame->width, pv->frame->height);
+        hb_avframe_set_video_buffer_flags(out, pv->frame, (AVRational){1,1});
+
+        out->qsv_details.qsv_atom = pv->frame->data[2];
+        out->qsv_details.ctx      = pv->job->qsv.ctx;
     }
     else
+#endif
     {
-        w = pv->job->title->geometry.width;
-        h = pv->job->title->geometry.height;
+        out = hb_avframe_to_video_buffer(pv->frame, (AVRational){1,1});
     }
 
-    reordered_data_t * reordered = NULL;
-    hb_buffer_t * out = hb_video_buffer_init( w, h );
-
     if (pv->frame->pts != AV_NOPTS_VALUE)
     {
         reordered = reordered_hash_rem(pv, pv->frame->pts);
@@ -895,6 +965,27 @@ static hb_buffer_t *copy_frame( hb_work_private_t *pv )
         out->s.scr_sequence = pv->last_scr_sequence;
         out->s.start        = AV_NOPTS_VALUE;
     }
+
+    double frame_dur = pv->duration;
+    if (pv->frame->repeat_pict)
+    {
+        frame_dur += pv->frame->repeat_pict * pv->field_duration;
+    }
+    if (out->s.start == AV_NOPTS_VALUE)
+    {
+        out->s.start = pv->next_pts;
+    }
+    else
+    {
+        pv->next_pts = out->s.start;
+    }
+    if (pv->next_pts != (int64_t)AV_NOPTS_VALUE)
+    {
+        pv->next_pts += frame_dur;
+        out->s.stop   = pv->next_pts;
+    }
+    out->s.duration = frame_dur;
+
     if (out->s.new_chap > 0 && out->s.new_chap == pv->new_chap)
     {
         pv->new_chap = 0;
@@ -910,148 +1001,318 @@ static hb_buffer_t *copy_frame( hb_work_private_t *pv )
         pv->new_chap = 0;
     }
 
+    // Check for CC data
+    AVFrameSideData *sd;
+    sd = av_frame_get_side_data(pv->frame, AV_FRAME_DATA_A53_CC);
+    if (sd != NULL)
+    {
+        if (!pv->job && pv->title && sd->size > 0)
+        {
+            hb_subtitle_t *subtitle;
+            int i = 0;
+
+            while ((subtitle = hb_list_item(pv->title->list_subtitle, i++)))
+            {
+                /*
+                 * Let's call them 608 subs for now even if they aren't,
+                 * since they are the only types we grok.
+                 */
+                if (subtitle->source == CC608SUB)
+                {
+                    break;
+                }
+            }
+            if (subtitle == NULL)
+            {
+                iso639_lang_t * lang;
+                hb_audio_t    * audio;
+
+                subtitle = calloc(sizeof( hb_subtitle_t ), 1);
+                subtitle->track       = hb_list_count(pv->title->list_subtitle);
+                subtitle->id          = 0;
+                subtitle->format      = TEXTSUB;
+                subtitle->source      = CC608SUB;
+                subtitle->config.dest = PASSTHRUSUB;
+                subtitle->codec       = WORK_DECCC608;
+                subtitle->attributes  = HB_SUBTITLE_ATTR_CC;
+
+                /*
+                 * The language of the subtitles will be the same as the
+                 * first audio track, i.e. the same as the video.
+                 */
+                audio = hb_list_item(pv->title->list_audio, 0);
+                if (audio != NULL)
+                {
+                    lang = lang_for_code2( audio->config.lang.iso639_2 );
+                } else {
+                    lang = lang_for_code2( "und" );
+                }
+                snprintf(subtitle->lang, sizeof(subtitle->lang),
+                         "%s, Closed Caption [%s]",
+                         strlen(lang->native_name) ? lang->native_name :
+                                                     lang->eng_name,
+                         hb_subsource_name(subtitle->source));
+                snprintf(subtitle->iso639_2, sizeof(subtitle->iso639_2),
+                         "%s", lang->iso639_2);
+
+                hb_list_add(pv->title->list_subtitle, subtitle);
+            }
+        }
+        if (pv->list_subtitle != NULL && sd->size > 0)
+        {
+            hb_buffer_t *cc_buf;
+            cc_buf = cc_fill_buffer(pv, sd->data, sd->size);
+            if (cc_buf != NULL)
+            {
+                cc_buf->s.start        = out->s.start;
+                cc_buf->s.scr_sequence = out->s.scr_sequence;
+            }
+            cc_send_to_decoder(pv, cc_buf);
+        }
+    }
+
+    return out;
+}
+
+static AVFilterContext * append_filter(hb_work_private_t * pv,
+                                       const char * name, const char * args)
+{
+    AVFilterContext * filter;
+    int               result;
+
+    result = avfilter_graph_create_filter(&filter, avfilter_get_by_name(name),
+                                          name, args, NULL,
+                                          pv->video_filters.graph);
+    if (result < 0)
+    {
+        return NULL;
+    }
+    if (pv->video_filters.last != NULL)
+    {
+        result = avfilter_link(pv->video_filters.last, 0, filter, 0);
+        if (result < 0)
+        {
+            avfilter_free(filter);
+            return NULL;
+        }
+    }
+    pv->video_filters.last = filter;
+
+    return filter;
+}
+
+int reinit_video_filters(hb_work_private_t * pv)
+{
+    char            * sws_flags;
+    int               result;
+    AVFilterContext * avfilter;
+    char            * graph_str = NULL, * filter_str;
+    AVFilterInOut   * in = NULL, * out = NULL;
+    int               orig_width;
+    int               orig_height;
+
 #ifdef USE_QSV
-    // no need to copy the frame data when decoding with QSV to opaque memory
     if (pv->qsv.decode &&
         pv->qsv.config.io_pattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY)
     {
-        out->qsv_details.qsv_atom = pv->frame->data[2];
-        out->qsv_details.ctx = pv->job->qsv.ctx;
-        return out;
+        // Can't use software filters when decoding with QSV opaque memory
+        return 0;
     }
 #endif
+    if (!pv->job)
+    {
+        // HandBrake's video pipeline uses yuv420 color. This means all
+        // dimensions must be even. So we must adjust the dimensions
+        // of incoming video if not even.
+        orig_width  = pv->context->width & ~1;
+        orig_height = pv->context->height & ~1;
+    }
+    else
+    {
+        if (pv->title->rotation == HB_ROTATION_90 ||
+            pv->title->rotation == HB_ROTATION_270)
+        {
+            orig_width  = pv->job->title->geometry.height;
+            orig_height = pv->job->title->geometry.width;
+        }
+        else
+        {
+            orig_width  = pv->job->title->geometry.width;
+            orig_height = pv->job->title->geometry.height;
+        }
+    }
 
-    uint8_t *dst = out->data;
+    if (AV_PIX_FMT_YUV420P == pv->frame->format &&
+        orig_width == pv->frame->width &&
+        orig_height == pv->frame->height &&
+        HB_ROTATION_0 == pv->title->rotation)
+    {
+        // No filtering required.
+        close_video_filters(pv);
+        return 0;
+    }
 
-    if (context->pix_fmt != AV_PIX_FMT_YUV420P || w != context->width ||
-        h != context->height)
+    if (pv->video_filters.graph != NULL &&
+        pv->video_filters.width == pv->frame->width &&
+        pv->video_filters.height == pv->frame->height &&
+        pv->video_filters.pix_fmt == pv->frame->format)
     {
-        // have to convert to our internal color space and/or rescale
-        uint8_t * data[4];
-        int       stride[4];
-        hb_picture_fill(data, stride, out);
+        // Current filter settings are good
+        return 0;
+    }
 
-        if (pv->sws_context == NULL            ||
-            pv->sws_width   != context->width  ||
-            pv->sws_height  != context->height ||
-            pv->sws_pix_fmt != context->pix_fmt)
-        {
-            if (pv->sws_context != NULL)
-                sws_freeContext(pv->sws_context);
+    pv->video_filters.width   = pv->frame->width;
+    pv->video_filters.height  = pv->frame->height;
+    pv->video_filters.pix_fmt = pv->frame->format;
 
-            hb_geometry_t geometry = {context->width, context->height};
-            int color_matrix = get_color_matrix(context->colorspace, geometry);
+    // New filter required, create filter graph
+    close_video_filters(pv);
+    pv->video_filters.graph = avfilter_graph_alloc();
+    if (pv->video_filters.graph == NULL)
+    {
+        hb_log("reinit_video_filters: avfilter_graph_alloc failed");
+        goto fail;
+    }
+    sws_flags = hb_strdup_printf("flags=%d", SWS_LANCZOS|SWS_ACCURATE_RND);
+    // avfilter_graph_free uses av_free to release scale_sws_opts.  Due
+    // to the hacky implementation of av_free/av_malloc on windows,
+    // you must av_malloc anything that is av_free'd.
+    pv->video_filters.graph->scale_sws_opts = av_malloc(strlen(sws_flags) + 1);
+    strcpy(pv->video_filters.graph->scale_sws_opts, sws_flags);
+    free(sws_flags);
 
-            pv->sws_context = hb_sws_get_context(context->width,
-                                                 context->height,
-                                                 context->pix_fmt,
-                                                 w, h, AV_PIX_FMT_YUV420P,
-                                                 SWS_LANCZOS|SWS_ACCURATE_RND,
-                                                 hb_ff_get_colorspace(color_matrix));
-            pv->sws_width   = context->width;
-            pv->sws_height  = context->height;
-            pv->sws_pix_fmt = context->pix_fmt;
-        }
-        sws_scale(pv->sws_context,
-                  (const uint8_t* const *)pv->frame->data,
-                  pv->frame->linesize, 0, context->height, data, stride);
+    int           clock_min, clock_max, clock;
+    hb_rational_t vrate;
+
+    hb_video_framerate_get_limits(&clock_min, &clock_max, &clock);
+    vrate.num = clock;
+    vrate.den = pv->duration * (clock / 90000.);
+
+    if (AV_PIX_FMT_YUV420P != pv->frame->format ||
+        orig_width != pv->frame->width ||
+        orig_height != pv->frame->height)
+    {
+
+        filter_str = hb_strdup_printf(
+                        "scale='w=%d:h=%d:flags=lanczos+accurate_rnd',"
+                        "format='pix_fmts=yuv420p'",
+                        orig_width, orig_height);
+        graph_str = hb_append_filter_string(graph_str, filter_str);
+        free(filter_str);
     }
-    else
+    if (pv->title->rotation != HB_ROTATION_0)
     {
-        w = out->plane[0].stride;
-        h = out->plane[0].height;
-        dst = out->plane[0].data;
-        copy_plane( dst, pv->frame->data[0], w, pv->frame->linesize[0], h );
-        w = out->plane[1].stride;
-        h = out->plane[1].height;
-        dst = out->plane[1].data;
-        copy_plane( dst, pv->frame->data[1], w, pv->frame->linesize[1], h );
-        w = out->plane[2].stride;
-        h = out->plane[2].height;
-        dst = out->plane[2].data;
-        copy_plane( dst, pv->frame->data[2], w, pv->frame->linesize[2], h );
+        switch (pv->title->rotation)
+        {
+            case HB_ROTATION_90:
+                filter_str = "transpose='dir=cclock'";
+                break;
+            case HB_ROTATION_180:
+                filter_str = "hflip,vflip";
+                break;
+            case HB_ROTATION_270:
+                filter_str = "transpose='dir=clock'";
+                break;
+            default:
+                hb_log("reinit_video_filters: Unknown rotation, failed");
+                goto fail;
+        }
+        graph_str = hb_append_filter_string(graph_str, filter_str);
     }
 
-    return out;
-}
-
-// send cc_buf to the CC decoder(s)
-static void cc_send_to_decoder(hb_work_private_t *pv, hb_buffer_t *buf)
-{
-    if (buf == NULL)
-        return;
+    // Build filter input
+    filter_str = hb_strdup_printf(
+                "width=%d:height=%d:pix_fmt=%d:sar=%d/%d:"
+                "time_base=%d/%d:frame_rate=%d/%d",
+                pv->frame->width, pv->frame->height,
+                pv->frame->format,
+                pv->frame->sample_aspect_ratio.num,
+                pv->frame->sample_aspect_ratio.den,
+                1, 1, vrate.num, vrate.den);
 
-    // if there's more than one decoder for the captions send a copy
-    // of the buffer to all.
-    hb_subtitle_t *subtitle;
-    int ii = 0, n = hb_list_count(pv->list_subtitle);
-    while (--n > 0)
+    avfilter = append_filter(pv, "buffer", filter_str);
+    free(filter_str);
+    if (avfilter == NULL)
     {
-        // make a copy of the buf then forward it to the decoder
-        hb_buffer_t *cpy = hb_buffer_dup(buf);
+        hb_error("reinit_video_filters: failed to create buffer source filter");
+        goto fail;
+    }
+    pv->video_filters.input = avfilter;
 
-        subtitle = hb_list_item(pv->list_subtitle, ii++);
-        hb_fifo_push(subtitle->fifo_in, cpy);
+    // Build the filter graph
+    result = avfilter_graph_parse2(pv->video_filters.graph,
+                                   graph_str, &in, &out);
+    if (result < 0 || in == NULL || out == NULL)
+    {
+        hb_error("reinit_video_filters: avfilter_graph_parse2 failed (%s)",
+                 graph_str);
+        goto fail;
    }
-    subtitle = hb_list_item(pv->list_subtitle, ii);
-    hb_fifo_push( subtitle->fifo_in, buf );
-}
-
-static hb_buffer_t * cc_fill_buffer(hb_work_private_t *pv, uint8_t *cc, int size)
-{
-    int cc_count[4] = {0,};
-    int ii;
-    hb_buffer_t *buf = NULL;
+    // Link input to filter graph
+    result = avfilter_link(pv->video_filters.last, 0, in->filter_ctx, 0);
+    if (result < 0)
+    {
+        goto fail;
+    }
+    pv->video_filters.last = out->filter_ctx;
 
-    for (ii = 0; ii < size; ii += 3)
+    // Build filter output and append to filter graph
+    avfilter = append_filter(pv, "buffersink", NULL);
+    if (avfilter == NULL)
     {
-        if ((cc[ii] & 0x04) == 0)    // not valid
-            continue;
-        if ((cc[ii+1] & 0x7f) == 0 && (cc[ii+2] & 0x7f) == 0) // stuffing
-            continue;
-        int type = cc[ii] & 0x03;
-        cc_count[type]++;
+        hb_error("reinit_video_filters: failed to create buffer output filter");
+        goto fail;
     }
+    pv->video_filters.output = avfilter;
 
-    // Only handles CC1 for now.
-    if (cc_count[0] > 0)
+    result = avfilter_graph_config(pv->video_filters.graph, NULL);
+    if (result < 0)
     {
-        buf = hb_buffer_init(cc_count[0] * 2);
-        int jj = 0;
-        for (ii = 0; ii < size; ii += 3)
-        {
-            if ((cc[ii] & 0x04) == 0)    // not valid
-                continue;
-            if ((cc[ii+1] & 0x7f) == 0 && (cc[ii+2] & 0x7f) == 0) // stuffing
-                continue;
-            int type = cc[ii] & 0x03;
-            if (type == 0)
-            {
-                buf->data[jj++] = cc[ii+1];
-                buf->data[jj++] = cc[ii+2];
-            }
-        }
+        hb_error("reinit_video_filters: failed to configure filter graph");
+        goto fail;
     }
-    return buf;
+
+    free(graph_str);
+    avfilter_inout_free(&in);
+    avfilter_inout_free(&out);
+    return 0;
+
+fail:
+    free(graph_str);
+    avfilter_inout_free(&in);
+    avfilter_inout_free(&out);
+    close_video_filters(pv);
+
+    return 1;
 }
 
-static int get_frame_type(int type)
+static void filter_video(hb_work_private_t *pv)
 {
-    switch (type)
+    reinit_video_filters(pv);
+    if (pv->video_filters.graph != NULL)
     {
-        case AV_PICTURE_TYPE_B:
-            return HB_FRAME_B;
+        int result;
 
-        case AV_PICTURE_TYPE_S:
-        case AV_PICTURE_TYPE_P:
-        case AV_PICTURE_TYPE_SP:
-            return HB_FRAME_P;
+        av_buffersrc_add_frame(pv->video_filters.input, pv->frame);
+        result = av_buffersink_get_frame(pv->video_filters.output, pv->frame);
+        while (result >= 0)
+        {
+            hb_buffer_t * buf = copy_frame(pv);
+            hb_buffer_list_append(&pv->list, buf);
+            av_frame_unref(pv->frame);
+            ++pv->nframes;
 
-        case AV_PICTURE_TYPE_BI:
-        case AV_PICTURE_TYPE_SI:
-        case AV_PICTURE_TYPE_I:
-        default:
-            return HB_FRAME_I;
+            result = av_buffersink_get_frame(pv->video_filters.output,
+                                             pv->frame);
+        }
+    }
+    else
+    {
+        hb_buffer_t * buf = copy_frame(pv);
+        hb_buffer_list_append(&pv->list, buf);
+        av_frame_unref(pv->frame);
+        ++pv->nframes;
     }
 }
 
@@ -1147,141 +1408,9 @@ static int decodeFrame( hb_work_object_t *w, packet_info_t * packet_info )
         }
         got_picture = 1;
 
-        uint16_t flags = 0;
-
-        // ffmpeg makes it hard to attach a pts to a frame. if the MPEG ES
-        // packet had a pts we handed it to av_parser_parse (if the packet had
-        // no pts we set it to AV_NOPTS_VALUE, but before the parse we can't
-        // distinguish between the start of a video frame with no pts & an
-        // intermediate packet of some frame which never has a pts). we hope
-        // that when parse returns the frame to us the pts we originally
-        // handed it will be in parser->pts. we put this pts into avp.pts so
-        // that when avcodec_receive_frame finally gets around to allocating an
-        // AVFrame to hold the decoded frame, avcodec_default_get_buffer can
-        // stuff that pts into the it. if all of these relays worked at this
-        // point frame.pts should hold the frame's pts from the original data
-        // stream or AV_NOPTS_VALUE if it didn't have one. in the latter case
-        // we generate the next pts in sequence for it.
-
-        // recompute the frame/field duration, because sometimes it changes
         compute_frame_duration( pv );
-
-        double frame_dur = pv->duration;
-        if ( pv->frame->repeat_pict )
-        {
-            frame_dur += pv->frame->repeat_pict * pv->field_duration;
-        }
-        hb_buffer_t * out = copy_frame( pv );
-        if (out->s.start == AV_NOPTS_VALUE)
-        {
-            out->s.start = pv->next_pts;
-        }
-        else
-        {
-            pv->next_pts = out->s.start;
-        }
-        if (pv->next_pts != (int64_t)AV_NOPTS_VALUE)
-        {
-            pv->next_pts += frame_dur;
-            out->s.stop = pv->next_pts;
-        }
-
-        if ( pv->frame->top_field_first )
-        {
-            flags |= PIC_FLAG_TOP_FIELD_FIRST;
-        }
-        if ( !pv->frame->interlaced_frame )
-        {
-            flags |= PIC_FLAG_PROGRESSIVE_FRAME;
-        }
-        if ( pv->frame->repeat_pict == 1 )
-        {
-            flags |= PIC_FLAG_REPEAT_FIRST_FIELD;
-        }
-        if ( pv->frame->repeat_pict == 2 )
-        {
-            flags |= PIC_FLAG_REPEAT_FRAME;
-        }
-        int frametype = get_frame_type(pv->frame->pict_type);
-
-        // Check for CC data
-        AVFrameSideData *sd;
-        sd = av_frame_get_side_data(pv->frame, AV_FRAME_DATA_A53_CC);
-        if (sd != NULL)
-        {
-            if (!pv->job && pv->title && sd->size > 0)
-            {
-                hb_subtitle_t *subtitle;
-                int i = 0;
-
-                while ((subtitle = hb_list_item(pv->title->list_subtitle, i++)))
-                {
-                    /*
-                     * Let's call them 608 subs for now even if they aren't,
-                     * since they are the only types we grok.
-                     */
-                    if (subtitle->source == CC608SUB)
-                    {
-                        break;
-                    }
-                }
-                if (subtitle == NULL)
-                {
-                    iso639_lang_t * lang;
-                    hb_audio_t * audio;
-
-                    subtitle = calloc(sizeof( hb_subtitle_t ), 1);
-                    subtitle->track = hb_list_count(pv->title->list_subtitle);
-                    subtitle->id = 0;
-                    subtitle->format = TEXTSUB;
-                    subtitle->source = CC608SUB;
-                    subtitle->config.dest = PASSTHRUSUB;
-                    subtitle->codec = WORK_DECCC608;
-                    subtitle->attributes = HB_SUBTITLE_ATTR_CC;
-
-                    /*
-                     * The language of the subtitles will be the same as the
-                     * first audio track, i.e. the same as the video.
-                     */
-                    audio = hb_list_item(pv->title->list_audio, 0);
-                    if (audio != NULL)
-                    {
-                        lang = lang_for_code2( audio->config.lang.iso639_2 );
-                    } else {
-                        lang = lang_for_code2( "und" );
-                    }
-                    snprintf(subtitle->lang, sizeof(subtitle->lang),
-                             "%s, Closed Caption [%s]",
-                             strlen(lang->native_name) ? lang->native_name :
-                                                         lang->eng_name,
-                             hb_subsource_name(subtitle->source));
-                    snprintf(subtitle->iso639_2, sizeof(subtitle->iso639_2),
-                             "%s", lang->iso639_2);
-
-                    hb_list_add(pv->title->list_subtitle, subtitle);
-                }
-            }
-            if (pv->list_subtitle != NULL && sd->size > 0)
-            {
-                hb_buffer_t *cc_buf;
-                cc_buf = cc_fill_buffer(pv, sd->data, sd->size);
-                if (cc_buf != NULL)
-                {
-                    cc_buf->s.start = out->s.start;
-                    cc_buf->s.scr_sequence = out->s.scr_sequence;
-                }
-                cc_send_to_decoder(pv, cc_buf);
-            }
-        }
-
-        av_frame_unref(pv->frame);
-
-        out->s.duration  = frame_dur;
-        out->s.flags     = flags;
-        out->s.frametype = frametype;
-
-        hb_buffer_list_append(&pv->list, out);
-        ++pv->nframes;
+        filter_video(pv);
     } while (ret >= 0);
 
     if ( global_verbosity_level <= 1 )
@@ -1893,14 +2022,29 @@ static int decavcodecvInfo( hb_work_object_t *w, hb_work_info_t *info )
         return 0;
 
     info->bitrate = pv->context->bit_rate;
-    // HandBrake's video pipeline uses yuv420 color.  This means all
-    // dimensions must be even.  So we must adjust the dimensions
-    // of incoming video if not even.
-    info->geometry.width = pv->context->width & ~1;
-    info->geometry.height = pv->context->height & ~1;
-
-    info->geometry.par.num = pv->context->sample_aspect_ratio.num;
-    info->geometry.par.den = pv->context->sample_aspect_ratio.den;
+    if (w->title->rotation == HB_ROTATION_90 ||
+        w->title->rotation == HB_ROTATION_270)
+    {
+        // HandBrake's video pipeline uses yuv420 color.  This means all
+        // dimensions must be even.  So we must adjust the dimensions
+        // of incoming video if not even.
+        info->geometry.width = pv->context->height & ~1;
+        info->geometry.height = pv->context->width & ~1;
+
+        info->geometry.par.num = pv->context->sample_aspect_ratio.den;
+        info->geometry.par.den = pv->context->sample_aspect_ratio.num;
+    }
+    else
+    {
+        // HandBrake's video pipeline uses yuv420 color.  This means all
+        // dimensions must be even.  So we must adjust the dimensions
+        // of incoming video if not even.
+        info->geometry.width = pv->context->width & ~1;
+        info->geometry.height = pv->context->height & ~1;
+
+        info->geometry.par.num = pv->context->sample_aspect_ratio.num;
+        info->geometry.par.den = pv->context->sample_aspect_ratio.den;
+    }
 
     compute_frame_duration( pv );
     info->rate.num = clock;
diff --git a/libhb/hb.c b/libhb/hb.c
index 77cf6b2bf..f35090e38 100644
--- a/libhb/hb.c
+++ b/libhb/hb.c
@@ -191,191 +191,6 @@ int hb_picture_crop(uint8_t *data[], int stride[], hb_buffer_t *buf,
     return 0;
 }
 
-static int handle_jpeg(enum AVPixelFormat *format)
-{
-    switch (*format)
-    {
-        case AV_PIX_FMT_YUVJ420P: *format = AV_PIX_FMT_YUV420P; return 1;
-        case AV_PIX_FMT_YUVJ422P: *format = AV_PIX_FMT_YUV422P; return 1;
-        case AV_PIX_FMT_YUVJ444P: *format = AV_PIX_FMT_YUV444P; return 1;
-        case AV_PIX_FMT_YUVJ440P: *format = AV_PIX_FMT_YUV440P; return 1;
-        default:                  return 0;
-    }
-}
-
-int hb_ff_get_colorspace(int color_matrix)
-{
-    int color_space = SWS_CS_DEFAULT;
-
-    switch (color_matrix)
-    {
-        case HB_COLR_MAT_SMPTE170M:
-            color_space = SWS_CS_ITU601;
-            break;
-        case HB_COLR_MAT_SMPTE240M:
-            color_space = SWS_CS_SMPTE240M;
-            break;
-        case HB_COLR_MAT_BT709:
-            color_space = SWS_CS_ITU709;
-            break;
-        /* enable this when implemented in Libav
-        case HB_COLR_MAT_BT2020:
-            color_space = SWS_CS_BT2020;
-            break;
-        */
-        default:
-            break;
-    }
-
-    return color_space;
-}
-
-struct SwsContext*
-hb_sws_get_context(int srcW, int srcH, enum AVPixelFormat srcFormat,
-                   int dstW, int dstH, enum AVPixelFormat dstFormat,
-                   int flags, int colorspace)
-{
-    struct SwsContext * ctx;
-
-    ctx = sws_alloc_context();
-    if ( ctx )
-    {
-        int srcRange, dstRange;
-
-        srcRange = handle_jpeg(&srcFormat);
-        dstRange = handle_jpeg(&dstFormat);
-        flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP;
-
-        av_opt_set_int(ctx, "srcw", srcW, 0);
-        av_opt_set_int(ctx, "srch", srcH, 0);
-        av_opt_set_int(ctx, "src_range", srcRange, 0);
-        av_opt_set_int(ctx, "src_format", srcFormat, 0);
-        av_opt_set_int(ctx, "dstw", dstW, 0);
-        av_opt_set_int(ctx, "dsth", dstH, 0);
-        av_opt_set_int(ctx, "dst_range", dstRange, 0);
-        av_opt_set_int(ctx, "dst_format", dstFormat, 0);
-        av_opt_set_int(ctx, "sws_flags", flags, 0);
-
-        sws_setColorspaceDetails( ctx,
-                      sws_getCoefficients( colorspace ), // src colorspace
-                      srcRange, // src range 0 = MPG, 1 = JPG
-                      sws_getCoefficients( colorspace ), // dst colorspace
-                      dstRange, // dst range 0 = MPG, 1 = JPG
-                      0,         // brightness
-                      1 << 16,   // contrast
-                      1 << 16 ); // saturation
-
-        if (sws_init_context(ctx, NULL, NULL) < 0) {
-            hb_error("Cannot initialize resampling context");
-            sws_freeContext(ctx);
-            ctx = NULL;
-        }
-    }
-    return ctx;
-}
-
-uint64_t hb_ff_mixdown_xlat(int hb_mixdown, int *downmix_mode)
-{
-    uint64_t ff_layout = 0;
-    int mode = AV_MATRIX_ENCODING_NONE;
-    switch (hb_mixdown)
-    {
-        // Passthru
-        case HB_AMIXDOWN_NONE:
-            break;
-
-        case HB_AMIXDOWN_MONO:
-        case HB_AMIXDOWN_LEFT:
-        case HB_AMIXDOWN_RIGHT:
-            ff_layout = AV_CH_LAYOUT_MONO;
-            break;
-
-        case HB_AMIXDOWN_DOLBY:
-            ff_layout = AV_CH_LAYOUT_STEREO;
-            mode = AV_MATRIX_ENCODING_DOLBY;
-            break;
-
-        case HB_AMIXDOWN_DOLBYPLII:
-            ff_layout = AV_CH_LAYOUT_STEREO;
-            mode = AV_MATRIX_ENCODING_DPLII;
-            break;
-
-        case HB_AMIXDOWN_STEREO:
-            ff_layout = AV_CH_LAYOUT_STEREO;
-            break;
-
-        case HB_AMIXDOWN_5POINT1:
-            ff_layout = AV_CH_LAYOUT_5POINT1;
-            break;
-
-        case HB_AMIXDOWN_6POINT1:
-            ff_layout = AV_CH_LAYOUT_6POINT1;
-            break;
-
-        case HB_AMIXDOWN_7POINT1:
-            ff_layout = AV_CH_LAYOUT_7POINT1;
-            break;
-
-        case HB_AMIXDOWN_5_2_LFE:
-            ff_layout = (AV_CH_LAYOUT_5POINT1_BACK|
-                         AV_CH_FRONT_LEFT_OF_CENTER|
-                         AV_CH_FRONT_RIGHT_OF_CENTER);
-            break;
-
-        default:
-            ff_layout = AV_CH_LAYOUT_STEREO;
-            hb_log("hb_ff_mixdown_xlat: unsupported mixdown %d", hb_mixdown);
-            break;
-    }
-    if (downmix_mode != NULL)
-        *downmix_mode = mode;
-    return ff_layout;
-}
-
-/*
- * Set sample format to the request format if supported by the codec.
- * The planar/packed variant of the requested format is the next best thing.
- */
-void hb_ff_set_sample_fmt(AVCodecContext *context, AVCodec *codec,
-                          enum AVSampleFormat request_sample_fmt)
-{
-    if (context != NULL && codec != NULL &&
-        codec->type == AVMEDIA_TYPE_AUDIO && codec->sample_fmts != NULL)
-    {
-        const enum AVSampleFormat *fmt;
-        enum AVSampleFormat next_best_fmt;
-
-        next_best_fmt = (av_sample_fmt_is_planar(request_sample_fmt)  ?
-                         av_get_packed_sample_fmt(request_sample_fmt) :
-                         av_get_planar_sample_fmt(request_sample_fmt));
-
-        context->request_sample_fmt = AV_SAMPLE_FMT_NONE;
-
-        for (fmt = codec->sample_fmts; *fmt != AV_SAMPLE_FMT_NONE; fmt++)
-        {
-            if (*fmt == request_sample_fmt)
-            {
-                context->request_sample_fmt = request_sample_fmt;
-                break;
-            }
-            else if (*fmt == next_best_fmt)
-            {
-                context->request_sample_fmt = next_best_fmt;
-            }
-        }
-
-        /*
-         * When encoding and AVCodec.sample_fmts exists, avcodec_open2()
-         * will error out if AVCodecContext.sample_fmt isn't set.
-         */
-        if (context->request_sample_fmt == AV_SAMPLE_FMT_NONE)
-        {
-            context->request_sample_fmt = codec->sample_fmts[0];
-        }
-        context->sample_fmt = context->request_sample_fmt;
-    }
-}
-
 /**
  * Registers work objects, by adding the work object to a linked list.
  * @param w Handle to hb_work_object_t to register.
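The helpers deleted from hb.c above are moved verbatim into the new libhb/hbffmpeg.c below. For reference, a sketch of the classic hb_sws_get_context() conversion path that copy_frame() used to perform and that the avfilter graph now replaces — the helper name convert_to_yuv420, the BT.709 matrix choice, and the caller-supplied destination planes are assumptions, not part of this commit:

/* Sketch only: one-shot conversion of a decoded AVFrame to yuv420p. */
#include "hbffmpeg.h"

static int convert_to_yuv420(AVFrame *in, uint8_t *dst[4], int dst_stride[4])
{
    struct SwsContext *sws;
    int out_w = in->width  & ~1;   /* pipeline requires even dimensions */
    int out_h = in->height & ~1;

    sws = hb_sws_get_context(in->width, in->height, in->format,
                             out_w, out_h, AV_PIX_FMT_YUV420P,
                             SWS_LANCZOS | SWS_ACCURATE_RND,
                             hb_ff_get_colorspace(HB_COLR_MAT_BT709));
    if (sws == NULL)
        return -1;

    sws_scale(sws, (const uint8_t * const *)in->data, in->linesize,
              0, in->height, dst, dst_stride);
    sws_freeContext(sws);
    return 0;
}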
diff --git a/libhb/hbffmpeg.c b/libhb/hbffmpeg.c
new file mode 100644
index 000000000..0b3403ed7
--- /dev/null
+++ b/libhb/hbffmpeg.c
@@ -0,0 +1,276 @@
+#include "hb.h"
+#include "hbffmpeg.h"
+
+static int get_frame_type(int type)
+{
+    switch (type)
+    {
+        case AV_PICTURE_TYPE_B:
+            return HB_FRAME_B;
+
+        case AV_PICTURE_TYPE_S:
+        case AV_PICTURE_TYPE_P:
+        case AV_PICTURE_TYPE_SP:
+            return HB_FRAME_P;
+
+        case AV_PICTURE_TYPE_BI:
+        case AV_PICTURE_TYPE_SI:
+        case AV_PICTURE_TYPE_I:
+        default:
+            return HB_FRAME_I;
+    }
+}
+
+void hb_avframe_set_video_buffer_flags(hb_buffer_t * buf, AVFrame *frame,
+                                       AVRational time_base)
+{
+    if (buf == NULL || frame == NULL)
+    {
+        return;
+    }
+
+    buf->s.start = av_rescale_q(frame->pts, time_base, (AVRational){1, 90000});
+    buf->s.duration = frame->reordered_opaque;
+
+    if (frame->top_field_first)
+    {
+        buf->s.flags |= PIC_FLAG_TOP_FIELD_FIRST;
+    }
+    if (!frame->interlaced_frame)
+    {
+        buf->s.flags |= PIC_FLAG_PROGRESSIVE_FRAME;
+    }
+    else
+    {
+        buf->s.combed = HB_COMB_HEAVY;
+    }
+    if (frame->repeat_pict == 1)
+    {
+        buf->s.flags |= PIC_FLAG_REPEAT_FIRST_FIELD;
+    }
+    if (frame->repeat_pict == 2)
+    {
+        buf->s.flags |= PIC_FLAG_REPEAT_FRAME;
+    }
+    buf->s.frametype = get_frame_type(frame->pict_type);
+}
+
+hb_buffer_t * hb_avframe_to_video_buffer(AVFrame *frame, AVRational time_base)
+{
+    hb_buffer_t * buf;
+
+    buf = hb_frame_buffer_init(frame->format, frame->width, frame->height);
+    if (buf == NULL)
+    {
+        return NULL;
+    }
+
+    hb_avframe_set_video_buffer_flags(buf, frame, time_base);
+
+    int pp;
+    for (pp = 0; pp < 3; pp++)
+    {
+        int yy;
+        int width    = buf->plane[pp].width;
+        int stride   = buf->plane[pp].stride;
+        int height   = buf->plane[pp].height;
+        int linesize = frame->linesize[pp];
+        uint8_t * dst = buf->plane[pp].data;
+        uint8_t * src = frame->data[pp];
+
+        for (yy = 0; yy < height; yy++)
+        {
+            memcpy(dst, src, width);
+            dst += stride;
+            src += linesize;
+        }
+    }
+
+    return buf;
+}
+
+static int handle_jpeg(enum AVPixelFormat *format)
+{
+    switch (*format)
+    {
+        case AV_PIX_FMT_YUVJ420P: *format = AV_PIX_FMT_YUV420P; return 1;
+        case AV_PIX_FMT_YUVJ422P: *format = AV_PIX_FMT_YUV422P; return 1;
+        case AV_PIX_FMT_YUVJ444P: *format = AV_PIX_FMT_YUV444P; return 1;
+        case AV_PIX_FMT_YUVJ440P: *format = AV_PIX_FMT_YUV440P; return 1;
+        default:                  return 0;
+    }
+}
+
+struct SwsContext*
+hb_sws_get_context(int srcW, int srcH, enum AVPixelFormat srcFormat,
+                   int dstW, int dstH, enum AVPixelFormat dstFormat,
+                   int flags, int colorspace)
+{
+    struct SwsContext * ctx;
+
+    ctx = sws_alloc_context();
+    if ( ctx )
+    {
+        int srcRange, dstRange;
+
+        srcRange = handle_jpeg(&srcFormat);
+        dstRange = handle_jpeg(&dstFormat);
+        flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP;
+
+        av_opt_set_int(ctx, "srcw", srcW, 0);
+        av_opt_set_int(ctx, "srch", srcH, 0);
+        av_opt_set_int(ctx, "src_range", srcRange, 0);
+        av_opt_set_int(ctx, "src_format", srcFormat, 0);
+        av_opt_set_int(ctx, "dstw", dstW, 0);
+        av_opt_set_int(ctx, "dsth", dstH, 0);
+        av_opt_set_int(ctx, "dst_range", dstRange, 0);
+        av_opt_set_int(ctx, "dst_format", dstFormat, 0);
+        av_opt_set_int(ctx, "sws_flags", flags, 0);
+
+        sws_setColorspaceDetails( ctx,
+                      sws_getCoefficients( colorspace ), // src colorspace
+                      srcRange, // src range 0 = MPG, 1 = JPG
+                      sws_getCoefficients( colorspace ), // dst colorspace
+                      dstRange, // dst range 0 = MPG, 1 = JPG
+                      0,         // brightness
+                      1 << 16,   // contrast
+                      1 << 16 ); // saturation
+
+        if (sws_init_context(ctx, NULL, NULL) < 0) {
+            hb_error("Cannot initialize resampling context");
+            sws_freeContext(ctx);
+            ctx = NULL;
+        }
+    }
+    return ctx;
+}
+
+int hb_ff_get_colorspace(int color_matrix)
+{
+    int color_space = SWS_CS_DEFAULT;
+
+    switch (color_matrix)
+    {
+        case HB_COLR_MAT_SMPTE170M:
+            color_space = SWS_CS_ITU601;
+            break;
+        case HB_COLR_MAT_SMPTE240M:
+            color_space = SWS_CS_SMPTE240M;
+            break;
+        case HB_COLR_MAT_BT709:
+            color_space = SWS_CS_ITU709;
+            break;
+        /* enable this when implemented in Libav
+        case HB_COLR_MAT_BT2020:
+            color_space = SWS_CS_BT2020;
+            break;
+        */
+        default:
+            break;
+    }
+
+    return color_space;
+}
+
+uint64_t hb_ff_mixdown_xlat(int hb_mixdown, int *downmix_mode)
+{
+    uint64_t ff_layout = 0;
+    int mode = AV_MATRIX_ENCODING_NONE;
+    switch (hb_mixdown)
+    {
+        // Passthru
+        case HB_AMIXDOWN_NONE:
+            break;
+
+        case HB_AMIXDOWN_MONO:
+        case HB_AMIXDOWN_LEFT:
+        case HB_AMIXDOWN_RIGHT:
+            ff_layout = AV_CH_LAYOUT_MONO;
+            break;
+
+        case HB_AMIXDOWN_DOLBY:
+            ff_layout = AV_CH_LAYOUT_STEREO;
+            mode = AV_MATRIX_ENCODING_DOLBY;
+            break;
+
+        case HB_AMIXDOWN_DOLBYPLII:
+            ff_layout = AV_CH_LAYOUT_STEREO;
+            mode = AV_MATRIX_ENCODING_DPLII;
+            break;
+
+        case HB_AMIXDOWN_STEREO:
+            ff_layout = AV_CH_LAYOUT_STEREO;
+            break;
+
+        case HB_AMIXDOWN_5POINT1:
+            ff_layout = AV_CH_LAYOUT_5POINT1;
+            break;
+
+        case HB_AMIXDOWN_6POINT1:
+            ff_layout = AV_CH_LAYOUT_6POINT1;
+            break;
+
+        case HB_AMIXDOWN_7POINT1:
+            ff_layout = AV_CH_LAYOUT_7POINT1;
+            break;
+
+        case HB_AMIXDOWN_5_2_LFE:
+            ff_layout = (AV_CH_LAYOUT_5POINT1_BACK|
+                         AV_CH_FRONT_LEFT_OF_CENTER|
+                         AV_CH_FRONT_RIGHT_OF_CENTER);
+            break;
+
+        default:
+            ff_layout = AV_CH_LAYOUT_STEREO;
+            hb_log("hb_ff_mixdown_xlat: unsupported mixdown %d", hb_mixdown);
+            break;
+    }
+    if (downmix_mode != NULL)
+        *downmix_mode = mode;
+    return ff_layout;
+}
+
+/*
+ * Set sample format to the request format if supported by the codec.
+ * The planar/packed variant of the requested format is the next best thing.
+ */
+void hb_ff_set_sample_fmt(AVCodecContext *context, AVCodec *codec,
+                          enum AVSampleFormat request_sample_fmt)
+{
+    if (context != NULL && codec != NULL &&
+        codec->type == AVMEDIA_TYPE_AUDIO && codec->sample_fmts != NULL)
+    {
+        const enum AVSampleFormat *fmt;
+        enum AVSampleFormat next_best_fmt;
+
+        next_best_fmt = (av_sample_fmt_is_planar(request_sample_fmt)  ?
+                         av_get_packed_sample_fmt(request_sample_fmt) :
+                         av_get_planar_sample_fmt(request_sample_fmt));
+
+        context->request_sample_fmt = AV_SAMPLE_FMT_NONE;
+
+        for (fmt = codec->sample_fmts; *fmt != AV_SAMPLE_FMT_NONE; fmt++)
+        {
+            if (*fmt == request_sample_fmt)
+            {
+                context->request_sample_fmt = request_sample_fmt;
+                break;
+            }
+            else if (*fmt == next_best_fmt)
+            {
+                context->request_sample_fmt = next_best_fmt;
+            }
+        }
+
+        /*
+         * When encoding and AVCodec.sample_fmts exists, avcodec_open2()
+         * will error out if AVCodecContext.sample_fmt isn't set.
+         */
+        if (context->request_sample_fmt == AV_SAMPLE_FMT_NONE)
+        {
+            context->request_sample_fmt = codec->sample_fmts[0];
+        }
+        context->sample_fmt = context->request_sample_fmt;
+    }
+}
+
diff --git a/libhb/hbffmpeg.h b/libhb/hbffmpeg.h
index 82930ad96..76ffbe830 100644
--- a/libhb/hbffmpeg.h
+++ b/libhb/hbffmpeg.h
@@ -36,3 +36,7 @@ struct SwsContext*
 hb_sws_get_context(int srcW, int srcH, enum AVPixelFormat srcFormat,
                    int dstW, int dstH, enum AVPixelFormat dstFormat,
                    int flags, int colorspace);
+
+hb_buffer_t * hb_avframe_to_video_buffer(AVFrame *frame, AVRational time_base);
+void hb_avframe_set_video_buffer_flags(hb_buffer_t * buf, AVFrame *frame,
+                                       AVRational time_base);
diff --git a/libhb/internal.h b/libhb/internal.h
index 80b5b4de2..a1de388cf 100644
--- a/libhb/internal.h
+++ b/libhb/internal.h
@@ -500,6 +500,7 @@ void hb_muxmp4_process_subtitle_style(int height,
 void hb_deinterlace(hb_buffer_t *dst, hb_buffer_t *src);
 
 void hb_avfilter_combine( hb_list_t * list );
+char * hb_append_filter_string(char * graph_str, char * filter_str);
 
 struct hb_chapter_queue_item_s
 {
diff --git a/libhb/stream.c b/libhb/stream.c
index ba1d41761..d4d03ab12 100644
--- a/libhb/stream.c
+++ b/libhb/stream.c
@@ -5553,6 +5553,9 @@ static hb_title_t *ffmpeg_title_scan( hb_stream_t *stream, hb_title_t *title )
     {
         int rotation = av_display_rotation_get((int32_t *)sd.data);
         switch (rotation) {
+            case 0:
+                title->rotation = HB_ROTATION_0;
+                break;
             case 90:
                 title->rotation = HB_ROTATION_90;
                 break;
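With hb_avframe_to_video_buffer() relocated to hbffmpeg.c, both filterFrame() in avfilter.c and the new filter_video() in decavcodec.c share the same drain pattern around a buffersink. A condensed sketch of that pattern — the function name drain_filter_graph and the sink/frame/tb/list parameters are placeholders for the caller's state, not names from this commit:

/* Sketch only: pull every filtered frame and wrap it in an hb_buffer_t. */
#include "hbffmpeg.h"
#include "internal.h"
#include "libavfilter/buffersink.h"

static void drain_filter_graph(AVFilterContext *sink, AVFrame *frame,
                               AVRational tb, hb_buffer_list_t *list)
{
    /* av_buffersink_get_frame() returns AVERROR(EAGAIN) or AVERROR_EOF
     * once the graph has no more frames to hand out. */
    while (av_buffersink_get_frame(sink, frame) >= 0)
    {
        hb_buffer_t *buf = hb_avframe_to_video_buffer(frame, tb);
        hb_buffer_list_append(list, buf);
        av_frame_unref(frame);
    }
}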