summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--gtk/src/hb-backend.c150
-rw-r--r--libhb/bd.c7
-rw-r--r--libhb/common.c98
-rw-r--r--libhb/common.h93
-rw-r--r--libhb/cropscale.c204
-rw-r--r--libhb/deblock.c152
-rw-r--r--libhb/deca52.c12
-rw-r--r--libhb/decavcodec.c132
-rw-r--r--libhb/deccc608sub.c12
-rw-r--r--libhb/decdca.c10
-rw-r--r--libhb/declpcm.c6
-rw-r--r--libhb/decmpeg2.c149
-rw-r--r--libhb/decomb.c158
-rw-r--r--libhb/decsrtsub.c8
-rw-r--r--libhb/decssasub.c341
-rw-r--r--libhb/dectx3gsub.c6
-rw-r--r--libhb/decutf8sub.c2
-rw-r--r--libhb/decvobsub.c130
-rw-r--r--libhb/deinterlace.c173
-rw-r--r--libhb/demuxmpeg.c50
-rw-r--r--libhb/denoise.c145
-rw-r--r--libhb/detelecine.c169
-rw-r--r--libhb/dvd.c5
-rw-r--r--libhb/dvdnav.c22
-rw-r--r--libhb/eedi2.c6
-rw-r--r--libhb/encavcodec.c53
-rw-r--r--libhb/encavcodecaudio.c7
-rw-r--r--libhb/encfaac.c7
-rw-r--r--libhb/enclame.c18
-rw-r--r--libhb/enctheora.c21
-rw-r--r--libhb/encvorbis.c9
-rw-r--r--libhb/encx264.c68
-rw-r--r--libhb/fifo.c17
-rw-r--r--libhb/hb.c221
-rw-r--r--libhb/hb.h7
-rw-r--r--libhb/hbffmpeg.h9
-rw-r--r--libhb/internal.h242
-rw-r--r--libhb/mcdeint.c12
-rw-r--r--libhb/mcdeint.h1
-rw-r--r--libhb/muxcommon.c6
-rw-r--r--libhb/muxmkv.c18
-rw-r--r--libhb/muxmp4.c71
-rw-r--r--libhb/platform/macosx/encca_aac.c6
-rw-r--r--libhb/reader.c56
-rw-r--r--libhb/render.c950
-rw-r--r--libhb/rendersub.c648
-rw-r--r--libhb/rotate.c284
-rw-r--r--libhb/scan.c26
-rw-r--r--libhb/stream.c104
-rw-r--r--libhb/sync.c325
-rw-r--r--libhb/vfr.c616
-rw-r--r--libhb/work.c367
-rw-r--r--macosx/Controller.m145
-rw-r--r--macosx/HBPreviewController.m2
-rw-r--r--test/test.c98
55 files changed, 3659 insertions, 2995 deletions
diff --git a/gtk/src/hb-backend.c b/gtk/src/hb-backend.c
index 837da3db2..e48f8eaba 100644
--- a/gtk/src/hb-backend.c
+++ b/gtk/src/hb-backend.c
@@ -4666,11 +4666,8 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
static gchar *advanced_opts;
gint sub_id = 0;
gboolean tweaks = FALSE;
- gchar *detel_str = NULL;
- gchar *decomb_str = NULL;
- gchar *deint_str = NULL;
- gchar *deblock_str = NULL;
- gchar *denoise_str = NULL;
+ hb_filter_object_t * filter;
+ gchar *filter_str;
gchar *dest_str = NULL;
g_debug("add_job()\n");
@@ -4778,11 +4775,6 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
}
}
}
- job->crop[0] = ghb_settings_get_int(js, "PictureTopCrop");
- job->crop[1] = ghb_settings_get_int(js, "PictureBottomCrop");
- job->crop[2] = ghb_settings_get_int(js, "PictureLeftCrop");
- job->crop[3] = ghb_settings_get_int(js, "PictureRightCrop");
-
gboolean decomb_deint = ghb_settings_get_boolean(js, "PictureDecombDeinterlace");
gint decomb = ghb_settings_combo_int(js, "PictureDecomb");
@@ -4818,67 +4810,89 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
}
}
+ int width, height, crop[4];
+ width = ghb_settings_get_int(js, "scale_width");
+ height = ghb_settings_get_int(js, "scale_height");
+
+ crop[0] = ghb_settings_get_int(js, "PictureTopCrop");
+ crop[1] = ghb_settings_get_int(js, "PictureBottomCrop");
+ crop[2] = ghb_settings_get_int(js, "PictureLeftCrop");
+ crop[3] = ghb_settings_get_int(js, "PictureRightCrop");
+
+ filter_str = g_strdup_printf("%d:%d:%d:%d:%d:%d",
+ width, height, crop[0], crop[1], crop[2], crop[3]);
+ filter = hb_filter_init(HB_FILTER_CROP_SCALE);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
+
/* Add selected filters */
- job->filters = hb_list_init();
gint detel = ghb_settings_combo_int(js, "PictureDetelecine");
if ( detel )
{
+ filter_str = NULL;
if (detel != 1)
{
if (detel_opts.map[detel].svalue != NULL)
- detel_str = g_strdup(detel_opts.map[detel].svalue);
+ filter_str = g_strdup(detel_opts.map[detel].svalue);
}
else
- detel_str = ghb_settings_get_string(js, "PictureDetelecineCustom");
- hb_filter_detelecine.settings = detel_str;
- hb_list_add( job->filters, &hb_filter_detelecine );
+ filter_str = ghb_settings_get_string(js, "PictureDetelecineCustom");
+ filter = hb_filter_init(HB_FILTER_DETELECINE);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
}
if ( decomb_deint && decomb )
{
+ filter_str = NULL;
if (decomb != 1)
{
if (decomb_opts.map[decomb].svalue != NULL)
- decomb_str = g_strdup(decomb_opts.map[decomb].svalue);
+ filter_str = g_strdup(decomb_opts.map[decomb].svalue);
}
else
- decomb_str = ghb_settings_get_string(js, "PictureDecombCustom");
- hb_filter_decomb.settings = decomb_str;
- hb_list_add( job->filters, &hb_filter_decomb );
+ filter_str = ghb_settings_get_string(js, "PictureDecombCustom");
+ filter = hb_filter_init(HB_FILTER_DECOMB);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
}
if( job->deinterlace )
{
+ filter_str = NULL;
if (deint != 1)
{
if (deint_opts.map[deint].svalue != NULL)
- deint_str = g_strdup(deint_opts.map[deint].svalue);
+ filter_str = g_strdup(deint_opts.map[deint].svalue);
}
else
- deint_str = ghb_settings_get_string(js, "PictureDeinterlaceCustom");
- hb_filter_deinterlace.settings = deint_str;
- hb_list_add( job->filters, &hb_filter_deinterlace );
- }
- gint deblock = ghb_settings_get_int(js, "PictureDeblock");
- if( deblock >= 5 )
- {
- deblock_str = g_strdup_printf("%d", deblock);
- hb_filter_deblock.settings = deblock_str;
- hb_list_add( job->filters, &hb_filter_deblock );
+ filter_str = ghb_settings_get_string(js, "PictureDeinterlaceCustom");
+ filter = hb_filter_init(HB_FILTER_DEINTERLACE);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
}
gint denoise = ghb_settings_combo_int(js, "PictureDenoise");
if( denoise )
{
+ filter_str = NULL;
if (denoise != 1)
{
if (denoise_opts.map[denoise].svalue != NULL)
- denoise_str = g_strdup(denoise_opts.map[denoise].svalue);
+ filter_str = g_strdup(denoise_opts.map[denoise].svalue);
}
else
- denoise_str = ghb_settings_get_string(js, "PictureDenoiseCustom");
- hb_filter_denoise.settings = denoise_str;
- hb_list_add( job->filters, &hb_filter_denoise );
+ filter_str = ghb_settings_get_string(js, "PictureDenoiseCustom");
+ filter = hb_filter_init(HB_FILTER_DENOISE);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
+ }
+ gint deblock = ghb_settings_get_int(js, "PictureDeblock");
+ if( deblock >= 5 )
+ {
+ filter_str = NULL;
+ filter_str = g_strdup_printf("%d", deblock);
+ filter = hb_filter_init(HB_FILTER_DEBLOCK);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
}
- job->width = ghb_settings_get_int(js, "scale_width");
- job->height = ghb_settings_get_int(js, "scale_height");
job->vcodec = ghb_settings_combo_int(js, "VideoEncoder");
if ((job->mux == HB_MUX_MP4 ) && (job->vcodec == HB_VCODEC_THEORA))
@@ -4903,23 +4917,29 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
job->vbitrate = ghb_settings_get_int(js, "VideoAvgBitrate");
}
- gint vrate = ghb_settings_combo_int(js, "VideoFramerate");
- if( vrate == 0 )
+ gint vrate;
+ gint vrate_base = ghb_settings_combo_int(js, "VideoFramerate");
+ gint cfr;
+ if (ghb_settings_get_boolean(js, "VideoFrameratePFR"))
+ cfr = 2;
+ else if (ghb_settings_get_boolean(js, "VideoFramerateCFR"))
+ cfr = 1;
+ else
+ cfr = 0;
+
+ if( vrate_base == 0 )
{
- job->vrate = title->rate;
- job->vrate_base = title->rate_base;
+ vrate = title->rate;
+ vrate_base = title->rate_base;
}
else
{
- job->vrate = 27000000;
- job->vrate_base = vrate;
+ vrate = 27000000;
}
- if (ghb_settings_get_boolean(js, "VideoFrameratePFR"))
- job->cfr = 2;
- else if (ghb_settings_get_boolean(js, "VideoFramerateCFR"))
- job->cfr = 1;
- else
- job->cfr = 0;
+ filter_str = g_strdup_printf("%d:%d:%d", cfr, vrate, vrate_base);
+ filter = hb_filter_init(HB_FILTER_VFR);
+ hb_add_filter( job, filter, filter_str );
+ g_free(filter_str);
const GValue *audio_list;
gint count, ii;
@@ -5106,6 +5126,13 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
}
}
}
+ if (one_burned)
+ {
+ // Add filter that renders vobsubs
+ filter = hb_filter_init(HB_FILTER_RENDER_SUB);
+ hb_add_filter( job, filter, NULL );
+ }
+
// TODO: libhb holds onto a reference to the advanced_opts and is not
// finished with it until encoding the job is done. But I can't
@@ -5232,29 +5259,10 @@ add_job(hb_handle_t *h, GValue *js, gint unique_id, gint titleindex)
// g_free(job->advanced_opts);
}
- // clean up audio list
- gint num_audio_tracks = hb_list_count(job->list_audio);
- for(ii = 0; ii < num_audio_tracks; ii++)
- {
- hb_audio_t *audio = (hb_audio_t*)hb_list_item(job->list_audio, 0);
- hb_list_rem(job->list_audio, audio);
- free(audio);
- }
+ // Reset the job so it can be use again to add other jobs
+ // for the same title.
+ hb_reset_job(job);
- // clean up subtitle list
- gint num_subtitle_tracks = hb_list_count(job->list_subtitle);
- for(ii = 0; ii < num_subtitle_tracks; ii++)
- {
- hb_subtitle_t *subtitle = hb_list_item(job->list_subtitle, 0);
- hb_list_rem(job->list_subtitle, subtitle);
- free(subtitle);
- }
-
- if (detel_str) g_free(detel_str);
- if (decomb_str) g_free(decomb_str);
- if (deint_str) g_free(deint_str);
- if (deblock_str) g_free(deblock_str);
- if (denoise_str) g_free(denoise_str);
if (dest_str) g_free(dest_str);
}
@@ -5269,8 +5277,6 @@ ghb_add_job(GValue *js, gint unique_id)
void
ghb_add_live_job(GValue *js, gint unique_id)
{
- // Since I'm doing a scan of the single title I want just prior
- // to adding the job, there is only the one title to choose from.
gint titleindex = ghb_settings_combo_int(js, "title");
add_job(h_scan, js, unique_id, titleindex);
}
diff --git a/libhb/bd.c b/libhb/bd.c
index 93ca20d02..9ec4c078a 100644
--- a/libhb/bd.c
+++ b/libhb/bd.c
@@ -535,9 +535,8 @@ int hb_bd_seek_pts( hb_bd_t * d, uint64_t pts )
int hb_bd_seek_chapter( hb_bd_t * d, int c )
{
- int64_t pos;
d->next_chap = c;
- pos = bd_seek_chapter( d->bd, c - 1 );
+ bd_seek_chapter( d->bd, c - 1 );
hb_ts_stream_reset(d->stream);
return 1;
}
@@ -612,8 +611,8 @@ hb_buffer_t * hb_bd_read( hb_bd_t * d )
b = hb_ts_decode_pkt( d->stream, buf+4 );
if ( b )
{
- b->discontinuity = discontinuity;
- b->new_chap = new_chap;
+ b->s.discontinuity = discontinuity;
+ b->s.new_chap = new_chap;
return b;
}
}
diff --git a/libhb/common.c b/libhb/common.c
index adbd5591e..d1d539020 100644
--- a/libhb/common.c
+++ b/libhb/common.c
@@ -1059,15 +1059,14 @@ void hb_list_rem( hb_list_t * l, void * p )
{
if( l->items[i] == p )
{
+ /* Shift all items after it sizeof( void * ) bytes earlier */
+ memmove( &l->items[i], &l->items[i+1],
+ ( l->items_count - i - 1 ) * sizeof( void * ) );
+
+ (l->items_count)--;
break;
}
}
-
- /* Shift all items after it sizeof( void * ) bytes earlier */
- memmove( &l->items[i], &l->items[i+1],
- ( l->items_count - i - 1 ) * sizeof( void * ) );
-
- (l->items_count)--;
}
/**********************************************************************
@@ -1102,7 +1101,7 @@ int hb_list_bytes( hb_list_t * l )
for( i = 0; i < hb_list_count( l ); i++ )
{
buf = hb_list_item( l, i );
- ret += buf->size - buf->cur;
+ ret += buf->size - buf->offset;
}
return ret;
@@ -1124,8 +1123,8 @@ void hb_list_seebytes( hb_list_t * l, uint8_t * dst, int size )
for( i = 0, copied = 0; copied < size; i++ )
{
buf = hb_list_item( l, i );
- copying = MIN( buf->size - buf->cur, size - copied );
- memcpy( &dst[copied], &buf->data[buf->cur], copying );
+ copying = MIN( buf->size - buf->offset, size - copied );
+ memcpy( &dst[copied], &buf->data[buf->offset], copying );
copied += copying;
}
}
@@ -1157,18 +1156,18 @@ void hb_list_getbytes( hb_list_t * l, uint8_t * dst, int size,
for( copied = 0, has_pts = 0; copied < size; )
{
buf = hb_list_item( l, 0 );
- copying = MIN( buf->size - buf->cur, size - copied );
- memcpy( &dst[copied], &buf->data[buf->cur], copying );
+ copying = MIN( buf->size - buf->offset, size - copied );
+ memcpy( &dst[copied], &buf->data[buf->offset], copying );
if( !has_pts )
{
- *pts = buf->start;
- *pos = buf->cur;
+ *pts = buf->s.start;
+ *pos = buf->offset;
has_pts = 1;
}
- buf->cur += copying;
- if( buf->cur >= buf->size )
+ buf->offset += copying;
+ if( buf->offset >= buf->size )
{
hb_list_rem( l, buf );
hb_buffer_close( &buf );
@@ -1501,6 +1500,71 @@ void hb_title_close( hb_title_t ** _t )
*_t = NULL;
}
+hb_filter_object_t * hb_filter_copy( hb_filter_object_t * filter )
+{
+ if( filter == NULL )
+ return NULL;
+
+ hb_filter_object_t * filter_copy = malloc( sizeof( hb_filter_object_t ) );
+ memcpy( filter_copy, filter, sizeof( hb_filter_object_t ) );
+ if( filter->settings )
+ filter_copy->settings = strdup( filter->settings );
+ return filter_copy;
+}
+
+/**
+ * Gets a filter object with the given type
+ * @param filter_id The type of filter to get.
+ * @returns The requested filter object.
+ */
+hb_filter_object_t * hb_filter_init( int filter_id )
+{
+ hb_filter_object_t * filter;
+ switch( filter_id )
+ {
+ case HB_FILTER_DETELECINE:
+ filter = &hb_filter_detelecine;
+ break;
+
+ case HB_FILTER_DECOMB:
+ filter = &hb_filter_decomb;
+ break;
+
+ case HB_FILTER_DEINTERLACE:
+ filter = &hb_filter_deinterlace;
+ break;
+
+ case HB_FILTER_VFR:
+ filter = &hb_filter_vfr;
+ break;
+
+ case HB_FILTER_DEBLOCK:
+ filter = &hb_filter_deblock;
+ break;
+
+ case HB_FILTER_DENOISE:
+ filter = &hb_filter_denoise;
+ break;
+
+ case HB_FILTER_RENDER_SUB:
+ filter = &hb_filter_render_sub;
+ break;
+
+ case HB_FILTER_CROP_SCALE:
+ filter = &hb_filter_crop_scale;
+ break;
+
+ case HB_FILTER_ROTATE:
+ filter = &hb_filter_rotate;
+ break;
+
+ default:
+ filter = NULL;
+ break;
+ }
+ return hb_filter_copy( filter );
+}
+
/**********************************************************************
* hb_filter_close
**********************************************************************
@@ -1510,10 +1574,6 @@ void hb_filter_close( hb_filter_object_t ** _f )
{
hb_filter_object_t * f = *_f;
- f->close( f->private_data );
-
- if( f->name )
- free( f->name );
if( f->settings )
free( f->settings );
diff --git a/libhb/common.h b/libhb/common.h
index 6f8e3b6c5..1f9dfa97a 100644
--- a/libhb/common.h
+++ b/libhb/common.h
@@ -59,6 +59,7 @@
#define EVEN( a ) ( (a) + ( (a) & 1 ) )
#define MULTIPLE_16( a ) ( 16 * ( ( (a) + 8 ) / 16 ) )
#define MULTIPLE_MOD( a, b ) ((b==1)?a:( b * ( ( (a) + (b / 2) - 1) / b ) ))
+#define MULTIPLE_MOD_UP( a, b ) ((b==1)?a:( b * ( ( (a) + (b) - 1) / b ) ))
#define MULTIPLE_MOD_DOWN( a, b ) ((b==1)?a:( b * ( (a) / b ) ))
#define HB_DVD_READ_BUFFER_SIZE 2048
@@ -241,7 +242,7 @@ struct hb_job_s
maxHeight: keep height below this */
int crop[4];
int deinterlace;
- hb_list_t * filters;
+ hb_list_t * list_filter;
int width;
int height;
int keep_ratio;
@@ -849,7 +850,6 @@ extern hb_work_object_t hb_decsrtsub;
extern hb_work_object_t hb_decutf8sub;
extern hb_work_object_t hb_dectx3gsub;
extern hb_work_object_t hb_decssasub;
-extern hb_work_object_t hb_render;
extern hb_work_object_t hb_encavcodec;
extern hb_work_object_t hb_encx264;
extern hb_work_object_t hb_enctheora;
@@ -867,43 +867,86 @@ extern hb_work_object_t hb_encca_haac;
extern hb_work_object_t hb_encavcodeca;
extern hb_work_object_t hb_reader;
-#define FILTER_OK 0
-#define FILTER_DELAY 1
-#define FILTER_FAILED 2
-#define FILTER_DROP 3
+#define HB_FILTER_OK 0
+#define HB_FILTER_DELAY 1
+#define HB_FILTER_FAILED 2
+#define HB_FILTER_DROP 3
+#define HB_FILTER_DONE 4
+
+typedef struct hb_filter_init_s
+{
+ hb_job_t * job;
+ int pix_fmt;
+ int width;
+ int height;
+ int par_width;
+ int par_height;
+ int crop[4];
+ int vrate_base;
+ int vrate;
+ int cfr;
+} hb_filter_init_t;
+
+typedef struct hb_filter_info_s
+{
+ char human_readable_desc[128];
+ hb_filter_init_t out;
+} hb_filter_info_t;
struct hb_filter_object_s
{
int id;
+ int enforce_order;
char * name;
char * settings;
#ifdef __LIBHB__
- hb_filter_private_t* (* init) ( int, int, int, char * );
+ int (* init) ( hb_filter_object_t *, hb_filter_init_t * );
+
+ int (* work) ( hb_filter_object_t *,
+ hb_buffer_t **, hb_buffer_t ** );
+
+ void (* close) ( hb_filter_object_t * );
+ int (* info) ( hb_filter_object_t *, hb_filter_info_t * );
+
+ hb_fifo_t * fifo_in;
+ hb_fifo_t * fifo_out;
+
+ hb_subtitle_t * subtitle;
- int (* work) ( const hb_buffer_t *, hb_buffer_t **,
- int, int, int, hb_filter_private_t * );
+ hb_filter_private_t * private_data;
- void (* close) ( hb_filter_private_t * );
+ hb_thread_t * thread;
+ volatile int * done;
+ int status;
- hb_filter_private_t * private_data;
- //hb_buffer_t * buffer;
+ // Filters can drop frames and thus chapter marks
+ // These are used to bridge the chapter to the next buffer
+ int chapter_val;
+ int64_t chapter_time;
#endif
};
-#define HB_FILTER_DETELECINE 1
-#define HB_FILTER_DEINTERLACE 2
-#define HB_FILTER_DEBLOCK 3
-#define HB_FILTER_DENOISE 4
-#define HB_FILTER_DECOMB 5
-#define HB_FILTER_ROTATE 6
-
-extern hb_filter_object_t hb_filter_detelecine;
-extern hb_filter_object_t hb_filter_deinterlace;
-extern hb_filter_object_t hb_filter_deblock;
-extern hb_filter_object_t hb_filter_denoise;
-extern hb_filter_object_t hb_filter_decomb;
-extern hb_filter_object_t hb_filter_rotate;
+enum
+{
+ // First, filters that may change the framerate (drop or dup frames)
+ HB_FILTER_DETELECINE = 1,
+ HB_FILTER_DECOMB,
+ HB_FILTER_DEINTERLACE,
+ HB_FILTER_VFR,
+ // Filters that must operate on the original source image are next
+ HB_FILTER_DEBLOCK,
+ HB_FILTER_DENOISE,
+ HB_FILTER_RENDER_SUB,
+ HB_FILTER_CROP_SCALE,
+ // Finally filters that don't care what order they are in,
+ // except that they must be after the above filters
+ HB_FILTER_ROTATE,
+};
+
+hb_filter_object_t * hb_filter_init( int filter_id );
+hb_filter_object_t * hb_filter_copy( hb_filter_object_t * filter );
+void hb_filter_close( hb_filter_object_t ** );
typedef void hb_error_handler_t( const char *errmsg );
diff --git a/libhb/cropscale.c b/libhb/cropscale.c
new file mode 100644
index 000000000..11a22a44e
--- /dev/null
+++ b/libhb/cropscale.c
@@ -0,0 +1,204 @@
+
+#include "hb.h"
+#include "hbffmpeg.h"
+
+struct hb_filter_private_s
+{
+ int width_in;
+ int height_in;
+ int pix_fmt;
+ int pix_fmt_out;
+ int width_out;
+ int height_out;
+ int crop[4];
+ struct SwsContext * context;
+};
+
+static int hb_crop_scale_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+
+static int hb_crop_scale_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static int hb_crop_scale_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info );
+
+static void hb_crop_scale_close( hb_filter_object_t * filter );
+
+hb_filter_object_t hb_filter_crop_scale =
+{
+ .id = HB_FILTER_CROP_SCALE,
+ .enforce_order = 1,
+ .name = "Crop and Scale",
+ .settings = NULL,
+ .init = hb_crop_scale_init,
+ .work = hb_crop_scale_work,
+ .close = hb_crop_scale_close,
+ .info = hb_crop_scale_info,
+};
+
+static int hb_crop_scale_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
+{
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
+
+ // TODO: add pix format option to settings
+ pv->pix_fmt_out = init->pix_fmt;
+ pv->width_in = init->width;
+ pv->height_in = init->height;
+ pv->width_out = init->width;
+ pv->height_out = init->height;
+ memcpy( pv->crop, init->crop, sizeof( int[4] ) );
+ if( filter->settings )
+ {
+ sscanf( filter->settings, "%d:%d:%d:%d:%d:%d",
+ &pv->width_out, &pv->height_out,
+ &pv->crop[0], &pv->crop[1], &pv->crop[2], &pv->crop[3] );
+ }
+ // Set init values so the next stage in the pipline
+ // knows what it will be getting
+ init->pix_fmt = pv->pix_fmt;
+ init->width = pv->width_out;
+ init->height = pv->height_out;
+ memcpy( init->crop, pv->crop, sizeof( int[4] ) );
+
+ return 0;
+}
+
+static int hb_crop_scale_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info )
+{
+ hb_filter_private_t * pv = filter->private_data;
+
+ if( !pv )
+ return 0;
+
+ // Set init values so the next stage in the pipline
+ // knows what it will be getting
+ memset( info, 0, sizeof( hb_filter_info_t ) );
+ info->out.pix_fmt = pv->pix_fmt;
+ info->out.width = pv->width_out;
+ info->out.height = pv->height_out;
+ memcpy( info->out.crop, pv->crop, sizeof( int[4] ) );
+
+ int cropped_width = pv->width_in - ( pv->crop[2] + pv->crop[3] );
+ int cropped_height = pv->height_in - ( pv->crop[0] + pv->crop[1] );
+
+ sprintf( info->human_readable_desc,
+ "source: %d * %d, crop (%d/%d/%d/%d): %d * %d, scale: %d * %d",
+ pv->width_in, pv->height_in,
+ pv->crop[0], pv->crop[1], pv->crop[2], pv->crop[3],
+ cropped_width, cropped_height, pv->width_out, pv->height_out );
+
+ return 0;
+}
+
+static void hb_crop_scale_close( hb_filter_object_t * filter )
+{
+ hb_filter_private_t * pv = filter->private_data;
+
+ if ( !pv )
+ {
+ return;
+ }
+
+ if ( pv->context )
+ {
+ sws_freeContext( pv->context );
+ }
+
+ free( pv );
+ filter->private_data = NULL;
+}
+
+static hb_buffer_t* crop_scale( hb_filter_private_t * pv, hb_buffer_t * in )
+{
+ AVPicture pic_in;
+ AVPicture pic_out;
+ AVPicture pic_crop;
+ hb_buffer_t * out;
+ out = hb_video_buffer_init( pv->width_out, pv->height_out );
+
+ hb_avpicture_fill( &pic_in, in );
+ hb_avpicture_fill( &pic_out, out );
+
+ // Crop; this alters the pointer to the data to point to the
+ // correct place for cropped frame
+ av_picture_crop( &pic_crop, &pic_in, in->f.fmt,
+ pv->crop[0], pv->crop[2] );
+
+ if ( !pv->context ||
+ pv->width_in != in->f.width ||
+ pv->height_in != in->f.height ||
+ pv->pix_fmt != in->f.fmt )
+ {
+ // Something changed, need a new scaling context.
+ if( pv->context )
+ sws_freeContext( pv->context );
+
+ pv->context = hb_sws_get_context(
+ in->f.width - (pv->crop[2] + pv->crop[3]),
+ in->f.height - (pv->crop[0] + pv->crop[1]),
+ in->f.fmt,
+ out->f.width, out->f.height, out->f.fmt,
+ SWS_LANCZOS | SWS_ACCURATE_RND );
+ pv->width_in = in->f.width;
+ pv->height_in = in->f.height;
+ pv->pix_fmt = in->f.fmt;
+ }
+
+ // Scale pic_crop into pic_render according to the
+ // context set up above
+ sws_scale(pv->context,
+ (const uint8_t* const*)pic_crop.data,
+ pic_crop.linesize,
+ 0, in->f.height - (pv->crop[0] + pv->crop[1]),
+ pic_out.data, pic_out.linesize);
+
+ out->s = in->s;
+ hb_buffer_move_subs( out, in );
+ return out;
+}
+
+static int hb_crop_scale_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
+{
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+
+ if ( in->size <= 0 )
+ {
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
+ }
+
+ if ( !pv )
+ {
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_OK;
+ }
+
+ // If width or height were not set, set them now based on the
+ // input width & height
+ if ( pv->width_out <= 0 || pv->height_out <= 0 )
+ {
+ pv->width_out = in->f.width - (pv->crop[2] + pv->crop[3]);
+ pv->height_out = in->f.height - (pv->crop[0] + pv->crop[1]);
+ }
+ if ( in->f.fmt == pv->pix_fmt_out &&
+ !pv->crop[0] && !pv->crop[1] && !pv->crop[2] && !pv->crop[3] &&
+ in->f.width == pv->width_out && in->f.height == pv->height_out )
+ {
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_OK;
+ }
+ *buf_out = crop_scale( pv, in );
+
+ return HB_FILTER_OK;
+}
diff --git a/libhb/deblock.c b/libhb/deblock.c
index 261b0b301..dab622abc 100644
--- a/libhb/deblock.c
+++ b/libhb/deblock.c
@@ -43,43 +43,31 @@ static const uint8_t __attribute__((aligned(8))) pp7_dither[8][8] =
struct hb_filter_private_s
{
- int pix_fmt;
- int width[3];
- int height[3];
-
int pp7_qp;
int pp7_mode;
int pp7_mpeg2;
int pp7_temp_stride;
uint8_t * pp7_src;
-
- AVPicture pic_in;
- AVPicture pic_out;
- hb_buffer_t * buf_out;
};
-hb_filter_private_t * hb_deblock_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_deblock_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_deblock_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_deblock_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_deblock_close( hb_filter_private_t * pv );
+static void hb_deblock_close( hb_filter_object_t * filter );
hb_filter_object_t hb_filter_deblock =
{
- FILTER_DEBLOCK,
- "Deblock (pp7)",
- NULL,
- hb_deblock_init,
- hb_deblock_work,
- hb_deblock_close,
+ .id = HB_FILTER_DEBLOCK,
+ .enforce_order = 1,
+ .name = "Deblock (pp7)",
+ .settings = NULL,
+ .init = hb_deblock_init,
+ .work = hb_deblock_work,
+ .close = hb_deblock_close,
};
static inline void pp7_dct_a( DCTELEM * dst, uint8_t * src, int stride )
@@ -349,34 +337,19 @@ static void pp7_filter( hb_filter_private_t * pv,
}
}
-hb_filter_private_t * hb_deblock_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_deblock_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
-
- pv->pix_fmt = pix_fmt;
-
- pv->width[0] = width;
- pv->height[0] = height;
-
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
-
+ filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
+ hb_filter_private_t * pv = filter->private_data;
pv->pp7_qp = PP7_QP_DEFAULT;
pv->pp7_mode = PP7_MODE_DEFAULT;
pv->pp7_mpeg2 = 1; /*mpi->qscale_type;*/
- if( settings )
+ if( filter->settings )
{
- sscanf( settings, "%d:%d", &pv->pp7_qp, &pv->pp7_mode );
+ sscanf( filter->settings, "%d:%d", &pv->pp7_qp, &pv->pp7_mode );
}
if( pv->pp7_qp < 0 )
@@ -399,92 +372,83 @@ hb_filter_private_t * hb_deblock_init( int pix_fmt,
break;
}
- int h = (height+16+15)&(~15);
+ int h = (init->height+16+15)&(~15);
- pv->pp7_temp_stride = (width+16+15)&(~15);
+ pv->pp7_temp_stride = (init->width+16+15)&(~15);
pv->pp7_src = (uint8_t*)malloc( pv->pp7_temp_stride*(h+8)*sizeof(uint8_t) );
- pv->buf_out = hb_video_buffer_init( width, height );
-
- return pv;
+ return 0;
}
-void hb_deblock_close( hb_filter_private_t * pv )
+static void hb_deblock_close( hb_filter_object_t * filter )
{
+ hb_filter_private_t * pv = filter->private_data;
+
if( !pv )
{
return;
}
- if( pv->buf_out )
- {
- hb_buffer_close( &pv->buf_out );
- }
-
free( pv );
+ filter->private_data = NULL;
}
-int hb_deblock_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+static int hb_deblock_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in, * out;
+
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
-
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
- pix_fmt, width, height );
-
if( /*TODO: mpi->qscale ||*/ pv->pp7_qp )
{
+ out = hb_video_buffer_init( in->f.width, in->f.height );
+
pp7_filter( pv,
- pv->pic_out.data[0],
- pv->pic_in.data[0],
- pv->width[0],
- pv->height[0],
+ out->plane[0].data,
+ in->plane[0].data,
+ in->plane[0].stride,
+ in->plane[0].height,
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
1 );
pp7_filter( pv,
- pv->pic_out.data[1],
- pv->pic_in.data[1],
- pv->width[1],
- pv->height[1],
+ out->plane[1].data,
+ in->plane[1].data,
+ in->plane[1].stride,
+ in->plane[1].height,
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
0 );
pp7_filter( pv,
- pv->pic_out.data[2],
- pv->pic_in.data[2],
- pv->width[2],
- pv->height[2],
+ out->plane[2].data,
+ in->plane[2].data,
+ in->plane[2].stride,
+ in->plane[2].height,
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
0 );
+
+ out->s = in->s;
+ hb_buffer_move_subs( out, in );
+
+ *buf_out = out;
}
else
{
- memcpy( pv->buf_out->data, buf_in->data, buf_in->size );
+ *buf_in = NULL;
+ *buf_out = in;
}
- hb_buffer_copy_settings( pv->buf_out, buf_in );
-
- *buf_out = pv->buf_out;
-
- return FILTER_OK;
+ return HB_FILTER_OK;
}
-
-
diff --git a/libhb/deca52.c b/libhb/deca52.c
index a8d537d52..1f05cc829 100644
--- a/libhb/deca52.c
+++ b/libhb/deca52.c
@@ -153,7 +153,7 @@ static int deca52Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_DONE;
}
- if ( (*buf_in)->start < -1 && pv->next_expected_pts == 0 )
+ if ( (*buf_in)->s.start < -1 && pv->next_expected_pts == 0 )
{
// discard buffers that start before video time 0
*buf_out = NULL;
@@ -264,9 +264,9 @@ static hb_buffer_t * Decode( hb_work_object_t * w )
{
buf = hb_buffer_init( size );
memcpy( buf->data, pv->frame, size );
- buf->start = pts;
+ buf->s.start = pts;
pts += frame_dur;
- buf->stop = pts;
+ buf->s.stop = pts;
pv->next_expected_pts = pts;
return buf;
}
@@ -288,9 +288,9 @@ static hb_buffer_t * Decode( hb_work_object_t * w )
/* 6 blocks per frame, 256 samples per block, channelsused channels */
buf = hb_buffer_init( 6 * 256 * pv->out_discrete_channels * sizeof( float ) );
- buf->start = pts;
+ buf->s.start = pts;
pts += frame_dur;
- buf->stop = pts;
+ buf->s.stop = pts;
pv->next_expected_pts = pts;
for( i = 0; i < 6; i++ )
@@ -374,7 +374,7 @@ static int deca52BSInfo( hb_work_object_t *w, const hb_buffer_t *b,
// discard enough bytes from the front of the buffer to make
// room for the new stuff
int newlen = sizeof(w->audio->priv.config.a52.buf) - blen;
- memcpy( buf, buf + len - newlen, newlen );
+ memmove( buf, buf + len - newlen, newlen );
len = newlen;
}
}
diff --git a/libhb/decavcodec.c b/libhb/decavcodec.c
index fd58c5c28..1bba19990 100644
--- a/libhb/decavcodec.c
+++ b/libhb/decavcodec.c
@@ -231,10 +231,10 @@ static int decavcodecaInit( hb_work_object_t * w, hb_job_t * job )
if ( w->audio != NULL )
{
- if ( hb_need_downmix( w->audio->config.in.channel_layout,
+ if ( hb_need_downmix( w->audio->config.in.channel_layout,
w->audio->config.out.mixdown) )
{
- pv->downmix = hb_downmix_init(w->audio->config.in.channel_layout,
+ pv->downmix = hb_downmix_init(w->audio->config.in.channel_layout,
w->audio->config.out.mixdown);
hb_downmix_set_chan_map( pv->downmix, &hb_smpte_chan_map, &hb_smpte_chan_map );
}
@@ -328,7 +328,7 @@ static int decavcodecaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
*buf_out = NULL;
- if ( in->start < -1 && pv->pts_next <= 0 )
+ if ( in->s.start < -1 && pv->pts_next <= 0 )
{
// discard buffers that start before video time 0
return HB_WORK_OK;
@@ -341,7 +341,7 @@ static int decavcodecaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
int pout_len;
int64_t cur;
- cur = in->start;
+ cur = in->s.start;
if ( pv->parser != NULL )
{
@@ -443,10 +443,10 @@ static int decavcodecaBSInfo( hb_work_object_t *w, const hb_buffer_t *buf,
if ( parser != NULL )
{
- len = av_parser_parse2( parser, context, &pbuffer,
- &pbuffer_size, buf->data + pos,
- buf->size - pos, buf->start,
- buf->start, 0 );
+ len = av_parser_parse2( parser, context, &pbuffer,
+ &pbuffer_size, buf->data + pos,
+ buf->size - pos, buf->s.start,
+ buf->s.start, 0 );
}
else
{
@@ -461,7 +461,7 @@ static int decavcodecaBSInfo( hb_work_object_t *w, const hb_buffer_t *buf,
avp.data = pbuffer;
avp.size = pbuffer_size;
- len = avcodec_decode_audio3( context, (int16_t*)buffer,
+ len = avcodec_decode_audio3( context, (int16_t*)buffer,
&out_size, &avp );
if ( len > 0 && context->sample_rate > 0 )
{
@@ -474,13 +474,13 @@ static int decavcodecaBSInfo( hb_work_object_t *w, const hb_buffer_t *buf,
info->bitrate = context->bit_rate;
info->rate = context->sample_rate;
info->rate_base = 1;
- info->channel_layout =
- hb_ff_layout_xlat(context->channel_layout,
+ info->channel_layout =
+ hb_ff_layout_xlat(context->channel_layout,
context->channels);
ret = 1;
if ( context->channels && isamp )
{
- info->samples_per_frame = out_size /
+ info->samples_per_frame = out_size /
(isamp * context->channels);
}
break;
@@ -552,7 +552,7 @@ static hb_buffer_t *copy_frame( hb_work_private_t *pv, AVFrame *frame )
{
// have to convert to our internal color space and/or rescale
AVPicture dstpic;
- avpicture_fill( &dstpic, dst, PIX_FMT_YUV420P, w, h );
+ hb_avpicture_fill( &dstpic, buf );
if ( ! pv->sws_context ||
pv->sws_width != context->width ||
@@ -575,9 +575,14 @@ static hb_buffer_t *copy_frame( hb_work_private_t *pv, AVFrame *frame )
}
else
{
+ w = buf->plane[0].stride;
+ h = buf->plane[0].height;
dst = copy_plane( dst, frame->data[0], w, frame->linesize[0], h );
- w = (w + 1) >> 1; h = (h + 1) >> 1;
+ w = buf->plane[1].stride;
+ h = buf->plane[1].height;
dst = copy_plane( dst, frame->data[1], w, frame->linesize[1], h );
+ w = buf->plane[2].stride;
+ h = buf->plane[2].height;
dst = copy_plane( dst, frame->data[2], w, frame->linesize[2], h );
}
return buf;
@@ -621,7 +626,7 @@ static void flushDelayQueue( hb_work_private_t *pv )
// flush all the video packets left on our timestamp-reordering delay q
while ( ( buf = pv->delayq[slot] ) != NULL )
{
- buf->start = heap_pop( &pv->pts_heap );
+ buf->s.start = heap_pop( &pv->pts_heap );
hb_list_add( pv->list, buf );
pv->delayq[slot] = NULL;
slot = ( slot + 1 ) & (HEAP_SIZE-1);
@@ -708,15 +713,15 @@ static void checkCadence( int * cadence, uint16_t flags, int64_t start )
}
/*
- * Decodes a video frame from the specified raw packet data
+ * Decodes a video frame from the specified raw packet data
* ('data', 'size', 'sequence').
* The output of this function is stored in 'pv->list', which contains a list
* of zero or more decoded packets.
- *
- * The returned packets are guaranteed to have their timestamps in the correct
- * order, even if the original packets decoded by libavcodec have misordered
+ *
+ * The returned packets are guaranteed to have their timestamps in the correct
+ * order, even if the original packets decoded by libavcodec have misordered
* timestamps, due to the use of 'packed B-frames'.
- *
+ *
* Internally the set of decoded packets may be buffered in 'pv->delayq'
* until enough packets have been decoded so that the timestamps can be
* correctly rewritten, if this is necessary.
@@ -741,7 +746,7 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
avp.dts = dts;
if ( avcodec_decode_video2( pv->context, &frame, &got_picture, &avp ) < 0 )
{
- ++pv->decode_errors;
+ ++pv->decode_errors;
}
if ( global_verbosity_level <= 1 )
{
@@ -770,15 +775,15 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
// ffmpeg makes it hard to attach a pts to a frame. if the MPEG ES
// packet had a pts we handed it to av_parser_parse (if the packet had
- // no pts we set it to AV_NOPTS_VALUE, but before the parse we can't
- // distinguish between the start of a video frame with no pts & an
- // intermediate packet of some frame which never has a pts). we hope
- // that when parse returns the frame to us the pts we originally
- // handed it will be in parser->pts. we put this pts into avp.pts so
- // that when avcodec_decode_video finally gets around to allocating an
- // AVFrame to hold the decoded frame, avcodec_default_get_buffer can
- // stuff that pts into the it. if all of these relays worked at this
- // point frame.pts should hold the frame's pts from the original data
+ // no pts we set it to AV_NOPTS_VALUE, but before the parse we can't
+ // distinguish between the start of a video frame with no pts & an
+ // intermediate packet of some frame which never has a pts). we hope
+ // that when parse returns the frame to us the pts we originally
+ // handed it will be in parser->pts. we put this pts into avp.pts so
+ // that when avcodec_decode_video finally gets around to allocating an
+ // AVFrame to hold the decoded frame, avcodec_default_get_buffer can
+ // stuff that pts into the it. if all of these relays worked at this
+ // point frame.pts should hold the frame's pts from the original data
// stream or AV_NOPTS_VALUE if it didn't have one. in the latter case
// we generate the next pts in sequence for it.
if ( !pv->frame_duration_set )
@@ -827,21 +832,23 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
if ( ! pv->job || ! pv->brokenByMicrosoft )
{
buf = copy_frame( pv, &frame );
- buf->start = pts;
+ buf->s.start = pts;
buf->sequence = sequence;
- buf->flags = flags;
- if ( pv->new_chap && buf->start >= pv->chap_time )
+
+ buf->s.flags = flags;
+
+ if ( pv->new_chap && buf->s.start >= pv->chap_time )
{
- buf->new_chap = pv->new_chap;
+ buf->s.new_chap = pv->new_chap;
+ log_chapter( pv, pv->new_chap, buf->s.start );
pv->new_chap = 0;
pv->chap_time = 0;
- log_chapter( pv, buf->new_chap, buf->start );
}
else if ( pv->nframes == 0 && pv->job )
{
- log_chapter( pv, pv->job->chapter_start, buf->start );
+ log_chapter( pv, pv->job->chapter_start, buf->s.start );
}
- checkCadence( pv->cadence, buf->flags, buf->start );
+ checkCadence( pv->cadence, flags, buf->s.start );
hb_list_add( pv->list, buf );
++pv->nframes;
return got_picture;
@@ -873,27 +880,28 @@ static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequen
if ( ( buf = pv->delayq[slot] ) != NULL )
{
pv->queue_primed = 1;
- buf->start = heap_pop( &pv->pts_heap );
+ buf->s.start = heap_pop( &pv->pts_heap );
- if ( pv->new_chap && buf->start >= pv->chap_time )
+ if ( pv->new_chap && buf->s.start >= pv->chap_time )
{
- buf->new_chap = pv->new_chap;
+ buf->s.new_chap = pv->new_chap;
+ log_chapter( pv, pv->new_chap, buf->s.start );
pv->new_chap = 0;
pv->chap_time = 0;
- log_chapter( pv, buf->new_chap, buf->start );
}
else if ( pv->nframes == 0 && pv->job )
{
- log_chapter( pv, pv->job->chapter_start, buf->start );
+ log_chapter( pv, pv->job->chapter_start, buf->s.start );
}
- checkCadence( pv->cadence, buf->flags, buf->start );
+ checkCadence( pv->cadence, buf->s.flags, buf->s.start );
hb_list_add( pv->list, buf );
}
// add the new frame to the delayq & push its timestamp on the heap
buf = copy_frame( pv, &frame );
buf->sequence = sequence;
- buf->flags = flags;
+ /* Store picture flags for later use by filters */
+ buf->s.flags = flags;
pv->delayq[slot] = buf;
heap_push( &pv->pts_heap, pts );
@@ -1085,7 +1093,7 @@ static int setup_extradata( hb_work_object_t *w, hb_buffer_t *in )
hb_work_private_t *pv = w->private_data;
// we can't call the avstream funcs but the read_header func in the
- // AVInputFormat may set up some state in the AVContext. In particular
+ // AVInputFormat may set up some state in the AVContext. In particular
// vc1t_read_header allocates 'extradata' to deal with header issues
// related to Microsoft's bizarre engineering notions. We alloc a chunk
// of space to make vc1 work then associate the codec with the context.
@@ -1096,7 +1104,7 @@ static int setup_extradata( hb_work_object_t *w, hb_buffer_t *in )
pv->context->extradata_size = 0;
// av_malloc uses posix_memalign which is allowed to
// return NULL when allocating 0 bytes. We use extradata == NULL
- // to trigger initialization of extradata and the decoder, so
+ // to trigger initialization of extradata and the decoder, so
// we can not set it to NULL here. So allocate a small
// buffer instead.
pv->context->extradata = av_malloc(1);
@@ -1193,14 +1201,14 @@ static int decavcodecvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
pv->video_codec_opened = 1;
}
- if( in->start >= 0 )
+ if( in->s.start >= 0 )
{
- pts = in->start;
- dts = in->renderOffset;
+ pts = in->s.start;
+ dts = in->s.renderOffset;
}
- if ( in->new_chap )
+ if ( in->s.new_chap )
{
- pv->new_chap = in->new_chap;
+ pv->new_chap = in->s.new_chap;
pv->chap_time = pts >= 0? pts : pv->pts_next;
}
decodeVideo( w, in->data, in->size, in->sequence, pts, dts );
@@ -1262,7 +1270,7 @@ static void compute_frame_duration( hb_work_private_t *pv )
pv->context->time_base.num * max_fps > pv->context->time_base.den &&
pv->context->time_base.den > pv->context->time_base.num * 8L )
{
- duration = (double)pv->context->time_base.num /
+ duration = (double)pv->context->time_base.num /
(double)pv->context->time_base.den;
if ( pv->context->ticks_per_frame > 1 )
{
@@ -1277,7 +1285,7 @@ static void compute_frame_duration( hb_work_private_t *pv )
if ( pv->context->time_base.num * max_fps > pv->context->time_base.den &&
pv->context->time_base.den > pv->context->time_base.num * 8L )
{
- duration = (double)pv->context->time_base.num /
+ duration = (double)pv->context->time_base.num /
(double)pv->context->time_base.den;
if ( pv->context->ticks_per_frame > 1 )
{
@@ -1376,10 +1384,10 @@ hb_work_object_t hb_decavcodecv =
.bsinfo = decavcodecvBSInfo
};
-static hb_buffer_t * downmixAudio(
- hb_audio_t *audio,
- hb_work_private_t *pv,
- hb_sample_t *buffer,
+static hb_buffer_t * downmixAudio(
+ hb_audio_t *audio,
+ hb_work_private_t *pv,
+ hb_sample_t *buffer,
int channels,
int nsamples )
{
@@ -1459,8 +1467,8 @@ static void decodeAudio( hb_audio_t * audio, hb_work_private_t *pv, uint8_t *dat
buf = hb_buffer_init( avp.size );
memcpy( buf->data, avp.data, avp.size );
- buf->start = pv->pts_next;
- buf->stop = pts_next;
+ buf->s.start = pv->pts_next;
+ buf->s.stop = pts_next;
hb_list_add( pv->list, buf );
pv->pts_next = pts_next;
continue;
@@ -1486,7 +1494,7 @@ static void decodeAudio( hb_audio_t * audio, hb_work_private_t *pv, uint8_t *dat
// get output buffer size then malloc a buffer
buffer = av_malloc( nsamples * sizeof(hb_sample_t) );
- // we're doing straight sample format conversion which
+ // we're doing straight sample format conversion which
// behaves as if there were only one channel.
const void * const ibuf[6] = { pv->buffer };
void * const obuf[6] = { buffer };
@@ -1499,8 +1507,8 @@ static void decodeAudio( hb_audio_t * audio, hb_work_private_t *pv, uint8_t *dat
hb_buffer_t * buf;
buf = downmixAudio( audio, pv, buffer, context->channels, nsamples );
- buf->start = pv->pts_next;
- buf->stop = pts_next;
+ buf->s.start = pv->pts_next;
+ buf->s.stop = pts_next;
hb_list_add( pv->list, buf );
pv->pts_next = pts_next;
diff --git a/libhb/deccc608sub.c b/libhb/deccc608sub.c
index b9033e6d6..aaa3d020d 100644
--- a/libhb/deccc608sub.c
+++ b/libhb/deccc608sub.c
@@ -1497,10 +1497,10 @@ void write_cc_line_as_transcript (struct eia608_screen *data, struct s_write *wb
* Put this subtitle in a hb_buffer_t and shove it into the subtitle fifo
*/
buffer = hb_buffer_init( length + 1 );
- buffer->start = wb->data608->current_visible_start_ms;
- buffer->stop = get_fts(wb);
+ buffer->s.start = wb->data608->current_visible_start_ms;
+ buffer->s.stop = get_fts(wb);
memcpy( buffer->data, wb->subline, length + 1 );
- //hb_log("CC %"PRId64": %s", buffer->stop, wb->subline);
+ //hb_log("CC %"PRId64": %s", buffer->s.stop, wb->subline);
if (wb->hb_last_buffer) {
wb->hb_last_buffer->next = buffer;
@@ -1650,8 +1650,8 @@ int write_cc_buffer_as_srt (struct eia608_screen *data, struct s_write *wb)
if (wb->enc_buffer_used)
{
hb_buffer_t *buffer = hb_buffer_init( wb->enc_buffer_used + 1 );
- buffer->start = ms_start;
- buffer->stop = ms_end;
+ buffer->s.start = ms_start;
+ buffer->s.stop = ms_end;
memcpy( buffer->data, wb->enc_buffer, wb->enc_buffer_used + 1 );
if (wb->hb_last_buffer) {
wb->hb_last_buffer->next = buffer;
@@ -2504,7 +2504,7 @@ int decccWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_DONE;
}
- pv->cc608->last_pts = in->start;
+ pv->cc608->last_pts = in->s.start;
process608(in->data, in->size, pv->cc608);
diff --git a/libhb/decdca.c b/libhb/decdca.c
index 4f5ee3929..1e6e4192c 100644
--- a/libhb/decdca.c
+++ b/libhb/decdca.c
@@ -125,7 +125,7 @@ static int decdcaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_DONE;
}
- if ( (*buf_in)->start < -1 && pv->next_pts == 0 )
+ if ( (*buf_in)->s.start < -1 && pv->next_pts == 0 )
{
// discard buffers that start before video time 0
*buf_out = NULL;
@@ -235,9 +235,9 @@ static hb_buffer_t * Decode( hb_work_object_t * w )
{
buf = hb_buffer_init( pv->size );
memcpy( buf->data, pv->frame, pv->size );
- buf->start = pts;
+ buf->s.start = pts;
pv->next_pts = pts + frame_dur;
- buf->stop = pv->next_pts;
+ buf->s.stop = pv->next_pts;
pv->sync = 0;
return buf;
}
@@ -252,9 +252,9 @@ static hb_buffer_t * Decode( hb_work_object_t * w )
int nsamp = num_blocks * 256;
buf = hb_buffer_init( nsamp * pv->out_discrete_channels * sizeof( float ) );
- buf->start = pts;
+ buf->s.start = pts;
pv->next_pts = pts + (double)nsamp / (double)pv->rate * 90000.;
- buf->stop = pv->next_pts;
+ buf->s.stop = pv->next_pts;
for( i = 0; i < num_blocks; i++ )
{
diff --git a/libhb/declpcm.c b/libhb/declpcm.c
index a47e0974c..6a7cbe56c 100644
--- a/libhb/declpcm.c
+++ b/libhb/declpcm.c
@@ -143,7 +143,7 @@ static void lpcmInfo( hb_work_object_t *w, hb_buffer_t *in )
pv->samples = ( pv->duration * pv->nchannels * pv->samplerate ) / 90000;
pv->size = pv->chunks * chunk_size;
- pv->next_pts = in->start;
+ pv->next_pts = in->s.start;
}
static int declpcmInit( hb_work_object_t * w, hb_job_t * job )
@@ -218,9 +218,9 @@ static hb_buffer_t *Decode( hb_work_object_t *w )
out = hb_buffer_init( pv->samples * sizeof( float ) );
- out->start = pv->next_pts;
+ out->s.start = pv->next_pts;
pv->next_pts += pv->duration;
- out->stop = pv->next_pts;
+ out->s.stop = pv->next_pts;
float *odat = (float *)out->data;
int count = pv->chunks / pv->nchannels;
diff --git a/libhb/decmpeg2.c b/libhb/decmpeg2.c
index f37ee9022..7379964cf 100644
--- a/libhb/decmpeg2.c
+++ b/libhb/decmpeg2.c
@@ -57,6 +57,11 @@ typedef struct hb_libmpeg2_s
int64_t start; // start time of this frame
hb_buffer_t * cc_buf; // captions for this frame
} tags[NTAGS];
+
+ struct SwsContext *sws_context; // if we have to rescale or convert color space
+ int sws_width;
+ int sws_height;
+ int sws_pix_fmt;
} hb_libmpeg2_t;
/**********************************************************************
@@ -95,7 +100,7 @@ static void cc_send_to_decoder( hb_libmpeg2_t *m, hb_buffer_t *cc_buf )
{
// make a copy of the buf then forward it to the decoder
hb_buffer_t *cpy = hb_buffer_init( cc_buf->size );
- hb_buffer_copy_settings( cpy, cc_buf );
+ cpy->s = cc_buf->s;
memcpy( cpy->data, cc_buf->data, cc_buf->size );
subtitle = hb_list_item( m->list_subtitle, i++ );
@@ -251,21 +256,37 @@ static void next_tag( hb_libmpeg2_t *m, hb_buffer_t *buf_es )
if ( m->tags[m->cur_tag].start < 0 ||
( m->got_iframe && m->tags[m->cur_tag].start >= m->first_pts ) )
hb_log("mpeg2 tag botch: pts %"PRId64", tag pts %"PRId64" buf 0x%p",
- buf_es->start, m->tags[m->cur_tag].start, m->tags[m->cur_tag].cc_buf);
+ buf_es->s.start, m->tags[m->cur_tag].start, m->tags[m->cur_tag].cc_buf);
if ( m->tags[m->cur_tag].cc_buf )
hb_buffer_close( &m->tags[m->cur_tag].cc_buf );
}
- m->tags[m->cur_tag].start = buf_es->start;
+ m->tags[m->cur_tag].start = buf_es->s.start;
mpeg2_tag_picture( m->libmpeg2, m->cur_tag, 0 );
}
-static hb_buffer_t *hb_copy_frame( hb_job_t *job, int width, int height,
- int *crop, enum PixelFormat pixfmt,
- uint8_t* y, uint8_t *u, uint8_t *v )
+static hb_buffer_t *hb_copy_frame( hb_libmpeg2_t *m )
{
+ hb_job_t * job = m->job;
+ int width = m->info->sequence->width;
+ int height = m->info->sequence->height;
+ enum PixelFormat pixfmt = m->pixfmt;
+ uint8_t *y = m->info->display_fbuf->buf[0];
+ uint8_t *u = m->info->display_fbuf->buf[1];
+ uint8_t *v = m->info->display_fbuf->buf[2];
+ int crop[4] = {0};
+
int dst_w, dst_h;
int src_w, src_h;
+ if ( m->info->sequence->picture_width < m->info->sequence->width )
+ {
+ crop[3] = m->info->sequence->width - m->info->sequence->picture_width;
+ }
+ if ( m->info->sequence->picture_height < m->info->sequence->height )
+ {
+ crop[1] = m->info->sequence->height - m->info->sequence->picture_height;
+ }
+
src_w = width - (crop[2] + crop[3]);
src_h = height - (crop[0] + crop[1]);
if ( job )
@@ -280,7 +301,7 @@ static hb_buffer_t *hb_copy_frame( hb_job_t *job, int width, int height,
}
hb_buffer_t *buf = hb_video_buffer_init( dst_w, dst_h );
- buf->start = -1;
+ buf->s.start = -1;
AVPicture in, out, pic_crop;
@@ -290,23 +311,34 @@ static hb_buffer_t *hb_copy_frame( hb_job_t *job, int width, int height,
in.linesize[0] = width;
in.linesize[1] = width>>1;
in.linesize[2] = width>>1;
- avpicture_fill( &out, buf->data, PIX_FMT_YUV420P, dst_w, dst_h );
+ hb_avpicture_fill( &out, buf );
av_picture_crop( &pic_crop, &in, pixfmt, crop[0], crop[2] );
- // Source and Dest dimensions may be the same. There is no speed
- // cost to using sws_scale to simply copy the data.
- struct SwsContext *context = hb_sws_get_context( src_w, src_h, pixfmt,
- dst_w, dst_h, PIX_FMT_YUV420P,
- SWS_LANCZOS|SWS_ACCURATE_RND);
- if ( context == NULL )
+ if ( !m->sws_context ||
+ m->sws_width != src_w ||
+ m->sws_height != src_h ||
+ m->sws_pix_fmt != pixfmt )
{
- hb_buffer_close( &buf );
- return NULL;
+ // Source and Dest dimensions may be the same. There is no speed
+ // cost to using sws_scale to simply copy the data.
+ m->sws_context = hb_sws_get_context( src_w, src_h, pixfmt,
+ dst_w, dst_h, buf->f.fmt,
+ SWS_LANCZOS|SWS_ACCURATE_RND);
+ m->sws_width = src_w;
+ m->sws_height = src_h;
+ m->sws_pix_fmt = pixfmt;
+
+ if ( m->sws_context == NULL )
+ {
+ hb_buffer_close( &buf );
+ return NULL;
+ }
+
}
- sws_scale( context, (const uint8_t* const *)pic_crop.data, pic_crop.linesize, 0, src_h, out.data, out.linesize );
- sws_freeContext( context );
+ sws_scale( m->sws_context, (const uint8_t* const *)pic_crop.data,
+ pic_crop.linesize, 0, src_h, out.data, out.linesize );
return buf;
}
@@ -325,7 +357,7 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
if ( buf_es->size )
{
/* Feed libmpeg2 */
- if( buf_es->start >= 0 )
+ if( buf_es->s.start >= 0 )
{
next_tag( m, buf_es );
}
@@ -353,7 +385,7 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
if (m->tags[m->cur_tag].cc_buf)
{
hb_log("mpeg2 tag botch2: pts %"PRId64", tag pts %"PRId64" buf 0x%p",
- buf_es->start, m->tags[m->cur_tag].start, m->tags[m->cur_tag].cc_buf);
+ buf_es->s.start, m->tags[m->cur_tag].start, m->tags[m->cur_tag].cc_buf);
hb_buffer_close( &m->tags[m->cur_tag].cc_buf );
}
// see if we already made a tag for the timestamp. If so we
@@ -416,23 +448,7 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
if( m->got_iframe )
{
- int crop[4] = {0};
- if ( m->info->sequence->picture_width < m->info->sequence->width )
- {
- crop[3] = m->info->sequence->width - m->info->sequence->picture_width;
- }
- if ( m->info->sequence->picture_height < m->info->sequence->height )
- {
- crop[1] = m->info->sequence->height - m->info->sequence->picture_height;
- }
- buf = hb_copy_frame( m->job,
- m->info->sequence->width,
- m->info->sequence->height,
- crop,
- m->pixfmt,
- m->info->display_fbuf->buf[0],
- m->info->display_fbuf->buf[1],
- m->info->display_fbuf->buf[2] );
+ buf = hb_copy_frame( m );
if ( buf == NULL )
continue;
@@ -442,29 +458,29 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
if( m->info->display_picture->flags & PIC_FLAG_TAGS )
{
int t = m->info->display_picture->tag;
- buf->start = m->tags[t].start;
+ buf->s.start = m->tags[t].start;
cc_buf = m->tags[t].cc_buf;
m->tags[t].start = -1;
m->tags[t].cc_buf = NULL;
}
- if( buf->start < 0 && m->last_pts >= 0 )
+ if( buf->s.start < 0 && m->last_pts >= 0 )
{
/* For some reason nb_fields is sometimes 1 while it
should be 2 */
- buf->start = m->last_pts +
+ buf->s.start = m->last_pts +
MAX( 2, m->info->display_picture->nb_fields ) *
m->info->sequence->frame_period / 600;
}
- if ( buf->start >= 0 )
+ if ( buf->s.start >= 0 )
{
- m->last_pts = buf->start;
+ m->last_pts = buf->s.start;
}
// if we were accumulating captions we now know the timestamp
// so ship them to the decoder.
if ( cc_buf )
{
- cc_buf->start = m->last_pts;
+ cc_buf->s.start = m->last_pts;
cc_send_to_decoder( m, cc_buf );
}
@@ -473,31 +489,35 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
{
// we were waiting for an iframe to insert a chapter mark
// and we have one.
- buf->new_chap = m->look_for_iframe;
+ int new_chap = m->look_for_iframe;
+ buf->s.new_chap = new_chap;
+
m->look_for_iframe = 0;
const char *chap_name = "";
- if ( m->job && buf->new_chap > 0 &&
+ if ( m->job && new_chap > 0 &&
hb_list_item( m->job->title->list_chapter,
- buf->new_chap - 1 ) )
+ new_chap - 1 ) )
{
- hb_chapter_t * c = hb_list_item( m->job->title->list_chapter,
- buf->new_chap - 1 );
+ hb_chapter_t * c = hb_list_item(
+ m->job->title->list_chapter,
+ new_chap - 1 );
chap_name = c->title;
}
hb_log( "mpeg2: \"%s\" (%d) at frame %u time %"PRId64,
- chap_name, buf->new_chap, m->nframes, buf->start );
+ chap_name, new_chap,
+ m->nframes, buf->s.start );
}
else if ( m->nframes == 0 )
{
// this is the first frame returned by the decoder
- m->first_pts = buf->start;
+ m->first_pts = buf->s.start;
if ( m->job && hb_list_item( m->job->title->list_chapter,
m->job->chapter_start - 1 ) )
{
hb_chapter_t * c = hb_list_item( m->job->title->list_chapter,
m->job->chapter_start - 1 );
hb_log( "mpeg2: \"%s\" (%d) at frame %u time %"PRId64,
- c->title, m->job->chapter_start, m->nframes, buf->start );
+ c->title, m->job->chapter_start, m->nframes, buf->s.start );
}
}
++m->nframes;
@@ -505,7 +525,7 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
m->flag = m->info->display_picture->flags;
/* Uncomment this block to see frame-by-frame picture flags, as the video encodes.
- hb_log("***** MPEG 2 Picture Info for PTS %"PRId64" *****", buf->start);
+ hb_log("***** MPEG 2 Picture Info for PTS %"PRId64" *****", buf->s.start);
if( m->flag & TOP_FIRST )
hb_log("MPEG2 Flag: Top field first");
if( m->flag & PROGRESSIVE )
@@ -584,12 +604,11 @@ static int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
}
if ( (m->cadence[2] <= TB) && (m->cadence[1] <= TB) && (m->cadence[0] > TB) && (m->cadence[11]) )
- hb_log("%fs: Video -> Film", (float)buf->start / 90000);
+ hb_log("%fs: Video -> Film", (float)buf->s.start / 90000);
if ( (m->cadence[2] > TB) && (m->cadence[1] <= TB) && (m->cadence[0] <= TB) && (m->cadence[11]) )
- hb_log("%fs: Film -> Video", (float)buf->start / 90000);
+ hb_log("%fs: Film -> Video", (float)buf->s.start / 90000);
- /* Store picture flags for later use by filters */
- buf->flags = m->info->display_picture->flags;
+ buf->s.flags = m->info->display_picture->flags;
hb_list_add( list_raw, buf );
}
@@ -667,6 +686,11 @@ static void hb_libmpeg2_close( hb_libmpeg2_t ** _m )
mpeg2_close( m->libmpeg2 );
+ if ( m->sws_context )
+ {
+ sws_freeContext( m->sws_context );
+ }
+
int i;
for ( i = 0; i < NTAGS; ++i )
{
@@ -754,6 +778,7 @@ static int decmpeg2Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
{
hb_work_private_t * pv = w->private_data;
hb_buffer_t * buf, * last = NULL;
+ hb_buffer_t * in = *buf_in;
int status = HB_WORK_OK;
if( w->title && pv && pv->libmpeg2 && !pv->libmpeg2->title ) {
@@ -763,21 +788,21 @@ static int decmpeg2Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
// The reader found a chapter break. Remove it from the input
// stream. If we're reading (as opposed to scanning) start looking
// for the next GOP start since that's where the chapter begins.
- if( (*buf_in)->new_chap )
+ if( in->s.new_chap )
{
if ( pv->libmpeg2->job )
{
- pv->libmpeg2->look_for_break = (*buf_in)->new_chap;
+ pv->libmpeg2->look_for_break = in->s.new_chap;
}
- (*buf_in)->new_chap = 0;
+ in->s.new_chap = 0;
}
- hb_libmpeg2_decode( pv->libmpeg2, *buf_in, pv->list );
+ hb_libmpeg2_decode( pv->libmpeg2, in, pv->list );
/* if we got an empty buffer signaling end-of-stream send it downstream */
- if ( (*buf_in)->size == 0 )
+ if ( in->size == 0 )
{
- hb_list_add( pv->list, *buf_in );
+ hb_list_add( pv->list, in );
*buf_in = NULL;
status = HB_WORK_DONE;
diff --git a/libhb/decomb.c b/libhb/decomb.c
index b9baa963a..7d911ff28 100644
--- a/libhb/decomb.c
+++ b/libhb/decomb.c
@@ -139,7 +139,6 @@ typedef struct yadif_thread_arg_s {
struct hb_filter_private_s
{
- int pix_fmt;
int width[3];
int height[3];
@@ -221,28 +220,24 @@ struct hb_filter_private_s
// int alternator; // for bobbing parity when framedoubling
};
-hb_filter_private_t * hb_decomb_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_decomb_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_decomb_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_decomb_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_decomb_close( hb_filter_private_t * pv );
+static void hb_decomb_close( hb_filter_object_t * filter );
hb_filter_object_t hb_filter_decomb =
{
- FILTER_DECOMB,
- "Decomb",
- NULL,
- hb_decomb_init,
- hb_decomb_work,
- hb_decomb_close,
+ .id = HB_FILTER_DECOMB,
+ .enforce_order = 1,
+ .name = "Decomb",
+ .settings = NULL,
+ .init = hb_decomb_init,
+ .work = hb_decomb_work,
+ .close = hb_decomb_close,
};
int cubic_interpolate_pixel( int y0, int y1, int y2, int y3 )
@@ -790,7 +785,7 @@ int check_filtered_combing_mask( hb_filter_private_t * pv )
pv->mask_box_x = x;
pv->mask_box_y = y;
- if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
+ if ( block_score <= threshold && !( pv->buf_settings->s.flags & 16) )
{
/* Blend video content that scores between
( threshold / 2 ) and threshold. */
@@ -799,7 +794,7 @@ int check_filtered_combing_mask( hb_filter_private_t * pv )
}
else if( block_score > threshold )
{
- if( pv->buf_settings->flags & 16 )
+ if( pv->buf_settings->s.flags & 16 )
{
/* Blend progressive content above the threshold.*/
pv->mask_box_color = 2;
@@ -919,7 +914,7 @@ int check_combing_mask( hb_filter_private_t * pv )
pv->mask_box_x = x;
pv->mask_box_y = y;
- if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
+ if ( block_score <= threshold && !( pv->buf_settings->s.flags & 16) )
{
/* Blend video content that scores between
( threshold / 2 ) and threshold. */
@@ -928,7 +923,7 @@ int check_combing_mask( hb_filter_private_t * pv )
}
else if( block_score > threshold )
{
- if( pv->buf_settings->flags & 16 )
+ if( pv->buf_settings->s.flags & 16 )
{
/* Blend progressive content above the threshold.*/
pv->mask_box_color = 2;
@@ -1994,29 +1989,22 @@ static void yadif_filter( uint8_t ** dst,
}
}
-hb_filter_private_t * hb_decomb_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_decomb_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
-
- pv->pix_fmt = pix_fmt;
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
- pv->width[0] = width;
- pv->height[0] = height;
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
+ pv->width[0] = hb_image_stride( init->pix_fmt, init->width, 0 );
+ pv->height[0] = hb_image_height( init->pix_fmt, init->height, 0 );
+ pv->width[1] = pv->width[2] = hb_image_stride( init->pix_fmt, init->width, 1 );
+ pv->height[1] = pv->height[2] = hb_image_height( init->pix_fmt, init->height, 1 );
build_gamma_lut( pv );
- pv->buf_out[0] = hb_video_buffer_init( width, height );
- pv->buf_out[1] = hb_video_buffer_init( width, height );
+ pv->buf_out[0] = hb_video_buffer_init( init->width, init->height );
+ pv->buf_out[1] = hb_video_buffer_init( init->width, init->height );
+
pv->buf_settings = hb_buffer_init( 0 );
pv->deinterlaced_frames = 0;
@@ -2049,9 +2037,9 @@ hb_filter_private_t * hb_decomb_init( int pix_fmt,
pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;
int mcdeint_qp = MCDEINT_QP_DEFAULT;
- if( settings )
+ if( filter->settings )
{
- sscanf( settings, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
+ sscanf( filter->settings, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
&pv->mode,
&pv->spatial_metric,
&pv->motion_threshold,
@@ -2084,8 +2072,8 @@ hb_filter_private_t * hb_decomb_init( int pix_fmt,
for( i = 0; i < 3; i++ )
{
int is_chroma = !!i;
- int w = ((width + 31) & (~31))>>is_chroma;
- int h = ((height+6+ 31) & (~31))>>is_chroma;
+ int w = ((init->width + 31) & (~31))>>is_chroma;
+ int h = ((init->height+6+ 31) & (~31))>>is_chroma;
pv->ref_stride[i] = w;
@@ -2110,11 +2098,11 @@ hb_filter_private_t * hb_decomb_init( int pix_fmt,
if( pv->mode & MODE_EEDI2 )
{
/* Allocate half-height eedi2 buffers */
- height = pv->height[0] / 2;
+ int height = pv->height[0] / 2;
for( i = 0; i < 3; i++ )
{
int is_chroma = !!i;
- int w = ((width + 31) & (~31))>>is_chroma;
+ int w = ((init->width + 31) & (~31))>>is_chroma;
int h = ((height+6+ 31) & (~31))>>is_chroma;
for( j = 0; j < 4; j++ )
@@ -2128,7 +2116,7 @@ hb_filter_private_t * hb_decomb_init( int pix_fmt,
for( i = 0; i < 3; i++ )
{
int is_chroma = !!i;
- int w = ((width + 31) & (~31))>>is_chroma;
+ int w = ((init->width + 31) & (~31))>>is_chroma;
int h = ((height+6+ 31) & (~31))>>is_chroma;
for( j = 0; j < 5; j++ )
@@ -2276,14 +2264,17 @@ hb_filter_private_t * hb_decomb_init( int pix_fmt,
}
}
}
+
+ mcdeint_init( &pv->mcdeint, pv->mcdeint_mode, mcdeint_qp,
+ init->pix_fmt, init->width, init->height );
- mcdeint_init( &pv->mcdeint, pv->mcdeint_mode, mcdeint_qp, width, height );
-
- return pv;
+ return 0;
}
-void hb_decomb_close( hb_filter_private_t * pv )
+static void hb_decomb_close( hb_filter_object_t * filter )
{
+ hb_filter_private_t * pv = filter->private_data;
+
if( !pv )
{
return;
@@ -2451,33 +2442,30 @@ void hb_decomb_close( hb_filter_private_t * pv )
mcdeint_close( &pv->mcdeint );
free( pv );
+ filter->private_data = NULL;
}
-int hb_decomb_work( const hb_buffer_t * cbuf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+static int hb_decomb_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
+ hb_avpicture_fill( &pv->pic_in, in );
/* Determine if top-field first layout */
int tff;
if( pv->parity < 0 )
{
- tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
+ tff = !!(in->s.flags & PIC_FLAG_TOP_FIELD_FIRST);
}
else
{
@@ -2492,18 +2480,16 @@ int hb_decomb_work( const hb_buffer_t * cbuf_in,
{
store_ref( (const uint8_t**)pv->pic_in.data, pv );
- hb_buffer_copy_settings( pv->buf_settings, buf_in );
-
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+ pv->buf_settings->s = in->s;
+ hb_buffer_move_subs( pv->buf_settings, in );
pv->yadif_ready = 1;
- return FILTER_DELAY;
+ return HB_FILTER_DELAY;
}
- /* Perform yadif filtering */
- int frame;
+ /* Perform yadif filtering */
+ int frame, out_frame;
for( frame = 0; frame <= ( ( pv->mode & MODE_MCDEINT ) ? 1 : 0 ) ; frame++ )
// This would be what to use for bobbing: for( frame = 0; frame <= 0 ; frame++ )
{
@@ -2531,8 +2517,8 @@ int hb_decomb_work( const hb_buffer_t * cbuf_in,
#endif
pv->tff = !parity;
- avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
- pix_fmt, width, height );
+ hb_buffer_t *b = pv->buf_out[!(frame^1)];
+ hb_avpicture_fill( &pv->pic_out, b );
/* XXX
Should check here and only bother filtering field 2 when
@@ -2548,27 +2534,29 @@ int hb_decomb_work( const hb_buffer_t * cbuf_in,
if( pv->mcdeint_mode >= 0 /* && pv->yadif_arguments[0].is_combed */)
{
/* Perform mcdeint filtering */
- avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
- pix_fmt, width, height );
+ b = pv->buf_out[(frame^1)];
+ hb_avpicture_fill( &pv->pic_in, b );
mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv->width, pv->height, &pv->mcdeint );
- *buf_out = pv->buf_out[(frame^1)];
+ out_frame = frame ^ 1;
}
else
{
- *buf_out = pv->buf_out[!(frame^1)];
+ out_frame = !(frame ^ 1);
}
}
+ *buf_out = pv->buf_out[out_frame];
+ // Allocate a replacement for the buffer we just consumed
+ pv->buf_out[out_frame] = hb_video_buffer_init( pv->width[0], pv->height[0] );
/* Copy buffered settings to output buffer settings */
- hb_buffer_copy_settings( *buf_out, pv->buf_settings );
+ (*buf_out)->s = pv->buf_settings->s;
+ hb_buffer_move_subs( *buf_out, pv->buf_settings );
/* Replace buffered settings with input buffer settings */
- hb_buffer_copy_settings( pv->buf_settings, buf_in );
-
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+ pv->buf_settings->s = in->s;
+ hb_buffer_move_subs( pv->buf_settings, in );
- return FILTER_OK;
+ return HB_FILTER_OK;
}
diff --git a/libhb/decsrtsub.c b/libhb/decsrtsub.c
index daf210d6f..c914e9c91 100644
--- a/libhb/decsrtsub.c
+++ b/libhb/decsrtsub.c
@@ -369,8 +369,8 @@ static hb_buffer_t *srt_read( hb_work_private_t *pv )
if( buffer )
{
- buffer->start = start_time - pv->start_time;
- buffer->stop = stop_time - pv->start_time;
+ buffer->s.start = start_time - pv->start_time;
+ buffer->s.stop = stop_time - pv->start_time;
memcpy( buffer->data, pv->current_entry.text, length + 1 );
}
@@ -438,8 +438,8 @@ static hb_buffer_t *srt_read( hb_work_private_t *pv )
if( buffer )
{
- buffer->start = start_time - pv->start_time;
- buffer->stop = stop_time - pv->start_time;
+ buffer->s.start = start_time - pv->start_time;
+ buffer->s.stop = stop_time - pv->start_time;
memcpy( buffer->data, pv->current_entry.text, length + 1 );
}
diff --git a/libhb/decssasub.c b/libhb/decssasub.c
index 9e1c36b47..0a053e9fe 100644
--- a/libhb/decssasub.c
+++ b/libhb/decssasub.c
@@ -28,9 +28,6 @@
struct hb_work_private_s
{
// If decoding to PICTURESUB format:
- ASS_Library *ssa;
- ASS_Renderer *renderer;
- ASS_Track *ssaTrack;
int readOrder;
hb_job_t *job;
@@ -113,7 +110,7 @@ static void ssa_append_html_tags_for_style_change(
}
static hb_buffer_t *ssa_decode_line_to_utf8( uint8_t *in_data, int in_size, int in_sequence );
-static hb_buffer_t *ssa_decode_line_to_picture( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence );
+static hb_buffer_t *ssa_decode_line_to_mkv_ssa( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence );
/*
* Decodes a single SSA packet to one or more TEXTSUB or PICTURESUB subtitle packets.
@@ -164,7 +161,7 @@ static hb_buffer_t *ssa_decode_packet( hb_work_object_t * w, hb_buffer_t *in )
continue;
}
} else if ( w->subtitle->config.dest == RENDERSUB ) {
- out = ssa_decode_line_to_picture( w, (uint8_t *) curLine, strlen( curLine ), in->sequence );
+ out = ssa_decode_line_to_mkv_ssa( w, (uint8_t *) curLine, strlen( curLine ), in->sequence );
if ( out == NULL )
continue;
}
@@ -188,16 +185,16 @@ static hb_buffer_t *ssa_decode_packet( hb_work_object_t * w, hb_buffer_t *in )
// such that first output packet's display time aligns with the
// input packet's display time. This should give the correct time
// when point-to-point encoding is in effect.
- if (out_list && out_list->start > in->start)
+ if (out_list && out_list->s.start > in->s.start)
{
- int64_t slip = out_list->start - in->start;
+ int64_t slip = out_list->s.start - in->s.start;
hb_buffer_t *out;
out = out_list;
while (out)
{
- out->start -= slip;
- out->stop -= slip;
+ out->s.start -= slip;
+ out->s.stop -= slip;
out = out->next;
}
}
@@ -262,7 +259,7 @@ static hb_buffer_t *ssa_decode_line_to_utf8( uint8_t *in_data, int in_size, int
uint8_t *pos = in_data;
uint8_t *end = in_data + in_size;
- // Parse values for in->start and in->stop
+ // Parse values for in->s.start and in->s.stop
int64_t in_start, in_stop;
if ( parse_timing_from_ssa_packet( (char *) in_data, &in_start, &in_stop ) )
goto fail;
@@ -335,8 +332,8 @@ static hb_buffer_t *ssa_decode_line_to_utf8( uint8_t *in_data, int in_size, int
out->size = dst - out->data;
// Copy metadata from the input packet to the output packet
- out->start = in_start;
- out->stop = in_stop;
+ out->s.start = in_start;
+ out->s.stop = in_stop;
out->sequence = in_sequence;
return out;
@@ -348,7 +345,6 @@ fail:
static hb_buffer_t * ssa_to_mkv_ssa( hb_work_object_t * w, hb_buffer_t * in )
{
- hb_work_private_t * pv = w->private_data;
hb_buffer_t * out_last = NULL;
hb_buffer_t * out_first = NULL;
@@ -361,62 +357,11 @@ static hb_buffer_t * ssa_to_mkv_ssa( hb_work_object_t * w, hb_buffer_t * in )
curLine;
curLine = strtok_r( NULL, EOL, &curLine_parserData ) )
{
- // Skip empty lines and spaces between adjacent CR and LF
- if (curLine[0] == '\0')
- continue;
-
- int64_t in_start, in_stop;
- if ( parse_timing_from_ssa_packet( curLine, &in_start, &in_stop ) )
- continue;
-
- int len = strlen(curLine);
-
- // Convert the SSA line to MKV-SSA format
- char *layerField = malloc( len );
- // SSA subtitles have an empty layer field (bare ','). The scanf
- // format specifier "%*128[^,]" will not match on a bare ','. There
- // must be at least one non ',' character in the match. So the format
- // specifier is placed directly next to the ':' so that the next
- // expected ' ' after the ':' will be the character it matches on
- // when there is no layer field.
- int numPartsRead = sscanf( curLine, "Dialogue:%128[^,],", layerField );
- if ( numPartsRead != 1 )
- {
- free( layerField );
- continue;
- }
-
- char *styleToTextFields = (char *)find_field( (uint8_t*)curLine, (uint8_t*)curLine + len, 4 );
- if ( styleToTextFields == NULL )
- {
- free( layerField );
- continue;
- }
-
- // The output should always be shorter than the input
- hb_buffer_t * out = hb_buffer_init( len );
- char *mkvOut = (char*)out->data;
- out->start = in_start;
- out->stop = in_stop;
-
- // The sscanf conversion above will result in an extra space
- // before the layerField. Strip the space.
- char *stripLayerField = layerField;
- for(; *stripLayerField == ' '; stripLayerField++);
-
- sprintf( mkvOut, "%d,%s,%s",
- pv->readOrder++, stripLayerField, styleToTextFields );
-
- free( layerField );
+ hb_buffer_t * out;
- len = strlen(mkvOut);
- if ( len == 0 )
+ out = ssa_decode_line_to_mkv_ssa( w, (uint8_t *) curLine, strlen( curLine ), in->sequence );
+ if( out )
{
- hb_buffer_close(&out);
- }
- else
- {
- out->size = len;
if ( out_last == NULL )
{
out_last = out_first = out;
@@ -441,154 +386,64 @@ static hb_buffer_t * ssa_to_mkv_ssa( hb_work_object_t * w, hb_buffer_t * in )
* ReadOrder,Marked, Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
* 1 2 3 4 5 6 7 8 9
*/
-static hb_buffer_t *ssa_decode_line_to_picture( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence )
+static hb_buffer_t *ssa_decode_line_to_mkv_ssa( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence )
{
hb_work_private_t * pv = w->private_data;
+ hb_buffer_t * out;
- // Parse values for in->start and in->stop
+ // Parse values for in->s.start and in->s.stop
int64_t in_start, in_stop;
if ( parse_timing_from_ssa_packet( (char *) in_data, &in_start, &in_stop ) )
goto fail;
// Convert the SSA packet to MKV-SSA format, which is what libass expects
char *mkvIn;
- int mkvInSize;
- {
- char *layerField = malloc( in_size );
- // SSA subtitles have an empty layer field (bare ','). The scanf
- // format specifier "%*128[^,]" will not match on a bare ','. There
- // must be at least one non ',' character in the match. So the format
- // specifier is placed directly next to the ':' so that the next
- // expected ' ' after the ':' will be the character it matches on
- // when there is no layer field.
- int numPartsRead = sscanf( (char *) in_data, "Dialogue:%128[^,],", layerField );
- if ( numPartsRead != 1 )
- goto fail;
-
- char *styleToTextFields = (char *) find_field( in_data, in_data + in_size, 4 );
- if ( styleToTextFields == NULL ) {
- free( layerField );
- goto fail;
- }
-
- // The sscanf conversion above will result in an extra space
- // before the layerField. Strip the space.
- char *stripLayerField = layerField;
- for(; *stripLayerField == ' '; stripLayerField++);
-
- mkvIn = malloc( in_size + 1 );
- mkvIn[0] = '\0';
- sprintf(mkvIn, "%d", pv->readOrder++); // ReadOrder: make this up
- strcat( mkvIn, "," );
- strcat( mkvIn, stripLayerField );
- strcat( mkvIn, "," );
- strcat( mkvIn, (char *) styleToTextFields );
-
- mkvInSize = strlen(mkvIn);
-
+ int numPartsRead;
+ char *styleToTextFields;
+ char *layerField = malloc( in_size );
+
+ // SSA subtitles have an empty layer field (bare ','). The scanf
+ // format specifier "%*128[^,]" will not match on a bare ','. There
+ // must be at least one non ',' character in the match. So the format
+ // specifier is placed directly next to the ':' so that the next
+ // expected ' ' after the ':' will be the character it matches on
+ // when there is no layer field.
+    numPartsRead = sscanf( (char *)in_data, "Dialogue:%128[^,],", layerField );
+    if ( numPartsRead != 1 )
+        { free( layerField ); goto fail; }
+
+ styleToTextFields = (char *)find_field( in_data, in_data + in_size, 4 );
+ if ( styleToTextFields == NULL ) {
free( layerField );
+ goto fail;
}
- // Parse MKV-SSA packet
- ass_process_chunk( pv->ssaTrack, mkvIn, mkvInSize, in_start / 90, (in_stop - in_start) / 90 );
-
- free( mkvIn );
-
- // TODO: To support things like karaoke, it won't be sufficient to only generate
- // new subtitle pictures when there are subtitle packets. Rather, pictures will
- // need to be generated potentially continuously.
- //
- // Until "karaoke support" is implemented, make an educated guess about the
- // timepoint within the subtitle that should be rendered. I guess the midpoint.
- int64_t renderTime = ( in_start + in_stop ) / 2;
-
- int changed;
- ASS_Image *frameList = ass_render_frame( pv->renderer, pv->ssaTrack, renderTime / 90, &changed );
- if ( !changed || !frameList )
- return NULL;
-
- int numFrames = 0;
- ASS_Image *curFrame;
- for (curFrame = frameList; curFrame; curFrame = curFrame->next)
- numFrames++;
-
- hb_buffer_t *outSubpictureList = NULL;
- hb_buffer_t **outSubpictureListTailPtr = &outSubpictureList;
+ // The sscanf conversion above will result in an extra space
+ // before the layerField. Strip the space.
+ char *stripLayerField = layerField;
+ for(; *stripLayerField == ' '; stripLayerField++);
+
+ out = hb_buffer_init( in_size + 1 );
+ mkvIn = (char*)out->data;
+
+ mkvIn[0] = '\0';
+ sprintf(mkvIn, "%d", pv->readOrder++); // ReadOrder: make this up
+ strcat( mkvIn, "," );
+ strcat( mkvIn, stripLayerField );
+ strcat( mkvIn, "," );
+ strcat( mkvIn, (char *) styleToTextFields );
- // Generate a PICTURESUB packet from the frames
- ASS_Image *frame;
- for (frame = frameList; frame; frame = frame->next) {
- // Allocate pixmap where drawing will be done
- uint8_t *rgba = calloc(frame->w * frame->h * 4, 1);
-
- unsigned r = (frame->color >> 24) & 0xff;
- unsigned g = (frame->color >> 16) & 0xff;
- unsigned b = (frame->color >> 8) & 0xff;
- unsigned a = (frame->color ) & 0xff;
-
- int x, y;
- for (y = 0; y < frame->h; y++) {
- for (x = 0; x < frame->w; x++) {
- unsigned srcAlphaPrenormalized = frame->bitmap[y*frame->stride + x];
- unsigned srcAlpha = (255 - a) * srcAlphaPrenormalized / 255;
-
- uint8_t *dst = &rgba[(y*frame->w + x) * 4];
- unsigned oldDstAlpha = dst[3];
-
- if (oldDstAlpha == 0) {
- // Optimized version
- dst[0] = r;
- dst[1] = g;
- dst[2] = b;
- dst[3] = srcAlpha;
- } else {
- dst[3] = 255 - ( 255 - dst[3] ) * ( 255 - srcAlpha ) / 255;
- if (dst[3] != 0) {
- dst[0] = ( dst[0] * oldDstAlpha * (255-srcAlpha) / 255 + r * srcAlpha ) / dst[3];
- dst[1] = ( dst[1] * oldDstAlpha * (255-srcAlpha) / 255 + g * srcAlpha ) / dst[3];
- dst[2] = ( dst[2] * oldDstAlpha * (255-srcAlpha) / 255 + b * srcAlpha ) / dst[3];
- }
- }
- }
- }
-
- // Generate output subpicture (in PICTURESUB format)
- hb_buffer_t *out = hb_buffer_init(frame->w * frame->h * 4);
- out->x = frame->dst_x;
- out->y = frame->dst_y;
- out->width = frame->w;
- out->height = frame->h;
- out->start = in_start;
- out->stop = in_stop;
- out->sequence = in_sequence;
-
- int i;
- int numPixels = frame->w * frame->h;
- for (i = 0; i < numPixels; i++) {
- uint8_t *srcRgba = &rgba[i * 4];
-
- uint8_t *dstY = &out->data[(numPixels * 0) + i];
- uint8_t *dstA = &out->data[(numPixels * 1) + i];
- uint8_t *dstU = &out->data[(numPixels * 2) + i];
- uint8_t *dstV = &out->data[(numPixels * 3) + i];
-
- int srcYuv = hb_rgb2yuv((srcRgba[0] << 16) | (srcRgba[1] << 8) | (srcRgba[2] << 0));
- int srcA = srcRgba[3];
-
- *dstY = (srcYuv >> 16) & 0xff;
- *dstV = (srcYuv >> 8 ) & 0xff;
- *dstU = (srcYuv >> 0 ) & 0xff;
- *dstA = srcA / 16; // HB's max alpha value is 16
- }
-
- free(rgba);
-
- *outSubpictureListTailPtr = out;
- outSubpictureListTailPtr = &out->next;
+ out->size = strlen(mkvIn);
+ out->s.start = in_start;
+ out->s.stop = in_stop;
+ out->sequence = in_sequence;
+
+ if( out->size == 0 )
+ {
+ hb_buffer_close(&out);
}
- // NOTE: The subpicture list is actually considered a single packet by most other code
- hb_buffer_t *out = outSubpictureList;
+ free( layerField );
return out;
@@ -597,14 +452,6 @@ fail:
return NULL;
}
-static void ssa_log(int level, const char *fmt, va_list args, void *data)
-{
- if ( level < 5 ) // same as default verbosity when no callback is set
- {
- hb_valog( 1, "[ass]", fmt, args );
- }
-}
-
static int decssaInit( hb_work_object_t * w, hb_job_t * job )
{
hb_work_private_t * pv;
@@ -613,73 +460,6 @@ static int decssaInit( hb_work_object_t * w, hb_job_t * job )
w->private_data = pv;
pv->job = job;
- if ( w->subtitle->config.dest == RENDERSUB ) {
- pv->ssa = ass_library_init();
- if ( !pv->ssa ) {
- hb_log( "decssasub: libass initialization failed\n" );
- return 1;
- }
-
- // Redirect libass output to hb_log
- ass_set_message_cb( pv->ssa, ssa_log, NULL );
-
- // Load embedded fonts
- hb_list_t * list_attachment = job->title->list_attachment;
- int i;
- for ( i = 0; i < hb_list_count(list_attachment); i++ )
- {
- hb_attachment_t * attachment = hb_list_item( list_attachment, i );
-
- if ( attachment->type == FONT_TTF_ATTACH )
- {
- ass_add_font(
- pv->ssa,
- attachment->name,
- attachment->data,
- attachment->size );
- }
- }
-
- ass_set_extract_fonts( pv->ssa, 1 );
- ass_set_style_overrides( pv->ssa, NULL );
-
- pv->renderer = ass_renderer_init( pv->ssa );
- if ( !pv->renderer ) {
- hb_log( "decssasub: renderer initialization failed\n" );
- return 1;
- }
-
- ass_set_use_margins( pv->renderer, 0 );
- ass_set_hinting( pv->renderer, ASS_HINTING_LIGHT ); // VLC 1.0.4 uses this
- ass_set_font_scale( pv->renderer, 1.0 );
- ass_set_line_spacing( pv->renderer, 1.0 );
-
- // Setup default font family
- //
- // SSA v4.00 requires that "Arial" be the default font
- const char *font = NULL;
- const char *family = "Arial";
- // NOTE: This can sometimes block for several *seconds*.
- // It seems that process_fontdata() for some embedded fonts is slow.
- ass_set_fonts( pv->renderer, font, family, /*haveFontConfig=*/1, NULL, 1 );
-
- // Setup track state
- pv->ssaTrack = ass_new_track( pv->ssa );
- if ( !pv->ssaTrack ) {
- hb_log( "decssasub: ssa track initialization failed\n" );
- return 1;
- }
-
- // NOTE: The codec extradata is expected to be in MKV format
- ass_process_codec_private( pv->ssaTrack,
- (char *) w->subtitle->extradata, w->subtitle->extradata_size );
-
- int originalWidth = job->title->width;
- int originalHeight = job->title->height;
- ass_set_frame_size( pv->renderer, originalWidth, originalHeight);
- ass_set_aspect_ratio( pv->renderer, /*dar=*/1.0, /*sar=*/1.0 );
- }
-
return 0;
}
@@ -690,7 +470,7 @@ static int decssaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_t * in = *buf_in;
#if SSA_VERBOSE_PACKETS
- printf("\nPACKET(%"PRId64",%"PRId64"): %.*s\n", in->start/90, in->stop/90, in->size, in->data);
+ printf("\nPACKET(%"PRId64",%"PRId64"): %.*s\n", in->s.start/90, in->s.stop/90, in->size, in->data);
#endif
if ( in->size <= 0 )
@@ -714,15 +494,6 @@ static int decssaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
static void decssaClose( hb_work_object_t * w )
{
- hb_work_private_t * pv = w->private_data;
-
- if ( pv->ssaTrack )
- ass_free_track( pv->ssaTrack );
- if ( pv->renderer )
- ass_renderer_done( pv->renderer );
- if ( pv->ssa )
- ass_library_done( pv->ssa );
-
free( w->private_data );
}
diff --git a/libhb/dectx3gsub.c b/libhb/dectx3gsub.c
index e466ac5d5..c9c6a5f59 100644
--- a/libhb/dectx3gsub.c
+++ b/libhb/dectx3gsub.c
@@ -164,8 +164,8 @@ static hb_buffer_t *tx3g_decode_to_utf8( hb_buffer_t *in )
out->size = dst - out->data;
// Copy metadata from the input packet to the output packet
- out->start = in->start;
- out->stop = in->stop;
+ out->s.start = in->s.start;
+ out->s.stop = in->s.stop;
fail:
free( startStyle );
@@ -197,7 +197,7 @@ static int dectx3gWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
// Warn if the subtitle's duration has not been passed through by the demuxer,
// which will prevent the subtitle from displaying at all
- if ( in->stop == 0 ) {
+ if ( in->s.stop == 0 ) {
hb_log( "dectx3gsub: subtitle packet lacks duration" );
}
diff --git a/libhb/decutf8sub.c b/libhb/decutf8sub.c
index dcd05d4fc..9b9cea57a 100644
--- a/libhb/decutf8sub.c
+++ b/libhb/decutf8sub.c
@@ -32,7 +32,7 @@ static int decutf8Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
// Warn if the subtitle's duration has not been passed through by the demuxer,
// which will prevent the subtitle from displaying at all
- if ( out->stop == 0 ) {
+ if ( out->s.stop == 0 ) {
hb_log( "decutf8sub: subtitle packet lacks duration" );
}
diff --git a/libhb/decvobsub.c b/libhb/decvobsub.c
index 997f14e05..2160d59e2 100644
--- a/libhb/decvobsub.c
+++ b/libhb/decvobsub.c
@@ -96,7 +96,7 @@ int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_DONE;
}
- pv->stream_id = in->id;
+ pv->stream_id = in->s.id;
size_sub = ( in->data[0] << 8 ) | in->data[1];
size_rle = ( in->data[2] << 8 ) | in->data[3];
@@ -113,12 +113,12 @@ int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
pv->buf = hb_buffer_init( 0xFFFF );
memcpy( pv->buf->data, in->data, in->size );
- pv->buf->id = in->id;
+ pv->buf->s.id = in->s.id;
pv->buf->sequence = in->sequence;
pv->size_got = in->size;
- if( in->start >= 0 )
+ if( in->s.start >= 0 )
{
- pv->pts = in->start;
+ pv->pts = in->s.start;
}
}
}
@@ -128,12 +128,12 @@ int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
if( in->size <= pv->size_sub - pv->size_got )
{
memcpy( pv->buf->data + pv->size_got, in->data, in->size );
- pv->buf->id = in->id;
+ pv->buf->s.id = in->s.id;
pv->buf->sequence = in->sequence;
pv->size_got += in->size;
- if( in->start >= 0 )
+ if( in->s.start >= 0 )
{
- pv->pts = in->start;
+ pv->pts = in->s.start;
}
}
else
@@ -158,7 +158,7 @@ int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
if( buf_out && *buf_out )
{
- (*buf_out)->id = in->id;
+ (*buf_out)->s.id = in->s.id;
(*buf_out)->sequence = in->sequence;
}
@@ -335,10 +335,10 @@ static void ParseControls( hb_work_object_t * w )
*/
uint8_t alpha[4];
- alpha[3] = (buf[i+0]>>4)&0x0f;
- alpha[2] = (buf[i+0])&0x0f;
- alpha[1] = (buf[i+1]>>4)&0x0f;
- alpha[0] = (buf[i+1])&0x0f;
+ alpha[3] = ((buf[i+0] >> 4) & 0x0f) << 4;
+ alpha[2] = ((buf[i+0] ) & 0x0f) << 4;
+ alpha[1] = ((buf[i+1] >> 4) & 0x0f) << 4;
+ alpha[0] = ((buf[i+1] ) & 0x0f) << 4;
int lastAlpha = pv->alpha[3] + pv->alpha[2] + pv->alpha[1] + pv->alpha[0];
@@ -416,6 +416,7 @@ static int LineIsTransparent( hb_work_object_t * w, uint8_t * p )
}
return 1;
}
+
static int ColumnIsTransparent( hb_work_object_t * w, uint8_t * p )
{
hb_work_private_t * pv = w->private_data;
@@ -429,6 +430,58 @@ static int ColumnIsTransparent( hb_work_object_t * w, uint8_t * p )
}
return 1;
}
+
+// Brain dead resampler. This should really use swscale...
+// Uses Bresenham algo to pick source samples for averaging
+static void resample( uint8_t * dst, uint8_t * src, int dst_w, int src_w )
+{
+    int dst_x, src_x, err, cnt, sum, val;
+
+    if( dst_w < src_w )
+    {
+        // sample down
+        sum = 0;
+        val = 0;
+        cnt = 0;
+        err = src_w / 2;
+        dst_x = 0;
+        for( src_x = 0; src_x < src_w; src_x++ )
+        {
+            sum += src[src_x];
+            cnt++;
+            err -= dst_w;
+            if( err < 0 )
+            {
+                val = sum / cnt;
+                dst[dst_x++] = val;
+                sum = cnt = 0;
+                err += src_w;
+            }
+        }
+        for( ; dst_x < dst_w; dst_x++ )
+        {
+            dst[dst_x] = val;
+        }
+    }
+    else
+    {
+        // sample up
+        err = 0;
+        err = dst_w / 2;
+        src_x = 0;
+        for( dst_x = 0; dst_x < dst_w; dst_x++ )
+        {
+            dst[dst_x] = src[src_x];
+            err -= src_w;
+            if( err < 0 )
+            {
+                src_x++;
+                err += dst_w;
+            }
+        }
+    }
+}
+
static hb_buffer_t * CropSubtitle( hb_work_object_t * w, uint8_t * raw )
{
hb_work_private_t * pv = w->private_data;
@@ -437,8 +490,7 @@ static hb_buffer_t * CropSubtitle( hb_work_object_t * w, uint8_t * raw )
uint8_t * alpha;
int realwidth, realheight;
hb_buffer_t * buf;
- uint8_t * lum_in, * lum_out, * alpha_in, * alpha_out;
- uint8_t * u_in, * u_out, * v_in, * v_out;
+ uint8_t * lum_in, * alpha_in, * u_in, * v_in;
alpha = raw + pv->width * pv->height;
@@ -491,40 +543,44 @@ static hb_buffer_t * CropSubtitle( hb_work_object_t * w, uint8_t * raw )
realwidth = crop[3] - crop[2] + 1;
realheight = crop[1] - crop[0] + 1;
- buf = hb_buffer_init( realwidth * realheight * 4 );
- buf->start = pv->pts_start;
- buf->stop = pv->pts_stop;
- buf->x = pv->x + crop[2];
- buf->y = pv->y + crop[0];
- buf->width = realwidth;
- buf->height = realheight;
+ buf = hb_pic_buffer_init( PIX_FMT_YUVA420P, realwidth, realheight );
+ buf->s.start = pv->pts_start;
+ buf->s.stop = pv->pts_stop;
+ buf->s.type = SUBTITLE_BUF;
+
+ buf->f.x = pv->x + crop[2];
+ buf->f.y = pv->y + crop[0];
lum_in = raw + crop[0] * pv->width + crop[2];
alpha_in = lum_in + pv->width * pv->height;
u_in = alpha_in + pv->width * pv->height;
v_in = u_in + pv->width * pv->height;
- lum_out = buf->data;
- alpha_out = lum_out + realwidth * realheight;
- u_out = alpha_out + realwidth * realheight;
- v_out = u_out + realwidth * realheight;
-
+ uint8_t *dst;
for( i = 0; i < realheight; i++ )
{
- memcpy( lum_out, lum_in, realwidth );
- memcpy( alpha_out, alpha_in, realwidth );
- memcpy( u_out, u_in, realwidth );
- memcpy( v_out, v_in, realwidth );
+ // Luma
+ dst = buf->plane[0].data + buf->plane[0].stride * i;
+ memcpy( dst, lum_in, realwidth );
+
+ if( ( i & 1 ) == 0 )
+ {
+ // chroma U (resample to YUV420)
+ dst = buf->plane[1].data + buf->plane[1].stride * ( i >> 1 );
+ resample( dst, u_in, buf->plane[1].width, realwidth );
+
+ // chroma V (resample to YUV420)
+ dst = buf->plane[2].data + buf->plane[2].stride * ( i >> 1 );
+ resample( dst, v_in, buf->plane[2].width, realwidth );
+ }
+ // Alpha
+ dst = buf->plane[3].data + buf->plane[3].stride * i;
+ memcpy( dst, alpha_in, realwidth );
lum_in += pv->width;
alpha_in += pv->width;
u_in += pv->width;
v_in += pv->width;
-
- lum_out += realwidth;
- alpha_out += realwidth;
- u_out += realwidth;
- v_out += realwidth;
}
return buf;
@@ -557,8 +613,8 @@ static hb_buffer_t * Decode( hb_work_object_t * w )
if (w->subtitle->config.dest == PASSTHRUSUB)
{
- pv->buf->start = pv->pts_start;
- pv->buf->stop = pv->pts_stop;
+ pv->buf->s.start = pv->pts_start;
+ pv->buf->s.stop = pv->pts_stop;
buf = pv->buf;
pv->buf = NULL;
return buf;
diff --git a/libhb/deinterlace.c b/libhb/deinterlace.c
index bd51e9a77..2ed6ef3e5 100644
--- a/libhb/deinterlace.c
+++ b/libhb/deinterlace.c
@@ -40,7 +40,6 @@ typedef struct yadif_arguments_s {
struct hb_filter_private_s
{
- int pix_fmt;
int width[3];
int height[3];
@@ -61,34 +60,28 @@ struct hb_filter_private_s
int mcdeint_mode;
mcdeint_private_t mcdeint;
- AVPicture pic_in;
- AVPicture pic_out;
hb_buffer_t * buf_out[2];
hb_buffer_t * buf_settings;
};
-hb_filter_private_t * hb_deinterlace_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_deinterlace_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_deinterlace_work( hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_deinterlace_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_deinterlace_close( hb_filter_private_t * pv );
+static void hb_deinterlace_close( hb_filter_object_t * filter );
hb_filter_object_t hb_filter_deinterlace =
{
- FILTER_DEINTERLACE,
- "Deinterlace (ffmpeg or yadif/mcdeint)",
- NULL,
- hb_deinterlace_init,
- hb_deinterlace_work,
- hb_deinterlace_close,
+ .id = HB_FILTER_DEINTERLACE,
+ .enforce_order = 1,
+ .name = "Deinterlace (ffmpeg or yadif/mcdeint)",
+ .settings = NULL,
+ .init = hb_deinterlace_init,
+ .work = hb_deinterlace_work,
+ .close = hb_deinterlace_close,
};
@@ -392,27 +385,19 @@ static void yadif_filter( uint8_t ** dst,
*/
}
-hb_filter_private_t * hb_deinterlace_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_deinterlace_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
- pv->pix_fmt = pix_fmt;
+ pv->width[0] = hb_image_stride( init->pix_fmt, init->width, 0 );
+ pv->height[0] = hb_image_height( init->pix_fmt, init->height, 0 );
+ pv->width[1] = pv->width[2] = hb_image_stride( init->pix_fmt, init->width, 1 );
+ pv->height[1] = pv->height[2] = hb_image_height( init->pix_fmt, init->height, 1 );
- pv->width[0] = width;
- pv->height[0] = height;
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
-
- pv->buf_out[0] = hb_video_buffer_init( width, height );
- pv->buf_out[1] = hb_video_buffer_init( width, height );
+ pv->buf_out[0] = hb_video_buffer_init( init->width, init->height );
+ pv->buf_out[1] = hb_video_buffer_init( init->width, init->height );
pv->buf_settings = hb_buffer_init( 0 );
pv->yadif_ready = 0;
@@ -422,9 +407,9 @@ hb_filter_private_t * hb_deinterlace_init( int pix_fmt,
pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;
int mcdeint_qp = MCDEINT_QP_DEFAULT;
- if( settings )
+ if( filter->settings )
{
- sscanf( settings, "%d:%d:%d:%d",
+ sscanf( filter->settings, "%d:%d:%d:%d",
&pv->yadif_mode,
&pv->yadif_parity,
&pv->mcdeint_mode,
@@ -440,8 +425,8 @@ hb_filter_private_t * hb_deinterlace_init( int pix_fmt,
for( i = 0; i < 3; i++ )
{
int is_chroma = !!i;
- int w = ((width + 31) & (~31))>>is_chroma;
- int h = ((height+6+ 31) & (~31))>>is_chroma;
+ int w = ((init->width + 31) & (~31))>>is_chroma;
+ int h = ((init->height+6+ 31) & (~31))>>is_chroma;
pv->yadif_ref_stride[i] = w;
@@ -491,13 +476,16 @@ hb_filter_private_t * hb_deinterlace_init( int pix_fmt,
}
}
- mcdeint_init( &pv->mcdeint, pv->mcdeint_mode, mcdeint_qp, width, height );
+ mcdeint_init( &pv->mcdeint, pv->mcdeint_mode, mcdeint_qp,
+ init->pix_fmt, init->width, init->height );
- return pv;
+ return 0;
}
-void hb_deinterlace_close( hb_filter_private_t * pv )
+static void hb_deinterlace_close( hb_filter_object_t * filter )
{
+ hb_filter_private_t * pv = filter->private_data;
+
if( !pv )
{
return;
@@ -556,47 +544,52 @@ void hb_deinterlace_close( hb_filter_private_t * pv )
mcdeint_close( &pv->mcdeint );
free( pv );
+ filter->private_data = NULL;
}
-int hb_deinterlace_work( hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+static int hb_deinterlace_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ AVPicture pic_in;
+ AVPicture pic_out;
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
+ hb_avpicture_fill( &pic_in, in );
/* Use libavcodec deinterlace if yadif_mode < 0 */
if( pv->yadif_mode < 0 )
{
- avpicture_fill( &pv->pic_out, pv->buf_out[0]->data,
- pix_fmt, width, height );
+ hb_avpicture_fill( &pic_out, pv->buf_out[0] );
- avpicture_deinterlace( &pv->pic_out, &pv->pic_in,
- pix_fmt, width, height );
+ avpicture_deinterlace( &pic_out, &pic_in, pv->buf_out[0]->f.fmt,
+ pv->buf_out[0]->f.width, pv->buf_out[0]->f.height );
- hb_buffer_copy_settings( pv->buf_out[0], buf_in );
+ pv->buf_out[0]->s = in->s;
+ hb_buffer_move_subs( pv->buf_out[0], in );
*buf_out = pv->buf_out[0];
- return FILTER_OK;
+ // Allocate a replacement for the buffer we just consumed
+ hb_buffer_t * b = pv->buf_out[0];
+ pv->buf_out[0] = hb_video_buffer_init( b->f.width, b->f.height );
+
+ return HB_FILTER_OK;
}
/* Determine if top-field first layout */
int tff;
if( pv->yadif_parity < 0 )
{
- tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
+ tff = !!(in->s.flags & PIC_FLAG_TOP_FIELD_FIRST);
}
else
{
@@ -604,59 +597,65 @@ int hb_deinterlace_work( hb_buffer_t * buf_in,
}
/* Store current frame in yadif cache */
- yadif_store_ref( (const uint8_t**)pv->pic_in.data, pv );
+ yadif_store_ref( (const uint8_t**)pic_in.data, pv );
- /* If yadif is not ready, store another ref and return FILTER_DELAY */
+ /* If yadif is not ready, store another ref and return HB_FILTER_DELAY */
if( pv->yadif_ready == 0 )
{
- yadif_store_ref( (const uint8_t**)pv->pic_in.data, pv );
-
- hb_buffer_copy_settings( pv->buf_settings, buf_in );
+ yadif_store_ref( (const uint8_t**)pic_in.data, pv );
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+ pv->buf_settings->s = in->s;
+ hb_buffer_move_subs( pv->buf_settings, in );
pv->yadif_ready = 1;
- return FILTER_DELAY;
+ return HB_FILTER_DELAY;
}
/* Perform yadif and mcdeint filtering */
int frame;
+ int out_frame;
+ hb_buffer_t * b;
for( frame = 0; frame <= (pv->yadif_mode & 1); frame++ )
{
+ AVPicture pic_yadif_out;
int parity = frame ^ tff ^ 1;
- avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
- pix_fmt, width, height );
+ b = pv->buf_out[!(frame^1)];
+ hb_avpicture_fill( &pic_yadif_out, b );
- yadif_filter( pv->pic_out.data, parity, tff, pv );
+ yadif_filter( pic_yadif_out.data, parity, tff, pv );
if( pv->mcdeint_mode >= 0 )
{
- avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
- pix_fmt, width, height );
+ b = pv->buf_out[(frame^1)];
+ hb_avpicture_fill( &pic_out, b );
- mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv->width, pv->height, &pv->mcdeint );
+ mcdeint_filter( pic_out.data, pic_yadif_out.data, parity,
+ pv->width, pv->height, &pv->mcdeint );
- *buf_out = pv->buf_out[ (frame^1)];
+ out_frame = (frame^1);
}
else
{
- *buf_out = pv->buf_out[!(frame^1)];
+ out_frame = !(frame^1);
}
}
+ *buf_out = pv->buf_out[out_frame];
+
+ // Allocate a replacement for the buffer we just consumed
+ b = pv->buf_out[out_frame];
+ pv->buf_out[out_frame] = hb_video_buffer_init( b->f.width, b->f.height );
/* Copy buffered settings to output buffer settings */
- hb_buffer_copy_settings( *buf_out, pv->buf_settings );
+ (*buf_out)->s = pv->buf_settings->s;
+ hb_buffer_move_subs( *buf_out, pv->buf_settings );
/* Replace buffered settings with input buffer settings */
- hb_buffer_copy_settings( pv->buf_settings, buf_in );
-
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+ pv->buf_settings->s = in->s;
+ hb_buffer_move_subs( pv->buf_settings, in );
- return FILTER_OK;
+ return HB_FILTER_OK;
}
diff --git a/libhb/demuxmpeg.c b/libhb/demuxmpeg.c
index abddecb4b..2d5f0c9a7 100644
--- a/libhb/demuxmpeg.c
+++ b/libhb/demuxmpeg.c
@@ -46,10 +46,10 @@ static inline void check_mpeg_scr( hb_psdemux_t *state, int64_t scr, int tol )
static inline void save_chap( hb_psdemux_t *state, hb_buffer_t *buf )
{
- if ( state && buf->new_chap )
+ if ( state && buf->s.new_chap )
{
- state->new_chap = buf->new_chap;
- buf->new_chap = 0;
+ state->new_chap = buf->s.new_chap;
+ buf->s.new_chap = 0;
}
}
@@ -57,7 +57,7 @@ static inline void restore_chap( hb_psdemux_t *state, hb_buffer_t *buf )
{
if ( state )
{
- buf->new_chap = state->new_chap;
+ buf->s.new_chap = state->new_chap;
state->new_chap = 0;
}
}
@@ -214,10 +214,10 @@ void hb_demux_dvd_ps( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* stat
/* Here we hit we ES payload */
buf_es = hb_buffer_init( pes_packet_end - pos );
- buf_es->id = id;
- buf_es->start = pts;
- buf_es->renderOffset = dts;
- buf_es->stop = -1;
+ buf_es->s.id = id;
+ buf_es->s.start = pts;
+ buf_es->s.renderOffset = dts;
+ buf_es->s.stop = -1;
if ( state && id == 0xE0)
{
// Consume a chapter break, and apply it to the ES.
@@ -248,27 +248,27 @@ void hb_demux_mpeg( hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state )
save_chap( state, buf );
if ( state )
{
- if ( buf->discontinuity )
+ if ( buf->s.discontinuity )
{
// Buffer has been flagged as a discontinuity. This happens
// when a blueray changes clips.
++state->scr_changes;
- state->last_scr = buf->start;
+ state->last_scr = buf->s.start;
}
// we're keeping track of timing (i.e., not in scan)
// check if there's a new pcr in this packet
- if ( buf->pcr >= 0 )
+ if ( buf->s.pcr >= 0 )
{
// we have a new pcr
- check_mpeg_scr( state, buf->pcr, 300 );
- buf->pcr = -1;
+ check_mpeg_scr( state, buf->s.pcr, 300 );
+ buf->s.pcr = -1;
// Some streams have consistantly bad PCRs or SCRs
// So filter out the offset
- if ( buf->start >= 0 )
- state->scr_delta = buf->start - state->last_scr;
+ if ( buf->s.start >= 0 )
+ state->scr_delta = buf->s.start - state->last_scr;
}
- if ( buf->start >= 0 )
+ if ( buf->s.start >= 0 )
{
// Program streams have an SCR in every PACK header so they
// can't lose their clock reference. But the PCR in Transport
@@ -279,7 +279,7 @@ void hb_demux_mpeg( hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state )
// We try to protect against that here by sanity checking
// timestamps against the current reference clock and discarding
// packets where the DTS is "too far" from its clock.
- int64_t fdelta = buf->start - state->last_scr - state->scr_delta;
+ int64_t fdelta = buf->s.start - state->last_scr - state->scr_delta;
if ( fdelta < -300 * 90000LL || fdelta > 300 * 90000LL )
{
// packet too far behind or ahead of its clock reference
@@ -297,11 +297,11 @@ void hb_demux_mpeg( hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state )
// in DTS or PTS is detected. So we need to update
// our scr_delta with each valid timestamp so that
// fdelta does not continually grow.
- state->scr_delta = buf->start - state->last_scr;
+ state->scr_delta = buf->s.start - state->last_scr;
}
if ( state->last_pts >= 0 )
{
- fdelta = buf->start - state->last_pts;
+ fdelta = buf->s.start - state->last_pts;
if ( fdelta < -5 * 90000LL || fdelta > 5 * 90000LL )
{
// Packet too far from last. This may be a NZ TV broadcast
@@ -309,13 +309,13 @@ void hb_demux_mpeg( hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state )
// update. Since it may be a while until they actually tell
// us the new PCR use the PTS as the PCR.
++state->scr_changes;
- state->last_scr = buf->start;
+ state->last_scr = buf->s.start;
}
}
- state->last_pts = buf->start;
+ state->last_pts = buf->s.start;
}
- if ( buf->type == VIDEO_BUF )
+ if ( buf->s.type == VIDEO_BUF )
{
restore_chap( state, buf );
}
@@ -341,13 +341,13 @@ void hb_demux_null( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* state
// if we don't have a time offset yet,
// use this timestamp as the offset.
if ( state->scr_changes == 0 &&
- ( buf->start != -1 || buf->renderOffset != -1 ) )
+ ( buf->s.start != -1 || buf->s.renderOffset != -1 ) )
{
++state->scr_changes;
- state->last_scr = buf->start >= 0 ? buf->start : buf->renderOffset;
+ state->last_scr = buf->s.start >= 0 ? buf->s.start : buf->s.renderOffset;
}
- if ( buf->type == VIDEO_BUF )
+ if ( buf->s.type == VIDEO_BUF )
{
restore_chap( state, buf );
}
diff --git a/libhb/denoise.c b/libhb/denoise.c
index 26e2535a5..dfc58863d 100644
--- a/libhb/denoise.c
+++ b/libhb/denoise.c
@@ -28,41 +28,29 @@
struct hb_filter_private_s
{
- int pix_fmt;
- int width[3];
- int height[3];
-
int hqdn3d_coef[4][512*16];
unsigned int * hqdn3d_line;
- unsigned short * hqdn3d_frame[3];
-
- AVPicture pic_in;
- AVPicture pic_out;
- hb_buffer_t * buf_out;
+ unsigned short * hqdn3d_frame[3];
};
-hb_filter_private_t * hb_denoise_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_denoise_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_denoise_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_denoise_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_denoise_close( hb_filter_private_t * pv );
+static void hb_denoise_close( hb_filter_object_t * filter );
hb_filter_object_t hb_filter_denoise =
{
- FILTER_DENOISE,
- "Denoise (hqdn3d)",
- NULL,
- hb_denoise_init,
- hb_denoise_work,
- hb_denoise_close,
+ .id = HB_FILTER_DENOISE,
+ .enforce_order = 1,
+ .name = "Denoise (hqdn3d)",
+ .settings = NULL,
+ .init = hb_denoise_init,
+ .work = hb_denoise_work,
+ .close = hb_denoise_close,
};
static void hqdn3d_precalc_coef( int * ct,
@@ -293,34 +281,17 @@ static void hqdn3d_denoise( unsigned char * frame_src,
}
}
-hb_filter_private_t * hb_denoise_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_denoise_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
-
- /*
- * Clear the memory to avoid freeing uninitialised memory later.
- */
- memset( pv, 0, sizeof( struct hb_filter_private_s ) );
-
- pv->pix_fmt = pix_fmt;
- pv->width[0] = width;
- pv->height[0] = height;
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
+ filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
+ hb_filter_private_t * pv = filter->private_data;
double spatial_luma, temporal_luma, spatial_chroma, temporal_chroma;
- if( settings )
+ if( filter->settings )
{
- switch( sscanf( settings, "%lf:%lf:%lf:%lf",
+ switch( sscanf( filter->settings, "%lf:%lf:%lf:%lf",
&spatial_luma, &spatial_chroma,
&temporal_luma, &temporal_chroma ) )
{
@@ -361,20 +332,18 @@ hb_filter_private_t * hb_denoise_init( int pix_fmt,
}
}
- pv->hqdn3d_line = malloc( width * sizeof(int) );
-
hqdn3d_precalc_coef( pv->hqdn3d_coef[0], spatial_luma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[1], temporal_luma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[2], spatial_chroma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[3], temporal_chroma );
- pv->buf_out = hb_video_buffer_init( width, height );
-
- return pv;
+ return 0;
}
-void hb_denoise_close( hb_filter_private_t * pv )
+static void hb_denoise_close( hb_filter_object_t * filter )
{
+ hb_filter_private_t * pv = filter->private_data;
+
if( !pv )
{
return;
@@ -400,68 +369,66 @@ void hb_denoise_close( hb_filter_private_t * pv )
free( pv->hqdn3d_frame[2] );
pv->hqdn3d_frame[2] = NULL;
}
- if( pv->buf_out )
- {
- hb_buffer_close( &pv->buf_out );
- }
free( pv );
+ filter->private_data = NULL;
}
-int hb_denoise_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+static int hb_denoise_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in, * out;
+
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
+ out = hb_video_buffer_init( in->f.width, in->f.height );
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
- pix_fmt, width, height );
+ if( !pv->hqdn3d_line )
+ {
+ pv->hqdn3d_line = malloc( in->f.width * sizeof(int) );
+ }
- hqdn3d_denoise( pv->pic_in.data[0],
- pv->pic_out.data[0],
+ hqdn3d_denoise( in->plane[0].data,
+ out->plane[0].data,
pv->hqdn3d_line,
&pv->hqdn3d_frame[0],
- pv->width[0],
- pv->height[0],
+ in->plane[0].stride,
+ in->plane[0].height,
pv->hqdn3d_coef[0],
pv->hqdn3d_coef[0],
pv->hqdn3d_coef[1] );
- hqdn3d_denoise( pv->pic_in.data[1],
- pv->pic_out.data[1],
+ hqdn3d_denoise( in->plane[1].data,
+ out->plane[1].data,
pv->hqdn3d_line,
&pv->hqdn3d_frame[1],
- pv->width[1],
- pv->height[1],
+ in->plane[1].stride,
+ in->plane[1].height,
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[3] );
- hqdn3d_denoise( pv->pic_in.data[2],
- pv->pic_out.data[2],
+ hqdn3d_denoise( in->plane[2].data,
+ out->plane[2].data,
pv->hqdn3d_line,
&pv->hqdn3d_frame[2],
- pv->width[2],
- pv->height[2],
+ in->plane[2].stride,
+ in->plane[2].height,
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[3] );
- hb_buffer_copy_settings( pv->buf_out, buf_in );
+ out->s = in->s;
+ hb_buffer_move_subs( out, in );
- *buf_out = pv->buf_out;
+ *buf_out = out;
- return FILTER_OK;
+ return HB_FILTER_OK;
}
diff --git a/libhb/detelecine.c b/libhb/detelecine.c
index 202103828..0437730f2 100644
--- a/libhb/detelecine.c
+++ b/libhb/detelecine.c
@@ -80,41 +80,29 @@ struct pullup_context
struct hb_filter_private_s
{
- int pix_fmt;
- int width[3];
- int height[3];
-
struct pullup_context * pullup_ctx;
int pullup_fakecount;
int pullup_skipflag;
-
- AVPicture pic_in;
- AVPicture pic_out;
- hb_buffer_t * buf_out;
};
-hb_filter_private_t * hb_detelecine_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_detelecine_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_detelecine_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_detelecine_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_detelecine_close( hb_filter_private_t * pv );
+static void hb_detelecine_close( hb_filter_object_t * filter );
hb_filter_object_t hb_filter_detelecine =
{
- FILTER_DETELECINE,
- "Detelecine (pullup)",
- NULL,
- hb_detelecine_init,
- hb_detelecine_work,
- hb_detelecine_close,
+ .id = HB_FILTER_DETELECINE,
+ .enforce_order = 1,
+ .name = "Detelecine (pullup)",
+ .settings = NULL,
+ .init = hb_detelecine_init,
+ .work = hb_detelecine_work,
+ .close = hb_detelecine_close,
};
/*
@@ -812,25 +800,11 @@ void pullup_flush_fields( struct pullup_context * c )
*
*/
-hb_filter_private_t * hb_detelecine_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_detelecine_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
-
- pv->pix_fmt = pix_fmt;
- pv->width[0] = width;
- pv->height[0] = height;
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
-
- pv->buf_out = hb_video_buffer_init( width, height );
+ filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
+ hb_filter_private_t * pv = filter->private_data;
struct pullup_context * ctx;
pv->pullup_ctx = ctx = pullup_alloc_context();
@@ -841,9 +815,9 @@ hb_filter_private_t * hb_detelecine_init( int pix_fmt,
ctx->metric_plane = 0;
ctx->parity = -1;
- if( settings )
+ if( filter->settings )
{
- sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
+ sscanf( filter->settings, "%d:%d:%d:%d:%d:%d:%d",
&ctx->junk_left,
&ctx->junk_right,
&ctx->junk_top,
@@ -861,19 +835,19 @@ hb_filter_private_t * hb_detelecine_init( int pix_fmt,
ctx->bpp[0] = ctx->bpp[1] = ctx->bpp[2] = 8;
ctx->background[1] = ctx->background[2] = 128;
- ctx->w[0] = pv->width[0];
- ctx->h[0] = pv->height[0];
- ctx->stride[0] = pv->width[0];
+ ctx->w[0] = init->width;
+ ctx->h[0] = hb_image_height( init->pix_fmt, init->height, 0 );
+ ctx->stride[0] = hb_image_stride( init->pix_fmt, init->width, 0 );
- ctx->w[1] = pv->width[1];
- ctx->h[1] = pv->height[1];
- ctx->stride[1] = pv->width[1];
+ ctx->w[1] = init->width >> 1;
+ ctx->h[1] = hb_image_height( init->pix_fmt, init->height, 1 );
+ ctx->stride[1] = hb_image_stride( init->pix_fmt, init->width, 1 );
- ctx->w[2] = pv->width[2];
- ctx->h[2] = pv->height[2];
- ctx->stride[2] = pv->width[2];
+ ctx->w[1] = init->width >> 1;
+ ctx->h[2] = hb_image_height( init->pix_fmt, init->height, 2 );
+ ctx->stride[2] = hb_image_stride( init->pix_fmt, init->width, 2 );
- ctx->w[3] = ((width+15)/16) * ((height+15)/16);
+ ctx->w[3] = ((init->width+15)/16) * ((init->height+15)/16);
ctx->h[3] = 2;
ctx->stride[3] = ctx->w[3];
@@ -886,42 +860,40 @@ hb_filter_private_t * hb_detelecine_init( int pix_fmt,
pv->pullup_fakecount = 1;
pv->pullup_skipflag = 0;
- return pv;
+ return 0;
}
-void hb_detelecine_close( hb_filter_private_t * pv )
+static void hb_detelecine_close( hb_filter_object_t * filter )
{
+ hb_filter_private_t * pv = filter->private_data;
+
if( !pv )
{
return;
}
- if( pv->buf_out )
- {
- hb_buffer_close( &pv->buf_out );
- }
-
if( pv->pullup_ctx )
{
pullup_free_context( pv->pullup_ctx );
}
free( pv );
+ filter->private_data = NULL;
}
-int hb_detelecine_work( const hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+
+static int hb_detelecine_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in, * out;
+
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
struct pullup_context * ctx = pv->pullup_ctx;
@@ -934,26 +906,18 @@ int hb_detelecine_work( const hb_buffer_t * buf_in,
frame = pullup_get_frame( ctx );
pullup_release_frame( frame );
hb_log( "Could not get buffer from pullup!" );
- return FILTER_FAILED;
+ return HB_FILTER_FAILED;
}
/* Copy input buffer into pullup buffer */
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
-
- hb_buffer_copy_settings( pv->buf_out, buf_in );
-
- memcpy( buf->planes[0], pv->pic_in.data[0],
- pv->width[0] * pv->height[0] * sizeof(uint8_t) );
- memcpy( buf->planes[1], pv->pic_in.data[1],
- pv->width[1] * pv->height[1] * sizeof(uint8_t) );
- memcpy( buf->planes[2], pv->pic_in.data[2],
- pv->width[2] * pv->height[2] * sizeof(uint8_t) );
+ memcpy( buf->planes[0], in->plane[0].data, in->plane[0].size );
+ memcpy( buf->planes[1], in->plane[1].data, in->plane[1].size );
+ memcpy( buf->planes[2], in->plane[2].data, in->plane[2].size );
/* Submit buffer fields based on buffer flags.
Detelecine assumes BFF when the TFF flag isn't present. */
int parity = 1;
- if( buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST )
+ if( in->s.flags & PIC_FLAG_TOP_FIELD_FIRST )
{
/* Source signals TFF */
parity = 0;
@@ -971,7 +935,7 @@ int hb_detelecine_work( const hb_buffer_t * buf_in,
}
pullup_submit_field( ctx, buf, parity );
pullup_submit_field( ctx, buf, parity^1 );
- if( buf_in->flags & PIC_FLAG_REPEAT_FIRST_FIELD )
+ if( in->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD )
{
pullup_submit_field( ctx, buf, parity );
}
@@ -985,7 +949,8 @@ int hb_detelecine_work( const hb_buffer_t * buf_in,
{
pv->pullup_fakecount--;
- memcpy( pv->buf_out->data, buf_in->data, buf_in->size );
+ *buf_in = NULL;
+ *buf_out = in;
goto output_frame;
}
@@ -1009,7 +974,7 @@ int hb_detelecine_work( const hb_buffer_t * buf_in,
{
pullup_release_frame( frame );
- if( !(buf_in->flags & PIC_FLAG_REPEAT_FIRST_FIELD) )
+ if( !(in->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD) )
{
goto discard_frame;
}
@@ -1034,30 +999,30 @@ int hb_detelecine_work( const hb_buffer_t * buf_in,
pullup_pack_frame( ctx, frame );
}
- /* Copy pullup frame buffer into output buffer */
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
- pix_fmt, width, height );
+ out = hb_video_buffer_init( in->f.width, in->f.height );
- memcpy( pv->pic_out.data[0], frame->buffer->planes[0],
- pv->width[0] * pv->height[0] * sizeof(uint8_t) );
- memcpy( pv->pic_out.data[1], frame->buffer->planes[1],
- pv->width[1] * pv->height[1] * sizeof(uint8_t) );
- memcpy( pv->pic_out.data[2], frame->buffer->planes[2],
- pv->width[2] * pv->height[2] * sizeof(uint8_t) );
+ /* Copy pullup frame buffer into output buffer */
+ memcpy( out->plane[0].data, frame->buffer->planes[0], in->plane[0].size );
+ memcpy( out->plane[1].data, frame->buffer->planes[1], in->plane[1].size );
+ memcpy( out->plane[2].data, frame->buffer->planes[2], in->plane[2].size );
pullup_release_frame( frame );
+ out->s = in->s;
+ hb_buffer_move_subs( out, in );
+
+ *buf_out = out;
+
output_frame:
- *buf_out = pv->buf_out;
- return FILTER_OK;
+
+ return HB_FILTER_OK;
/* This and all discard_frame calls shown above are
the result of me restoring the functionality in
pullup that huevos_rancheros disabled because
HB couldn't handle it. */
discard_frame:
- *buf_out = pv->buf_out;
- return FILTER_DROP;
+ return HB_FILTER_OK;
}
diff --git a/libhb/dvd.c b/libhb/dvd.c
index 36798c2c1..338dd12d1 100644
--- a/libhb/dvd.c
+++ b/libhb/dvd.c
@@ -405,7 +405,7 @@ static hb_title_t * hb_dvdread_title_scan( hb_dvd_t * e, int t, uint64_t min_dur
audio->config.lang.type = lang_extension;
- lang = lang_for_code( vts->vtsi_mat->vts_audio_attr[i].lang_code );
+ lang = lang_for_code( lang_code );
snprintf( audio->config.lang.description, sizeof( audio->config.lang.description ), "%s (%s)",
strlen(lang->native_name) ? lang->native_name : lang->eng_name,
@@ -832,7 +832,6 @@ int is_nav_pack( unsigned char *buf )
}
}
-
/***********************************************************************
* hb_dvdread_read
***********************************************************************
@@ -1065,7 +1064,7 @@ static hb_buffer_t * hb_dvdread_read( hb_dvd_t * e )
if( d->cell_overlap )
{
- b->new_chap = hb_dvdread_is_break( d );
+ b->s.new_chap = hb_dvdread_is_break( d );
d->cell_overlap = 0;
}
}
diff --git a/libhb/dvdnav.c b/libhb/dvdnav.c
index 1de7444af..52d062b06 100644
--- a/libhb/dvdnav.c
+++ b/libhb/dvdnav.c
@@ -553,17 +553,19 @@ static hb_title_t * hb_dvdnav_title_scan( hb_dvd_t * e, int t, uint64_t min_dura
audio->config.lang.type = lang_extension;
- lang = lang_for_code( ifo->vtsi_mat->vts_audio_attr[i].lang_code );
+ lang = lang_for_code( lang_code );
- snprintf( audio->config.lang.description, sizeof( audio->config.lang.description ), "%s (%s)",
+ snprintf( audio->config.lang.description,
+ sizeof( audio->config.lang.description ), "%s (%s)",
strlen(lang->native_name) ? lang->native_name : lang->eng_name,
- audio->config.in.codec == HB_ACODEC_AC3 ? "AC3" : ( audio->config.in.codec ==
- HB_ACODEC_DCA ? "DTS" : ( audio->config.in.codec ==
- HB_ACODEC_FFMPEG ? "MPEG" : "LPCM" ) ) );
- snprintf( audio->config.lang.simple, sizeof( audio->config.lang.simple ), "%s",
+ audio->config.in.codec == HB_ACODEC_AC3 ? "AC3" :
+ ( audio->config.in.codec == HB_ACODEC_DCA ? "DTS" :
+ ( audio->config.in.codec == HB_ACODEC_FFMPEG ? "MPEG" : "LPCM" ) ) );
+ snprintf( audio->config.lang.simple,
+ sizeof( audio->config.lang.simple ), "%s",
strlen(lang->native_name) ? lang->native_name : lang->eng_name );
- snprintf( audio->config.lang.iso639_2, sizeof( audio->config.lang.iso639_2 ), "%s",
- lang->iso639_2);
+ snprintf( audio->config.lang.iso639_2,
+ sizeof( audio->config.lang.iso639_2 ), "%s", lang->iso639_2);
switch( lang_extension )
{
@@ -1621,7 +1623,7 @@ static hb_buffer_t * hb_dvdnav_read( hb_dvd_t * e )
// The muxers expect to only get chapter 2 and above
// They write chapter 1 when chapter 2 is detected.
if (chapter > 1)
- b->new_chap = chapter;
+ b->s.new_chap = chapter;
chapter = 0;
error_count = 0;
return b;
@@ -1762,7 +1764,7 @@ static hb_buffer_t * hb_dvdnav_read( hb_dvd_t * e )
// The muxers expect to only get chapter 2 and above
// They write chapter 1 when chapter 2 is detected.
if (chapter > 1)
- b->new_chap = chapter;
+ b->s.new_chap = chapter;
chapter = 0;
return b;
diff --git a/libhb/eedi2.c b/libhb/eedi2.c
index 2aa906ef0..2e3741ad5 100644
--- a/libhb/eedi2.c
+++ b/libhb/eedi2.c
@@ -1191,15 +1191,15 @@ void eedi2_interpolate_lattice( const int plane, uint8_t * dmskp, int dmsk_pitch
}
}
if( x > 1 && x < width - 2 &&
- ( dstp[x] < MAX( dstp[x-2], dstp[x-1] ) - 3 &&
+ ( ( dstp[x] < MAX( dstp[x-2], dstp[x-1] ) - 3 &&
dstp[x] < MAX( dstp[x+2], dstp[x+1] ) - 3 &&
dstpnn[x] < MAX( dstpnn[x-2], dstpnn[x-1] ) - 3 &&
dstpnn[x] < MAX( dstpnn[x+2], dstpnn[x+1] ) - 3 )
||
- ( dstp[x] > MIN( dstp[x-2], dstp[x-1] ) + 3 &&
+ ( dstp[x] > MIN( dstp[x-2], dstp[x-1] ) + 3 &&
dstp[x] > MIN( dstp[x+2], dstp[x+1] ) + 3 &&
dstpnn[x] > MIN( dstpnn[x-2], dstpnn[x-1] ) + 3 &&
- dstpnn[x] > MIN( dstpnn[x+2], dstpnn[x+1] ) + 3 ) )
+ dstpnn[x] > MIN( dstpnn[x+2], dstpnn[x+1] ) + 3 ) ) )
{
dstpn[x] = ( dstp[x] + dstpnn[x] + 1 ) >> 1;
dmskp[x] = 128;
diff --git a/libhb/encavcodec.c b/libhb/encavcodec.c
index a3164f4a8..4744712c7 100644
--- a/libhb/encavcodec.c
+++ b/libhb/encavcodec.c
@@ -334,8 +334,8 @@ void encavcodecClose( hb_work_object_t * w )
static void save_frame_info( hb_work_private_t * pv, hb_buffer_t * in )
{
int i = pv->frameno_in & FRAME_INFO_MASK;
- pv->frame_info[i].start = in->start;
- pv->frame_info[i].stop = in->stop;
+ pv->frame_info[i].start = in->s.start;
+ pv->frame_info[i].stop = in->s.stop;
}
static int64_t get_frame_start( hb_work_private_t * pv, int64_t frameno )
@@ -356,7 +356,7 @@ static void compute_dts_offset( hb_work_private_t * pv, hb_buffer_t * buf )
{
if ( ( pv->frameno_in - 1 ) == pv->job->areBframes )
{
- pv->dts_delay = buf->start;
+ pv->dts_delay = buf->s.start;
pv->job->config.h264.init_delay = pv->dts_delay;
}
}
@@ -401,7 +401,7 @@ static hb_buffer_t * process_delay_list( hb_work_private_t * pv, hb_buffer_t * b
// Note that start Nth frame != start time this buffer since the
// output buffers have rearranged start times.
int64_t start = get_frame_start( pv, pv->frameno_out );
- buf->renderOffset = start - pv->dts_delay;
+ buf->s.renderOffset = start - pv->dts_delay;
return buf;
}
else
@@ -415,7 +415,7 @@ static hb_buffer_t * process_delay_list( hb_work_private_t * pv, hb_buffer_t * b
// Note that start Nth frame != start time this buffer since the
// output buffers have rearranged start times.
int64_t start = get_frame_start( pv, pv->frameno_out );
- buf->renderOffset = start - pv->dts_delay;
+ buf->s.renderOffset = start - pv->dts_delay;
buf = buf->next;
}
buf = pv->delay_head;
@@ -425,7 +425,7 @@ static hb_buffer_t * process_delay_list( hb_work_private_t * pv, hb_buffer_t * b
}
else if ( buf )
{
- buf->renderOffset = buf->start - pv->dts_delay;
+ buf->s.renderOffset = buf->s.start - pv->dts_delay;
return buf;
}
return NULL;
@@ -453,12 +453,13 @@ int encavcodecWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
}
frame = avcodec_alloc_frame();
- frame->data[0] = in->data;
- frame->data[1] = frame->data[0] + job->width * job->height;
- frame->data[2] = frame->data[1] + job->width * job->height / 4;
- frame->linesize[0] = job->width;
- frame->linesize[1] = job->width / 2;
- frame->linesize[2] = job->width / 2;
+ frame->data[0] = in->plane[0].data;
+ frame->data[1] = in->plane[1].data;
+ frame->data[2] = in->plane[2].data;
+ frame->linesize[0] = in->plane[0].stride;
+ frame->linesize[1] = in->plane[1].stride;
+ frame->linesize[2] = in->plane[2].stride;
+
// For constant quality, setting the quality in AVCodecContext
// doesn't do the trick. It must be set in the AVFrame.
frame->quality = pv->context->global_quality;
@@ -466,7 +467,7 @@ int encavcodecWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
// Bizarro ffmpeg appears to require the input AVFrame.pts to be
// set to a frame number. Setting it to an actual pts causes
// jerky video.
- // frame->pts = in->start;
+ // frame->pts = in->s.start;
frame->pts = ++pv->frameno_in;
// Remember info about this frame that we need to pass across
@@ -487,43 +488,43 @@ int encavcodecWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
else
{
int64_t frameno = pv->context->coded_frame->pts;
- buf->start = get_frame_start( pv, frameno );
- buf->stop = get_frame_stop( pv, frameno );
- buf->flags &= ~HB_FRAME_REF;
+ buf->s.start = get_frame_start( pv, frameno );
+ buf->s.stop = get_frame_stop( pv, frameno );
+ buf->s.flags &= ~HB_FRAME_REF;
switch ( pv->context->coded_frame->pict_type )
{
case AV_PICTURE_TYPE_P:
{
- buf->frametype = HB_FRAME_P;
+ buf->s.frametype = HB_FRAME_P;
} break;
case AV_PICTURE_TYPE_B:
{
- buf->frametype = HB_FRAME_B;
+ buf->s.frametype = HB_FRAME_B;
} break;
case AV_PICTURE_TYPE_S:
{
- buf->frametype = HB_FRAME_P;
+ buf->s.frametype = HB_FRAME_P;
} break;
case AV_PICTURE_TYPE_SP:
{
- buf->frametype = HB_FRAME_P;
+ buf->s.frametype = HB_FRAME_P;
} break;
case AV_PICTURE_TYPE_BI:
case AV_PICTURE_TYPE_SI:
case AV_PICTURE_TYPE_I:
{
- buf->flags |= HB_FRAME_REF;
+ buf->s.flags |= HB_FRAME_REF;
if ( pv->context->coded_frame->key_frame )
{
- buf->frametype = HB_FRAME_IDR;
+ buf->s.frametype = HB_FRAME_IDR;
}
else
{
- buf->frametype = HB_FRAME_I;
+ buf->s.frametype = HB_FRAME_I;
}
} break;
@@ -531,12 +532,12 @@ int encavcodecWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
{
if ( pv->context->coded_frame->key_frame )
{
- buf->flags |= HB_FRAME_REF;
- buf->frametype = HB_FRAME_KEY;
+ buf->s.flags |= HB_FRAME_REF;
+ buf->s.frametype = HB_FRAME_KEY;
}
else
{
- buf->frametype = HB_FRAME_REF;
+ buf->s.frametype = HB_FRAME_REF;
}
} break;
}
diff --git a/libhb/encavcodecaudio.c b/libhb/encavcodecaudio.c
index 3a23256fa..fc692e4be 100644
--- a/libhb/encavcodecaudio.c
+++ b/libhb/encavcodecaudio.c
@@ -260,10 +260,11 @@ static hb_buffer_t * Encode( hb_work_object_t * w )
buf->size = avcodec_encode_audio( pv->context, buf->data, buf->alloc,
(short*)pv->buf );
- buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
- buf->stop = buf->start + 90000 * pv->samples_per_frame / audio->config.out.samplerate;
+ buf->s.start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
+ buf->s.stop = buf->s.start + 90000 * pv->samples_per_frame / audio->config.out.samplerate;
- buf->frametype = HB_FRAME_AUDIO;
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
if ( !buf->size )
{
diff --git a/libhb/encfaac.c b/libhb/encfaac.c
index 36c69a24d..38b44f08d 100644
--- a/libhb/encfaac.c
+++ b/libhb/encfaac.c
@@ -231,10 +231,11 @@ static hb_buffer_t * Encode( hb_work_object_t * w )
hb_buffer_t * buf = hb_buffer_init( size );
memcpy( buf->data, pv->obuf, size );
buf->size = size;
- buf->start = pv->pts;
+ buf->s.start = pv->pts;
pv->pts += pv->framedur;
- buf->stop = pv->pts;
- buf->frametype = HB_FRAME_AUDIO;
+ buf->s.stop = pv->pts;
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
return buf;
}
return NULL;
diff --git a/libhb/enclame.c b/libhb/enclame.c
index 000e0cef5..04e6ae6d8 100644
--- a/libhb/enclame.c
+++ b/libhb/enclame.c
@@ -144,14 +144,15 @@ static hb_buffer_t * Encode( hb_work_object_t * w )
}
buf = hb_buffer_init( pv->output_bytes );
- buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
- buf->stop = buf->start + 90000 * 1152 / audio->config.out.samplerate;
- pv->pts = buf->stop;
+ buf->s.start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
+ buf->s.stop = buf->s.start + 90000 * 1152 / audio->config.out.samplerate;
+ pv->pts = buf->s.stop;
buf->size = lame_encode_buffer_float(
pv->lame, samples[0], samples[1],
1152, buf->data, LAME_MAXMP3BUFFER );
- buf->frametype = HB_FRAME_AUDIO;
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
if( !buf->size )
{
@@ -188,9 +189,12 @@ int enclameWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
buf = hb_buffer_init( pv->output_bytes );
buf->size = lame_encode_flush( pv->lame, buf->data, LAME_MAXMP3BUFFER );
- buf->start = pv->pts;
- buf->stop = buf->start + 90000 * 1152 / audio->config.out.samplerate;
- buf->frametype = HB_FRAME_AUDIO;
+ buf->s.start = pv->pts;
+ buf->s.stop = buf->s.start + 90000 * 1152 / audio->config.out.samplerate;
+
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
+
if( buf->size <= 0 )
{
hb_buffer_close( &buf );
diff --git a/libhb/enctheora.c b/libhb/enctheora.c
index 20d532362..b4f3bb4be 100644
--- a/libhb/enctheora.c
+++ b/libhb/enctheora.c
@@ -319,16 +319,18 @@ int enctheoraWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
// Y
ycbcr[0].width = frame_width;
ycbcr[0].height = frame_height;
- ycbcr[0].stride = job->width;
// CbCr decimated by factor of 2 in both width and height
ycbcr[1].width = ycbcr[2].width = (frame_width + 1) / 2;
ycbcr[1].height = ycbcr[2].height = (frame_height + 1) / 2;
- ycbcr[1].stride = ycbcr[2].stride = (job->width + 1) / 2;
- ycbcr[0].data = in->data;
- ycbcr[1].data = ycbcr[0].data + (ycbcr[0].stride * job->height);
- ycbcr[2].data = ycbcr[1].data + (ycbcr[1].stride * ((job->height+1)/2));
+ ycbcr[0].stride = in->plane[0].stride;
+ ycbcr[1].stride = in->plane[1].stride;
+ ycbcr[2].stride = in->plane[2].stride;
+
+ ycbcr[0].data = in->plane[0].data;
+ ycbcr[1].data = in->plane[1].data;
+ ycbcr[2].data = in->plane[2].data;
th_encode_ycbcr_in( pv->ctx, ycbcr );
@@ -358,9 +360,12 @@ int enctheoraWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
buf = hb_buffer_init( op.bytes + sizeof(op) );
memcpy(buf->data, &op, sizeof(op));
memcpy(buf->data + sizeof(op), op.packet, op.bytes);
- buf->frametype = ( th_packet_iskeyframe(&op) ) ? HB_FRAME_KEY : HB_FRAME_REF;
- buf->start = in->start;
- buf->stop = in->stop;
+ buf->f.fmt = PIX_FMT_YUV420P;
+ buf->f.width = frame_width;
+ buf->f.height = frame_height;
+ buf->s.frametype = ( th_packet_iskeyframe(&op) ) ? HB_FRAME_KEY : HB_FRAME_REF;
+ buf->s.start = in->s.start;
+ buf->s.stop = in->s.stop;
*buf_out = buf;
diff --git a/libhb/encvorbis.c b/libhb/encvorbis.c
index cb38351cd..d48373e09 100644
--- a/libhb/encvorbis.c
+++ b/libhb/encvorbis.c
@@ -224,9 +224,12 @@ static hb_buffer_t * Flush( hb_work_object_t * w )
memcpy( buf->data + sizeof( ogg_packet ), op.packet,
op.bytes );
blocksize = vorbis_packet_blocksize(&pv->vi, &op);
- buf->frametype = HB_FRAME_AUDIO;
- buf->start = (int64_t)(vorbis_granule_time(&pv->vd, op.granulepos) * 90000);
- buf->stop = (int64_t)(vorbis_granule_time(&pv->vd, (pv->prev_blocksize + blocksize)/4 + op.granulepos) * 90000);
+
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
+
+ buf->s.start = (int64_t)(vorbis_granule_time(&pv->vd, op.granulepos) * 90000);
+ buf->s.stop = (int64_t)(vorbis_granule_time(&pv->vd, (pv->prev_blocksize + blocksize)/4 + op.granulepos) * 90000);
/* The stop time isn't accurate for the first ~3 packets, as the actual blocksize depends on the previous _and_ current packets. */
pv->prev_blocksize = blocksize;
return buf;
diff --git a/libhb/encx264.c b/libhb/encx264.c
index d086a3422..253b95b87 100644
--- a/libhb/encx264.c
+++ b/libhb/encx264.c
@@ -338,12 +338,11 @@ int encx264Init( hb_work_object_t * w, hb_job_t * job )
pv->pic_in.img.i_csp = X264_CSP_I420;
pv->pic_in.img.i_plane = 3;
- pv->pic_in.img.i_stride[0] = job->width;
- pv->pic_in.img.i_stride[2] = pv->pic_in.img.i_stride[1] = ( ( job->width + 1 ) >> 1 );
if( job->grayscale )
{
- int uvsize = ( (job->width + 1) >> 1 ) * ( (job->height + 1) >> 1 );
+ int uvsize = hb_image_stride( PIX_FMT_YUV420P, job->width, 1 ) *
+ hb_image_height( PIX_FMT_YUV420P, job->height, 1 );
pv->grey_data = malloc( uvsize );
memset( pv->grey_data, 0x80, uvsize );
pv->pic_in.img.plane[1] = pv->pic_in.img.plane[2] = pv->grey_data;
@@ -375,8 +374,8 @@ void encx264Close( hb_work_object_t * w )
*/
static void save_frame_info( hb_work_private_t * pv, hb_buffer_t * in )
{
- int i = (in->start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;
- pv->frame_info[i].duration = in->stop - in->start;
+ int i = (in->s.start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;
+ pv->frame_info[i].duration = in->s.stop - in->s.start;
}
static int64_t get_frame_duration( hb_work_private_t * pv, int64_t pts )
@@ -395,13 +394,13 @@ static hb_buffer_t *nal_encode( hb_work_object_t *w, x264_picture_t *pic_out,
/* Should be way too large */
buf = hb_video_buffer_init( job->width, job->height );
buf->size = 0;
- buf->frametype = 0;
+ buf->s.frametype = 0;
// use the pts to get the original frame's duration.
int64_t duration = get_frame_duration( pv, pic_out->i_pts );
- buf->start = pic_out->i_pts;
- buf->stop = pic_out->i_pts + duration;
- buf->renderOffset = pic_out->i_dts;
+ buf->s.start = pic_out->i_pts;
+ buf->s.stop = pic_out->i_pts + duration;
+ buf->s.renderOffset = pic_out->i_dts;
if ( !w->config->h264.init_delay && pic_out->i_dts < 0 )
{
w->config->h264.init_delay = -pic_out->i_dts;
@@ -447,47 +446,47 @@ static hb_buffer_t *nal_encode( hb_work_object_t *w, x264_picture_t *pic_out,
break;
case X264_TYPE_I:
- buf->frametype = HB_FRAME_I;
+ buf->s.frametype = HB_FRAME_I;
break;
case X264_TYPE_P:
- buf->frametype = HB_FRAME_P;
+ buf->s.frametype = HB_FRAME_P;
break;
case X264_TYPE_B:
- buf->frametype = HB_FRAME_B;
+ buf->s.frametype = HB_FRAME_B;
break;
/* This is for b-pyramid, which has reference b-frames
However, it doesn't seem to ever be used... */
case X264_TYPE_BREF:
- buf->frametype = HB_FRAME_BREF;
+ buf->s.frametype = HB_FRAME_BREF;
break;
// If it isn't the above, what type of frame is it??
default:
- buf->frametype = 0;
+ buf->s.frametype = 0;
break;
}
/* Since libx264 doesn't tell us when b-frames are
themselves reference frames, figure it out on our own. */
- if( (buf->frametype == HB_FRAME_B) &&
+ if( (buf->s.frametype == HB_FRAME_B) &&
(nal[i].i_ref_idc != NAL_PRIORITY_DISPOSABLE) )
- buf->frametype = HB_FRAME_BREF;
+ buf->s.frametype = HB_FRAME_BREF;
/* Expose disposable bit to muxer. */
if( nal[i].i_ref_idc == NAL_PRIORITY_DISPOSABLE )
- buf->flags &= ~HB_FRAME_REF;
+ buf->s.flags &= ~HB_FRAME_REF;
else
- buf->flags |= HB_FRAME_REF;
+ buf->s.flags |= HB_FRAME_REF;
// PIR has no IDR frames, but x264 marks recovery points
// as keyframes. So fake an IDR at these points. This flag
// is also set for real IDR frames.
if( pic_out->b_keyframe )
{
- buf->frametype = HB_FRAME_IDR;
+ buf->s.frametype = HB_FRAME_IDR;
/* if we have a chapter marker pending and this
frame's presentation time stamp is at or after
the marker's time stamp, use this as the
@@ -495,7 +494,7 @@ static hb_buffer_t *nal_encode( hb_work_object_t *w, x264_picture_t *pic_out,
if( pv->next_chap != 0 && pv->next_chap <= pic_out->i_pts )
{
pv->next_chap = 0;
- buf->new_chap = pv->chap_mark;
+ buf->s.new_chap = pv->chap_mark;
}
}
@@ -516,16 +515,17 @@ static hb_buffer_t *x264_encode( hb_work_object_t *w, hb_buffer_t *in )
hb_job_t *job = pv->job;
/* Point x264 at our current buffers Y(UV) data. */
- pv->pic_in.img.plane[0] = in->data;
-
- int uvsize = ( (job->width + 1) >> 1 ) * ( (job->height + 1) >> 1 );
+ pv->pic_in.img.i_stride[0] = in->plane[0].stride;
+ pv->pic_in.img.i_stride[1] = in->plane[1].stride;
+ pv->pic_in.img.i_stride[2] = in->plane[2].stride;
+ pv->pic_in.img.plane[0] = in->plane[0].data;
if( !job->grayscale )
{
- /* Point x264 at our buffers (Y)UV data */
- pv->pic_in.img.plane[1] = in->data + job->width * job->height;
- pv->pic_in.img.plane[2] = pv->pic_in.img.plane[1] + uvsize;
+ pv->pic_in.img.plane[1] = in->plane[1].data;
+ pv->pic_in.img.plane[2] = in->plane[2].data;
}
- if( in->new_chap && job->chapter_markers )
+
+ if( in->s.new_chap && job->chapter_markers )
{
/* chapters have to start with an IDR frame so request that this
frame be coded as IDR. Since there may be up to 16 frames
@@ -535,11 +535,11 @@ static hb_buffer_t *x264_encode( hb_work_object_t *w, hb_buffer_t *in )
pv->pic_in.i_type = X264_TYPE_IDR;
if( pv->next_chap == 0 )
{
- pv->next_chap = in->start;
- pv->chap_mark = in->new_chap;
+ pv->next_chap = in->s.start;
+ pv->chap_mark = in->s.new_chap;
}
/* don't let 'work_loop' put a chapter mark on the wrong buffer */
- in->new_chap = 0;
+ in->s.new_chap = 0;
}
else
{
@@ -551,19 +551,19 @@ static hb_buffer_t *x264_encode( hb_work_object_t *w, hb_buffer_t *in )
* frame stream with the current frame's start time equal to the
* previous frame's stop time.
*/
- if( pv->last_stop != in->start )
+ if( pv->last_stop != in->s.start )
{
hb_log("encx264 input continuity err: last stop %"PRId64" start %"PRId64,
- pv->last_stop, in->start);
+ pv->last_stop, in->s.start);
}
- pv->last_stop = in->stop;
+ pv->last_stop = in->s.stop;
// Remember info about this frame that we need to pass across
// the x264_encoder_encode call (since it reorders frames).
save_frame_info( pv, in );
/* Feed the input PTS to x264 so it can figure out proper output PTS */
- pv->pic_in.i_pts = in->start;
+ pv->pic_in.i_pts = in->s.start;
x264_picture_t pic_out;
int i_nal;
diff --git a/libhb/fifo.c b/libhb/fifo.c
index 9c76b01ba..391fc39c6 100644
--- a/libhb/fifo.c
+++ b/libhb/fifo.c
@@ -375,6 +375,9 @@ void hb_buffer_close( hb_buffer_t ** _b )
b->next = NULL;
+ // Close any attached subtitle buffers
+ hb_buffer_close( &b->sub );
+
if( buffer_pool && b->data && !hb_fifo_is_full( buffer_pool ) )
{
hb_fifo_push_head( buffer_pool, b );
@@ -393,16 +396,15 @@ void hb_buffer_close( hb_buffer_t ** _b )
free( b );
b = next;
}
+
*_b = NULL;
}
-void hb_buffer_copy_settings( hb_buffer_t * dst, const hb_buffer_t * src )
+void hb_buffer_move_subs( hb_buffer_t * dst, hb_buffer_t * src )
{
- dst->start = src->start;
- dst->stop = src->stop;
- dst->new_chap = src->new_chap;
- dst->frametype = src->frametype;
- dst->flags = src->flags;
+ // Note that dst takes ownership of the subtitles
+ dst->sub = src->sub;
+ src->sub = NULL;
}
hb_fifo_t * hb_fifo_init( int capacity, int thresh )
@@ -737,6 +739,9 @@ void hb_fifo_close( hb_fifo_t ** _f )
hb_fifo_t * f = *_f;
hb_buffer_t * b;
+ if ( f == NULL )
+ return;
+
hb_deep_log( 2, "fifo_close: trashing %d buffer(s)", hb_fifo_size( f ) );
while( ( b = hb_fifo_get( f ) ) )
{
diff --git a/libhb/hb.c b/libhb/hb.c
index 31456784f..a9e76b926 100644
--- a/libhb/hb.c
+++ b/libhb/hb.c
@@ -124,6 +124,19 @@ int hb_avcodec_close(AVCodecContext *avctx)
return ret;
}
+
+int hb_avpicture_fill( AVPicture *pic, hb_buffer_t *buf )
+{
+ int ret, ii;
+
+ for( ii = 0; ii < 4; ii++ )
+ pic->linesize[ii] = buf->plane[ii].stride;
+
+ ret = av_image_fill_pointers( pic->data, buf->f.fmt, buf->f.height,
+ buf->data, pic->linesize );
+ return ret;
+}
+
static int handle_jpeg(enum PixelFormat *format)
{
switch (*format) {
@@ -446,7 +459,6 @@ hb_handle_t * hb_init( int verbose, int update_check )
hb_register( &hb_decutf8sub );
hb_register( &hb_dectx3gsub );
hb_register( &hb_decssasub );
- hb_register( &hb_render );
hb_register( &hb_encavcodec );
hb_register( &hb_encx264 );
hb_register( &hb_enctheora );
@@ -546,7 +558,6 @@ hb_handle_t * hb_init_dl( int verbose, int update_check )
hb_register( &hb_decutf8sub );
hb_register( &hb_dectx3gsub );
hb_register( &hb_decssasub );
- hb_register( &hb_render );
hb_register( &hb_encavcodec );
hb_register( &hb_encx264 );
hb_register( &hb_enctheora );
@@ -699,6 +710,39 @@ void hb_get_preview_by_index( hb_handle_t * h, int title_index, int picture, uin
}
}
+int hb_save_preview( hb_handle_t * h, int title, int preview, hb_buffer_t *buf )
+{
+ FILE * file;
+ char filename[1024];
+
+ hb_get_tempory_filename( h, filename, "%d_%d_%d",
+ hb_get_instance_id(h), title, preview );
+
+ file = fopen( filename, "wb" );
+ if( !file )
+ {
+ hb_error( "hb_save_preview: fopen failed (%s)", filename );
+ return -1;
+ }
+
+ int pp, hh;
+ for( pp = 0; pp < 3; pp++ )
+ {
+ uint8_t *data = buf->plane[pp].data;
+ int stride = buf->plane[pp].stride;
+ int w = buf->plane[pp].width;
+ int h = buf->plane[pp].height;
+
+ for( hh = 0; hh < h; hh++ )
+ {
+ fwrite( data, w, 1, file );
+ data += stride;
+ }
+ }
+ fclose( file );
+ return 0;
+}
+
/**
* Create preview image of desired title a index of picture.
* @param h Handle to hb_handle_t.
@@ -835,8 +879,8 @@ int hb_detect_comb( hb_buffer_t * buf, int width, int height, int color_equal, i
cc_1 = 0; cc_2 = 0;
int offset = 0;
-
- if ( buf->flags & 16 )
+
+ if ( buf->s.flags & 16 )
{
/* Frame is progressive, be more discerning. */
color_diff = prog_diff;
@@ -1171,6 +1215,77 @@ void hb_set_anamorphic_size( hb_job_t * job,
}
/**
+ * Add a filter to a job's filter list
+ *
+ * @param job Handle to hb_job_t
+ * @param settings to give the filter
+ */
+void hb_add_filter( hb_job_t * job, hb_filter_object_t * filter, const char * settings_in )
+{
+ char * settings = NULL;
+
+ if ( settings_in != NULL )
+ {
+ settings = strdup( settings_in );
+ }
+ filter->settings = settings;
+ if( filter->enforce_order )
+ {
+ // Find the position in the filter chain this filter belongs in
+ int i;
+ for( i = 0; i < hb_list_count( job->list_filter ); i++ )
+ {
+ hb_filter_object_t * f = hb_list_item( job->list_filter, i );
+ if( f->id > filter->id )
+ {
+ hb_list_insert( job->list_filter, i, filter );
+ return;
+ }
+ else if( f->id == filter->id )
+ {
+ // Don't allow the same filter to be added twice
+ return;
+ }
+ }
+ }
+ // No position found or order not enforced for this filter
+ hb_list_add( job->list_filter, filter );
+}
+
+/**
+ * Validate and adjust dimensions if necessary
+ *
+ * @param job Handle to hb_job_t
+ */
+void hb_validate_size( hb_job_t * job )
+{
+ if ( job->anamorphic.mode )
+ {
+ hb_set_anamorphic_size( job, &job->width, &job->height,
+ &job->anamorphic.par_width, &job->anamorphic.par_height );
+ }
+ else
+ {
+ if ( job->maxHeight && ( job->height > job->maxHeight ) )
+ {
+ job->height = job->maxHeight;
+ hb_fix_aspect( job, HB_KEEP_HEIGHT );
+ hb_log( "Height out of bounds, scaling down to %i",
+ job->maxHeight );
+ hb_log( "New dimensions %i * %i", job->width, job->height );
+ }
+ if ( job->maxWidth && ( job->width > job->maxWidth ) )
+ {
+ job->width = job->maxWidth;
+ hb_fix_aspect( job, HB_KEEP_WIDTH );
+ hb_log( "Width out of bounds, scaling down to %i",
+ job->maxWidth );
+ hb_log( "New dimensions %i * %i", job->width, job->height );
+ }
+ }
+}
+
+/**
* Calculates job width, height, and cropping parameters.
* @param job Handle to hb_job_t.
* @param aspect Desired aspect ratio. Value of -1 uses title aspect.
@@ -1501,11 +1616,11 @@ void hb_add( hb_handle_t * h, hb_job_t * job )
job_copy->pause = h->pause_lock;
/* Copy the job filter list */
- if( job->filters )
+ if( job->list_filter )
{
int i;
- int filter_count = hb_list_count( job->filters );
- job_copy->filters = hb_list_init();
+ int filter_count = hb_list_count( job->list_filter );
+ job_copy->list_filter = hb_list_init();
for( i = 0; i < filter_count; i++ )
{
/*
@@ -1516,14 +1631,9 @@ void hb_add( hb_handle_t * h, hb_job_t * job )
* as well for completeness. Not copying private_data since it gets
* created for each job in renderInit.
*/
- hb_filter_object_t * filter = hb_list_item( job->filters, i );
- hb_filter_object_t * filter_copy = malloc( sizeof( hb_filter_object_t ) );
- memcpy( filter_copy, filter, sizeof( hb_filter_object_t ) );
- if( filter->name )
- filter_copy->name = strdup( filter->name );
- if( filter->settings )
- filter_copy->settings = strdup( filter->settings );
- hb_list_add( job_copy->filters, filter_copy );
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+ hb_filter_object_t * filter_copy = hb_filter_copy( filter );
+ hb_list_add( job_copy->list_filter, filter_copy );
}
}
@@ -1534,6 +1644,37 @@ void hb_add( hb_handle_t * h, hb_job_t * job )
}
/**
+ * Clean up the job structure so that it is ready for setting up a new job.
+ * Should be called by front-ends after hb_add().
+ */
+void hb_reset_job( hb_job_t * job )
+{
+ hb_audio_t *audio;
+ hb_subtitle_t *subtitle;
+ hb_filter_object_t *filter;
+
+ // clean up audio list
+ while( ( audio = hb_list_item( job->list_audio, 0 ) ) )
+ {
+ hb_list_rem( job->list_audio, audio );
+ free( audio );
+ }
+ // clean up subtitle list
+ while( ( subtitle = hb_list_item( job->list_subtitle, 0 ) ) )
+ {
+ hb_list_rem( job->list_subtitle, subtitle );
+ free( subtitle );
+ }
+ // clean up filter list
+ while( ( filter = hb_list_item( job->list_filter, 0 ) ) )
+ {
+ hb_list_rem( job->list_filter, filter );
+ free( filter->settings );
+ free( filter );
+ }
+}
+
+/**
* Removes a job from the job list.
* @param h Handle to hb_handle_t.
* @param job Handle to hb_job_t.
@@ -1650,52 +1791,6 @@ void hb_scan_stop( hb_handle_t * h )
}
/**
- * Gets a filter object with the given type and settings.
- * @param filter_id The type of filter to get.
- * @param settings The filter settings to use.
- * @returns The requested filter object.
- */
-hb_filter_object_t * hb_get_filter_object(int filter_id, const char * settings)
-{
- if (filter_id == HB_FILTER_ROTATE)
- {
- hb_filter_rotate.settings = (char*)settings;
- return &hb_filter_rotate;
- }
-
- if (filter_id == HB_FILTER_DETELECINE)
- {
- hb_filter_detelecine.settings = (char*)settings;
- return &hb_filter_detelecine;
- }
-
- if (filter_id == HB_FILTER_DECOMB)
- {
- hb_filter_decomb.settings = (char*)settings;
- return &hb_filter_decomb;
- }
-
- if (filter_id == HB_FILTER_DEINTERLACE)
- {
- hb_filter_deinterlace.settings = (char*)settings;
- return &hb_filter_deinterlace;
- }
-
- if (filter_id == HB_FILTER_DEBLOCK)
- {
- hb_filter_deblock.settings = (char*)settings;
- return &hb_filter_deblock;
- }
-
- if (filter_id == HB_FILTER_DENOISE)
- {
- hb_filter_denoise.settings = (char*)settings;
- return &hb_filter_denoise;
- }
- return NULL;
-}
-
-/**
* Returns the state of the conversion process.
* @param h Handle to hb_handle_t.
* @param s Handle to hb_state_t which to copy the state data.
@@ -1745,9 +1840,9 @@ void hb_close( hb_handle_t ** _h )
while( ( title = hb_list_item( h->list_title, 0 ) ) )
{
hb_list_rem( h->list_title, title );
- if( title->job && title->job->filters )
+ if( title->job )
{
- hb_list_close( &title->job->filters );
+ hb_reset_job( title->job );
}
free( title->job );
hb_title_close( &title );
diff --git a/libhb/hb.h b/libhb/hb.h
index 0a3af6019..4fc668c9d 100644
--- a/libhb/hb.h
+++ b/libhb/hb.h
@@ -47,7 +47,6 @@ void hb_scan( hb_handle_t *, const char * path,
int title_index, int preview_count,
int store_previews, uint64_t min_duration );
void hb_scan_stop( hb_handle_t * );
-hb_filter_object_t * hb_get_filter_object(int filter_id, const char * settings);
uint64_t hb_first_duration( hb_handle_t * );
/* hb_get_titles()
@@ -59,6 +58,8 @@ hb_list_t * hb_get_titles( hb_handle_t * );
Taken from Thomas Oestreich's 32detect filter in the Transcode project. */
int hb_detect_comb( hb_buffer_t * buf, int width, int height, int color_equal, int color_diff, int threshold, int prog_equal, int prog_diff, int prog_threshold );
+int hb_save_preview( hb_handle_t * h, int title, int preview,
+ hb_buffer_t *buf );
void hb_get_preview_by_index( hb_handle_t *, int, int, uint8_t * );
void hb_get_preview( hb_handle_t *, hb_title_t *, int,
uint8_t * );
@@ -69,6 +70,9 @@ void hb_set_anamorphic_size_by_index( hb_handle_t *, int,
void hb_set_anamorphic_size( hb_job_t *,
int *output_width, int *output_height,
int *output_par_width, int *output_par_height );
+void hb_validate_size( hb_job_t * job );
+void hb_add_filter( hb_job_t * job, hb_filter_object_t * filter,
+ const char * settings );
/* Handling jobs */
int hb_count( hb_handle_t * );
@@ -77,6 +81,7 @@ void hb_set_chapter_name( hb_handle_t *, int, int, const char * );
void hb_set_job( hb_handle_t *, int, hb_job_t * );
void hb_add( hb_handle_t *, hb_job_t * );
void hb_rem( hb_handle_t *, hb_job_t * );
+void hb_reset_job( hb_job_t * job );
void hb_start( hb_handle_t * );
void hb_pause( hb_handle_t * );
diff --git a/libhb/hbffmpeg.h b/libhb/hbffmpeg.h
index 0fb0cd0a5..935aa16c1 100644
--- a/libhb/hbffmpeg.h
+++ b/libhb/hbffmpeg.h
@@ -6,6 +6,7 @@
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "libavutil/mathematics.h"
+#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
#define HB_FFMPEG_THREADS_AUTO (-1) // let hb_avcodec_open decide thread_count
@@ -14,9 +15,9 @@ void hb_avcodec_init(void);
int hb_avcodec_open( AVCodecContext *, struct AVCodec *, AVDictionary **av_opts, int thread_count );
int hb_avcodec_close( AVCodecContext * );
int hb_ff_layout_xlat(int64_t ff_layout, int channels);
-struct SwsContext*
-hb_sws_get_context(int srcW, int srcH, enum PixelFormat srcFormat,
- int dstW, int dstH, enum PixelFormat dstFormat,
- int flags);
+struct SwsContext* hb_sws_get_context( int srcW, int srcH,
+ enum PixelFormat srcFormat, int dstW, int dstH,
+ enum PixelFormat dstFormat, int flags);
void hb_ff_set_sample_fmt(AVCodecContext *context, AVCodec *codec);
int hb_ff_dts_disable_xch( AVCodecContext *c );
+int hb_avpicture_fill( AVPicture *pic, hb_buffer_t *buf );
diff --git a/libhb/internal.h b/libhb/internal.h
index 00a7b3eb9..fa391bc6d 100644
--- a/libhb/internal.h
+++ b/libhb/internal.h
@@ -4,6 +4,8 @@
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License. */
+#include "hbffmpeg.h"
+
/***********************************************************************
* common.c
**********************************************************************/
@@ -29,8 +31,6 @@ void hb_list_empty( hb_list_t ** );
hb_title_t * hb_title_init( char * dvd, int index );
void hb_title_close( hb_title_t ** );
-void hb_filter_close( hb_filter_object_t ** );
-
/***********************************************************************
* hb.c
**********************************************************************/
@@ -40,6 +40,7 @@ void hb_set_state( hb_handle_t *, hb_state_t * );
/***********************************************************************
* fifo.c
**********************************************************************/
+
/*
* Holds a packet of data that is moving through the transcoding process.
*
@@ -51,7 +52,7 @@ struct hb_buffer_s
int size; // size of this packet
int alloc; // used internally by the packet allocator (hb_buffer_init)
uint8_t * data; // packet data
- int cur; // used internally by packet lists (hb_list_t)
+ int offset; // used internally by packet lists (hb_list_t)
/*
* Corresponds to the order that this packet was read from the demuxer.
@@ -66,39 +67,56 @@ struct hb_buffer_s
*/
int64_t sequence;
- enum { AUDIO_BUF, VIDEO_BUF, SUBTITLE_BUF, OTHER_BUF } type;
-
- int id; // ID of the track that the packet comes from
- int64_t start; // Video and subtitle packets: start time of frame/subtitle
- int64_t stop; // Video and subtitle packets: stop time of frame/subtitle
- int64_t pcr;
- uint8_t discontinuity;
- int new_chap; // Video packets: if non-zero, is the index of the chapter whose boundary was crossed
-
-#define HB_FRAME_IDR 0x01
-#define HB_FRAME_I 0x02
-#define HB_FRAME_AUDIO 0x04
-#define HB_FRAME_P 0x10
-#define HB_FRAME_B 0x20
-#define HB_FRAME_BREF 0x40
-#define HB_FRAME_KEY 0x0F
-#define HB_FRAME_REF 0xF0
- uint8_t frametype;
- uint16_t flags;
-
- /* Holds the output PTS from x264, for use by b-frame offsets in muxmp4.c */
- int64_t renderOffset;
+ struct settings
+ {
+ enum { AUDIO_BUF, VIDEO_BUF, SUBTITLE_BUF, OTHER_BUF } type;
+
+ int id; // ID of the track that the packet comes from
+ int64_t start; // start time of frame
+ int64_t stop; // stop time of frame
+ int64_t renderOffset; // DTS used by b-frame offsets in muxmp4
+ int64_t pcr;
+ uint8_t discontinuity;
+ int new_chap; // Video packets: if non-zero, is the index of the chapter whose boundary was crossed
+
+ #define HB_FRAME_IDR 0x01
+ #define HB_FRAME_I 0x02
+ #define HB_FRAME_AUDIO 0x04
+ #define HB_FRAME_P 0x10
+ #define HB_FRAME_B 0x20
+ #define HB_FRAME_BREF 0x40
+ #define HB_FRAME_KEY 0x0F
+ #define HB_FRAME_REF 0xF0
+ uint8_t frametype;
+ uint16_t flags;
+ } s;
+
+ struct format
+ {
+ int x;
+ int y;
+ int width;
+ int height;
+ int fmt;
+ } f;
+
+ struct plane
+ {
+ uint8_t * data;
+ int stride;
+ int width;
+ int height;
+ int size;
+ } plane[4]; // 3 Color components + alpha
// PICTURESUB subtitle packets:
- // Location and size of the subpicture.
- int x;
- int y;
- int width;
- int height;
// Video packets (after processing by the hb_sync_video work-object):
- // A (copy of a) PICTURESUB subtitle packet that needs to be burned into this video packet by the hb_render work-object.
- // Subtitles that are simply passed thru are NOT attached to the associated video packets.
+ // A (copy of a) PICTURESUB subtitle packet that needs to be burned into
+ // this video packet by the vobsub renderer filter
+ //
+ // Subtitles that are simply passed thru are NOT attached to the
+ // associated video packets.
hb_buffer_t * sub;
// Packets in a list:
@@ -115,6 +133,7 @@ void hb_buffer_reduce( hb_buffer_t * b, int size );
void hb_buffer_close( hb_buffer_t ** );
void hb_buffer_copy_settings( hb_buffer_t * dst,
const hb_buffer_t * src );
+void hb_buffer_move_subs( hb_buffer_t * dst, hb_buffer_t * src );
hb_fifo_t * hb_fifo_init( int capacity, int thresh );
int hb_fifo_size( hb_fifo_t * );
@@ -135,19 +154,142 @@ hb_buffer_t * hb_fifo_get_list_element( hb_fifo_t *fifo );
void hb_fifo_close( hb_fifo_t ** );
void hb_fifo_flush( hb_fifo_t * f );
+static inline int hb_image_stride( int pix_fmt, int width, int plane )
+{
+ int linesize = av_image_get_linesize( pix_fmt, width, plane );
+
+ // Make buffer SIMD friendly.
+ linesize = MULTIPLE_MOD_UP( linesize, 16 );
+ return linesize;
+}
+
+static inline int hb_image_width( int pix_fmt, int width, int plane )
+{
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt];
+
+ if ( plane == 1 || plane == 2 )
+ {
+ // The wacky arithmetic assures rounding up.
+ width = -((-width)>>desc->log2_chroma_w);
+ }
+
+ return width;
+}
+
+static inline int hb_image_height( int pix_fmt, int height, int plane )
+{
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt];
+
+ if ( plane == 1 || plane == 2 )
+ {
+ // The wacky arithmetic assures rounding up.
+ height = -((-height)>>desc->log2_chroma_h);
+ }
+
+ return height;
+}
+
+// this routine gets a buffer for an uncompressed picture
+// with pixel format pix_fmt and dimensions width x height.
+static inline hb_buffer_t * hb_pic_buffer_init( int pix_fmt, int width, int height )
+{
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt];
+
+ hb_buffer_t * buf;
+ int p;
+ uint8_t has_plane[4] = {0,};
+
+ for( p = 0; p < 4; p++ )
+ {
+ has_plane[desc->comp[p].plane] = 1;
+ }
+
+ int size = 0;
+ for( p = 0; p < 4; p++ )
+ {
+ if( has_plane[p] )
+ {
+ size += hb_image_stride( pix_fmt, width, p ) *
+ hb_image_height( pix_fmt, height, p );
+ }
+ }
+
+ buf = hb_buffer_init( size );
+ if( buf == NULL )
+ return NULL;
+
+ buf->s.type = VIDEO_BUF;
+ buf->f.width = width;
+ buf->f.height = height;
+ buf->f.fmt = pix_fmt;
+
+ uint8_t * plane = buf->data;
+ for( p = 0; p < 4; p++ )
+ {
+ if ( has_plane[p] )
+ {
+ buf->plane[p].data = plane;
+ buf->plane[p].stride = hb_image_stride( pix_fmt, width, p );
+ buf->plane[p].height = hb_image_height( pix_fmt, height, p );
+ buf->plane[p].width = hb_image_width( pix_fmt, width, p );
+ buf->plane[p].size = hb_image_stride( pix_fmt, width, p ) *
+ hb_image_height( pix_fmt, height, p );
+ plane += buf->plane[p].size;
+ }
+ }
+ return buf;
+}
+
// this routine gets a buffer for an uncompressed YUV420 video frame
// with dimensions width x height.
static inline hb_buffer_t * hb_video_buffer_init( int width, int height )
{
- // Y requires w x h bytes. U & V each require (w+1)/2 x
- // (h+1)/2 bytes (the "+1" is to round up). We shift rather
- // than divide by 2 since the compiler can't know these ints
- // are positive so it generates very expensive integer divides
- // if we do "/2". The code here matches the calculation for
- // PIX_FMT_YUV420P in ffmpeg's avpicture_fill() which is required
- // for most of HB's filters to work right.
- return hb_buffer_init( width * height + ( ( width+1 ) >> 1 ) *
- ( ( height+1 ) >> 1 ) * 2 );
+ return hb_pic_buffer_init( PIX_FMT_YUV420P, width, height );
+}
+
+// this routine reallocs a buffer for an uncompressed YUV420 video frame
+// with dimensions width x height.
+static inline void hb_video_buffer_realloc( hb_buffer_t * buf, int width, int height )
+{
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[buf->f.fmt];
+ int p;
+
+ uint8_t has_plane[4] = {0,};
+
+ for( p = 0; p < 4; p++ )
+ {
+ has_plane[desc->comp[p].plane] = 1;
+ }
+
+ int size = 0;
+ for( p = 0; p < 4; p++ )
+ {
+ if( has_plane[p] )
+ {
+ size += hb_image_stride( buf->f.fmt, width, p ) *
+ hb_image_height( buf->f.fmt, height, p );
+ }
+ }
+
+ hb_buffer_realloc(buf, size );
+
+ buf->f.width = width;
+ buf->f.height = height;
+
+ uint8_t * plane = buf->data;
+ for( p = 0; p < 4; p++ )
+ {
+ if( has_plane[p] )
+ {
+ buf->plane[p].data = plane;
+ buf->plane[p].stride = hb_image_stride( buf->f.fmt, width, p );
+ buf->plane[p].height = hb_image_height( buf->f.fmt, height, p );
+ buf->plane[p].width = hb_image_width( buf->f.fmt, width, p );
+ buf->plane[p].size = hb_image_stride( buf->f.fmt, width, p ) *
+ hb_image_height( buf->f.fmt, height, p );
+ plane += buf->plane[p].size;
+ }
+ }
}
// this routine 'moves' data from src to dst by interchanging 'data',
@@ -365,15 +507,15 @@ enum
WORK_READER
};
-enum
-{
- FILTER_DEINTERLACE = 1,
- FILTER_DEBLOCK,
- FILTER_DENOISE,
- FILTER_DETELECINE,
- FILTER_DECOMB,
- FILTER_ROTATE
-};
+extern hb_filter_object_t hb_filter_detelecine;
+extern hb_filter_object_t hb_filter_deinterlace;
+extern hb_filter_object_t hb_filter_deblock;
+extern hb_filter_object_t hb_filter_denoise;
+extern hb_filter_object_t hb_filter_decomb;
+extern hb_filter_object_t hb_filter_rotate;
+extern hb_filter_object_t hb_filter_crop_scale;
+extern hb_filter_object_t hb_filter_render_sub;
+extern hb_filter_object_t hb_filter_vfr;
// Picture flags used by filters
#ifndef PIC_FLAG_REPEAT_FIRST_FIELD
diff --git a/libhb/mcdeint.c b/libhb/mcdeint.c
index 9e3d82371..2ffb722f7 100644
--- a/libhb/mcdeint.c
+++ b/libhb/mcdeint.c
@@ -28,6 +28,7 @@
void mcdeint_init( mcdeint_private_t * pv,
int mode,
int qp,
+ int pix_fmt,
int width,
int height )
{
@@ -53,7 +54,7 @@ void mcdeint_init( mcdeint_private_t * pv,
avctx_enc->time_base = (AVRational){1,25}; // meaningless
avctx_enc->gop_size = 300;
avctx_enc->max_b_frames = 0;
- avctx_enc->pix_fmt = PIX_FMT_YUV420P;
+ avctx_enc->pix_fmt = pix_fmt;
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
avctx_enc->global_quality = 1;
@@ -110,7 +111,6 @@ void mcdeint_filter( uint8_t ** dst,
mcdeint_private_t * pv )
{
int x, y, i;
- int out_size;
#ifdef SUPPRESS_AV_LOG
/* TODO: temporarily change log level to suppress obnoxious debug output */
@@ -127,10 +127,10 @@ void mcdeint_filter( uint8_t ** dst,
pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
pv->mcdeint_frame->quality = pv->mcdeint_qp * FF_QP2LAMBDA;
- out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
- pv->mcdeint_outbuf,
- pv->mcdeint_outbuf_size,
- pv->mcdeint_frame );
+ avcodec_encode_video( pv->mcdeint_avctx_enc,
+ pv->mcdeint_outbuf,
+ pv->mcdeint_outbuf_size,
+ pv->mcdeint_frame );
pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;
diff --git a/libhb/mcdeint.h b/libhb/mcdeint.h
index 3c2a96200..55a89303f 100644
--- a/libhb/mcdeint.h
+++ b/libhb/mcdeint.h
@@ -15,6 +15,7 @@ typedef struct mcdeint_private_s mcdeint_private_t;
void mcdeint_init( mcdeint_private_t * pv,
int mode,
int qp,
+ int pix_fmt,
int width,
int height );
diff --git a/libhb/muxcommon.c b/libhb/muxcommon.c
index 39f56dc6b..ffb7aa35e 100644
--- a/libhb/muxcommon.c
+++ b/libhb/muxcommon.c
@@ -188,7 +188,7 @@ static void MoveToInternalFifos( int tk, hb_mux_t *mux, hb_buffer_t * buf )
// (b) we can control how data from multiple tracks is
// interleaved in the output file.
mf_push( mux, tk, buf );
- if ( buf->stop >= mux->pts )
+ if ( buf->s.stop >= mux->pts )
{
// buffer is past our next interleave point so
// note that this track is ready to be output.
@@ -201,7 +201,7 @@ static void OutputTrackChunk( hb_mux_t *mux, int tk, hb_mux_object_t *m )
hb_track_t *track = mux->track[tk];
hb_buffer_t *buf;
- while ( ( buf = mf_peek( track ) ) != NULL && buf->start < mux->pts )
+ while ( ( buf = mf_peek( track ) ) != NULL && buf->s.start < mux->pts )
{
buf = mf_pull( mux, tk );
track->frames += 1;
@@ -276,7 +276,7 @@ static int muxWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
// Otherwise clear rdy.
if ( ( mux->eof & (1 << i) ) == 0 &&
( track->mf.out == track->mf.in ||
- track->mf.fifo[(track->mf.in-1) & (track->mf.flen-1)]->stop
+ track->mf.fifo[(track->mf.in-1) & (track->mf.flen-1)]->s.stop
< mux->pts + mux->interleave ) )
{
mux->rdy &=~ ( 1 << i );
diff --git a/libhb/muxmkv.c b/libhb/muxmkv.c
index 524ca9d0a..1bed15c49 100644
--- a/libhb/muxmkv.c
+++ b/libhb/muxmkv.c
@@ -419,16 +419,16 @@ static int MKVMux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
if (mux_data == job->mux_data)
{
/* Video */
- timecode = buf->start * TIMECODE_SCALE;
+ timecode = buf->s.start * TIMECODE_SCALE;
- if (job->chapter_markers && (buf->new_chap || timecode == 0))
+ if (job->chapter_markers && (buf->s.new_chap || timecode == 0))
{
/* Make sure we're not writing a chapter that has 0 length */
if (mux_data->prev_chapter_tc != timecode)
{
- if ( buf->new_chap )
+ if ( buf->s.new_chap )
{
- mux_data->current_chapter = buf->new_chap - 2;
+ mux_data->current_chapter = buf->s.new_chap - 2;
}
chapter_data = hb_list_item( title->list_chapter,
mux_data->current_chapter++ );
@@ -468,14 +468,14 @@ static int MKVMux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
else if ( mux_data->subtitle )
{
uint64_t duration;
- timecode = buf->start * TIMECODE_SCALE;
+ timecode = buf->s.start * TIMECODE_SCALE;
if( mk_startFrame(m->file, mux_data->track) < 0)
{
hb_error( "Failed to write frame to output file, Disk Full?" );
*job->die = 1;
}
- duration = buf->stop * TIMECODE_SCALE - timecode;
+ duration = buf->s.stop * TIMECODE_SCALE - timecode;
mk_addFrameData(m->file, mux_data->track, buf->data, buf->size);
mk_setFrameFlags(m->file, mux_data->track, timecode, 1, duration);
mk_flushFrame(m->file, mux_data->track);
@@ -485,7 +485,7 @@ static int MKVMux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
else
{
/* Audio */
- timecode = buf->start * TIMECODE_SCALE;
+ timecode = buf->s.start * TIMECODE_SCALE;
if (mux_data->codec == HB_ACODEC_VORBIS)
{
/* ughhh, vorbis is a pain :( */
@@ -513,8 +513,8 @@ static int MKVMux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
(((job->vcodec == HB_VCODEC_X264 ||
(job->vcodec & HB_VCODEC_FFMPEG_MASK)) &&
mux_data == job->mux_data) ?
- (buf->frametype == HB_FRAME_IDR) :
- ((buf->frametype & HB_FRAME_KEY) != 0)), 0 );
+ (buf->s.frametype == HB_FRAME_IDR) :
+ ((buf->s.frametype & HB_FRAME_KEY) != 0)), 0 );
hb_buffer_close( &buf );
return 0;
}
diff --git a/libhb/muxmp4.c b/libhb/muxmp4.c
index d03326b80..31c235961 100644
--- a/libhb/muxmp4.c
+++ b/libhb/muxmp4.c
@@ -893,15 +893,14 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
if( mux_data == job->mux_data )
{
/* Video */
-
if( job->vcodec == HB_VCODEC_X264 ||
( job->vcodec & HB_VCODEC_FFMPEG_MASK ) )
{
- if ( buf && buf->start < buf->renderOffset )
+ if ( buf && buf->s.start < buf->s.renderOffset )
{
hb_log("MP4Mux: PTS %"PRId64" < DTS %"PRId64,
- buf->start, buf->renderOffset );
- buf->renderOffset = buf->start;
+ buf->s.start, buf->s.renderOffset );
+ buf->s.renderOffset = buf->s.start;
}
}
@@ -918,14 +917,14 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
( job->vcodec & HB_VCODEC_FFMPEG_MASK ) )
{
// x264 supplies us with DTS, so offset is PTS - DTS
- offset = buf->start - buf->renderOffset;
+ offset = buf->s.start - buf->s.renderOffset;
}
/* Add the sample before the new frame.
It is important that this be calculated prior to the duration
of the new video sample, as we want to sync to right after it.
(This is because of how durations for text tracks work in QT) */
- if( job->chapter_markers && buf->new_chap )
+ if( job->chapter_markers && buf->s.new_chap )
{
hb_chapter_t *chapter = NULL;
@@ -940,14 +939,14 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
if ( duration >= (90000*3)/2 )
{
chapter = hb_list_item( m->job->title->list_chapter,
- buf->new_chap - 2 );
+ buf->s.new_chap - 2 );
MP4AddChapter( m->file,
m->chapter_track,
duration,
(chapter != NULL) ? chapter->title : NULL);
- m->current_chapter = buf->new_chap;
+ m->current_chapter = buf->s.new_chap;
m->chapter_duration += duration;
}
}
@@ -958,11 +957,11 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
// x264 supplies us with DTS
if ( m->delay_buf )
{
- duration = m->delay_buf->renderOffset - buf->renderOffset;
+ duration = m->delay_buf->s.renderOffset - buf->s.renderOffset;
}
else
{
- duration = buf->stop - m->sum_dur;
+ duration = buf->s.stop - m->sum_dur;
// Due to how libx264 generates DTS, it's possible for the
// above calculation to be negative.
//
@@ -998,7 +997,7 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
// We're getting the frames in decode order but the timestamps are
// for presentation so we have to use durations and effectively
// compute a DTS.
- duration = buf->stop - buf->start;
+ duration = buf->s.stop - buf->s.start;
}
if ( duration <= 0 )
@@ -1009,7 +1008,7 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
try to fix the error so that the file will still be playable. */
hb_log("MP4Mux: illegal duration %"PRId64", start %"PRId64","
"stop %"PRId64", sum_dur %"PRId64,
- duration, buf->start, buf->stop, m->sum_dur );
+ duration, buf->s.start, buf->s.stop, m->sum_dur );
/* we don't know when the next frame starts so we can't pick a
valid duration for this one. we pick something "short"
(roughly 1/3 of an NTSC frame time) to take time from
@@ -1045,12 +1044,12 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
uint32_t dflags = 0;
/* encoding layer signals if frame is referenced by other frames */
- if( buf->flags & HB_FRAME_REF )
+ if( buf->s.flags & HB_FRAME_REF )
dflags |= MP4_SDT_HAS_DEPENDENTS;
else
dflags |= MP4_SDT_HAS_NO_DEPENDENTS; /* disposable */
- switch( buf->frametype )
+ switch( buf->s.frametype )
{
case HB_FRAME_IDR:
sync = 1;
@@ -1086,50 +1085,50 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
{
/* MPEG4 timed text does not allow overlapping samples; upstream
code should coalesce overlapping subtitle lines. */
- if( buf->start < mux_data->sum_dur )
+ if( buf->s.start < mux_data->sum_dur )
{
- if ( buf->stop - mux_data->sum_dur > 90*500 )
+ if ( buf->s.stop - mux_data->sum_dur > 90*500 )
{
hb_log("MP4Mux: shortening overlapping subtitle, "
"start %"PRId64", stop %"PRId64", sum_dur %"PRId64,
- buf->start, buf->stop, m->sum_dur);
- buf->start = mux_data->sum_dur;
+ buf->s.start, buf->s.stop, m->sum_dur);
+ buf->s.start = mux_data->sum_dur;
}
}
- if( buf->start < mux_data->sum_dur )
+ if( buf->s.start < mux_data->sum_dur )
{
hb_log("MP4Mux: skipping overlapping subtitle, "
"start %"PRId64", stop %"PRId64", sum_dur %"PRId64,
- buf->start, buf->stop, m->sum_dur);
+ buf->s.start, buf->s.stop, m->sum_dur);
}
else
{
int64_t duration;
- if( buf->start < 0 )
- buf->start = mux_data->sum_dur;
+ if( buf->s.start < 0 )
+ buf->s.start = mux_data->sum_dur;
- if( buf->stop < 0 )
+ if( buf->s.stop < 0 )
duration = 90000L * 10;
else
- duration = buf->stop - buf->start;
+ duration = buf->s.stop - buf->s.start;
/* Write an empty sample */
- if ( mux_data->sum_dur < buf->start )
+ if ( mux_data->sum_dur < buf->s.start )
{
uint8_t empty[2] = {0,0};
if( !MP4WriteSample( m->file,
mux_data->track,
empty,
2,
- buf->start - mux_data->sum_dur,
+ buf->s.start - mux_data->sum_dur,
0,
1 ))
{
hb_error("Failed to write to output file, disk full?");
*job->die = 1;
}
- mux_data->sum_dur += buf->start - mux_data->sum_dur;
+ mux_data->sum_dur += buf->s.start - mux_data->sum_dur;
}
uint8_t styleatom[2048];;
uint16_t stylesize = 0;
@@ -1150,7 +1149,7 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
buffersize = strlen((char*)buffer);
hb_deep_log(3, "MuxMP4:Sub:%fs:%"PRId64":%"PRId64":%"PRId64": %s",
- (float)buf->start / 90000, buf->start, buf->stop,
+ (float)buf->s.start / 90000, buf->s.start, buf->s.stop,
duration, buffer);
/* Write the subtitle sample */
@@ -1178,30 +1177,30 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
{
int64_t duration;
- if( buf->start < 0 )
- buf->start = mux_data->sum_dur;
+ if( buf->s.start < 0 )
+ buf->s.start = mux_data->sum_dur;
- if( buf->stop < 0 )
+ if( buf->s.stop < 0 )
duration = 90000L * 10;
else
- duration = buf->stop - buf->start;
+ duration = buf->s.stop - buf->s.start;
/* Write an empty sample */
- if ( mux_data->sum_dur < buf->start )
+ if ( mux_data->sum_dur < buf->s.start )
{
uint8_t empty[2] = {0,0};
if( !MP4WriteSample( m->file,
mux_data->track,
empty,
2,
- buf->start - mux_data->sum_dur,
+ buf->s.start - mux_data->sum_dur,
0,
1 ))
{
hb_error("Failed to write to output file, disk full?");
*job->die = 1;
}
- mux_data->sum_dur += buf->start - mux_data->sum_dur;
+ mux_data->sum_dur += buf->s.start - mux_data->sum_dur;
}
if( !MP4WriteSample( m->file,
mux_data->track,
@@ -1229,7 +1228,7 @@ static int MP4Mux( hb_mux_object_t * m, hb_mux_data_t * mux_data,
buf->size,
duration,
offset,
- ( buf->frametype & HB_FRAME_KEY ) != 0 ))
+ ( buf->s.frametype & HB_FRAME_KEY ) != 0 ))
{
hb_error("Failed to write to output file, disk full?");
*job->die = 1;
diff --git a/libhb/platform/macosx/encca_aac.c b/libhb/platform/macosx/encca_aac.c
index 35eee4a00..66cac1e62 100644
--- a/libhb/platform/macosx/encca_aac.c
+++ b/libhb/platform/macosx/encca_aac.c
@@ -419,7 +419,11 @@ static hb_buffer_t * Encode( hb_work_object_t * w )
pv->pts += 90000LL * pv->isamples / pv->osamplerate;
obuf->stop = pv->pts;
obuf->size = odesc.mDataByteSize;
- obuf->frametype = HB_FRAME_AUDIO;
+
+ hb_buffer_tag_t tag;
+ tag.id = HB_TAG_AUDIO;
+ tag.u.audio.frametype = HB_FRAME_AUDIO;
+ hb_buffer_add_tag( obuf, &tag );
return obuf;
}
diff --git a/libhb/reader.c b/libhb/reader.c
index d7661d9b4..40756bc67 100644
--- a/libhb/reader.c
+++ b/libhb/reader.c
@@ -210,7 +210,7 @@ static int is_audio( hb_work_private_t *r, int id )
static stream_timing_t *id_to_st( hb_work_private_t *r, const hb_buffer_t *buf, int valid )
{
stream_timing_t *st = r->stream_timing;
- while ( st->id != buf->id && st->id != -1)
+ while ( st->id != buf->s.id && st->id != -1)
{
++st;
}
@@ -228,11 +228,11 @@ static stream_timing_t *id_to_st( hb_work_private_t *r, const hb_buffer_t *buf,
sizeof(*r->stream_timing) );
st = r->stream_timing + slot;
}
- st->id = buf->id;
+ st->id = buf->s.id;
st->average = 30.*90.;
st->startup = 10;
st->last = -st->average;
- if ( ( st->is_audio = is_audio( r, buf->id ) ) != 0 )
+ if ( ( st->is_audio = is_audio( r, buf->s.id ) ) != 0 )
{
r->saw_audio = 1;
}
@@ -249,13 +249,13 @@ static void update_ipt( hb_work_private_t *r, const hb_buffer_t *buf )
{
stream_timing_t *st = id_to_st( r, buf, 1 );
- if( buf->renderOffset < 0 )
+ if( buf->s.renderOffset < 0 )
{
st->last += st->average;
return;
}
- double dt = buf->renderOffset - st->last;
+ double dt = buf->s.renderOffset - st->last;
// Protect against spurious bad timestamps
if ( dt > -5 * 90000LL && dt < 5 * 90000LL )
{
@@ -268,7 +268,7 @@ static void update_ipt( hb_work_private_t *r, const hb_buffer_t *buf )
{
st->average += ( dt - st->average ) * (1./32.);
}
- st->last = buf->renderOffset;
+ st->last = buf->s.renderOffset;
}
st->valid = 1;
}
@@ -294,10 +294,10 @@ static void new_scr_offset( hb_work_private_t *r, hb_buffer_t *buf )
last = st->last;
}
int64_t nxt = last + st->average;
- r->scr_offset = buf->renderOffset - nxt;
+ r->scr_offset = buf->s.renderOffset - nxt;
// This log is handy when you need to debug timing problems...
//hb_log("id %x last %ld avg %g nxt %ld renderOffset %ld scr_offset %ld",
- // buf->id, last, st->average, nxt, buf->renderOffset, r->scr_offset);
+ // buf->s.id, last, st->average, nxt, buf->s.renderOffset, r->scr_offset);
r->scr_changes = r->demux.scr_changes;
}
@@ -401,9 +401,9 @@ void ReadLoop( void * _w )
// and then seek to the appropriate offset from it
if ( ( buf = hb_stream_read( r->stream ) ) )
{
- if ( buf->start > 0 )
+ if ( buf->s.start > 0 )
{
- pts_to_start += buf->start;
+ pts_to_start += buf->s.start;
}
}
@@ -487,11 +487,11 @@ void ReadLoop( void * _w )
// We will inspect the timestamps of each frame in sync
// to skip from this seek point to the timestamp we
// want to start at.
- if ( buf->start > 0 && buf->start < r->job->pts_to_start )
+ if ( buf->s.start > 0 && buf->s.start < r->job->pts_to_start )
{
- r->job->pts_to_start -= buf->start;
+ r->job->pts_to_start -= buf->s.start;
}
- else if ( buf->start >= r->job->pts_to_start )
+ else if ( buf->s.start >= r->job->pts_to_start )
{
r->job->pts_to_start = 0;
r->start_found = 1;
@@ -526,15 +526,15 @@ void ReadLoop( void * _w )
while( ( buf = hb_list_item( list, 0 ) ) )
{
hb_list_rem( list, buf );
- fifos = GetFifoForId( r->job, buf->id );
+ fifos = GetFifoForId( r->job, buf->s.id );
if ( fifos && ! r->saw_video && !r->job->indepth_scan )
{
// The first data packet with a PTS from an audio or video stream
// that we're decoding defines 'time zero'. Discard packets until
// we get one.
- if ( buf->start != -1 && buf->renderOffset != -1 &&
- ( buf->id == r->title->video_id || is_audio( r, buf->id ) ) )
+ if ( buf->s.start != -1 && buf->s.renderOffset != -1 &&
+ ( buf->s.id == r->title->video_id || is_audio( r, buf->s.id ) ) )
{
// force a new scr offset computation
r->scr_changes = r->demux.scr_changes - 1;
@@ -543,7 +543,7 @@ void ReadLoop( void * _w )
id_to_st( r, buf, 1 );
r->saw_video = 1;
hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
- r->demux.last_scr, buf->id, buf->renderOffset );
+ r->demux.last_scr, buf->s.id, buf->s.renderOffset );
}
else
{
@@ -552,7 +552,7 @@ void ReadLoop( void * _w )
}
if( fifos )
{
- if ( buf->renderOffset != -1 )
+ if ( buf->s.renderOffset != -1 )
{
if ( r->scr_changes != r->demux.scr_changes )
{
@@ -577,14 +577,14 @@ void ReadLoop( void * _w )
// frame but video & subtitles don't. Clear
// the timestamps so the decoder will generate
// them from the frame durations.
- buf->start = -1;
- buf->renderOffset = -1;
+ buf->s.start = -1;
+ buf->s.renderOffset = -1;
}
}
}
- if ( buf->start != -1 )
+ if ( buf->s.start != -1 )
{
- int64_t start = buf->start - r->scr_offset;
+ int64_t start = buf->s.start - r->scr_offset;
if ( !r->start_found )
UpdateState( r, start );
@@ -596,17 +596,17 @@ void ReadLoop( void * _w )
}
// This log is handy when you need to debug timing problems
//hb_log("id %x scr_offset %ld start %ld --> %ld",
- // buf->id, r->scr_offset, buf->start,
- // buf->start - r->scr_offset);
- buf->start -= r->scr_offset;
+ // buf->s.id, r->scr_offset, buf->s.start,
+ // buf->s.start - r->scr_offset);
+ buf->s.start -= r->scr_offset;
}
- if ( buf->renderOffset != -1 )
+ if ( buf->s.renderOffset != -1 )
{
// This packet is referenced to the same SCR as the last.
// Adjust timestamp to remove the System Clock Reference
// offset then update the average inter-packet time
// for this stream.
- buf->renderOffset -= r->scr_offset;
+ buf->s.renderOffset -= r->scr_offset;
update_ipt( r, buf );
}
else
@@ -627,7 +627,7 @@ void ReadLoop( void * _w )
for( n = 1; fifos[n] != NULL; n++)
{
hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
- hb_buffer_copy_settings( buf_copy, buf );
+ buf_copy->s = buf->s;
memcpy( buf_copy->data, buf->data, buf->size );
push_buf( r, fifos[n], buf_copy );
}
diff --git a/libhb/render.c b/libhb/render.c
deleted file mode 100644
index 5fded023e..000000000
--- a/libhb/render.c
+++ /dev/null
@@ -1,950 +0,0 @@
-/* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
-
- This file is part of the HandBrake source code.
- Homepage: <http://handbrake.fr/>.
- It may be used under the terms of the GNU General Public License. */
-
-#include "hb.h"
-#include "hbffmpeg.h"
-
-struct hb_work_private_s
-{
- hb_job_t * job;
-
- struct SwsContext * context;
- AVPicture pic_tmp_in;
- AVPicture pic_tmp_crop;
- AVPicture pic_tmp_out;
- hb_buffer_t * buf_scale;
- hb_fifo_t * subtitle_queue;
- hb_fifo_t * delay_queue;
- int dropped_frames;
- int extended_frames;
- uint64_t last_start[4];
- uint64_t last_stop[4];
- uint64_t lost_time[4];
- uint64_t total_lost_time;
- uint64_t total_gained_time;
- int64_t chapter_time;
- int chapter_val;
- int count_frames; // frames output so far
- double frame_rate; // 90kHz ticks per frame (for CFR/PFR)
- uint64_t out_last_stop; // where last frame ended (for CFR/PFR)
- int drops; // frames dropped (for CFR/PFR)
- int dups; // frames duped (for CFR/PFR)
- float max_metric; // highest motion metric since
- // last output frame
- float frame_metric; // motion metric of last frame
- float out_metric; // motion metric of last output frame
- int sync_parity;
- unsigned gamma_lut[256];
-};
-
-int renderInit( hb_work_object_t *, hb_job_t * );
-int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
-void renderClose( hb_work_object_t * );
-
-hb_work_object_t hb_render =
-{
- WORK_RENDER,
- "Renderer",
- renderInit,
- renderWork,
- renderClose
-};
-
-// Create gamma lookup table.
-// Note that we are creating a scaled integer lookup table that will
-// not cause overflows in sse_block16() below. This results in
-// small values being truncated to 0 which is ok for this usage.
-static void build_gamma_lut( hb_work_private_t * pv )
-{
- int i;
- for( i = 0; i < 256; i++ )
- {
- pv->gamma_lut[i] = 4095 * pow( ( (float)i / (float)255 ), 2.2f );
- }
-}
-
-/*
- * getU() & getV()
- *
- * Utility function that finds where the U is in the YUV sub-picture
- *
- * The Y data is at the top, followed by U and V, but the U and V
- * are half the width of the Y, i.e. each chroma element covers 2x2
- * of the Y's.
- */
-static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
-{
- return(&data[(y>>1) * ((width+1)>>1) + (x>>1) + width*height]);
-}
-
-static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
-{
- int w2 = (width+1) >> 1, h2 = (height+1) >> 1;
- return(&data[(y>>1) * w2 + (x>>1) + width*height + w2*h2]);
-}
-
-// Draws the specified PICTURESUB subtitle on the specified video packet.
-// Disposes the subtitle afterwards.
-static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
- hb_buffer_t * sub )
-{
- hb_title_t * title = job->title;
- int i, j, offset_top, offset_left, margin_top, margin_percent;
- uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;
-
- /*
- * Percent of height of picture that form a margin that subtitles
- * should not be displayed within.
- */
- margin_percent = 2;
-
- if( !sub )
- {
- return;
- }
-
- /*
- * If necessary, move the subtitle so it is not in a cropped zone.
- * When it won't fit, we center it so we lose as much on both ends.
- * Otherwise we try to leave a 20px or 2% margin around it.
- */
- margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
- margin_percent ) / 100;
-
- if( margin_top > 20 )
- {
- /*
- * A maximum margin of 20px regardless of height of the picture.
- */
- margin_top = 20;
- }
-
- if( sub->height > title->height - job->crop[0] - job->crop[1] -
- ( margin_top * 2 ) )
- {
- /*
- * The subtitle won't fit in the cropped zone, so center
- * it vertically so we fit in as much as we can.
- */
- offset_top = job->crop[0] + ( title->height - job->crop[0] -
- job->crop[1] - sub->height ) / 2;
- }
- else if( sub->y < job->crop[0] + margin_top )
- {
- /*
- * The subtitle fits in the cropped zone, but is currently positioned
- * within our top margin, so move it outside of our margin.
- */
- offset_top = job->crop[0] + margin_top;
- }
- else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
- {
- /*
- * The subtitle fits in the cropped zone, and is not within the top
- * margin but is within the bottom margin, so move it to be above
- * the margin.
- */
- offset_top = title->height - job->crop[1] - margin_top - sub->height;
- }
- else
- {
- /*
- * The subtitle is fine where it is.
- */
- offset_top = sub->y;
- }
-
- if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
- offset_left = job->crop[2] + ( title->width - job->crop[2] -
- job->crop[3] - sub->width ) / 2;
- else if( sub->x < job->crop[2] + 20 )
- offset_left = job->crop[2] + 20;
- else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
- offset_left = title->width - job->crop[3] - 20 - sub->width;
- else
- offset_left = sub->x;
-
- lum = sub->data;
- alpha = lum + sub->width * sub->height;
- sub_chromaU = alpha + sub->width * sub->height;
- sub_chromaV = sub_chromaU + sub->width * sub->height;
-
- out = buf->data + offset_top * title->width + offset_left;
-
- for( i = 0; i < sub->height; i++ )
- {
- if( offset_top + i >= 0 && offset_top + i < title->height )
- {
- for( j = 0; j < sub->width; j++ )
- {
- if( offset_left + j >= 0 && offset_left + j < title->width )
- {
- uint8_t *chromaU, *chromaV;
-
- /*
- * Merge the luminance and alpha with the picture
- */
- out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
- (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
- /*
- * Set the chroma (colour) based on whether there is
- * any alpha at all. Don't try to blend with the picture.
- */
- chromaU = getU(buf->data, title->width, title->height,
- offset_left+j, offset_top+i);
-
- chromaV = getV(buf->data, title->width, title->height,
- offset_left+j, offset_top+i);
-
- if( alpha[j] > 0 )
- {
- /*
- * Add the chroma from the sub-picture, as this is
- * not a transparent element.
- */
- *chromaU = sub_chromaU[j];
- *chromaV = sub_chromaV[j];
- }
- }
- }
- }
-
- lum += sub->width;
- alpha += sub->width;
- sub_chromaU += sub->width;
- sub_chromaV += sub->width;
- out += title->width;
- }
-}
-
-// Draws the specified PICTURESUB subtitle on the specified video packet.
-static void ApplySubs( hb_job_t * job, hb_buffer_t * buf,
- hb_buffer_t * sub )
-{
- while ( sub )
- {
- ApplySub( job, buf, sub );
- sub = sub->next;
- }
-}
-
-// delete the buffer 'out' from the chain of buffers whose head is 'buf_out'.
-// out & buf_out must be non-null (checks prior to anywhere this routine
-// can be called guarantee this) and out must be on buf_out's chain
-// (all places that call this get 'out' while traversing the chain).
-// 'out' is freed and its predecessor is returned.
-static hb_buffer_t *delete_buffer_from_chain( hb_buffer_t **buf_out, hb_buffer_t *out)
-{
- hb_buffer_t *succ = out->next;
- hb_buffer_t *pred = *buf_out;
- if ( pred == out )
- {
- // we're deleting the first buffer
- *buf_out = succ;
- }
- else
- {
- // target isn't the first buf so search for its predecessor.
- while ( pred->next != out )
- {
- pred = pred->next;
- }
- // found 'out' - remove it from the chain
- pred->next = succ;
- }
- out->next = 0;
- hb_buffer_close( &out );
- return succ;
-}
-
-// insert buffer 'succ' after buffer chain element 'pred'.
-// caller must guarantee that 'pred' and 'succ' are non-null.
-static hb_buffer_t *insert_buffer_in_chain( hb_buffer_t *pred, hb_buffer_t *succ )
-{
- succ->next = pred->next;
- pred->next = succ;
- return succ;
-}
-
-// Compute ths sum of squared errors for a 16x16 block
-// Gamma adjusts pixel values so that less visible diffreences
-// count less.
-static inline unsigned sse_block16( hb_work_private_t *pv, uint8_t *a, uint8_t *b, int stride )
-{
- int x, y;
- unsigned sum = 0;
- int diff;
- unsigned *g = pv->gamma_lut;
-
- for( y = 0; y < 16; y++ )
- {
- for( x = 0; x < 16; x++ )
- {
- diff = g[a[x]] - g[b[x]];
- sum += diff * diff;
- }
- a += stride;
- b += stride;
- }
- return sum;
-}
-
-// Sum of squared errors. Computes and sums the SSEs for all
-// 16x16 blocks in the images. Only checks the Y component.
-static float motion_metric( hb_work_private_t * pv, hb_buffer_t * a, hb_buffer_t * b )
-{
- int bw = pv->job->width / 16;
- int bh = pv->job->height / 16;
- int stride = pv->job->width;
- uint8_t * pa = a->data;
- uint8_t * pb = b->data;
- int x, y;
- uint64_t sum = 0;
-
- for( y = 0; y < bh; y++ )
- {
- for( x = 0; x < bw; x++ )
- {
- sum += sse_block16( pv, pa + y * 16 * stride + x * 16,
- pb + y * 16 * stride + x * 16, stride );
- }
- }
- return (float)sum / ( pv->job->width * pv->job->height );
-}
-
-// This section of the code implements video frame rate control.
-// Since filters are allowed to duplicate and drop frames (which
-// changes the timing), this has to be the last thing done in render.
-//
-// There are three options, selected by the value of job->cfr:
-// 0 - Variable Frame Rate (VFR) or 'same as source': frame times
-// are left alone
-// 1 - Constant Frame Rate (CFR): Frame timings are adjusted so that all
-// frames are exactly job->vrate_base ticks apart. Frames are dropped
-// or duplicated if necessary to maintain this spacing.
-// 2 - Peak Frame Rate (PFR): job->vrate_base is treated as the peak
-// average frame rate. I.e., the average frame rate (current frame
-// end time divided by number of frames so far) is never allowed to be
-// greater than job->vrate_base and frames are dropped if necessary
-// to keep the average under this value. Other than those drops, frame
-// times are left alone.
-//
-static void adjust_frame_rate( hb_work_private_t *pv, hb_buffer_t **buf_out )
-{
- hb_buffer_t *out = *buf_out;
-
- while ( out && out->size > 0 )
- {
- if ( pv->job->cfr == 0 )
- {
- ++pv->count_frames;
- pv->out_last_stop = out->stop;
- out = out->next;
- continue;
- }
-
- // compute where this frame would stop if the frame rate were constant
- // (this is our target stopping time for CFR and earliest possible
- // stopping time for PFR).
- double cfr_stop = pv->frame_rate * ( pv->count_frames + 1 );
-
- hb_buffer_t * next = hb_fifo_see( pv->delay_queue );
-
- float next_metric = 0;
- if( next )
- next_metric = motion_metric( pv, out, next );
-
- if( pv->out_last_stop >= out->stop )
- {
- // This frame stops a frame time or more in the past - drop it
- // but don't lose its chapter mark.
- if ( out->new_chap )
- {
- pv->chapter_time = out->start;
- pv->chapter_val = out->new_chap;
- }
- ++pv->drops;
- out = delete_buffer_from_chain( buf_out, out );
- pv->frame_metric = next_metric;
- if( next_metric > pv->max_metric )
- pv->max_metric = next_metric;
- continue;
- }
-
- if( out->start <= pv->out_last_stop &&
- out->stop > pv->out_last_stop &&
- next && next->stop < cfr_stop )
- {
- // This frame starts before the end of the last output
- // frame and ends after the end of the last output
- // frame (i.e. it straddles it). Also the next frame
- // ends before the end of the next output frame. If the
- // next frame is not a duplicate, and we haven't seen
- // a changed frame since the last output frame,
- // then drop this frame.
- //
- // This causes us to sync to the pattern of progressive
- // 23.976 fps content that has been upsampled to
- // progressive 59.94 fps.
- if( pv->out_metric > pv->max_metric &&
- next_metric > pv->max_metric )
- {
- // Pattern: N R R N
- // o c n
- // N == new frame
- // R == repeat frame
- // o == last output frame
- // c == current frame
- // n == next frame
- // We haven't seen a frame change since the last output
- // frame and the next frame changes. Use the next frame,
- // drop this one.
- if ( out->new_chap )
- {
- pv->chapter_time = out->start;
- pv->chapter_val = out->new_chap;
- }
- ++pv->drops;
- out = delete_buffer_from_chain( buf_out, out );
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
- pv->sync_parity = 1;
- continue;
- }
- else if( pv->sync_parity &&
- pv->out_metric < pv->max_metric &&
- pv->max_metric > pv->frame_metric &&
- pv->frame_metric < next_metric )
- {
- // Pattern: R N R N
- // o c n
- // N == new frame
- // R == repeat frame
- // o == last output frame
- // c == current frame
- // n == next frame
- // If we see this pattern, we must not use the next
- // frame when straddling the current frame.
- pv->sync_parity = 0;
- }
- else if( pv->sync_parity )
- {
- // The pattern is indeterminate. Continue dropping
- // frames on the same schedule
- if ( out->new_chap )
- {
- pv->chapter_time = out->start;
- pv->chapter_val = out->new_chap;
- }
- ++pv->drops;
- out = delete_buffer_from_chain( buf_out, out );
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
- pv->sync_parity = 1;
- continue;
- }
- }
-
- // this frame has to start where the last one stopped.
- out->start = pv->out_last_stop;
-
- pv->out_metric = pv->frame_metric;
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
-
- // at this point we know that this frame doesn't push the average
- // rate over the limit so we just pass it on for PFR. For CFR we're
- // going to return it (with its start & stop times modified) and
- // we may have to dup it.
- ++pv->count_frames;
- if ( pv->job->cfr > 1 )
- {
- // PFR - we're going to keep the frame but may need to
- // adjust it's stop time to meet the average rate constraint.
- if ( out->stop <= cfr_stop )
- {
- out->stop = cfr_stop;
- }
- }
- else
- {
- // we're doing CFR so we have to either trim some time from a
- // buffer that ends too far in the future or, if the buffer is
- // two or more frame times long, split it into multiple pieces,
- // each of which is a frame time long.
- double excess_dur = (double)out->stop - cfr_stop;
- out->stop = cfr_stop;
- for ( ; excess_dur >= pv->frame_rate; excess_dur -= pv->frame_rate )
- {
- /* next frame too far ahead - dup current frame */
- hb_buffer_t *dup = hb_buffer_init( out->size );
- memcpy( dup->data, out->data, out->size );
- hb_buffer_copy_settings( dup, out );
- dup->new_chap = 0;
- dup->start = cfr_stop;
- cfr_stop += pv->frame_rate;
- dup->stop = cfr_stop;
- out = insert_buffer_in_chain( out, dup );
- ++pv->dups;
- ++pv->count_frames;
- }
- }
- pv->out_last_stop = out->stop;
- out = out->next;
- }
-}
-
-int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
- hb_buffer_t ** buf_out )
-{
- hb_work_private_t * pv = w->private_data;
- hb_job_t * job = pv->job;
- hb_title_t * title = job->title;
- hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
- hb_buffer_t * ivtc_buffer = NULL;
-
- if( in->size <= 0 )
- {
- hb_buffer_t *head = NULL, *tail = NULL, *next;
- int counter = 2;
-
- /* If the input buffer is end of stream, send out an empty one
- * to the next stage as well. To avoid losing the contents of
- * the delay queue connect the buffers in the delay queue in
- * the correct order, and add the end of stream buffer to the
- * end.
- */
- while( ( next = hb_fifo_get( pv->delay_queue ) ) != NULL )
- {
-
- /* We can't use the given time stamps. Previous frames
- might already have been extended, throwing off the
- raw values fed to render.c. Instead, their
- stop and start times are stored in arrays.
- The 4th cached frame will be the to use.
- If it needed its duration extended to make up
- lost time, it will have happened above. */
- next->start = pv->last_start[counter];
- next->stop = pv->last_stop[counter--];
-
- if( !head && !tail )
- {
- head = tail = next;
- } else {
- tail->next = next;
- tail = next;
- }
- }
- if( tail )
- {
- tail->next = in;
- *buf_out = head;
- adjust_frame_rate( pv, buf_out );
- } else {
- *buf_out = in;
- }
- *buf_in = NULL;
- return HB_WORK_DONE;
- }
-
- /*
- * During the indepth_scan ditch the buffers here before applying filters or attempting to
- * use the subtitles.
- */
- if( job->indepth_scan )
- {
- *buf_out = NULL;
- return HB_WORK_OK;
- }
-
- /* Push subtitles onto queue just in case we need to delay a frame */
- if( in->sub )
- {
- hb_fifo_push_list_element( pv->subtitle_queue, in->sub );
- in->sub = NULL;
- }
- else
- {
- hb_fifo_push_list_element( pv->subtitle_queue, NULL );
- }
-
- /* If there's a chapter mark remember it in case we delay or drop its frame */
- if( in->new_chap )
- {
- pv->chapter_time = in->start;
- pv->chapter_val = in->new_chap;
- in->new_chap = 0;
- }
-
- /* Setup render buffer */
- hb_buffer_t * buf_render = hb_video_buffer_init( job->width, job->height );
-
- /* Apply filters */
- if( job->filters )
- {
- int filter_count = hb_list_count( job->filters );
- int i;
-
- for( i = 0; i < filter_count; i++ )
- {
- hb_filter_object_t * filter = hb_list_item( job->filters, i );
-
- if( !filter )
- {
- continue;
- }
-
- hb_buffer_t * buf_tmp_out = NULL;
-
- int result = filter->work( buf_tmp_in,
- &buf_tmp_out,
- PIX_FMT_YUV420P,
- title->width,
- title->height,
- filter->private_data );
-
- /*
- * FILTER_OK: set temp buffer to filter buffer, continue
- * FILTER_DELAY: set temp buffer to NULL, abort
- * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
- * FILTER_FAILED: leave temp buffer alone, continue
- */
- if( result == FILTER_OK )
- {
- buf_tmp_in = buf_tmp_out;
- }
- else if( result == FILTER_DELAY )
- {
- // Process the current frame later
-
- buf_tmp_in = NULL;
- break;
- }
- else if( result == FILTER_DROP )
- {
- // Drop the current frame
-
- /* We need to compensate for the time lost by dropping this frame.
- Spread its duration out in quarters, because usually dropped frames
- maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
- Store these in the lost_time array, which has 4 slots in it.
- Because not every frame duration divides evenly by 4, and we can't lose the
- remainder, we have to go through an awkward process to preserve it in the 4th array index. */
- uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
- pv->lost_time[0] += (temp_duration / 4);
- pv->lost_time[1] += (temp_duration / 4);
- pv->lost_time[2] += (temp_duration / 4);
- pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
-
- pv->total_lost_time += temp_duration;
- pv->dropped_frames++;
-
- /* Pop the frame's subtitle list and dispose of it. */
- hb_buffer_t * sub = hb_fifo_get_list_element( pv->subtitle_queue );
- if ( sub )
- hb_buffer_close( &sub );
-
- buf_tmp_in = NULL;
- break;
- }
- }
- }
-
- if( buf_tmp_in )
- {
- /* Cache frame start and stop times, so we can renumber
- time stamps if dropping frames for VFR. */
- int i;
- for( i = 3; i >= 1; i-- )
- {
- pv->last_start[i] = pv->last_start[i-1];
- pv->last_stop[i] = pv->last_stop[i-1];
- }
-
- /* In order to make sure we have continuous time stamps, store
- the current frame's duration as starting when the last one stopped. */
- pv->last_start[0] = pv->last_stop[1];
- pv->last_stop[0] = pv->last_start[0] + (buf_tmp_in->stop - buf_tmp_in->start);
- }
-
- /* Apply subtitles and dispose them */
- if( buf_tmp_in )
- {
- hb_buffer_t * sub = hb_fifo_get_list_element( pv->subtitle_queue );
- if ( sub )
- {
- ApplySubs( job, buf_tmp_in, sub );
- hb_buffer_close( &sub );
- }
- }
-
- /* Apply crop/scale if specified */
- if( buf_tmp_in && pv->context )
- {
- avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
- PIX_FMT_YUV420P,
- title->width, title->height );
-
- avpicture_fill( &pv->pic_tmp_out, buf_render->data,
- PIX_FMT_YUV420P,
- job->width, job->height );
-
- // Crop; this alters the pointer to the data to point to the correct place for cropped frame
- av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
- job->crop[0], job->crop[2] );
-
- // Scale pic_crop into pic_render according to the context set up in renderInit
- sws_scale(pv->context,
- (const uint8_t* const *)pv->pic_tmp_crop.data,
- pv->pic_tmp_crop.linesize,
- 0, title->height - (job->crop[0] + job->crop[1]),
- pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
-
- hb_buffer_copy_settings( buf_render, buf_tmp_in );
-
- buf_tmp_in = buf_render;
- }
-
- /* Set output to render buffer */
- (*buf_out) = buf_render;
-
- if( buf_tmp_in == NULL )
- {
- /* Teardown and cleanup buffers if we are emitting NULL */
- if( buf_in && *buf_in )
- {
- hb_buffer_close( buf_in );
- *buf_in = NULL;
- }
- if( buf_out && *buf_out )
- {
- hb_buffer_close( buf_out );
- *buf_out = NULL;
- }
- }
- else if( buf_tmp_in != buf_render )
- {
- /* Copy temporary results and settings into render buffer */
- memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
- hb_buffer_copy_settings( buf_render, buf_tmp_in );
- }
-
- if (*buf_out )
- {
- hb_fifo_push( pv->delay_queue, *buf_out );
- *buf_out = NULL;
- }
-
- /*
- * Keep the last three frames in our queue, this ensures that we have the last
- * two always in there should we need to rewrite the durations on them.
- */
-
- if( hb_fifo_size( pv->delay_queue ) >= 4 )
- {
- *buf_out = hb_fifo_get( pv->delay_queue );
- }
-
- if( *buf_out )
- {
- /* The current frame exists. That means it hasn't been dropped by a filter.
- Make it accessible as ivtc_buffer so we can edit its duration if needed. */
- ivtc_buffer = *buf_out;
-
- if( pv->lost_time[3] > 0 )
- {
- /*
- * A frame's been dropped earlier by VFR detelecine.
- * Gotta make up the lost time. This will also
- * slow down the video.
- * The dropped frame's has to be accounted for, so
- * divvy it up amongst the 4 frames left behind.
- * This is what the delay_queue is for;
- * telecined sequences start 2 frames before
- * the dropped frame, so to slow down the right
- * ones you need a 2 frame delay between
- * reading input and writing output.
- */
-
- /* We want to extend the outputted frame's duration by the value
- stored in the 4th slot of the lost_time array. Because we need
- to adjust all the values in the array so they're contiguous,
- extend the duration inside the array first, before applying
- it to the current frame buffer. */
- pv->last_stop[3] += pv->lost_time[3];
-
- /* Log how much time has been added back in to the video. */
- pv->total_gained_time += pv->lost_time[3];
-
- /* We've pulled the 4th value from the lost_time array
- and added it to the last_stop array's 4th slot. Now, rotate the
- lost_time array so the 4th slot now holds the 3rd's value, and
- so on down the line, and set the 0 index to a value of 0. */
- int i;
- for( i=2; i >= 0; i--)
- {
- pv->lost_time[i+1] = pv->lost_time[i];
- }
- pv->lost_time[0] = 0;
-
- /* Log how many frames have had their durations extended. */
- pv->extended_frames++;
- }
-
- /* We can't use the given time stamps. Previous frames
- might already have been extended, throwing off the
- raw values fed to render.c. Instead, their
- stop and start times are stored in arrays.
- The 4th cached frame will be the to use.
- If it needed its duration extended to make up
- lost time, it will have happened above. */
- ivtc_buffer->start = pv->last_start[3];
- ivtc_buffer->stop = pv->last_stop[3];
-
- /* Set the 3rd cached frame to start when this one stops,
- and so on down the line. If any of them need to be
- extended as well to make up lost time, it'll be handled
- on the next loop through the renderer. */
- int i;
- for (i = 2; i >= 0; i--)
- {
- int temp_duration = pv->last_stop[i] - pv->last_start[i];
- pv->last_start[i] = pv->last_stop[i+1];
- pv->last_stop[i] = pv->last_start[i] + temp_duration;
- }
-
- /* If we have a pending chapter mark and this frame is at
- or after the time of the mark, mark this frame & clear
- our pending mark. */
- if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
- {
- ivtc_buffer->new_chap = pv->chapter_val;
- pv->chapter_time = 0;
- }
-
- }
-
- if ( buf_out && *buf_out )
- {
- adjust_frame_rate( pv, buf_out );
- }
- return HB_WORK_OK;
-}
-
-void renderClose( hb_work_object_t * w )
-{
- hb_work_private_t * pv = w->private_data;
-
- if ( pv->job->cfr )
- {
- hb_log("render: %d frames output, %d dropped and %d duped for CFR/PFR",
- pv->count_frames, pv->drops, pv->dups );
- }
-
- hb_interjob_t * interjob = hb_interjob_get( w->private_data->job->h );
-
- /* Preserve output frame count and time for more accurate framerates in 2nd passes. */
- interjob->out_frame_count = pv->count_frames;
- interjob->total_time = pv->out_last_stop;
-
- hb_log("render: lost time: %"PRId64" (%i frames)", pv->total_lost_time, pv->dropped_frames);
- hb_log("render: gained time: %"PRId64" (%i frames) (%"PRId64" not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
- if (pv->dropped_frames)
- hb_log("render: average dropped frame duration: %"PRId64, (pv->total_lost_time / pv->dropped_frames) );
-
- /* Cleanup subtitle queue */
- if( pv->subtitle_queue )
- {
- hb_fifo_close( &pv->subtitle_queue );
- }
-
- if( pv->delay_queue )
- {
- hb_fifo_close( &pv->delay_queue );
- }
-
- /* Cleanup render work structure */
- free( pv );
- w->private_data = NULL;
-}
-
-int renderInit( hb_work_object_t * w, hb_job_t * job )
-{
- /* Allocate new private work object */
- hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
- pv->job = job;
- w->private_data = pv;
- uint32_t swsflags;
-
- build_gamma_lut( pv );
- swsflags = SWS_LANCZOS | SWS_ACCURATE_RND;
-
- /* Get title and title size */
- hb_title_t * title = job->title;
-
- /* If crop or scale is specified, setup rescale context */
- if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
- job->width != title->width || job->height != title->height )
- {
- pv->context = hb_sws_get_context(title->width - (job->crop[2] + job->crop[3]),
- title->height - (job->crop[0] + job->crop[1]),
- PIX_FMT_YUV420P,
- job->width, job->height, PIX_FMT_YUV420P,
- swsflags);
- }
-
- /* Setup FIFO queue for subtitle cache */
- pv->subtitle_queue = hb_fifo_init( 8, 1 );
- pv->delay_queue = hb_fifo_init( 8, 1 );
-
- /* VFR IVTC needs a bunch of time-keeping variables to track
- how many frames are dropped, how many are extended, what the
- last 4 start and stop times were (so they can be modified),
- how much time has been lost and gained overall, how much time
- the latest 4 frames should be extended by, and where chapter
- markers are (so they can be saved if their frames are dropped.) */
- pv->dropped_frames = 0;
- pv->extended_frames = 0;
- pv->last_start[0] = 0;
- pv->last_stop[0] = 0;
- pv->total_lost_time = 0;
- pv->total_gained_time = 0;
- pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
- pv->chapter_time = 0;
- pv->chapter_val = 0;
- pv->frame_metric = 1000; // Force first frame
-
- if ( job->cfr == 2 )
- {
- pv->frame_rate = (double)job->pfr_vrate_base * (1./300.);
- }
- else
- {
- pv->frame_rate = (double)job->vrate_base * (1./300.);
- }
-
- /* Setup filters */
- /* TODO: Move to work.c? */
- if( job->filters )
- {
- int filter_count = hb_list_count( job->filters );
- int i;
-
- for( i = 0; i < filter_count; i++ )
- {
- hb_filter_object_t * filter = hb_list_item( job->filters, i );
-
- if( !filter ) continue;
-
- filter->private_data = filter->init( PIX_FMT_YUV420P,
- title->width,
- title->height,
- filter->settings );
- }
- }
-
- return 0;
-}
diff --git a/libhb/rendersub.c b/libhb/rendersub.c
new file mode 100644
index 000000000..26a3ec06d
--- /dev/null
+++ b/libhb/rendersub.c
@@ -0,0 +1,648 @@
+
+#include "hb.h"
+#include "hbffmpeg.h"
+#include <ass/ass.h>
+
+// Per-instance state for the subtitle burn-in filter. Only the members
+// for the active subtitle type (pv->type) are used.
+struct hb_filter_private_s
+{
+    // Common
+    int crop[4]; // title crop (top/bottom/left/right), copied from init
+    int type;    // subtitle source (VOBSUB or SSASUB); selects the implementation
+
+    // VOBSUB
+    hb_list_t * sub_list; // List of active subs
+
+    // SSA
+    ASS_Library * ssa;      // libass library handle
+    ASS_Renderer * renderer; // libass renderer, sized to the cropped frame
+    ASS_Track * ssaTrack;   // event track fed from the subtitle fifo
+};
+
+// VOBSUB
+static int vobsub_init( hb_filter_object_t * filter, hb_filter_init_t * init );
+
+static int vobsub_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static void vobsub_close( hb_filter_object_t * filter );
+
+
+// SSA
+static int ssa_init( hb_filter_object_t * filter, hb_filter_init_t * init );
+
+static int ssa_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static void ssa_close( hb_filter_object_t * filter );
+
+
+// Entry points
+static int hb_rendersub_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+
+static int hb_rendersub_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static void hb_rendersub_close( hb_filter_object_t * filter );
+
+// Filter registration: a thin dispatcher whose init/work/close entry
+// points forward to the VOBSUB or SSA implementation based on the
+// source of the subtitle marked for burn-in.
+hb_filter_object_t hb_filter_render_sub =
+{
+    .id            = HB_FILTER_RENDER_SUB,
+    .enforce_order = 1,
+    .name          = "Subtitle renderer",
+    .settings      = NULL,
+    .init          = hb_rendersub_init,
+    .work          = hb_rendersub_work,
+    .close         = hb_rendersub_close,
+};
+
+// Alpha-blend the subtitle overlay 'src' onto frame 'dst' at (left, top).
+// Assumes src and dst use the same chroma subsampling (same PIX_FMT).
+// If src carries an alpha plane (plane 3) it is used per pixel; otherwise
+// a constant 50% opacity is applied.
+static void blend( hb_buffer_t *dst, hb_buffer_t *src, int left, int top )
+{
+    int xx, yy;
+    int ww, hh;
+    int x0, y0;
+    uint8_t *y_in, *y_out;
+    uint8_t *u_in, *u_out;
+    uint8_t *v_in, *v_out;
+    uint8_t *a_in = NULL, alpha;
+
+    // Clip the overlay to the frame: start offsets when placed off the
+    // top/left edge...
+    x0 = y0 = 0;
+    if( left < 0 )
+    {
+        x0 = -left;
+    }
+    if( top < 0 )
+    {
+        y0 = -top;
+    }
+
+    // ...and extent limits when it runs off the right/bottom edge.
+    // The usable extent is frame size minus the placement offset;
+    // subtracting (offset + overlay size) instead would go negative.
+    ww = src->f.width;
+    if( left + src->f.width > dst->f.width )
+    {
+        ww = dst->f.width - left;
+    }
+    hh = src->f.height;
+    if( top + src->f.height > dst->f.height )
+    {
+        hh = dst->f.height - top;
+    }
+
+    // Decide once whether the source has an alpha plane. (Testing the
+    // then-uninitialized a_in pointer here was undefined behavior.)
+    int has_alpha = ( src->plane[3].data != NULL );
+
+    // Blend luma
+    for( yy = y0; yy < hh; yy++ )
+    {
+        y_in = src->plane[0].data + yy * src->plane[0].stride;
+        y_out = dst->plane[0].data + ( yy + top ) * dst->plane[0].stride;
+        if( has_alpha )
+        {
+            a_in = src->plane[3].data + yy * src->plane[3].stride;
+        }
+        for( xx = x0; xx < ww; xx++ )
+        {
+            if( has_alpha )
+            {
+                alpha = a_in[xx];
+            }
+            else
+            {
+                // If source has no alpha channel, use 50%
+                alpha = 128;
+            }
+
+            /*
+             * Merge the luminance and alpha with the picture
+             */
+            y_out[left + xx] =
+                ( (uint16_t)y_out[left + xx] * ( 255 - alpha ) +
+                  (uint16_t)y_in[xx] * alpha ) >> 8;
+        }
+    }
+
+    // Blend U & V
+    // Assumes source and dest are the same PIX_FMT
+    int hshift = 0;
+    int wshift = 0;
+    if( dst->plane[1].height < dst->plane[0].height )
+        hshift = 1;
+    if( dst->plane[1].width < dst->plane[0].width )
+        wshift = 1;
+    for( yy = y0 >> hshift; yy < hh >> hshift; yy++ )
+    {
+        u_in = src->plane[1].data + yy * src->plane[1].stride;
+        u_out = dst->plane[1].data + ( yy + ( top >> hshift ) ) * dst->plane[1].stride;
+        v_in = src->plane[2].data + yy * src->plane[2].stride;
+        v_out = dst->plane[2].data + ( yy + ( top >> hshift ) ) * dst->plane[2].stride;
+        if( has_alpha )
+        {
+            // Subsampled chroma uses the alpha of the block's top-left luma pixel
+            a_in = src->plane[3].data + ( yy << hshift ) * src->plane[3].stride;
+        }
+
+        for( xx = x0 >> wshift; xx < ww >> wshift; xx++ )
+        {
+            if( has_alpha )
+            {
+                alpha = a_in[xx << wshift];
+            }
+            else
+            {
+                // If source has no alpha channel, use 50%
+                alpha = 128;
+            }
+
+            // Blend average U and alpha
+            u_out[(left >> wshift) + xx] =
+                ( (uint16_t)u_out[(left >> wshift) + xx] * ( 255 - alpha ) +
+                  (uint16_t)u_in[xx] * alpha ) >> 8;
+
+            // Blend V and alpha
+            v_out[(left >> wshift) + xx] =
+                ( (uint16_t)v_out[(left >> wshift) + xx] * ( 255 - alpha ) +
+                  (uint16_t)v_in[xx] * alpha ) >> 8;
+        }
+    }
+}
+
+// Assumes that the input buffer has the same dimensions
+// as the original title dimensions.
+// Re-positions the subtitle so it lies inside the non-cropped region of
+// the frame (with a small margin), then blends it in with blend().
+static void ApplySub( hb_filter_private_t * pv, hb_buffer_t * buf, hb_buffer_t * sub )
+{
+    int top, left, margin_top, margin_percent;
+
+    /*
+     * Percent of height of picture that form a margin that subtitles
+     * should not be displayed within.
+     */
+    margin_percent = 2;
+
+    /*
+     * If necessary, move the subtitle so it is not in a cropped zone.
+     * When it won't fit, we center it so we lose as much on both ends.
+     * Otherwise we try to leave a 20px or 2% margin around it.
+     */
+    margin_top = ( ( buf->f.height - pv->crop[0] - pv->crop[1] ) *
+                   margin_percent ) / 100;
+
+    if( margin_top > 20 )
+    {
+        /*
+         * A maximum margin of 20px regardless of height of the picture.
+         */
+        margin_top = 20;
+    }
+
+    if( sub->f.height > buf->f.height - pv->crop[0] - pv->crop[1] -
+        ( margin_top * 2 ) )
+    {
+        /*
+         * The subtitle won't fit in the cropped zone, so center
+         * it vertically so we fit in as much as we can.
+         */
+        top = pv->crop[0] + ( buf->f.height - pv->crop[0] -
+                              pv->crop[1] - sub->f.height ) / 2;
+    }
+    else if( sub->f.y < pv->crop[0] + margin_top )
+    {
+        /*
+         * The subtitle fits in the cropped zone, but is currently positioned
+         * within our top margin, so move it outside of our margin.
+         */
+        top = pv->crop[0] + margin_top;
+    }
+    else if( sub->f.y > buf->f.height - pv->crop[1] - margin_top - sub->f.height )
+    {
+        /*
+         * The subtitle fits in the cropped zone, and is not within the top
+         * margin but is within the bottom margin, so move it to be above
+         * the margin.
+         */
+        top = buf->f.height - pv->crop[1] - margin_top - sub->f.height;
+    }
+    else
+    {
+        /*
+         * The subtitle is fine where it is.
+         */
+        top = sub->f.y;
+    }
+
+    // Horizontal placement mirrors the vertical logic, with a fixed
+    // 20px side margin: center when it can't fit, otherwise clamp into
+    // [crop_left + 20, width - crop_right - 20 - sub_width].
+    if( sub->f.width > buf->f.width - pv->crop[2] - pv->crop[3] - 40 )
+        left = pv->crop[2] + ( buf->f.width - pv->crop[2] -
+                pv->crop[3] - sub->f.width ) / 2;
+    else if( sub->f.x < pv->crop[2] + 20 )
+        left = pv->crop[2] + 20;
+    else if( sub->f.x > buf->f.width - pv->crop[3] - 20 - sub->f.width )
+        left = buf->f.width - pv->crop[3] - 20 - sub->f.width;
+    else
+        left = sub->f.x;
+
+    blend( buf, sub, left, top );
+}
+
+// Assumes that the input buffer has the same dimensions
+// as the original title dimensions.
+// Walks the (start-time ordered) active subtitle list: expired subs are
+// removed and freed, subs covering this frame are rendered into it, and
+// the scan stops at the first sub that starts in the future.
+static void ApplyVOBSubs( hb_filter_private_t * pv, hb_buffer_t * buf )
+{
+    int ii = 0;
+    hb_buffer_t * sub;
+
+    while( ii < hb_list_count( pv->sub_list ) )
+    {
+        sub = hb_list_item( pv->sub_list, ii );
+        if( sub->s.stop <= buf->s.start )
+        {
+            // Subtitle stop is in the past, delete it.
+            // hb_list_rem only unlinks, so close the buffer too, and do
+            // not advance ii: the next item has slid into this slot.
+            hb_list_rem( pv->sub_list, sub );
+            hb_buffer_close( &sub );
+        }
+        else if( sub->s.start <= buf->s.start )
+        {
+            // The subtitle has started before this frame and ends
+            // after it. Render the subtitle into the frame.
+            while ( sub )
+            {
+                ApplySub( pv, buf, sub );
+                sub = sub->next;
+            }
+            ii++;
+        }
+        else
+        {
+            // The subtitle starts in the future. No need to continue.
+            break;
+        }
+    }
+}
+
+// Set up VOBSUB burn-in state: copy the title crop (used by ApplySub for
+// positioning) and create the list of active subtitles. Returns 0.
+static int vobsub_init( hb_filter_object_t * filter,
+                        hb_filter_init_t * init )
+{
+    hb_filter_private_t * pv = filter->private_data;
+
+    // VOBSUB render filter has no settings
+    memcpy( pv->crop, init->crop, sizeof( int[4] ) );
+
+    pv->sub_list = hb_list_init();
+
+    return 0;
+}
+
+// Tear down VOBSUB burn-in state.
+static void vobsub_close( hb_filter_object_t * filter )
+{
+    hb_filter_private_t * pv = filter->private_data;
+
+    // Nothing to free if init never ran (or close was already called)
+    if( !pv )
+    {
+        return;
+    }
+
+    // NOTE(review): hb_list_empty disposes of the list itself; confirm it
+    // also closes the queued hb_buffer_t items — otherwise any subtitles
+    // still active at shutdown leak here.
+    if( pv->sub_list )
+        hb_list_empty( &pv->sub_list );
+
+    free( pv );
+    filter->private_data = NULL;
+}
+
+// Work entry for VOBSUB burn-in: a zero-size buffer is the end-of-stream
+// marker and is passed through; otherwise drain newly decoded subtitles
+// from the subtitle fifo into the active list (which takes ownership of
+// them) and render the active ones onto the frame in place.
+static int vobsub_work( hb_filter_object_t * filter,
+                        hb_buffer_t ** buf_in,
+                        hb_buffer_t ** buf_out )
+{
+    hb_filter_private_t * pv = filter->private_data;
+    hb_buffer_t * in = *buf_in;
+    hb_buffer_t * sub;
+
+    if ( in->size <= 0 )
+    {
+        *buf_in = NULL;
+        *buf_out = in;
+        return HB_FILTER_DONE;
+    }
+
+    // Get any pending subtitles and add them to the active
+    // subtitle list
+    while( ( sub = hb_fifo_get( filter->subtitle->fifo_out ) ) )
+    {
+        hb_list_add( pv->sub_list, sub );
+    }
+
+    ApplyVOBSubs( pv, in );
+    *buf_in = NULL;
+    *buf_out = in;
+
+    return HB_FILTER_OK;
+}
+
+// Blend alpha for pixel (x, y) of a libass image: the layer opacity
+// (255 minus the A component of frame->color) scaled by the glyph's
+// per-pixel coverage from the 8-bit bitmap.
+static uint8_t ssaAlpha( ASS_Image *frame, int x, int y )
+{
+    unsigned frameA = ( frame->color ) & 0xff;
+    unsigned gliphA = frame->bitmap[y*frame->stride + x];
+
+    // Alpha for this pixel is the frame opacity (255 - frameA)
+    // multiplied by the glyph alpha (gliphA) for this pixel
+    unsigned alpha = (255 - frameA) * gliphA >> 8;
+
+    return (uint8_t)alpha;
+}
+
+// Convert one libass bitmap (a single-color layer) into a YUVA420P
+// hb_buffer_t overlay carrying per-pixel alpha, positioned at the
+// layer's destination coordinates. Returns NULL if allocation fails.
+static hb_buffer_t * RenderSSAFrame( ASS_Image * frame )
+{
+    hb_buffer_t *sub;
+    int xx, yy;
+
+    // The whole layer shares one packed RGBA color; convert RGB to YUV once.
+    unsigned r = ( frame->color >> 24 ) & 0xff;
+    unsigned g = ( frame->color >> 16 ) & 0xff;
+    unsigned b = ( frame->color >> 8 ) & 0xff;
+
+    int yuv = hb_rgb2yuv((r << 16) | (g << 8) | b );
+
+    unsigned frameY = (yuv >> 16) & 0xff;
+    unsigned frameV = (yuv >> 8 ) & 0xff;
+    unsigned frameU = (yuv >> 0 ) & 0xff;
+
+    sub = hb_pic_buffer_init( PIX_FMT_YUVA420P, frame->w, frame->h );
+    if( sub == NULL )
+        return NULL;
+
+    uint8_t *y_out, *u_out, *v_out, *a_out;
+    y_out = sub->plane[0].data;
+    u_out = sub->plane[1].data;
+    v_out = sub->plane[2].data;
+    a_out = sub->plane[3].data;
+
+    for( yy = 0; yy < frame->h; yy++ )
+    {
+        for( xx = 0; xx < frame->w; xx++ )
+        {
+            y_out[xx] = frameY;
+            // 4:2:0 chroma: one U/V sample per 2x2 luma block
+            if( ( yy & 1 ) == 0 )
+            {
+                u_out[xx>>1] = frameU;
+                v_out[xx>>1] = frameV;
+            }
+            a_out[xx] = ssaAlpha( frame, xx, yy );
+        }
+        y_out += sub->plane[0].stride;
+        if( ( yy & 1 ) == 0 )
+        {
+            u_out += sub->plane[1].stride;
+            v_out += sub->plane[2].stride;
+        }
+        a_out += sub->plane[3].stride;
+    }
+    sub->f.width = frame->w;
+    sub->f.height = frame->h;
+    sub->f.x = frame->dst_x;
+    sub->f.y = frame->dst_y;
+
+    return sub;
+}
+
+// Render all SSA events active at this frame's start time (the 90kHz
+// timestamp is converted to milliseconds for libass) and blend each
+// resulting bitmap layer into the frame.
+static void ApplySSASubs( hb_filter_private_t * pv, hb_buffer_t * buf )
+{
+    ASS_Image *frameList;
+    hb_buffer_t *sub;
+    frameList = ass_render_frame( pv->renderer, pv->ssaTrack,
+                                  buf->s.start / 90, NULL );
+    if ( !frameList )
+        return;
+
+    // frameList is a linked list of single-color layers owned by libass
+    ASS_Image *frame;
+    for (frame = frameList; frame; frame = frame->next) {
+        sub = RenderSSAFrame( frame );
+        if( sub )
+        {
+            ApplySub( pv, buf, sub );
+            hb_buffer_close( &sub );
+        }
+    }
+}
+
+// libass message callback: forward messages to HandBrake's logger.
+static void ssa_log(int level, const char *fmt, va_list args, void *data)
+{
+    if ( level < 5 ) // same as default verbosity when no callback is set
+    {
+        hb_valog( 1, "[ass]", fmt, args );
+    }
+}
+
+// Set up libass for SSA burn-in: create the library and renderer, load
+// fonts embedded in the title, create the event track (fed by ssa_work),
+// and size the renderer to the post-crop frame with the job's pixel
+// aspect ratio. Returns 0 on success, 1 on any initialization failure.
+static int ssa_init( hb_filter_object_t * filter,
+                     hb_filter_init_t * init )
+{
+    hb_filter_private_t * pv = filter->private_data;
+
+    memcpy( pv->crop, init->crop, sizeof( int[4] ) );
+
+    pv->ssa = ass_library_init();
+    if ( !pv->ssa ) {
+        // NOTE(review): hb_error/hb_log messages normally carry no
+        // trailing '\n'; the ones in this function look inconsistent.
+        hb_error( "decssasub: libass initialization failed\n" );
+        return 1;
+    }
+
+    // Redirect libass output to hb_log
+    ass_set_message_cb( pv->ssa, ssa_log, NULL );
+
+    // Load embedded fonts
+    hb_list_t * list_attachment = init->job->title->list_attachment;
+    int i;
+    for ( i = 0; i < hb_list_count(list_attachment); i++ )
+    {
+        hb_attachment_t * attachment = hb_list_item( list_attachment, i );
+
+        if ( attachment->type == FONT_TTF_ATTACH )
+        {
+            ass_add_font(
+                pv->ssa,
+                attachment->name,
+                attachment->data,
+                attachment->size );
+        }
+    }
+
+    ass_set_extract_fonts( pv->ssa, 1 );
+    ass_set_style_overrides( pv->ssa, NULL );
+
+    pv->renderer = ass_renderer_init( pv->ssa );
+    if ( !pv->renderer ) {
+        hb_log( "decssasub: renderer initialization failed\n" );
+        return 1;
+    }
+
+    ass_set_use_margins( pv->renderer, 0 );
+    ass_set_hinting( pv->renderer, ASS_HINTING_LIGHT ); // VLC 1.0.4 uses this
+    ass_set_font_scale( pv->renderer, 1.0 );
+    ass_set_line_spacing( pv->renderer, 1.0 );
+
+    // Setup default font family
+    //
+    // SSA v4.00 requires that "Arial" be the default font
+    const char *font = NULL;
+    const char *family = "Arial";
+    // NOTE: This can sometimes block for several *seconds*.
+    //       It seems that process_fontdata() for some embedded fonts is slow.
+    ass_set_fonts( pv->renderer, font, family, /*haveFontConfig=*/1, NULL, 1 );
+
+    // Setup track state
+    pv->ssaTrack = ass_new_track( pv->ssa );
+    if ( !pv->ssaTrack ) {
+        hb_log( "decssasub: ssa track initialization failed\n" );
+        return 1;
+    }
+
+    // NOTE: The codec extradata is expected to be in MKV format
+    ass_process_codec_private( pv->ssaTrack,
+        (char *)filter->subtitle->extradata, filter->subtitle->extradata_size );
+
+    // Render at the cropped frame size so positioning matches the output
+    int width = init->width - ( init->crop[2] + init->crop[3] );
+    int height = init->height - ( init->crop[0] + init->crop[1] );
+    ass_set_frame_size( pv->renderer, width, height);
+
+    double par = (double)init->par_width / init->par_height;
+    ass_set_aspect_ratio( pv->renderer, 1, par );
+
+    return 0;
+}
+
+// Tear down SSA burn-in state; safe to call after a partial ssa_init
+// failure since each libass object is checked before being freed.
+static void ssa_close( hb_filter_object_t * filter )
+{
+    hb_filter_private_t * pv = filter->private_data;
+
+    if( !pv )
+    {
+        return;
+    }
+
+    if ( pv->ssaTrack )
+        ass_free_track( pv->ssaTrack );
+    if ( pv->renderer )
+        ass_renderer_done( pv->renderer );
+    if ( pv->ssa )
+        ass_library_done( pv->ssa );
+
+    free( pv );
+    filter->private_data = NULL;
+}
+
+// Work entry for SSA burn-in: a zero-size buffer is the end-of-stream
+// marker and is passed through; otherwise feed newly decoded SSA packets
+// to libass and render the active events onto the frame in place.
+static int ssa_work( hb_filter_object_t * filter,
+                     hb_buffer_t ** buf_in,
+                     hb_buffer_t ** buf_out )
+{
+    hb_filter_private_t * pv = filter->private_data;
+    hb_buffer_t * in = *buf_in;
+    hb_buffer_t * sub;
+
+    if ( in->size <= 0 )
+    {
+        *buf_in = NULL;
+        *buf_out = in;
+        return HB_FILTER_DONE;
+    }
+
+    // Get any pending subtitles and add them to the active
+    // subtitle list
+    while( ( sub = hb_fifo_get( filter->subtitle->fifo_out ) ) )
+    {
+        // Parse MKV-SSA packet; timestamps are 90kHz -> ms.
+        ass_process_chunk( pv->ssaTrack, (char*)sub->data, sub->size,
+                           sub->s.start / 90,
+                           (sub->s.stop - sub->s.start) / 90 );
+        // libass copies the chunk, so the buffer must be closed here
+        // or every subtitle packet leaks.
+        hb_buffer_close( &sub );
+    }
+
+    ApplySSASubs( pv, in );
+    *buf_in = NULL;
+    *buf_out = in;
+
+    return HB_FILTER_OK;
+}
+
+// Common init: locate the subtitle marked for burn-in (dest == RENDERSUB)
+// in the title's subtitle list, record its source type, and dispatch to
+// the matching implementation's init. Returns non-zero on failure.
+static int hb_rendersub_init( hb_filter_object_t * filter,
+                              hb_filter_init_t * init )
+{
+    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+    hb_filter_private_t * pv = filter->private_data;
+    hb_subtitle_t *subtitle;
+    int ii;
+
+    // Find the subtitle we need
+    for( ii = 0; ii < hb_list_count(init->job->title->list_subtitle); ii++ )
+    {
+        subtitle = hb_list_item( init->job->title->list_subtitle, ii );
+        if( subtitle && subtitle->config.dest == RENDERSUB )
+        {
+            // Found it
+            filter->subtitle = subtitle;
+            pv->type = subtitle->source;
+            break;
+        }
+    }
+    // NOTE(review): this relies on filter->subtitle being NULL before the
+    // loop when nothing matched — confirm the filter object starts zeroed.
+    if( filter->subtitle == NULL )
+    {
+        hb_error("rendersub: no subtitle marked for burn");
+        return 1;
+    }
+
+    switch( pv->type )
+    {
+        case VOBSUB:
+        {
+            return vobsub_init( filter, init );
+        } break;
+
+        case SSASUB:
+        {
+            return ssa_init( filter, init );
+        } break;
+
+        default:
+        {
+            hb_error("rendersub: unsupported subtitle format %d", pv->type );
+            return 1;
+        } break;
+    }
+}
+
+// Common work entry: forward each frame to the implementation selected
+// at init time. Returns an HB_FILTER_* status, or 1 (treated as failure)
+// for an unsupported type.
+static int hb_rendersub_work( hb_filter_object_t * filter,
+                                 hb_buffer_t ** buf_in,
+                                 hb_buffer_t ** buf_out )
+{
+    hb_filter_private_t * pv = filter->private_data;
+    switch( pv->type )
+    {
+        case VOBSUB:
+        {
+            return vobsub_work( filter, buf_in, buf_out );
+        } break;
+
+        case SSASUB:
+        {
+            return ssa_work( filter, buf_in, buf_out );
+        } break;
+
+        default:
+        {
+            hb_error("rendersub: unsupported subtitle format %d", pv->type );
+            return 1;
+        } break;
+    }
+}
+
+// Common close entry: forward teardown to the implementation selected
+// at init time; each implementation frees pv and clears private_data.
+static void hb_rendersub_close( hb_filter_object_t * filter )
+{
+    hb_filter_private_t * pv = filter->private_data;
+    switch( pv->type )
+    {
+        case VOBSUB:
+        {
+            vobsub_close( filter );
+        } break;
+
+        case SSASUB:
+        {
+            ssa_close( filter );
+        } break;
+
+        default:
+        {
+            hb_error("rendersub: unsupported subtitle format %d", pv->type );
+        } break;
+    }
+}
+
diff --git a/libhb/rotate.c b/libhb/rotate.c
index efa825a91..5f9558764 100644
--- a/libhb/rotate.c
+++ b/libhb/rotate.c
@@ -9,19 +9,18 @@
// Mode 3: Flip both horizontally and vertically (modes 1 and 2 combined)
typedef struct rotate_arguments_s {
- uint8_t **dst;
+ hb_buffer_t *dst;
+ hb_buffer_t *src;
int stop;
} rotate_arguments_t;
struct hb_filter_private_s
{
- int pix_fmt;
- int width[3];
- int height[3];
-
int mode;
-
- int ref_stride[3];
+ int width;
+ int height;
+ int par_width;
+ int par_height;
int cpu_count;
@@ -29,60 +28,33 @@ struct hb_filter_private_s
hb_lock_t ** rotate_begin_lock; // Thread has work
hb_lock_t ** rotate_complete_lock; // Thread has completed work
rotate_arguments_t *rotate_arguments; // Arguments to thread for work
-
- AVPicture pic_in;
- AVPicture pic_out;
- hb_buffer_t * buf_out;
- hb_buffer_t * buf_settings;
};
-hb_filter_private_t * hb_rotate_init( int pix_fmt,
- int width,
- int height,
- char * settings );
+static int hb_rotate_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
-int hb_rotate_work( hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv );
+static int hb_rotate_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
-void hb_rotate_close( hb_filter_private_t * pv );
+static void hb_rotate_close( hb_filter_object_t * filter );
+
+static int hb_rotate_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info );
hb_filter_object_t hb_filter_rotate =
{
- FILTER_ROTATE,
- "Rotate (flips image axes)",
- NULL,
- hb_rotate_init,
- hb_rotate_work,
- hb_rotate_close,
+ .id = HB_FILTER_ROTATE,
+ .enforce_order = 0,
+ .name = "Rotate (rotate & flip image axes)",
+ .settings = NULL,
+ .init = hb_rotate_init,
+ .work = hb_rotate_work,
+ .close = hb_rotate_close,
+ .info = hb_rotate_info
};
-static void rotate_filter_line( uint8_t *dst,
- uint8_t *cur,
- int plane,
- hb_filter_private_t * pv )
-{
-
- int w = pv->width[plane];
-
- int x;
- for( x = 0; x < w; x++)
- {
- if( pv->mode & 2 )
- {
- dst[x] = cur[w-x-1];
- }
- else
- {
- dst[x] = cur[x];
- }
- }
-}
-
typedef struct rotate_thread_arg_s {
hb_filter_private_t *pv;
int segment;
@@ -99,8 +71,10 @@ void rotate_filter_thread( void *thread_args_v )
int plane;
int segment, segment_start, segment_stop;
rotate_thread_arg_t *thread_args = thread_args_v;
- uint8_t **dst;
- int y, w, h, ref_stride;
+ uint8_t *dst;
+ hb_buffer_t *dst_buf;
+ hb_buffer_t *src_buf;
+ int y;
pv = thread_args->pv;
@@ -137,13 +111,18 @@ void rotate_filter_thread( void *thread_args_v )
/*
* Process all three planes, but only this segment of it.
*/
+ dst_buf = rotate_work->dst;
+ src_buf = rotate_work->src;
for( plane = 0; plane < 3; plane++)
{
+ int dst_stride, src_stride;
+
+ dst = dst_buf->plane[plane].data;
+ dst_stride = dst_buf->plane[plane].stride;
+ src_stride = src_buf->plane[plane].stride;
- dst = rotate_work->dst;
- w = pv->width[plane];
- h = pv->height[plane];
- ref_stride = pv->ref_stride[plane];
+ int h = src_buf->plane[plane].height;
+ int w = src_buf->plane[plane].width;
segment_start = ( h / pv->cpu_count ) * segment;
if( segment == pv->cpu_count - 1 )
{
@@ -158,21 +137,35 @@ void rotate_filter_thread( void *thread_args_v )
for( y = segment_start; y < segment_stop; y++ )
{
uint8_t * cur;
-
- if( pv->mode & 1 )
- {
- cur = &pv->pic_in.data[plane][(h-y-1)*pv->pic_in.linesize[plane]];
- }
- else
+ int x, xo, yo;
+
+ cur = &src_buf->plane[plane].data[y * src_stride];
+ for( x = 0; x < w; x++)
{
- cur = &pv->pic_in.data[plane][(y)*pv->pic_in.linesize[plane]];
+ if( pv->mode & 1 )
+ {
+ yo = h - y - 1;
+ }
+ else
+ {
+ yo = y;
+ }
+ if( pv->mode & 2 )
+ {
+ xo = w - x - 1;
+ }
+ else
+ {
+ xo = x;
+ }
+ if( pv->mode & 4 ) // Rotate 90 clockwise
+ {
+ int tmp = xo;
+ xo = h - yo - 1;
+ yo = tmp;
+ }
+ dst[yo*dst_stride + xo] = cur[x];
}
- uint8_t *dst2 = &dst[plane][y*w];
-
- rotate_filter_line( dst2,
- cur,
- plane,
- pv );
}
}
/*
@@ -185,14 +178,16 @@ void rotate_filter_thread( void *thread_args_v )
/*
- * threaded rotate - each thread rptates a single segment of all
+ * threaded rotate - each thread rotates a single segment of all
* three planes. Where a segment is defined as the frame divided by
* the number of CPUs.
*
* This function blocks until the frame is rotated.
*/
-static void rotate_filter( uint8_t ** dst,
- hb_filter_private_t * pv )
+static void rotate_filter(
+ hb_filter_private_t * pv,
+ hb_buffer_t *out,
+ hb_buffer_t *in )
{
int segment;
@@ -202,7 +197,8 @@ static void rotate_filter( uint8_t ** dst,
/*
* Setup the work for this plane.
*/
- pv->rotate_arguments[segment].dst = dst;
+ pv->rotate_arguments[segment].dst = out;
+ pv->rotate_arguments[segment].src = in;
/*
* Let the thread for this plane know that we've setup work
@@ -232,37 +228,17 @@ static void rotate_filter( uint8_t ** dst,
}
-hb_filter_private_t * hb_rotate_init( int pix_fmt,
- int width,
- int height,
- char * settings )
+static int hb_rotate_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
{
- if( pix_fmt != PIX_FMT_YUV420P )
- {
- return 0;
- }
-
- hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
-
- pv->pix_fmt = pix_fmt;
-
- pv->width[0] = width;
- pv->height[0] = height;
- pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
-
- pv->buf_out = hb_video_buffer_init( width, height );
- pv->buf_settings = hb_buffer_init( 0 );
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
pv->mode = MODE_DEFAULT;
- pv->ref_stride[0] = pv->width[0];
- pv->ref_stride[1] = pv->width[1];
- pv->ref_stride[2] = pv->width[2];
-
- if( settings )
+ if( filter->settings )
{
- sscanf( settings, "%d",
+ sscanf( filter->settings, "%d",
&pv->mode );
}
@@ -308,25 +284,64 @@ hb_filter_private_t * hb_rotate_init( int pix_fmt,
hb_error( "rotate could not create threads" );
}
}
+ // Set init width/height so the next stage in the pipline
+ // knows what it will be getting
+ if( pv->mode & 4 )
+ {
+ // 90 degree rotation, exchange width and height
+ int tmp = init->width;
+ init->width = init->height;
+ init->height = tmp;
+
+ tmp = init->par_width;
+ init->par_width = init->par_height;
+ init->par_height = tmp;
+ }
+ pv->width = init->width;
+ pv->height = init->height;
+ pv->par_width = init->par_width;
+ pv->par_height = init->par_height;
- return pv;
+ return 0;
}
-void hb_rotate_close( hb_filter_private_t * pv )
+static int hb_rotate_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info )
{
+ hb_filter_private_t * pv = filter->private_data;
if( !pv )
+ return 1;
+
+ memset( info, 0, sizeof( hb_filter_info_t ) );
+ info->out.width = pv->width;
+ info->out.height = pv->height;
+ info->out.par_width = pv->par_width;
+ info->out.par_height = pv->par_height;
+ int pos = 0;
+ if( pv->mode & 1 )
+ pos += sprintf( &info->human_readable_desc[pos], "flip vertical" );
+ if( pv->mode & 2 )
{
- return;
+ if( pos )
+ pos += sprintf( &info->human_readable_desc[pos], "/" );
+ pos += sprintf( &info->human_readable_desc[pos], "flip horizontal" );
}
-
- /* Cleanup frame buffers */
- if( pv->buf_out )
+ if( pv->mode & 4 )
{
- hb_buffer_close( &pv->buf_out );
+ if( pos )
+ pos += sprintf( &info->human_readable_desc[pos], "/" );
+ pos += sprintf( &info->human_readable_desc[pos], "rotate 90" );
}
- if (pv->buf_settings )
+ return 0;
+}
+
+static void hb_rotate_close( hb_filter_object_t * filter )
+{
+ hb_filter_private_t * pv = filter->private_data;
+
+ if( !pv )
{
- hb_buffer_close( &pv->buf_settings );
+ return;
}
int i;
@@ -352,36 +367,43 @@ void hb_rotate_close( hb_filter_private_t * pv )
free( pv->rotate_arguments );
free( pv );
+ filter->private_data = NULL;
}
-int hb_rotate_work( hb_buffer_t * buf_in,
- hb_buffer_t ** buf_out,
- int pix_fmt,
- int width,
- int height,
- hb_filter_private_t * pv )
+static int hb_rotate_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
{
- if( !pv ||
- pix_fmt != pv->pix_fmt ||
- width != pv->width[0] ||
- height != pv->height[0] )
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in, * out;
+
+ if ( in->size <= 0 )
{
- return FILTER_FAILED;
+ *buf_out = in;
+ *buf_in = NULL;
+ return HB_FILTER_DONE;
}
- avpicture_fill( &pv->pic_in, buf_in->data,
- pix_fmt, width, height );
+ int width_out, height_out;
+ if ( pv->mode & 4 )
+ {
+ width_out = in->f.height;
+ height_out = in->f.width;
+ }
+ else
+ {
+ width_out = in->f.width;
+ height_out = in->f.height;
+ }
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
- pix_fmt, width, height );
+ out = hb_video_buffer_init( width_out, height_out );
- //do stuff here
- rotate_filter( pv->pic_out.data, pv );
- hb_buffer_copy_settings( pv->buf_out, buf_in );
+ // Rotate!
+ rotate_filter( pv, out, in );
+ out->s = in->s;
+ hb_buffer_move_subs( out, in );
- *buf_out = pv->buf_out;
+ *buf_out = out;
- return FILTER_OK;
+ return HB_FILTER_OK;
}
-
-
diff --git a/libhb/scan.c b/libhb/scan.c
index ead4f2511..29c9caaee 100644
--- a/libhb/scan.c
+++ b/libhb/scan.c
@@ -302,6 +302,7 @@ static void ScanFunc( void * _data )
job->list_audio = hb_list_init();
job->list_subtitle = hb_list_init();
+ job->list_filter = hb_list_init();
job->mux = HB_MUX_MP4;
}
@@ -553,8 +554,6 @@ static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
for( i = 0; i < data->preview_count; i++ )
{
int j;
- FILE * file_preview;
- char filename[1024];
if ( *data->die )
{
@@ -645,7 +644,7 @@ static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
while( ( buf_es = hb_list_item( list_es, 0 ) ) )
{
hb_list_rem( list_es, buf_es );
- if( buf_es->id == title->video_id && vid_buf == NULL )
+ if( buf_es->s.id == title->video_id && vid_buf == NULL )
{
vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
}
@@ -688,13 +687,13 @@ static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
remember_info( info_list, &vid_info );
if( is_close_to( vid_info.rate_base, 900900, 100 ) &&
- ( vid_buf->flags & PIC_FLAG_REPEAT_FIRST_FIELD ) )
+ ( vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD ) )
{
/* Potentially soft telecine material */
pulldown_count++;
}
- if( vid_buf->flags & PIC_FLAG_REPEAT_FRAME )
+ if( vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME )
{
// AVCHD-Lite specifies that all streams are
// 50 or 60 fps. To produce 25 or 30 fps, camera
@@ -725,20 +724,7 @@ static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
if( data->store_previews )
{
- hb_get_tempory_filename( data->h, filename, "%d_%d_%d",
- hb_get_instance_id(data->h), title->index, i );
-
- file_preview = fopen( filename, "wb" );
- if( file_preview )
- {
- fwrite( vid_buf->data, vid_info.width * vid_info.height * 3 / 2,
- 1, file_preview );
- fclose( file_preview );
- }
- else
- {
- hb_log( "scan: fopen failed (%s)", filename );
- }
+ hb_save_preview( data->h, title->index, i, vid_buf );
}
/* Detect black borders */
@@ -983,7 +969,7 @@ static void LookForAudio( hb_title_t * title, hb_buffer_t * b )
{
audio = hb_list_item( title->list_audio, i );
/* check if this elementary stream is one we want */
- if ( audio->id == b->id )
+ if ( audio->id == b->s.id )
{
break;
}
diff --git a/libhb/stream.c b/libhb/stream.c
index bf9a76ff8..9f1bba283 100644
--- a/libhb/stream.c
+++ b/libhb/stream.c
@@ -1184,14 +1184,13 @@ static int isRecoveryPoint( const uint8_t *buf, int len )
{
uint8_t *nal;
int nal_len;
- int ii, type, size, start;
+ int ii, type, size;
int recovery_frames = 0;
CreateDecodedNAL( &nal, &nal_len, buf, len );
for ( ii = 0; ii+1 < nal_len; )
{
- start = ii;
type = 0;
while ( ii+1 < nal_len )
{
@@ -3348,15 +3347,15 @@ static hb_buffer_t * hb_ps_stream_decode( hb_stream_t *stream )
switch (stream->pes.list[idx].stream_kind)
{
case A:
- buf->type = AUDIO_BUF;
+ buf->s.type = AUDIO_BUF;
break;
case V:
- buf->type = VIDEO_BUF;
+ buf->s.type = VIDEO_BUF;
break;
default:
- buf->type = OTHER_BUF;
+ buf->s.type = OTHER_BUF;
break;
}
@@ -3364,25 +3363,25 @@ static hb_buffer_t * hb_ps_stream_decode( hb_stream_t *stream )
{
// we're looking for the first video frame because we're
// doing random access during 'scan'
- if ( buf->type != VIDEO_BUF ||
+ if ( buf->s.type != VIDEO_BUF ||
!isIframe( stream, buf->data, buf->size ) )
{
// not the video stream or didn't find an I frame
// but we'll only wait 255 video frames for an I frame.
- if ( buf->type != VIDEO_BUF || ++stream->need_keyframe < 512 )
+ if ( buf->s.type != VIDEO_BUF || ++stream->need_keyframe < 512 )
{
continue;
}
}
stream->need_keyframe = 0;
}
- if ( buf->type == VIDEO_BUF )
+ if ( buf->s.type == VIDEO_BUF )
++stream->frames;
- buf->id = get_id( &stream->pes.list[idx] );
- buf->pcr = stream->pes.scr;
- buf->start = pes_info.pts;
- buf->renderOffset = pes_info.dts;
+ buf->s.id = get_id( &stream->pes.list[idx] );
+ buf->s.pcr = stream->pes.scr;
+ buf->s.start = pes_info.pts;
+ buf->s.renderOffset = pes_info.dts;
memmove( buf->data, buf->data + pes_info.header_len,
buf->size - pes_info.header_len );
buf->size -= pes_info.header_len;
@@ -4150,7 +4149,7 @@ static void hb_ts_resolve_pid_types(hb_stream_t *stream)
break;
int idx;
- idx = index_of_id( stream, buf->id );
+ idx = index_of_id( stream, buf->s.id );
if (idx < 0 || stream->pes.list[idx].stream_kind != U )
continue;
@@ -4226,7 +4225,7 @@ static void hb_ps_resolve_stream_types(hb_stream_t *stream)
break;
int idx;
- idx = index_of_id( stream, buf->id );
+ idx = index_of_id( stream, buf->s.id );
if (idx < 0 || stream->pes.list[idx].stream_kind != U )
continue;
@@ -4404,53 +4403,53 @@ static hb_buffer_t * generate_output_data(hb_stream_t *stream, int curstream)
buf = tmp;
}
- buf->id = get_id( &stream->pes.list[pes_idx] );
+ buf->s.id = get_id( &stream->pes.list[pes_idx] );
switch (stream->pes.list[pes_idx].stream_kind)
{
case A:
- buf->type = AUDIO_BUF;
+ buf->s.type = AUDIO_BUF;
break;
case V:
- buf->type = VIDEO_BUF;
+ buf->s.type = VIDEO_BUF;
break;
default:
- buf->type = OTHER_BUF;
+ buf->s.type = OTHER_BUF;
break;
}
- if( b->cur > stream->ts.pcr_out )
+ if( b->sequence > stream->ts.pcr_out )
{
// we have a new pcr
- stream->ts.pcr_out = b->cur;
- buf->pcr = b->pcr;
- if( b->cur >= stream->ts.pcr_discontinuity )
+ stream->ts.pcr_out = b->sequence;
+ buf->s.pcr = b->s.pcr;
+ if( b->sequence >= stream->ts.pcr_discontinuity )
stream->ts.pcr_current = stream->ts.pcr_discontinuity;
}
else
{
- buf->pcr = -1;
+ buf->s.pcr = -1;
}
// check if this packet was referenced to an older pcr and if that
// pcr was prior to a discontinuity.
- if( b->cur < stream->ts.pcr_current )
+ if( b->sequence < stream->ts.pcr_current )
{
// we've sent up a new pcr but have a packet referenced to an
// old pcr and the difference was enough to trigger a discontinuity
// correction. smash the timestamps or we'll mess up the correction.
- buf->start = -1;
- buf->renderOffset = -1;
- buf->stop = -1;
- buf->pcr = -1;
+ buf->s.start = -1;
+ buf->s.renderOffset = -1;
+ buf->s.stop = -1;
+ buf->s.pcr = -1;
}
else
{
// put the PTS & possible DTS into 'start' & 'renderOffset'
// then strip off the PES header.
- buf->start = pes_info.pts;
- buf->renderOffset = pes_info.dts;
+ buf->s.start = pes_info.pts;
+ buf->s.renderOffset = pes_info.dts;
}
memcpy( buf->data, tdat, size );
}
@@ -4704,16 +4703,16 @@ hb_buffer_t * hb_ts_decode_pkt( hb_stream_t *stream, const uint8_t * pkt )
// Output data is ready.
// remember the pcr that was in effect when we started
// this packet.
- stream->ts.list[curstream].buf->cur = stream->ts.pcr_in;
- stream->ts.list[curstream].buf->pcr = stream->ts.pcr;
+ stream->ts.list[curstream].buf->sequence = stream->ts.pcr_in;
+ stream->ts.list[curstream].buf->s.pcr = stream->ts.pcr;
hb_ts_stream_append_pkt(stream, curstream, pkt + 4 + adapt_len,
184 - adapt_len);
return buf;
}
}
// remember the pcr that was in effect when we started this packet.
- stream->ts.list[curstream].buf->cur = stream->ts.pcr_in;
- stream->ts.list[curstream].buf->pcr = stream->ts.pcr;
+ stream->ts.list[curstream].buf->sequence = stream->ts.pcr_in;
+ stream->ts.list[curstream].buf->s.pcr = stream->ts.pcr;
}
// Add the payload for this packet to the current buffer
@@ -5436,26 +5435,26 @@ hb_buffer_t * hb_ffmpeg_read( hb_stream_t *stream )
buf = hb_buffer_init( stream->ffmpeg_pkt->size );
memcpy( buf->data, stream->ffmpeg_pkt->data, stream->ffmpeg_pkt->size );
}
- buf->id = stream->ffmpeg_pkt->stream_index;
+ buf->s.id = stream->ffmpeg_pkt->stream_index;
// compute a conversion factor to go from the ffmpeg
// timebase for the stream to HB's 90kHz timebase.
AVStream *s = stream->ffmpeg_ic->streams[stream->ffmpeg_pkt->stream_index];
double tsconv = 90000. * (double)s->time_base.num / (double)s->time_base.den;
- buf->start = av_to_hb_pts( stream->ffmpeg_pkt->pts, tsconv );
- buf->renderOffset = av_to_hb_pts( stream->ffmpeg_pkt->dts, tsconv );
- if ( buf->renderOffset >= 0 && buf->start == -1 )
+ buf->s.start = av_to_hb_pts( stream->ffmpeg_pkt->pts, tsconv );
+ buf->s.renderOffset = av_to_hb_pts( stream->ffmpeg_pkt->dts, tsconv );
+ if ( buf->s.renderOffset >= 0 && buf->s.start == -1 )
{
- buf->start = buf->renderOffset;
+ buf->s.start = buf->s.renderOffset;
}
- else if ( buf->renderOffset == -1 && buf->start >= 0 )
+ else if ( buf->s.renderOffset == -1 && buf->s.start >= 0 )
{
- buf->renderOffset = buf->start;
+ buf->s.renderOffset = buf->s.start;
}
/*
- * Fill out buf->stop for subtitle packets
+ * Fill out buf->s.stop for subtitle packets
*
* libavcodec's MKV demuxer stores the duration of UTF-8 subtitles (CODEC_ID_TEXT)
* in the 'convergence_duration' field for some reason.
@@ -5477,30 +5476,30 @@ hb_buffer_t * hb_ffmpeg_read( hb_stream_t *stream )
switch ( codec_type )
{
case AVMEDIA_TYPE_VIDEO:
- buf->type = VIDEO_BUF;
+ buf->s.type = VIDEO_BUF;
break;
case AVMEDIA_TYPE_AUDIO:
- buf->type = AUDIO_BUF;
+ buf->s.type = AUDIO_BUF;
break;
case AVMEDIA_TYPE_SUBTITLE:
- buf->type = SUBTITLE_BUF;
+ buf->s.type = SUBTITLE_BUF;
break;
default:
- buf->type = OTHER_BUF;
+ buf->s.type = OTHER_BUF;
break;
}
if ( ffmpeg_pkt_codec == CODEC_ID_TEXT ) {
int64_t ffmpeg_pkt_duration = stream->ffmpeg_pkt->convergence_duration;
int64_t buf_duration = av_to_hb_pts( ffmpeg_pkt_duration, tsconv );
- buf->stop = buf->start + buf_duration;
+ buf->s.stop = buf->s.start + buf_duration;
}
if ( ffmpeg_pkt_codec == CODEC_ID_MOV_TEXT ) {
int64_t ffmpeg_pkt_duration = stream->ffmpeg_pkt->duration;
int64_t buf_duration = av_to_hb_pts( ffmpeg_pkt_duration, tsconv );
- buf->stop = buf->start + buf_duration;
+ buf->s.stop = buf->s.start + buf_duration;
}
/*
@@ -5512,7 +5511,7 @@ hb_buffer_t * hb_ffmpeg_read( hb_stream_t *stream )
* below handles both the chapters & no chapters case.
*/
if ( stream->ffmpeg_pkt->stream_index == stream->ffmpeg_video_id &&
- buf->start >= stream->chapter_end )
+ buf->s.start >= stream->chapter_end )
{
hb_chapter_t *chapter = hb_list_item( stream->title->list_chapter,
stream->chapter+1 );
@@ -5520,15 +5519,16 @@ hb_buffer_t * hb_ffmpeg_read( hb_stream_t *stream )
{
stream->chapter++;
stream->chapter_end += chapter->duration;
- buf->new_chap = stream->chapter + 1;
+ buf->s.new_chap = stream->chapter + 1;
hb_deep_log( 2, "ffmpeg_read starting chapter %i at %"PRId64,
- buf->new_chap, buf->start);
+ stream->chapter + 1, buf->s.start);
} else {
// Must have run out of chapters, stop looking.
stream->chapter_end = INT64_MAX;
+ buf->s.new_chap = 0;
}
} else {
- buf->new_chap = 0;
+ buf->s.new_chap = 0;
}
av_free_packet( stream->ffmpeg_pkt );
return buf;
diff --git a/libhb/sync.c b/libhb/sync.c
index 2c5bb9024..130800595 100644
--- a/libhb/sync.c
+++ b/libhb/sync.c
@@ -134,7 +134,12 @@ hb_work_object_t * hb_sync_init( hb_job_t * job )
ret = w = hb_get_work( WORK_SYNC_VIDEO );
w->private_data = pv;
w->fifo_in = job->fifo_raw;
- w->fifo_out = job->fifo_sync;
+
+ // When doing subtitle indepth scan, the pipeline ends at sync
+ if ( !job->indepth_scan )
+ w->fifo_out = job->fifo_sync;
+ else
+ w->fifo_out = NULL;
pv->job = job;
pv->common->pts_offset = INT64_MIN;
@@ -249,8 +254,6 @@ void syncVideoClose( hb_work_object_t * w )
***********************************************************************
*
**********************************************************************/
-static hb_buffer_t * copy_subtitle( hb_buffer_t * src );
-
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out )
{
@@ -269,7 +272,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
/* Wait till we can determine the initial pts of all streams */
if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
{
- pv->common->first_pts[0] = next->start;
+ pv->common->first_pts[0] = next->s.start;
hb_lock( pv->common->mutex );
while( pv->common->pts_offset == INT64_MIN )
{
@@ -289,7 +292,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
}
hb_lock( pv->common->mutex );
- next_start = next->start - pv->common->video_pts_slip;
+ next_start = next->s.start - pv->common->video_pts_slip;
hb_unlock( pv->common->mutex );
/* Wait for start of point-to-point encoding */
@@ -319,7 +322,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_DONE;
}
if ( pv->common->count_frames < job->frame_to_start ||
- next->start < job->pts_to_start )
+ next->s.start < job->pts_to_start )
{
// Flush any subtitles that have pts prior to the
// current frame
@@ -328,7 +331,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
subtitle = hb_list_item( job->list_subtitle, i );
while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
{
- if ( sub->start > next->start )
+ if ( sub->s.start > next->s.start )
break;
sub = hb_fifo_get( subtitle->fifo_raw );
hb_buffer_close( &sub );
@@ -397,22 +400,22 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_close( &next );
pv->common->first_pts[0] = INT64_MAX - 1;
- cur->start = sync->next_start;
- cur->stop = cur->start + 90000. / ((double)job->vrate / (double)job->vrate_base);
- sync->next_start += cur->stop - cur->start;;
+ cur->s.start = sync->next_start;
+ cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
+ sync->next_start += cur->s.stop - cur->s.start;;
/* Make sure last frame is reflected in frame count */
pv->common->count_frames++;
/* Push the frame to the renderer */
- hb_fifo_push( job->fifo_sync, cur );
+ *buf_out = cur;
sync->cur = NULL;
/* we got an end-of-stream. Feed it downstream & signal that
* we're done. Note that this means we drop the final frame of
* video (we don't know its duration). On DVDs the final frame
* is often strange and dropping it seems to be a good idea. */
- *buf_out = hb_buffer_init( 0 );
+ (*buf_out)->next = hb_buffer_init( 0 );
/*
* Push through any subtitle EOFs in case they were not synced through.
@@ -460,7 +463,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
{
// Drop an empty buffer into our output to ensure that things
// get flushed all the way out.
- hb_log( "sync: reached pts %"PRId64", exiting early", cur->start );
+ hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
hb_buffer_close( &sync->cur );
hb_buffer_close( &next );
*buf_out = hb_buffer_init( 0 );
@@ -482,7 +485,7 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
if( sync->first_frame )
{
/* This is our first frame */
- if ( cur->start > 0 )
+ if ( cur->s.start > 0 )
{
/*
* The first pts from a dvd should always be zero but
@@ -492,8 +495,8 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
* as if it started at zero so that our audio timing will
* be in sync.
*/
- hb_log( "sync: first pts is %"PRId64, cur->start );
- cur->start = 0;
+ hb_log( "sync: first pts is %"PRId64, cur->s.start );
+ cur->s.start = 0;
}
sync->first_frame = 0;
}
@@ -509,17 +512,17 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
* can deal with overlaps of up to a frame time but anything larger
* we handle by dropping frames here.
*/
- if ( next_start - cur->start <= 0 )
+ if ( next_start - cur->s.start <= 0 )
{
if ( sync->first_drop == 0 )
{
sync->first_drop = next_start;
}
++sync->drop_count;
- if ( next->new_chap )
+ if ( next->s.new_chap )
{
// don't drop a chapter mark when we drop the buffer
- sync->chap_mark = next->new_chap;
+ sync->chap_mark = next->s.new_chap;
}
hb_buffer_close( &next );
return HB_WORK_OK;
@@ -528,8 +531,8 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
{
hb_log( "sync: video time didn't advance - dropped %d frames "
"(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
- sync->drop_count, (int)( cur->start - sync->first_drop ) / 90,
- cur->start, next_start, (int)( next_start - cur->start ) );
+ sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
+ cur->s.start, next_start, (int)( next_start - cur->s.start ) );
sync->first_drop = 0;
sync->drop_count = 0;
}
@@ -541,215 +544,47 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
sync->video_sequence = cur->sequence;
/* Process subtitles that apply to this video frame */
-
- // NOTE: There is no logic in either subtitle-sync algorithm that waits for the
- // subtitle-decoder if it is lagging behind the video-decoder.
+ // NOTE: There is no logic in either subtitle-sync algorithm that waits
+ // for the subtitle-decoder if it is lagging behind the video-decoder.
//
- // Therefore there is the implicit assumption that the subtitle-decoder
- // is always faster than the video-decoder. This assumption is definitely
- // incorrect in some cases where the SSA subtitle decoder is used.
- // Enable the SUBSYNC_VERBOSE_TIMING flag below to debug.
-
+ // Therefore there is the implicit assumption that the subtitle-decoder
+ // is always faster than the video-decoder. This assumption is definitely
+ // incorrect in some cases where the SSA subtitle decoder is used.
-/*
- * Enables logging of three kinds of events:
- * SUB***: Subtitle received by sync object
- * SUB+++: Subtitle now shown
- * SUB---: Subtitle now hidden and disposed
- *
- * Lead times on SUB*** events should be positive.
- * Negative lead times lead to lag times on SUB+++ or the complete drop of a subtitle.
- * Lag times on SUB+++ and SUB--- should be small positive values in the 0-40ms range.
- */
-#define SUBSYNC_VERBOSE_TIMING 0
-
- /*
- * 1. Find all subtitles that need to be burned into the current video frame
- * and attach them to the frame.
- * 2. Find all subtitles that need to be passed thru and do so immediately.
- */
for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
{
int64_t sub_start, sub_stop, duration;
subtitle = hb_list_item( job->list_subtitle, i );
- // If this subtitle track's packets are to be passed thru, do so immediately
- if( subtitle->config.dest == PASSTHRUSUB )
+ // Sanitize subtitle start and stop times, then pass to
+ // muxer or renderer filter.
+ while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
{
- while ( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) != NULL )
- {
- if ( sub->stop == -1 && hb_fifo_size( subtitle->fifo_raw ) < 2 )
- break;
+ if ( sub->s.stop == -1 && hb_fifo_size( subtitle->fifo_raw ) < 2 )
+ break;
- sub = hb_fifo_get( subtitle->fifo_raw );
- if ( sub->stop == -1 )
- {
- hb_buffer_t *next;
- next = hb_fifo_see( subtitle->fifo_raw );
- sub->stop = next->start;
- }
- // Need to re-write subtitle timestamps to account
- // for any slippage.
- hb_lock( pv->common->mutex );
- sub_start = sub->start - pv->common->video_pts_slip;
- hb_unlock( pv->common->mutex );
- duration = sub->stop - sub->start;
- sub_stop = sub_start + duration;
-
- sub->start = sub_start;
- sub->stop = sub_stop;
-
- hb_fifo_push( subtitle->fifo_out, sub );
- }
- }
- // If this subtitle track's packets are to be rendered, identify the
- // packets that need to be rendered on the current video frame
- else if( subtitle->config.dest == RENDERSUB )
- {
- // Migrate subtitles from 'subtitle->fifo_raw' to 'sub_list'
- // immediately. This make it so we can scan the list for
- // all overlapping subtitles that apply to the current
- // frame.
- //
- // Note that the size of 'sub_list' is unbounded.
- while ( ( sub = hb_fifo_get( subtitle->fifo_raw ) ) != NULL )
+ sub = hb_fifo_get( subtitle->fifo_raw );
+ if ( sub->s.stop == -1 )
{
- #if SUBSYNC_VERBOSE_TIMING
- printf( "\nSUB*** (%"PRId64"/%"PRId64":%"PRId64") @ %"PRId64"/%"PRId64":%"PRId64" (lead by %"PRId64"ms)\n",
- sub->start/90, sub->start/90/1000/60, sub->start/90/1000%60,
- cur->start/90, cur->start/90/1000/60, cur->start/90/1000%60,
- (sub->start - cur->start)/90);
- if (pv->common->video_pts_slip)
- {
- printf( " VIDEO-LAG: %"PRId64"\n", pv->common->video_pts_slip );
- }
- #endif
-
- // Append to sub_list
- if ( sync->sub_tail )
- sync->sub_tail->next = sub;
- else
- sync->sub_list = sub;
- sync->sub_tail = sub;
+ hb_buffer_t *next;
+ next = hb_fifo_see( subtitle->fifo_raw );
+ sub->s.stop = next->s.start;
}
-
- hb_buffer_t *prev_sub = NULL;
- hb_buffer_t *cur_sub_tail = NULL;
- for ( sub = sync->sub_list; sub != NULL; )
- {
- if ( sub->next && sub->stop == -1 )
- {
- sub->stop = sub->next->start;
- }
+ // Need to re-write subtitle timestamps to account
+ // for any slippage.
+ hb_lock( pv->common->mutex );
+ sub_start = sub->s.start - pv->common->video_pts_slip;
+ hb_unlock( pv->common->mutex );
+ duration = sub->s.stop - sub->s.start;
+ sub_stop = sub_start + duration;
- // Need to re-write subtitle timestamps to account
- // for any slippage.
- hb_lock( pv->common->mutex );
- sub_start = sub->start - pv->common->video_pts_slip;
- hb_unlock( pv->common->mutex );
- if ( sub->stop != -1 )
- {
- duration = sub->stop - sub->start;
- sub_stop = sub_start + duration;
- }
- else
- {
- sub_stop = -1;
- }
+ sub->s.start = sub_start;
+ sub->s.stop = sub_stop;
- if ( cur->start < sub_start )
- {
- // Subtitle starts in the future
- break;
- }
- else
- {
- // Subtitle starts now or in the past...
- if ( cur->start < sub_stop || sub_stop == -1 )
- {
- // Subtitle finishes in the future
-
- // Append a copy of the subtitle packet to the
- // current video packet to be burned in by
- // the 'render' work-object.
- // (Can't just alias it because we will have
- // to attach to multiple video frames and have
- // no easy way of synchronizing disposal)
- hb_buffer_t * sub_copy = copy_subtitle( sub );
- sub_copy->start = sub_start;
- sub_copy->stop = sub_stop;
- if ( cur_sub_tail )
- cur_sub_tail->next = sub_copy;
- else
- cur->sub = sub_copy;
- cur_sub_tail = sub_copy;
-
- #if SUBSYNC_VERBOSE_TIMING
- if (!(sub->new_chap & 0x01))
- {
- printf( "\nSUB+++ (%"PRId64"/%"PRId64":%"PRId64") @ %"PRId64"/%"PRId64":%"PRId64" (lag by %"PRId64"ms)\n",
- sub->start/90, sub->start/90/1000/60, sub->start/90/1000%60,
- cur->start/90, cur->start/90/1000/60, cur->start/90/1000%60,
- (cur->start - sub->start)/90 );
- if (pv->common->video_pts_slip)
- {
- printf( " VIDEO-LAG: %"PRId64"\n", pv->common->video_pts_slip );
- }
-
- sub->new_chap |= 0x01;
- }
- #endif
-
- // (Keep the subtitle in the stream)
- prev_sub = sub;
- sub = sub->next;
- }
- else
- {
- // Subtitle starts in the past and has already finished
-
- #if SUBSYNC_VERBOSE_TIMING
- printf( "\nSUB--- (%"PRId64"/%"PRId64":%"PRId64") @ %"PRId64"/%"PRId64":%"PRId64" (lag by %"PRId64"ms)\n",
- sub->start/90, sub->start/90/1000/60, sub->start/90/1000%60,
- cur->start/90, cur->start/90/1000/60, cur->start/90/1000%60,
- (cur->start - sub->stop)/90 );
- if (pv->common->video_pts_slip)
- {
- printf( " VIDEO-LAG: %"PRId64"\n", pv->common->video_pts_slip );
- }
- #endif
-
- // Remove it from the stream...
- if (prev_sub != NULL)
- {
- prev_sub->next = sub->next;
- }
- if (sync->sub_list == sub)
- {
- sync->sub_list = sub->next;
- if ( sync->sub_list == NULL )
- sync->sub_tail = NULL;
- }
- else if (sync->sub_tail == sub)
- {
- sync->sub_tail = prev_sub;
- }
-
- // ...and trash it
- hb_buffer_t *next_sub = sub->next;
- // Prevent hb_buffer_close from killing the whole list
- // before we finish iterating over it
- sub->next = NULL;
- hb_buffer_close( &sub );
-
- // (prev_sub remains the same)
- sub = next_sub;
- }
- }
- }
+ hb_fifo_push( subtitle->fifo_out, sub );
}
- } // end subtitles
+ }
/*
* Adjust the pts of the current frame so that it's contiguous
@@ -765,27 +600,27 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
* explicit stop time from the start time of the next frame.
*/
*buf_out = cur;
- int64_t duration = next_start - cur->start;
+ int64_t duration = next_start - cur->s.start;
sync->cur = cur = next;
cur->sub = NULL;
- cur->start -= pv->common->video_pts_slip;
- cur->stop -= pv->common->video_pts_slip;
+ cur->s.start -= pv->common->video_pts_slip;
+ cur->s.stop -= pv->common->video_pts_slip;
sync->pts_skip = 0;
if ( duration <= 0 )
{
hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
- duration, cur->start, next_start );
+ duration, cur->s.start, next_start );
}
- (*buf_out)->start = sync->next_start;
+ (*buf_out)->s.start = sync->next_start;
sync->next_start += duration;
- (*buf_out)->stop = sync->next_start;
+ (*buf_out)->s.stop = sync->next_start;
if ( sync->chap_mark )
{
// we have a pending chapter mark from a recent drop - put it on this
// buffer (this may make it one frame late but we can't do any better).
- (*buf_out)->new_chap = sync->chap_mark;
+ (*buf_out)->s.new_chap = sync->chap_mark;
sync->chap_mark = 0;
}
@@ -795,23 +630,6 @@ int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
return HB_WORK_OK;
}
-static hb_buffer_t * copy_subtitle( hb_buffer_t * src_sub )
-{
- hb_buffer_t * dst_sub = hb_buffer_init( src_sub->size );
-
- dst_sub->x = src_sub->x;
- dst_sub->y = src_sub->y;
- dst_sub->width = src_sub->width;
- dst_sub->height = src_sub->height;
- dst_sub->start = src_sub->start;
- dst_sub->stop = src_sub->stop;
- dst_sub->next = NULL;
-
- memcpy( dst_sub->data, src_sub->data, src_sub->size );
-
- return dst_sub;
-}
-
// sync*Init does nothing because sync has a special initializer
// that takes care of initializing video and all audio tracks
int syncVideoInit( hb_work_object_t * w, hb_job_t * job)
@@ -897,7 +715,7 @@ static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
/* Wait till we can determine the initial pts of all streams */
if( pv->common->pts_offset == INT64_MIN )
{
- pv->common->first_pts[sync->index+1] = buf->start;
+ pv->common->first_pts[sync->index+1] = buf->s.start;
hb_lock( pv->common->mutex );
while( pv->common->pts_offset == INT64_MIN )
{
@@ -918,7 +736,7 @@ static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
/* Wait for start frame if doing point-to-point */
hb_lock( pv->common->mutex );
- start = buf->start - pv->common->audio_pts_slip;
+ start = buf->s.start - pv->common->audio_pts_slip;
while ( !pv->common->start_found )
{
if ( pv->common->audio_pts_thresh < 0 )
@@ -928,14 +746,14 @@ static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
// after hb_sync_init is called.
pv->common->audio_pts_thresh = job->pts_to_start;
}
- if ( buf->start < pv->common->audio_pts_thresh )
+ if ( buf->s.start < pv->common->audio_pts_thresh )
{
hb_buffer_close( &buf );
hb_unlock( pv->common->mutex );
return HB_WORK_OK;
}
while ( !pv->common->start_found &&
- buf->start >= pv->common->audio_pts_thresh )
+ buf->s.start >= pv->common->audio_pts_thresh )
{
hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
// There is an unfortunate unavoidable deadlock that can occur.
@@ -962,7 +780,7 @@ static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
}
}
}
- start = buf->start - pv->common->audio_pts_slip;
+ start = buf->s.start - pv->common->audio_pts_slip;
}
if ( start < 0 )
{
@@ -1238,7 +1056,7 @@ static hb_buffer_t * OutputAudioFrame( hb_audio_t *audio, hb_buffer_t *buf,
hb_sync_audio_t *sync )
{
int64_t start = (int64_t)sync->next_start;
- double duration = buf->stop - buf->start;
+ double duration = buf->s.stop - buf->s.start;
if ( !( audio->config.out.codec & HB_ACODEC_PASS_FLAG ) )
{
@@ -1316,10 +1134,13 @@ static hb_buffer_t * OutputAudioFrame( hb_audio_t *audio, hb_buffer_t *buf,
}
}
}
- buf->frametype = HB_FRAME_AUDIO;
- buf->start = start;
+
+ buf->s.type = AUDIO_BUF;
+ buf->s.frametype = HB_FRAME_AUDIO;
+
+ buf->s.start = start;
sync->next_start += duration;
- buf->stop = (int64_t)sync->next_start;
+ buf->s.stop = (int64_t)sync->next_start;
return buf;
}
@@ -1352,8 +1173,8 @@ static void InsertSilence( hb_work_object_t * w, int64_t duration )
if( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
{
buf = hb_buffer_init( sync->silence_size );
- buf->start = sync->next_start;
- buf->stop = buf->start + frame_dur;
+ buf->s.start = sync->next_start;
+ buf->s.stop = buf->s.start + frame_dur;
memcpy( buf->data, sync->silence_buf, buf->size );
fifo = w->audio->priv.fifo_out;
}
@@ -1363,8 +1184,8 @@ static void InsertSilence( hb_work_object_t * w, int64_t duration )
sizeof( float ) *
HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT(
w->audio->config.out.mixdown) );
- buf->start = sync->next_start;
- buf->stop = buf->start + frame_dur;
+ buf->s.start = sync->next_start;
+ buf->s.stop = buf->s.start + frame_dur;
memset( buf->data, 0, buf->size );
fifo = w->audio->priv.fifo_sync;
}
diff --git a/libhb/vfr.c b/libhb/vfr.c
new file mode 100644
index 000000000..fb12393b9
--- /dev/null
+++ b/libhb/vfr.c
@@ -0,0 +1,616 @@
+
+#include "hb.h"
+
+struct hb_filter_private_s
+{
+ hb_job_t * job;
+ int cfr;
+ int input_vrate;
+ int input_vrate_base;
+ int vrate;
+ int vrate_base;
+ hb_fifo_t * delay_queue;
+ int dropped_frames;
+ int extended_frames;
+ uint64_t last_start[4];
+ uint64_t last_stop[4];
+ uint64_t lost_time[4];
+ uint64_t total_lost_time;
+ uint64_t total_gained_time;
+ int count_frames; // frames output so far
+ double frame_rate; // 90KHz ticks per frame (for CFR/PFR)
+ uint64_t out_last_stop; // where last frame ended (for CFR/PFR)
+ int drops; // frames dropped (for CFR/PFR)
+ int dups; // frames duped (for CFR/PFR)
+
+ // Duplicate frame detection members
+ float max_metric; // highest motion metric since
+ // last output frame
+ float frame_metric; // motion metric of last frame
+ float out_metric; // motion metric of last output frame
+ int sync_parity;
+ unsigned gamma_lut[256];
+};
+
+static int hb_vfr_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init );
+
+static int hb_vfr_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out );
+
+static void hb_vfr_close( hb_filter_object_t * filter );
+static int hb_vfr_info( hb_filter_object_t * filter, hb_filter_info_t * info );
+
+hb_filter_object_t hb_filter_vfr =
+{
+ .id = HB_FILTER_VFR,
+ .enforce_order = 1,
+ .name = "Framerate Shaper",
+ .settings = NULL,
+ .init = hb_vfr_init,
+ .work = hb_vfr_work,
+ .close = hb_vfr_close,
+ .info = hb_vfr_info,
+};
+
+// Create gamma lookup table.
+// Note that we are creating a scaled integer lookup table that will
+// not cause overflows in sse_block16() below. This results in
+// small values being truncated to 0 which is ok for this usage.
+static void build_gamma_lut( hb_filter_private_t * pv )
+{
+ int i;
+ for( i = 0; i < 256; i++ )
+ {
+ pv->gamma_lut[i] = 4095 * pow( ( (float)i / (float)255 ), 2.2f );
+ }
+}
+
+// insert buffer 'succ' after buffer chain element 'pred'.
+// caller must guarantee that 'pred' and 'succ' are non-null.
+static hb_buffer_t *insert_buffer_in_chain(
+ hb_buffer_t *pred,
+ hb_buffer_t *succ )
+{
+ succ->next = pred->next;
+ pred->next = succ;
+ return succ;
+}
+
+#define DUP_THRESH_SSE 5.0
+
+// Compute the sum of squared errors for a 16x16 block
+// Gamma adjusts pixel values so that less visible differences
+// count less.
+static inline unsigned sse_block16( hb_filter_private_t *pv, uint8_t *a, uint8_t *b, int stride )
+{
+ int x, y;
+ unsigned sum = 0;
+ int diff;
+ unsigned *g = pv->gamma_lut;
+
+ for( y = 0; y < 16; y++ )
+ {
+ for( x = 0; x < 16; x++ )
+ {
+ diff = g[a[x]] - g[b[x]];
+ sum += diff * diff;
+ }
+ a += stride;
+ b += stride;
+ }
+ return sum;
+}
+
+// Sum of squared errors. Computes and sums the SSEs for all
+// 16x16 blocks in the images. Only checks the Y component.
+static float motion_metric( hb_filter_private_t * pv, hb_buffer_t * a, hb_buffer_t * b )
+{
+ int bw = a->f.width / 16;
+ int bh = a->f.height / 16;
+ int stride = a->plane[0].stride;
+ uint8_t * pa = a->plane[0].data;
+ uint8_t * pb = b->plane[0].data;
+ int x, y;
+ uint64_t sum = 0;
+
+ for( y = 0; y < bh; y++ )
+ {
+ for( x = 0; x < bw; x++ )
+ {
+ sum += sse_block16( pv, pa + y * 16 * stride + x * 16,
+ pb + y * 16 * stride + x * 16, stride );
+ }
+ }
+ return (float)sum / ( a->f.width * a->f.height );;
+}
+
+// This section of the code implements video frame rate control.
+// Since filters are allowed to duplicate and drop frames (which
+// changes the timing), this has to be the last thing done in render.
+//
+// There are three options, selected by the value of cfr:
+// 0 - Variable Frame Rate (VFR) or 'same as source': frame times
+// are left alone
+// 1 - Constant Frame Rate (CFR): Frame timings are adjusted so that all
+// frames are exactly vrate_base ticks apart. Frames are dropped
+// or duplicated if necessary to maintain this spacing.
+// 2 - Peak Frame Rate (PFR): vrate_base is treated as the peak
+// average frame rate. I.e., the average frame rate (current frame
+// end time divided by number of frames so far) is never allowed to be
+// greater than vrate_base and frames are dropped if necessary
+// to keep the average under this value. Other than those drops, frame
+// times are left alone.
+//
+
+static void adjust_frame_rate( hb_filter_private_t *pv, hb_buffer_t **buf_out )
+{
+ hb_buffer_t *out = *buf_out;
+
+ if ( out && out->size > 0 )
+ {
+ if ( pv->cfr == 0 )
+ {
+ ++pv->count_frames;
+ pv->out_last_stop = out->s.stop;
+ return;
+ }
+
+ // compute where this frame would stop if the frame rate were constant
+ // (this is our target stopping time for CFR and earliest possible
+ // stopping time for PFR).
+ double cfr_stop = pv->frame_rate * ( pv->count_frames + 1 );
+
+ hb_buffer_t * next = hb_fifo_see( pv->delay_queue );
+
+ float next_metric = 0;
+ if( next )
+ next_metric = motion_metric( pv, out, next );
+
+ if( pv->out_last_stop >= out->s.stop )
+ {
+ ++pv->drops;
+ hb_buffer_close( buf_out );
+
+ pv->frame_metric = next_metric;
+ if( next_metric > pv->max_metric )
+ pv->max_metric = next_metric;
+
+ return;
+ }
+
+ if( out->s.start <= pv->out_last_stop &&
+ out->s.stop > pv->out_last_stop &&
+ next && next->s.stop < cfr_stop )
+ {
+ // This frame starts before the end of the last output
+ // frame and ends after the end of the last output
+ // frame (i.e. it straddles it). Also the next frame
+ // ends before the end of the next output frame. If the
+ // next frame is not a duplicate, and we haven't seen
+ // a changed frame since the last output frame,
+ // then drop this frame.
+ //
+ // This causes us to sync to the pattern of progressive
+ // 23.976 fps content that has been upsampled to
+ // progressive 59.94 fps.
+ if( pv->out_metric > pv->max_metric &&
+ next_metric > pv->max_metric )
+ {
+ // Pattern: N R R N
+ // o c n
+ // N == new frame
+ // R == repeat frame
+ // o == last output frame
+ // c == current frame
+ // n == next frame
+ // We haven't seen a frame change since the last output
+ // frame and the next frame changes. Use the next frame,
+ // drop this one.
+ ++pv->drops;
+ pv->frame_metric = next_metric;
+ pv->max_metric = next_metric;
+ pv->sync_parity = 1;
+ hb_buffer_close( buf_out );
+ return;
+ }
+ else if( pv->sync_parity &&
+ pv->out_metric < pv->max_metric &&
+ pv->max_metric > pv->frame_metric &&
+ pv->frame_metric < next_metric )
+ {
+ // Pattern: R N R N
+ // o c n
+ // N == new frame
+ // R == repeat frame
+ // o == last output frame
+ // c == current frame
+ // n == next frame
+ // If we see this pattern, we must not use the next
+ // frame when straddling the current frame.
+ pv->sync_parity = 0;
+ }
+ else if( pv->sync_parity )
+ {
+ // The pattern is indeterminate. Continue dropping
+ // frames on the same schedule
+ ++pv->drops;
+ pv->frame_metric = next_metric;
+ pv->max_metric = next_metric;
+ pv->sync_parity = 1;
+ hb_buffer_close( buf_out );
+ return;
+ }
+
+ }
+
+ // this frame has to start where the last one stopped.
+ out->s.start = pv->out_last_stop;
+
+ pv->out_metric = pv->frame_metric;
+ pv->frame_metric = next_metric;
+ pv->max_metric = next_metric;
+
+ // at this point we know that this frame doesn't push the average
+ // rate over the limit so we just pass it on for PFR. For CFR we're
+ // going to return it (with its start & stop times modified) and
+ // we may have to dup it.
+ ++pv->count_frames;
+ if ( pv->cfr > 1 )
+ {
+ // PFR - we're going to keep the frame but may need to
+            // adjust its stop time to meet the average rate constraint.
+ if ( out->s.stop <= cfr_stop )
+ {
+ out->s.stop = cfr_stop;
+ }
+ }
+ else
+ {
+ // we're doing CFR so we have to either trim some time from a
+ // buffer that ends too far in the future or, if the buffer is
+ // two or more frame times long, split it into multiple pieces,
+ // each of which is a frame time long.
+ double excess_dur = (double)out->s.stop - cfr_stop;
+ out->s.stop = cfr_stop;
+ for ( ; excess_dur >= pv->frame_rate; excess_dur -= pv->frame_rate )
+ {
+ /* next frame too far ahead - dup current frame */
+ hb_buffer_t *dup = hb_buffer_init( out->size );
+ memcpy( dup->data, out->data, out->size );
+ dup->s = out->s;
+ dup->s.new_chap = 0;
+ dup->s.start = cfr_stop;
+ cfr_stop += pv->frame_rate;
+ dup->s.stop = cfr_stop;
+ out = insert_buffer_in_chain( out, dup );
+ ++pv->dups;
+ ++pv->count_frames;
+ }
+ }
+ pv->out_last_stop = out->s.stop;
+ }
+}
+
+static int hb_vfr_init( hb_filter_object_t * filter,
+ hb_filter_init_t * init )
+{
+ filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
+ hb_filter_private_t * pv = filter->private_data;
+
+ build_gamma_lut( pv );
+ pv->cfr = init->cfr;
+ pv->input_vrate = pv->vrate = init->vrate;
+ pv->input_vrate_base = pv->vrate_base = init->vrate_base;
+ if( filter->settings )
+ {
+ sscanf( filter->settings, "%d:%d:%d",
+ &pv->cfr, &pv->vrate, &pv->vrate_base );
+ }
+
+ //pv->job = init->job;
+
+ /* Setup FIFO queue for subtitle cache */
+ pv->delay_queue = hb_fifo_init( 8, 1 );
+
+ /* VFR IVTC needs a bunch of time-keeping variables to track
+ how many frames are dropped, how many are extended, what the
+ last 4 start and stop times were (so they can be modified),
+ how much time has been lost and gained overall, how much time
+ the latest 4 frames should be extended by */
+ pv->dropped_frames = 0;
+ pv->extended_frames = 0;
+ pv->last_start[0] = 0;
+ pv->last_stop[0] = 0;
+ pv->total_lost_time = 0;
+ pv->total_gained_time = 0;
+ pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
+ pv->frame_metric = 1000; // Force first frame
+
+ if ( pv->cfr == 0 )
+ {
+ /* Ensure we're using "Same as source" FPS */
+ pv->vrate_base = init->vrate_base;
+ pv->vrate = init->vrate;
+ }
+ else if ( pv->cfr == 2 )
+ {
+ // For PFR, we want the framerate based on the source's actual
+ // framerate, unless it's higher than the specified peak framerate.
+ double source_fps = (double)init->vrate / init->vrate_base;
+ double peak_fps = (double)pv->vrate / pv->vrate_base;
+ if ( source_fps > peak_fps )
+ {
+ // peak framerate is lower than source framerate. so signal
+ // that the nominal framerate will be changed.
+ init->vrate = pv->vrate;
+ init->vrate_base = pv->vrate_base;
+ }
+ }
+ else
+ {
+ // Constant framerate. Signal the framerate we are using.
+ init->vrate = pv->vrate;
+ init->vrate_base = pv->vrate_base;
+ }
+ init->cfr = pv->cfr;
+ pv->frame_rate = (double)pv->vrate_base * 90000. / pv->vrate;
+
+ return 0;
+}
+
+static int hb_vfr_info( hb_filter_object_t * filter,
+ hb_filter_info_t * info )
+{
+ hb_filter_private_t * pv = filter->private_data;
+
+ if( !pv )
+ return 1;
+
+ memset( info, 0, sizeof( hb_filter_info_t ) );
+ info->out.vrate_base = pv->vrate_base;
+ info->out.vrate = pv->vrate;
+ info->out.cfr = pv->cfr;
+ if ( pv->cfr == 0 )
+ {
+ /* Ensure we're using "Same as source" FPS */
+ sprintf( info->human_readable_desc,
+ "frame rate: same as source (around %.3f fps)",
+ (float)pv->vrate / pv->vrate_base );
+ }
+ else if ( pv->cfr == 2 )
+ {
+ // For PFR, we want the framerate based on the source's actual
+ // framerate, unless it's higher than the specified peak framerate.
+ double source_fps = (double)pv->input_vrate / pv->input_vrate_base;
+ double peak_fps = (double)pv->vrate / pv->vrate_base;
+ sprintf( info->human_readable_desc,
+ "frame rate: %.3f fps -> peak rate limited to %.3f fps)",
+ source_fps , peak_fps );
+ }
+ else
+ {
+ // Constant framerate. Signal the framerate we are using.
+ double source_fps = (double)pv->input_vrate / pv->input_vrate_base;
+ double constant_fps = (double)pv->vrate / pv->vrate_base;
+ sprintf( info->human_readable_desc,
+ "frame rate: %.3f fps -> constant rate limited to %.3f fps)",
+ source_fps , constant_fps );
+ }
+
+ return 0;
+}
+
+static void hb_vfr_close( hb_filter_object_t * filter )
+{
+ hb_filter_private_t * pv = filter->private_data;
+
+ if( !pv )
+ return;
+
+ if ( pv->cfr )
+ {
+ hb_log("render: %d frames output, %d dropped and %d duped for CFR/PFR",
+ pv->count_frames, pv->drops, pv->dups );
+ }
+
+ if( pv->job )
+ {
+ hb_interjob_t * interjob = hb_interjob_get( pv->job->h );
+
+ /* Preserve dropped frame count for more accurate
+ * framerates in 2nd passes.
+ */
+ interjob->out_frame_count = pv->count_frames;
+ interjob->total_time = pv->out_last_stop;
+ }
+
+ hb_log("render: lost time: %"PRId64" (%i frames)",
+ pv->total_lost_time, pv->dropped_frames);
+ hb_log("render: gained time: %"PRId64" (%i frames) (%"PRId64" not accounted for)",
+ pv->total_gained_time, pv->extended_frames,
+ pv->total_lost_time - pv->total_gained_time);
+
+ if (pv->dropped_frames)
+ {
+ hb_log("render: average dropped frame duration: %"PRId64,
+ (pv->total_lost_time / pv->dropped_frames) );
+ }
+
+ if( pv->delay_queue )
+ {
+ hb_fifo_close( &pv->delay_queue );
+ }
+
+ /* Cleanup render work structure */
+ free( pv );
+ filter->private_data = NULL;
+}
+
+static int hb_vfr_work( hb_filter_object_t * filter,
+ hb_buffer_t ** buf_in,
+ hb_buffer_t ** buf_out )
+{
+ hb_filter_private_t * pv = filter->private_data;
+ hb_buffer_t * in = *buf_in;
+ hb_buffer_t * out = NULL;
+
+ *buf_in = NULL;
+ *buf_out = NULL;
+ if( in->size <= 0 )
+ {
+ hb_buffer_t *head = NULL, *tail = NULL, *next;
+ int counter = 2;
+
+ /* If the input buffer is end of stream, send out an empty one
+ * to the next stage as well. To avoid losing the contents of
+ * the delay queue connect the buffers in the delay queue in
+ * the correct order, and add the end of stream buffer to the
+ * end.
+ */
+ while( ( next = hb_fifo_get( pv->delay_queue ) ) != NULL )
+ {
+
+ /* We can't use the given time stamps. Previous frames
+ might already have been extended, throwing off the
+ raw values fed to render.c. Instead, their
+ stop and start times are stored in arrays.
+           The 4th cached frame will be the one to use.
+ If it needed its duration extended to make up
+ lost time, it will have happened above. */
+ next->s.start = pv->last_start[counter];
+ next->s.stop = pv->last_stop[counter--];
+
+ adjust_frame_rate( pv, &next );
+
+ if( next )
+ {
+ if( !head && !tail )
+ {
+ head = tail = next;
+ } else {
+ tail->next = next;
+ tail = next;
+ }
+ }
+ }
+ if( tail )
+ {
+ tail->next = in;
+ *buf_out = head;
+ } else {
+ *buf_out = in;
+ }
+ return HB_FILTER_DONE;
+ }
+
+ // If there is a gap between the last stop and the current start
+ // then frame(s) were dropped.
+ if ( in->s.start > pv->last_stop[0] )
+ {
+ /* We need to compensate for the time lost by dropping frame(s).
+ Spread its duration out in quarters, because usually dropped frames
+ maintain a 1-out-of-5 pattern and this spreads it out amongst
+ the remaining ones. Store these in the lost_time array, which
+ has 4 slots in it. Because not every frame duration divides
+ evenly by 4, and we can't lose the remainder, we have to go
+ through an awkward process to preserve it in the 4th array index.
+ */
+ uint64_t temp_duration = in->s.start - pv->last_stop[0];
+ pv->lost_time[0] += (temp_duration / 4);
+ pv->lost_time[1] += (temp_duration / 4);
+ pv->lost_time[2] += (temp_duration / 4);
+ pv->lost_time[3] += ( temp_duration - 3 * (temp_duration / 4) );
+
+ pv->total_lost_time += temp_duration;
+ }
+
+ /* Cache frame start and stop times, so we can renumber
+ time stamps if dropping frames for VFR. */
+ int i;
+ for( i = 3; i >= 1; i-- )
+ {
+ pv->last_start[i] = pv->last_start[i-1];
+ pv->last_stop[i] = pv->last_stop[i-1];
+ }
+
+ /* In order to make sure we have continuous time stamps, store
+ the current frame's duration as starting when the last one stopped. */
+ pv->last_start[0] = pv->last_stop[1];
+ pv->last_stop[0] = pv->last_start[0] + (in->s.stop - in->s.start);
+
+ hb_fifo_push( pv->delay_queue, in );
+
+ /*
+ * Keep the last three frames in our queue, this ensures that we have
+ * the last two always in there should we need to rewrite the
+ * durations on them.
+ */
+
+ if( hb_fifo_size( pv->delay_queue ) >= 4 )
+ {
+ out = hb_fifo_get( pv->delay_queue );
+ }
+
+ if( out )
+ {
+ /* The current frame exists. That means it hasn't been dropped by a
+ * filter. We may edit its duration if needed.
+ */
+ if( pv->lost_time[3] > 0 )
+ {
+ int time_shift = 0;
+
+ for( i = 3; i >= 0; i-- )
+ {
+ /*
+ * A frame's been dropped earlier by VFR detelecine.
+ * Gotta make up the lost time. This will also
+ * slow down the video.
+ * The dropped frame's has to be accounted for, so
+ * divvy it up amongst the 4 frames left behind.
+ * This is what the delay_queue is for;
+ * telecined sequences start 2 frames before
+ * the dropped frame, so to slow down the right
+ * ones you need a 2 frame delay between
+ * reading input and writing output.
+ */
+
+ /* We want to extend the outputted frame's duration by the value
+ stored in the 4th slot of the lost_time array. Because we need
+ to adjust all the values in the array so they're contiguous,
+ extend the duration inside the array first, before applying
+ it to the current frame buffer. */
+ pv->last_start[i] += time_shift;
+ pv->last_stop[i] += pv->lost_time[i] + time_shift;
+
+ /* Log how much time has been added back in to the video. */
+ pv->total_gained_time += pv->lost_time[i];
+ time_shift += pv->lost_time[i];
+
+ pv->lost_time[i] = 0;
+
+ /* Log how many frames have had their durations extended. */
+ pv->extended_frames++;
+ }
+ }
+
+ /* We can't use the given time stamps. Previous frames
+ might already have been extended, throwing off the
+ raw values fed to render.c. Instead, their
+ stop and start times are stored in arrays.
+           The 4th cached frame will be the one to use.
+ If it needed its duration extended to make up
+ lost time, it will have happened above. */
+ out->s.start = pv->last_start[3];
+ out->s.stop = pv->last_stop[3];
+
+ adjust_frame_rate( pv, &out );
+ }
+
+ *buf_out = out;
+ return HB_FILTER_OK;
+}
+
+
diff --git a/libhb/work.c b/libhb/work.c
index 0487a07f0..7b84cd30b 100644
--- a/libhb/work.c
+++ b/libhb/work.c
@@ -21,6 +21,7 @@ typedef struct
static void work_func();
static void do_job( hb_job_t *);
static void work_loop( void * );
+static void filter_loop( void * );
#define FIFO_UNBOUNDED 65536
#define FIFO_UNBOUNDED_WAKE 65535
@@ -28,6 +29,8 @@ static void work_loop( void * );
#define FIFO_LARGE_WAKE 16
#define FIFO_SMALL 16
#define FIFO_SMALL_WAKE 15
+#define FIFO_MINI 4
+#define FIFO_MINI_WAKE 3
/**
* Allocates work object and launches work thread with work_func.
@@ -271,6 +274,29 @@ void hb_display_job_info( hb_job_t * job )
(float) job->pfr_vrate / (float) job->pfr_vrate_base );
}
+ // Filters can modify dimensions. So show them first.
+ if( hb_list_count( job->list_filter ) )
+ {
+ hb_log(" + %s", hb_list_count( job->list_filter) > 1 ? "filters" : "filter" );
+ for( i = 0; i < hb_list_count( job->list_filter ); i++ )
+ {
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+ if( filter->settings )
+ hb_log(" + %s (%s)", filter->name, filter->settings);
+ else
+ hb_log(" + %s (default settings)", filter->name);
+ if( filter->info )
+ {
+ hb_filter_info_t info;
+ filter->info( filter, &info );
+ if( info.human_readable_desc[0] )
+ {
+ hb_log(" %s", info.human_readable_desc);
+ }
+ }
+ }
+ }
+
if( job->anamorphic.mode )
{
hb_log( " + %s anamorphic", job->anamorphic.mode == 1 ? "strict" : job->anamorphic.mode == 2? "loose" : "custom" );
@@ -278,9 +304,8 @@ void hb_display_job_info( hb_job_t * job )
{
hb_log( " + keeping source display aspect ratio");
}
- hb_log( " + storage dimensions: %d * %d -> %d * %d, crop %d/%d/%d/%d, mod %i",
- title->width, title->height, job->width, job->height,
- job->crop[0], job->crop[1], job->crop[2], job->crop[3], job->modulus );
+ hb_log( " + storage dimensions: %d * %d, mod %i",
+ job->width, job->height, job->modulus );
if( job->anamorphic.itu_par )
{
hb_log( " + using ITU pixel aspect ratio values");
@@ -291,27 +316,13 @@ void hb_display_job_info( hb_job_t * job )
}
else
{
- hb_log( " + dimensions: %d * %d -> %d * %d, crop %d/%d/%d/%d, mod %i",
- title->width, title->height, job->width, job->height,
- job->crop[0], job->crop[1], job->crop[2], job->crop[3], job->modulus );
+ hb_log( " + dimensions: %d * %d, mod %i",
+ job->width, job->height, job->modulus );
}
if ( job->grayscale )
hb_log( " + grayscale mode" );
- if( hb_list_count( job->filters ) )
- {
- hb_log(" + %s", hb_list_count( job->filters) > 1 ? "filters" : "filter" );
- for( i = 0; i < hb_list_count( job->filters ); i++ )
- {
- hb_filter_object_t * filter = hb_list_item( job->filters, i );
- if (filter->settings)
- hb_log(" + %s (%s)", filter->name, filter->settings);
- else
- hb_log(" + %s (default settings)", filter->name);
- }
- }
-
if( !job->indepth_scan )
{
/* Video encoder */
@@ -510,10 +521,44 @@ static void do_job( hb_job_t * job )
hb_log( "starting job" );
- if( job->anamorphic.mode )
+ // Filters have an effect on settings.
+ // So initialize the filters and update the job.
+ if( job->list_filter && hb_list_count( job->list_filter ) )
{
- hb_set_anamorphic_size(job, &job->width, &job->height, &job->anamorphic.par_width, &job->anamorphic.par_height);
+ hb_filter_init_t init;
+
+ init.job = job;
+ init.pix_fmt = PIX_FMT_YUV420P;
+ init.width = title->width;
+ init.height = title->height;
+ init.par_width = job->anamorphic.par_width;
+ init.par_height = job->anamorphic.par_height;
+ memcpy(init.crop, title->crop, sizeof(int[4]));
+ init.vrate_base = title->rate_base;
+ init.vrate = title->rate;
+ init.cfr = 0;
+ for( i = 0; i < hb_list_count( job->list_filter ); i++ )
+ {
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+ if( filter->init( filter, &init ) )
+ {
+ hb_error( "Failure to initialise filter '%s'", filter->name );
+ *job->die = 1;
+ goto cleanup;
+ }
+ }
+ job->width = init.width;
+ job->height = init.height;
+ job->anamorphic.par_width = init.par_width;
+ job->anamorphic.par_height = init.par_height;
+ memcpy(title->crop, init.crop, sizeof(int[4]));
+ job->vrate_base = init.vrate_base;
+ job->vrate = init.vrate;
+ job->cfr = init.cfr;
+ }
+ if( job->anamorphic.mode )
+ {
if( job->vcodec & HB_VCODEC_FFMPEG_MASK )
{
/* Just to make working with ffmpeg even more fun,
@@ -530,47 +575,6 @@ static void do_job( hb_job_t * job )
}
}
- /* Keep width and height within these boundaries,
- but ignore for anamorphic. For "loose" anamorphic encodes,
- this stuff is covered in the pixel_ratio section above. */
- if ( job->maxHeight && ( job->height > job->maxHeight ) && ( !job->anamorphic.mode ) )
- {
- job->height = job->maxHeight;
- hb_fix_aspect( job, HB_KEEP_HEIGHT );
- hb_log( "Height out of bounds, scaling down to %i", job->maxHeight );
- hb_log( "New dimensions %i * %i", job->width, job->height );
- }
- if ( job->maxWidth && ( job->width > job->maxWidth ) && ( !job->anamorphic.mode ) )
- {
- job->width = job->maxWidth;
- hb_fix_aspect( job, HB_KEEP_WIDTH );
- hb_log( "Width out of bounds, scaling down to %i", job->maxWidth );
- hb_log( "New dimensions %i * %i", job->width, job->height );
- }
-
- if ( job->cfr == 0 )
- {
- /* Ensure we're using "Same as source" FPS */
- job->vrate = title->rate;
- job->vrate_base = title->rate_base;
- }
- else if ( job->cfr == 2 )
- {
- job->pfr_vrate = job->vrate;
- job->pfr_vrate_base = job->vrate_base;
-
- // Ensure we're using "Same as source" FPS, with peak set by pfr_vrate_*
- // For PFR, we want the framerate based on the source's actual
- // framerate, unless it's higher than the specified peak framerate.
- double source_fps = (double)job->title->rate / job->title->rate_base;
- double peak_l_fps = (double)job->vrate / job->vrate_base;
- if ( source_fps < peak_l_fps )
- {
- job->vrate_base = title->rate_base;
- job->vrate = title->rate;
- }
- }
-
job->fifo_mpeg2 = hb_fifo_init( FIFO_LARGE, FIFO_LARGE_WAKE );
job->fifo_raw = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
job->fifo_sync = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
@@ -829,42 +833,6 @@ static void do_job( hb_job_t * job )
w->fifo_in = job->fifo_mpeg2;
w->fifo_out = job->fifo_raw;
- /* Video renderer */
- hb_list_add( job->list_work, ( w = hb_get_work( WORK_RENDER ) ) );
- w->fifo_in = job->fifo_sync;
- if( !job->indepth_scan )
- w->fifo_out = job->fifo_render;
- else
- w->fifo_out = NULL;
-
- if( !job->indepth_scan )
- {
-
- /* Video encoder */
- switch( job->vcodec )
- {
- case HB_VCODEC_FFMPEG_MPEG4:
- w = hb_get_work( WORK_ENCAVCODEC );
- w->codec_param = CODEC_ID_MPEG4;
- break;
- case HB_VCODEC_FFMPEG_MPEG2:
- w = hb_get_work( WORK_ENCAVCODEC );
- w->codec_param = CODEC_ID_MPEG2VIDEO;
- break;
- case HB_VCODEC_X264:
- w = hb_get_work( WORK_ENCX264 );
- break;
- case HB_VCODEC_THEORA:
- w = hb_get_work( WORK_ENCTHEORA );
- break;
- }
- w->fifo_in = job->fifo_render;
- w->fifo_out = job->fifo_mpeg4;
- w->config = &job->config;
-
- hb_list_add( job->list_work, w );
- }
-
/*
* Look for the scanned subtitle in the existing subtitle list
* select_subtitle implies that we did a scan.
@@ -919,7 +887,6 @@ static void do_job( hb_job_t * job )
}
}
-
for( i=0; i < hb_list_count(title->list_subtitle); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
@@ -996,8 +963,61 @@ static void do_job( hb_job_t * job )
}
}
+ /* Set up the video filter fifo pipeline */
if( !job->indepth_scan )
{
+ if( job->list_filter )
+ {
+ int filter_count = hb_list_count( job->list_filter );
+ int i;
+ hb_fifo_t * fifo_in = job->fifo_sync;
+
+ for( i = 0; i < filter_count; i++ )
+ {
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+
+ filter->fifo_in = fifo_in;
+ filter->fifo_out = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
+ fifo_in = filter->fifo_out;
+ }
+ job->fifo_render = fifo_in;
+ }
+ else if ( !job->list_filter )
+ {
+ hb_log("work: Internal Error: no filters");
+ job->fifo_render = NULL;
+ }
+
+ /* Video encoder */
+ switch( job->vcodec )
+ {
+ case HB_VCODEC_FFMPEG_MPEG4:
+ w = hb_get_work( WORK_ENCAVCODEC );
+ w->codec_param = CODEC_ID_MPEG4;
+ break;
+ case HB_VCODEC_FFMPEG_MPEG2:
+ w = hb_get_work( WORK_ENCAVCODEC );
+ w->codec_param = CODEC_ID_MPEG2VIDEO;
+ break;
+ case HB_VCODEC_X264:
+ w = hb_get_work( WORK_ENCX264 );
+ break;
+ case HB_VCODEC_THEORA:
+ w = hb_get_work( WORK_ENCTHEORA );
+ break;
+ }
+ // Handle case where there are no filters.
+ // This really should never happen.
+ if ( job->fifo_render )
+ w->fifo_in = job->fifo_render;
+ else
+ w->fifo_in = job->fifo_sync;
+
+ w->fifo_out = job->fifo_mpeg4;
+ w->config = &job->config;
+
+ hb_list_add( job->list_work, w );
+
for( i = 0; i < hb_list_count( title->list_audio ); i++ )
{
audio = hb_list_item( title->list_audio, i );
@@ -1069,6 +1089,25 @@ static void do_job( hb_job_t * job )
job->done = 0;
+ if( job->list_filter && !job->indepth_scan )
+ {
+ int filter_count = hb_list_count( job->list_filter );
+ int i;
+
+ for( i = 0; i < filter_count; i++ )
+ {
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+
+ if( !filter ) continue;
+
+ // Filters were initialized earlier, so we just need
+ // to start the filter's thread
+ filter->done = &job->done;
+ filter->thread = hb_thread_init( filter->name, filter_loop, filter,
+ HB_LOW_PRIORITY );
+ }
+ }
+
/* Launch processing threads */
for( i = 0; i < hb_list_count( job->list_work ); i++ )
{
@@ -1180,6 +1219,26 @@ cleanup:
/* Stop the write thread (thread_close will block until the muxer finishes) */
job->done = 1;
+ // Close render filter pipeline
+ if( job->list_filter )
+ {
+ int filter_count = hb_list_count( job->list_filter );
+ int i;
+
+ for( i = 0; i < filter_count; i++ )
+ {
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
+
+ if( !filter ) continue;
+
+ if( filter->thread != NULL )
+ {
+ hb_thread_close( &filter->thread );
+ }
+ filter->close( filter );
+ }
+ }
+
/* Close work objects */
while( ( w = hb_list_item( job->list_work, 0 ) ) )
{
@@ -1315,14 +1374,14 @@ cleanup:
}
}
- if( job->filters )
+ if( job->list_filter )
{
- for( i = 0; i < hb_list_count( job->filters ); i++ )
+ for( i = 0; i < hb_list_count( job->list_filter ); i++ )
{
- hb_filter_object_t * filter = hb_list_item( job->filters, i );
+ hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
hb_filter_close( &filter );
}
- hb_list_close( &job->filters );
+ hb_list_close( &job->list_filter );
}
hb_buffer_pool_free();
@@ -1331,6 +1390,21 @@ cleanup:
free( job );
}
+static inline void copy_chapter( hb_buffer_t * dst, hb_buffer_t * src )
+{
+ // Propagate any chapter breaks for the worker if and only if the
+ // output frame has the same time stamp as the input frame (any
+ // worker that delays frames has to propagate the chapter marks itself
+ // and workers that move chapter marks to a different time should set
+ // 'src' to NULL so that this code won't generate spurious duplicates.)
+ if( src && dst && src->s.start == dst->s.start)
+ {
+ // restore log below to debug chapter mark propagation problems
+ //hb_log("work %s: Copying Chapter Break @ %"PRId64, w->name, src->s.start);
+ dst->s.new_chap = src->s.new_chap;
+ }
+}
+
/**
* Performs the work object's specific work function.
* Loops calling work function for associated work object. Sleeps when fifo is full.
@@ -1361,17 +1435,7 @@ static void work_loop( void * _w )
buf_out = NULL;
w->status = w->work( w, &buf_in, &buf_out );
- // Propagate any chapter breaks for the worker if and only if the
- // output frame has the same time stamp as the input frame (any
- // worker that delays frames has to propagate the chapter marks itself
- // and workers that move chapter marks to a different time should set
- // 'buf_in' to NULL so that this code won't generate spurious duplicates.)
- if( buf_in && buf_out && buf_in->new_chap && buf_in->start == buf_out->start)
- {
- // restore log below to debug chapter mark propagation problems
- //hb_log("work %s: Copying Chapter Break @ %"PRId64, w->name, buf_in->start);
- buf_out->new_chap = buf_in->new_chap;
- }
+ copy_chapter( buf_out, buf_in );
if( buf_in )
{
@@ -1408,3 +1472,78 @@ static void work_loop( void * _w )
hb_buffer_close( &buf_in );
}
}
+
+/**
+ * Performs the filter object's specific work function.
+ * Loops calling work function for associated filter object.
+ * Sleeps when fifo is full.
+ * Monitors work done indicator.
+ * Exits loop when work indicator is set.
+ * @param _w Handle to work object.
+ */
+static void filter_loop( void * _f )
+{
+ hb_filter_object_t * f = _f;
+ hb_buffer_t * buf_in, * buf_out;
+
+ while( !*f->done && f->status != HB_FILTER_DONE )
+ {
+ buf_in = hb_fifo_get_wait( f->fifo_in );
+ if ( buf_in == NULL )
+ continue;
+
+ // Filters can drop buffers. Remember chapter information
+ // so that it can be propagated to the next buffer
+ if ( buf_in->s.new_chap )
+ {
+ f->chapter_time = buf_in->s.start;
+ f->chapter_val = buf_in->s.new_chap;
+ }
+ if ( *f->done )
+ {
+ if( buf_in )
+ {
+ hb_buffer_close( &buf_in );
+ }
+ break;
+ }
+
+ buf_out = NULL;
+ f->status = f->work( f, &buf_in, &buf_out );
+
+ if ( buf_out && f->chapter_val && f->chapter_time <= buf_out->s.start )
+ {
+ buf_out->s.new_chap = f->chapter_val;
+ f->chapter_val = 0;
+ }
+
+ if( buf_in )
+ {
+ hb_buffer_close( &buf_in );
+ }
+ if ( buf_out && f->fifo_out == NULL )
+ {
+ hb_buffer_close( &buf_out );
+ }
+ if( buf_out )
+ {
+ while ( !*f->done )
+ {
+ if ( hb_fifo_full_wait( f->fifo_out ) )
+ {
+ hb_fifo_push( f->fifo_out, buf_out );
+ break;
+ }
+ }
+ }
+ }
+ // Consume data in incoming fifo till job complete so that
+ // residual data does not stall the pipeline
+ while( !*f->done )
+ {
+ buf_in = hb_fifo_get_wait( f->fifo_in );
+ if ( buf_in != NULL )
+ hb_buffer_close( &buf_in );
+ }
+}
+
diff --git a/macosx/Controller.m b/macosx/Controller.m
index f0d58f9a9..69168b376 100644
--- a/macosx/Controller.m
+++ b/macosx/Controller.m
@@ -2747,6 +2747,8 @@ fWorkingCount = 0;
hb_add( fQueueEncodeLibhb, job );
}
+ hb_free_filters( job );
+
NSString *destinationDirectory = [[queueToApply objectForKey:@"DestinationPath"] stringByDeletingLastPathComponent];
[[NSUserDefaults standardUserDefaults] setObject:destinationDirectory forKey:@"LastDestinationDirectory"];
/* Lets mark our new encode as 1 or "Encoding" */
@@ -3278,6 +3280,11 @@ bool one_burned = FALSE;
}
i++;
}
+ if( one_burned )
+ {
+ filter = hb_filter_init( HB_FILTER_RENDER_SUB );
+ hb_add_filter( job, filter, NULL );
+ }
@@ -3337,25 +3344,23 @@ bool one_burned = FALSE;
job->grayscale = 0;
}
- /* Initialize the filters list */
- job->filters = hb_list_init();
-
/* Now lets call the filters if applicable.
* The order of the filters is critical
*/
+ hb_filter_object_t * filter;
/* Detelecine */
hb_filter_detelecine.settings = NULL;
+ filter = hb_filter_init( HB_FILTER_DETELECINE );
if ([fPictureController detelecine] == 1)
{
/* use a custom detelecine string */
- hb_filter_detelecine.settings = (char *) [[fPictureController detelecineCustomString] UTF8String];
- hb_list_add( job->filters, &hb_filter_detelecine );
+ hb_add_filter( job, filter, [[fPictureController detelecineCustomString] UTF8String] );
}
if ([fPictureController detelecine] == 2)
{
/* Default */
- hb_list_add( job->filters, &hb_filter_detelecine );
+ hb_add_filter( job, filter, NULL );
}
@@ -3363,77 +3368,69 @@ bool one_burned = FALSE;
if ([fPictureController useDecomb] == 1)
{
/* Decomb */
+ filter = hb_filter_init( HB_FILTER_DECOMB );
if ([fPictureController decomb] == 1)
{
/* use a custom decomb string */
- hb_filter_decomb.settings = (char *) [[fPictureController decombCustomString] UTF8String];
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, [[fPictureController decombCustomString] UTF8String] );
}
if ([fPictureController decomb] == 2)
{
/* use libhb defaults */
- hb_filter_decomb.settings = NULL;
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, NULL );
}
if ([fPictureController decomb] == 3)
{
/* use old defaults (decomb fast) */
- hb_filter_decomb.settings = "7:2:6:9:1:80";
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, "7:2:6:9:1:80" );
}
}
else
{
/* Deinterlace */
+ filter = hb_filter_init( HB_FILTER_DEINTERLACE );
if ([fPictureController deinterlace] == 1)
{
/* we add the custom string if present */
- hb_filter_deinterlace.settings = (char *) [[fPictureController deinterlaceCustomString] UTF8String];
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, [[fPictureController deinterlaceCustomString] UTF8String] );
}
else if ([fPictureController deinterlace] == 2)
{
/* Run old deinterlacer fd by default */
- hb_filter_deinterlace.settings = "-1";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "-1" );
}
else if ([fPictureController deinterlace] == 3)
{
/* Yadif mode 0 (without spatial deinterlacing.) */
- hb_filter_deinterlace.settings = "2";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "2" );
}
else if ([fPictureController deinterlace] == 4)
{
/* Yadif (with spatial deinterlacing) */
- hb_filter_deinterlace.settings = "0";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "0" );
}
}
/* Denoise */
+ filter = hb_filter_init( HB_FILTER_DENOISE );
if ([fPictureController denoise] == 1) // custom in popup
{
/* we add the custom string if present */
- hb_filter_denoise.settings = (char *) [[fPictureController denoiseCustomString] UTF8String];
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, [[fPictureController denoiseCustomString] UTF8String] );
}
else if ([fPictureController denoise] == 2) // Weak in popup
{
- hb_filter_denoise.settings = "2:1:2:3";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "2:1:2:3" );
}
else if ([fPictureController denoise] == 3) // Medium in popup
{
- hb_filter_denoise.settings = "3:2:2:3";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "3:2:2:3" );
}
else if ([fPictureController denoise] == 4) // Strong in popup
{
- hb_filter_denoise.settings = "7:7:5:5";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "7:7:5:5" );
}
@@ -3442,13 +3439,28 @@ bool one_burned = FALSE;
* the macgui's purposes a value of 0 actually means to not even use the filter
* current hb_filter_deblock.settings valid ranges are from 5 - 15
*/
+ filter = hb_filter_init( HB_FILTER_DEBLOCK );
if ([fPictureController deblock] != 0)
{
NSString *deblockStringValue = [NSString stringWithFormat: @"%d",[fPictureController deblock]];
- hb_filter_deblock.settings = (char *) [deblockStringValue UTF8String];
- hb_list_add( job->filters, &hb_filter_deblock );
+ hb_add_filter( job, filter, [deblockStringValue UTF8String] );
}
+ /* Add Crop/Scale filter */
+ char * filter_str;
+ filter_str = hb_strdup_printf("%d:%d:%d:%d:%d:%d",
+ job->width, job->height,
+ job->crop[0], job->crop[1], job->crop[2], job->crop[3] );
+ filter = hb_filter_init( HB_FILTER_CROP_SCALE );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
+
+ // Add framerate shaping filter
+ filter_str = hb_strdup_printf("%d:%d:%d",
+ job->cfr, job->vrate, job->vrate_base );
+ filter = hb_filter_init( HB_FILTER_VFR );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
}
@@ -3817,6 +3829,11 @@ bool one_burned = FALSE;
}
i++;
}
+ if( one_burned )
+ {
+ filter = hb_filter_init( HB_FILTER_RENDER_SUB );
+ hb_add_filter( job, filter, NULL );
+ }
#pragma mark -
@@ -3875,46 +3892,41 @@ bool one_burned = FALSE;
}
}
- /* Filters */
- job->filters = hb_list_init();
-
+ hb_filter_object_t * filter;
/* Now lets call the filters if applicable.
* The order of the filters is critical
*/
/* Detelecine */
- hb_filter_detelecine.settings = NULL;
+ filter = hb_filter_init( HB_FILTER_DETELECINE );
if ([[queueToApply objectForKey:@"PictureDetelecine"] intValue] == 1)
{
/* use a custom detelecine string */
- hb_filter_detelecine.settings = (char *) [[queueToApply objectForKey:@"PictureDetelecineCustom"] UTF8String];
- hb_list_add( job->filters, &hb_filter_detelecine );
+ hb_add_filter( job, filter, [[queueToApply objectForKey:@"PictureDetelecineCustom"] UTF8String] );
}
if ([[queueToApply objectForKey:@"PictureDetelecine"] intValue] == 2)
{
/* Use libhb's default values */
- hb_list_add( job->filters, &hb_filter_detelecine );
+ hb_add_filter( job, filter, NULL );
}
if ([[queueToApply objectForKey:@"PictureDecombDeinterlace"] intValue] == 1)
{
/* Decomb */
+ filter = hb_filter_init( HB_FILTER_DECOMB );
if ([[queueToApply objectForKey:@"PictureDecomb"] intValue] == 1)
{
/* use a custom decomb string */
- hb_filter_decomb.settings = (char *) [[queueToApply objectForKey:@"PictureDecombCustom"] UTF8String];
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, [[queueToApply objectForKey:@"PictureDecombCustom"] UTF8String] );
}
if ([[queueToApply objectForKey:@"PictureDecomb"] intValue] == 2)
{
/* use libhb defaults */
- hb_filter_decomb.settings = NULL;
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, NULL );
}
if ([[queueToApply objectForKey:@"PictureDecomb"] intValue] == 3)
{
/* use old defaults (decomb fast) */
- hb_filter_decomb.settings = "7:2:6:9:1:80";
- hb_list_add( job->filters, &hb_filter_decomb );
+ hb_add_filter( job, filter, "7:2:6:9:1:80" );
}
}
@@ -3922,54 +3934,48 @@ bool one_burned = FALSE;
{
/* Deinterlace */
+ filter = hb_filter_init( HB_FILTER_DEINTERLACE );
if ([[queueToApply objectForKey:@"PictureDeinterlace"] intValue] == 1)
{
/* we add the custom string if present */
- hb_filter_deinterlace.settings = (char *) [[queueToApply objectForKey:@"PictureDeinterlaceCustom"] UTF8String];
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, [[queueToApply objectForKey:@"PictureDeinterlaceCustom"] UTF8String] );
}
else if ([[queueToApply objectForKey:@"PictureDeinterlace"] intValue] == 2)
{
/* Run old deinterlacer fd by default */
- hb_filter_deinterlace.settings = "-1";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "-1" );
}
else if ([[queueToApply objectForKey:@"PictureDeinterlace"] intValue] == 3)
{
/* Yadif mode 0 (without spatial deinterlacing.) */
- hb_filter_deinterlace.settings = "2";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "2" );
}
else if ([[queueToApply objectForKey:@"PictureDeinterlace"] intValue] == 4)
{
/* Yadif (with spatial deinterlacing) */
- hb_filter_deinterlace.settings = "0";
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ hb_add_filter( job, filter, "0" );
}
}
/* Denoise */
+ filter = hb_filter_init( HB_FILTER_DENOISE );
if ([[queueToApply objectForKey:@"PictureDenoise"] intValue] == 1) // Custom in popup
{
/* we add the custom string if present */
- hb_filter_denoise.settings = (char *) [[queueToApply objectForKey:@"PictureDenoiseCustom"] UTF8String];
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, [[queueToApply objectForKey:@"PictureDenoiseCustom"] UTF8String] );
}
else if ([[queueToApply objectForKey:@"PictureDenoise"] intValue] == 2) // Weak in popup
{
- hb_filter_denoise.settings = "2:1:2:3";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "2:1:2:3" );
}
else if ([[queueToApply objectForKey:@"PictureDenoise"] intValue] == 3) // Medium in popup
{
- hb_filter_denoise.settings = "3:2:2:3";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "3:2:2:3" );
}
else if ([[queueToApply objectForKey:@"PictureDenoise"] intValue] == 4) // Strong in popup
{
- hb_filter_denoise.settings = "7:7:5:5";
- hb_list_add( job->filters, &hb_filter_denoise );
+ hb_add_filter( job, filter, "7:7:5:5" );
}
@@ -3978,11 +3984,28 @@ bool one_burned = FALSE;
* the macgui's purposes a value of 0 actually means to not even use the filter
* current hb_filter_deblock.settings valid ranges are from 5 - 15
*/
+ filter = hb_filter_init( HB_FILTER_DEBLOCK );
if ([[queueToApply objectForKey:@"PictureDeblock"] intValue] != 0)
{
- hb_filter_deblock.settings = (char *) [[queueToApply objectForKey:@"PictureDeblock"] UTF8String];
- hb_list_add( job->filters, &hb_filter_deblock );
+ hb_add_filter( job, filter, [[queueToApply objectForKey:@"PictureDeblock"] UTF8String] );
}
+
+ /* Add Crop/Scale filter */
+ char * filter_str;
+ filter_str = hb_strdup_printf("%d:%d:%d:%d:%d:%d",
+ job->width, job->height,
+ job->crop[0], job->crop[1], job->crop[2], job->crop[3] );
+ filter = hb_filter_init( HB_FILTER_CROP_SCALE );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
+
+ // Add framerate shaping filter
+ filter_str = hb_strdup_printf("%d:%d:%d",
+ job->cfr, job->vrate, job->vrate_base );
+ filter = hb_filter_init( HB_FILTER_VFR );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
+
[self writeToActivityLog: "prepareJob exiting"];
}
diff --git a/macosx/HBPreviewController.m b/macosx/HBPreviewController.m
index 4d0826b1d..dd2cb6ec4 100644
--- a/macosx/HBPreviewController.m
+++ b/macosx/HBPreviewController.m
@@ -906,6 +906,8 @@
job->pass = 0;
hb_add( fPreviewLibhb, job );
+ hb_free_filters( job );
+
[fEncodingControlBox setHidden: NO];
[fPictureControlBox setHidden: YES];
diff --git a/test/test.c b/test/test.c
index b600577d3..411c0adc6 100644
--- a/test/test.c
+++ b/test/test.c
@@ -61,6 +61,7 @@ static int decomb = 0;
static char * decomb_opt = 0;
static int rotate = 0;
static char * rotate_opt = 0;
+static int rotate_val = 0;
static int grayscale = 0;
static int vcodec = HB_VCODEC_FFMPEG_MPEG4;
static hb_list_t * audios = NULL;
@@ -1343,44 +1344,50 @@ static int HandleEvents( hb_handle_t * h )
job->deinterlace = deinterlace;
job->grayscale = grayscale;
+ hb_filter_object_t * filter;
+
/* Add selected filters */
- job->filters = hb_list_init();
-
- if( rotate )
- {
- hb_filter_rotate.settings = rotate_opt;
- hb_list_add( job->filters, &hb_filter_rotate);
- }
if( detelecine )
{
- hb_filter_detelecine.settings = detelecine_opt;
- hb_list_add( job->filters, &hb_filter_detelecine );
+ filter = hb_filter_init( HB_FILTER_DETELECINE );
+ hb_add_filter( job, filter, detelecine_opt );
}
if( decomb )
{
- hb_filter_decomb.settings = decomb_opt;
- hb_list_add( job->filters, &hb_filter_decomb );
+ filter = hb_filter_init( HB_FILTER_DECOMB );
+ hb_add_filter( job, filter, decomb_opt );
}
if( deinterlace )
{
- hb_filter_deinterlace.settings = deinterlace_opt;
- hb_list_add( job->filters, &hb_filter_deinterlace );
+ filter = hb_filter_init( HB_FILTER_DEINTERLACE );
+ hb_add_filter( job, filter, deinterlace_opt );
}
if( deblock )
{
- hb_filter_deblock.settings = deblock_opt;
- hb_list_add( job->filters, &hb_filter_deblock );
+ filter = hb_filter_init( HB_FILTER_DEBLOCK );
+ hb_add_filter( job, filter, deblock_opt );
}
if( denoise )
{
- hb_filter_denoise.settings = denoise_opt;
- hb_list_add( job->filters, &hb_filter_denoise );
+ filter = hb_filter_init( HB_FILTER_DENOISE );
+ hb_add_filter( job, filter, denoise_opt );
+ }
+ if( rotate )
+ {
+ filter = hb_filter_init( HB_FILTER_ROTATE );
+ hb_add_filter( job, filter, rotate_opt);
}
+
+ if (maxWidth)
+ job->maxWidth = maxWidth;
+ if (maxHeight)
+ job->maxHeight = maxHeight;
+
switch( anamorphic_mode )
{
case 0: // Non-anamorphic
-
+
if (modulus)
{
job->modulus = modulus;
@@ -1540,6 +1547,38 @@ static int HandleEvents( hb_handle_t * h )
break;
}
+ // Validate and adjust job picture dimensions
+ hb_validate_size( job );
+
+ // Add filter that does cropping and scaling
+ char * filter_str;
+ filter_str = hb_strdup_printf("%d:%d:%d:%d:%d:%d",
+ job->width, job->height,
+ job->crop[0], job->crop[1], job->crop[2], job->crop[3] );
+ filter = hb_filter_init( HB_FILTER_CROP_SCALE );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
+
+ // Add framerate shaping filter
+ if( vrate )
+ {
+ job->cfr = cfr;
+ job->vrate = 27000000;
+ job->vrate_base = vrate;
+ }
+ else if ( cfr )
+ {
+ // cfr or pfr flag with no rate specified implies
+ // use the title rate.
+ job->cfr = cfr;
+ job->vrate = title->rate;
+ job->vrate_base = title->rate_base;
+ }
+ filter_str = hb_strdup_printf("%d:%d:%d",
+ job->cfr, job->vrate, job->vrate_base );
+ filter = hb_filter_init( HB_FILTER_VFR );
+ hb_add_filter( job, filter, filter_str );
+ free( filter_str );
if( vquality >= 0.0 )
{
@@ -1555,20 +1594,6 @@ static int HandleEvents( hb_handle_t * h )
{
job->vcodec = vcodec;
}
- if( vrate )
- {
- job->cfr = cfr;
- job->vrate = 27000000;
- job->vrate_base = vrate;
- }
- else if ( cfr )
- {
- // cfr or pfr flag with no rate specified implies
- // use the title rate.
- job->cfr = cfr;
- job->vrate = title->rate;
- job->vrate_base = title->rate_base;
- }
/* Grab audio tracks */
if( atracks )
@@ -2235,6 +2260,12 @@ static int HandleEvents( hb_handle_t * h )
}
}
+ if ( sub_burned )
+ {
+ filter = hb_filter_init( HB_FILTER_RENDER_SUB );
+ hb_add_filter( job, filter, NULL);
+ }
+
if( srtfile )
{
char * token;
@@ -2377,6 +2408,7 @@ static int HandleEvents( hb_handle_t * h )
{
job->advanced_opts = NULL;
}
+
job->x264_profile = x264_profile;
job->x264_preset = x264_preset;
job->x264_tune = x264_tune;
@@ -2514,6 +2546,7 @@ static int HandleEvents( hb_handle_t * h )
job->pass = 0;
hb_add( h, job );
}
+ hb_reset_job( job );
hb_start( h );
break;
}
@@ -3507,6 +3540,7 @@ static int ParseOptions( int argc, char ** argv )
if( optarg != NULL )
{
rotate_opt = strdup( optarg );
+ rotate_val = atoi( optarg );
}
rotate = 1;
break;