author     Kenneth Graunke <[email protected]>    2013-10-15 16:00:16 -0700
committer  Kenneth Graunke <[email protected]>    2013-10-17 14:27:03 -0700
commit     80a9c42e9e9012bf0b4c143f1b8dd325c8c88120 (patch)
tree       a34861fd23922621c6a6b0b6e8706e9d86a317fa
parent     6613f346acc54a161046ee77e4a017c3e3d1a99f (diff)
i965: Un-virtualize brw_new_batch().
Since the i915/i965 split, there's only one implementation of this
virtual function. We may as well just call it directly.
Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
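For readers skimming the diff below: the patch deletes the new_batch function pointer from the driver vtbl and moves the only remaining implementation into intel_batchbuffer.c as a static function that do_flush_locked() calls directly. The following is a minimal, self-contained C sketch of that de-virtualization pattern; the struct and function names echo the patch, but the bodies, the *_sketch suffixes, and the main() driver are invented for illustration and are not the actual Mesa code.

#include <stdio.h>

struct brw_context_sketch;

/* A trimmed stand-in for the driver vtbl.  Before this patch it also held
 *    void (*new_batch)(struct brw_context_sketch *brw);
 * but with a single implementation left after the i915/i965 split, the
 * indirection no longer earns its keep. */
struct brw_vtbl_sketch {
   void (*destroy)(struct brw_context_sketch *brw);
};

struct brw_context_sketch {
   struct brw_vtbl_sketch vtbl;
   int batch_count;               /* hypothetical per-batch state */
};

/* The lone implementation, now static and local to the file that calls it. */
static void
brw_new_batch_sketch(struct brw_context_sketch *brw)
{
   brw->batch_count = 0;          /* stand-in for the real per-batch resets */
}

static void
do_flush_locked_sketch(struct brw_context_sketch *brw)
{
   /* Old call site:  brw->vtbl.new_batch(brw);   (indirect, via the vtbl)
    * New call site:  a plain direct call, which the compiler can also inline. */
   brw_new_batch_sketch(brw);
}

static void
destroy_sketch(struct brw_context_sketch *brw)
{
   (void) brw;                    /* nothing to tear down in this sketch */
}

int
main(void)
{
   struct brw_context_sketch brw = { .vtbl = { .destroy = destroy_sketch },
                                     .batch_count = 42 };
   do_flush_locked_sketch(&brw);
   printf("batch_count after new batch: %d\n", brw.batch_count);
   brw.vtbl.destroy(&brw);
   return 0;
}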
-rw-r--r--   src/mesa/drivers/dri/i965/brw_context.h         1
-rw-r--r--   src/mesa/drivers/dri/i965/brw_vtbl.c            42
-rw-r--r--   src/mesa/drivers/dri/i965/intel_batchbuffer.c   43
3 files changed, 42 insertions, 44 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index ace4bd01624..81fc1b9d5fc 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -923,7 +923,6 @@ struct brw_context
    struct
    {
       void (*destroy) (struct brw_context * brw);
-      void (*new_batch) (struct brw_context * brw);
 
       void (*update_texture_surface)(struct gl_context *ctx,
                                      unsigned unit,
diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c
index 0f7671bd929..d78af8b3496 100644
--- a/src/mesa/drivers/dri/i965/brw_vtbl.c
+++ b/src/mesa/drivers/dri/i965/brw_vtbl.c
@@ -90,50 +90,8 @@ brw_destroy_context(struct brw_context *brw)
       drm_intel_gem_context_destroy(brw->hw_ctx);
 }
 
-/**
- * called from intelFlushBatchLocked
- */
-static void
-brw_new_batch(struct brw_context *brw)
-{
-   /* If the kernel supports hardware contexts, then most hardware state is
-    * preserved between batches; we only need to re-emit state that is required
-    * to be in every batch. Otherwise we need to re-emit all the state that
-    * would otherwise be stored in the context (which for all intents and
-    * purposes means everything).
-    */
-   if (brw->hw_ctx == NULL)
-      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
-
-   brw->state.dirty.brw |= BRW_NEW_BATCH;
-
-   /* Assume that the last command before the start of our batch was a
-    * primitive, for safety.
-    */
-   brw->batch.need_workaround_flush = true;
-
-   brw->state_batch_count = 0;
-
-   brw->ib.type = -1;
-
-   /* Mark that the current program cache BO has been used by the GPU.
-    * It will be reallocated if we need to put new programs in for the
-    * next batch.
-    */
-   brw->cache.bo_used_by_gpu = true;
-
-   /* We need to periodically reap the shader time results, because rollover
-    * happens every few seconds. We also want to see results every once in a
-    * while, because many programs won't cleanly destroy our context, so the
-    * end-of-run printout may not happen.
-    */
-   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
-      brw_collect_and_report_shader_time(brw);
-}
-
 void brwInitVtbl( struct brw_context *brw )
 {
-   brw->vtbl.new_batch = brw_new_batch;
    brw->vtbl.destroy = brw_destroy_context;
 
    assert(brw->gen >= 4);
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 20a6d83cb41..6d1ae797426 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -173,6 +173,47 @@ do_batch_dump(struct brw_context *brw)
 }
 
 /**
+ * Called when starting a new batch buffer.
+ */
+static void
+brw_new_batch(struct brw_context *brw)
+{
+   /* If the kernel supports hardware contexts, then most hardware state is
+    * preserved between batches; we only need to re-emit state that is required
+    * to be in every batch. Otherwise we need to re-emit all the state that
+    * would otherwise be stored in the context (which for all intents and
+    * purposes means everything).
+    */
+   if (brw->hw_ctx == NULL)
+      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
+
+   brw->state.dirty.brw |= BRW_NEW_BATCH;
+
+   /* Assume that the last command before the start of our batch was a
+    * primitive, for safety.
+    */
+   brw->batch.need_workaround_flush = true;
+
+   brw->state_batch_count = 0;
+
+   brw->ib.type = -1;
+
+   /* Mark that the current program cache BO has been used by the GPU.
+    * It will be reallocated if we need to put new programs in for the
+    * next batch.
+    */
+   brw->cache.bo_used_by_gpu = true;
+
+   /* We need to periodically reap the shader time results, because rollover
+    * happens every few seconds. We also want to see results every once in a
+    * while, because many programs won't cleanly destroy our context, so the
+    * end-of-run printout may not happen.
+    */
+   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+      brw_collect_and_report_shader_time(brw);
+}
+
+/**
  * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
  * sending it off.
  *
@@ -245,7 +286,7 @@ do_flush_locked(struct brw_context *brw)
       fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
       exit(1);
    }
-   brw->vtbl.new_batch(brw);
+   brw_new_batch(brw);
 
    return ret;
 }