author     Iago Toral Quiroga <[email protected]>   2017-01-03 09:10:13 +0100
committer  Iago Toral Quiroga <[email protected]>   2017-01-04 08:14:30 +0100
commit     1a8f2629e6d76517bc5b16e9f69cfa7cb4a342bd (patch)
tree       a6b04cc8d8678f2cf27d3423c469bbb772836078 /src/mesa/drivers/dri
parent     ba30e0ca20c1e25e076da7f0779f71da563cf2f9 (diff)
i965: remove brw_context dependency from intel_batchbuffer_init()
Reviewed-by: Kenneth Graunke <[email protected]>
Diffstat (limited to 'src/mesa/drivers/dri')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c        2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c  59
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.h   3
3 files changed, 36 insertions, 28 deletions
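
Before the per-file diffs, a quick sketch of what the interface change amounts to. Previously the batchbuffer initializer took the whole brw_context and reached into it for the buffer manager and the LLC flag; after this commit the caller passes exactly those pieces. The prototypes and the call site below are taken from the diff that follows; the snippet is illustrative only and is not buildable outside the i965 driver tree:

   /* Old entry point: depends on the full brw_context. */
   void intel_batchbuffer_init(struct brw_context *brw);

   /* New entry point: only the state the batchbuffer code actually uses. */
   void intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
                               bool has_llc);

   /* Updated call site in brwCreateContext(): */
   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
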
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index f248fac6181..3f3da7d2009 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -1060,7 +1060,7 @@ brwCreateContext(gl_api api,
 
    intel_fbo_init(brw);
 
-   intel_batchbuffer_init(brw);
+   intel_batchbuffer_init(&brw->batch, brw->bufmgr, brw->has_llc);
 
    if (brw->gen >= 6) {
       /* Create a new hardware context. Using a hardware context means that
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 71e0ed08da4..d1b9317a8c1 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -36,48 +36,55 @@
 #include <i915_drm.h>
 
 static void
-intel_batchbuffer_reset(struct brw_context *brw);
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+                        bool has_llc);
 
 void
-intel_batchbuffer_init(struct brw_context *brw)
+intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+                       bool has_llc)
 {
-   intel_batchbuffer_reset(brw);
+   intel_batchbuffer_reset(batch, bufmgr, has_llc);
 
-   if (!brw->has_llc) {
-      brw->batch.cpu_map = malloc(BATCH_SZ);
-      brw->batch.map = brw->batch.cpu_map;
-      brw->batch.map_next = brw->batch.cpu_map;
+   if (!has_llc) {
+      batch->cpu_map = malloc(BATCH_SZ);
+      batch->map = batch->cpu_map;
+      batch->map_next = batch->cpu_map;
    }
 }
 
 static void
-intel_batchbuffer_reset(struct brw_context *brw)
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+                        bool has_llc)
 {
-   if (brw->batch.last_bo != NULL) {
-      drm_intel_bo_unreference(brw->batch.last_bo);
-      brw->batch.last_bo = NULL;
+   if (batch->last_bo != NULL) {
+      drm_intel_bo_unreference(batch->last_bo);
+      batch->last_bo = NULL;
    }
-   brw->batch.last_bo = brw->batch.bo;
+   batch->last_bo = batch->bo;
 
-   brw_render_cache_set_clear(brw);
-
-   brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
-                                      BATCH_SZ, 4096);
-   if (brw->has_llc) {
-      drm_intel_bo_map(brw->batch.bo, true);
-      brw->batch.map = brw->batch.bo->virtual;
+   batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+   if (has_llc) {
+      drm_intel_bo_map(batch->bo, true);
+      batch->map = batch->bo->virtual;
    }
-   brw->batch.map_next = brw->batch.map;
+   batch->map_next = batch->map;
 
-   brw->batch.reserved_space = BATCH_RESERVED;
-   brw->batch.state_batch_offset = brw->batch.bo->size;
-   brw->batch.needs_sol_reset = false;
-   brw->batch.state_base_address_emitted = false;
+   batch->reserved_space = BATCH_RESERVED;
+   batch->state_batch_offset = batch->bo->size;
+   batch->needs_sol_reset = false;
+   batch->state_base_address_emitted = false;
 
    /* We don't know what ring the new batch will be sent to until we see the
     * first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
    */
-   brw->batch.ring = UNKNOWN_RING;
+   batch->ring = UNKNOWN_RING;
+}
+
+static void
+intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
+{
+   intel_batchbuffer_reset(&brw->batch, brw->bufmgr, brw->has_llc);
+   brw_render_cache_set_clear(brw);
 }
 
 void
@@ -186,7 +193,7 @@ brw_new_batch(struct brw_context *brw)
 {
    /* Create a new batchbuffer and reset the associated state: */
    drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
-   intel_batchbuffer_reset(brw);
+   intel_batchbuffer_reset_and_clear_render_cache(brw);
 
    /* If the kernel supports hardware contexts, then most hardware state is
     * preserved between batches; we only need to re-emit state that is required
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index d0ddf75866c..ee03a44c9e1 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -39,7 +39,8 @@ extern "C" {
 struct intel_batchbuffer;
 
 void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
-void intel_batchbuffer_init(struct brw_context *brw);
+void intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+                            bool has_llc);
 void intel_batchbuffer_free(struct intel_batchbuffer *batch);
 void intel_batchbuffer_save_state(struct brw_context *brw);
 void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
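
A note on the split visible in the intel_batchbuffer.c hunk above: brw_render_cache_set_clear() still needs the full brw_context, so it cannot stay inside the now context-free intel_batchbuffer_reset(). The commit therefore moves it into a small wrapper, and brw_new_batch() calls that wrapper instead of the bare reset. In shape (an annotated copy of the helper added by the diff; the comments are explanatory additions, not part of the commit):

   static void
   intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
   {
      /* Reset the batchbuffer using only the pieces it needs from brw. */
      intel_batchbuffer_reset(&brw->batch, brw->bufmgr, brw->has_llc);

      /* Render-cache bookkeeping is context-level state, so it is handled
       * here rather than inside intel_batchbuffer_reset() itself.
       */
      brw_render_cache_set_clear(brw);
   }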