author     Chris Wilson <[email protected]>       2017-07-07 13:12:54 +0100
committer  Kenneth Graunke <[email protected]>    2017-07-10 11:18:08 -0700
commit     833108ac14ade91f54cc6e1e157ecc19b62da404 (patch)
tree       a2260f529f57504215bce601afaa9266d55bb073 /src
parent     3b28eaabf603657c388caa72bc92b1b660d00b2a (diff)
i965: Use brw_bo_wait() for brw_bo_wait_rendering()
Currently, we use set_domain() to cause a stall on rendering. But the
set-domain ioctl has the side-effect of changing the kernel's cache
domain underneath the struct_mutex, which may perturb state if there
was no rendering to wait upon, and in general is much heavier than the
lockless wait-ioctl. Historically libdrm used set-domain as we did not
have an explicit wait-ioctl (and the patches to teach it to use wait if
available were lost in the mists). Since mesa already depends upon a
kernel that supports the wait-ioctl, we do not need to supply a
fallback.

Signed-off-by: Chris Wilson <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
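For context, the lockless wait the message refers to is the i915 GEM
wait ioctl. Below is a minimal sketch of issuing it directly, assuming
libdrm's drmIoctl() and the kernel's i915_drm.h UAPI header; Mesa's
actual brw_bo_wait() wraps this same ioctl but differs in detail:

   #include <errno.h>
   #include <stdint.h>
   #include <string.h>
   #include <xf86drm.h>
   #include <i915_drm.h>

   /* Block until all GPU rendering against the GEM object `handle` has
    * completed.  A negative timeout means wait indefinitely; zero polls.
    * Returns 0 on success, -errno on failure (-ETIME if still busy).
    */
   static int
   gem_wait(int fd, uint32_t handle, int64_t timeout_ns)
   {
      struct drm_i915_gem_wait wait;

      memset(&wait, 0, sizeof(wait));
      wait.bo_handle = handle;
      wait.timeout_ns = timeout_ns;

      /* Unlike SET_DOMAIN, this ioctl is lockless in the kernel: it does
       * not take struct_mutex and leaves the object's cache domain alone.
       */
      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0)
         return -errno;

      return 0;
   }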
Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_bufmgr.c            | 8
-rw-r--r--  src/mesa/drivers/dri/i965/brw_bufmgr.h            | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_context.c           | 2
-rw-r--r--  src/mesa/drivers/dri/i965/brw_performance_query.c | 2
-rw-r--r--  src/mesa/drivers/dri/i965/intel_batchbuffer.c     | 4
5 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index da12a131526..ee4a5cfa2c8 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -831,10 +831,12 @@ brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
/** Waits for all GPU rendering with the object to have completed. */
void
-brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
+brw_bo_wait_rendering(struct brw_bo *bo)
{
- set_domain(brw, "waiting for",
- bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ /* We require a kernel recent enough for WAIT_IOCTL support.
+ * See intel_init_bufmgr()
+ */
+ brw_bo_wait(bo, -1);
}
/**
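The new comment defers the capability question to intel_init_bufmgr().
Such a probe can be made through the standard i915 GETPARAM interface;
the parameter below is real kernel UAPI, but whether intel_init_bufmgr()
checks it exactly this way is an assumption:

   #include <stdbool.h>
   #include <xf86drm.h>
   #include <i915_drm.h>

   /* Ask the kernel whether DRM_IOCTL_I915_GEM_WAIT (with timeout) is
    * available.  Sketch only: the actual check in intel_init_bufmgr()
    * may be structured differently.
    */
   static bool
   kernel_has_gem_wait(int fd)
   {
      int value = 0;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_HAS_WAIT_TIMEOUT,
         .value = &value,
      };

      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value > 0;
   }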
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h
index 4d671b6aaeb..80c71825e80 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
@@ -227,7 +227,7 @@ int brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
-void brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo);
+void brw_bo_wait_rendering(struct brw_bo *bo);
/**
* Tears down the buffer manager instance.
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index 0b3fdc68429..8a3ffab443f 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -256,7 +256,7 @@ intel_finish(struct gl_context * ctx)
intel_glFlush(ctx);
if (brw->batch.last_bo)
- brw_bo_wait_rendering(brw, brw->batch.last_bo);
+ brw_bo_wait_rendering(brw->batch.last_bo);
}
static void
diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index 81389dbd3e3..e4e1854bf2e 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_query.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_query.c
@@ -1350,7 +1350,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
if (brw_batch_references(&brw->batch, bo))
intel_batchbuffer_flush(brw);
- brw_bo_wait_rendering(brw, bo);
+ brw_bo_wait_rendering(bo);
/* Due to a race condition between the OA unit signaling report
* availability and the report actually being written into memory,
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 62d2fe8ef35..28c2f474c0a 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -497,7 +497,7 @@ throttle(struct brw_context *brw)
/* Pass NULL rather than brw so we avoid perf_debug warnings;
* stalling is common and expected here...
*/
- brw_bo_wait_rendering(NULL, brw->throttle_batch[1]);
+ brw_bo_wait_rendering(brw->throttle_batch[1]);
}
brw_bo_unreference(brw->throttle_batch[1]);
}
@@ -723,7 +723,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- brw_bo_wait_rendering(brw, brw->batch.bo);
+ brw_bo_wait_rendering(brw->batch.bo);
}
/* Start a new batch buffer. */
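A closing note on the timeout argument that brw_bo_wait() now forwards
to the kernel: a negative value, as in the brw_bo_wait(bo, -1) above,
blocks until the object is idle, while zero turns the same ioctl into a
non-blocking busy poll that fails with ETIME while rendering is
outstanding. Reusing the gem_wait() sketch from earlier:

   gem_wait(fd, handle, -1);                        /* block until idle  */
   bool busy = gem_wait(fd, handle, 0) == -ETIME;   /* poll, no stalling */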