author    Matt Turner <[email protected]>  2017-05-31 21:12:01 -0700
committer Matt Turner <[email protected]>  2017-06-06 11:47:46 -0700
commit    ce17d4c5f5489e88554a8ef59f8cc6e453953336 (patch)
tree      6f709d06e198ab9ee9eef641792f22b5d5acdc7f /src/mesa/drivers
parent    68bfc377fbdf831a96a3614cf2b7eedb842e3c16 (diff)
i965: Implement brw_bo_map_unsynchronized() with MAP_ASYNC
This way we can let brw_bo_map() choose the best mapping type.

Part of the patch inlines map_gtt() into brw_bo_map_gtt() (and removes map_gtt()). brw_bo_map_gtt() just wrapped map_gtt() with locking and a call to set_domain(), and map_gtt() was called directly by brw_bo_map_unsynchronized() to avoid that set_domain() call. With the MAP_ASYNC flag, we now get the same behavior previously provided by brw_bo_map_unsynchronized().

Reviewed-by: Kenneth Graunke <[email protected]>
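As a quick orientation before the hunks below, here is a minimal, self-contained sketch of the control flow this patch establishes. The struct bo type, the function bodies, and main() are simplified placeholders for illustration only; the real functions take struct brw_context / struct brw_bo, perform the actual GTT mmap, and issue the set-domain ioctl. See the diff itself for the authoritative code.

#include <stdio.h>

#define MAP_READ   0x1
#define MAP_WRITE  0x2
#define MAP_ASYNC  0x4   /* skip the synchronization step */

struct bo {
   const char *name;
   void *map_gtt;
};

static void set_domain(struct bo *bo)
{
   /* Stand-in for the set-domain ioctl that waits for the GPU and
    * flushes caches before the CPU touches the mapping. */
   printf("set_domain(%s)\n", bo->name);
}

static void *map_gtt(struct bo *bo, unsigned flags)
{
   /* After the patch, the mapping and the synchronization decision
    * live in one function: only synchronize when MAP_ASYNC is absent. */
   if (!(flags & MAP_ASYNC))
      set_domain(bo);
   return bo->map_gtt;
}

static void *map_unsynchronized(struct bo *bo)
{
   /* The old dedicated unsynchronized path becomes a thin wrapper
    * that simply adds MAP_ASYNC. */
   return map_gtt(bo, MAP_READ | MAP_WRITE | MAP_ASYNC);
}

int main(void)
{
   char backing[16];
   struct bo bo = { .name = "scratch", .map_gtt = backing };

   map_gtt(&bo, MAP_READ | MAP_WRITE);   /* synchronized: calls set_domain() */
   map_unsynchronized(&bo);              /* async: set_domain() is skipped */
   return 0;
}

The design point is visible in map_unsynchronized(): instead of duplicating the mapping path without synchronization, it forwards to the same function with MAP_ASYNC set, so a single place decides whether set_domain() runs.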
Diffstat (limited to 'src/mesa/drivers')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_bufmgr.c | 49
1 file changed, 9 insertions(+), 40 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index 9e5847fb609..4ab72cd3e51 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -702,10 +702,12 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
}
static void *
-map_gtt(struct brw_bo *bo)
+brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
+ pthread_mutex_lock(&bufmgr->lock);
+
/* Get a mapping of the buffer if we haven't before. */
if (bo->map_gtt == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -721,6 +723,7 @@ map_gtt(struct brw_bo *bo)
if (ret != 0) {
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+ pthread_mutex_unlock(&bufmgr->lock);
return NULL;
}
@@ -731,39 +734,15 @@ map_gtt(struct brw_bo *bo)
bo->map_gtt = NULL;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+ pthread_mutex_unlock(&bufmgr->lock);
return NULL;
}
+ bo->map_count++;
}
DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
bo->map_gtt);
- bo->map_count++;
- return bo->map_gtt;
-}
-
-static void *
-brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
-{
- struct brw_bufmgr *bufmgr = bo->bufmgr;
-
- pthread_mutex_lock(&bufmgr->lock);
-
- void *map = map_gtt(bo);
- if (map == NULL) {
- pthread_mutex_unlock(&bufmgr->lock);
- return NULL;
- }
-
- /* Now move it to the GTT domain so that the GPU and CPU
- * caches are flushed and the GPU isn't actively using the
- * buffer.
- *
- * The pagefault handler does this domain change for us when
- * it has unbound the BO from the GTT, but it's up to us to
- * tell it when we're about to use things if we had done
- * rendering and it still happens to be bound to the GTT.
- */
if (!(flags & MAP_ASYNC)) {
set_domain(brw, "GTT mapping", bo,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -773,7 +752,7 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_gtt, bo->size));
pthread_mutex_unlock(&bufmgr->lock);
- return map;
+ return bo->map_gtt;
}
/**
@@ -804,18 +783,8 @@ brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
*/
if (!bufmgr->has_llc)
return brw_bo_map_gtt(brw, bo, MAP_READ | MAP_WRITE);
-
- pthread_mutex_lock(&bufmgr->lock);
-
- void *map = map_gtt(bo);
- if (map != NULL) {
- bo_mark_mmaps_incoherent(bo);
- VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_gtt, bo->size));
- }
-
- pthread_mutex_unlock(&bufmgr->lock);
-
- return map;
+ else
+ return brw_bo_map_gtt(brw, bo, MAP_READ | MAP_WRITE | MAP_ASYNC);
}
static bool