From e9adfa2ba1af9c3579b25327335c47118b6c7c3f Mon Sep 17 00:00:00 2001
From: Chad Versace
Date: Thu, 6 Oct 2011 14:18:35 -0700
Subject: intel: Assert that no batch is emitted if a region is mapped

What I would prefer to assert is that, for each region that is currently
mapped, no batch is emitted that uses that region's bo. However, it's much
easier to implement this big hammer.

Observe that this requires that the batch flush in intel_region_map() be
moved to within the map_refcount guard.

v2: Add comments (borrowed from anholt's reply) explaining why the
    assertion is a good idea.

Reviewed-by: Eric Anholt
Reviewed-by: Ian Romanick
Signed-off-by: Chad Versace
---
 src/mesa/drivers/dri/intel/intel_batchbuffer.c |  7 +++++++
 src/mesa/drivers/dri/intel/intel_context.h     |  8 ++++++++
 src/mesa/drivers/dri/intel/intel_regions.c     | 18 +++++++++++++++++-
 3 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
index 37c13c967ab..8dfae677e8a 100644
--- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -148,6 +148,13 @@ void
 _intel_batchbuffer_flush(struct intel_context *intel,
                          const char *file, int line)
 {
+   /* No batch should be emitted that uses a mapped region, because that would
+    * cause the map to be incoherent with GPU rendering done by the
+    * batchbuffer. To ensure that condition, we assert a condition that is
+    * stronger but easier to implement: that *no* region is mapped.
+    */
+   assert(intel->num_mapped_regions == 0);
+
    if (intel->batch.used == 0)
       return;
 
diff --git a/src/mesa/drivers/dri/intel/intel_context.h b/src/mesa/drivers/dri/intel/intel_context.h
index cf7ab9e665a..cb316e82bed 100644
--- a/src/mesa/drivers/dri/intel/intel_context.h
+++ b/src/mesa/drivers/dri/intel/intel_context.h
@@ -287,6 +287,14 @@ struct intel_context
     */
    GLboolean is_front_buffer_reading;
 
+   /**
+    * Count of intel_regions that are mapped.
+    *
+    * This allows us to assert that no batch buffer is emitted if a
+    * region is mapped.
+    */
+   int num_mapped_regions;
+
    GLboolean use_texture_tiling;
    GLboolean use_early_z;
 
diff --git a/src/mesa/drivers/dri/intel/intel_regions.c b/src/mesa/drivers/dri/intel/intel_regions.c
index 7faf4ca40f5..67142a13162 100644
--- a/src/mesa/drivers/dri/intel/intel_regions.c
+++ b/src/mesa/drivers/dri/intel/intel_regions.c
@@ -111,15 +111,28 @@ debug_backtrace(void)
 GLubyte *
 intel_region_map(struct intel_context *intel, struct intel_region *region)
 {
-   intel_flush(&intel->ctx);
+   /* We have the region->map_refcount controlling mapping of the BO because
+    * in software fallbacks we may end up mapping the same buffer multiple
+    * times on Mesa's behalf, so we refcount our mappings to make sure that
+    * the pointer stays valid until the end of the unmap chain. However, we
+    * must not emit any batchbuffers between the start of mapping and the end
+    * of unmapping, or further use of the map will be incoherent with the GPU
+    * rendering done by that batchbuffer. Hence we assert in
+    * intel_batchbuffer_flush() that that doesn't happen, which means that the
+    * flush is only needed on first map of the buffer.
+    */
 
    _DBG("%s %p\n", __FUNCTION__, region);
    if (!region->map_refcount++) {
+      intel_flush(&intel->ctx);
+
       if (region->tiling != I915_TILING_NONE)
          drm_intel_gem_bo_map_gtt(region->bo);
       else
          drm_intel_bo_map(region->bo, GL_TRUE);
+
       region->map = region->bo->virtual;
+      ++intel->num_mapped_regions;
    }
 
    return region->map;
@@ -134,7 +147,10 @@ intel_region_unmap(struct intel_context *intel, struct intel_region *region)
          drm_intel_gem_bo_unmap_gtt(region->bo);
       else
          drm_intel_bo_unmap(region->bo);
+
       region->map = NULL;
+      --intel->num_mapped_regions;
+      assert(intel->num_mapped_regions >= 0);
    }
 }
 
-- 
cgit v1.2.3
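
For readers skimming the diff, the standalone toy program below distills the invariant this patch enforces: mappings are refcounted per region, only the outermost map triggers a flush, and flushing asserts that nothing is currently mapped. It is a minimal sketch, not driver code; the struct context / struct region types, batchbuffer_flush(), region_map(), region_unmap(), and the fake backing store are simplified stand-ins invented for illustration, not Mesa's real structures or API.

/*
 * Illustrative sketch only (not part of the patch): a toy model of the
 * refcounted-map plus assert-on-flush invariant. All names here are
 * simplified stand-ins, not the driver's real API.
 */
#include <assert.h>
#include <stddef.h>

struct context {
   int num_mapped_regions;       /* count of currently mapped regions */
};

struct region {
   int map_refcount;             /* nested map/unmap calls on one region */
   void *map;                    /* CPU pointer while mapped, else NULL */
};

static char fake_bo_storage[64]; /* pretend backing store for the mapped BO */

/* Submitting a batch while any region is mapped would leave the CPU map
 * incoherent with GPU rendering, so the big-hammer assertion rejects it. */
static void batchbuffer_flush(struct context *ctx)
{
   assert(ctx->num_mapped_regions == 0);
   /* ... hand the accumulated batch to the kernel here ... */
}

/* Only the first map of a region flushes and bumps the global counter. */
static void *region_map(struct context *ctx, struct region *reg)
{
   if (!reg->map_refcount++) {
      batchbuffer_flush(ctx);    /* flush pending GPU work before mapping */
      reg->map = fake_bo_storage;
      ++ctx->num_mapped_regions;
   }
   return reg->map;
}

/* Only the last unmap drops the counter and invalidates the pointer. */
static void region_unmap(struct context *ctx, struct region *reg)
{
   if (!--reg->map_refcount) {
      reg->map = NULL;
      --ctx->num_mapped_regions;
      assert(ctx->num_mapped_regions >= 0);
   }
}

int main(void)
{
   struct context ctx = { 0 };
   struct region reg = { 0, NULL };

   region_map(&ctx, &reg);       /* outer map: flushes, counter -> 1 */
   region_map(&ctx, &reg);       /* nested map: refcount only */
   /* batchbuffer_flush(&ctx) here would trip the assertion */
   region_unmap(&ctx, &reg);
   region_unmap(&ctx, &reg);     /* counter back to 0 */
   batchbuffer_flush(&ctx);      /* safe again */
   return 0;
}

Note how the flush moves inside the map_refcount guard, exactly as the commit message describes: once the assertion guarantees no batch is emitted while a map is outstanding, flushing on anything but the first map would be redundant.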