Diffstat (limited to 'src'):

 src/mesa/drivers/dri/i965/brw_context.h |  1 +
 src/mesa/drivers/dri/i965/brw_draw.c    | 20 +++++++++++++-------
 src/mesa/drivers/dri/i965/brw_vtbl.c    |  3 +++
 3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 0ffc9f62389..8a71a4f5009 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -414,6 +414,7 @@ struct brw_context
    GLboolean emit_state_always;
    GLboolean wrap;
    GLboolean tmp_fallback;
+   GLboolean no_batch_wrap;
 
    struct {
       struct brw_state_flags dirty;
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 9225748aaac..887eebe4752 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -270,12 +270,23 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
    LOCK_HARDWARE(intel);
 
    if (brw->intel.numClipRects == 0) {
-      assert(intel->batch->ptr == intel->batch->map);
       UNLOCK_HARDWARE(intel);
       return GL_TRUE;
    }
 
    {
+      /* Flush the batch if it's approaching full, so that we don't wrap while
+       * we've got validated state that needs to be in the same batch as the
+       * primitives.  This fraction is just a guess (minimal full state plus
+       * a primitive is around 512 bytes), and would be better if we had
+       * an upper bound of how much we might emit in a single
+       * brw_try_draw_prims().
+       */
+      if (intel->batch->ptr - intel->batch->map > intel->batch->size * 3 / 4)
+         intel_batchbuffer_flush(intel->batch);
+
+      brw->no_batch_wrap = GL_TRUE;
+
       /* Set the first primitive early, ahead of validate_state:
        */
       brw_set_prim(brw, prim[0].mode);
@@ -310,12 +321,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
 out:
-   /* Currently have to do this to synchronize with the map/unmap of
-    * the vertex buffer in brw_exec_api.c.  Not sure if there is any
-    * way around this, as not every flush is due to a buffer filling
-    * up.
-    */
-   intel_batchbuffer_flush( brw->intel.batch );
+   brw->no_batch_wrap = GL_FALSE;
 
    /* Free any old data so it doesn't clog up texture memory - we
     * won't be referencing it again.
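
The 3/4-full headroom check above is the heart of this change: flush up front while it is still safe, then forbid implicit wraps until the draw's state and primitives have landed in the same batch. What follows is a minimal, self-contained C sketch of that pattern; struct batchbuffer, batch_flush() and draw_prims() are hypothetical stand-ins for the driver's intel_batchbuffer, intel_batchbuffer_flush() and brw_try_draw_prims(), not the real Mesa API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct intel_batchbuffer: just the fields
 * the headroom check needs (map = buffer start, ptr = write cursor). */
struct batchbuffer {
   char *map;
   char *ptr;
   unsigned size;
   bool no_wrap;                /* models brw->no_batch_wrap */
};

/* Flush: submit the batch and reset the write cursor.  A flush (wrap)
 * while no_wrap is set would separate validated state from the
 * primitives that depend on it, so treat that as a bug. */
static void batch_flush(struct batchbuffer *batch)
{
   assert(!batch->no_wrap);
   batch->ptr = batch->map;
}

/* The 3/4-full heuristic from brw_try_draw_prims(): flush up front if
 * headroom is low, then forbid wrapping for the rest of the draw. */
static void draw_prims(struct batchbuffer *batch, unsigned emit_bytes)
{
   if (batch->ptr - batch->map > batch->size * 3 / 4)
      batch_flush(batch);

   batch->no_wrap = true;
   /* ... validate state and emit primitives; if this overflowed the
    * batch, the assert in batch_flush() would catch the bad wrap ... */
   batch->ptr += emit_bytes;
   batch->no_wrap = false;
}

int main(void)
{
   char store[4096];
   struct batchbuffer batch = { store, store, sizeof(store), false };

   draw_prims(&batch, 512);     /* plenty of headroom, no flush */
   batch.ptr = store + 3500;    /* simulate a nearly full batch */
   draw_prims(&batch, 512);     /* triggers the up-front flush */
   printf("used %td of %u bytes\n", batch.ptr - batch.map, batch.size);
   return 0;
}

Note the assert in batch_flush(): in the sketch, as in the driver, a wrap inside the guarded region is treated as a bug rather than silently splitting state from primitives.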
diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c
index 277c8dca8b2..e9fed4dae10 100644
--- a/src/mesa/drivers/dri/i965/brw_vtbl.c
+++ b/src/mesa/drivers/dri/i965/brw_vtbl.c
@@ -91,6 +91,9 @@ static void brw_new_batch( struct intel_context *intel )
 {
    struct brw_context *brw = brw_context(&intel->ctx);
 
+   /* Check that we didn't just wrap our batchbuffer at a bad time. */
+   assert(!brw->no_batch_wrap);
+
    /* Mark all context state as needing to be re-emitted.
     * This is probably not as severe as on 915, since almost all of our state
     * is just in referenced buffers.
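
The assert added in brw_new_batch() is the enforcement half of the scheme: a wrap marks all context state dirty, so a wrap while no_batch_wrap is set means the validated state and the primitives that consumed it were split across batches. A hypothetical, pared-down model of that interaction (the real driver tracks dirtiness in struct brw_state_flags rather than a single bitmask):

#include <assert.h>
#include <stdbool.h>

struct brw {
   bool no_batch_wrap;          /* set around validate + emit in a draw */
   unsigned dirty;              /* all-ones forces full state re-emission */
};

/* Model of brw_new_batch(): everything emitted so far is gone once the
 * batchbuffer wraps, so all state must be re-emitted -- and a wrap
 * between state validation and the dependent primitives is a bug. */
static void new_batch(struct brw *brw)
{
   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);
   brw->dirty = ~0u;
}

int main(void)
{
   struct brw brw = { false, 0 };
   new_batch(&brw);             /* fine: wrap outside a guarded draw */
   return 0;
}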