author     Rob Clark <[email protected]>   2016-07-13 09:49:53 -0400
committer  Rob Clark <[email protected]>   2016-07-30 09:23:42 -0400
commit     00bed8a794de3d80a46b65b9ab23c6df83e416a8 (patch)
tree       7098b39af0e3a10665314e97487e6773c2c9312f /src/gallium/drivers/freedreno/freedreno_context.c
parent     c44163876a2858aea219a08bd2e048b76953cff9 (diff)
freedreno: threaded batch flush
With the state accessed from GMEM+submit factored out of fd_context and
into fd_batch, now it is possible to punt this off to a helper thread.
And more importantly, since there are cases where one context might
force the batch-cache to flush another context's batches (ie. when
there are too many in-flight batches), using a per-context helper
thread keeps various different flushes for a given context serialized.

TODO as with batch-cache, there are a few places where we'll need a
mutex to protect critical sections, which is completely missing at
the moment.

Signed-off-by: Rob Clark <[email protected]>
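To make the design more concrete, here is a minimal standalone sketch of the
idea, not the actual freedreno code: a per-context worker thread pops queued
"flush" jobs in submission order, so every flush for a given context stays
serialized even when another context triggers it. The real patch uses Mesa's
util_queue helper (see the util_queue_init()/util_queue_destroy() calls in
the diff below); plain pthreads are used here only so the sketch is
self-contained, and all names (flush_queue, flush_job, fake_flush_batch,
queue_flush) are invented for illustration.

/* Sketch only: per-context flush thread draining jobs in FIFO order. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_job {
	int batch_id;                 /* stand-in for a struct fd_batch pointer */
	struct flush_job *next;
};

struct flush_queue {
	pthread_t thread;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct flush_job *head, *tail;
	bool shutdown;
};

static void fake_flush_batch(int batch_id)
{
	/* stands in for building the GMEM cmdstream and submitting it */
	printf("flushed batch %d\n", batch_id);
}

static void *flush_thread(void *arg)
{
	struct flush_queue *q = arg;

	pthread_mutex_lock(&q->lock);
	while (q->head || !q->shutdown) {
		if (!q->head) {
			pthread_cond_wait(&q->cond, &q->lock);
			continue;
		}
		struct flush_job *job = q->head;
		q->head = job->next;
		if (!q->head)
			q->tail = NULL;
		/* drop the lock while "flushing" so new jobs can still be queued */
		pthread_mutex_unlock(&q->lock);
		fake_flush_batch(job->batch_id);   /* runs strictly in queue order */
		free(job);
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
	return NULL;
}

/* called from the context's flush path instead of flushing inline;
 * allocation error handling omitted for brevity
 */
static void queue_flush(struct flush_queue *q, int batch_id)
{
	struct flush_job *job = calloc(1, sizeof(*job));

	job->batch_id = batch_id;
	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = job;
	else
		q->head = job;
	q->tail = job;
	pthread_cond_signal(&q->cond);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct flush_queue q = { .head = NULL };

	pthread_mutex_init(&q.lock, NULL);
	pthread_cond_init(&q.cond, NULL);
	pthread_create(&q.thread, NULL, flush_thread, &q);

	for (int i = 0; i < 4; i++)
		queue_flush(&q, i);        /* batches are flushed in order 0..3 */

	/* tell the worker to drain remaining jobs and exit */
	pthread_mutex_lock(&q.lock);
	q.shutdown = true;
	pthread_cond_signal(&q.cond);
	pthread_mutex_unlock(&q.lock);
	pthread_join(q.thread, NULL);

	pthread_cond_destroy(&q.cond);
	pthread_mutex_destroy(&q.lock);
	return 0;
}

In the patch itself, fd_context_init() creates the queue with
util_queue_init(&ctx->flush_queue, "flush_queue", 16, 1); the single worker
thread (the final argument) is presumably what keeps a context's flushes
serialized, with room for up to 16 queued jobs.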
Diffstat (limited to 'src/gallium/drivers/freedreno/freedreno_context.c')
-rw-r--r--   src/gallium/drivers/freedreno/freedreno_context.c   10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/src/gallium/drivers/freedreno/freedreno_context.c b/src/gallium/drivers/freedreno/freedreno_context.c
index 1c32cd9ae92..599f94ffec1 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.c
+++ b/src/gallium/drivers/freedreno/freedreno_context.c
@@ -48,7 +48,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
 	if (!ctx->screen->reorder) {
 		struct fd_batch *batch = NULL;
 		fd_batch_reference(&batch, ctx->batch);
-		fd_batch_flush(batch);
+		fd_batch_flush(batch, true);
 		timestamp = fd_ringbuffer_timestamp(batch->gmem);
 		fd_batch_reference(&batch, NULL);
 	} else {
@@ -103,6 +103,9 @@ fd_context_destroy(struct pipe_context *pctx)
 
 	DBG("");
 
+	if (ctx->screen->reorder)
+		util_queue_destroy(&ctx->flush_queue);
+
 	fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
 	fd_bc_invalidate_context(ctx);
 
@@ -179,8 +182,11 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
 	 * batches per compute job (since it isn't using tiling, so no point
 	 * in getting involved with the re-ordering madness)..
 	 */
-	if (!screen->reorder)
+	if (!screen->reorder) {
 		ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
+	} else {
+		util_queue_init(&ctx->flush_queue, "flush_queue", 16, 1);
+	}
 
 	fd_reset_wfi(ctx);
 