author	Rob Clark <[email protected]>	2018-07-17 09:40:23 -0400
committer	Rob Clark <[email protected]>	2018-07-17 11:00:00 -0400
commit	f2570409f90e18c290beb7375107aa4862bd1019 (patch)
tree	28f263fdc224f02a4f59aa1496478988f5065c86 /src/gallium
parent	71add09e79bf11b337b56e6ef68e6ad29d165a0d (diff)
freedreno: hold batch references when flushing
It is possible for batches to be freed under our feet when flushing, so hold a reference to all of them up-front.

Signed-off-by: Rob Clark <[email protected]>
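To illustrate the race this fixes, here is a minimal, self-contained sketch, not freedreno's actual code; the types and helpers below (batch, batch_ref, batch_flush, flush_all) are hypothetical stand-ins. Flushing one batch can drop the last reference to another batch in the same cache, so iterating and flushing as you go can touch freed memory; the fix is to pin every batch with a reference before flushing any of them.

	#include <stdlib.h>

	struct batch {
		int refcount;
	};

	static void batch_ref(struct batch *b)
	{
		b->refcount++;
	}

	static void batch_unref(struct batch *b)
	{
		if (--b->refcount == 0)
			free(b);	/* last reference gone: batch is freed */
	}

	/* Flushing may resolve dependencies and drop references held by other
	 * cache entries, so any *other* batch can be freed by the time this
	 * returns.
	 */
	static void batch_flush(struct batch *b)
	{
		(void)b;
	}

	static void flush_all(struct batch **batches, unsigned n)
	{
		/* Pin everything up-front (in the driver this happens while
		 * the context lock is held), so no flush can free a batch we
		 * have not visited yet.
		 */
		for (unsigned i = 0; i < n; i++)
			batch_ref(batches[i]);

		for (unsigned i = 0; i < n; i++)
			batch_flush(batches[i]);

		/* Drop our pins; batches with no other users are freed here. */
		for (unsigned i = 0; i < n; i++)
			batch_unref(batches[i]);
	}

	int main(void)
	{
		struct batch *b = calloc(1, sizeof(*b));
		struct batch *cache[] = { b };

		batch_ref(b);		/* the cache's own reference */
		flush_all(cache, 1);
		batch_unref(b);		/* drop the cache's reference */
		return 0;
	}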
Diffstat (limited to 'src/gallium')
-rw-r--r--	src/gallium/drivers/freedreno/freedreno_batch_cache.c	| 70
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/src/gallium/drivers/freedreno/freedreno_batch_cache.c b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
index 07dc1a93f07..c4640a7cfee 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch_cache.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
@@ -124,35 +124,54 @@ fd_bc_fini(struct fd_batch_cache *cache)
_mesa_hash_table_destroy(cache->ht, NULL);
}
-void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
{
- struct hash_entry *entry;
- struct fd_batch *last_batch = NULL;
+ /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+ * can cause batches to be unref'd and freed under our feet, so grab
+ * a reference to all the batches we need up-front.
+ */
+ struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+ struct fd_batch *batch;
+ unsigned n = 0;
- mtx_lock(&ctx->screen->lock);
+ fd_context_lock(ctx);
- hash_table_foreach(cache->ht, entry) {
- struct fd_batch *batch = NULL;
- /* hold a reference since we can drop screen->lock: */
- fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
+ foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx) {
- mtx_unlock(&ctx->screen->lock);
- fd_batch_reference(&last_batch, batch);
- fd_batch_flush(batch, false, false);
- mtx_lock(&ctx->screen->lock);
+ fd_batch_reference_locked(&batches[n++], batch);
}
- fd_batch_reference_locked(&batch, NULL);
}
- mtx_unlock(&ctx->screen->lock);
+ if (deferred) {
+ struct fd_batch *current_batch = ctx->batch;
+
+ for (unsigned i = 0; i < n; i++) {
+ if (batches[i] != current_batch) {
+ fd_batch_add_dep(current_batch, batches[i]);
+ }
+ }
+
+ fd_context_unlock(ctx);
+ } else {
+ fd_context_unlock(ctx);
+
+ for (unsigned i = 0; i < n; i++) {
+ fd_batch_flush(batches[i], false, false);
+ }
+ }
- if (last_batch) {
- fd_batch_sync(last_batch);
- fd_batch_reference(&last_batch, NULL);
+ for (unsigned i = 0; i < n; i++) {
+ fd_batch_reference(&batches[i], NULL);
}
}
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+ bc_flush(cache, ctx, false);
+}
+
/* deferred flush doesn't actually flush, but it marks every other
* batch associated with the context as dependent on the current
* batch. So when the current batch gets flushed, all other batches
* that are pending deferred flush get flushed too.
*/
@@ -161,20 +180,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
void
fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
{
- struct fd_batch *current_batch = ctx->batch;
- struct hash_entry *entry;
-
- mtx_lock(&ctx->screen->lock);
-
- hash_table_foreach(cache->ht, entry) {
- struct fd_batch *batch = entry->data;
- if (batch == current_batch)
- continue;
- if (batch->ctx == ctx)
- fd_batch_add_dep(current_batch, batch);
- }
-
- mtx_unlock(&ctx->screen->lock);
+ bc_flush(cache, ctx, true);
}
void
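For the deferred path described in the comment above, a minimal sketch of the dependency idea, again with hypothetical types and helpers rather than the driver's: instead of flushing now, each other batch is recorded as a dependency of the current batch, and flushing the current batch flushes its dependencies first.

	struct batch {
		struct batch *deps[16];	/* fixed size for brevity only */
		unsigned num_deps;
		int flushed;
	};

	static void batch_add_dep(struct batch *dependent, struct batch *dep)
	{
		dependent->deps[dependent->num_deps++] = dep;
	}

	/* Flushing a batch first flushes everything it depends on, so a
	 * deferred flush only has to register dependencies and can return
	 * immediately.
	 */
	static void batch_flush(struct batch *b)
	{
		if (b->flushed)
			return;
		for (unsigned i = 0; i < b->num_deps; i++)
			batch_flush(b->deps[i]);
		b->flushed = 1;
		/* ... submit the command stream to the kernel here ... */
	}

	static void flush_deferred(struct batch *current, struct batch **others,
				   unsigned n)
	{
		/* Nothing is submitted yet; the others get flushed whenever
		 * 'current' is eventually flushed.
		 */
		for (unsigned i = 0; i < n; i++)
			if (others[i] != current)
				batch_add_dep(current, others[i]);
	}

	int main(void)
	{
		struct batch current = {0}, other = {0};
		struct batch *others[] = { &other };

		flush_deferred(&current, others, 1);	/* nothing submitted yet */
		batch_flush(&current);			/* flushes 'other' first */
		return 0;
	}

Note that in the patch itself both public entry points funnel into the shared helper: fd_bc_flush() calls bc_flush(cache, ctx, false) and fd_bc_flush_deferred() calls bc_flush(cache, ctx, true), so the reference-pinning fix covers both paths.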