author    Kenneth Graunke <[email protected]>  2019-01-18 12:26:41 -0800
committer Kenneth Graunke <[email protected]>  2019-02-21 10:26:11 -0800
commit    4801af2f26ced530b6a8c86a79a16857e670959a (patch)
tree      1a3981a5fc951128c7c7bc19c1f40bd986267ab1 /src/gallium/drivers/iris
parent    0f33204f0524390b7428ea68fcc024063e4ef358 (diff)
iris: Do binder address allocations per-context, not globally.
iris_bufmgr allocates addresses across the entire screen, since buffers
may be shared between multiple contexts. There used to be a single
special address, IRIS_BINDER_ADDRESS, that was per-context - and all
contexts used the same address.

When I moved to the multi-binder system, I made a separate memory zone
for them. I wanted there to be 2-3 binders per context, so we could
cycle them to avoid the stalls inherent in pinning two buffers to the
same address in back-to-back batches. But I figured I'd allow 100
binders just to be wildly excessive/cautious.

What I didn't realize was that we need 2-3 binders per *context*, and
what I did was allocate 100 binders per *screen*. Web browsers, for
example, might have 1-2 contexts per tab, leading to hundreds of
contexts, and thus binders.

To fix this, we stop allocating VMA for binders in bufmgr, and let the
binder handle it itself. Binders are per-context, and they can assign
context-local addresses for the buffers by simply doing a ringbuffer
style approach. We only hold on to one binder BO at a time, so we won't
ever have a conflicting address.

This fixes dEQP-EGL.functional.multicontext.non_shared_clear.

Huge thanks to Tapani Pälli for debugging this whole mess and figuring
out what was going wrong.

Reviewed-by: Tapani Pälli <[email protected]>
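As a rough illustration of the ringbuffer-style approach described above,
here is a minimal standalone C sketch. The constants are placeholders
rather than the driver's real values (those live in iris_bufmgr.h), and
next_binder_address() is a hypothetical helper written for illustration,
not a function in the driver:

/* Sketch only: placeholder constants, not the driver's real layout,
 * and next_binder_address() is a hypothetical helper.
 */
#include <stdbool.h>
#include <inttypes.h>
#include <stdio.h>

#define IRIS_MEMZONE_BINDER_START   (1ull << 30)        /* placeholder */
#define IRIS_BINDER_SIZE            (64 * 1024ull)      /* placeholder */
#define IRIS_MEMZONE_SURFACE_START \
   (IRIS_MEMZONE_BINDER_START + 100 * IRIS_BINDER_SIZE) /* placeholder */

/* Hand out the next context-local binder address, ringbuffer style:
 * place each new binder right after the previous one, wrapping back to
 * the zone start when we'd run past the end.  Since only one binder BO
 * is live per context, a recycled address can't conflict with a buffer
 * the context is still using.
 */
static uint64_t
next_binder_address(uint64_t prev_address, bool have_prev)
{
   if (!have_prev)
      return IRIS_MEMZONE_BINDER_START;

   uint64_t next = prev_address + IRIS_BINDER_SIZE;
   if (next >= IRIS_MEMZONE_SURFACE_START)
      next = IRIS_MEMZONE_BINDER_START;
   return next;
}

int main(void)
{
   /* 105 reallocations against a 100-slot zone: the address wraps
    * around instead of escaping into the next memory zone.
    */
   uint64_t addr = next_binder_address(0, false);
   for (int i = 0; i < 105; i++)
      addr = next_binder_address(addr, true);
   printf("binder address after 105 reallocs: 0x%" PRIx64 "\n", addr);
   return 0;
}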
Diffstat (limited to 'src/gallium/drivers/iris')
-rw-r--r--   src/gallium/drivers/iris/iris_binder.c | 15
-rw-r--r--   src/gallium/drivers/iris/iris_bufmgr.c | 19
2 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/src/gallium/drivers/iris/iris_binder.c b/src/gallium/drivers/iris/iris_binder.c
index ca60287df3f..6d23de229bf 100644
--- a/src/gallium/drivers/iris/iris_binder.c
+++ b/src/gallium/drivers/iris/iris_binder.c
@@ -71,10 +71,23 @@ binder_realloc(struct iris_context *ice)
    struct iris_bufmgr *bufmgr = screen->bufmgr;
    struct iris_binder *binder = &ice->state.binder;
 
-   iris_bo_unreference(binder->bo);
+   uint64_t next_address = IRIS_MEMZONE_BINDER_START;
+
+   if (binder->bo) {
+      /* Place the new binder just after the old binder, unless we've hit the
+       * end of the memory zone...then wrap around to the start again.
+       */
+      next_address = binder->bo->gtt_offset + IRIS_BINDER_SIZE;
+      if (next_address >= IRIS_MEMZONE_SURFACE_START)
+         next_address = IRIS_MEMZONE_BINDER_START;
+
+      iris_bo_unreference(binder->bo);
+   }
+
    binder->bo =
       iris_bo_alloc(bufmgr, "binder", IRIS_BINDER_SIZE, IRIS_MEMZONE_BINDER);
+   binder->bo->gtt_offset = next_address;
    binder->map = iris_bo_map(NULL, binder->bo, MAP_WRITE);
    binder->insert_point = INIT_INSERT_POINT;
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 837908e9ebb..92ede934056 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -369,10 +369,6 @@ get_bucket_allocator(struct iris_bufmgr *bufmgr,
                      enum iris_memory_zone memzone,
                      uint64_t size)
 {
-   /* Bucketing is not worth using for binders...we'll never have 64... */
-   if (memzone == IRIS_MEMZONE_BINDER)
-      return NULL;
-
    /* Skip using the bucket allocator for very large sizes, as it allocates
    * 64 of them and this can balloon rather quickly.
    */
@@ -402,6 +398,10 @@ vma_alloc(struct iris_bufmgr *bufmgr,
    if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
       return IRIS_BORDER_COLOR_POOL_ADDRESS;
 
+   /* The binder handles its own allocations.  Return non-zero here. */
+   if (memzone == IRIS_MEMZONE_BINDER)
+      return IRIS_MEMZONE_BINDER_START;
+
    struct bo_cache_bucket *bucket =
       get_bucket_allocator(bufmgr, memzone, size);
    uint64_t addr;
@@ -434,6 +434,11 @@
       return;
 
    enum iris_memory_zone memzone = memzone_for_address(address);
+
+   /* The binder handles its own allocations. */
+   if (memzone == IRIS_MEMZONE_BINDER)
+      return;
+
    struct bo_cache_bucket *bucket =
       get_bucket_allocator(bufmgr, memzone, size);
@@ -1286,7 +1291,8 @@ iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
    _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
 
    for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
-      util_vma_heap_finish(&bufmgr->vma_allocator[z]);
+      if (z != IRIS_MEMZONE_BINDER)
+         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
    }
 
    free(bufmgr);
@@ -1611,9 +1617,6 @@ iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
    util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
                       PAGE_SIZE, _4GB);
-   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDER],
-                      IRIS_MEMZONE_BINDER_START,
-                      IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
    util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
                       IRIS_MEMZONE_SURFACE_START,
                       _4GB - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
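One subtlety worth noting: vma_free() above decides to skip heap
bookkeeping by asking memzone_for_address() which zone an address falls
in, so binder-range addresses must still classify as IRIS_MEMZONE_BINDER
even though no heap backs that zone anymore. Below is a minimal sketch of
that style of classification, assuming a simplified zone set and
placeholder start addresses (the real layout and full enum are in
iris_bufmgr.h):

/* Sketch only: simplified zone set and placeholder start addresses,
 * not the driver's actual layout.
 */
#include <stdint.h>

enum iris_memory_zone {
   IRIS_MEMZONE_SHADER,
   IRIS_MEMZONE_BINDER,
   IRIS_MEMZONE_SURFACE,
};

#define IRIS_MEMZONE_BINDER_START   (1ull << 30)   /* placeholder */
#define IRIS_MEMZONE_SURFACE_START  (2ull << 30)   /* placeholder */

/* Classify an address by comparing against zone starts in descending
 * order; the first start we're at or above names the zone.
 */
static enum iris_memory_zone
memzone_for_address(uint64_t address)
{
   if (address >= IRIS_MEMZONE_SURFACE_START)
      return IRIS_MEMZONE_SURFACE;
   if (address >= IRIS_MEMZONE_BINDER_START)
      return IRIS_MEMZONE_BINDER;
   return IRIS_MEMZONE_SHADER;
}

int main(void)
{
   /* An address inside the (placeholder) binder range classifies as
    * IRIS_MEMZONE_BINDER, so vma_free() would return early for it.
    */
   return memzone_for_address(IRIS_MEMZONE_BINDER_START + 4096)
             == IRIS_MEMZONE_BINDER ? 0 : 1;
}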