author     Kenneth Graunke <[email protected]>    2019-05-26 17:11:59 -0700
committer  Kenneth Graunke <[email protected]>    2019-05-29 20:03:45 -0700
commit     6244da8e23e5470d067680a9a3930db82dcb4db1 (patch)
tree       805ca926df44e9dec2bdd3ec7de236de71845337 /src/gallium
parent     4c2d9729dfc6c3577b823a92e2bbbf425ffee143 (diff)
iris: Dig through the cache to find a BO in the right memzone
This saves some util_vma thrash when the first entry in the cache
happens to be in a different memory zone, but one just a tiny bit
ahead is already there and instantly reusable. Hopefully the cost
of a little extra searching won't break the bank - if it does, we
can consider having separate list heads or keeping a separate VMA
cache.
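
The search described here can be sketched as a small standalone C program. This is a minimal illustration only: the struct, the array-backed cache, and the zone_for_address() helper are simplified, hypothetical stand-ins rather than the driver's real linked-list buckets and iris_memzone_for_address().

/* A minimal sketch of the two-pass cache lookup described above.  The
 * types, the array-based "cache", and zone_for_address() are hypothetical
 * stand-ins, not the actual iris_bufmgr structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum mem_zone { ZONE_SHADER, ZONE_SURFACE, ZONE_OTHER };

struct cached_bo {
   uint64_t address;  /* where this BO's VMA currently lives */
   bool busy;         /* still in use by the GPU? */
};

/* Hypothetical zone layout: each zone owns one 4 GiB slice of the VMA. */
static enum mem_zone
zone_for_address(uint64_t address)
{
   switch (address >> 32) {
   case 0:  return ZONE_SHADER;
   case 1:  return ZONE_SURFACE;
   default: return ZONE_OTHER;
   }
}

/* One pass over the cache.  With match_zone set, skip idle buffers whose
 * address lives in the wrong zone; reusing one of those would mean freeing
 * its VMA and allocating a new range in the requested zone.
 */
static struct cached_bo *
find_in_cache(struct cached_bo *cache, int count,
              enum mem_zone zone, bool match_zone)
{
   for (int i = 0; i < count; i++) {
      struct cached_bo *cur = &cache[i];

      if (match_zone && zone_for_address(cur->address) != zone)
         continue;

      /* Entries are kept in retirement order, so once we hit a busy
       * buffer everything after it is busy too - give up on this pass.
       */
      if (cur->busy)
         return NULL;

      return cur;
   }
   return NULL;
}

int
main(void)
{
   /* The first idle entry sits in ZONE_SURFACE; the one right behind it
    * is already in ZONE_SHADER and can be reused without any VMA churn.
    */
   struct cached_bo cache[] = {
      { .address = 1ull << 32, .busy = false },  /* ZONE_SURFACE */
      { .address = 4096,       .busy = false },  /* ZONE_SHADER  */
   };

   /* Pass 1: insist on the requested zone.  Pass 2: take anything idle. */
   struct cached_bo *bo = find_in_cache(cache, 2, ZONE_SHADER, true);
   if (!bo)
      bo = find_in_cache(cache, 2, ZONE_SHADER, false);

   if (bo)
      printf("reusing cached BO at 0x%llx\n",
             (unsigned long long)bo->address);
   return 0;
}

In the driver itself the two passes are the two alloc_bo_from_cache() calls added to bo_alloc_internal() in the diff below, with the non-matching pass only reached when no idle BO in the right zone exists.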
Improves OglDrvRes performance by 22%, recovering from a regression
introduced by deleting the bucket allocators in
694d1a08d3e5883d97d5352895f8431f.
Thanks to Clayton Craft for alerting me to the regression.
Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
Diffstat (limited to 'src/gallium')
-rw-r--r--   src/gallium/drivers/iris/iris_bufmgr.c   24
1 files changed, 17 insertions, 7 deletions
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 60645fbb2cc..40559b4c1f9 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -364,18 +364,22 @@ static struct iris_bo *
 alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
                     struct bo_cache_bucket *bucket,
                     enum iris_memory_zone memzone,
-                    unsigned flags)
+                    unsigned flags,
+                    bool match_zone)
 {
    if (!bucket)
       return NULL;

    struct iris_bo *bo = NULL;

-   while (!list_empty(&bucket->head)) {
-      struct iris_bo *cur = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+   list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
+      /* Try a little harder to find one that's already in the right memzone */
+      if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
+         continue;

-      /* If the last BO in the cache is busy, there are no idle BOs.
-       * Fall back to allocating a fresh buffer.
+      /* If the last BO in the cache is busy, there are no idle BOs.  Bail,
+       * either falling back to a non-matching memzone, or if that fails,
+       * allocating a fresh buffer.
        */
       if (iris_bo_busy(cur))
          return NULL;
@@ -471,8 +475,14 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,

    mtx_lock(&bufmgr->lock);

-   /* Get a buffer out of the cache if available */
-   bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags);
+   /* Get a buffer out of the cache if available.  First, we try to find
+    * one with a matching memory zone so we can avoid reallocating VMA.
+    */
+   bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags, true);
+
+   /* If that fails, we try for any cached BO, without matching memzone. */
+   if (!bo)
+      bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags, false);

    if (!bo) {
       alloc_pages = true;