author    Kenneth Graunke <[email protected]>  2019-05-30 00:04:38 -0700
committer Kenneth Graunke <[email protected]>  2019-05-30 00:46:37 -0700
commit    e917bb7ad4300a1943a0100114c708915324127c (patch)
tree      34eaa7abc28da36862c99a0f8fb017c66f6463a9 /src/gallium/drivers/iris/iris_bufmgr.c
parent    0cb380a6b318376743174d173f5bc9e18b9b8ce3 (diff)
iris: Avoid holding the lock while allocating pages.
We only need the lock for:

  1. Rummaging through the cache
  2. Allocating VMA

We don't need it for alloc_fresh_bo(), which does GEM_CREATE, and also
SET_DOMAIN to allocate the underlying pages.

The idea behind calling SET_DOMAIN was to avoid a lock in the kernel
while allocating pages; now we avoid our own global lock as well.

We do have to re-lock around VMA allocation. Hopefully this shouldn't
happen too often in practice, because we'll usually find a cached BO in
the right memzone and not have to reallocate it.

Reviewed-by: Chris Wilson <[email protected]>
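Outside the surrounding driver code, the lock-narrowing pattern can be hard to
see at a glance. Below is a minimal, self-contained C sketch of the same idea:
hold the lock for the cache lookup, drop it before the expensive fresh
allocation, and re-take it only around VMA assignment. All names here
(toy_bufmgr, toy_bo, the *_locked/*_unlocked helpers) are hypothetical
stand-ins, not the real iris_bufmgr API; it assumes C11 <threads.h>
(glibc >= 2.28), matching the mtx_* calls in the diff.

/* Sketch of the lock-narrowing pattern from this commit.
 * All identifiers are illustrative stand-ins, not the iris API. */
#include <threads.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct toy_bo { uint64_t gtt_offset; size_t size; };

struct toy_bufmgr {
   mtx_t lock;        /* global bufmgr lock */
   uint64_t next_vma; /* trivial bump allocator, for the sketch only */
};

/* Cache lookup: cheap, touches shared lists -> needs the lock held. */
static struct toy_bo *cache_lookup_locked(struct toy_bufmgr *m, size_t sz)
{
   (void)m; (void)sz;
   return NULL; /* pretend the cache missed */
}

/* Fresh allocation: expensive (GEM_CREATE + SET_DOMAIN in the real
 * driver, which may fault in pages) -> done WITHOUT the lock. */
static struct toy_bo *alloc_fresh_unlocked(size_t sz)
{
   struct toy_bo *bo = calloc(1, sizeof(*bo));
   if (bo)
      bo->size = sz;
   return bo;
}

/* VMA allocation touches shared bufmgr state -> needs the lock again. */
static uint64_t vma_alloc_locked(struct toy_bufmgr *m, size_t sz)
{
   uint64_t addr = m->next_vma;
   m->next_vma += sz;
   return addr;
}

static struct toy_bo *toy_bo_alloc(struct toy_bufmgr *m, size_t sz)
{
   mtx_lock(&m->lock);
   struct toy_bo *bo = cache_lookup_locked(m, sz);
   mtx_unlock(&m->lock); /* drop the lock before page allocation */

   if (!bo) {
      bo = alloc_fresh_unlocked(sz);
      if (!bo)
         return NULL; /* lock already dropped: a plain return suffices */
   }

   if (bo->gtt_offset == 0) {
      mtx_lock(&m->lock); /* re-lock only around VMA allocation */
      bo->gtt_offset = vma_alloc_locked(m, bo->size);
      mtx_unlock(&m->lock);
   }
   return bo;
}

int main(void)
{
   struct toy_bufmgr m = { .next_vma = 0x100000 };
   mtx_init(&m.lock, mtx_plain);
   struct toy_bo *bo = toy_bo_alloc(&m, 4096);
   if (bo)
      printf("bo at vma 0x%" PRIx64 "\n", bo->gtt_offset);
   free(bo);
   mtx_destroy(&m.lock);
   return 0;
}

The ordering constraint this mirrors: nothing executed between the unlock and
the re-lock may depend on shared bufmgr state, which is why the fresh
allocation (per-bo work only) is the part that can safely run unlocked.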
Diffstat (limited to 'src/gallium/drivers/iris/iris_bufmgr.c')
-rw-r--r--   src/gallium/drivers/iris/iris_bufmgr.c   | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 4628d72643c..9894618b1de 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -482,14 +482,18 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
    if (!bo)
       bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags, false);
 
+   mtx_unlock(&bufmgr->lock);
+
    if (!bo) {
       bo = alloc_fresh_bo(bufmgr, bo_size);
       if (!bo)
-         goto err;
+         return NULL;
    }
 
    if (bo->gtt_offset == 0ull) {
+      mtx_lock(&bufmgr->lock);
       bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
+      mtx_unlock(&bufmgr->lock);
 
       if (bo->gtt_offset == 0ull)
          goto err_free;
@@ -498,8 +502,6 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
    if (bo_set_tiling_internal(bo, tiling_mode, stride))
       goto err_free;
 
-   mtx_unlock(&bufmgr->lock);
-
    bo->name = name;
    p_atomic_set(&bo->refcount, 1);
    bo->reusable = bucket && bufmgr->bo_reuse;
@@ -531,8 +533,6 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
err_free:
bo_free(bo);
-err:
- mtx_unlock(&bufmgr->lock);
return NULL;
}
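A side effect of dropping the lock early, visible in the hunks above: the old
err: label existed only so failure paths could unlock before returning. With
the unlock hoisted to just after the cache lookup, a failed alloc_fresh_bo()
simply returns NULL, and err_free no longer needs to fall through to an
unlock either.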