about summary refs log tree commit diff stats
path: root/src/gallium/drivers
diff options
context:
space:
mode:
authorKenneth Graunke <[email protected]>2019-05-29 23:40:20 -0700
committerKenneth Graunke <[email protected]>2019-05-30 00:15:26 -0700
commit0cb380a6b318376743174d173f5bc9e18b9b8ce3 (patch)
treeb3783b3072f2cce215fafbba781b4bb6eeade08f /src/gallium/drivers
parent53878f7a8989879b0f3ca37df9fd1fb37f2525ca (diff)
iris: Move SET_DOMAIN to alloc_fresh_bo()
Chris pointed out that the order between SET_DOMAIN and SET_TILING doesn't matter, so we can just do the page allocation when creating a new BO. This simplifies the flow a bit.

Reviewed-by: Chris Wilson <[email protected]>
Diffstat (limited to 'src/gallium/drivers')
-rw-r--r--src/gallium/drivers/iris/iris_bufmgr.c32
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 6531118d1b0..4628d72643c 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -434,6 +434,21 @@ alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
bo->stride = 0;
+ /* Calling set_domain() will allocate pages for the BO outside of the
+ * struct mutex lock in the kernel, which is more efficient than waiting
+ * to create them during the first execbuf that uses the BO.
+ */
+ struct drm_i915_gem_set_domain sd = {
+ .handle = bo->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_CPU,
+ .write_domain = 0,
+ };
+
+ if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
+ bo_free(bo);
+ return NULL;
+ }
+
return bo;
}
@@ -448,7 +463,6 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
{
struct iris_bo *bo;
unsigned int page_size = getpagesize();
- bool alloc_pages = false;
struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
/* Round the size up to the bucket size, or if we don't have caching
@@ -469,7 +483,6 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags, false);
if (!bo) {
- alloc_pages = true;
bo = alloc_fresh_bo(bufmgr, bo_size);
if (!bo)
goto err;
@@ -485,21 +498,6 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
if (bo_set_tiling_internal(bo, tiling_mode, stride))
goto err_free;
- if (alloc_pages) {
- /* Calling set_domain() will allocate pages for the BO outside of the
- * struct mutex lock in the kernel, which is more efficient than waiting
- * to create them during the first execbuf that uses the BO.
- */
- struct drm_i915_gem_set_domain sd = {
- .handle = bo->gem_handle,
- .read_domains = I915_GEM_DOMAIN_CPU,
- .write_domain = 0,
- };
-
- if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
- goto err_free;
- }
-
mtx_unlock(&bufmgr->lock);
bo->name = name;