author    Kenneth Graunke <[email protected]>  2019-05-26 13:34:28 -0700
committer Kenneth Graunke <[email protected]>  2019-05-29 19:41:08 -0700
commit    06421e5be72a3893b48d9f0b98cd847803bbb167
tree      a830fffaf502620c9b115fb90d97c340bb5a6f62 /src/gallium
parent    43d835cb0f030f2ba8edc9c8dbe09038b6481e6a
iris: Do SET_TILING at a single point rather than in two places.
Both the from-cache and fresh-from-GEM cases were calling SET_TILING.
In the cached case, we would retry the allocation on failure, pitching
one BO from the cache each time. This is silly, because the only time
it should fail is if the tiling or stride parameters are unacceptable,
which has nothing to do with the particular BO in question. So there's
no point in retrying: we should simply fail the allocation.

This patch moves both calls to bo_set_tiling_internal() below the
cache/fresh split, so the call happens at a single point rather than
two.

To preserve the ordering between SET_TILING and SET_DOMAIN, we move
that call below as well. (I am unsure whether the order matters.)

Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
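For background (not part of the commit): a minimal sketch of what a
SET_TILING wrapper like bo_set_tiling_internal() boils down to, assuming
an open DRM fd and a GEM handle; the helper name and error handling are
illustrative, not the actual Mesa code. The kernel rejects this ioctl
only when the tiling mode or stride is invalid for the device, which is
why retrying it with a different cached BO cannot help.

#include <stdint.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative SET_TILING wrapper (assumed names; not the Mesa source). */
static int
set_tiling(int fd, uint32_t gem_handle, uint32_t tiling_mode, uint32_t stride)
{
   struct drm_i915_gem_set_tiling arg = {
      .handle = gem_handle,
      .tiling_mode = tiling_mode, /* e.g. I915_TILING_NONE, _X, or _Y */
      .stride = stride,           /* bytes per row; 0 for linear BOs */
   };
   int ret;

   /* The ioctl can be interrupted by a signal; retry as libdrm does. */
   do {
      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   /* Returns 0 on success; -1 (with errno set) if the tiling/stride
    * combination is unacceptable for this device.
    */
   return ret;
}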
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/drivers/iris/iris_bufmgr.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index daf67101be3..64dd73adaaa 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -376,6 +376,7 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
    bool alloc_from_cache;
    uint64_t bo_size;
    bool zeroed = false;
+   bool alloc_pages = false;
 
    if (flags & BO_ALLOC_ZEROED)
       zeroed = true;
@@ -413,11 +414,6 @@ retry:
             goto retry;
          }
 
-         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
-            bo_free(bo);
-            goto retry;
-         }
-
          if (zeroed) {
             void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
             if (!map) {
@@ -464,21 +460,7 @@ retry:
       bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
       bo->stride = 0;
 
-      if (bo_set_tiling_internal(bo, tiling_mode, stride))
-         goto err_free;
-
-      /* Calling set_domain() will allocate pages for the BO outside of the
-       * struct mutex lock in the kernel, which is more efficient than waiting
-       * to create them during the first execbuf that uses the BO.
-       */
-      struct drm_i915_gem_set_domain sd = {
-         .handle = bo->gem_handle,
-         .read_domains = I915_GEM_DOMAIN_CPU,
-         .write_domain = 0,
-      };
-
-      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
-         goto err_free;
+      alloc_pages = true;
    }
 
    bo->name = name;
@@ -501,6 +483,24 @@ retry:
       goto err_free;
    }
 
+   if (bo_set_tiling_internal(bo, tiling_mode, stride))
+      goto err_free;
+
+   if (alloc_pages) {
+      /* Calling set_domain() will allocate pages for the BO outside of the
+       * struct mutex lock in the kernel, which is more efficient than waiting
+       * to create them during the first execbuf that uses the BO.
+       */
+      struct drm_i915_gem_set_domain sd = {
+         .handle = bo->gem_handle,
+         .read_domains = I915_GEM_DOMAIN_CPU,
+         .write_domain = 0,
+      };
+
+      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
+         goto err_free;
+   }
+
    mtx_unlock(&bufmgr->lock);
 
    if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
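As a standalone illustration of the page-preallocation trick described in
the comment in the final hunk (assuming an open DRM fd and a freshly
created GEM handle; the helper name is hypothetical, not the Mesa source):

#include <stdint.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: move a BO to the CPU read domain.  As a side effect the kernel
 * allocates the BO's backing pages now, outside its struct_mutex lock,
 * rather than faulting them in at the first execbuf that uses the BO.
 */
static int
prefault_bo_pages(int fd, uint32_t gem_handle)
{
   struct drm_i915_gem_set_domain sd = {
      .handle = gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
      .write_domain = 0, /* read-only: no domain becomes the writer */
   };
   int ret;

   /* Retry if the ioctl is interrupted by a signal, as libdrm does. */
   do {
      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}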