author     Kenneth Graunke <[email protected]>   2018-03-26 16:14:29 -0700
committer  Kenneth Graunke <[email protected]>   2019-02-21 10:26:05 -0800
commit     f6be3d4f3aeaa2dfca34ff32610929561adb16a1 (patch)
tree       2075809bfb56d9510d26ede5ab10b72cd380eddf /src/gallium/drivers/iris/iris_bufmgr.c
parent     902a122404ddb7664d2ee9320f1de9fc58544c1c (diff)
iris: bufmgr updates.
Drop BO_ALLOC_BUSY (best not to hand people a loaded gun...)
Drop vestiges of alignment
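For callers, the visible effect of these two drops is a simpler allocation entry point: no BO_ALLOC_BUSY flag and no trailing alignment argument. The snippet below is a standalone illustration only, not driver code: the struct definitions and the function body are stubs, and the "scratch" name and 4096-byte size are made up; only the three-argument shape of iris_bo_alloc() is taken from this patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct iris_bufmgr { int unused; };    /* stub stand-in, not the real type */
struct iris_bo     { uint64_t size; }; /* stub stand-in, not the real type */

/* Stub with the post-patch signature: (bufmgr, name, size) -- the old
 * fourth "alignment" parameter is gone.  The body only fakes an
 * allocation so this example runs on its own.
 */
static struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr, const char *name, uint64_t size)
{
   (void)bufmgr;
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->size = size;
      printf("allocated '%s', %llu bytes\n", name, (unsigned long long)size);
   }
   return bo;
}

int main(void)
{
   struct iris_bufmgr bufmgr = { 0 };

   /* Before this patch the call carried an alignment argument, e.g.
    *    iris_bo_alloc(&bufmgr, "scratch", 4096, 0);
    * after it, callers simply write:
    */
   struct iris_bo *bo = iris_bo_alloc(&bufmgr, "scratch", 4096);

   free(bo);
   return 0;
}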
Diffstat (limited to 'src/gallium/drivers/iris/iris_bufmgr.c')
-rw-r--r--   src/gallium/drivers/iris/iris_bufmgr.c | 55
1 file changed, 14 insertions, 41 deletions
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index ce1066beaf0..9bc101ee5ab 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -239,7 +239,7 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
                   uint64_t size,
                   unsigned flags,
                   uint32_t tiling_mode,
-                  uint32_t stride, uint64_t alignment)
+                  uint32_t stride)
 {
    struct iris_bo *bo;
    unsigned int page_size = getpagesize();
@@ -247,20 +247,11 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
    struct bo_cache_bucket *bucket;
    bool alloc_from_cache;
    uint64_t bo_size;
-   bool busy = false;
    bool zeroed = false;
 
-   if (flags & BO_ALLOC_BUSY)
-      busy = true;
-
    if (flags & BO_ALLOC_ZEROED)
       zeroed = true;
 
-   /* BUSY does doesn't really jive with ZEROED as we have to wait for it to
-    * be idle before we can memset. Just disallow that combination.
-    */
-   assert(!(busy && zeroed));
-
    /* Round the allocated size up to a power of two number of pages. */
    bucket = bucket_for_size(bufmgr, size);
 
@@ -280,31 +271,13 @@ retry:
    alloc_from_cache = false;
 
    if (bucket != NULL && !list_empty(&bucket->head)) {
-      if (busy && !zeroed) {
-         /* Allocate new render-target BOs from the tail (MRU)
-          * of the list, as it will likely be hot in the GPU
-          * cache and in the aperture for us. If the caller
-          * asked us to zero the buffer, we don't want this
-          * because we are going to mmap it.
-          */
-         bo = LIST_ENTRY(struct iris_bo, bucket->head.prev, head);
-         list_del(&bo->head);
+      /* If the last BO in the cache is idle, then reuse it. Otherwise,
+       * allocate a fresh buffer to avoid stalling.
+       */
+      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+      if (!iris_bo_busy(bo)) {
          alloc_from_cache = true;
-         bo->align = alignment;
-      } else {
-         assert(alignment == 0);
-         /* For non-render-target BOs (where we're probably
-          * going to map it first thing in order to fill it
-          * with data), check if the last BO in the cache is
-          * unbusy, and only reuse in that case. Otherwise,
-          * allocating a new buffer is probably faster than
-          * waiting for the GPU to finish.
-          */
-         bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
-         if (!iris_bo_busy(bo)) {
-            alloc_from_cache = true;
-            list_del(&bo->head);
-         }
+         list_del(&bo->head);
       }
 
       if (alloc_from_cache) {
@@ -352,7 +325,6 @@ retry:
 
    bo->gem_handle = create.handle;
    bo->bufmgr = bufmgr;
-   bo->align = alignment;
    bo->tiling_mode = I915_TILING_NONE;
    bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 
@@ -397,17 +369,18 @@ err:
 
 struct iris_bo *
 iris_bo_alloc(struct iris_bufmgr *bufmgr,
-              const char *name, uint64_t size, uint64_t alignment)
+              const char *name,
+              uint64_t size)
 {
-   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
+   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0);
 }
 
 struct iris_bo *
 iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
-                    uint64_t size, uint32_t tiling_mode, uint32_t pitch,
-                    unsigned flags)
+                    uint64_t size, uint32_t tiling_mode, uint32_t pitch,
+                    unsigned flags)
 {
-   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch);
 }
 
 /**
@@ -418,7 +391,7 @@ iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
  */
 struct iris_bo *
 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
-                             const char *name, unsigned int handle)
+                             const char *name, unsigned int handle)
 {
    struct iris_bo *bo;
 
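To make the new cache policy in bo_alloc_internal() easier to read outside of diff context, here is a minimal standalone sketch of the same idea: inspect a single candidate BO in the bucket, reuse it only if it is idle, otherwise allocate a fresh buffer instead of stalling. Everything here (fake_bo, bo_busy(), alloc_from_bucket()) is an illustrative stand-in, not the iris code; in the real driver the candidate comes from a doubly linked cache bucket and iris_bo_busy() checks with the kernel whether the buffer is still in use.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the driver structures; bo_busy() plays the role that
 * iris_bo_busy() plays in the real allocator.
 */
struct fake_bo {
   int             id;
   bool            busy;
   struct fake_bo *next;   /* bucket list, least recently freed BO first */
};

static bool bo_busy(const struct fake_bo *bo)
{
   return bo->busy;
}

/* Same policy as the patched bo_alloc_internal(): look only at the first
 * cached BO; if it is idle, take it off the list and reuse it, otherwise
 * allocate a fresh buffer rather than waiting for the GPU.
 */
static struct fake_bo *
alloc_from_bucket(struct fake_bo **bucket, int fresh_id)
{
   struct fake_bo *bo = *bucket;

   if (bo && !bo_busy(bo)) {
      *bucket = bo->next;             /* the sketch's list_del() */
      bo->next = NULL;
      printf("reused idle BO %d from the cache\n", bo->id);
      return bo;
   }

   bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->id = fresh_id;
      printf("cache head busy or empty, allocated fresh BO %d\n", bo->id);
   }
   return bo;
}

int main(void)
{
   struct fake_bo idle = { .id = 1, .busy = false, .next = NULL };
   struct fake_bo *bucket = &idle;

   struct fake_bo *a = alloc_from_bucket(&bucket, 2);   /* reuses BO 1 */
   struct fake_bo *b = alloc_from_bucket(&bucket, 3);   /* bucket now empty: fresh BO 3 */

   if (a != &idle)
      free(a);
   free(b);
   return 0;
}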