author	Kenneth Graunke <[email protected]>	2019-05-26 15:52:56 -0700
committer	Kenneth Graunke <[email protected]>	2019-05-29 19:41:52 -0700
commit	76c5a196681e2cbcc894582395ebe54e41f5c6ca (patch)
tree	f430ccb56aebe8c5719e8bc62efddea13b5bbaa9
parent	cea6671395864d8b4c5020193b2f84955de827e4 (diff)
iris: Move cached BO allocation into a helper function.
There's enough going on here to warrant a helper. This also simplifies
the control flow and eliminates the last non-error-case goto.

Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
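The shape of the change: the caller's goto-based retry loop moves into a helper that returns NULL when the cache has nothing reusable, so the caller reduces to "try the cache, else allocate fresh". A minimal self-contained C sketch of that pattern (toy types and names, not the actual iris code):

#include <stdbool.h>
#include <stdlib.h>

/* Toy stand-ins for the cache bucket and buffer object. */
struct buf {
   struct buf *next;
   bool purged;   /* stands in for iris_bo_madvise() reporting a lost BO */
};

struct cache {
   struct buf *head;
};

/* Helper: pop cached entries until one is reusable.  A purged entry is
 * freed and the loop retries; NULL means the cache had nothing usable.
 */
static struct buf *
alloc_from_cache(struct cache *cache)
{
   while (cache->head) {
      struct buf *cur = cache->head;
      cache->head = cur->next;
      if (!cur->purged)
         return cur;   /* still resident: reuse it */
      free(cur);       /* reclaimed by the kernel: drop it and retry */
   }
   return NULL;
}

/* Caller: no retry label, no goto -- just a fallback path. */
static struct buf *
alloc_buf(struct cache *cache)
{
   struct buf *bo = alloc_from_cache(cache);
   if (!bo)
      bo = calloc(1, sizeof(*bo));   /* fresh allocation fallback */
   return bo;
}

Returning NULL for both "head BO still busy" and "cache empty" lets the caller treat every miss the same way, which is what removes the retry goto.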
-rw-r--r--	src/gallium/drivers/iris/iris_bufmgr.c	108
1 file changed, 64 insertions(+), 44 deletions(-)
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
index 659840c47aa..c0e6e4efa84 100644
--- a/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/src/gallium/drivers/iris/iris_bufmgr.c
@@ -361,6 +361,67 @@ bo_calloc(void)
}
static struct iris_bo *
+alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
+                    struct bo_cache_bucket *bucket,
+                    enum iris_memory_zone memzone,
+                    unsigned flags)
+{
+   if (!bucket)
+      return NULL;
+
+   struct iris_bo *bo = NULL;
+
+   while (!list_empty(&bucket->head)) {
+      struct iris_bo *cur = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+
+      /* If the last BO in the cache is busy, there are no idle BOs.
+       * Fall back to allocating a fresh buffer.
+       */
+      if (iris_bo_busy(cur))
+         return NULL;
+
+      list_del(&cur->head);
+
+      /* Tell the kernel we need this BO. If it still exists, we're done! */
+      if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
+         bo = cur;
+         break;
+      }
+
+      /* This BO was purged, clean up any others and retry */
+      bo_free(cur);
+
+      iris_bo_cache_purge_bucket(bufmgr, bucket);
+   }
+
+   if (!bo)
+      return NULL;
+
+   /* If the cached BO isn't in the right memory zone, free the old
+    * memory and assign it a new address.
+    */
+   if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
+      vma_free(bufmgr, bo->gtt_offset, bo->size);
+      bo->gtt_offset = 0ull;
+   }
+
+   /* Zero the contents if necessary. If this fails, fall back to
+    * allocating a fresh BO, which will always be zeroed by the kernel.
+    */
+   if (flags & BO_ALLOC_ZEROED) {
+      void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+      if (map) {
+         memset(map, 0, bo->size);
+      } else {
+         bo_free(bo);
+         return NULL;
+      }
+   }
+
+   return bo;
+}
+
+static struct iris_bo *
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
{
   struct iris_bo *bo = bo_calloc();
@@ -400,14 +461,9 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   struct bo_cache_bucket *bucket;
-   bool alloc_from_cache;
   uint64_t bo_size;
-   bool zeroed = false;
   bool alloc_pages = false;
-   if (flags & BO_ALLOC_ZEROED)
-      zeroed = true;
-
   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);
@@ -421,47 +477,11 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
   }
   mtx_lock(&bufmgr->lock);
-   /* Get a buffer out of the cache if available */
-retry:
-   alloc_from_cache = false;
-   if (bucket != NULL && !list_empty(&bucket->head)) {
-      /* If the last BO in the cache is idle, then reuse it. Otherwise,
-       * allocate a fresh buffer to avoid stalling.
-       */
-      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
-      if (!iris_bo_busy(bo)) {
-         alloc_from_cache = true;
-         list_del(&bo->head);
-      }
-      if (alloc_from_cache) {
-         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
-            bo_free(bo);
-            iris_bo_cache_purge_bucket(bufmgr, bucket);
-            goto retry;
-         }
-
-         if (zeroed) {
-            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
-            if (map) {
-               memset(map, 0, bo_size);
-            } else {
-               alloc_from_cache = false;
-               bo_free(bo);
-            }
-         }
-      }
-   }
+   /* Get a buffer out of the cache if available */
+   bo = alloc_bo_from_cache(bufmgr, bucket, memzone, flags);
-   if (alloc_from_cache) {
-      /* If the cached BO isn't in the right memory zone, free the old
-       * memory and assign it a new address.
-       */
-      if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
-         vma_free(bufmgr, bo->gtt_offset, bo->size);
-         bo->gtt_offset = 0ull;
-      }
-   } else {
+   if (!bo) {
      alloc_pages = true;
      bo = alloc_fresh_bo(bufmgr, bo_size);
      if (!bo)