author     Rafael Antognolli <[email protected]>    2018-11-30 11:59:02 -0800
committer  Rafael Antognolli <[email protected]>    2019-01-17 15:07:56 -0800
commit     234c9d8a40b2e30b99823799a504629c65a1c313 (patch)
tree       b6e1ae44f3dac4ce1cf9e4cd51c2862f635fef60 /src/intel
parent     e2179aceaf628b7d14a3e78791f1e181ac766157 (diff)
anv/allocator: Remove anv_free_list.
The next commit already renames anv_free_list2 -> anv_free_list since
the old one is gone.
Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/intel')
-rw-r--r--   src/intel/vulkan/anv_allocator.c | 55 -
-rw-r--r--   src/intel/vulkan/anv_private.h   | 11 -
2 files changed, 0 insertions, 66 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index ff880751fe5..9604c898b2e 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -369,61 +369,6 @@ anv_free_list_pop2(union anv_free_list2 *list,
    return NULL;
 }
 
-static bool
-anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
-{
-   union anv_free_list current, new, old;
-
-   current.u64 = list->u64;
-   while (current.offset != EMPTY) {
-      /* We have to add a memory barrier here so that the list head (and
-       * offset) gets read before we read the map pointer. This way we
-       * know that the map pointer is valid for the given offset at the
-       * point where we read it.
-       */
-      __sync_synchronize();
-
-      int32_t *next_ptr = *map + current.offset;
-      new.offset = VG_NOACCESS_READ(next_ptr);
-      new.count = current.count + 1;
-      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
-      if (old.u64 == current.u64) {
-         *offset = current.offset;
-         return true;
-      }
-      current = old;
-   }
-
-   return false;
-}
-
-static void
-anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
-                   uint32_t size, uint32_t count)
-{
-   union anv_free_list current, old, new;
-   int32_t *next_ptr = map + offset;
-
-   /* If we're returning more than one chunk, we need to build a chain to add
-    * to the list. Fortunately, we can do this without any atomics since we
-    * own everything in the chain right now. `offset` is left pointing to the
-    * head of our chain list while `next_ptr` points to the tail.
-    */
-   for (uint32_t i = 1; i < count; i++) {
-      VG_NOACCESS_WRITE(next_ptr, offset + i * size);
-      next_ptr = map + offset + i * size;
-   }
-
-   old = *list;
-   do {
-      current = old;
-      VG_NOACCESS_WRITE(next_ptr, current.offset);
-      new.offset = offset;
-      new.count = current.count + 1;
-      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
-   } while (old.u64 != current.u64);
-}
-
 /* All pointers in the ptr_free_list are assumed to be page-aligned. This
  * means that the bottom 12 bits should all be zero.
  */
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 497386c0cba..f1411fad418 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -606,16 +606,6 @@ anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
  * both the block pool and the state pools. Unfortunately, in order to
  * solve the ABA problem, we can't use a single uint32_t head.
  */
-union anv_free_list {
-   struct {
-      int32_t offset;
-
-      /* A simple count that is incremented every time the head changes. */
-      uint32_t count;
-   };
-   uint64_t u64;
-};
-
 union anv_free_list2 {
    struct {
       uint32_t offset;
@@ -626,7 +616,6 @@ union anv_free_list2 {
    uint64_t u64;
 };
 
-#define ANV_FREE_LIST_EMPTY      ((union anv_free_list) { { 1, 0 } })
 #define ANV_FREE_LIST2_EMPTY     ((union anv_free_list2) { { UINT32_MAX, 0 } })
 
 struct anv_block_state {
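
Note: the code removed above is the pre-anv_free_list2 lock-free free list: a 32-bit offset and a 32-bit generation count packed into one 64-bit word, updated with a compare-and-swap so that a reused offset cannot be mistaken for an unchanged head (the ABA problem mentioned in the header comment). The following is only a minimal, self-contained sketch of that general pattern, not the driver code: the names (list_head, node_pool, free_list_push/pop, SLOT_EMPTY) are hypothetical, it uses C11 atomics instead of the GCC __sync builtins, and it stores plain array indices rather than pool offsets with Valgrind annotations.

/* Sketch of a versioned (ABA-guarded) lock-free free list.
 * All names are illustrative; this is not the Mesa implementation. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_EMPTY  (-1)
#define POOL_SIZE   16

union list_head {
   struct {
      int32_t  offset;  /* index of the first free slot, or SLOT_EMPTY */
      uint32_t count;   /* bumped on every successful head update (ABA guard) */
   };
   uint64_t u64;
};

static int32_t node_pool[POOL_SIZE];      /* each free slot stores the next index */
static _Atomic uint64_t list_head;        /* initialized in main() */

static void
free_list_push(int32_t slot)
{
   union list_head cur, new;
   cur.u64 = atomic_load(&list_head);
   do {
      node_pool[slot] = cur.offset;       /* chain the old head behind us */
      new.offset = slot;
      new.count = cur.count + 1;
   } while (!atomic_compare_exchange_weak(&list_head, &cur.u64, new.u64));
}

static int32_t
free_list_pop(void)
{
   union list_head cur, new;
   cur.u64 = atomic_load(&list_head);
   while (cur.offset != SLOT_EMPTY) {
      new.offset = node_pool[cur.offset]; /* unlink the head */
      new.count = cur.count + 1;
      if (atomic_compare_exchange_weak(&list_head, &cur.u64, new.u64))
         return cur.offset;
      /* CAS failure reloaded cur with the current head; retry */
   }
   return SLOT_EMPTY;                     /* list is empty */
}

int
main(void)
{
   union list_head init = { .offset = SLOT_EMPTY, .count = 0 };
   atomic_store(&list_head, init.u64);

   free_list_push(3);
   free_list_push(7);
   printf("%d %d %d\n", free_list_pop(), free_list_pop(), free_list_pop());
   /* expected output: 7 3 -1 */
   return 0;
}

The generation counter is what distinguishes this from a plain CAS on the offset alone: even if a popped slot is pushed back and the head offset ends up with its old value, the count will have advanced, so a stale compare-and-swap fails, which is why both push and pop increment it on every successful update (the same role as the count member of the removed union anv_free_list).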