author     Jason Ekstrand <[email protected]>    2015-08-03 00:38:48 -0700
committer  Jason Ekstrand <[email protected]>    2015-08-03 00:38:48 -0700
commit     fd64598462689ccc9ac14dccdddb96c8a6ff8364 (patch)
tree       746d86f49aab1032339378229f246f76c59acc06 /src/vulkan
parent     481122f4ac11fff402fa5b0884757462bcb1e933 (diff)
vk/allocator: Fix a data race in the state pool
The previous algorithm had a race because of the way we were using
__sync_fetch_and_add for everything.  In particular, the concept of
"returning" over-allocated states in the "next > end" case was
completely bogus.  If too many threads were hitting the state pool at
the same time, it was possible to have the following sequence:

A: Get an offset (next == end)
B: Get an offset (next > end)
A: Resize the pool (now next < end by a lot)
C: Get an offset (next < end)
B: Return the over-allocated offset
D: Get an offset

in which case D will get the same offset as C.  The solution to this
race is to get rid of the concept of "returning" over-allocated states.
Instead, the thread that gets a new block simply sets the next and end
offsets directly and threads that over-allocate don't return anything
and just futex-wait.  Since you can only ever hit the over-allocate
case if someone else hit the "next == end" case and hasn't resized yet,
you're guaranteed that the end value will get updated and the futex
won't block forever.
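To make the pattern concrete, here is a minimal standalone sketch of
the fixed algorithm, assuming Linux futexes and GCC's __sync builtins.
Everything named toy_* or grow_block, and the BLOCK_SIZE/STATE_SIZE
constants, are hypothetical stand-ins for the driver's anv_block_pool
machinery; only the next/end-in-one-word layout and the
swap-then-wake / wait-then-restart structure follow the commit.

/* Sketch only; not the driver code itself. */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#define BLOCK_SIZE 4096
#define STATE_SIZE 64

union toy_block {
   struct {
      uint32_t next;   /* offset of the next free state */
      uint32_t end;    /* one past the last state in the block */
   };
   uint64_t u64;       /* both fields, read and written as one word */
};

struct toy_pool {
   union toy_block block;
   uint32_t top;       /* bump pointer standing in for the block pool */
};

/* Hypothetical stand-in for anv_block_pool_alloc(): a fresh block. */
static uint32_t
grow_block(struct toy_pool *pool)
{
   return __sync_fetch_and_add(&pool->top, BLOCK_SIZE);
}

static uint32_t
toy_pool_alloc(struct toy_pool *pool)
{
   union toy_block block, old, new;
   uint32_t offset;

 restart:
   /* Optimistically claim a state by bumping next. */
   block.u64 = __sync_fetch_and_add(&pool->block.u64, STATE_SIZE);

   if (block.next < block.end) {
      /* Fast path: the claimed state fit in the current block. */
      return block.next;
   } else if (block.next == block.end) {
      /* We consumed the boundary slot, so we are the one thread
       * responsible for installing a new block.  Write next and end
       * in a single atomic exchange instead of "returning" the
       * over-allocation with another fetch-and-add. */
      offset = grow_block(pool);
      new.next = offset + STATE_SIZE;
      new.end = offset + BLOCK_SIZE;
      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
      if (old.next != block.next) {
         /* Others over-allocated while we resized; wake them all. */
         syscall(SYS_futex, &pool->block.end, FUTEX_WAKE,
                 INT32_MAX, NULL, NULL, 0);
      }
      return offset;
   } else {
      /* Over-allocated: some other thread saw next == end and will
       * install a new block shortly.  Sleep until end changes, then
       * start over; nothing is "returned" to the pool. */
      syscall(SYS_futex, &pool->block.end, FUTEX_WAIT,
              block.end, NULL, NULL, 0);
      goto restart;
   }
}

A quick smoke test of the sketch would spawn several threads calling
toy_pool_alloc() in a loop and assert that no two threads ever receive
the same offset, which is exactly the invariant the D-vs-C sequence
above violated.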
Diffstat (limited to 'src/vulkan')
-rw-r--r--  src/vulkan/anv_allocator.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/vulkan/anv_allocator.c b/src/vulkan/anv_allocator.c
index 04293f0d9be..601539bcf25 100644
--- a/src/vulkan/anv_allocator.c
+++ b/src/vulkan/anv_allocator.c
@@ -424,15 +424,15 @@ anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
    if (block.next < block.end) {
       return block.next;
    } else if (block.next == block.end) {
-      new.next = anv_block_pool_alloc(block_pool);
-      new.end = new.next + block_pool->block_size;
-      old.u64 = __sync_fetch_and_add(&pool->block.u64, new.u64 - block.u64);
+      offset = anv_block_pool_alloc(block_pool);
+      new.next = offset + pool->state_size;
+      new.end = offset + block_pool->block_size;
+      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
       if (old.next != block.next)
          futex_wake(&pool->block.end, INT_MAX);
-      return new.next;
+      return offset;
    } else {
       futex_wait(&pool->block.end, block.end);
-      __sync_fetch_and_add(&pool->block.u64, -pool->state_size);
       goto restart;
    }
 }
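For context, the futex_wait()/futex_wake() calls in the diff are thin
wrappers over the Linux futex system call; their exact shape in
anv_allocator.c may differ, but wrappers of this kind look roughly
like the following sketch:

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static inline long
futex_wake(uint32_t *addr, int count)
{
   /* Wake up to count threads currently sleeping on addr. */
   return syscall(SYS_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
}

static inline long
futex_wait(uint32_t *addr, int32_t value)
{
   /* Sleep only if *addr still equals value; the kernel performs the
    * comparison atomically with enqueueing the waiter, so a wake that
    * happens after the caller read addr cannot be lost. */
   return syscall(SYS_futex, addr, FUTEX_WAIT, value, NULL, NULL, 0);
}

That atomic compare in FUTEX_WAIT is what makes the over-allocation
path safe: a waiter that read a stale end value either sleeps before
the resizer's futex_wake (and is woken by it) or fails the kernel's
comparison and retries immediately.  Note also that, despite its name,
GCC's __sync_lock_test_and_set used above is an atomic exchange, which
is exactly the "set the next and end offsets directly" operation the
commit message describes.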