author     Rafael Antognolli <[email protected]>  2018-12-04 15:37:33 -0800
committer  Rafael Antognolli <[email protected]>  2019-01-17 15:08:19 -0800
commit     dfc9ab2ccd93863387073e7eb5c50c29f0abb68f (patch)
tree       59b7509c9c9f043e4aa851da9e600f836e4554f7
parent     7ed0898a8d43340011095586af3be53380bb084c (diff)
anv/allocator: Add padding information.
It's possible that we still have some space left in the block pool, but we try
to allocate a state larger than that remaining space. Such a state would start
somewhere within the range of the old block_pool and end past that range,
within the range of the new size.

That's fine when we use userptr, since the memory in the block pool is CPU
mapped contiguously. However, by the end of this series, the block_pool will be
split into different BOs, with CPU mapping ranges that are not necessarily
contiguous. So we must avoid the case where a given state is part of two
different BOs in the block pool.

This commit solves the issue by detecting that we are growing the block_pool
even though we are not at the end of the range. If that happens, we don't use
the space left at the end of the old size, and consider it as "padding" that
can't be used in the allocation. We update the size requested from the block
pool to take the padding into account, and return the offset after the padding,
which happens to be at the start of the new address range.

Additionally, we return the amount of padding we used, so the caller knows this
happened and can return that padding to a list of free states, where it can be
reused later. This way we hopefully don't waste any space, while also avoiding
a state split between two different BOs.

v3:
 - Calculate offset + padding at anv_block_pool_alloc_new (Jason).

v4:
 - Remove extra "leftover".

Reviewed-by: Jason Ekstrand <[email protected]>
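To make the arithmetic concrete, here is a minimal standalone sketch of the padding calculation described above. It is an illustration only, not the driver code: the function and variable names (alloc_with_padding, pool_next, pool_end) and the example numbers are made up for this sketch, while the real implementation in the diff below performs the same computation atomically on an anv_block_state, and only for pinned (EXEC_OBJECT_PINNED) block pools.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: decide where an allocation of `block_size` bytes lands
 * when the pool's free space currently runs from `pool_next` to `pool_end`.
 * If the request doesn't fit, the leftover bytes [pool_next, pool_end) become
 * padding and the allocation starts at the beginning of the grown range,
 * i.e. at `pool_end`.
 */
static uint32_t
alloc_with_padding(uint32_t pool_next, uint32_t pool_end,
                   uint32_t block_size, uint32_t *padding)
{
   *padding = 0;

   if (pool_next + block_size <= pool_end)
      return pool_next;               /* Fits: no padding needed. */

   *padding = pool_end - pool_next;   /* Tail of the old range we skip. */
   return pool_end;                   /* Allocation begins in the new range. */
}

int main(void)
{
   uint32_t padding;

   /* 64 bytes left (next = 192, end = 256) but the state needs 96. */
   uint32_t offset = alloc_with_padding(192, 256, 96, &padding);
   assert(offset == 256 && padding == 64);

   printf("offset=%" PRIu32 " padding=%" PRIu32 "\n", offset, padding);
   return 0;
}

Compiled and run, this prints offset=256 padding=64: the 64 leftover bytes at the end of the old range are skipped, and the caller gets them back as padding so it can hand them to the free list (in the state pool path below, via anv_state_pool_return_chunk()).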
-rw-r--r--  src/intel/vulkan/anv_allocator.c              55
-rw-r--r--  src/intel/vulkan/anv_private.h                 2
-rw-r--r--  src/intel/vulkan/tests/block_pool_no_free.c    2
3 files changed, 49 insertions, 10 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index b6de2650abb..8fafcd31bca 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -787,15 +787,35 @@ done:
 static uint32_t
 anv_block_pool_alloc_new(struct anv_block_pool *pool,
                          struct anv_block_state *pool_state,
-                         uint32_t block_size)
+                         uint32_t block_size, uint32_t *padding)
 {
    struct anv_block_state state, old, new;
 
+   /* Most allocations won't generate any padding */
+   if (padding)
+      *padding = 0;
+
    while (1) {
       state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
       if (state.next + block_size <= state.end) {
          return state.next;
       } else if (state.next <= state.end) {
+         if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) {
+            /* We need to grow the block pool, but still have some leftover
+             * space that can't be used by that particular allocation. So we
+             * add that as a "padding", and return it.
+             */
+            uint32_t leftover = state.end - state.next;
+
+            /* If there is some leftover space in the pool, the caller must
+             * deal with it.
+             */
+            assert(leftover == 0 || padding);
+            if (padding)
+               *padding = leftover;
+            state.next += leftover;
+         }
+
          /* We allocated the first block outside the pool so we have to grow
           * the pool. pool_state->next acts a mutex: threads who try to
           * allocate now will get block indexes above the current limit and
@@ -819,9 +839,13 @@ anv_block_pool_alloc_new(struct anv_block_pool *pool,
 
 int32_t
 anv_block_pool_alloc(struct anv_block_pool *pool,
-                     uint32_t block_size)
+                     uint32_t block_size, uint32_t *padding)
 {
-   return anv_block_pool_alloc_new(pool, &pool->state, block_size);
+   uint32_t offset;
+
+   offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);
+
+   return offset;
 }
 
 /* Allocates a block out of the back of the block pool.
@@ -838,7 +862,7 @@ anv_block_pool_alloc_back(struct anv_block_pool *pool,
                            uint32_t block_size)
 {
    int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
-                                             block_size);
+                                             block_size, NULL);
 
    /* The offset we get out of anv_block_pool_alloc_new() is actually the
     * number of bytes downwards from the middle to the end of the block.
@@ -894,16 +918,24 @@ static uint32_t
 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
                                     struct anv_block_pool *block_pool,
                                     uint32_t state_size,
-                                    uint32_t block_size)
+                                    uint32_t block_size,
+                                    uint32_t *padding)
 {
    struct anv_block_state block, old, new;
    uint32_t offset;
 
+   /* We don't always use anv_block_pool_alloc(), which would set *padding to
+    * zero for us. So if we have a pointer to padding, we must zero it out
+    * ourselves here, to make sure we always return some sensible value.
+    */
+   if (padding)
+      *padding = 0;
+
    /* If our state is large, we don't need any sub-allocation from a block.
     * Instead, we just grab whole (potentially large) blocks.
     */
    if (state_size >= block_size)
-      return anv_block_pool_alloc(block_pool, state_size);
+      return anv_block_pool_alloc(block_pool, state_size, padding);
 
  restart:
    block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
@@ -911,7 +943,7 @@ anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
    if (block.next < block.end) {
       return block.next;
    } else if (block.next == block.end) {
-      offset = anv_block_pool_alloc(block_pool, block_size);
+      offset = anv_block_pool_alloc(block_pool, block_size, padding);
       new.next = offset + state_size;
       new.end = offset + block_size;
       old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
@@ -1093,10 +1125,12 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
       }
    }
 
+   uint32_t padding;
    offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
                                                 &pool->block_pool,
                                                 alloc_size,
-                                                pool->block_size);
+                                                pool->block_size,
+                                                &padding);
    /* Everytime we allocate a new state, add it to the state pool */
    uint32_t idx;
    VkResult result = anv_state_table_add(&pool->table, &idx, 1);
@@ -1107,6 +1141,11 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
    state->alloc_size = alloc_size;
    state->map = anv_block_pool_map(&pool->block_pool, offset);
 
+   if (padding > 0) {
+      uint32_t return_offset = offset - padding;
+      anv_state_pool_return_chunk(pool, return_offset, padding, 0);
+   }
+
 done:
    return *state;
 }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 5f75528fc91..cf0ec1dc8c6 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -758,7 +758,7 @@ VkResult anv_block_pool_init(struct anv_block_pool *pool,
                              uint64_t bo_flags);
 void anv_block_pool_finish(struct anv_block_pool *pool);
 int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
-                             uint32_t block_size);
+                             uint32_t block_size, uint32_t *padding);
 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
                                   uint32_t block_size);
 void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset);
diff --git a/src/intel/vulkan/tests/block_pool_no_free.c b/src/intel/vulkan/tests/block_pool_no_free.c
index 9cd3e83b462..dd1856ea714 100644
--- a/src/intel/vulkan/tests/block_pool_no_free.c
+++ b/src/intel/vulkan/tests/block_pool_no_free.c
@@ -46,7 +46,7 @@ static void *alloc_blocks(void *_job)
    int32_t block, *data;
 
    for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++) {
-      block = anv_block_pool_alloc(job->pool, block_size);
+      block = anv_block_pool_alloc(job->pool, block_size, NULL);
       data = anv_block_pool_map(job->pool, block);
       *data = block;
       assert(block >= 0);