author    Rafael Antognolli <[email protected]>  2018-11-29 10:49:31 -0800
committer Rafael Antognolli <[email protected]>  2019-01-17 15:07:50 -0800
commit    d18267fb48106872dbd08acac33c16dd3dd910c0 (patch)
tree      2d5439abf832c1e8568439d7160593d08160f535 /src/intel
parent    6a1dcfe73d070d42f19b1450b883e345c5aa001a (diff)
anv/allocator: Use anv_state_table on anv_state_pool_alloc.
Use anv_state_pool_return_blocks() to return blocks to the pool, instead
of manually pushing them.

v3:
 - return blocks from the end of the chunk (Jason).

Reviewed-by: Jason Ekstrand <[email protected]>
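anv_state_pool_return_blocks() itself is not part of this diff (it comes from
earlier in this series). A minimal sketch of what such a helper plausibly
looks like, assembled only from the calls that do appear in the hunks below
(anv_state_table_add/anv_state_table_get/anv_free_list_push2), and not taken
from the actual Mesa source:

/* Hypothetical reconstruction: return `count` blocks of `block_size`
 * bytes starting at `chunk_offset` to the matching bucket's free list,
 * going through the state table rather than the pool mapping.
 */
static void
anv_state_pool_return_blocks(struct anv_state_pool *pool,
                             uint32_t chunk_offset, uint32_t count,
                             uint32_t block_size)
{
   if (count == 0)
      return;

   /* Reserve `count` consecutive slots in the state table... */
   uint32_t st_idx;
   VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
   assert(result == VK_SUCCESS);

   /* ...and point each slot at one block-sized slice of the chunk. */
   for (uint32_t i = 0; i < count; i++) {
      struct anv_state *state_i = anv_state_table_get(&pool->table,
                                                      st_idx + i);
      state_i->alloc_size = block_size;
      state_i->offset = chunk_offset + block_size * i;
      state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
   }

   /* Push the whole run of table entries onto the bucket's free list. */
   uint32_t bucket = anv_state_pool_get_bucket(block_size);
   anv_free_list_push2(&pool->buckets[bucket].free_list,
                       &pool->table, st_idx, count);
}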
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/vulkan/anv_allocator.c  81
-rw-r--r--  src/intel/vulkan/anv_private.h     2
2 files changed, 48 insertions, 35 deletions
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index ce481fb45f1..38705cf65f7 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -877,11 +877,17 @@ anv_state_pool_init(struct anv_state_pool *pool,
    if (result != VK_SUCCESS)
       return result;
 
+   result = anv_state_table_init(&pool->table, device, 64);
+   if (result != VK_SUCCESS) {
+      anv_block_pool_finish(&pool->block_pool);
+      return result;
+   }
+
    assert(util_is_power_of_two_or_zero(block_size));
    pool->block_size = block_size;
    pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
    for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
-      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
+      pool->buckets[i].free_list = ANV_FREE_LIST2_EMPTY;
       pool->buckets[i].block.next = 0;
       pool->buckets[i].block.end = 0;
    }
@@ -894,6 +900,7 @@ void
 anv_state_pool_finish(struct anv_state_pool *pool)
 {
    VG(VALGRIND_DESTROY_MEMPOOL(pool));
+   anv_state_table_finish(&pool->table);
    anv_block_pool_finish(&pool->block_pool);
 }
@@ -987,22 +994,29 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
 {
    uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
 
-   struct anv_state state;
-   state.alloc_size = anv_state_pool_get_bucket_size(bucket);
+   struct anv_state *state;
+   uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);
+   int32_t offset;
 
    /* Try free list first. */
-   if (anv_free_list_pop(&pool->buckets[bucket].free_list,
-                         &pool->block_pool.map, &state.offset)) {
-      assert(state.offset >= 0);
+   state = anv_free_list_pop2(&pool->buckets[bucket].free_list,
+                              &pool->table);
+   if (state) {
+      assert(state->offset >= 0);
       goto done;
    }
 
    /* Try to grab a chunk from some larger bucket and split it up */
    for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
-      int32_t chunk_offset;
-      if (anv_free_list_pop(&pool->buckets[b].free_list,
-                            &pool->block_pool.map, &chunk_offset)) {
+      state = anv_free_list_pop2(&pool->buckets[b].free_list, &pool->table);
+      if (state) {
          unsigned chunk_size = anv_state_pool_get_bucket_size(b);
+         int32_t chunk_offset = state->offset;
+
+         /* First let's update the state we got to its new size.  offset and
+          * map remain the same.
+          */
+         state->alloc_size = alloc_size;
 
          /* We've found a chunk that's larger than the requested state size.
          * There are a couple of options as to what we do with it:
@@ -1031,43 +1045,43 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
          * We choose option (3).
          */
          if (chunk_size > pool->block_size &&
-             state.alloc_size < pool->block_size) {
+             alloc_size < pool->block_size) {
             assert(chunk_size % pool->block_size == 0);
             /* We don't want to split giant chunks into tiny chunks.  Instead,
              * break anything bigger than a block into block-sized chunks and
              * then break it down into bucket-sized chunks from there.  Return
              * all but the first block of the chunk to the block bucket.
              */
-            const uint32_t block_bucket =
-               anv_state_pool_get_bucket(pool->block_size);
-            anv_free_list_push(&pool->buckets[block_bucket].free_list,
-                               pool->block_pool.map,
-                               chunk_offset + pool->block_size,
-                               pool->block_size,
-                               (chunk_size / pool->block_size) - 1);
+            uint32_t push_back = (chunk_size / pool->block_size) - 1;
+            anv_state_pool_return_blocks(pool, chunk_offset + pool->block_size,
+                                         push_back, pool->block_size);
             chunk_size = pool->block_size;
          }
 
-         assert(chunk_size % state.alloc_size == 0);
-         anv_free_list_push(&pool->buckets[bucket].free_list,
-                            pool->block_pool.map,
-                            chunk_offset + state.alloc_size,
-                            state.alloc_size,
-                            (chunk_size / state.alloc_size) - 1);
-
-         state.offset = chunk_offset;
+         assert(chunk_size % alloc_size == 0);
+         uint32_t push_back = (chunk_size / alloc_size) - 1;
+         anv_state_pool_return_blocks(pool, chunk_offset + alloc_size,
+                                      push_back, alloc_size);
          goto done;
       }
    }
 
-   state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
-                                                      &pool->block_pool,
-                                                      state.alloc_size,
-                                                      pool->block_size);
+   offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
+                                                &pool->block_pool,
+                                                alloc_size,
+                                                pool->block_size);
+   /* Every time we allocate a new state, add it to the state pool */
+   uint32_t idx;
+   VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+   assert(result == VK_SUCCESS);
+
+   state = anv_state_table_get(&pool->table, idx);
+   state->offset = offset;
+   state->alloc_size = alloc_size;
+   state->map = anv_block_pool_map(&pool->block_pool, offset);
 
  done:
-   state.map = anv_block_pool_map(&pool->block_pool, state.offset);
-   return state;
+   return *state;
 }
 
 struct anv_state
@@ -1114,9 +1128,8 @@ anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
                          pool->block_pool.map, state.offset,
                          state.alloc_size, 1);
    } else {
-      anv_free_list_push(&pool->buckets[bucket].free_list,
-                         pool->block_pool.map, state.offset,
-                         state.alloc_size, 1);
+      anv_free_list_push2(&pool->buckets[bucket].free_list,
+                          &pool->table, state.idx, 1);
    }
 }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index bdbd4aad7f4..5ee087fb5c9 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -705,7 +705,7 @@ struct anv_state {
 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
 
 struct anv_fixed_size_state_pool {
-   union anv_free_list free_list;
+   union anv_free_list2 free_list;
    struct anv_block_state block;
 };
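Callers are unaffected by the rework: a state still carries offset, map, and
alloc_size, but a free now hands state.idx back to the bucket's free list
through pool->table instead of writing a free-list node into the pool
mapping. A hedged caller-side sketch (the `pool` and `data` variables are
assumed to already exist):

   struct anv_state state = anv_state_pool_alloc(&pool, 64 /* size */,
                                                 64 /* align */);
   memcpy(state.map, data, 64);        /* CPU write through the pool mapping */
   anv_state_pool_free(&pool, state);  /* round-trips through pool->table */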