author     Jason Ekstrand <[email protected]>    2017-04-24 08:50:23 -0700
committer  Jason Ekstrand <[email protected]>    2017-05-04 19:07:54 -0700
commit     d3ed72e2c2164d0ba5f0d2e6d652d8710030aa2b
tree       6e7742bd688d69465a9e606abc3dd6453248e660
parent     bb2a3f0df8e9f92e7694c3e643c38807bfa79902
anv/allocator: Embed the block_pool in the state_pool
Now that the state stream is allocating off of the state pool, there's no reason why we need the block pool to be separate.

Reviewed-by: Juan A. Suarez Romero <[email protected]>
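For readers skimming the diff, here is a minimal before/after sketch of the caller-facing change, condensed from the tests/state_pool.c and anv_device.c hunks below. The error-handling branch mirrors the anv_CreateDevice usage; variable names are illustrative only.

/* Before this commit: callers created a block pool and layered a state pool on it. */
struct anv_block_pool block_pool;
struct anv_state_pool state_pool;

anv_block_pool_init(&block_pool, &device, 4096);
anv_state_pool_init(&state_pool, &block_pool, 256);   /* returned void */
/* ... allocate states ... */
anv_state_pool_finish(&state_pool);
anv_block_pool_finish(&block_pool);

/* After: the state pool embeds and owns its block pool (created internally with
 * an initial size of block_size * 16), so init can now fail and returns VkResult.
 */
struct anv_state_pool state_pool;

VkResult result = anv_state_pool_init(&state_pool, &device, 256);
if (result != VK_SUCCESS)
   return result;                      /* block pool creation failed */
/* ... allocate states ... */
anv_state_pool_finish(&state_pool);    /* also finishes the embedded block pool */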
Diffstat (limited to 'src/intel/vulkan')
-rw-r--r--  src/intel/vulkan/anv_allocator.c                    | 30
-rw-r--r--  src/intel/vulkan/anv_batch_chain.c                  | 35
-rw-r--r--  src/intel/vulkan/anv_blorp.c                        |  2
-rw-r--r--  src/intel/vulkan/anv_device.c                       | 30
-rw-r--r--  src/intel/vulkan/anv_private.h                      | 17
-rw-r--r--  src/intel/vulkan/gen8_cmd_buffer.c                  |  6
-rw-r--r--  src/intel/vulkan/genX_blorp_exec.c                  |  2
-rw-r--r--  src/intel/vulkan/genX_cmd_buffer.c                  | 15
-rw-r--r--  src/intel/vulkan/tests/state_pool.c                 |  5
-rw-r--r--  src/intel/vulkan/tests/state_pool_free_list_only.c  |  5
-rw-r--r--  src/intel/vulkan/tests/state_pool_no_free.c         |  5
11 files changed, 66 insertions(+), 86 deletions(-)
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 8569f692f63..63f0f9cabca 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -607,12 +607,16 @@ anv_block_pool_alloc_back(struct anv_block_pool *pool,
return -(offset + block_size);
}
-void
+VkResult
anv_state_pool_init(struct anv_state_pool *pool,
- struct anv_block_pool *block_pool,
+ struct anv_device *device,
uint32_t block_size)
{
- pool->block_pool = block_pool;
+ VkResult result = anv_block_pool_init(&pool->block_pool, device,
+ block_size * 16);
+ if (result != VK_SUCCESS)
+ return result;
+
assert(util_is_power_of_two(block_size));
pool->block_size = block_size;
pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
@@ -622,12 +626,15 @@ anv_state_pool_init(struct anv_state_pool *pool,
pool->buckets[i].block.end = 0;
}
VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+
+ return VK_SUCCESS;
}
void
anv_state_pool_finish(struct anv_state_pool *pool)
{
VG(VALGRIND_DESTROY_MEMPOOL(pool));
+ anv_block_pool_finish(&pool->block_pool);
}
static uint32_t
@@ -673,18 +680,18 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
/* Try free list first. */
if (anv_free_list_pop(&pool->buckets[bucket].free_list,
- &pool->block_pool->map, &state.offset)) {
+ &pool->block_pool.map, &state.offset)) {
assert(state.offset >= 0);
goto done;
}
state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
- pool->block_pool,
+ &pool->block_pool,
state.alloc_size,
pool->block_size);
done:
- state.map = pool->block_pool->map + state.offset;
+ state.map = pool->block_pool.map + state.offset;
return state;
}
@@ -706,15 +713,16 @@ anv_state_pool_alloc_back(struct anv_state_pool *pool)
state.alloc_size = pool->block_size;
if (anv_free_list_pop(&pool->back_alloc_free_list,
- &pool->block_pool->map, &state.offset)) {
+ &pool->block_pool.map, &state.offset)) {
assert(state.offset < 0);
goto done;
}
- state.offset = anv_block_pool_alloc_back(pool->block_pool, pool->block_size);
+ state.offset = anv_block_pool_alloc_back(&pool->block_pool,
+ pool->block_size);
done:
- state.map = pool->block_pool->map + state.offset;
+ state.map = pool->block_pool.map + state.offset;
VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
return state;
}
@@ -731,10 +739,10 @@ anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
if (state.offset < 0) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
- pool->block_pool->map, state.offset);
+ pool->block_pool.map, state.offset);
} else {
anv_free_list_push(&pool->buckets[bucket].free_list,
- pool->block_pool->map, state.offset);
+ pool->block_pool.map, state.offset);
}
}
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 480084fd42b..6d6babadea1 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -455,7 +455,7 @@ anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
return (struct anv_address) {
- .bo = &cmd_buffer->device->surface_state_block_pool.bo,
+ .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
.offset = bt_block->offset,
};
}
@@ -632,7 +632,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
return (struct anv_state) { 0 };
state.offset = cmd_buffer->bt_next;
- state.map = state_pool->block_pool->map + bt_block->offset + state.offset;
+ state.map = state_pool->block_pool.map + bt_block->offset + state.offset;
cmd_buffer->bt_next += state.alloc_size;
@@ -1093,12 +1093,12 @@ write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
}
static void
-adjust_relocations_from_state_pool(struct anv_block_pool *pool,
+adjust_relocations_from_state_pool(struct anv_state_pool *pool,
struct anv_reloc_list *relocs,
uint32_t last_pool_center_bo_offset)
{
- assert(last_pool_center_bo_offset <= pool->center_bo_offset);
- uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
+ assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+ uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
for (size_t i = 0; i < relocs->num_relocs; i++) {
/* All of the relocations from this block pool to other BO's should
@@ -1111,13 +1111,13 @@ adjust_relocations_from_state_pool(struct anv_block_pool *pool,
}
static void
-adjust_relocations_to_state_pool(struct anv_block_pool *pool,
+adjust_relocations_to_state_pool(struct anv_state_pool *pool,
struct anv_bo *from_bo,
struct anv_reloc_list *relocs,
uint32_t last_pool_center_bo_offset)
{
- assert(last_pool_center_bo_offset <= pool->center_bo_offset);
- uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
+ assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+ uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
/* When we initially emit relocations into a block pool, we don't
* actually know what the final center_bo_offset will be so we just emit
@@ -1126,7 +1126,7 @@ adjust_relocations_to_state_pool(struct anv_block_pool *pool,
* relocations that point to the pool bo with the correct offset.
*/
for (size_t i = 0; i < relocs->num_relocs; i++) {
- if (relocs->reloc_bos[i] == &pool->bo) {
+ if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
/* Adjust the delta value in the relocation to correctly
* correspond to the new delta. Initially, this value may have
* been negative (if treated as unsigned), but we trust in
@@ -1140,7 +1140,8 @@ adjust_relocations_to_state_pool(struct anv_block_pool *pool,
* use by the GPU at the moment.
*/
assert(relocs->relocs[i].offset < from_bo->size);
- write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
+ write_reloc(pool->block_pool.device,
+ from_bo->map + relocs->relocs[i].offset,
relocs->relocs[i].presumed_offset +
relocs->relocs[i].delta, false);
}
@@ -1230,7 +1231,7 @@ relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
* given time. The only option is to always relocate them.
*/
anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
- &cmd_buffer->device->surface_state_block_pool.bo,
+ &cmd_buffer->device->surface_state_pool.block_pool.bo,
true /* always relocate surface states */);
/* Since we own all of the batch buffers, we know what values are stored
@@ -1254,14 +1255,14 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
struct anv_cmd_buffer *cmd_buffer)
{
struct anv_batch *batch = &cmd_buffer->batch;
- struct anv_block_pool *ss_pool =
- &cmd_buffer->device->surface_state_block_pool;
+ struct anv_state_pool *ss_pool =
+ &cmd_buffer->device->surface_state_pool;
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
- VkResult result =
- anv_execbuf_add_bo(execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs, 0,
- &cmd_buffer->device->alloc);
+ VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
+ &cmd_buffer->surface_relocs, 0,
+ &cmd_buffer->device->alloc);
if (result != VK_SUCCESS)
return result;
@@ -1283,7 +1284,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
* record the surface state pool center so future executions of the command
* buffer can adjust correctly.
*/
- cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
+ cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
struct anv_batch_bo *first_batch_bo =
list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
diff --git a/src/intel/vulkan/anv_blorp.c b/src/intel/vulkan/anv_blorp.c
index e3e952060af..7b6944ad531 100644
--- a/src/intel/vulkan/anv_blorp.c
+++ b/src/intel/vulkan/anv_blorp.c
@@ -710,7 +710,7 @@ void anv_CmdUpdateBuffer(
bs = gcd_pow2_u64(bs, copy_size);
do_buffer_copy(&batch,
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
tmp_data.offset,
dst_buffer->bo, dst_buffer->offset + dstOffset,
copy_size / bs, 1, bs);
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 71967b6dbd2..0773f5f9a54 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -983,7 +983,7 @@ anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align,
state = anv_state_pool_alloc(pool, size, align);
memcpy(state.map, p, size);
- anv_state_flush(pool->block_pool->device, state);
+ anv_state_flush(pool->block_pool.device, state);
return state;
}
@@ -1109,33 +1109,19 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_batch_bo_pool;
- result = anv_block_pool_init(&device->dynamic_state_block_pool, device,
- 16384 * 16);
+ result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384);
if (result != VK_SUCCESS)
goto fail_bo_cache;
- anv_state_pool_init(&device->dynamic_state_pool,
- &device->dynamic_state_block_pool,
- 16384);
-
- result = anv_block_pool_init(&device->instruction_block_pool, device,
- 1024 * 1024 * 16);
+ result = anv_state_pool_init(&device->instruction_state_pool, device,
+ 1024 * 1024);
if (result != VK_SUCCESS)
goto fail_dynamic_state_pool;
- anv_state_pool_init(&device->instruction_state_pool,
- &device->instruction_block_pool,
- 1024 * 1024);
-
- result = anv_block_pool_init(&device->surface_state_block_pool, device,
- 4096 * 16);
+ result = anv_state_pool_init(&device->surface_state_pool, device, 4096);
if (result != VK_SUCCESS)
goto fail_instruction_state_pool;
- anv_state_pool_init(&device->surface_state_pool,
- &device->surface_state_block_pool,
- 4096);
-
result = anv_bo_init_new(&device->workaround_bo, device, 1024);
if (result != VK_SUCCESS)
goto fail_surface_state_pool;
@@ -1180,13 +1166,10 @@ VkResult anv_CreateDevice(
anv_gem_close(device, device->workaround_bo.gem_handle);
fail_surface_state_pool:
anv_state_pool_finish(&device->surface_state_pool);
- anv_block_pool_finish(&device->surface_state_block_pool);
fail_instruction_state_pool:
anv_state_pool_finish(&device->instruction_state_pool);
- anv_block_pool_finish(&device->instruction_block_pool);
fail_dynamic_state_pool:
anv_state_pool_finish(&device->dynamic_state_pool);
- anv_block_pool_finish(&device->dynamic_state_block_pool);
fail_bo_cache:
anv_bo_cache_finish(&device->bo_cache);
fail_batch_bo_pool:
@@ -1230,11 +1213,8 @@ void anv_DestroyDevice(
anv_gem_close(device, device->workaround_bo.gem_handle);
anv_state_pool_finish(&device->surface_state_pool);
- anv_block_pool_finish(&device->surface_state_block_pool);
anv_state_pool_finish(&device->instruction_state_pool);
- anv_block_pool_finish(&device->instruction_block_pool);
anv_state_pool_finish(&device->dynamic_state_pool);
- anv_block_pool_finish(&device->dynamic_state_block_pool);
anv_bo_cache_finish(&device->bo_cache);
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index d66a2681dce..9b0dd678fdc 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -499,7 +499,7 @@ struct anv_fixed_size_state_pool {
#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
struct anv_state_pool {
- struct anv_block_pool *block_pool;
+ struct anv_block_pool block_pool;
/* The size of blocks which will be allocated from the block pool */
uint32_t block_size;
@@ -557,6 +557,9 @@ anv_invalidate_range(void *start, size_t size)
__builtin_ia32_mfence();
}
+/* The block_pool functions are exported for testing only. The block pool should
+ * only be used via a state pool (see below).
+ */
VkResult anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device,
uint32_t initial_size);
@@ -565,9 +568,10 @@ int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
uint32_t block_size);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
uint32_t block_size);
-void anv_state_pool_init(struct anv_state_pool *pool,
- struct anv_block_pool *block_pool,
- uint32_t block_size);
+
+VkResult anv_state_pool_init(struct anv_state_pool *pool,
+ struct anv_device *device,
+ uint32_t block_size);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
uint32_t state_size, uint32_t alignment);
@@ -734,13 +738,8 @@ struct anv_device {
struct anv_bo_cache bo_cache;
- struct anv_block_pool dynamic_state_block_pool;
struct anv_state_pool dynamic_state_pool;
-
- struct anv_block_pool instruction_block_pool;
struct anv_state_pool instruction_state_pool;
-
- struct anv_block_pool surface_state_block_pool;
struct anv_state_pool surface_state_pool;
struct anv_bo workaround_bo;
diff --git a/src/intel/vulkan/gen8_cmd_buffer.c b/src/intel/vulkan/gen8_cmd_buffer.c
index 75fdeb7b823..52412064a95 100644
--- a/src/intel/vulkan/gen8_cmd_buffer.c
+++ b/src/intel/vulkan/gen8_cmd_buffer.c
@@ -609,7 +609,7 @@ void genX(CmdSetEvent)(
pc.DestinationAddressType = DAT_PPGTT,
pc.PostSyncOperation = WriteImmediateData,
pc.Address = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
pc.ImmediateData = VK_EVENT_SET;
@@ -633,7 +633,7 @@ void genX(CmdResetEvent)(
pc.DestinationAddressType = DAT_PPGTT;
pc.PostSyncOperation = WriteImmediateData;
pc.Address = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
pc.ImmediateData = VK_EVENT_RESET;
@@ -662,7 +662,7 @@ void genX(CmdWaitEvents)(
sem.CompareOperation = COMPARE_SAD_EQUAL_SDD,
sem.SemaphoreDataDword = VK_EVENT_SET,
sem.SemaphoreAddress = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
}
diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c
index 7f22b677ea6..71ed70741ec 100644
--- a/src/intel/vulkan/genX_blorp_exec.c
+++ b/src/intel/vulkan/genX_blorp_exec.c
@@ -132,7 +132,7 @@ blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);
*addr = (struct blorp_address) {
- .buffer = &cmd_buffer->device->dynamic_state_block_pool.bo,
+ .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = vb_state.offset,
};
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index bfb54729c0f..ef9b7d0554c 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -91,7 +91,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddress =
- (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
+ (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
sba.DynamicStateBaseAddressModifyEnable = true;
@@ -100,7 +100,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
sba.IndirectObjectBaseAddressModifyEnable = true;
sba.InstructionBaseAddress =
- (struct anv_address) { &device->instruction_block_pool.bo, 0 };
+ (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
sba.InstructionMemoryObjectControlState = GENX(MOCS);
sba.InstructionBaseAddressModifyEnable = true;
@@ -677,7 +677,8 @@ genX(CmdExecuteCommands)(
* copy the surface states for the current subpass into the storage
* we allocated for them in BeginCommandBuffer.
*/
- struct anv_bo *ss_bo = &primary->device->surface_state_block_pool.bo;
+ struct anv_bo *ss_bo =
+ &primary->device->surface_state_pool.block_pool.bo;
struct anv_state src_state = primary->state.render_pass_states;
struct anv_state dst_state = secondary->state.render_pass_states;
assert(src_state.alloc_size == dst_state.alloc_size);
@@ -1456,7 +1457,7 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
c._3DCommandSubOpcode = push_constant_opcodes[stage],
c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
- .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
+ .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_pool.block_pool.bo, state.offset },
.ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#else
.PointerToConstantBuffer0 = { .offset = state.offset },
@@ -1662,7 +1663,7 @@ emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
anv_state_flush(cmd_buffer->device, id_state);
emit_base_vertex_instance_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
}
static void
@@ -1676,7 +1677,7 @@ emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
anv_state_flush(cmd_buffer->device, state);
emit_vertex_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
state.offset, 4, ANV_DRAWID_VB_INDEX);
}
@@ -2097,7 +2098,7 @@ void genX(CmdDispatch)(
anv_state_flush(cmd_buffer->device, state);
cmd_buffer->state.num_workgroups_offset = state.offset;
cmd_buffer->state.num_workgroups_bo =
- &cmd_buffer->device->dynamic_state_block_pool.bo;
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo;
}
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
diff --git a/src/intel/vulkan/tests/state_pool.c b/src/intel/vulkan/tests/state_pool.c
index db3f3ec08a4..249fe64fe92 100644
--- a/src/intel/vulkan/tests/state_pool.c
+++ b/src/intel/vulkan/tests/state_pool.c
@@ -38,14 +38,12 @@ int main(int argc, char **argv)
struct anv_device device = {
.instance = &instance,
};
- struct anv_block_pool block_pool;
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
for (unsigned i = 0; i < NUM_RUNS; i++) {
- anv_block_pool_init(&block_pool, &device, 4096);
- anv_state_pool_init(&state_pool, &block_pool, 256);
+ anv_state_pool_init(&state_pool, &device, 256);
/* Grab one so a zero offset is impossible */
anv_state_pool_alloc(&state_pool, 16, 16);
@@ -53,7 +51,6 @@ int main(int argc, char **argv)
run_state_pool_test(&state_pool);
anv_state_pool_finish(&state_pool);
- anv_block_pool_finish(&block_pool);
}
pthread_mutex_destroy(&device.mutex);
diff --git a/src/intel/vulkan/tests/state_pool_free_list_only.c b/src/intel/vulkan/tests/state_pool_free_list_only.c
index 93b71efd437..6a04d641ce4 100644
--- a/src/intel/vulkan/tests/state_pool_free_list_only.c
+++ b/src/intel/vulkan/tests/state_pool_free_list_only.c
@@ -37,12 +37,10 @@ int main(int argc, char **argv)
struct anv_device device = {
.instance = &instance,
};
- struct anv_block_pool block_pool;
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_block_pool_init(&block_pool, &device, 4096);
- anv_state_pool_init(&state_pool, &block_pool, 4096);
+ anv_state_pool_init(&state_pool, &device, 4096);
/* Grab one so a zero offset is impossible */
anv_state_pool_alloc(&state_pool, 16, 16);
@@ -64,6 +62,5 @@ int main(int argc, char **argv)
run_state_pool_test(&state_pool);
anv_state_pool_finish(&state_pool);
- anv_block_pool_finish(&block_pool);
pthread_mutex_destroy(&device.mutex);
}
diff --git a/src/intel/vulkan/tests/state_pool_no_free.c b/src/intel/vulkan/tests/state_pool_no_free.c
index c3c7c24a0a3..1ba832cf665 100644
--- a/src/intel/vulkan/tests/state_pool_no_free.c
+++ b/src/intel/vulkan/tests/state_pool_no_free.c
@@ -58,12 +58,10 @@ static void run_test()
struct anv_device device = {
.instance = &instance,
};
- struct anv_block_pool block_pool;
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_block_pool_init(&block_pool, &device, 4096);
- anv_state_pool_init(&state_pool, &block_pool, 64);
+ anv_state_pool_init(&state_pool, &device, 64);
pthread_barrier_init(&barrier, NULL, NUM_THREADS);
@@ -109,7 +107,6 @@ static void run_test()
}
anv_state_pool_finish(&state_pool);
- anv_block_pool_finish(&block_pool);
pthread_mutex_destroy(&device.mutex);
}