author     Jason Ekstrand <[email protected]>   2017-04-24 08:50:23 -0700
committer  Jason Ekstrand <[email protected]>   2017-05-04 19:07:54 -0700
commit     d3ed72e2c2164d0ba5f0d2e6d652d8710030aa2b
tree       6e7742bd688d69465a9e606abc3dd6453248e660 /src/intel/vulkan/anv_batch_chain.c
parent     bb2a3f0df8e9f92e7694c3e643c38807bfa79902
anv/allocator: Embed the block_pool in the state_pool
Now that the state stream is allocating off of the state pool, there's no
reason why we need the block pool to be separate.

Reviewed-by: Juan A. Suarez Romero <[email protected]>
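A minimal sketch of the layout change this commit relies on, using stand-in
definitions rather than the real anv headers (only the fields the hunks below
touch are shown): the block pool is now embedded in the state pool by value,
so callers write pool->block_pool.map, pool->block_pool.bo, and so on, and the
surface-state BO is reached through device->surface_state_pool.block_pool
instead of the old device->surface_state_block_pool.

#include <stdint.h>

struct anv_device;   /* opaque here; only pointed to */

/* Stand-in for the BO backing a pool; the real struct has more fields. */
struct anv_bo {
   void     *map;    /* CPU mapping of the buffer object */
   uint64_t  size;
};

/* Simplified block pool: just the fields this diff dereferences. */
struct anv_block_pool {
   struct anv_device *device;
   struct anv_bo      bo;
   uint32_t           center_bo_offset;
   void              *map;
};

/* After this commit the state pool embeds its block pool by value, so
 * there is no separate device->surface_state_block_pool any more.
 */
struct anv_state_pool {
   struct anv_block_pool block_pool;
   /* free-list / state-stream bookkeeping omitted */
};

As a result, the relocation helpers below now take a struct anv_state_pool *
where they previously took a struct anv_block_pool *, and each pool->field
access becomes pool->block_pool.field.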
Diffstat (limited to 'src/intel/vulkan/anv_batch_chain.c')
-rw-r--r--  src/intel/vulkan/anv_batch_chain.c  |  35
1 file changed, 18 insertions, 17 deletions
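The hunks touching adjust_relocations_from_state_pool and
adjust_relocations_to_state_pool only re-route field accesses, but the in-tree
comments around them describe the delta adjustment those helpers perform. A
toy, self-contained illustration of that idea (not Mesa code; the names here
are made up): once the pool's center_bo_offset has grown past the value
recorded when the relocations were emitted, every relocation delta is shifted
by the difference, relying on uint32_t wrap-around exactly as the comment in
the diff notes.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_reloc {
   uint32_t delta;   /* offset recorded relative to the pool's center */
};

/* Shift every relocation by how far the pool's center has moved since the
 * relocations were recorded (illustrative only).
 */
static void
toy_adjust_relocs(struct toy_reloc *relocs, size_t num_relocs,
                  uint32_t center_bo_offset, uint32_t last_center_bo_offset)
{
   assert(last_center_bo_offset <= center_bo_offset);
   uint32_t delta = center_bo_offset - last_center_bo_offset;

   for (size_t i = 0; i < num_relocs; i++)
      relocs[i].delta += delta;   /* uint32_t wrap-around is intentional */
}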
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 480084fd42b..6d6babadea1 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -455,7 +455,7 @@ anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    return (struct anv_address) {
-      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
+      .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
       .offset = bt_block->offset,
    };
 }
@@ -632,7 +632,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
       return (struct anv_state) { 0 };
    state.offset = cmd_buffer->bt_next;
-   state.map = state_pool->block_pool->map + bt_block->offset + state.offset;
+   state.map = state_pool->block_pool.map + bt_block->offset + state.offset;
    cmd_buffer->bt_next += state.alloc_size;
@@ -1093,12 +1093,12 @@ write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
 }
 static void
-adjust_relocations_from_state_pool(struct anv_block_pool *pool,
+adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                    struct anv_reloc_list *relocs,
                                    uint32_t last_pool_center_bo_offset)
 {
-   assert(last_pool_center_bo_offset <= pool->center_bo_offset);
-   uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
+   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
    for (size_t i = 0; i < relocs->num_relocs; i++) {
       /* All of the relocations from this block pool to other BO's should
@@ -1111,13 +1111,13 @@ adjust_relocations_from_state_pool(struct anv_block_pool *pool,
 }
 static void
-adjust_relocations_to_state_pool(struct anv_block_pool *pool,
+adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                  struct anv_bo *from_bo,
                                  struct anv_reloc_list *relocs,
                                  uint32_t last_pool_center_bo_offset)
 {
-   assert(last_pool_center_bo_offset <= pool->center_bo_offset);
-   uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
+   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
    /* When we initially emit relocations into a block pool, we don't
     * actually know what the final center_bo_offset will be so we just emit
@@ -1126,7 +1126,7 @@ adjust_relocations_to_state_pool(struct anv_block_pool *pool,
     * relocations that point to the pool bo with the correct offset.
     */
    for (size_t i = 0; i < relocs->num_relocs; i++) {
-      if (relocs->reloc_bos[i] == &pool->bo) {
+      if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
          /* Adjust the delta value in the relocation to correctly
           * correspond to the new delta. Initially, this value may have
           * been negative (if treated as unsigned), but we trust in
@@ -1140,7 +1140,8 @@ adjust_relocations_to_state_pool(struct anv_block_pool *pool,
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
-         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
+         write_reloc(pool->block_pool.device,
+                     from_bo->map + relocs->relocs[i].offset,
                      relocs->relocs[i].presumed_offset +
                      relocs->relocs[i].delta, false);
      }
@@ -1230,7 +1231,7 @@ relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
     * given time. The only option is to always relocate them.
     */
    anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
-                        &cmd_buffer->device->surface_state_block_pool.bo,
+                        &cmd_buffer->device->surface_state_pool.block_pool.bo,
                         true /* always relocate surface states */);
    /* Since we own all of the batch buffers, we know what values are stored
@@ -1254,14 +1255,14 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                              struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_batch *batch = &cmd_buffer->batch;
-   struct anv_block_pool *ss_pool =
-      &cmd_buffer->device->surface_state_block_pool;
+   struct anv_state_pool *ss_pool =
+      &cmd_buffer->device->surface_state_pool;
    adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                       cmd_buffer->last_ss_pool_center);
-   VkResult result =
-      anv_execbuf_add_bo(execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs, 0,
-                         &cmd_buffer->device->alloc);
+   VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
+                                        &cmd_buffer->surface_relocs, 0,
+                                        &cmd_buffer->device->alloc);
    if (result != VK_SUCCESS)
       return result;
@@ -1283,7 +1284,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
     * record the surface state pool center so future executions of the command
     * buffer can adjust correctly.
     */
-   cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
+   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
    struct anv_batch_bo *first_batch_bo =
       list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);