author     Jason Ekstrand <[email protected]>      2016-11-07 08:07:43 -0800
committer  Jason Ekstrand <[email protected]>      2016-11-09 11:31:10 -0800
commit     595400d57745fba198b42d95f3c4f5d855023c33 (patch)
tree       8e3953d38208edd658eeac056a17ff793f6f02f2 /src/intel/vulkan/anv_batch_chain.c
parent     0fe68294275a4ab1d9f71bb48d21a5e6b830e1ca (diff)
anv/batch: Move last_ss_pool_bo_offset to the command buffer
The original reason for putting it in the batch_bo was to allow primaries to share it across secondaries or something like that. However, the relocation lists in secondary command buffers are always left alone and copied into the primary command buffer's relocation list. This means that the offset really applies at the command buffer level and putting it in the batch_bo doesn't make sense. This fixes a couple of potential bugs around re-submission of command buffers that are unlikely to be hit but are bugs nonetheless.

Signed-off-by: Jason Ekstrand <[email protected]>
Reviewed-by: Kristian H. Kristensen <[email protected]>
Cc: "13.0" <[email protected]>
Diffstat (limited to 'src/intel/vulkan/anv_batch_chain.c')
-rw-r--r--   src/intel/vulkan/anv_batch_chain.c   33
1 file changed, 21 insertions(+), 12 deletions(-)
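To make the bookkeeping described in the commit message concrete, here is a minimal standalone sketch of the delta-based adjustment that the patch moves to the command buffer. This is not the driver's actual code; the fake_* types and function below are invented for illustration. They only mirror the idea that each command buffer remembers the surface state pool center it last saw, so each submission folds in only the pool growth since then.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fake_reloc {
   uint64_t offset;   /* reloc target offset within the pool's GEM bo */
};

struct fake_reloc_list {
   size_t             num_relocs;
   struct fake_reloc *relocs;
};

struct fake_cmd_buffer {
   struct fake_reloc_list surface_relocs;
   uint32_t               last_ss_pool_center;  /* pool center at the previous submit */
};

/* Apply only the pool growth since the last submission.  Because the state is
 * kept per command buffer, resubmitting the same command buffer computes a
 * delta of zero (if the pool did not grow) instead of re-adding the full
 * center offset to relocations that were already adjusted.
 */
static void
fake_adjust_surface_relocs(struct fake_cmd_buffer *cmd,
                           uint32_t pool_center_bo_offset)
{
   assert(cmd->last_ss_pool_center <= pool_center_bo_offset);
   uint32_t delta = pool_center_bo_offset - cmd->last_ss_pool_center;

   for (size_t i = 0; i < cmd->surface_relocs.num_relocs; i++)
      cmd->surface_relocs.relocs[i].offset += delta;

   /* Record the center so the next submission only folds in further growth. */
   cmd->last_ss_pool_center = pool_center_bo_offset;
}

In the real patch this role is played by cmd_buffer->last_ss_pool_center, which anv_cmd_buffer_prepare_execbuf records after adjusting both the surface relocations and every batch_bo's relocation list, as shown in the diff below.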
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 3b2c167c310..29c79951be7 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -297,8 +297,6 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
bbo->length = other_bbo->length;
memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
- bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
-
*bbo_out = bbo;
return VK_SUCCESS;
@@ -318,7 +316,6 @@ anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
batch->next = batch->start = bbo->bo.map;
batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
batch->relocs = &bbo->relocs;
- bbo->last_ss_pool_bo_offset = 0;
bbo->relocs.num_relocs = 0;
}
@@ -705,6 +702,7 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
&cmd_buffer->pool->alloc);
if (result != VK_SUCCESS)
goto fail_bt_blocks;
+ cmd_buffer->last_ss_pool_center = 0;
anv_cmd_buffer_new_binding_table_block(cmd_buffer);
@@ -770,6 +768,7 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->bt_next = 0;
cmd_buffer->surface_relocs.num_relocs = 0;
+ cmd_buffer->last_ss_pool_center = 0;
/* Reset the list of seen buffers */
cmd_buffer->seen_bbos.head = 0;
@@ -1056,15 +1055,19 @@ write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
static void
adjust_relocations_from_state_pool(struct anv_block_pool *pool,
- struct anv_reloc_list *relocs)
+ struct anv_reloc_list *relocs,
+ uint32_t last_pool_center_bo_offset)
{
+ assert(last_pool_center_bo_offset <= pool->center_bo_offset);
+ uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
+
for (size_t i = 0; i < relocs->num_relocs; i++) {
/* All of the relocations from this block pool to other BO's should
* have been emitted relative to the surface block pool center. We
* need to add the center offset to make them relative to the
* beginning of the actual GEM bo.
*/
- relocs->relocs[i].offset += pool->center_bo_offset;
+ relocs->relocs[i].offset += delta;
}
}
@@ -1072,10 +1075,10 @@ static void
adjust_relocations_to_state_pool(struct anv_block_pool *pool,
struct anv_bo *from_bo,
struct anv_reloc_list *relocs,
- uint32_t *last_pool_center_bo_offset)
+ uint32_t last_pool_center_bo_offset)
{
- assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
- uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
+ assert(last_pool_center_bo_offset <= pool->center_bo_offset);
+ uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
/* When we initially emit relocations into a block pool, we don't
* actually know what the final center_bo_offset will be so we just emit
@@ -1103,8 +1106,6 @@ adjust_relocations_to_state_pool(struct anv_block_pool *pool,
relocs->relocs[i].delta, false);
}
}
-
- *last_pool_center_bo_offset = pool->center_bo_offset;
}
void
@@ -1116,7 +1117,9 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->execbuf2.bo_count = 0;
- adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs);
+ adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
+ cmd_buffer->last_ss_pool_center);
+
anv_execbuf_add_bo(&cmd_buffer->execbuf2, &ss_pool->bo,
&cmd_buffer->surface_relocs,
&cmd_buffer->pool->alloc);
@@ -1127,12 +1130,18 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
struct anv_batch_bo **bbo;
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
- &(*bbo)->last_ss_pool_bo_offset);
+ cmd_buffer->last_ss_pool_center);
anv_execbuf_add_bo(&cmd_buffer->execbuf2, &(*bbo)->bo, &(*bbo)->relocs,
&cmd_buffer->pool->alloc);
}
+ /* Now that we've adjusted all of the surface state relocations, we need to
+ * record the surface state pool center so future executions of the command
+ * buffer can adjust correctly.
+ */
+ cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
+
struct anv_batch_bo *first_batch_bo =
list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);