author     Jason Ekstrand <[email protected]>   2015-07-29 15:28:51 -0700
committer  Jason Ekstrand <[email protected]>   2015-07-29 15:30:15 -0700
commit     3ed9cea84d42d7a5f37ed64d99a6a5ba064767f3
tree       186bd9ce58fb3ab110f2d760c8e7b1d359f35ef8
parent     0f31c580bfaa2788e8d320c0bf4acb9b70a90e05
vk/cmd_buffer: Use an array to track all known anv_batch_bo objects
Instead of walking the list of batch and surface buffers, we simply keep track of all known batch and surface buffers as we build the command buffer. Then we use this new list to construct the validate list.
-rw-r--r--  src/vulkan/anv_cmd_buffer.c | 61
-rw-r--r--  src/vulkan/anv_private.h    |  7
2 files changed, 51 insertions(+), 17 deletions(-)
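
To illustrate the bookkeeping this commit introduces, here is a minimal, self-contained sketch of the pattern: every batch or surface buffer object is appended to a growable array of pointers at the moment it is created, and the validate list is later built with a single walk over that array. The sketch does not use the driver's anv_vector API; struct fake_bbo, struct bbo_vec, and its helpers are hypothetical stand-ins.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct anv_batch_bo. */
struct fake_bbo {
   int id;
};

/* Minimal growable array of bbo pointers, playing the role that the
 * anv_vector in cmd_buffer->seen_bbos plays in the patch. */
struct bbo_vec {
   struct fake_bbo **items;
   size_t count;
   size_t capacity;
};

static int
bbo_vec_init(struct bbo_vec *v, size_t initial_capacity)
{
   v->items = malloc(initial_capacity * sizeof(*v->items));
   if (v->items == NULL)
      return 0;
   v->count = 0;
   v->capacity = initial_capacity;
   return 1;
}

static struct fake_bbo **
bbo_vec_add(struct bbo_vec *v)
{
   if (v->count == v->capacity) {
      struct fake_bbo **tmp =
         realloc(v->items, 2 * v->capacity * sizeof(*tmp));
      if (tmp == NULL)
         return NULL;
      v->items = tmp;
      v->capacity *= 2;
   }
   return &v->items[v->count++];
}

static void
bbo_vec_finish(struct bbo_vec *v)
{
   free(v->items);
}

int
main(void)
{
   struct bbo_vec seen;
   if (!bbo_vec_init(&seen, 8))
      return 1;

   /* Record every buffer the moment it is created, as the patch does in
    * anv_cmd_buffer_chain_batch() and anv_cmd_buffer_new_surface_state_bo(). */
   for (int i = 0; i < 20; i++) {
      struct fake_bbo *bbo = malloc(sizeof(*bbo));
      struct fake_bbo **slot = bbo_vec_add(&seen);
      if (bbo == NULL || slot == NULL)
         return 1;
      bbo->id = i;
      *slot = bbo;
   }

   /* Building the validate list is then a single walk over the array,
    * instead of separate walks over the batch and surface lists. */
   for (size_t i = 0; i < seen.count; i++)
      printf("validate entry %zu -> bbo %d\n", i, seen.items[i]->id);

   for (size_t i = 0; i < seen.count; i++)
      free(seen.items[i]);
   bbo_vec_finish(&seen);

   return 0;
}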
diff --git a/src/vulkan/anv_cmd_buffer.c b/src/vulkan/anv_cmd_buffer.c
index b47650aff73..2d46cc0c0ad 100644
--- a/src/vulkan/anv_cmd_buffer.c
+++ b/src/vulkan/anv_cmd_buffer.c
@@ -305,6 +305,13 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
if (result != VK_SUCCESS)
return result;
+ struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+ if (seen_bbo == NULL) {
+ anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ *seen_bbo = new_bbo;
+
/* We set the end of the batch a little short so we would be sure we
* have room for the chaining command. Since we're about to emit the
* chaining command, let's set it back where it should go.
@@ -370,6 +377,13 @@ anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
if (result != VK_SUCCESS)
return result;
+ struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+ if (seen_bbo == NULL) {
+ anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ *seen_bbo = new_bbo;
+
cmd_buffer->surface_next = 1;
list_addtail(&new_bbo->link, &cmd_buffer->surface_bos);
@@ -406,6 +420,15 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
list_addtail(&surface_bbo->link, &cmd_buffer->surface_bos);
+ int success = anv_vector_init(&cmd_buffer->seen_bbos,
+ sizeof(struct anv_bo *),
+ 8 * sizeof(struct anv_bo *));
+ if (!success)
+ goto fail_surface_bo;
+
+ *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
+ *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = surface_bbo;
+
/* Start surface_next at 1 so surface offset 0 is invalid. */
cmd_buffer->surface_next = 1;
@@ -415,6 +438,8 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
return VK_SUCCESS;
+ fail_surface_bo:
+ anv_batch_bo_destroy(surface_bbo, device);
fail_batch_bo:
anv_batch_bo_destroy(batch_bo, device);
@@ -426,6 +451,8 @@ anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_device *device = cmd_buffer->device;
+ anv_vector_finish(&cmd_buffer->seen_bbos);
+
/* Destroy all of the batch buffers */
list_for_each_entry_safe(struct anv_batch_bo, bbo,
&cmd_buffer->batch_bos, link) {
@@ -472,6 +499,15 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs.num_relocs = 0;
cmd_buffer->surface_next = 1;
+
+ /* Reset the list of seen buffers */
+ cmd_buffer->seen_bbos.head = 0;
+ cmd_buffer->seen_bbos.tail = 0;
+
+ *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+ anv_cmd_buffer_current_batch_bo(cmd_buffer);
+ *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+ anv_cmd_buffer_current_surface_bbo(cmd_buffer);
}
void
@@ -595,15 +631,12 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->execbuf2.bo_count = 0;
cmd_buffer->execbuf2.need_reloc = false;
- list_for_each_entry(struct anv_batch_bo, bbo,
- &cmd_buffer->batch_bos, link) {
- anv_cmd_buffer_add_bo(cmd_buffer, &bbo->bo, &bbo->relocs);
- }
-
- list_for_each_entry(struct anv_batch_bo, bbo,
- &cmd_buffer->surface_bos, link) {
- anv_cmd_buffer_add_bo(cmd_buffer, &bbo->bo, &bbo->relocs);
- }
+ /* First, we walk over all of the bos we've seen and add them and their
+ * relocations to the validate list.
+ */
+ struct anv_batch_bo **bbo;
+ anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+ anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
struct anv_batch_bo *first_batch_bo =
list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
@@ -636,14 +669,8 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
* the correct indices in the object array. We have to do this after we
* reorder the list above as some of the indices may have changed.
*/
- list_for_each_entry(struct anv_batch_bo, bbo,
- &cmd_buffer->surface_bos, link) {
- anv_cmd_buffer_process_relocs(cmd_buffer, &bbo->relocs);
- }
- list_for_each_entry_rev(struct anv_batch_bo, bbo,
- &cmd_buffer->batch_bos, link) {
- anv_cmd_buffer_process_relocs(cmd_buffer, &bbo->relocs);
- }
+ anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+ anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
.buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
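
One detail worth noting from the hunk above: relocations are resolved into indices in the execbuf object array, and the surrounding code reorders that array after it is first filled, which is why the single relocation-processing walk stays after the reordering step. A tiny hypothetical sketch of that ordering constraint; struct obj, struct reloc, and index_of() below are illustrative only, not the driver's structures.

#include <stdio.h>

/* Hypothetical miniature of an execbuf object list plus one relocation
 * that must record its target's index in that list. */
struct obj {
   const char *name;
};

struct reloc {
   const struct obj *target; /* buffer the relocation points at       */
   unsigned target_index;    /* its index in the *final* object array */
};

static unsigned
index_of(const struct obj **list, unsigned count, const struct obj *target)
{
   for (unsigned i = 0; i < count; i++)
      if (list[i] == target)
         return i;
   return ~0u;
}

int
main(void)
{
   struct obj batch = { "batch" };
   struct obj surface = { "surface" };

   /* Object list as first assembled ... */
   const struct obj *list[2] = { &batch, &surface };

   /* ... and then reordered, which changes the indices.  Resolving a
    * relocation before this point would bake in a stale index. */
   list[0] = &surface;
   list[1] = &batch;

   struct reloc r = { .target = &batch };
   r.target_index = index_of(list, 2, r.target);

   printf("reloc to '%s' resolves to index %u\n", r.target->name, r.target_index);

   return 0;
}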
diff --git a/src/vulkan/anv_private.h b/src/vulkan/anv_private.h
index 1d04dfca9d7..a3787229a74 100644
--- a/src/vulkan/anv_private.h
+++ b/src/vulkan/anv_private.h
@@ -703,6 +703,13 @@ struct anv_cmd_buffer {
struct list_head surface_bos;
uint32_t surface_next;
+ /* A vector of anv_batch_bo pointers for every batch or surface buffer
+ * referenced by this command buffer
+ *
+ * initialized by anv_cmd_buffer_init_batch_bo_chain()
+ */
+ struct anv_vector seen_bbos;
+
/* Information needed for execbuf
*
* These fields are generated by anv_cmd_buffer_prepare_execbuf().