Diffstat (limited to 'src')
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c    4
-rw-r--r--  src/amd/vulkan/radv_device.c       27
-rw-r--r--  src/amd/vulkan/radv_meta_buffer.c   1
-rw-r--r--  src/amd/vulkan/radv_private.h       2
-rw-r--r--  src/amd/vulkan/radv_query.c         4
-rw-r--r--  src/amd/vulkan/si_cmd_buffer.c      2
6 files changed, 8 insertions, 32 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index ef44859ccf0..5aea0e9de4b 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -1278,7 +1278,6 @@ radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer, bool instanced_o
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
- cmd_buffer->no_draws = false;
if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
cmd_buffer->state.pipeline->num_vertex_attribs) {
unsigned vb_offset;
@@ -1600,7 +1599,6 @@ static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
cmd_buffer->record_fail = false;
cmd_buffer->ring_offsets_idx = -1;
- cmd_buffer->no_draws = true;
}
VkResult radv_ResetCommandBuffer(
@@ -2447,7 +2445,6 @@ void radv_CmdDrawIndexedIndirectCountAMD(
static void
radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
{
- cmd_buffer->no_draws = false;
radv_emit_compute_pipeline(cmd_buffer);
radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
VK_SHADER_STAGE_COMPUTE_BIT);
@@ -2890,7 +2887,6 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
- cmd_buffer->no_draws = false;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 6f2fac2a9fe..7900ece9c89 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -1461,18 +1461,8 @@ VkResult radv_QueueSubmit(
struct radeon_winsys_cs **cs_array;
bool can_patch = true;
uint32_t advance;
- int draw_cmd_buffers_count = 0;
- for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
- pSubmits[i].pCommandBuffers[j]);
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- if (cmd_buffer->no_draws == true)
- continue;
- draw_cmd_buffers_count++;
- }
-
- if (!draw_cmd_buffers_count) {
+ if (!pSubmits[i].commandBufferCount) {
if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
&queue->device->empty_cs[queue->queue_family_index],
@@ -1491,27 +1481,24 @@ VkResult radv_QueueSubmit(
continue;
}
- cs_array = malloc(sizeof(struct radeon_winsys_cs *) * draw_cmd_buffers_count);
+ cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
+ pSubmits[i].commandBufferCount);
- int draw_cmd_buffer_idx = 0;
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- if (cmd_buffer->no_draws == true)
- continue;
- cs_array[draw_cmd_buffer_idx] = cmd_buffer->cs;
- draw_cmd_buffer_idx++;
+ cs_array[j] = cmd_buffer->cs;
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
can_patch = false;
}
- for (uint32_t j = 0; j < draw_cmd_buffers_count; j += advance) {
+ for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
advance = MIN2(max_cs_submission,
- draw_cmd_buffers_count - j);
+ pSubmits[i].commandBufferCount - j);
bool b = j == 0;
- bool e = j + advance == draw_cmd_buffers_count;
+ bool e = j + advance == pSubmits[i].commandBufferCount;
if (queue->device->trace_bo)
*queue->device->trace_id_ptr = 0;
diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c
index 4857d3dc54f..cd2973fa4a9 100644
--- a/src/amd/vulkan/radv_meta_buffer.c
+++ b/src/amd/vulkan/radv_meta_buffer.c
@@ -523,7 +523,6 @@ void radv_CmdUpdateBuffer(
assert(!(dataSize & 3));
assert(!(va & 3));
- cmd_buffer->no_draws = false;
if (dataSize < 4096) {
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 099dba3f643..ac21b075c2c 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -746,8 +746,6 @@ struct radv_cmd_buffer {
uint32_t gsvs_ring_size_needed;
int ring_offsets_idx; /* just used for verification */
-
- bool no_draws;
};
struct radv_image;
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index fd5d0659a9e..a29a05d4b84 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -211,7 +211,6 @@ void radv_CmdCopyQueryPoolResults(
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
- cmd_buffer->no_draws = false;
for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
unsigned query = firstQuery + i;
@@ -311,7 +310,6 @@ void radv_CmdBeginQuery(
va += pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
- cmd_buffer->no_draws = false;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
@@ -345,7 +343,6 @@ void radv_CmdEndQuery(
va += pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
- cmd_buffer->no_draws = false;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
@@ -397,7 +394,6 @@ void radv_CmdWriteTimestamp(
uint64_t query_va = va + pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);
- cmd_buffer->no_draws = false;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index e20e3bd101e..e2ba413b835 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -923,7 +923,7 @@ static void si_emit_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer,
static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
uint64_t remaining_size, unsigned *flags)
{
- cmd_buffer->no_draws = false;
+
/* Flush the caches for the first copy only.
* Also wait for the previous CP DMA operations.
*/