author    Dave Airlie <[email protected]>    2018-03-19 01:24:52 +0000
committer Dave Airlie <[email protected]>    2018-03-19 19:29:05 +0000
commit    4034dc5c72791e010eb64dece4bca542f56cec09 (patch)
tree      8ac110a100aa9d9ddc8b04c66c59880e0d0f2677 /src/amd/vulkan
parent    d4c74aed7a81c65ef91d4d3065b3f822355746e7 (diff)
radv/query: split out begin/end query emission
This just splits out the begin/end query hw emissions; it makes it easier to add multiview support for queries.

Reviewed-by: Samuel Pitoiset <[email protected]>
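For context, a minimal sketch of how the split could be used for the multiview support it prepares for. This is an illustration only: begin_query_multiview, the view_mask parameter, and the assumption that each view gets its own consecutive query slot (advancing by pool->stride) are not part of this commit.

/* Hypothetical multiview-aware wrapper built on the new helper.
 * Emits one begin-query per active view, assuming consecutive
 * per-view query slots in the pool (an assumption, not this commit).
 */
static void begin_query_multiview(struct radv_cmd_buffer *cmd_buffer,
                                  struct radv_query_pool *pool,
                                  uint64_t va, uint32_t view_mask)
{
	if (!view_mask) {
		emit_begin_query(cmd_buffer, va, pool->type);
		return;
	}

	while (view_mask) {
		emit_begin_query(cmd_buffer, va, pool->type);
		va += pool->stride;         /* next per-view slot */
		view_mask &= view_mask - 1; /* drop lowest set view bit */
	}
}

Because emit_begin_query() takes only a VA and a query type, such a wrapper never has to re-derive pool state per view; that is exactly what the split buys.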
Diffstat (limited to 'src/amd/vulkan')
-rw-r--r--  src/amd/vulkan/radv_query.c | 98
1 file changed, 57 insertions(+), 41 deletions(-)
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 9fee4d2b491..5fae8b65658 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -1077,33 +1077,12 @@ void radv_CmdResetQueryPool(
}
}
-void radv_CmdBeginQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t query,
- VkQueryControlFlags flags)
+static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
+ uint64_t va,
+ VkQueryType query_type)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(pool->bo);
- va += pool->stride * query;
-
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8);
-
- if (cmd_buffer->pending_reset_query) {
- if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
- /* Only need to flush caches if the query pool size is
- * large enough to be reset using the compute shader
- * path. Small pools don't need any cache flushes
- * because we use a CP dma clear.
- */
- si_emit_cache_flush(cmd_buffer);
- cmd_buffer->pending_reset_query = false;
- }
- }
-
- switch (pool->type) {
+ switch (query_type) {
case VK_QUERY_TYPE_OCCLUSION:
radeon_check_space(cmd_buffer->device->ws, cs, 7);
@@ -1127,26 +1106,15 @@ void radv_CmdBeginQuery(
default:
unreachable("beginning unhandled query type");
}
-}
+}
-void radv_CmdEndQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t query)
+static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
+ uint64_t va, uint64_t avail_va,
+ VkQueryType query_type)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(pool->bo);
- uint64_t avail_va = va + pool->availability_offset + 4 * query;
- va += pool->stride * query;
-
- /* Do not need to add the pool BO to the list because the query must
- * currently be active, which means the BO is already in the list.
- */
-
- switch (pool->type) {
+ switch (query_type) {
case VK_QUERY_TYPE_OCCLUSION:
radeon_check_space(cmd_buffer->device->ws, cs, 14);
@@ -1182,6 +1150,54 @@ void radv_CmdEndQuery(
}
}
+void radv_CmdBeginQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ uint64_t va = radv_buffer_get_va(pool->bo);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8);
+
+ if (cmd_buffer->pending_reset_query) {
+ if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
+ /* Only need to flush caches if the query pool size is
+ * large enough to be reset using the compute shader
+ * path. Small pools don't need any cache flushes
+ * because we use a CP dma clear.
+ */
+ si_emit_cache_flush(cmd_buffer);
+ cmd_buffer->pending_reset_query = false;
+ }
+ }
+
+ va += pool->stride * query;
+
+ emit_begin_query(cmd_buffer, va, pool->type);
+}
+
+
+void radv_CmdEndQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ uint64_t va = radv_buffer_get_va(pool->bo);
+ uint64_t avail_va = va + pool->availability_offset + 4 * query;
+ va += pool->stride * query;
+
+ /* Do not need to add the pool BO to the list because the query must
+ * currently be active, which means the BO is already in the list.
+ */
+ emit_end_query(cmd_buffer, va, avail_va, pool->type);
+}
+
void radv_CmdWriteTimestamp(
VkCommandBuffer commandBuffer,
VkPipelineStageFlagBits pipelineStage,