author | Connor Abbott <[email protected]> | 2020-06-09 15:30:43 +0200
---|---|---
committer | Marge Bot <[email protected]> | 2020-06-09 14:40:52 +0000
commit | f4f6a9be9f639d106055597f21a814b87eb5997b (patch) |
tree | e0d52800e9d139b36d1eb39cebadb3c5f0aefed6 /src/freedreno |
parent | dfb176a0acf2326d36d4867fc43751e1b7d0d66f (diff) |
tu: Don't actually track seqnos for events
We just dropped the last user which actually cared about the seqno.
This never worked anyway, since the seqno was never reset between
multiple executions of the same command buffer. Turn the part of the
control buffer which used to track the seqno into a dummy dword, and
figure out automatically whether we need to include it. We will
implement seqnos again eventually, with timeline semaphores, but that
will likely be totally different.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4964>
Diffstat (limited to 'src/freedreno')
-rw-r--r-- | src/freedreno/vulkan/tu_clear_blit.c | 60
-rw-r--r-- | src/freedreno/vulkan/tu_cmd_buffer.c | 78
-rw-r--r-- | src/freedreno/vulkan/tu_private.h | 8
-rw-r--r-- | src/freedreno/vulkan/tu_query.c | 8
4 files changed, 78 insertions, 76 deletions
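
For orientation before the diff: the patch folds the old `need_seqno` parameter into the helper itself, which now derives it from the event type and writes a constant 0 into the scratch BO's dummy slot instead of an incrementing counter. The sketch below is just the new `tu6_emit_event_write()` reassembled from the tu_cmd_buffer.c hunks that follow; it leans on the `vgt_event_type` enum, the `CP_EVENT_WRITE` packet definitions, and the `tu_cs_emit_*` helpers already in the tree, so it is illustrative rather than a standalone compilation unit.

```c
/* After this patch, callers no longer pass need_seqno: timestamp-style
 * (*_TS) events get a seqno dword automatically, and the value written
 * is a dummy 0 rather than a tracked counter. */
void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event)
{
   bool need_seqno = false;
   switch (event) {
   case CACHE_FLUSH_TS:
   case WT_DONE_TS:
   case RB_DONE_TS:
   case PC_CCU_FLUSH_DEPTH_TS:
   case PC_CCU_FLUSH_COLOR_TS:
   case PC_CCU_RESOLVE_TS:
      need_seqno = true;
      break;
   default:
      break;
   }

   /* CP_EVENT_WRITE is 1 dword without a seqno slot, 4 dwords with one */
   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      /* point at the seqno_dummy slot at the start of the scratch BO
       * and write a constant 0 instead of ++scratch_seqno */
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      tu_cs_emit(cs, 0);
   }
}
```

Call sites simply drop the old boolean argument, which accounts for most of the churn in the diff below.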
diff --git a/src/freedreno/vulkan/tu_clear_blit.c b/src/freedreno/vulkan/tu_clear_blit.c
index 6627c03b6b4..e6f23b07ed4 100644
--- a/src/freedreno/vulkan/tu_clear_blit.c
+++ b/src/freedreno/vulkan/tu_clear_blit.c
@@ -471,11 +471,11 @@ r2d_setup(struct tu_cmd_buffer *cmd,
    const struct tu_physical_device *phys_dev = cmd->device->physical_device;
 
    /* TODO: flushing with barriers instead of blindly always flushing */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    tu_cs_emit_wfi(cs);
    tu_cs_emit_regs(cs,
@@ -491,9 +491,9 @@ r2d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));
 
    /* TODO: flushing with barriers instead of blindly always flushing */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 }
 
 /* r3d_ = shader path operations */
@@ -916,11 +916,11 @@ r3d_setup(struct tu_cmd_buffer *cmd,
 
    if (!cmd->state.pass) {
       /* TODO: flushing with barriers instead of blindly always flushing */
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
+      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
       tu_cs_emit_regs(cs,
                       A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
@@ -982,9 +982,9 @@ r3d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    if (!cmd->state.pass) {
       /* TODO: flushing with barriers instead of blindly always flushing */
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
    }
 }
@@ -1610,8 +1610,8 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
    /* When executed by the user there has to be a pipeline barrier here,
    * but since we're doing it manually we'll have to flush ourselves.
    */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    tu_image_view_blit2(&staging, &staging_image, dst_format,
                        &staging_subresource, 0, false);
@@ -1948,10 +1948,10 @@ tu_clear_sysmem_attachments_2d(struct tu_cmd_buffer *cmd,
          a = subpass->depth_stencil_attachment.attachment;
 
          /* sync depth into color */
-         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
          /* also flush color to avoid losing contents from invalidate */
-         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-         tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
+         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+         tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
       }
 
       if (a == VK_ATTACHMENT_UNUSED)
@@ -1984,11 +1984,11 @@ tu_clear_sysmem_attachments_2d(struct tu_cmd_buffer *cmd,
          * note: cache invalidate might be needed to, and just not covered by test cases */
         if (attachments[j].colorAttachment > 0)
-            tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+            tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
      } else {
         /* sync color into depth */
-         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-         tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
+         tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+         tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
      }
    }
 }
@@ -2216,7 +2216,7 @@ tu_emit_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
    tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
    tu_cs_emit_array(cs, clear_vals, 4);
 
-   tu6_emit_event_write(cmd, cs, BLIT, false);
+   tu6_emit_event_write(cmd, cs, BLIT);
 }
 
 static void
@@ -2371,7 +2371,7 @@ tu_emit_blit(struct tu_cmd_buffer *cmd,
    tu_cs_emit_regs(cs,
                    A6XX_RB_BLIT_BASE_GMEM(attachment->gmem_offset));
 
-   tu6_emit_event_write(cmd, cs, BLIT, false);
+   tu6_emit_event_write(cmd, cs, BLIT);
 }
 
 static bool
@@ -2489,13 +2489,13 @@ tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
                    A6XX_SP_PS_2D_SRC_PITCH(.pitch = tiling->tile0.extent.width * src->cpp));
 
    /* sync GMEM writes with CACHE */
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    tu_cs_emit_pkt7(cs, CP_BLIT, 1);
    tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));
 
    /* TODO: flushing with barriers instead of blindly always flushing */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 }
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c
index 2ba4964fa31..35312920dc6 100644
--- a/src/freedreno/vulkan/tu_cmd_buffer.c
+++ b/src/freedreno/vulkan/tu_cmd_buffer.c
@@ -316,35 +316,43 @@ tu6_index_size(VkIndexType type)
    }
 }
 
-unsigned
+void
 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno)
-{
-   unsigned seqno = 0;
+                     enum vgt_event_type event)
+{
+   bool need_seqno = false;
+   switch (event) {
+   case CACHE_FLUSH_TS:
+   case WT_DONE_TS:
+   case RB_DONE_TS:
+   case PC_CCU_FLUSH_DEPTH_TS:
+   case PC_CCU_FLUSH_COLOR_TS:
+   case PC_CCU_RESOLVE_TS:
+      need_seqno = true;
+      break;
+   default:
+      break;
+   }
 
    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
    tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
    if (need_seqno) {
       tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-      seqno = ++cmd->scratch_seqno;
-      tu_cs_emit(cs, seqno);
+      tu_cs_emit(cs, 0);
    }
-
-   return seqno;
 }
 
 static void
 tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
-   tu6_emit_event_write(cmd, cs, 0x31, false);
+   tu6_emit_event_write(cmd, cs, 0x31);
 }
 
 static void
 tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
-   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 }
 
 static void
@@ -1064,7 +1072,7 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
    * part of draws).
    */
-   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
+   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
 
    tu_cs_emit_wfi(cs);
 
@@ -1130,9 +1138,9 @@ tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    tu6_emit_wfi(cmd, cs);
    tu_cs_emit_regs(cs,
@@ -1174,8 +1182,8 @@ tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    tu6_emit_lrz_flush(cmd, cs);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
 
    tu_cs_sanity_check(cs);
 }
@@ -1196,10 +1204,10 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit(cs, 0x0);
 
    /* TODO: flushing with barriers instead of blindly always flushing */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
+   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
 
    tu_cs_emit_wfi(cs);
    tu_cs_emit_regs(cs,
@@ -1291,7 +1299,7 @@ tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    tu6_emit_lrz_flush(cmd, cs);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS, true);
+   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
 
    tu_cs_sanity_check(cs);
 }
@@ -1704,8 +1712,6 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
    tu_cs_begin(&cmd_buffer->draw_cs);
    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
 
-   cmd_buffer->scratch_seqno = 0;
-
    /* setup initial configuration into command buffer */
    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
       switch (cmd_buffer->queue_family_index) {
@@ -1932,10 +1938,8 @@ tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
 
-   if (cmd_buffer->scratch_seqno) {
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
-                     MSM_SUBMIT_BO_WRITE);
-   }
+   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
+                  MSM_SUBMIT_BO_WRITE);
 
    if (cmd_buffer->use_vsc_data) {
       tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
@@ -2364,11 +2368,11 @@ tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
    /* Emit flushes so that input attachments will read the correct value.
    * TODO: use subpass dependencies to flush or not
    */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
 
    if (subpass->resolve_attachments) {
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
      for (unsigned i = 0; i < subpass->color_count; i++) {
        uint32_t a = subpass->resolve_attachments[i].attachment;
@@ -2379,14 +2383,14 @@ tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
                                subpass->color_attachments[i].attachment);
      }
 
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
    }
 
    tu_cond_exec_end(cs);
 
   /* subpass->input_count > 0 then texture cache invalidate is likely to be needed */
   if (cmd->state.subpass->input_count)
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
    tu6_emit_zs(cmd, cmd->state.subpass, cs);
@@ -3298,7 +3302,7 @@ tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
    if (cmd->state.streamout_enabled) {
      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
        if (cmd->state.streamout_enabled & (1 << i))
-            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
+            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
      }
    }
 
@@ -3684,9 +3688,9 @@ tu_barrier(struct tu_cmd_buffer *cmd,
    * and in sysmem mode we might not need either color/depth flush
    */
   if (cmd->state.pass) {
-      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
-      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
-      tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
+      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS);
+      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS);
+      tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE);
       return;
    }
 }
diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h
index 4d70bdc9e36..372aff4225d 100644
--- a/src/freedreno/vulkan/tu_private.h
+++ b/src/freedreno/vulkan/tu_private.h
@@ -994,7 +994,7 @@ tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
 /* This struct defines the layout of the scratch_bo */
 struct tu6_control
 {
-   uint32_t seqno;          /* seqno for async CP_EVENT_WRITE, etc */
+   uint32_t seqno_dummy;    /* dummy seqno for CP_EVENT_WRITE */
    uint32_t _pad0;
    volatile uint32_t vsc_overflow;
    uint32_t _pad1;
@@ -1048,7 +1048,6 @@ struct tu_cmd_buffer
    struct tu_cs sub_cs;
 
    struct tu_bo scratch_bo;
-   uint32_t scratch_seqno;
 
    struct tu_bo vsc_draw_strm;
    struct tu_bo vsc_prim_strm;
@@ -1072,11 +1071,10 @@ struct tu_reg_value {
    uint32_t bo_shift;
 };
 
-unsigned
+void
 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno);
+                     enum vgt_event_type event);
 
 bool
 tu_get_memory_fd(struct tu_device *device,
diff --git a/src/freedreno/vulkan/tu_query.c b/src/freedreno/vulkan/tu_query.c
index 44e6c27bbc2..6a106a40614 100644
--- a/src/freedreno/vulkan/tu_query.c
+++ b/src/freedreno/vulkan/tu_query.c
@@ -546,7 +546,7 @@ emit_begin_xfb_query(struct tu_cmd_buffer *cmdbuf,
    uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0);
 
    tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS_LO(begin_iova));
-   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS, false);
+   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
 }
 
 void
@@ -693,10 +693,10 @@ emit_end_xfb_query(struct tu_cmd_buffer *cmdbuf,
    uint64_t available_iova = query_available_iova(pool, query);
 
    tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS_LO(end_iova));
-   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS, false);
+   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
 
    tu_cs_emit_wfi(cs);
-   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS, true);
+   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
 
    /* Set the count of written primitives */
    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
@@ -707,7 +707,7 @@ emit_end_xfb_query(struct tu_cmd_buffer *cmdbuf,
    tu_cs_emit_qw(cs, end_written_iova);
    tu_cs_emit_qw(cs, begin_written_iova);
 
-   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS, true);
+   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
 
    /* Set the count of generated primitives */
    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);