author     Samuel Pitoiset <[email protected]>   2018-06-26 22:35:04 +0200
committer  Samuel Pitoiset <[email protected]>   2018-06-27 09:48:15 +0200
commit     9c09e7d66e6b93976778a0a34a1828bd513fcce9
tree       481e44bc671eade1a78fa07372f3a72e04311daf /src/amd
parent     a6b64d6dde5bb6f4fade2da98b22dae9de831fd6
radv: remove unused 'predicated' parameter from some functions
It's always false.
Signed-off-by: Samuel Pitoiset <[email protected]>
Reviewed-by: Dave Airlie <[email protected]>
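
At a glance, the interface change is only the removal of the always-false flag; the before/after prototypes below are lifted from the radv_private.h hunk in the diff, with an "_old" suffix and a forward declaration added purely so the fragment stands alone as C (neither exists in the tree).

#include <stdbool.h>
#include <stdint.h>

struct radeon_cmdbuf;   /* opaque here; fully defined elsewhere in the driver */

/* Before this patch: every caller passed a predication flag that was always
 * false (the "_old" name is only for side-by-side illustration). */
void si_emit_wait_fence_old(struct radeon_cmdbuf *cs,
                            bool predicated,
                            uint64_t va, uint32_t ref,
                            uint32_t mask);

/* After this patch: the flag is gone and the helper always emits the packet
 * unpredicated. */
void si_emit_wait_fence(struct radeon_cmdbuf *cs,
                        uint64_t va, uint32_t ref,
                        uint32_t mask);

si_cs_emit_write_event_eop() and the static si_emit_acquire_mem() lose the parameter in the same way, as the diff below shows.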
Diffstat (limited to 'src/amd')
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c   3
-rw-r--r--  src/amd/vulkan/radv_private.h      2
-rw-r--r--  src/amd/vulkan/radv_query.c        7
-rw-r--r--  src/amd/vulkan/si_cmd_buffer.c    28
4 files changed, 15 insertions, 25 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 110a9a960a9..d2c30174cf3 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -4231,7 +4231,6 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,
 	 * the stage mask. */
 	si_cs_emit_write_event_eop(cs,
-				   cmd_buffer->state.predicating,
 				   cmd_buffer->device->physical_device->rad_info.chip_class,
 				   radv_cmd_buffer_uses_mec(cmd_buffer),
 				   V_028A90_BOTTOM_OF_PIPE_TS, 0,
@@ -4283,7 +4282,7 @@ void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
 	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
-	si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
+	si_emit_wait_fence(cs, va, 1, 0xffffffff);
 	assert(cmd_buffer->cs->cdw <= cdw_max);
 }
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index a202697e935..bb0499913a1 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -1065,7 +1065,6 @@ uint32_t si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
 				   bool instanced_draw, bool indirect_draw,
 				   uint32_t draw_vertex_count);
 void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
-				bool predicated,
 				enum chip_class chip_class,
 				bool is_mec,
 				unsigned event, unsigned event_flags,
@@ -1075,7 +1074,6 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 				uint32_t new_fence);
 void si_emit_wait_fence(struct radeon_cmdbuf *cs,
-			bool predicated,
 			uint64_t va, uint32_t ref,
 			uint32_t mask);
 void si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 384d75c210d..267d45890ed 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -992,7 +992,7 @@ void radv_CmdCopyQueryPoolResults(
 				uint64_t avail_va = va + pool->availability_offset + 4 * query;
 				/* This waits on the ME. All copies below are done on the ME */
-				si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
+				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
 			}
 		}
 		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
@@ -1015,7 +1015,7 @@ void radv_CmdCopyQueryPoolResults(
 			uint64_t avail_va = va + pool->availability_offset + 4 * query;
 			/* This waits on the ME. All copies below are done on the ME */
-			si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
+			si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
 		}
 		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
 			uint64_t avail_va = va + pool->availability_offset + 4 * query;
@@ -1176,7 +1176,6 @@ static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
 		radeon_emit(cs, va >> 32);
 		si_cs_emit_write_event_eop(cs,
-					   false,
 					   cmd_buffer->device->physical_device->rad_info.chip_class,
 					   radv_cmd_buffer_uses_mec(cmd_buffer),
 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
@@ -1300,14 +1299,12 @@ void radv_CmdWriteTimestamp(
 		break;
 	default:
 		si_cs_emit_write_event_eop(cs,
-					   false,
 					   cmd_buffer->device->physical_device->rad_info.chip_class,
 					   mec,
 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
 					   EOP_DATA_SEL_TIMESTAMP,
 					   query_va, 0, 0);
 		si_cs_emit_write_event_eop(cs,
-					   false,
 					   cmd_buffer->device->physical_device->rad_info.chip_class,
 					   mec,
 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index 3491710ad86..454fd8c39c8 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -673,7 +673,6 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
 }
 void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
-				bool predicated,
 				enum chip_class chip_class,
 				bool is_mec,
 				unsigned event, unsigned event_flags,
@@ -694,7 +693,7 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
 	if (chip_class >= GFX9 || is_gfx8_mec) {
-		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, predicated));
+		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
 		radeon_emit(cs, op);
 		radeon_emit(cs, sel);
 		radeon_emit(cs, va);		/* address lo */
@@ -710,7 +709,7 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 			 * (and optional cache flushes executed) before the timestamp
 			 * is written.
 			 */
-			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
 			radeon_emit(cs, op);
 			radeon_emit(cs, va);
 			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
@@ -718,7 +717,7 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 			radeon_emit(cs, 0); /* unused */
 		}
-		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
 		radeon_emit(cs, op);
 		radeon_emit(cs, va);
 		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
@@ -729,11 +728,10 @@ void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
 void
 si_emit_wait_fence(struct radeon_cmdbuf *cs,
-		   bool predicated,
 		   uint64_t va, uint32_t ref,
 		   uint32_t mask)
 {
-	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, predicated));
+	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
 	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
 	radeon_emit(cs, va);
 	radeon_emit(cs, va >> 32);
@@ -745,13 +743,12 @@ si_emit_wait_fence(struct radeon_cmdbuf *cs,
 static void
 si_emit_acquire_mem(struct radeon_cmdbuf *cs,
 		    bool is_mec,
-		    bool predicated,
 		    bool is_gfx9,
 		    unsigned cp_coher_cntl)
 {
 	if (is_mec || is_gfx9) {
 		uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
-		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, predicated) |
+		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
 			    PKT3_SHADER_TYPE_S(is_mec));
 		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
 		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
@@ -761,7 +758,7 @@ si_emit_acquire_mem(struct radeon_cmdbuf *cs,
 		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
 	} else {
 		/* ACQUIRE_MEM is only required on a compute ring. */
-		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, predicated));
+		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
 		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
 		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
 		radeon_emit(cs, 0);		/* CP_COHER_BASE */
@@ -801,7 +798,6 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 		/* Necessary for DCC */
 		if (chip_class >= VI) {
 			si_cs_emit_write_event_eop(cs,
-						   false,
 						   chip_class,
 						   is_mec,
 						   V_028A90_FLUSH_AND_INV_CB_DATA_TS,
@@ -875,10 +871,10 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 		assert(flush_cnt);
 		uint32_t old_fence = (*flush_cnt)++;
-		si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags,
+		si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
 					   EOP_DATA_SEL_VALUE_32BIT, flush_va,
 					   old_fence, *flush_cnt);
-		si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
+		si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
 	}
 	/* VGT state sync */
@@ -902,7 +898,7 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 	if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
 	    (chip_class <= CIK &&
 	     (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
-		si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
+		si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
 				    cp_coher_cntl |
 				    S_0085F0_TC_ACTION_ENA(1) |
 				    S_0085F0_TCL1_ACTION_ENA(1) |
@@ -916,7 +912,7 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 			 *
 			 * WB doesn't work without NC.
 			 */
-			si_emit_acquire_mem(cs, is_mec, false,
+			si_emit_acquire_mem(cs, is_mec,
 					    chip_class >= GFX9,
 					    cp_coher_cntl |
 					    S_0301F0_TC_WB_ACTION_ENA(1) |
@@ -925,7 +921,7 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 		}
 		if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
 			si_emit_acquire_mem(cs, is_mec,
-					    false, chip_class >= GFX9,
+					    chip_class >= GFX9,
 					    cp_coher_cntl |
 					    S_0085F0_TCL1_ACTION_ENA(1));
 			cp_coher_cntl = 0;
@@ -936,7 +932,7 @@ si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
 	 * Therefore, it should be last. Done in PFP.
 	 */
 	if (cp_coher_cntl)
-		si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
+		si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);
 	if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
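
The cleanup is behavior-preserving because the flag's only consumer was the predicate bit of the PKT3 packet header, and hard-coding it to false yields exactly the dword the old code emitted whenever predicated == false. A minimal standalone sketch, assuming a PKT3 encoding along the lines of the one in Mesa's sid.h (the field layout and opcode value here are illustrative assumptions, not copied from the tree):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative type-3 (PKT3) header encoding: the predicate flag is a single
 * bit in the header dword.  Field positions and the opcode value below are
 * assumptions made for this sketch. */
#define PKT_TYPE_S(x)       (((uint32_t)(x) & 0x3u) << 30)
#define PKT_COUNT_S(x)      (((uint32_t)(x) & 0x3fffu) << 16)
#define PKT3_IT_OPCODE_S(x) (((uint32_t)(x) & 0xffu) << 8)
#define PKT3_PREDICATE(x)   ((uint32_t)(x) & 0x1u)
#define PKT3(op, count, predicate) \
	(PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))

#define PKT3_WAIT_REG_MEM 0x3c /* illustrative opcode value */

int main(void)
{
	bool predicated = false; /* what every RADV caller passed before the patch */

	uint32_t before = PKT3(PKT3_WAIT_REG_MEM, 5, predicated);
	uint32_t after  = PKT3(PKT3_WAIT_REG_MEM, 5, false);

	/* Identical header dword, so dropping the parameter changes nothing in
	 * the command streams the driver builds. */
	assert(before == after);
	printf("WAIT_REG_MEM header: 0x%08x\n", after);
	return 0;
}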