author     Matthew Nicholls <[email protected]>    2018-01-29 16:26:18 +0000
committer  Dave Airlie <[email protected]>         2018-01-31 13:37:18 +1000
commit     ef272b161e05e8216f2d1f4df5023f3aed0ae4fa
tree       7e4267a88bc19c40e8d5e0d1e7bc126ec7d99c11
parent     1ea9efd2f8892cc238b12ec3f329e8322a9e5d2f
radv: remove predication on cache flushes
This could lead to a situation where cache flushes were conditionally
disabled while flush_bits was still cleared, and thus flushes required by
application pipeline barriers might never be executed.
Fixes: a6c2001ace ("radv: add support for cmd predication.")
Signed-off-by: Dave Airlie <[email protected]>
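
To illustrate the hazard described above, here is a minimal, hypothetical C sketch (simplified names; emit_flush_packets() stands in for the EVENT_WRITE/ACQUIRE_MEM emission performed by si_cs_emit_cache_flush(), and struct cmd_buffer is not the real radv command-buffer state): when the flush packets carry the predicate bit, the CP may drop them, yet the CPU-side flush_bits bookkeeping is cleared regardless.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified sketch of the bug -- not the actual radv code.
 * emit_flush_packets() stands in for the EVENT_WRITE/ACQUIRE_MEM packets
 * emitted by si_cs_emit_cache_flush(). */

struct cmd_buffer {
        bool     predicating; /* command predication currently enabled */
        uint32_t flush_bits;  /* caches the driver still needs to flush */
};

static void emit_flush_packets(struct cmd_buffer *cmd, bool predicated)
{
        /* With predicated == true the CP may skip these packets entirely
         * whenever the predicate evaluates to "discard". */
        printf("flush packets emitted (predicated=%d)\n", predicated);
}

static void emit_cache_flush(struct cmd_buffer *cmd)
{
        /* Before the patch, the predicate bit came from command-buffer
         * state, so the GPU was allowed to drop every flush packet... */
        emit_flush_packets(cmd, cmd->predicating);

        /* ...but the CPU-side bookkeeping is cleared unconditionally, so a
         * later pipeline barrier sees nothing left to flush and emits no
         * packets, even though the GPU may have skipped the ones above. */
        cmd->flush_bits = 0;
}

int main(void)
{
        struct cmd_buffer cmd = { .predicating = true, .flush_bits = 0x3 };
        emit_cache_flush(&cmd);
        /* flush_bits is now 0 even if the GPU skipped the flush packets. */
        printf("flush_bits after emit: 0x%x\n", (unsigned)cmd.flush_bits);
        return 0;
}

With the patch below applied, the flush packets are always emitted unpredicated (the predicated argument is removed and the packets hard-code 0/false), so clearing flush_bits afterwards is safe.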
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c |  2
-rw-r--r--  src/amd/vulkan/radv_device.c     |  2
-rw-r--r--  src/amd/vulkan/radv_private.h    |  1
-rw-r--r--  src/amd/vulkan/si_cmd_buffer.c   | 26
4 files changed, 13 insertions, 18 deletions
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 1280a186525..48fe09da53b 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -446,7 +446,7 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
         }
 
         /* Force wait for graphics or compute engines to be idle. */
-        si_cs_emit_cache_flush(cmd_buffer->cs, false,
+        si_cs_emit_cache_flush(cmd_buffer->cs,
                                cmd_buffer->device->physical_device->rad_info.chip_class,
                                ptr, va,
                                radv_cmd_buffer_uses_mec(cmd_buffer),
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 2ce667fd212..aea723cfbcd 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -1771,7 +1771,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
 
                 if (i == 0) {
                         si_cs_emit_cache_flush(cs,
-                                               false,
                                                queue->device->physical_device->rad_info.chip_class,
                                                NULL, 0,
                                                queue->queue_family_index == RING_COMPUTE &&
@@ -1783,7 +1782,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                        RADV_CMD_FLAG_INV_GLOBAL_L2);
                 } else if (i == 1) {
                         si_cs_emit_cache_flush(cs,
-                                               false,
                                                queue->device->physical_device->rad_info.chip_class,
                                                NULL, 0,
                                                queue->queue_family_index == RING_COMPUTE &&
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 503881fc83b..6239e331839 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -1021,7 +1021,6 @@ void si_emit_wait_fence(struct radeon_winsys_cs *cs,
                         uint64_t va, uint32_t ref,
                         uint32_t mask);
 void si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
-                            bool predicated,
                             enum chip_class chip_class,
                             uint32_t *fence_ptr,
                             uint64_t va, bool is_mec,
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index d9c78bf1170..06e8442100a 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -917,7 +917,6 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs,
 
 void
 si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
-                       bool predicated,
                        enum chip_class chip_class,
                        uint32_t *flush_cnt,
                        uint64_t flush_va,
@@ -948,7 +947,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                 /* Necessary for DCC */
                 if (chip_class >= VI) {
                         si_cs_emit_write_event_eop(cs,
-                                                   predicated,
+                                                   false,
                                                    chip_class,
                                                    is_mec,
                                                    V_028A90_FLUSH_AND_INV_CB_DATA_TS,
@@ -962,12 +961,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
         }
 
         if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
-                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
         }
 
         if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
-                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
         }
 
@@ -980,7 +979,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
         }
 
         if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
-                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         }
 
@@ -1037,14 +1036,14 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                 assert(flush_cnt);
                 uint32_t old_fence = (*flush_cnt)++;
 
-                si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1,
+                si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags, 1,
                                            flush_va, old_fence, *flush_cnt);
-                si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff);
+                si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
         }
 
         /* VGT state sync */
         if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
-                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
         }
 
@@ -1057,13 +1056,13 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                         RADV_CMD_FLAG_INV_GLOBAL_L2 |
                         RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
             !is_mec) {
-                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated));
+                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                 radeon_emit(cs, 0);
         }
 
         if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
             (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
-                si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9,
+                si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
                                     cp_coher_cntl |
                                     S_0085F0_TC_ACTION_ENA(1) |
                                     S_0085F0_TCL1_ACTION_ENA(1) |
@@ -1077,7 +1076,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                  *
                  * WB doesn't work without NC.
                  */
-                si_emit_acquire_mem(cs, is_mec, predicated,
+                si_emit_acquire_mem(cs, is_mec, false,
                                     chip_class >= GFX9,
                                     cp_coher_cntl |
                                     S_0301F0_TC_WB_ACTION_ENA(1) |
@@ -1086,7 +1085,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
         }
         if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
                 si_emit_acquire_mem(cs, is_mec,
-                                    predicated, chip_class >= GFX9,
+                                    false, chip_class >= GFX9,
                                     cp_coher_cntl |
                                     S_0085F0_TCL1_ACTION_ENA(1));
                 cp_coher_cntl = 0;
@@ -1097,7 +1096,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
          * Therefore, it should be last. Done in PFP.
          */
         if (cp_coher_cntl)
-                si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl);
+                si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
 }
 
 void
@@ -1127,7 +1126,6 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
                 ptr = &cmd_buffer->gfx9_fence_idx;
         }
         si_cs_emit_cache_flush(cmd_buffer->cs,
-                               cmd_buffer->state.predicating,
                                cmd_buffer->device->physical_device->rad_info.chip_class,
                                ptr, va,
                                radv_cmd_buffer_uses_mec(cmd_buffer),