author     Jason Ekstrand <[email protected]>    2020-04-27 01:54:40 -0500
committer  Marge Bot <[email protected]>         2020-04-28 22:55:25 +0000
commit     cb9292091b162e0dd7d5069646e94d03e112e3ee (patch)
tree       ffcada9c7e7ad4f9e5ea1b4aa03e382d47baf7b1 /src/compiler
parent     ed677171675fe8ee204deac1e2089f480681b1b4 (diff)
nir/dead_write_vars: Handle volatile
We can't remove volatile writes, and we can't combine them with other writes,
so all we can do is clear the unused bits.
Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4767>
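For context, here is a minimal sketch of the store pattern this change targets, written with the same builder helpers the new tests use. It is illustrative only and not part of the commit: the nir_builder *b and the SSBO variable v are assumed to come from a test fixture such as nir_dead_write_vars_test, and the helper name is made up.

#include "nir.h"
#include "nir_builder.h"

/* Illustrative fragment (not from the commit): before this change the pass
 * treated all three stores as ordinary writes and could delete the first two
 * as dead; with this change the volatile store is never a removal candidate
 * and is also treated as a read of v, so all three stores survive.
 */
static void
emit_volatile_write_pattern(nir_builder *b, nir_variable *v)
{
   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);          /* kept: the volatile store below now counts as a read */
   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1); /* kept: volatile writes are never removed or combined */
   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);          /* kept: last write to v */
}

Running nir_opt_dead_write_vars() on a shader containing this pattern is expected to report no progress, which is exactly what the new volatile_write test below asserts.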
Diffstat (limited to 'src/compiler')
-rw-r--r--   src/compiler/nir/nir_opt_dead_write_vars.c   19
-rw-r--r--   src/compiler/nir/tests/vars_tests.cpp        42
2 files changed, 61 insertions, 0 deletions
diff --git a/src/compiler/nir/nir_opt_dead_write_vars.c b/src/compiler/nir/nir_opt_dead_write_vars.c
index 6bd20db07ca..5a8e2ffaaa0 100644
--- a/src/compiler/nir/nir_opt_dead_write_vars.c
+++ b/src/compiler/nir/nir_opt_dead_write_vars.c
@@ -176,6 +176,19 @@ remove_dead_write_vars_local(void *mem_ctx, nir_block *block)
 
       case nir_intrinsic_store_deref: {
          nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+
+         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
+            /* Consider a volatile write to also be a sort of read. This
+             * prevents us from deleting a non-volatile write just before a
+             * volatile write thanks to a non-volatile write afterwards. It's
+             * quite the corner case, but this should be safer and more
+             * predictable for the programmer than allowing two non-volatile
+             * writes to be combined with a volatile write between them.
+             */
+            clear_unused_for_read(&unused_writes, dst);
+            break;
+         }
+
          nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
          progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
          break;
@@ -185,6 +198,12 @@ remove_dead_write_vars_local(void *mem_ctx, nir_block *block)
          nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
          nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
 
+         if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
+            clear_unused_for_read(&unused_writes, src);
+            clear_unused_for_read(&unused_writes, dst);
+            break;
+         }
+
          /* Self-copy is removed. */
          if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
             nir_instr_remove(instr);
diff --git a/src/compiler/nir/tests/vars_tests.cpp b/src/compiler/nir/tests/vars_tests.cpp
index c1b82791e7e..6079eb7d4e6 100644
--- a/src/compiler/nir/tests/vars_tests.cpp
+++ b/src/compiler/nir/tests/vars_tests.cpp
@@ -1345,6 +1345,48 @@ TEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
    ASSERT_FALSE(progress);
 }
 
+TEST_F(nir_dead_write_vars_test, volatile_write)
+{
+   nir_variable *v = create_int(nir_var_mem_ssbo, "v");
+
+   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);
+   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);
+   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);
+
+   /* Our approach here is a bit scorched-earth. We expect the volatile store
+    * in the middle to cause both that store and the one before it to be kept.
+    * Technically, volatile only prevents combining the volatile store with
+    * another store and one could argue that the store before the volatile and
+    * the one after it could be combined. However, it seems safer to just
+    * treat a volatile store like an atomic and prevent any combining across
+    * it.
+    */
+   bool progress = nir_opt_dead_write_vars(b->shader);
+   ASSERT_FALSE(progress);
+}
+
+TEST_F(nir_dead_write_vars_test, volatile_copies)
+{
+   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);
+
+   nir_copy_var(b, v[0], v[1]);
+   nir_copy_deref_with_access(b, nir_build_deref_var(b, v[0]),
+                              nir_build_deref_var(b, v[1]),
+                              ACCESS_VOLATILE, (gl_access_qualifier)0);
+   nir_copy_var(b, v[0], v[1]);
+
+   /* Our approach here is a bit scorched-earth. We expect the volatile store
+    * in the middle to cause both that store and the one before it to be kept.
+    * Technically, volatile only prevents combining the volatile store with
+    * another store and one could argue that the store before the volatile and
+    * the one after it could be combined. However, it seems safer to just
+    * treat a volatile store like an atomic and prevent any combining across
+    * it.
+    */
+   bool progress = nir_opt_dead_write_vars(b->shader);
+   ASSERT_FALSE(progress);
+}
+
 TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
 {
    nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);
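A note on how the pass is typically driven: the sketch below shows a hypothetical optimization loop. The loop structure and the companion passes chosen here (nir_opt_copy_prop_vars, nir_opt_dce) are assumptions for illustration; only nir_opt_dead_write_vars() is the pass touched by this commit. With this change, volatile stores and volatile copies now survive every iteration of such a loop.

#include "nir.h"

/* Hypothetical caller (not from the commit): iterate the var-related passes
 * to a fixed point. NIR_PASS() ORs each pass's return value into progress. */
static void
optimize_vars(nir_shader *shader)
{
   bool progress = true;
   while (progress) {
      progress = false;
      NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
      NIR_PASS(progress, shader, nir_opt_dead_write_vars);
      NIR_PASS(progress, shader, nir_opt_dce);
   }
}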