author     Ian Romanick <[email protected]>  2019-05-20 11:22:12 -0700
committer  Ian Romanick <[email protected]>  2019-05-31 08:47:03 -0700
commit     3ee2e84c608a022d2b83fb6c7f4fa5b8a33fa10c (patch)
tree       a8195a365bb33e80a6d47661f7eacc7aab93ea51 /src
parent     336eab063009ebc96c3625ef1b7cffb2501448ce (diff)
nir: Rematerialize compare instructions
On some architectures, Boolean values used to control conditional
branches or conditional selection must be propagated into a flag. This
generally means that a stored Boolean value must be compared with zero.
Rather than force the generation of extra compares with zero, re-emit
the original comparison instruction. This can reduce register pressure,
since the Boolean value no longer needs to be stored.
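
As a rough illustration (not part of this commit), the shape of NIR the pass
looks for can be constructed with nir_builder roughly as below. The helper
and its arguments are hypothetical; only the pattern matters: a two-source
comparison whose sole use is the condition of a bcsel in a later block.

    #include "nir.h"
    #include "nir/nir_builder.h"

    /* Hypothetical helper: builds a comparison in one block whose only use
     * is the condition of a bcsel in a later block.  After this pass, a
     * clone of the flt is emitted immediately before the bcsel, so a
     * flag-based backend can generate the compare directly instead of
     * reloading a stored Boolean and comparing it with zero. */
    static nir_ssa_def *
    build_example(nir_builder *b, nir_ssa_def *a, nir_ssa_def *c,
                  nir_ssa_def *x, nir_ssa_def *y)
    {
       nir_ssa_def *cmp = nir_flt(b, a, c);   /* defined in this block */

       /* Some intervening control flow puts the use in a different block. */
       nir_push_if(b, nir_imm_true(b));
       nir_pop_if(b, NULL);

       return nir_bcsel(b, cmp, x, y);        /* only use: bcsel condition */
    }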
There are several possible areas of future improvement for this pass:
1. Be more conservative. If both sources to the comparison instruction
are non-constants, it may be better for register pressure to emit the
extra compare. The current shader-db results on Intel GPUs (next
commit) lead me to believe that this is not currently a problem.
2. Be less conservative. Currently the pass requires that all users of
the comparison match the pattern. The idea is that after the pass is
complete, no instruction will use the resulting Boolean value. The only
uses will be of the flag value. It may be beneficial to relax this
requirement in some cases.
3. Be less conservative. Also try to rematerialize comparisons used for
discard_if intrinsics. After changing the way the Intel compiler
generates code for discard_if (see MR!935), I tried implementing this
already. The changes were pretty small. Instructions were helped in 19
shaders, but, overall, cycles were hurt. A commit "nir: Rematerialize
comparisons for nir_intrinsic_discard_if too" is on my fd.o cgit.
4. Copy the preceding ALU instruction. If the comparison is a
comparison with zero, and it is the only user of a particular ALU
instruction (e.g., (a+b) != 0.0), it may be a further improvement to also
copy the preceding ALU instruction. On Intel GPUs, this may enable
cmod propagation to make additional progress.
v2: Use a much simpler method to get the prev_block for an if-statement.
Suggested by Tim.
Reviewed-by: Matt Turner <[email protected]>
Diffstat (limited to 'src')
-rw-r--r--  src/compiler/Makefile.sources                      |   1
-rw-r--r--  src/compiler/nir/meson.build                       |   1
-rw-r--r--  src/compiler/nir/nir.h                             |   2
-rw-r--r--  src/compiler/nir/nir_opt_rematerialize_compares.c  | 181
4 files changed, 185 insertions, 0 deletions
diff --git a/src/compiler/Makefile.sources b/src/compiler/Makefile.sources
index 1b6dc25f1ed..0708b256feb 100644
--- a/src/compiler/Makefile.sources
+++ b/src/compiler/Makefile.sources
@@ -306,6 +306,7 @@ NIR_FILES = \
 	nir/nir_opt_move_comparisons.c \
 	nir/nir_opt_move_load_ubo.c \
 	nir/nir_opt_peephole_select.c \
+	nir/nir_opt_rematerialize_compares.c \
 	nir/nir_opt_remove_phis.c \
 	nir/nir_opt_shrink_load.c \
 	nir/nir_opt_trivial_continues.c \
diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
index 69de4121a73..ad380e172f1 100644
--- a/src/compiler/nir/meson.build
+++ b/src/compiler/nir/meson.build
@@ -187,6 +187,7 @@ files_libnir = files(
   'nir_opt_move_comparisons.c',
   'nir_opt_move_load_ubo.c',
   'nir_opt_peephole_select.c',
+  'nir_opt_rematerialize_compares.c',
   'nir_opt_remove_phis.c',
   'nir_opt_shrink_load.c',
   'nir_opt_trivial_continues.c',
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 0ed451b9d1a..9307b93b6b6 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3570,6 +3570,8 @@ bool nir_opt_move_load_ubo(nir_shader *shader);
 bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
                              bool indirect_load_ok, bool expensive_alu_ok);
 
+bool nir_opt_rematerialize_compares(nir_shader *shader);
+
 bool nir_opt_remove_phis(nir_shader *shader);
 
 bool nir_opt_shrink_load(nir_shader *shader);
diff --git a/src/compiler/nir/nir_opt_rematerialize_compares.c b/src/compiler/nir/nir_opt_rematerialize_compares.c
new file mode 100644
index 00000000000..806dbd2f29a
--- /dev/null
+++ b/src/compiler/nir/nir_opt_rematerialize_compares.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+#include "nir/nir_builder.h"
+#include "nir_constant_expressions.h"
+#include "nir_control_flow.h"
+#include "nir_loop_analyze.h"
+
+static bool
+is_two_src_comparison(const nir_alu_instr *instr)
+{
+   switch (instr->op) {
+   case nir_op_flt:
+   case nir_op_flt32:
+   case nir_op_fge:
+   case nir_op_fge32:
+   case nir_op_feq:
+   case nir_op_feq32:
+   case nir_op_fne:
+   case nir_op_fne32:
+   case nir_op_ilt:
+   case nir_op_ilt32:
+   case nir_op_ult:
+   case nir_op_ult32:
+   case nir_op_ige:
+   case nir_op_ige32:
+   case nir_op_uge:
+   case nir_op_uge32:
+   case nir_op_ieq:
+   case nir_op_ieq32:
+   case nir_op_ine:
+   case nir_op_ine32:
+      return true;
+   default:
+      return false;
+   }
+}
+
+static bool
+all_uses_are_bcsel(const nir_alu_instr *instr)
+{
+   if (!instr->dest.dest.is_ssa)
+      return false;
+
+   nir_foreach_use(use, &instr->dest.dest.ssa) {
+      if (use->parent_instr->type != nir_instr_type_alu)
+         return false;
+
+      nir_alu_instr *const alu = nir_instr_as_alu(use->parent_instr);
+      if (alu->op != nir_op_bcsel &&
+          alu->op != nir_op_b32csel)
+         return false;
+
+      /* Not only must the result be used by a bcsel, but it must be used as
+       * the first source (the condition).
+       */
+      if (alu->src[0].src.ssa != &instr->dest.dest.ssa)
+         return false;
+   }
+
+   return true;
+}
+
+static bool
+nir_opt_rematerialize_compares_impl(nir_shader *shader, nir_function_impl *impl)
+{
+   bool progress = false;
+
+   nir_foreach_block(block, impl) {
+      nir_foreach_instr(instr, block) {
+         if (instr->type != nir_instr_type_alu)
+            continue;
+
+         nir_alu_instr *const alu = nir_instr_as_alu(instr);
+         if (!is_two_src_comparison(alu))
+            continue;
+
+         if (!all_uses_are_bcsel(alu))
+            continue;
+
+         /* At this point it is known that alu is a comparison instruction
+          * that is only used by nir_op_bcsel and possibly by if-statements
+          * (though the latter has not been explicitly checked).
+          *
+          * Iterate through each use of the comparison. For every use (or use
+          * by an if-statement) that is in a different block, emit a copy of
+          * the comparison. Care must be taken here. The original
+          * instruction must be duplicated only once in each block because CSE
+          * cannot be run after this pass.
+          */
+         nir_foreach_use_safe(use, &alu->dest.dest.ssa) {
+            nir_instr *const use_instr = use->parent_instr;
+
+            /* If the use is in the same block as the def, don't
+             * rematerialize.
+             */
+            if (use_instr->block == alu->instr.block)
+               continue;
+
+            nir_alu_instr *clone = nir_alu_instr_clone(shader, alu);
+
+            nir_instr_insert_before(use_instr, &clone->instr);
+
+            nir_alu_instr *const use_alu = nir_instr_as_alu(use_instr);
+            for (unsigned i = 0; i < nir_op_infos[use_alu->op].num_inputs; i++) {
+               if (use_alu->src[i].src.ssa == &alu->dest.dest.ssa) {
+                  nir_instr_rewrite_src(&use_alu->instr,
+                                        &use_alu->src[i].src,
+                                        nir_src_for_ssa(&clone->dest.dest.ssa));
+                  progress = true;
+               }
+            }
+         }
+
+         nir_foreach_if_use_safe(use, &alu->dest.dest.ssa) {
+            nir_if *const if_stmt = use->parent_if;
+
+            nir_block *const prev_block =
+               nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));
+
+            /* If the compare is from the previous block, don't
+             * rematerialize.
+             */
+            if (prev_block == alu->instr.block)
+               continue;
+
+            nir_alu_instr *clone = nir_alu_instr_clone(shader, alu);
+
+            nir_instr_insert_after_block(prev_block, &clone->instr);
+
+            nir_if_rewrite_condition(if_stmt,
+                                     nir_src_for_ssa(&clone->dest.dest.ssa));
+            progress = true;
+         }
+      }
+   }
+
+   if (progress) {
+      nir_metadata_preserve(impl, nir_metadata_block_index |
+                                  nir_metadata_dominance);
+   }
+
+   return progress;
+}
+
+bool
+nir_opt_rematerialize_compares(nir_shader *shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (function->impl == NULL)
+         continue;
+
+      progress = nir_opt_rematerialize_compares_impl(shader, function->impl)
+         || progress;
+   }
+
+   return progress;
+}
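
For context, a sketch of how a driver might enable the new pass. The
surrounding function and pass ordering are hypothetical; the only real
ordering constraint is the one noted in the pass itself, namely that CSE
must not run afterwards, since it would merge the duplicated comparisons
back into one.

    #include "nir.h"

    /* Hypothetical driver call site.  NIR_PASS is the usual helper from
     * nir.h; everything else here is illustrative only. */
    static bool
    driver_optimize_nir(nir_shader *shader)
    {
       bool progress = false;

       /* ...the driver's regular optimization loop (copy propagation, DCE,
        * CSE, ...) would run to a fixed point here... */

       /* Run after the final CSE so the cloned comparisons survive. */
       NIR_PASS(progress, shader, nir_opt_rematerialize_compares);

       return progress;
    }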