path: root/src/compiler
author    Vasily Khoruzhick <[email protected]>    2019-08-29 21:14:54 -0700
committer Vasily Khoruzhick <[email protected]>    2019-09-06 01:51:28 +0000
commit    9367d2ca37a3b8108ecb74e2875a600b5543c163 (patch)
tree      01a8c4937a0ff41d3e7bfe4d27008649f46f522e /src/compiler
parent    f9f7cbc1aa36cce6caa42c0cf58c5cbefedc19fd (diff)
nir: allow specifying filter callback in lower_alu_to_scalar
A set of opcodes doesn't have enough flexibility in certain cases. E.g. the
Utgard PP has a vector conditional select operation, but the condition is
always scalar. Lowering all vector selects to scalar increases the
instruction count, so we need a way to filter only those ops that can't be
handled by the hardware.

Reviewed-by: Qiang Yu <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
Signed-off-by: Vasily Khoruzhick <[email protected]>
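For illustration, a minimal sketch of how a driver might use the new
callback; the filter name is hypothetical, but the callback signature and
the "return true to lower" semantics follow the diff below (the pass skips
an instruction when the callback returns false):

/* Hypothetical driver-side filter: keep vector bcsel (hardware like the
 * Utgard PP can do a vector select with a scalar condition) and scalarize
 * every other vector ALU instruction. The pass only invokes the callback
 * for vector ALU instructions, so the cast is safe here. */
static bool
scalarize_filter(const nir_instr *instr, const void *data)
{
   nir_alu_instr *alu = nir_instr_as_alu((nir_instr *)instr);

   /* Returning true asks the pass to lower; false leaves the op intact. */
   return alu->op != nir_op_bcsel;
}

/* Call site: the last argument is passed back to the filter as `data`. */
nir_lower_alu_to_scalar(shader, scalarize_filter, NULL);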
Diffstat (limited to 'src/compiler')
-rw-r--r--  src/compiler/nir/nir.h                      |  2
-rw-r--r--  src/compiler/nir/nir_lower_alu_to_scalar.c  | 20
2 files changed, 16 insertions, 6 deletions
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 5149a0e8c01..bad1d6af212 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3606,7 +3606,7 @@ bool nir_lower_alu(nir_shader *shader);
bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
bool always_precise, bool have_ffma);
-bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
+bool nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_lower_int_to_float(nir_shader *shader);
diff --git a/src/compiler/nir/nir_lower_alu_to_scalar.c b/src/compiler/nir/nir_lower_alu_to_scalar.c
index b16624bd8aa..bcd92908253 100644
--- a/src/compiler/nir/nir_lower_alu_to_scalar.c
+++ b/src/compiler/nir/nir_lower_alu_to_scalar.c
@@ -24,6 +24,11 @@
#include "nir.h"
#include "nir_builder.h"
+struct alu_to_scalar_data {
+ nir_instr_filter_cb cb;
+ const void *data;
+};
+
/** @file nir_lower_alu_to_scalar.c
*
* Replaces nir_alu_instr operations with more than one channel used in the
@@ -89,9 +94,9 @@ lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
}
static nir_ssa_def *
-lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_state)
+lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data)
{
- BITSET_WORD *lower_set = _state;
+ struct alu_to_scalar_data *data = _data;
nir_alu_instr *alu = nir_instr_as_alu(instr);
unsigned num_src = nir_op_infos[alu->op].num_inputs;
unsigned i, chan;
@@ -102,7 +107,7 @@ lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_state)
b->cursor = nir_before_instr(&alu->instr);
b->exact = alu->exact;
- if (lower_set && !BITSET_TEST(lower_set, alu->op))
+ if (data->cb && !data->cb(instr, data->data))
return NULL;
#define LOWER_REDUCTION(name, chan, merge) \
@@ -246,10 +251,15 @@ lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_state)
}
bool
-nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set)
+nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *_data)
{
+ struct alu_to_scalar_data data = {
+ .cb = cb,
+ .data = _data,
+ };
+
return nir_shader_lower_instructions(shader,
inst_is_vector_alu,
lower_alu_instr_scalar,
- lower_set);
+ &data);
}
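For existing callers, a hedged before/after sketch of the call-site
migration; the fdot4 policy and helper names are illustrative, assuming the
BITSET macros from Mesa's util/bitset.h:

/* Before this change: opcodes to scalarize were flagged in a bitset. */
static void
lower_dot_products_old(nir_shader *shader)
{
   BITSET_DECLARE(lower_set, nir_num_opcodes) = {0};
   BITSET_SET(lower_set, nir_op_fdot4);
   nir_lower_alu_to_scalar(shader, lower_set);
}

/* After: the same policy expressed as a filter callback. */
static bool
lower_fdot4_cb(const nir_instr *instr, const void *data)
{
   return nir_instr_as_alu((nir_instr *)instr)->op == nir_op_fdot4;
}

static void
lower_dot_products_new(nir_shader *shader)
{
   nir_lower_alu_to_scalar(shader, lower_fdot4_cb, NULL);
}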