diff options
author | Iago Toral Quiroga <[email protected]> | 2018-04-26 10:07:56 +0200 |
---|---|---|
committer | Iago Toral Quiroga <[email protected]> | 2018-05-03 11:40:25 +0200 |
commit | b11e9425df24f8c07a4cda85717407726f2d5330 (patch) | |
tree | 6c0e076b7758d0face0100dcad6571fdab13ce24 /src/intel/compiler/brw_nir.c | |
parent | b9a3d8c23e53b261ec626c13f0f0f6858f919371 (diff) |
intel/compiler: lower some 16-bit integer operations to 32-bit
These operations (integer division, modulo and remainder) are not supported in hardware for 16-bit integers.
We do the lowering pass after the optimization loop to ensure that we
lower ALU operations injected by algebraic optimizations too.
Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r-- | src/intel/compiler/brw_nir.c | 21 |
1 file changed, 21 insertions, 0 deletions
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c index 9998c59586e..97d28db88e1 100644 --- a/src/intel/compiler/brw_nir.c +++ b/src/intel/compiler/brw_nir.c @@ -592,6 +592,25 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler, return nir; } +/* Callback for nir_lower_bit_size(): for a 16-bit ALU instruction whose + * opcode is one of the integer division/modulo/remainder family (which the + * hardware lacks at 16 bits), return 32 to request lowering to a 32-bit + * operation; return 0 to leave the instruction untouched. + */ +static unsigned +lower_bit_size_callback(const nir_alu_instr *alu, void *data) +{ + assert(alu->dest.dest.is_ssa); + if (alu->dest.dest.ssa.bit_size != 16) + return 0; + + switch (alu->op) { + case nir_op_idiv: + case nir_op_imod: + case nir_op_irem: + case nir_op_udiv: + case nir_op_umod: + return 32; + default: + return 0; + } +} + /* Does some simple lowering and runs the standard suite of optimizations * * This is intended to be called more-or-less directly after you get the @@ -645,6 +664,8 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir) nir = brw_nir_optimize(nir, compiler, is_scalar); + nir_lower_bit_size(nir, lower_bit_size_callback, NULL); + if (is_scalar) { OPT(nir_lower_load_const_to_scalar); } |