author    Iago Toral Quiroga <[email protected]>       2018-07-17 09:02:27 +0200
committer Juan A. Suarez Romero <[email protected]>    2019-04-18 11:05:18 +0200
commit    092b14777433bbcd6735b45379dbdbd403500340 (patch)
tree      1312eba9ebc9dae2a3e886dded47a6588e8545c7 /src/intel
parent    472244b374a953fc3a8953a722fdab746aef0676 (diff)
intel/compiler: rework conversion opcodes
Now that we have the regioning lowering pass we can put all of these opcodes together in a single block, and simply assert on the few conversion instructions that are not supported in hardware and should have been lowered by brw_nir_lower_conversions.

The only cases we still handle separately are the conversions from float to half-float, since the rounding variants need to fall through into them (we already do the same for the boolean opcodes, which need to negate first), and there is also a large comment about these opcodes that we probably want to keep, so it is easier to leave them separate.

Suggested-by: Jason Ekstrand <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
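For context, here is a minimal standalone sketch (not Mesa API; the helper name and the 32-bit intermediate step are assumptions for illustration) of the invariant the new asserts rely on: the hardware cannot convert directly between a 64-bit type and an 8-bit or half-float type, so brw_nir_lower_conversions is expected to have split any such conversion into two steps before the backend sees it.

    #include <cstdint>

    /* Hypothetical helper, not part of Mesa: converts a 64-bit float to an
     * 8-bit integer in two steps, mirroring the kind of split that
     * brw_nir_lower_conversions is expected to perform so the backend only
     * ever has to emit conversions it can express as a single MOV. */
    static int8_t convert_f64_to_i8(double src)
    {
       int32_t tmp = static_cast<int32_t>(src);  /* 64-bit float -> 32-bit int */
       return static_cast<int8_t>(tmp);          /* 32-bit int   -> 8-bit int  */
    }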
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp  41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 0a4ccc50c09..a1b4a96c625 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -876,7 +876,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
brw_imm_d(brw_rnd_mode_from_nir_op(instr->op)));
/* fallthrough */
-
+ case nir_op_f2f16:
/* In theory, it would be better to use BRW_OPCODE_F32TO16. Depending
* on the HW gen, it is a special hw opcode or just a MOV, and
* brw_F32TO16 (at brw_eu_emit) would do the work to chose.
@@ -886,23 +886,11 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
* only for gen8+, it will be better to use directly the MOV, and use
* BRW_OPCODE_F32TO16 when/if we work for HF support on gen7.
*/
-
- case nir_op_f2f16:
- case nir_op_i2f16:
- case nir_op_u2f16:
assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
inst = bld.MOV(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
- case nir_op_f2f64:
- case nir_op_f2i64:
- case nir_op_f2u64:
- assert(type_sz(op[0].type) > 2); /* brw_nir_lower_conversions */
- inst = bld.MOV(result, op[0]);
- inst->saturate = instr->dest.saturate;
- break;
-
case nir_op_b2i8:
case nir_op_b2i16:
case nir_op_b2i32:
@@ -919,19 +907,34 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
case nir_op_i2i64:
case nir_op_u2f64:
case nir_op_u2u64:
- assert(type_sz(op[0].type) > 1); /* brw_nir_lower_conversions */
- /* fallthrough */
+ case nir_op_f2f64:
+ case nir_op_f2i64:
+ case nir_op_f2u64:
+ case nir_op_i2i32:
+ case nir_op_u2u32:
case nir_op_f2f32:
case nir_op_f2i32:
case nir_op_f2u32:
- case nir_op_f2i16:
- case nir_op_f2u16:
- case nir_op_i2i32:
- case nir_op_u2u32:
+ case nir_op_i2f16:
case nir_op_i2i16:
+ case nir_op_u2f16:
case nir_op_u2u16:
+ case nir_op_f2i16:
+ case nir_op_f2u16:
case nir_op_i2i8:
case nir_op_u2u8:
+ case nir_op_f2i8:
+ case nir_op_f2u8:
+ if (result.type == BRW_REGISTER_TYPE_B ||
+ result.type == BRW_REGISTER_TYPE_UB ||
+ result.type == BRW_REGISTER_TYPE_HF)
+ assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
+
+ if (op[0].type == BRW_REGISTER_TYPE_B ||
+ op[0].type == BRW_REGISTER_TYPE_UB ||
+ op[0].type == BRW_REGISTER_TYPE_HF)
+ assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
+
inst = bld.MOV(result, op[0]);
inst->saturate = instr->dest.saturate;
break;