| author | Matt Turner <[email protected]> | 2013-08-03 11:02:59 -0700 |
|---|---|---|
| committer | Matt Turner <[email protected]> | 2013-09-17 16:59:23 -0700 |
| commit | d0b8ea60b7999d50c0f136b1170b1ddb4688d796 (patch) | |
| tree | 3fd9ec5dc6042c83e08f3af3bb339b95ca1fb40a /src/glsl/lower_instructions.cpp | |
| parent | 5561251b58c976a70125bb07dc1c6cc2bd2541f4 (diff) | |
glsl: Add ldexp_to_arith lowering pass.
Reviewed-by: Paul Berry <[email protected]>
Diffstat (limited to 'src/glsl/lower_instructions.cpp')
| -rw-r--r-- | src/glsl/lower_instructions.cpp | 128 |
1 file changed, 128 insertions, 0 deletions
diff --git a/src/glsl/lower_instructions.cpp b/src/glsl/lower_instructions.cpp
index d32ec80d6bb..cb530489593 100644
--- a/src/glsl/lower_instructions.cpp
+++ b/src/glsl/lower_instructions.cpp
@@ -37,6 +37,7 @@
  * - POW_TO_EXP2
  * - LOG_TO_LOG2
  * - MOD_TO_FRACT
+ * - LDEXP_TO_ARITH
  * - LRP_TO_ARITH
  * - BITFIELD_INSERT_TO_BFM_BFI
  *
@@ -82,6 +83,10 @@
  * if we have to break it down like this anyway, it gives an
  * opportunity to do things like constant fold the (1.0 / op1) easily.
  *
+ * LDEXP_TO_ARITH:
+ * -------------
+ * Converts ir_binop_ldexp to arithmetic and bit operations.
+ *
  * LRP_TO_ARITH:
  * -------------
  * Converts ir_triop_lrp to (op0 * (1.0f - op2)) + (op1 * op2).
@@ -125,6 +130,7 @@ private:
    void log_to_log2(ir_expression *);
    void lrp_to_arith(ir_expression *);
    void bitfield_insert_to_bfm_bfi(ir_expression *);
+   void ldexp_to_arith(ir_expression *);
 };
 
 /**
@@ -332,6 +338,123 @@ lower_instructions_visitor::bitfield_insert_to_bfm_bfi(ir_expression *ir)
    this->progress = true;
 }
 
+void
+lower_instructions_visitor::ldexp_to_arith(ir_expression *ir)
+{
+   /* Translates
+    *    ir_binop_ldexp x exp
+    * into
+    *
+    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
+    *    resulting_biased_exp = extracted_biased_exp + exp;
+    *
+    *    if (resulting_biased_exp < 1) {
+    *       return copysign(0.0, x);
+    *    }
+    *
+    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
+    *                       lshift(i2u(resulting_biased_exp), exp_shift));
+    *
+    * which we can't actually implement as such, since the GLSL IR doesn't
+    * have vectorized if-statements. We actually implement it without branches
+    * using conditional-select:
+    *
+    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
+    *    resulting_biased_exp = extracted_biased_exp + exp;
+    *
+    *    is_not_zero_or_underflow = gequal(resulting_biased_exp, 1);
+    *    x = csel(is_not_zero_or_underflow, x, copysign(0.0f, x));
+    *    resulting_biased_exp = csel(is_not_zero_or_underflow,
+    *                                resulting_biased_exp, 0);
+    *
+    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
+    *                       lshift(i2u(resulting_biased_exp), exp_shift));
+    */
+
+   const unsigned vec_elem = ir->type->vector_elements;
+
+   /* Types */
+   const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
+   const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+
+   /* Constants */
+   ir_constant *zeroi = ir_constant::zero(ir, ivec);
+   ir_constant *zerof = ir_constant::zero(ir, ir->type);
+
+   ir_constant *sign_mantissa_mask = new(ir) ir_constant(0x807fffffu, vec_elem);
+   ir_constant *sign_mask = new(ir) ir_constant(0x80000000u, vec_elem);
+
+   ir_constant *exp_shift = new(ir) ir_constant(23u, vec_elem);
+
+   /* Temporary variables */
+   ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
+   ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);
+
+   ir_variable *zero_sign_x = new(ir) ir_variable(ir->type, "zero_sign_x",
+                                                  ir_var_temporary);
+
+   ir_variable *extracted_biased_exp =
+      new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
+   ir_variable *resulting_biased_exp =
+      new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);
+
+   ir_variable *is_not_zero_or_underflow =
+      new(ir) ir_variable(bvec, "is_not_zero_or_underflow", ir_var_temporary);
+
+   ir_instruction &i = *base_ir;
+
+   /* Copy <x> and <exp> arguments. */
+   i.insert_before(x);
+   i.insert_before(assign(x, ir->operands[0]));
+   i.insert_before(exp);
+   i.insert_before(assign(exp, ir->operands[1]));
+
+   /* Extract the biased exponent from <x>. */
+   i.insert_before(extracted_biased_exp);
+   i.insert_before(assign(extracted_biased_exp,
+                          rshift(bitcast_f2i(abs(x)), exp_shift)));
+
+   i.insert_before(resulting_biased_exp);
+   i.insert_before(assign(resulting_biased_exp,
+                          add(extracted_biased_exp, exp)));
+
+   /* Test if result is ±0.0, subnormal, or underflow by checking if the
+    * resulting biased exponent would be less than 0x1. If so, the result is
+    * 0.0 with the sign of x. (Actually, invert the conditions so that
+    * immediate values are the second arguments, which is better for i965)
+    */
+   i.insert_before(zero_sign_x);
+   i.insert_before(assign(zero_sign_x,
+                          bitcast_u2f(bit_or(bit_and(bitcast_f2u(x), sign_mask),
+                                             bitcast_f2u(zerof)))));
+
+   i.insert_before(is_not_zero_or_underflow);
+   i.insert_before(assign(is_not_zero_or_underflow,
+                          gequal(resulting_biased_exp,
+                                 new(ir) ir_constant(0x1, vec_elem))));
+   i.insert_before(assign(x, csel(is_not_zero_or_underflow,
+                                  x, zero_sign_x)));
+   i.insert_before(assign(resulting_biased_exp,
+                          csel(is_not_zero_or_underflow,
+                               resulting_biased_exp, zeroi)));
+
+   /* We could test for overflows by checking if the resulting biased exponent
+    * would be greater than 0xFE. Turns out we don't need to because the GLSL
+    * spec says:
+    *
+    *    "If this product is too large to be represented in the
+    *     floating-point type, the result is undefined."
+    */
+
+   ir_constant *exp_shift_clone = exp_shift->clone(ir, NULL);
+   ir->operation = ir_unop_bitcast_u2f;
+   ir->operands[0] = bit_or(bit_and(bitcast_f2u(x), sign_mantissa_mask),
+                            lshift(i2u(resulting_biased_exp), exp_shift_clone));
+   ir->operands[1] = NULL;
+
+   this->progress = true;
+}
+
 ir_visitor_status
 lower_instructions_visitor::visit_leave(ir_expression *ir)
 {
@@ -378,6 +501,11 @@ lower_instructions_visitor::visit_leave(ir_expression *ir)
       bitfield_insert_to_bfm_bfi(ir);
       break;
 
+   case ir_binop_ldexp:
+      if (lowering(LDEXP_TO_ARITH))
+         ldexp_to_arith(ir);
+      break;
+
    default:
       return visit_continue;
    }
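The core of the lowering is plain IEEE-754 exponent surgery, so the same idea can be shown outside the IR builder. Below is a minimal scalar C++ sketch of that approach, assuming 32-bit single-precision floats; the function name `ldexp_bits` and the standalone structure are illustrative only and are not part of the Mesa source:

```cpp
// Illustrative sketch (not Mesa code): scalar equivalent of the bit-level
// ldexp the lowering pass emits as GLSL IR.
#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>

static float ldexp_bits(float x, int exp)
{
   uint32_t bits;
   std::memcpy(&bits, &x, sizeof bits);              // bitcast_f2u(x)

   // Biased exponent of |x| lives in bits 30..23 of an IEEE-754 float.
   int extracted_biased_exp = (bits & 0x7fffffffu) >> 23;
   int resulting_biased_exp = extracted_biased_exp + exp;

   // Branch-free underflow handling, mirroring csel() in the pass: if the
   // new biased exponent would be < 1, the result is 0.0 with x's sign.
   bool is_not_zero_or_underflow = resulting_biased_exp >= 1;
   uint32_t zero_sign_x = bits & 0x80000000u;        // copysign(0.0f, x)
   bits = is_not_zero_or_underflow ? bits : zero_sign_x;
   resulting_biased_exp = is_not_zero_or_underflow ? resulting_biased_exp : 0;

   // Keep the sign and mantissa, splice in the new exponent. Overflow is
   // left unchecked, as the GLSL spec makes that case undefined.
   uint32_t out = (bits & 0x807fffffu) |
                  ((uint32_t)resulting_biased_exp << 23);

   float result;
   std::memcpy(&result, &out, sizeof result);        // bitcast_u2f
   return result;
}

int main()
{
   // Matches std::ldexp for normal inputs and results, e.g. 1.5 * 2^4 = 24.
   std::printf("%g %g\n", ldexp_bits(1.5f, 4), std::ldexp(1.5f, 4));
}
```

Like the pass, the sketch only folds the underflow case to a signed zero via a select and does not special-case overflow; in the real code, backends get this behaviour by setting the LDEXP_TO_ARITH flag that `lowering()` checks in `visit_leave()`.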