author	Jason Ekstrand <[email protected]>	2016-03-17 11:31:48 -0700
committer	Jason Ekstrand <[email protected]>	2016-03-23 16:28:02 -0700
commit	0dbda153aae548a4087f7364c9013583a076e0e9 (patch)
tree	9b3522cbbeccf0fb456a72e15748362c96bd793b /src/compiler/nir/nir_opt_algebraic.py
parent	ed3a029e8088cb17af073c3b5f7444cb7e2f1cfb (diff)
nir/algebraic: Flag inexact optimizations
Many of our optimizations, while great for cutting shaders down to size, aren't really precision-safe. This commit tries to flag all of the inexact floating-point optimizations so they don't get run on values that are flagged "exact". It's a bit conservative and may flag some safe optimizations as unsafe, but that's better than missing one.

Reviewed-by: Francisco Jerez <[email protected]>
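For context, the flag is the leading '~' on the outermost opcode of a search pattern: a '~'-prefixed rule is treated as inexact and is skipped when the instruction being matched is marked exact. The sketch below is a simplified, hypothetical illustration of that idea in plain Python, not the actual nir_algebraic/nir_search implementation; the names Rule, parse_rule, and match_allowed are made up for this example.

```python
import re
from collections import namedtuple

# A toy rule: 'search' is an s-expression tuple as in nir_opt_algebraic.py,
# 'inexact' records whether the outermost opcode carried a '~' prefix.
Rule = namedtuple('Rule', ['search', 'replace', 'inexact'])

_OPCODE_RE = re.compile(r'(?P<inexact>~)?(?P<opcode>\w+)')

def parse_rule(search, replace):
    """Strip a leading '~' from the outermost opcode and remember it.

    ('~fadd', a, 0.0) becomes ('fadd', a, 0.0) with inexact=True.
    """
    m = _OPCODE_RE.fullmatch(search[0])
    inexact = m.group('inexact') is not None
    stripped = (m.group('opcode'),) + search[1:]
    return Rule(stripped, replace, inexact)

def match_allowed(rule, instr_is_exact):
    """An inexact rule must not fire on an instruction marked exact."""
    return not (rule.inexact and instr_is_exact)

# Example: 'a + 0.0 == a' only holds if signed zeros and NaNs can be
# ignored, so the commit flags it inexact with '~'.
rule = parse_rule(('~fadd', 'a', 0.0), 'a')
assert rule.search[0] == 'fadd' and rule.inexact
assert match_allowed(rule, instr_is_exact=False)      # ordinary value: optimize
assert not match_allowed(rule, instr_is_exact=True)   # "exact" value: skip
```

The design choice is conservative by construction: only rules explicitly left unflagged may touch values marked exact, so forgetting a '~' can at worst suppress an optimization rather than break a precise computation.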
Diffstat (limited to 'src/compiler/nir/nir_opt_algebraic.py')
-rw-r--r--	src/compiler/nir/nir_opt_algebraic.py | 121
1 file changed, 62 insertions(+), 59 deletions(-)
diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py
index 7e3aa5aa798..53633233f2b 100644
--- a/src/compiler/nir/nir_opt_algebraic.py
+++ b/src/compiler/nir/nir_opt_algebraic.py
@@ -61,19 +61,19 @@ optimizations = [
(('fabs', ('fneg', a)), ('fabs', a)),
(('iabs', ('iabs', a)), ('iabs', a)),
(('iabs', ('ineg', a)), ('iabs', a)),
- (('fadd', a, 0.0), a),
+ (('~fadd', a, 0.0), a),
(('iadd', a, 0), a),
(('usadd_4x8', a, 0), a),
(('usadd_4x8', a, ~0), ~0),
- (('fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
+ (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
(('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
- (('fadd', ('fneg', a), a), 0.0),
+ (('~fadd', ('fneg', a), a), 0.0),
(('iadd', ('ineg', a), a), 0),
(('iadd', ('ineg', a), ('iadd', a, b)), b),
(('iadd', a, ('iadd', ('ineg', a), b)), b),
- (('fadd', ('fneg', a), ('fadd', a, b)), b),
- (('fadd', a, ('fadd', ('fneg', a), b)), b),
- (('fmul', a, 0.0), 0.0),
+ (('~fadd', ('fneg', a), ('fadd', a, b)), b),
+ (('~fadd', a, ('fadd', ('fneg', a), b)), b),
+ (('~fmul', a, 0.0), 0.0),
(('imul', a, 0), 0),
(('umul_unorm_4x8', a, 0), 0),
(('umul_unorm_4x8', a, ~0), a),
@@ -81,29 +81,29 @@ optimizations = [
(('imul', a, 1), a),
(('fmul', a, -1.0), ('fneg', a)),
(('imul', a, -1), ('ineg', a)),
- (('ffma', 0.0, a, b), b),
- (('ffma', a, 0.0, b), b),
- (('ffma', a, b, 0.0), ('fmul', a, b)),
+ (('~ffma', 0.0, a, b), b),
+ (('~ffma', a, 0.0, b), b),
+ (('~ffma', a, b, 0.0), ('fmul', a, b)),
(('ffma', a, 1.0, b), ('fadd', a, b)),
(('ffma', 1.0, a, b), ('fadd', a, b)),
- (('flrp', a, b, 0.0), a),
- (('flrp', a, b, 1.0), b),
- (('flrp', a, a, b), a),
- (('flrp', 0.0, a, b), ('fmul', a, b)),
- (('flrp', a, b, ('b2f', c)), ('bcsel', c, b, a), 'options->lower_flrp'),
+ (('~flrp', a, b, 0.0), a),
+ (('~flrp', a, b, 1.0), b),
+ (('~flrp', a, a, b), a),
+ (('~flrp', 0.0, a, b), ('fmul', a, b)),
+ (('~flrp', a, b, ('b2f', c)), ('bcsel', c, b, a), 'options->lower_flrp'),
(('flrp', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp'),
(('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
- (('fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', c)))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp'),
- (('fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c ))), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp'),
- (('fadd', a, ('fmul', ('b2f', c), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp'),
- (('fadd', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'),
+ (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', c)))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp'),
+ (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c ))), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp'),
+ (('~fadd', a, ('fmul', ('b2f', c), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp'),
+ (('~fadd', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'),
(('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
- (('fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'),
+ (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'),
# Comparison simplifications
- (('inot', ('flt', a, b)), ('fge', a, b)),
- (('inot', ('fge', a, b)), ('flt', a, b)),
- (('inot', ('feq', a, b)), ('fne', a, b)),
- (('inot', ('fne', a, b)), ('feq', a, b)),
+ (('~inot', ('flt', a, b)), ('fge', a, b)),
+ (('~inot', ('fge', a, b)), ('flt', a, b)),
+ (('~inot', ('feq', a, b)), ('fne', a, b)),
+ (('~inot', ('fne', a, b)), ('feq', a, b)),
(('inot', ('ilt', a, b)), ('ige', a, b)),
(('inot', ('ige', a, b)), ('ilt', a, b)),
(('inot', ('ieq', a, b)), ('ine', a, b)),
@@ -132,15 +132,15 @@ optimizations = [
(('imax', a, a), a),
(('umin', a, a), a),
(('umax', a, a), a),
- (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
- (('fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
+ (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
+ (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
(('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
(('fsat', ('fsat', a)), ('fsat', a)),
(('fmin', ('fmax', ('fmin', ('fmax', a, 0.0), 1.0), 0.0), 1.0), ('fmin', ('fmax', a, 0.0), 1.0)),
- (('ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
- (('ior', ('flt', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
- (('ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
- (('ior', ('fge', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
+ (('~ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
+ (('~ior', ('flt', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
+ (('~ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
+ (('~ior', ('fge', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
(('fabs', ('slt', a, b)), ('slt', a, b)),
(('fabs', ('sge', a, b)), ('sge', a, b)),
(('fabs', ('seq', a, b)), ('seq', a, b)),
@@ -191,35 +191,35 @@ optimizations = [
(('iand', 0xff, ('ushr', a, 24)), ('ushr', a, 24)),
(('iand', 0xffff, ('ushr', a, 16)), ('ushr', a, 16)),
# Exponential/logarithmic identities
- (('fexp2', ('flog2', a)), a), # 2^lg2(a) = a
- (('flog2', ('fexp2', a)), a), # lg2(2^a) = a
+ (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
+ (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
(('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
- (('fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
- (('fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
- ('fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) + d) = a^b * c^d
- (('fpow', a, 1.0), a),
- (('fpow', a, 2.0), ('fmul', a, a)),
- (('fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
- (('fpow', 2.0, a), ('fexp2', a)),
- (('fpow', ('fpow', a, 2.2), 0.454545), a),
- (('fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
- (('fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
- (('frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
- (('frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
- (('flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
- (('flog2', ('frcp', a)), ('fneg', ('flog2', a))),
- (('flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
- (('flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
- (('fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))),
- (('fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))),
- (('fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))),
+ (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
+ (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
+ ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) + d) = a^b * c^d
+ (('~fpow', a, 1.0), a),
+ (('~fpow', a, 2.0), ('fmul', a, a)),
+ (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
+ (('~fpow', 2.0, a), ('fexp2', a)),
+ (('~fpow', ('fpow', a, 2.2), 0.454545), a),
+ (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
+ (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
+ (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
+ (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
+ (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
+ (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
+ (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
+ (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
+ (('~fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))),
+ (('~fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))),
+ (('~fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))),
# Division and reciprocal
- (('fdiv', 1.0, a), ('frcp', a)),
+ (('~fdiv', 1.0, a), ('frcp', a)),
(('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
- (('frcp', ('frcp', a)), a),
- (('frcp', ('fsqrt', a)), ('frsq', a)),
+ (('~frcp', ('frcp', a)), a),
+ (('~frcp', ('fsqrt', a)), ('frsq', a)),
(('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
- (('frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
+ (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
# Boolean simplifications
(('ieq', 'a@bool', True), a),
(('ine', 'a@bool', True), ('inot', a)),
@@ -256,7 +256,7 @@ optimizations = [
(('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),
# Subtracts
- (('fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
+ (('~fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
(('isub', a, ('isub', 0, b)), ('iadd', a, b)),
(('ussub_4x8', a, 0), a),
(('ussub_4x8', a, ~0), 0),
@@ -264,7 +264,7 @@ optimizations = [
(('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
(('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
(('ineg', a), ('isub', 0, a), 'options->lower_negate'),
- (('fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
+ (('~fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
(('iadd', a, ('isub', 0, b)), ('isub', a, b)),
(('fabs', ('fsub', 0.0, a)), ('fabs', a)),
(('iabs', ('isub', 0, a)), ('iabs', a)),
@@ -393,10 +393,13 @@ for op in ['flt', 'fge', 'feq', 'fne',
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
+ # Most of these optimizations aren't quite safe when you get infinity or
+ # NaN involved, but the first one should be fine.
(('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
- (('fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
- (('feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
- (('fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
+ (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
+ (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
+ (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
+
(('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
(('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
(('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),