summaryrefslogtreecommitdiffstats
path: root/src/gallium/auxiliary/util/u_math.h
diff options
context:
space:
mode:
authorZack Rusin <[email protected]>2013-07-08 23:45:55 -0400
committerZack Rusin <[email protected]>2013-07-09 23:30:55 -0400
commit63386b2f66a6d450889cd5368bc599beb7f1efbf (patch)
treea82f6a2acbbfc7d639687033060a44a0621e12a3 /src/gallium/auxiliary/util/u_math.h
parent80bc14370a4db876ababc13404a93526c2b14de7 (diff)
util: treat denorm'ed floats like zero
The D3D10 spec is very explicit about treatment of denorm floats and the behavior is exactly the same for them as it would be for -0 or +0. This makes our shading code match that behavior, since OpenGL doesn't care and on a few CPUs it's faster (worst case the same). Float16 conversions will likely break, but we'll fix them in a follow-up commit. Signed-off-by: Zack Rusin <[email protected]> Reviewed-by: Jose Fonseca <[email protected]> Reviewed-by: Roland Scheidegger <[email protected]>
Diffstat (limited to 'src/gallium/auxiliary/util/u_math.h')
-rw-r--r--src/gallium/auxiliary/util/u_math.h7
1 file changed, 7 insertions, 0 deletions
diff --git a/src/gallium/auxiliary/util/u_math.h b/src/gallium/auxiliary/util/u_math.h
index 64d16cbe715..bc3948875a9 100644
--- a/src/gallium/auxiliary/util/u_math.h
+++ b/src/gallium/auxiliary/util/u_math.h
@@ -763,6 +763,13 @@ static INLINE int32_t util_signed_fixed(float value, unsigned frac_bits)
return (int32_t)(value * (1<<frac_bits));
}
+unsigned
+util_fpstate_get(void);
+unsigned
+util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
+void
+util_fpstate_set(unsigned fpstate);
+
#ifdef __cplusplus