author | Chris Robinson <[email protected]> | 2019-03-18 22:06:01 -0700
---|---|---
committer | Chris Robinson <[email protected]> | 2019-03-18 22:06:01 -0700
commit | d31514f8beff74d4425a7dd7bcc2084023a3ffdd (patch)
tree | 8e3484aeb023fcf8351c127c3cd238858cb857d3 /OpenAL32/Include/alMain.h
parent | 3e816de4fb3f8be4746643f5b9c5c07186e16b6c (diff)
Move some inline functions from alMain.h to alnumeric.h
Diffstat (limited to 'OpenAL32/Include/alMain.h')
-rw-r--r-- | OpenAL32/Include/alMain.h | 239
1 file changed, 0 insertions, 239 deletions
```diff
diff --git a/OpenAL32/Include/alMain.h b/OpenAL32/Include/alMain.h
index f3761a5b..ca5661af 100644
--- a/OpenAL32/Include/alMain.h
+++ b/OpenAL32/Include/alMain.h
@@ -12,12 +12,6 @@
 #ifdef HAVE_STRINGS_H
 #include <strings.h>
 #endif
-#ifdef HAVE_INTRIN_H
-#include <intrin.h>
-#endif
-#ifdef HAVE_SSE_INTRINSICS
-#include <xmmintrin.h>
-#endif
 
 #include <array>
 #include <vector>
@@ -36,7 +30,6 @@
 #include "alnumeric.h"
 #include "threads.h"
 #include "ambidefs.h"
-#include "opthelpers.h"
 
 
 template<typename T, size_t N>
@@ -58,108 +51,6 @@ constexpr inline size_t countof(const T(&)[N]) noexcept
 #endif
 
 
-/* Define CTZ macros (count trailing zeros), and POPCNT macros (population
- * count/count 1 bits), for 32- and 64-bit integers. The CTZ macros' results
- * are *UNDEFINED* if the value is 0.
- */
-#ifdef __GNUC__
-
-#define POPCNT32 __builtin_popcount
-#define CTZ32 __builtin_ctz
-#if SIZEOF_LONG == 8
-#define POPCNT64 __builtin_popcountl
-#define CTZ64 __builtin_ctzl
-#else
-#define POPCNT64 __builtin_popcountll
-#define CTZ64 __builtin_ctzll
-#endif
-
-#elif defined(HAVE_BITSCANFORWARD64_INTRINSIC)
-
-inline int msvc64_popcnt32(ALuint v)
-{ return (int)__popcnt(v); }
-#define POPCNT32 msvc64_popcnt32
-inline int msvc64_ctz32(ALuint v)
-{
-    unsigned long idx = 32;
-    _BitScanForward(&idx, v);
-    return (int)idx;
-}
-#define CTZ32 msvc64_ctz32
-
-inline int msvc64_popcnt64(uint64_t v)
-{ return (int)__popcnt64(v); }
-#define POPCNT64 msvc64_popcnt64
-inline int msvc64_ctz64(uint64_t v)
-{
-    unsigned long idx = 64;
-    _BitScanForward64(&idx, v);
-    return (int)idx;
-}
-#define CTZ64 msvc64_ctz64
-
-#elif defined(HAVE_BITSCANFORWARD_INTRINSIC)
-
-inline int msvc_popcnt32(ALuint v)
-{ return (int)__popcnt(v); }
-#define POPCNT32 msvc_popcnt32
-inline int msvc_ctz32(ALuint v)
-{
-    unsigned long idx = 32;
-    _BitScanForward(&idx, v);
-    return (int)idx;
-}
-#define CTZ32 msvc_ctz32
-
-inline int msvc_popcnt64(uint64_t v)
-{ return (int)(__popcnt((ALuint)v) + __popcnt((ALuint)(v>>32))); }
-#define POPCNT64 msvc_popcnt64
-inline int msvc_ctz64(uint64_t v)
-{
-    unsigned long idx = 64;
-    if(!_BitScanForward(&idx, v&0xffffffff))
-    {
-        if(_BitScanForward(&idx, v>>32))
-            idx += 32;
-    }
-    return (int)idx;
-}
-#define CTZ64 msvc_ctz64
-
-#else
-
-/* There be black magics here. The popcnt method is derived from
- * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
- * while the ctz-utilizing-popcnt algorithm is shown here
- * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
- * as the ntz2 variant. These likely aren't the most efficient methods, but
- * they're good enough if the GCC or MSVC intrinsics aren't available.
- */
-inline int fallback_popcnt32(ALuint v)
-{
-    v = v - ((v >> 1) & 0x55555555u);
-    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
-    v = (v + (v >> 4)) & 0x0f0f0f0fu;
-    return (int)((v * 0x01010101u) >> 24);
-}
-#define POPCNT32 fallback_popcnt32
-inline int fallback_ctz32(ALuint value)
-{ return fallback_popcnt32(~value & (value - 1)); }
-#define CTZ32 fallback_ctz32
-
-inline int fallback_popcnt64(uint64_t v)
-{
-    v = v - ((v >> 1) & 0x5555555555555555_u64);
-    v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
-    v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
-    return (int)((v * 0x0101010101010101_u64) >> 56);
-}
-#define POPCNT64 fallback_popcnt64
-inline int fallback_ctz64(uint64_t value)
-{ return fallback_popcnt64(~value & (value - 1)); }
-#define CTZ64 fallback_ctz64
-#endif
-
 #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__)
 #define IS_LITTLE_ENDIAN (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
 #else
@@ -194,136 +85,6 @@ struct bs2b;
 
 #define MIN_OUTPUT_RATE (8000)
 
-/* Fast float-to-int conversion. No particular rounding mode is assumed; the
- * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
- * change it on its own threads. On some systems, a truncating conversion may
- * always be the fastest method.
- */
-inline int fastf2i(float f) noexcept
-{
-#if defined(HAVE_SSE_INTRINSICS)
-    return _mm_cvt_ss2si(_mm_set_ss(f));
-
-#elif defined(_MSC_VER) && defined(_M_IX86_FP)
-
-    ALint i;
-    __asm fld f
-    __asm fistp i
-    return i;
-
-#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
-
-    int i;
-#ifdef __SSE_MATH__
-    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
-#else
-    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
-#endif
-    return i;
-
-    /* On GCC when compiling with -fno-math-errno, lrintf can be inlined to
-     * some simple instructions. Clang does not inline it, always generating a
-     * libc call, while MSVC's implementation is horribly slow, so always fall
-     * back to a normal integer conversion for them.
-     */
-#elif !defined(_MSC_VER) && !defined(__clang__)
-
-    return lrintf(f);
-
-#else
-
-    return (ALint)f;
-#endif
-}
-
-/* Converts float-to-int using standard behavior (truncation). */
-inline int float2int(float f) noexcept
-{
-#if defined(HAVE_SSE_INTRINSICS)
-    return _mm_cvtt_ss2si(_mm_set_ss(f));
-
-#elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
-       !defined(__SSE_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0)
-    ALint sign, shift, mant;
-    union {
-        ALfloat f;
-        ALint i;
-    } conv;
-
-    conv.f = f;
-    sign = (conv.i>>31) | 1;
-    shift = ((conv.i>>23)&0xff) - (127+23);
-
-    /* Over/underflow */
-    if(UNLIKELY(shift >= 31 || shift < -23))
-        return 0;
-
-    mant = (conv.i&0x7fffff) | 0x800000;
-    if(LIKELY(shift < 0))
-        return (mant >> -shift) * sign;
-    return (mant << shift) * sign;
-
-#else
-
-    return static_cast<ALint>(f);
-#endif
-}
-
-/* Rounds a float to the nearest integral value, according to the current
- * rounding mode. This is essentially an inlined version of rintf, although
- * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
- */
-inline float fast_roundf(float f) noexcept
-{
-#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
-    !defined(__SSE_MATH__)
-
-    float out;
-    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
-    return out;
-
-#else
-
-    /* Integral limit, where sub-integral precision is not available for
-     * floats.
-     */
-    static const float ilim[2] = {
-         8388608.0f /*  0x1.0p+23 */,
-        -8388608.0f /* -0x1.0p+23 */
-    };
-    ALuint sign, expo;
-    union {
-        ALfloat f;
-        ALuint i;
-    } conv;
-
-    conv.f = f;
-    sign = (conv.i>>31)&0x01;
-    expo = (conv.i>>23)&0xff;
-
-    if(UNLIKELY(expo >= 150/*+23*/))
-    {
-        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
-         * precision, so it's already an integral value. We don't need to
-         * worry about infinity or NaN here.
-         */
-        return f;
-    }
-    /* Adding the integral limit to the value (with a matching sign) forces a
-     * result that has no sub-integral precision, and is consequently forced
-     * to round to an integral value. Removing the integral limit then
-     * restores the initial value rounded to the integral. The compiler
-     * should not optimize this out because of non-associative rules on
-     * floating-point math (as long as you don't use -fassociative-math,
-     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case
-     * this may break).
-     */
-    f += ilim[sign];
-    return f - ilim[sign];
-#endif
-}
-
-
 enum DevProbe {
     ALL_DEVICE_PROBE,
     CAPTURE_DEVICE_PROBE
```
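Since the commit only moves these helpers into alnumeric.h, the deleted fallbacks can be sanity-checked in isolation. Below is a minimal standalone sketch, not part of the commit, that re-expresses the 32-bit fallback popcount/ctz with plain `uint32_t` in place of `ALuint` and compares them against the GCC/Clang builtins the first branch would use; the test values are arbitrary.

```cpp
// Standalone sketch of the removed 32-bit fallbacks, using uint32_t in
// place of ALuint. Assumes a GCC/Clang-style compiler for the builtins.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Parallel bit-count from the Stanford bithacks page cited in the comment.
inline int fallback_popcnt32(uint32_t v)
{
    v = v - ((v >> 1) & 0x55555555u);                 // 2-bit partial sums
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); // 4-bit partial sums
    v = (v + (v >> 4)) & 0x0f0f0f0fu;                 // 8-bit partial sums
    return (int)((v * 0x01010101u) >> 24);            // add the four bytes
}

// ntz2 from Hacker's Delight: ~v & (v-1) keeps exactly the trailing-zero
// bits of v, so counting them gives the index of the lowest set bit.
// As with the original, the result for v == 0 is not meaningful.
inline int fallback_ctz32(uint32_t v)
{ return fallback_popcnt32(~v & (v - 1)); }

int main()
{
    const uint32_t tests[] = { 1u, 2u, 0x80000000u, 0xf0u, 0x12345678u, 0xffffffffu };
    for(uint32_t v : tests)
    {
        assert(fallback_popcnt32(v) == __builtin_popcount(v));
        assert(fallback_ctz32(v) == __builtin_ctz(v));
        printf("v=0x%08x popcnt=%d ctz=%d\n", (unsigned)v,
               fallback_popcnt32(v), fallback_ctz32(v));
    }
}
```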
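The deleted comment on fastf2i stresses that no particular rounding mode is assumed, while float2int always truncates. Here is a small sketch of that behavioral split using only standard calls (not the removed intrinsic paths): `lrintf` honors the current rounding mode, which defaults to round-to-nearest with ties-to-even, while a plain cast truncates toward zero as float2int does. The value list is arbitrary.

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const float vals[] = { 2.5f, 3.5f, -2.5f, 2.7f, -2.7f };
    for(float f : vals)
    {
        // lrintf follows the current FP rounding mode; under the IEEE-754
        // default, ties go to the even integer (2.5 -> 2, 3.5 -> 4).
        long nearest = lrintf(f);
        // A cast always truncates toward zero, matching float2int.
        int truncated = (int)f;
        printf("% .1f -> lrintf=%ld cast=%d\n", (double)f, nearest, truncated);
    }
    // Expected under default rounding: 2.5->2, 3.5->4, -2.5->-2, 2.7->3,
    // -2.7->-3; the casts give 2, 3, -2, 2, -2.
}
```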
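The generic branch of the removed fast_roundf rounds by adding and then subtracting 2^23, the magnitude at which float loses all sub-integral precision. The following is a hedged standalone sketch of just that trick; `round_via_ilim` is a hypothetical name, the sign selection is simplified from the bit test in the original, and `volatile` stands in for the non-associativity argument the original comment relies on. It assumes the default round-to-nearest mode and no -ffast-math-style flags.

```cpp
#include <cmath>
#include <cstdio>

// Hypothetical re-creation of the trick for |f| < 2^23: adding the
// integral limit pushes the value into a range where the float ulp is
// 1.0, so the addition itself performs the round-to-nearest.
static float round_via_ilim(float f)
{
    const float ilim = (f < 0.0f) ? -8388608.0f : 8388608.0f; // +/- 0x1.0p+23
    volatile float tmp = f + ilim; // the only step that rounds
    return tmp - ilim;             // exact: operands are within 2x of each other
}

int main()
{
    const float vals[] = { 0.25f, -0.25f, 2.5f, 3.5f, 1000000.4f };
    for(float f : vals)
        printf("% 11.2f -> trick=% .1f rintf=% .1f\n",
               (double)f, (double)round_via_ilim(f), (double)rintf(f));
}
```

The final subtraction introduces no error of its own: the rounded sum and the limit lie within a factor of two of each other, so by the Sterbenz lemma their difference is exactly representable, leaving only the rounding performed by the addition.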