author | Jose Fonseca <[email protected]> | 2002-04-18 08:09:50 +0000
---|---|---
committer | Jose Fonseca <[email protected]> | 2002-04-18 08:09:50 +0000
commit | ef65c60d887e47d9f5da72b415b52e215850988d (patch) |
tree | bc6c6edcea8b8ea76da033464bb18821b4e86858 |
parent | 4c1f79264e1923b21571abb13983f38ea1a90dee (diff) |
Removed code that was proven to have faster alternatives, in order to reduce complexity and facilitate reusability.
-rw-r--r-- | src/mesa/x86/mmx_blend.S | 69 |
1 file changed, 0 insertions, 69 deletions
```diff
diff --git a/src/mesa/x86/mmx_blend.S b/src/mesa/x86/mmx_blend.S
index 16897377c85..e679aa7bc79 100644
--- a/src/mesa/x86/mmx_blend.S
+++ b/src/mesa/x86/mmx_blend.S
@@ -49,20 +49,6 @@
  */
 #define GMBT_GEOMETRIC_CORRECTION 1
 
-/*
- * do
- *
- *   s = (q - p)*a + q
- *
- * instead of
- *
- *   s = p*a + q*(1-a)
- *
- * this eliminates a multiply at the expense of
- * complicating the roundoff but is generally worth it
- */
-#define GMBT_SIGNED_ARITHMETIC 1
-
 #if GMBT_ROUNDOFF
 SEG_DATA
@@ -126,7 +112,6 @@ GLNAME( _mesa_mmx_blend_transparency ):
 	PSUBW     ( MM4, MM3 )          /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PSUBW     ( MM1, MM2 )          /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
 
 	PSLLW     ( CONST(8), MM1 )     /* q1 << 8 */
@@ -145,20 +130,6 @@ GLNAME( _mesa_mmx_blend_transparency ):
 	PSUBW     ( MM4, MM2 )          /* t1 -=? 0x100 */
 #endif
 
-#else
-	PCMPEQW   ( MM4, MM4 )          /* 0xffff | 0xffff | 0xffff | 0xffff */
-	PUNPCKLBW ( MM0, MM4 )          /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
-	MOVQ      ( MM4, MM0 )
-
-	PMULLW    ( MM3, MM2 )          /* p1*pa1 */
-
-	PSUBW     ( MM3, MM0 )          /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
-
-	PMULLW    ( MM0, MM1 )          /* q1*(255 - pa1) */
-
-	PADDW     ( MM1, MM2 )          /* t1 = p1*pa1 + q1*(255 - pa1) */
-#endif
-
 #if GMBT_ROUNDOFF
 	MOVQ      ( CONTENT(const_80), MM4 )
@@ -179,9 +150,7 @@ GLNAME( _mesa_mmx_blend_transparency ):
 #endif
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PADDW     ( MM1, MM2 )          /* (t1/255 + q1) << 8 */
-#endif
 
 	PSRLW     ( CONST(8), MM2 )     /* sa1 | sb1 | sg1 | sr1 */
@@ -236,7 +205,6 @@ LLBL (GMBT_loop_begin):
 	PSUBW     ( MM4, MM5 )          /* pa2 + 1 | pa2 + 1 | pa2 + 1 | pa2 + 1 */
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PSUBW     ( MM1, MM2 )          /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
 	PSUBW     ( MM7, MM6 )          /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */
@@ -262,24 +230,6 @@ LLBL (GMBT_loop_begin):
 	PSUBW     ( MM4, MM7 )          /* t2 -=? 0x100 */
 #endif
 
-#else
-	PCMPEQW   ( MM4, MM4 )          /* 0xffff | 0xffff | 0xffff | 0xffff */
-	PUNPCKLBW ( MM0, MM4 )          /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
-	MOVQ      ( MM4, MM0 )
-
-	PMULLW    ( MM3, MM2 )          /* p1*pa1 */
-	PMULLW    ( MM5, MM6 )          /* p2*pa2 */
-
-	PSUBW     ( MM3, MM0 )          /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
-	PSUBW     ( MM5, MM4 )          /* 255 - pa2 | 255 - pa2 | 255 - pa2 | 255 - pa2 */
-
-	PMULLW    ( MM0, MM1 )          /* q1*(255 - pa1) */
-	PMULLW    ( MM4, MM7 )          /* q2*(255 - pa2) */
-
-	PADDW     ( MM1, MM2 )          /* t1 = p1*pa1 + q1*(255 - pa1) */
-	PADDW     ( MM7, MM6 )          /* t2 = p2*pa2 + q2*(255 - pa2) */
-#endif
-
 #if GMBT_ROUNDOFF
 	MOVQ      ( CONTENT(const_80), MM4 )
@@ -306,10 +256,8 @@ LLBL (GMBT_loop_begin):
 #endif
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PADDW     ( MM1, MM2 )          /* (t1/255 + q1) << 8 */
 	PADDW     ( MM7, MM6 )          /* (t2/255 + q2) << 8 */
-#endif
 
 	PSRLW     ( CONST(8), MM2 )     /* sa1 | sb1 | sg1 | sr1 */
 	PSRLW     ( CONST(8), MM6 )     /* sa2 | sb2 | sg2 | sr2 */
@@ -354,7 +302,6 @@ LLBL (GMBT_loop_end):
 	PSUBW     ( MM4, MM3 )          /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PSUBW     ( MM1, MM2 )          /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
 
 	PSLLW     ( CONST(8), MM1 )     /* q1 << 8 */
@@ -373,20 +320,6 @@ LLBL (GMBT_loop_end):
 	PSUBW     ( MM4, MM2 )          /* t1 -=? 0x100 */
 #endif
 
-#else
-	PCMPEQW   ( MM4, MM4 )          /* 0xffff | 0xffff | 0xffff | 0xffff */
-	PUNPCKLBW ( MM0, MM4 )          /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
-	MOVQ      ( MM4, MM0 )
-
-	PMULLW    ( MM3, MM2 )          /* p1*pa1 */
-
-	PSUBW     ( MM3, MM0 )          /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
-
-	PMULLW    ( MM0, MM1 )          /* q1*(255 - pa1) */
-
-	PADDW     ( MM1, MM2 )          /* t1 = p1*pa1 + q1*(255 - pa1) */
-#endif
-
 #if GMBT_ROUNDOFF
 	MOVQ      ( CONTENT(const_80), MM4 )
@@ -407,9 +340,7 @@ LLBL (GMBT_loop_end):
 #endif
 #endif
 
-#if GMBT_SIGNED_ARITHMETIC
 	PADDW     ( MM1, MM2 )          /* (t1/255 + q1) << 8 */
-#endif
 
 	PSRLW     ( CONST(8), MM2 )     /* sa1 | sb1 | sg1 | sr1 */
```
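What this commit deletes is the two-multiply fallback per channel, s = p*a + q*(255 - a) (the PMULLW-heavy `#else` branches), along with the `GMBT_SIGNED_ARITHMETIC` guards; the signed one-multiply path is kept and becomes the only implementation. The removed comment writes the rewrite as s = (q - p)*a + q, while the per-instruction comments (pa1 - qa1, ...) show the subtraction as p - q, i.e. s = (p - q)*a + q, which is the form algebraically equal to p*a + q*(1 - a). The C sketch below only illustrates that identity; it is not a transcription of the MMX code. The function names, the exact rounding division by 255, and the exhaustive check are this sketch's own choices, whereas the assembly above approximates the division with shifts and the GMBT_GEOMETRIC_CORRECTION alpha adjustment.

```c
#include <stdint.h>
#include <stdio.h>

/* Two-multiply blend, one 8-bit channel:
 * s = (p*a + q*(255 - a)) / 255, rounded to nearest. */
static uint8_t blend_two_mul(uint8_t p, uint8_t q, uint8_t a)
{
    unsigned t = (unsigned)p * a + (unsigned)q * (255 - a);
    return (uint8_t)((t + 127) / 255);
}

/* Floor division for a possibly negative numerator (d > 0). */
static int floor_div(int n, int d)
{
    return (n >= 0) ? n / d : -((-n + d - 1) / d);
}

/* One-multiply variant: s = (p - q)*a/255 + q.  The difference p - q can
 * be negative, which is why the MMX path needs signed word arithmetic. */
static uint8_t blend_one_mul(uint8_t p, uint8_t q, uint8_t a)
{
    int t = ((int)p - (int)q) * (int)a;   /* signed intermediate */
    return (uint8_t)((int)q + floor_div(t + 127, 255));
}

int main(void)
{
    /* Confirm the two formulations agree for every (p, q, a). */
    for (int p = 0; p < 256; p++)
        for (int q = 0; q < 256; q++)
            for (int a = 0; a < 256; a++)
                if (blend_two_mul((uint8_t)p, (uint8_t)q, (uint8_t)a) !=
                    blend_one_mul((uint8_t)p, (uint8_t)q, (uint8_t)a)) {
                    printf("mismatch: p=%d q=%d a=%d\n", p, q, a);
                    return 1;
                }
    puts("one-multiply and two-multiply blends agree");
    return 0;
}
```

Compiled with any C compiler, the exhaustive check passes, since p*a + q*(255 - a) = (p - q)*a + 255*q and the 255*q term drops cleanly out of the division. The practical gain on MMX is one PMULLW saved per register of pixels, at the cost of the trickier rounding the removed comment alluded to.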