aboutsummaryrefslogtreecommitdiffstats
path: root/src/mesa
diff options
context:
space:
mode:
authorBrian Paul <[email protected]>2002-04-10 16:32:32 +0000
committerBrian Paul <[email protected]>2002-04-10 16:32:32 +0000
commit9add9a21d8c51ee4238169265541fa9a40f0a8b0 (patch)
tree978d75c2a5b475b19d87348f5e303d54997cfe51 /src/mesa
parent0cd8a1ea13f013df8e3fd16793a2a0aea4fed68d (diff)
new MMX blend code (Jose Fonseca)
Diffstat (limited to 'src/mesa')
-rw-r--r--src/mesa/swrast/s_blend.c8
-rw-r--r--src/mesa/x86/mmx_blend.S714
2 files changed, 381 insertions, 341 deletions
diff --git a/src/mesa/swrast/s_blend.c b/src/mesa/swrast/s_blend.c
index 3fdb808abf0..845709a1119 100644
--- a/src/mesa/swrast/s_blend.c
+++ b/src/mesa/swrast/s_blend.c
@@ -1,4 +1,4 @@
-/* $Id: s_blend.c,v 1.18 2002/04/04 16:53:26 brianp Exp $ */
+/* $Id: s_blend.c,v 1.19 2002/04/10 16:32:32 brianp Exp $ */
/*
* Mesa 3-D graphics library
@@ -646,10 +646,8 @@ void _swrast_choose_blend_func( GLcontext *ctx )
SWRAST_CONTEXT(ctx)->BlendFunc = blend_general;
}
else if (eq==GL_FUNC_ADD_EXT && srcRGB==GL_SRC_ALPHA
- && dstRGB==GL_ONE_MINUS_SRC_ALPHA)
- {
- /* XXX It looks like the MMX blend code is broken. Disable for now. */
-#if 0 && defined(USE_MMX_ASM)
+ && dstRGB==GL_ONE_MINUS_SRC_ALPHA) {
+#if defined(USE_MMX_ASM)
if ( cpu_has_mmx ) {
SWRAST_CONTEXT(ctx)->BlendFunc = _mesa_mmx_blend_transparency;
}
diff --git a/src/mesa/x86/mmx_blend.S b/src/mesa/x86/mmx_blend.S
index c2be6812ec0..caf1f17ba44 100644
--- a/src/mesa/x86/mmx_blend.S
+++ b/src/mesa/x86/mmx_blend.S
@@ -1,8 +1,69 @@
+/*
+ * Written by José Fonseca <[email protected]>
+ */
+
#include "matypes.h"
+/*
+ * make the following approximation to the division (Sree)
+ *
+ * rgb*a/255 ~= (rgb*(a+1)) >> 8
+ *
+ * which is the fastest method that satisfies the following OpenGL criteria
+ *
+ * 0*0 = 0 and 255*255 = 255
+ *
+ * note this one should be used alone
+ */
+#define GMBT_ALPHA_PLUS_ONE 0
+
+/*
+ * take the geometric series approximation to the division
+ *
+ * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
+ *
+ * in this case just the first two terms to fit in 16bit arithmetic
+ *
+ * t/255 ~= (t + (t >> 8)) >> 8
+ *
+ * note that just by itself it doesn't satisfy the OpenGL criteria, as 255*255 = 254,
+ * so the special case a = 255 must be accounted or roundoff must be used
+ */
+#define GMBT_GEOMETRIC_SERIES 1
+
+/*
+ * when using a geometric series division instead of truncating the result
+ * use roundoff in the approximation (Jim Blinn)
+ *
+ * t = rgb*a + 0x80
+ *
+ * achieving the exact results
+ */
+#define GMBT_ROUNDOFF 1
+
+/*
+ * do
+ *
+ * s = (q - p)*a + q
+ *
+ * instead of
+ *
+ * s = p*a + q*(1-a)
+ *
+ * this eliminates a multiply at the expense of
+ * complicating the roundoff but is generally worth it
+ */
+#define GMBT_SIGNED_ARITHMETIC 1
+
+#if GMBT_ROUNDOFF
+ SEG_DATA
-SEG_TEXT
+ALIGNDATA8
+const_80:
+ D_LONG 0x00800080, 0x00800080
+#endif
+ SEG_TEXT
ALIGNTEXT16
GLOBL GLNAME(_mesa_mmx_blend_transparency)
@@ -17,344 +78,325 @@ GLOBL GLNAME(_mesa_mmx_blend_transparency)
* Common transparency blending mode.
*/
GLNAME( _mesa_mmx_blend_transparency ):
- PUSH_L ( EBP )
- MOV_L ( ESP, EBP )
- SUB_L ( CONST(52), ESP )
- PUSH_L ( EBX )
-
- MOV_L ( CONST(16711680), REGOFF(-8, EBP) )
- MOV_L ( CONST(16711680), REGOFF(-4, EBP) )
- MOV_L ( CONST(0), REGOFF(-16, EBP) )
- MOV_L ( CONST(-1), REGOFF(-12, EBP) )
- MOV_L ( CONST(-1), REGOFF(-24, EBP) )
- MOV_L ( CONST(0), REGOFF(-20, EBP) )
- MOV_L ( REGOFF(24, EBP), EAX ) /* rgba */
- ADD_L ( CONST(4), EAX )
- MOV_L ( EAX, EDX )
- AND_L ( REGOFF(20, EBP), EDX ) /* mask */
- MOV_L ( EDX, EAX )
- AND_L ( CONST(4), EAX )
- CMP_L ( CONST(8), EAX )
- JNE ( LLBL(GMBT_skip_runin) )
- MOV_L ( REGOFF(20, EBP), EAX )
- ADD_L ( CONST(3), EAX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(EAX), DL )
- MOV_L ( EDX, REGOFF(-32, EBP) )
- MOV_L ( CONST(255), EAX )
- MOV_L ( EAX, EBX )
- SUB_L ( REGOFF(-32, EBP), EBX )
- MOV_L ( EBX, REGOFF(-36, EBP) )
- MOV_L ( REGOFF(20, EBP), EAX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(EAX), DL )
- MOV_L ( EDX, EAX )
- IMUL_L ( REGOFF(-32, EBP), EAX )
- MOV_L ( REGOFF(24, EBP), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EDX )
- IMUL_L ( REGOFF(-36, EBP), EDX )
- ADD_L ( EDX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-40, EBP) )
- MOV_L ( REGOFF(20, EBP), EAX )
- INC_L ( EAX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(EAX), DL )
- MOV_L ( EDX, EAX )
- IMUL_L ( REGOFF(-32, EBP), EAX )
- MOV_L ( REGOFF(24, EBP), EDX )
- INC_L ( EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EDX )
- IMUL_L ( REGOFF(-36, EBP), EDX )
- ADD_L ( EDX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-44, EBP) )
- MOV_L ( REGOFF(20, EBP), EAX )
- ADD_L ( CONST(2), EAX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(EAX), DL )
- MOV_L ( EDX, EAX )
- IMUL_L ( REGOFF(-32, EBP), EAX )
- MOV_L ( REGOFF(24, EBP), EDX )
- ADD_L ( CONST(2), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EDX )
- IMUL_L ( REGOFF(-36, EBP), EDX )
- ADD_L ( EDX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-48, EBP) )
- MOV_L ( REGOFF(20, EBP), EAX )
- ADD_L ( CONST(3), EAX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(EAX), DL )
- MOV_L ( EDX, EAX )
- IMUL_L ( REGOFF(-32, EBP), EAX )
- MOV_L ( REGOFF(24, EBP), EDX )
- ADD_L ( CONST(3), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EDX )
- IMUL_L ( REGOFF(-36, EBP), EDX )
- ADD_L ( EDX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-52, EBP) )
- MOV_L ( REGOFF(20, EBP), EAX )
- MOV_B ( REGOFF(-40, EBP), DL )
- MOV_B ( DL, REGIND(EAX) )
- MOV_L ( REGOFF(20, EBP), EAX )
- INC_L ( EAX )
- MOV_B ( REGOFF(-44, EBP), DL )
- MOV_B ( DL, REGIND(EAX) )
- MOV_L ( REGOFF(20, EBP), EAX )
- ADD_L ( CONST(2), EAX )
- MOV_B ( REGOFF(-48, EBP), DL )
- MOV_B ( DL, REGIND(EAX) )
- MOV_L ( REGOFF(20, EBP), EAX )
- ADD_L ( CONST(3), EAX )
- MOV_B ( REGOFF(-52, EBP), DL )
- MOV_B ( DL, REGIND(EAX) )
- INC_L ( REGOFF(16, EBP) )
- ADD_L ( CONST(4), REGOFF(20, EBP) )
- ADD_L ( CONST(4), REGOFF(24, EBP) )
- DEC_L ( REGOFF(12, EBP) )
-
-LLBL (GMBT_skip_runin):
-
- CMP_L ( CONST(0), REGOFF(12, EBP) ) /* n == 0 */
- JE ( LLBL(GMBT_runout) )
- MOV_L ( CONST(0), REGOFF(-28, EBP) )
-
-ALIGNTEXT4
-LLBL (GMBT_main_loop):
-
- MOV_L ( REGOFF(12, EBP), EDX )
- MOV_L ( EDX, EAX )
- SHR_L ( CONST(1), EAX ) /* eax = n/2 */
- CMP_L ( EAX, REGOFF(-28, EBP) )
- JB ( LLBL(GMBT_no_jump) )
- JMP ( LLBL(GMBT_end_loop) )
-ALIGNTEXT16
-LLBL (GMBT_no_jump):
-
- MOV_L ( REGOFF(-28, EBP), EAX ) /* eax = i */
- LEA_L ( REGDIS(0,EAX,2), EDX ) /* edx = i*2 */
- MOV_L ( REGOFF(16, EBP), EAX ) /* eax = mask */
- CMP_W ( CONST(0), REGBI(EAX,EDX) ) /* ((unsigned *) mask)[i] == 0 */
- JE ( LLBL(GMBT_masked) )
- MOV_L ( REGOFF(-28, EBP), EAX ) /* eax = i */
- MOV_L ( EAX, EDX )
- LEA_L ( REGDIS(0,EDX,8), ECX ) /* ecx = i*8 */
- MOV_L ( ECX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- MOV_L ( REGOFF(-28, EBP), EDX )
- MOV_L ( EDX, ECX )
- LEA_L ( REGDIS(0,ECX,8), EDX )
- MOV_L ( EDX, ECX )
- ADD_L ( REGOFF(24, EBP), ECX )
-
- MOVQ ( REGIND(EAX), MM4 )
- PXOR ( MM5, MM5 ) /* mm5 = 00 00 00 00 00 00 00 00 */
- MOVQ ( MM4, MM1 )
- MOVQ ( REGIND(ECX), MM7 )
- PUNPCKLBW ( MM5, MM1 )
- MOVQ ( MM7, MM6 )
- MOVQ ( MM1, MM0 )
- PUNPCKLBW ( MM5, MM6 )
- MOVQ ( MM1, MM2 )
- PSRLQ ( CONST(48), MM0 )
- PUNPCKHBW ( MM5, MM4 )
- PACKSSDW ( MM0, MM0 )
- MOVQ ( MM0, MM3 )
- PUNPCKHBW ( MM5, MM7 )
- PSLLQ ( CONST(16), MM3 )
- POR ( REGOFF(-8, EBP), MM0 )
- PUNPCKLWD ( MM6, MM1 )
- PSUBW ( MM3, MM0 )
- PUNPCKHWD ( MM6, MM2 )
- MOVQ ( MM4, MM3 )
- PSRLQ ( CONST(48), MM3 )
- PACKSSDW ( MM3, MM3 )
- MOVQ ( MM3, MM6 )
- POR ( REGOFF(-8, EBP), MM3 )
- PSLLQ ( CONST(16), MM6 )
- PSUBW ( MM6, MM3 )
- MOVQ ( MM4, MM5 )
- PUNPCKLWD ( MM7, MM4 )
- PUNPCKHWD ( MM7, MM5 )
- PMADDWD ( MM0, MM1 )
- PMADDWD ( MM3, MM4 )
- PMADDWD ( MM0, MM2 )
- PMADDWD ( MM3, MM5 )
- PSRLD ( CONST(8), MM1 )
- PSRLD ( CONST(8), MM2 )
- PSRLD ( CONST(8), MM4 )
- PACKSSDW ( MM2, MM1 )
- PSRLD ( CONST(8), MM5 )
- PACKUSWB ( MM1, MM1 )
- PACKSSDW ( MM5, MM4 )
- PAND ( REGOFF(-24, EBP), MM1 )
- PACKUSWB ( MM4, MM4 )
- PAND ( REGOFF(-16, EBP), MM4 )
- POR ( MM1, MM4 )
- MOVQ ( MM4, REGIND(EAX) )
-
-LLBL (GMBT_masked):
-
- INC_L ( REGOFF(-28, EBP) )
- JMP ( LLBL(GMBT_main_loop) )
+ PUSH_L ( EBP )
+ MOV_L ( ESP, EBP )
+ PUSH_L ( ESI )
+ PUSH_L ( EDI )
+ PUSH_L ( EBX )
+
+ MOV_L ( REGOFF(12, EBP), ECX ) /* n */
+ CMP_L ( CONST(0), ECX)
+ JE ( LLBL (GMBT_return) )
+
+ MOV_L ( REGOFF(16, EBP), EBX ) /* mask */
+ MOV_L ( REGOFF(20, EBP), EDI ) /* rgba */
+ MOV_L ( REGOFF(24, EBP), ESI ) /* dest */
+
+ TEST_L ( CONST(4), EDI ) /* align rgba on an 8-byte boundary */
+ JZ ( LLBL (GMBT_align_end) )
+
+ CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
+ JE ( LLBL (GMBT_align_continue) )
+
+ PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
+
+ MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
+ MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
+
+ PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
+ PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
+
+ MOVQ ( MM2, MM3 )
+
+ PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
+ PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
+
+#if GMBT_ALPHA_PLUS_ONE
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+
+ PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
+
+ PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
+
+#if GMBT_ROUNDOFF
+ MOVQ ( MM2, MM4 )
+#endif
+
+ PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
+
+#if GMBT_ROUNDOFF
+ PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
+
+ PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
+
+ PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
+#endif
+
+#else
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+ PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
+ MOVQ ( MM4, MM0 )
+
+ PMULLW ( MM3, MM2 ) /* p1*pa1 */
+
+ PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
+
+ PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
+
+ PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
+#endif
+
+#if GMBT_ROUNDOFF
+ MOVQ ( CONTENT(const_80), MM4 )
+
+ PADDW ( MM4, MM2 ) /* t1 += 0x80 */
+#endif
+
+#if GMBT_GEOMETRIC_SERIES
+ MOVQ ( MM2, MM3 )
+
+ PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
+
+ PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
+#endif
+
+ PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
+
+ PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
+ MOVD ( MM2, REGIND(EDI) )
+
+LLBL (GMBT_align_continue):
+
+ DEC_L ( ECX ) /* n -= 1 */
+ INC_L ( EBX ) /* mask += 1 */
+ ADD_L ( CONST(4), EDI ) /* rgba += 1 */
+ ADD_L ( CONST(4), ESI ) /* dest += 1 */
+
+LLBL (GMBT_align_end):
+
+ CMP_L ( CONST(2), ECX)
+ JB ( LLBL (GMBT_loop_end) )
ALIGNTEXT16
-LLBL (GMBT_end_loop):
+LLBL (GMBT_loop_begin):
+
+ CMP_W ( CONST(0), REGIND(EBX) ) /* *mask == 0 && *(mask + 1) == 0 */
+ JE ( LLBL (GMBT_loop_continue) )
+
+ /* NOTE: the instruction pairing when multiple pipelines are available must be checked */
+
+ PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
+
+ MOVQ ( REGIND(ESI), MM7 ) /* qa2 | qb2 | qg2 | qr2 | qa1 | qb1 | qg1 | qr1 */
+ MOVQ ( REGIND(EDI), MM6 ) /* pa2 | pb2 | pg2 | pr2 | pa1 | pb1 | pg1 | pr1 */
+
+ MOVQ ( MM7, MM1 )
+ MOVQ ( MM6, MM2 )
+
+ PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
+ PUNPCKHBW ( MM0, MM7 ) /* qa2 | qb2 | qg2 | qr2 */
+ PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
+ PUNPCKHBW ( MM0, MM6 ) /* pa2 | pb2 | pg2 | pr2 */
+
+ MOVQ ( MM2, MM3 )
+ MOVQ ( MM6, MM5 )
+
+ PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
+ PUNPCKHWD ( MM5, MM5 ) /* pa2 | pa2 | | */
+ PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
+ PUNPCKHDQ ( MM5, MM5 ) /* pa2 | pa2 | pa2 | pa2 */
+
+#if GMBT_ALPHA_PLUS_ONE
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+
+ PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
+ PSUBW ( MM4, MM5 ) /* pa2 + 1 | pa2 + 1 | pa2 + 1 | pa2 + 1 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
+ PSUBW ( MM7, MM6 ) /* pa2 - qa2 | pb2 - qb2 | pg2 - qg2 | pr2 - qr2 */
+
+ PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
+ PSLLW ( CONST(8), MM7 ) /* q2 << 8 */
+
+#if GMBT_ROUNDOFF
+ MOVQ ( MM2, MM0 )
+ MOVQ ( MM6, MM4 )
+#endif
+
+ PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
+ PMULLW ( MM5, MM6 ) /* t2 = (q2 - p2)*pa2 */
+
+#if GMBT_ROUNDOFF
+ PSRLW ( CONST(15), MM0 ) /* q1 > p1 ? 1 : 0 */
+	PSRLW      ( CONST(15), MM4 )		/* q2 > p2 ? 1 : 0 */
+
+ PSLLW ( CONST(8), MM0 ) /* q1 > p1 ? 0x100 : 0 */
+	PSLLW      ( CONST(8), MM4 )		/* q2 > p2 ? 0x100 : 0 */
+
+ PSUBW ( MM0, MM2 ) /* t1 -=? 0x100 */
+ PSUBW ( MM4, MM7 ) /* t2 -=? 0x100 */
+#endif
+
+#else
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+ PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
+ MOVQ ( MM4, MM0 )
+
+ PMULLW ( MM3, MM2 ) /* p1*pa1 */
+ PMULLW ( MM5, MM6 ) /* p2*pa2 */
+
+ PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
+ PSUBW ( MM5, MM4 ) /* 255 - pa2 | 255 - pa2 | 255 - pa2 | 255 - pa2 */
+
+ PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
+ PMULLW ( MM4, MM7 ) /* q2*(255 - pa2) */
+
+ PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
+ PADDW ( MM7, MM6 ) /* t2 = p2*pa2 + q2*(255 - pa2) */
+#endif
+
+#if GMBT_ROUNDOFF
+ MOVQ ( CONTENT(const_80), MM4 )
+
+ PADDW ( MM4, MM2 ) /* t1 += 0x80 */
+ PADDW ( MM4, MM6 ) /* t2 += 0x80 */
+#endif
+
+#if GMBT_GEOMETRIC_SERIES
+ MOVQ ( MM2, MM3 )
+ MOVQ ( MM6, MM5 )
+
+ PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
+ PSRLW ( CONST(8), MM5 ) /* t2 >> 8 */
+
+ PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
+ PADDW ( MM5, MM6 ) /* t2 + (t2 >> 8) ~= (t2/255) << 8 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
+ PADDW ( MM7, MM6 ) /* (t2/255 + q2) << 8 */
+#endif
+
+ PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
+ PSRLW ( CONST(8), MM6 ) /* sa2 | sb2 | sg2 | sr2 */
+
+ PACKUSWB ( MM6, MM2 ) /* sa2 | sb2 | sg2 | sr2 | sa1 | sb1 | sg1 | sr1 */
+ MOVQ ( MM2, REGIND(EDI) )
+
+LLBL (GMBT_loop_continue):
+
+ DEC_L ( ECX )
+ DEC_L ( ECX ) /* n -= 2 */
+ ADD_L ( CONST(2), EBX ) /* mask += 2 */
+ ADD_L ( CONST(8), EDI ) /* rgba += 2 */
+ ADD_L ( CONST(8), ESI ) /* dest += 2 */
+ CMP_L ( CONST(2), ECX )
+ JAE ( LLBL (GMBT_loop_begin) )
+
+LLBL (GMBT_loop_end):
+
+ CMP_L ( CONST(1), ECX )
+ JB ( LLBL (GMBT_done) )
+
+ CMP_B ( CONST(0), REGIND(EBX) ) /* *mask == 0 */
+ JE ( LLBL (GMBT_done) )
+
+ PXOR ( MM0, MM0 ) /* 0x0000 | 0x0000 | 0x0000 | 0x0000 */
+
+ MOVD ( REGIND(ESI), MM1 ) /* | | | | qa1 | qb1 | qg1 | qr1 */
+ MOVD ( REGIND(EDI), MM2 ) /* | | | | pa1 | pb1 | pg1 | pr1 */
+
+ PUNPCKLBW ( MM0, MM1 ) /* qa1 | qb1 | qg1 | qr1 */
+ PUNPCKLBW ( MM0, MM2 ) /* pa1 | pb1 | pg1 | pr1 */
+
+ MOVQ ( MM2, MM3 )
+
+ PUNPCKHWD ( MM3, MM3 ) /* pa1 | pa1 | | */
+ PUNPCKHDQ ( MM3, MM3 ) /* pa1 | pa1 | pa1 | pa1 */
+
+#if GMBT_ALPHA_PLUS_ONE
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+
+ PSUBW ( MM4, MM3 ) /* pa1 + 1 | pa1 + 1 | pa1 + 1 | pa1 + 1 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PSUBW ( MM1, MM2 ) /* pa1 - qa1 | pb1 - qb1 | pg1 - qg1 | pr1 - qr1 */
+
+ PSLLW ( CONST(8), MM1 ) /* q1 << 8 */
+
+#if GMBT_ROUNDOFF
+ MOVQ ( MM2, MM4 )
+#endif
+
+ PMULLW ( MM3, MM2 ) /* t1 = (q1 - p1)*pa1 */
+
+#if GMBT_ROUNDOFF
+ PSRLW ( CONST(15), MM4 ) /* q1 > p1 ? 1 : 0 */
+
+ PSLLW ( CONST(8), MM4 ) /* q1 > p1 ? 0x100 : 0 */
+
+ PSUBW ( MM4, MM2 ) /* t1 -=? 0x100 */
+#endif
+
+#else
+ PCMPEQW ( MM4, MM4 ) /* 0xffff | 0xffff | 0xffff | 0xffff */
+ PUNPCKLBW ( MM0, MM4 ) /* 0x00ff | 0x00ff | 0x00ff | 0x00ff */
+ MOVQ ( MM4, MM0 )
+
+ PMULLW ( MM3, MM2 ) /* p1*pa1 */
+
+ PSUBW ( MM3, MM0 ) /* 255 - pa1 | 255 - pa1 | 255 - pa1 | 255 - pa1 */
+
+ PMULLW ( MM0, MM1 ) /* q1*(255 - pa1) */
+
+ PADDW ( MM1, MM2 ) /* t1 = p1*pa1 + q1*(255 - pa1) */
+#endif
+
+#if GMBT_ROUNDOFF
+ MOVQ ( CONTENT(const_80), MM4 )
+
+ PADDW ( MM4, MM2 ) /* t1 += 0x80 */
+#endif
+
+#if GMBT_GEOMETRIC_SERIES
+ MOVQ ( MM2, MM3 )
+
+ PSRLW ( CONST(8), MM3 ) /* t1 >> 8 */
+
+ PADDW ( MM3, MM2 ) /* t1 + (t1 >> 8) ~= (t1/255) << 8 */
+#endif
+
+#if GMBT_SIGNED_ARITHMETIC
+ PADDW ( MM1, MM2 ) /* (t1/255 + q1) << 8 */
+#endif
+
+ PSRLW ( CONST(8), MM2 ) /* sa1 | sb1 | sg1 | sr1 */
+
+ PACKUSWB ( MM0, MM2 ) /* | | | | sa1 | sb1 | sg1 | sr1 */
+ MOVD ( MM2, REGIND(EDI) )
+
+LLBL (GMBT_done):
+
EMMS
-LLBL (GMBT_runout):
-
- MOV_L ( REGOFF(12, EBP), EAX )
- AND_L ( CONST(1), EAX )
- TEST_L ( EAX, EAX )
- JE ( LLBL(GMBT_skip_runout) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-1, EAX), EDX )
- XOR_L ( EAX, EAX )
- MOV_B ( REGIND(EDX), AL )
- MOV_L ( EAX, REGOFF(-52, EBP) )
- MOV_L ( CONST(255), EAX )
- MOV_L ( EAX, EBX )
- SUB_L ( REGOFF(-52, EBP), EBX )
- MOV_L ( EBX, REGOFF(-48, EBP) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-4, EAX), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EAX )
- IMUL_L ( REGOFF(-52, EBP), EAX )
- MOV_L ( REGOFF(12, EBP), EDX )
- LEA_L ( REGDIS(0,EDX,4), ECX )
- MOV_L ( ECX, EDX )
- ADD_L ( REGOFF(24, EBP), EDX )
- LEA_L ( REGOFF(-4, EDX), ECX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(ECX), DL )
- MOV_L ( EDX, ECX )
- IMUL_L ( REGOFF(-48, EBP), ECX )
- ADD_L ( ECX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-44, EBP) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-3, EAX), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EAX )
- IMUL_L ( REGOFF(-52, EBP), EAX )
- MOV_L ( REGOFF(12, EBP), EDX )
- LEA_L ( REGDIS(0,EDX,4), ECX )
- MOV_L ( ECX, EDX )
- ADD_L ( REGOFF(24, EBP), EDX )
- LEA_L ( REGOFF(-3, EDX), ECX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(ECX), DL )
- MOV_L ( EDX, ECX )
- IMUL_L ( REGOFF(-48, EBP), ECX )
- ADD_L ( ECX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-40, EBP) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-2, EAX), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EAX )
- IMUL_L ( REGOFF(-52, EBP), EAX )
- MOV_L ( REGOFF(12, EBP), EDX )
- LEA_L ( REGDIS(0,EDX,4), ECX )
- MOV_L ( ECX, EDX )
- ADD_L ( REGOFF(24, EBP), EDX )
- LEA_L ( REGOFF(-2, EDX), ECX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(ECX), DL )
- MOV_L ( EDX, ECX )
- IMUL_L ( REGOFF(-48, EBP), ECX )
- ADD_L ( ECX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-36, EBP) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-1, EAX), EDX )
- XOR_L ( ECX, ECX )
- MOV_B ( REGIND(EDX), CL )
- MOV_L ( ECX, EAX )
- IMUL_L ( REGOFF(-52, EBP), EAX )
- MOV_L ( REGOFF(12, EBP), EDX )
- LEA_L ( REGDIS(0,EDX,4), ECX )
- MOV_L ( ECX, EDX )
- ADD_L ( REGOFF(24, EBP), EDX )
- LEA_L ( REGOFF(-1, EDX), ECX )
- XOR_L ( EDX, EDX )
- MOV_B ( REGIND(ECX), DL )
- MOV_L ( EDX, ECX )
- IMUL_L ( REGOFF(-48, EBP), ECX )
- ADD_L ( ECX, EAX )
- MOV_L ( EAX, EBX )
- SAR_L ( CONST(8), EBX )
- MOV_L ( EBX, REGOFF(-32, EBP) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-4, EAX), EDX )
- MOV_B ( REGOFF(-44, EBP), AL )
- MOV_B ( AL, REGIND(EDX) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-3, EAX), EDX )
- MOV_B ( REGOFF(-40, EBP), AL )
- MOV_B ( AL, REGIND(EDX) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-2, EAX), EDX )
- MOV_B ( REGOFF(-36, EBP), AL )
- MOV_B ( AL, REGIND(EDX) )
- MOV_L ( REGOFF(12, EBP), EAX )
- LEA_L ( REGDIS(0,EAX,4), EDX )
- MOV_L ( EDX, EAX )
- ADD_L ( REGOFF(20, EBP), EAX )
- LEA_L ( REGOFF(-1, EAX), EDX )
- MOV_B ( REGOFF(-32, EBP), AL )
- MOV_B ( AL, REGIND(EDX) )
-
-LLBL (GMBT_skip_runout):
-
- MOV_L ( REGOFF(-56, EBP), EBX )
- MOV_L ( EBP, ESP )
- POP_L ( EBP )
+LLBL (GMBT_return):
+
+ POP_L ( EBX )
+ POP_L ( EDI )
+ POP_L ( ESI )
+ MOV_L ( EBP, ESP )
+ POP_L ( EBP )
RET