Diffstat (limited to 'src/gallium/auxiliary/rtasm/rtasm_x86sse.c')
-rw-r--r--   src/gallium/auxiliary/rtasm/rtasm_x86sse.c   1660
1 file changed, 1660 insertions, 0 deletions
diff --git a/src/gallium/auxiliary/rtasm/rtasm_x86sse.c b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
new file mode 100644
index 00000000000..f4ca282dd93
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
@@ -0,0 +1,1660 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_X86)
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_debug.h"
+#include "pipe/p_pointer.h"
+
+#include "rtasm_execmem.h"
+#include "rtasm_x86sse.h"
+
+#define DISASSEM 0
+#define X86_TWOB 0x0f
+
+
+#define DUMP_SSE 0
+
+
+void x86_print_reg( struct x86_reg reg )
+{
+ if (reg.mod != mod_REG)
+ debug_printf( "[" );
+
+ switch( reg.file ) {
+ case file_REG32:
+ switch( reg.idx ) {
+ case reg_AX: debug_printf( "EAX" ); break;
+ case reg_CX: debug_printf( "ECX" ); break;
+ case reg_DX: debug_printf( "EDX" ); break;
+ case reg_BX: debug_printf( "EBX" ); break;
+ case reg_SP: debug_printf( "ESP" ); break;
+ case reg_BP: debug_printf( "EBP" ); break;
+ case reg_SI: debug_printf( "ESI" ); break;
+ case reg_DI: debug_printf( "EDI" ); break;
+ }
+ break;
+ case file_MMX:
+ debug_printf( "MMX%u", reg.idx );
+ break;
+ case file_XMM:
+ debug_printf( "XMM%u", reg.idx );
+ break;
+ case file_x87:
+ debug_printf( "fp%u", reg.idx );
+ break;
+ }
+
+ if (reg.mod == mod_DISP8 ||
+ reg.mod == mod_DISP32)
+ debug_printf("+%d", reg.disp);
+
+ if (reg.mod != mod_REG)
+ debug_printf( "]" );
+}
+
+#if DUMP_SSE
+
+#define DUMP_START() debug_printf( "\n" )
+#define DUMP_END() debug_printf( "\n" )
+
+#define DUMP() do { \
+ const char *foo = __FUNCTION__; \
+ while (*foo && *foo != '_') \
+ foo++; \
+ if (*foo) \
+ foo++; \
+ debug_printf( "\n% 4x% 15s ", p->csr - p->store, foo ); \
+} while (0)
+
+#define DUMP_I( I ) do { \
+ DUMP(); \
+ debug_printf( "%u", I ); \
+} while( 0 )
+
+#define DUMP_R( R0 ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+} while( 0 )
+
+#define DUMP_RR( R0, R1 ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", " ); \
+ x86_print_reg( R1 ); \
+} while( 0 )
+
+#define DUMP_RI( R0, I ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", %u", I ); \
+} while( 0 )
+
+#define DUMP_RRI( R0, R1, I ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", " ); \
+ x86_print_reg( R1 ); \
+ debug_printf( ", %u", I ); \
+} while( 0 )
+
+#else
+
+#define DUMP_START()
+#define DUMP_END()
+#define DUMP( )
+#define DUMP_I( I )
+#define DUMP_R( R0 )
+#define DUMP_RR( R0, R1 )
+#define DUMP_RI( R0, I )
+#define DUMP_RRI( R0, R1, I )
+
+#endif
+
+
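+/* Code store management.  If an executable allocation ever fails, code
+ * emission is silently redirected into the small error_overflow scratch
+ * buffer inside struct x86_function; x86_get_func() later reports the
+ * failure by returning NULL when it sees p->store == p->error_overflow.
+ */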
+static void do_realloc( struct x86_function *p )
+{
+ if (p->store == p->error_overflow) {
+ p->csr = p->store;
+ }
+ else if (p->size == 0) {
+ p->size = 1024;
+ p->store = rtasm_exec_malloc(p->size);
+ p->csr = p->store;
+ }
+ else {
+ uintptr_t used = pointer_to_uintptr( p->csr ) - pointer_to_uintptr( p->store );
+ unsigned char *tmp = p->store;
+ p->size *= 2;
+ p->store = rtasm_exec_malloc(p->size);
+
+ if (p->store) {
+ memcpy(p->store, tmp, used);
+ p->csr = p->store + used;
+ }
+ else {
+ p->csr = p->store;
+ }
+
+ rtasm_exec_free(tmp);
+ }
+
+ if (p->store == NULL) {
+ p->store = p->csr = p->error_overflow;
+ p->size = sizeof(p->error_overflow);
+ }
+}
+
+/* Emit bytes to the instruction stream:
+ */
+static unsigned char *reserve( struct x86_function *p, int bytes )
+{
+ if (p->csr + bytes - p->store > (int) p->size)
+ do_realloc(p);
+
+ {
+ unsigned char *csr = p->csr;
+ p->csr += bytes;
+ return csr;
+ }
+}
+
+
+
+static void emit_1b( struct x86_function *p, char b0 )
+{
+ char *csr = (char *)reserve(p, 1);
+ *csr = b0;
+}
+
+static void emit_1i( struct x86_function *p, int i0 )
+{
+ int *icsr = (int *)reserve(p, sizeof(i0));
+ *icsr = i0;
+}
+
+static void emit_1ub( struct x86_function *p, unsigned char b0 )
+{
+ unsigned char *csr = reserve(p, 1);
+ *csr++ = b0;
+}
+
+static void emit_2ub( struct x86_function *p, unsigned char b0, unsigned char b1 )
+{
+ unsigned char *csr = reserve(p, 2);
+ *csr++ = b0;
+ *csr++ = b1;
+}
+
+static void emit_3ub( struct x86_function *p, unsigned char b0, unsigned char b1, unsigned char b2 )
+{
+ unsigned char *csr = reserve(p, 3);
+ *csr++ = b0;
+ *csr++ = b1;
+ *csr++ = b2;
+}
+
+
+/* Build a modRM byte + possible displacement. No treatment of SIB
+ * indexing. BZZT - no way to encode an absolute address.
+ *
+ * This is the "/r" field in the x86 manuals...
+ */
+static void emit_modrm( struct x86_function *p,
+ struct x86_reg reg,
+ struct x86_reg regmem )
+{
+ unsigned char val = 0;
+
+ assert(reg.mod == mod_REG);
+
+ val |= regmem.mod << 6; /* mod field */
+ val |= reg.idx << 3; /* reg field */
+ val |= regmem.idx; /* r/m field */
+
+ emit_1ub(p, val);
+
+ /* Oh-oh we've stumbled into the SIB thing.
+ */
+ if (regmem.file == file_REG32 &&
+ regmem.idx == reg_SP) {
+ emit_1ub(p, 0x24); /* simplistic! */
+ }
+
+ switch (regmem.mod) {
+ case mod_REG:
+ case mod_INDIRECT:
+ break;
+ case mod_DISP8:
+ emit_1b(p, (char) regmem.disp);
+ break;
+ case mod_DISP32:
+ emit_1i(p, regmem.disp);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+/* Emits the "/0".."/7" specialized versions of the modrm ("/r") bytes.
+ */
+static void emit_modrm_noreg( struct x86_function *p,
+ unsigned op,
+ struct x86_reg regmem )
+{
+ struct x86_reg dummy = x86_make_reg(file_REG32, op);
+ emit_modrm(p, dummy, regmem);
+}
+
+/* Many x86 instructions have two opcodes, one for the case where the
+ * destination is a register and one for the case where it is a memory
+ * reference.  This function selects the correct opcode based on the
+ * arguments presented.
+ */
+static void emit_op_modrm( struct x86_function *p,
+ unsigned char op_dst_is_reg,
+ unsigned char op_dst_is_mem,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ switch (dst.mod) {
+ case mod_REG:
+ emit_1ub(p, op_dst_is_reg);
+ emit_modrm(p, dst, src);
+ break;
+ case mod_INDIRECT:
+ case mod_DISP32:
+ case mod_DISP8:
+ assert(src.mod == mod_REG);
+ emit_1ub(p, op_dst_is_mem);
+ emit_modrm(p, src, dst);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
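+
+/* Worked example (illustrative, not part of the original code): with the
+ * helpers above and the register constructors below, emitting
+ *
+ *    x86_mov(p, x86_make_reg(file_REG32, reg_AX),
+ *            x86_make_disp(x86_make_reg(file_REG32, reg_BX), 8));
+ *
+ * selects the "dst is a register" opcode 0x8b (MOV r32, r/m32) and then
+ * packs the ModRM byte as (mod << 6) | (reg << 3) | rm, i.e.
+ * (mod_DISP8 << 6) | (reg_AX << 3) | reg_BX = 0x43, followed by the
+ * 8-bit displacement 0x08 -- three bytes in total: 8B 43 08.  This
+ * assumes the enum values in rtasm_x86sse.h match the hardware
+ * encodings, which the emitters rely on throughout.
+ */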
+
+
+
+
+
+
+
+/* Create and manipulate registers and regmem values:
+ */
+struct x86_reg x86_make_reg( enum x86_reg_file file,
+ enum x86_reg_name idx )
+{
+ struct x86_reg reg;
+
+ reg.file = file;
+ reg.idx = idx;
+ reg.mod = mod_REG;
+ reg.disp = 0;
+
+ return reg;
+}
+
+struct x86_reg x86_make_disp( struct x86_reg reg,
+ int disp )
+{
+ assert(reg.file == file_REG32);
+
+ if (reg.mod == mod_REG)
+ reg.disp = disp;
+ else
+ reg.disp += disp;
+
+ if (reg.disp == 0 && reg.idx != reg_BP)
+ reg.mod = mod_INDIRECT;
+ else if (reg.disp <= 127 && reg.disp >= -128)
+ reg.mod = mod_DISP8;
+ else
+ reg.mod = mod_DISP32;
+
+ return reg;
+}
+
+struct x86_reg x86_deref( struct x86_reg reg )
+{
+ return x86_make_disp(reg, 0);
+}
+
+struct x86_reg x86_get_base_reg( struct x86_reg reg )
+{
+ return x86_make_reg( reg.file, reg.idx );
+}
+
+int x86_get_label( struct x86_function *p )
+{
+ return p->csr - p->store;
+}
+
+
+
+/***********************************************************************
+ * x86 instructions
+ */
+
+
+void x86_jcc( struct x86_function *p,
+ enum x86_cc cc,
+ int label )
+{
+ int offset = label - (x86_get_label(p) + 2);
+ DUMP_I(cc);
+
+ if (offset < 0) {
+ assert(p->csr - p->store > -offset);
+ }
+
+ if (offset <= 127 && offset >= -128) {
+ emit_1ub(p, 0x70 + cc);
+ emit_1b(p, (char) offset);
+ }
+ else {
+ offset = label - (x86_get_label(p) + 6);
+ emit_2ub(p, 0x0f, 0x80 + cc);
+ emit_1i(p, offset);
+ }
+}
+
+/* Always use a 32-bit offset for forward jumps:
+ */
+int x86_jcc_forward( struct x86_function *p,
+ enum x86_cc cc )
+{
+ DUMP_I(cc);
+ emit_2ub(p, 0x0f, 0x80 + cc);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+int x86_jmp_forward( struct x86_function *p)
+{
+ DUMP();
+ emit_1ub(p, 0xe9);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+int x86_call_forward( struct x86_function *p)
+{
+ DUMP();
+
+ emit_1ub(p, 0xe8);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+/* Fixup offset from forward jump:
+ */
+void x86_fixup_fwd_jump( struct x86_function *p,
+ int fixup )
+{
+ *(int *)(p->store + fixup - 4) = x86_get_label(p) - fixup;
+}
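+
+/* Usage sketch (illustrative; counter and limit stand for any two
+ * x86_reg values, and cc_E is assumed to be one of the enum x86_cc
+ * values from rtasm_x86sse.h):
+ *
+ *    x86_cmp(p, counter, limit);
+ *    fixup = x86_jcc_forward(p, cc_E);    -- emits jcc with a 0 rel32
+ *    ...emit the fall-through code...
+ *    x86_fixup_fwd_jump(p, fixup);        -- patch rel32 to land here
+ *
+ * x86_jcc_forward() returns the label just past the 4-byte offset, and
+ * x86_fixup_fwd_jump() rewrites those bytes so the jump targets wherever
+ * emission has reached when the fixup is applied.
+ */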
+
+void x86_jmp( struct x86_function *p, int label)
+{
+ DUMP_I( label );
+ emit_1ub(p, 0xe9);
+ emit_1i(p, label - x86_get_label(p) - 4);
+}
+
+void x86_call( struct x86_function *p, struct x86_reg reg)
+{
+ DUMP_R( reg );
+ emit_1ub(p, 0xff);
+ emit_modrm_noreg(p, 2, reg);
+}
+
+
+/* michal:
+ * Temporary.  As I need immediate operands and don't want to mess with
+ * the codegen, I load the immediate into a general purpose register and
+ * use it.
+ */
+void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ assert(dst.mod == mod_REG);
+ emit_1ub(p, 0xb8 + dst.idx);
+ emit_1i(p, imm);
+}
+
+void x86_add_reg_imm8( struct x86_function *p, struct x86_reg dst, ubyte imm )
+{
+ DUMP_RI( dst, imm );
+ assert(dst.mod == mod_REG);
+ emit_1ub(p, 0x83);   /* ADD r/m32, imm8 (sign-extended); 0x80 is the 8-bit form */
+ emit_modrm_noreg(p, 0, dst);
+ emit_1ub(p, imm);
+}
+
+
+void x86_push( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ if (reg.mod == mod_REG)
+ emit_1ub(p, 0x50 + reg.idx);
+ else
+ {
+ emit_1ub(p, 0xff);
+ emit_modrm_noreg(p, 6, reg);
+ }
+
+
+ p->stack_offset += 4;
+}
+
+void x86_push_imm32( struct x86_function *p,
+ int imm32 )
+{
+ DUMP_I( imm32 );
+ emit_1ub(p, 0x68);
+ emit_1i(p, imm32);
+
+ p->stack_offset += 4;
+}
+
+
+void x86_pop( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x58 + reg.idx);
+ p->stack_offset -= 4;
+}
+
+void x86_inc( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x40 + reg.idx);
+}
+
+void x86_dec( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x48 + reg.idx);
+}
+
+void x86_ret( struct x86_function *p )
+{
+ DUMP();
+ assert(p->stack_offset == 0);
+ emit_1ub(p, 0xc3);
+}
+
+void x86_retw( struct x86_function *p, unsigned short imm )
+{
+ DUMP();
+ emit_3ub(p, 0xc2, imm & 0xff, (imm >> 8) & 0xff);
+}
+
+void x86_sahf( struct x86_function *p )
+{
+ DUMP();
+ emit_1ub(p, 0x9e);
+}
+
+void x86_mov( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x8b, 0x89, dst, src );
+}
+
+void x86_xor( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x33, 0x31, dst, src );
+}
+
+void x86_cmp( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x3b, 0x39, dst, src );
+}
+
+void x86_lea( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, 0x8d);
+ emit_modrm( p, dst, src );
+}
+
+void x86_test( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, 0x85);
+ emit_modrm( p, dst, src );
+}
+
+void x86_add( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm(p, 0x03, 0x01, dst, src );
+}
+
+/* Calculate EAX * src, results in EDX:EAX.
+ */
+void x86_mul( struct x86_function *p,
+ struct x86_reg src )
+{
+ DUMP_R( src );
+ emit_1ub(p, 0xf7);
+ emit_modrm_noreg(p, 4, src );
+}
+
+
+void x86_imul( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0xAF);
+ emit_modrm(p, dst, src);
+}
+
+
+void x86_sub( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm(p, 0x2b, 0x29, dst, src );
+}
+
+void x86_or( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x0b, 0x09, dst, src );
+}
+
+void x86_and( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x23, 0x21, dst, src );
+}
+
+
+
+/***********************************************************************
+ * SSE instructions
+ */
+
+
+void sse_movss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, 0xF3, X86_TWOB);
+ emit_op_modrm( p, 0x10, 0x11, dst, src );
+}
+
+void sse_movaps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x28, 0x29, dst, src );
+}
+
+void sse_movups( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x10, 0x11, dst, src );
+}
+
+void sse_movhps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod != mod_REG || src.mod != mod_REG);
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x16, 0x17, dst, src ); /* cf movlhps */
+}
+
+void sse_movlps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod != mod_REG || src.mod != mod_REG);
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x12, 0x13, dst, src ); /* cf movhlps */
+}
+
+void sse_maxps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5F);
+ emit_modrm( p, dst, src );
+}
+
+void sse_maxss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x5F);
+ emit_modrm( p, dst, src );
+}
+
+void sse_divss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x5E);
+ emit_modrm( p, dst, src );
+}
+
+void sse_minps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5D);
+ emit_modrm( p, dst, src );
+}
+
+void sse_subps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5C);
+ emit_modrm( p, dst, src );
+}
+
+void sse_mulps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x59);
+ emit_modrm( p, dst, src );
+}
+
+void sse_mulss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x59);
+ emit_modrm( p, dst, src );
+}
+
+void sse_addps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x58);
+ emit_modrm( p, dst, src );
+}
+
+void sse_addss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x58);
+ emit_modrm( p, dst, src );
+}
+
+void sse_andnps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x55);
+ emit_modrm( p, dst, src );
+}
+
+void sse_andps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x54);
+ emit_modrm( p, dst, src );
+}
+
+void sse_rsqrtps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x52);
+ emit_modrm( p, dst, src );
+}
+
+void sse_rsqrtss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x52);
+ emit_modrm( p, dst, src );
+}
+
+void sse_movhlps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod == mod_REG && src.mod == mod_REG);
+ emit_2ub(p, X86_TWOB, 0x12);
+ emit_modrm( p, dst, src );
+}
+
+void sse_movlhps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod == mod_REG && src.mod == mod_REG);
+ emit_2ub(p, X86_TWOB, 0x16);
+ emit_modrm( p, dst, src );
+}
+
+void sse_orps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x56);
+ emit_modrm( p, dst, src );
+}
+
+void sse_xorps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x57);
+ emit_modrm( p, dst, src );
+}
+
+void sse_cvtps2pi( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_XMM || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x2d);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_cvtdq2ps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5b);
+ emit_modrm( p, dst, src );
+}
+
+
+/* Shufps can also be used to implement a reduced swizzle when dest ==
+ * arg0.
+ */
+void sse_shufps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ unsigned char shuf)
+{
+ DUMP_RRI( dst, src, shuf );
+ emit_2ub(p, X86_TWOB, 0xC6);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, shuf);
+}
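+
+/* The shuf immediate packs four 2-bit lane selectors: bits 1:0 and 3:2
+ * pick dest lanes 0 and 1 from dst, bits 5:4 and 7:6 pick dest lanes 2
+ * and 3 from src.  For example (illustrative), with src == dst a shuf
+ * of 0x00 broadcasts lane 0 across the register and 0x1b reverses the
+ * four lanes.
+ */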
+
+void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub( p, X86_TWOB, 0x15 );
+ emit_modrm( p, dst, src );
+}
+
+void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub( p, X86_TWOB, 0x14 );
+ emit_modrm( p, dst, src );
+}
+
+void sse_cmpps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ enum sse_cc cc)
+{
+ DUMP_RRI( dst, src, cc );
+ emit_2ub(p, X86_TWOB, 0xC2);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, cc);
+}
+
+void sse_pmovmskb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0xD7);
+ emit_modrm(p, dst, src);
+}
+
+/***********************************************************************
+ * SSE2 instructions
+ */
+
+/**
+ * Perform a reduced swizzle:
+ */
+void sse2_pshufd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ unsigned char shuf)
+{
+ DUMP_RRI( dst, src, shuf );
+ emit_3ub(p, 0x66, X86_TWOB, 0x70);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, shuf);
+}
+
+void sse2_cvttps2dq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub( p, 0xF3, X86_TWOB, 0x5B );
+ emit_modrm( p, dst, src );
+}
+
+void sse2_cvtps2dq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x5B);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packssdw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x6B);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packsswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x63);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packuswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x67);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_punpcklbw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x60);
+ emit_modrm( p, dst, src );
+}
+
+
+void sse2_rcpps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x53);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_rcpss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x53);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_movd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, 0x66, X86_TWOB);
+ emit_op_modrm( p, 0x6e, 0x7e, dst, src );
+}
+
+
+
+
+/***********************************************************************
+ * x87 instructions
+ */
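+
+/* The emitters below track the depth of the x87 register stack in
+ * p->x87_stack, so that generated code can be checked for balanced
+ * pushes and pops (see x87_assert_stack_empty()).
+ */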
+static void note_x87_pop( struct x86_function *p )
+{
+ p->x87_stack--;
+ assert(p->x87_stack >= 0);
+}
+
+static void note_x87_push( struct x86_function *p )
+{
+ p->x87_stack++;
+ assert(p->x87_stack <= 7);
+}
+
+void x87_assert_stack_empty( struct x86_function *p )
+{
+ assert (p->x87_stack == 0);
+}
+
+
+void x87_fist( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ emit_1ub(p, 0xdb);
+ emit_modrm_noreg(p, 2, dst);
+}
+
+void x87_fistp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ emit_1ub(p, 0xdb);
+ emit_modrm_noreg(p, 3, dst);
+ note_x87_pop(p);
+}
+
+void x87_fild( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ emit_1ub(p, 0xdf);
+ emit_modrm_noreg(p, 0, arg);
+ note_x87_push(p);
+}
+
+void x87_fldz( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xee);
+ note_x87_push(p);
+}
+
+
+void x87_fldcw( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_REG32);
+ assert(arg.mod != mod_REG);
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 5, arg);
+}
+
+void x87_fld1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe8);
+ note_x87_push(p);
+}
+
+void x87_fldl2e( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xea);
+ note_x87_push(p);
+}
+
+void x87_fldln2( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xed);
+ note_x87_push(p);
+}
+
+void x87_fwait( struct x86_function *p )
+{
+ DUMP();
+ emit_1ub(p, 0x9b);
+}
+
+void x87_fnclex( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xdb, 0xe2);
+}
+
+void x87_fclex( struct x86_function *p )
+{
+ x87_fwait(p);
+ x87_fnclex(p);
+}
+
+void x87_fcmovb( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xc0+arg.idx);
+}
+
+void x87_fcmove( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xc8+arg.idx);
+}
+
+void x87_fcmovbe( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xd0+arg.idx);
+}
+
+void x87_fcmovnb( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xc0+arg.idx);
+}
+
+void x87_fcmovne( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xc8+arg.idx);
+}
+
+void x87_fcmovnbe( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xd0+arg.idx);
+}
+
+
+
+static void x87_arith_op( struct x86_function *p, struct x86_reg dst, struct x86_reg arg,
+ unsigned char dst0ub0,
+ unsigned char dst0ub1,
+ unsigned char arg0ub0,
+ unsigned char arg0ub1,
+ unsigned char argmem_noreg)
+{
+ assert(dst.file == file_x87);
+
+ if (arg.file == file_x87) {
+ if (dst.idx == 0)
+ emit_2ub(p, dst0ub0, dst0ub1+arg.idx);
+ else if (arg.idx == 0)
+ emit_2ub(p, arg0ub0, arg0ub1+dst.idx);   /* arg.idx is 0 here */
+ else
+ assert(0);
+ }
+ else if (dst.idx == 0) {
+ assert(arg.file == file_REG32);
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, argmem_noreg, arg);
+ }
+ else
+ assert(0);
+}
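+
+/* Worked example (illustrative): with st0 = x86_make_reg(file_x87, 0)
+ * and st3 = x86_make_reg(file_x87, 3), x87_fadd(p, st0, st3) takes the
+ * dst.idx == 0 branch and emits D8 C3 (FADD ST(0), ST(3)), while
+ * x87_fadd(p, st3, st0) takes the arg.idx == 0 branch and emits DC C3
+ * (FADD ST(3), ST(0)).  Only ST(0) can be paired with a memory operand,
+ * via the D8 /n forms.
+ */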
+
+void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xc8,
+ 0xdc, 0xc8,
+ 1);   /* FMUL m32fp is D8 /1 */
+}
+
+void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xe0,
+ 0xdc, 0xe8,
+ 4);
+}
+
+void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xe8,
+ 0xdc, 0xe0,
+ 5);
+}
+
+void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xc0,
+ 0xdc, 0xc0,
+ 0);
+}
+
+void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xf0,
+ 0xdc, 0xf8,
+ 6);
+}
+
+void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xf8,
+ 0xdc, 0xf0,
+ 7);
+}
+
+void x87_fmulp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xc8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fsubp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xe8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fsubrp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xe0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_faddp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xc0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fdivp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xf8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fdivrp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xf0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_ftst( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe4);
+}
+
+void x87_fucom( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdd, 0xe0+arg.idx);
+}
+
+void x87_fucomp( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdd, 0xe8+arg.idx);
+ note_x87_pop(p);
+}
+
+void x87_fucompp( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xda, 0xe9);
+ note_x87_pop(p); /* pop twice */
+ note_x87_pop(p); /* pop twice */
+}
+
+void x87_fxch( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xd9, 0xc8+arg.idx);
+}
+
+void x87_fabs( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe1);
+}
+
+void x87_fchs( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe0);
+}
+
+void x87_fcos( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xff);
+}
+
+
+void x87_fprndint( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfc);
+}
+
+void x87_fscale( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfd);
+}
+
+void x87_fsin( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfe);
+}
+
+void x87_fsincos( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfb);
+}
+
+void x87_fsqrt( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfa);
+}
+
+void x87_fxtract( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf4);
+}
+
+/* st0 = (2^st0)-1
+ *
+ * Restrictions: -1.0 <= st0 <= 1.0
+ */
+void x87_f2xm1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf0);
+}
+
+/* st1 = st1 * log2(st0);
+ * pop_stack;
+ */
+void x87_fyl2x( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf1);
+ note_x87_pop(p);
+}
+
+/* st1 = st1 * log2(st0 + 1.0);
+ * pop_stack;
+ *
+ * A fast operation, with restrictions: -.29 < st0 < .29
+ */
+void x87_fyl2xp1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf9);
+ note_x87_pop(p);
+}
+
+
+void x87_fld( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ if (arg.file == file_x87)
+ emit_2ub(p, 0xd9, 0xc0 + arg.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 0, arg);
+ }
+ note_x87_push(p);
+}
+
+void x87_fst( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xdd, 0xd0 + dst.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 2, dst);
+ }
+}
+
+void x87_fstp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xdd, 0xd8 + dst.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 3, dst);
+ }
+ note_x87_pop(p);
+}
+
+void x87_fpop( struct x86_function *p )
+{
+ x87_fstp( p, x86_make_reg( file_x87, 0 ));
+}
+
+
+void x87_fcom( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xd8, 0xd0 + dst.idx);
+ else {
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, 2, dst);
+ }
+}
+
+
+void x87_fcomp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xd8, 0xd8 + dst.idx);
+ else {
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, 3, dst);
+ }
+ note_x87_pop(p);
+}
+
+void x87_fcomi( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ emit_2ub(p, 0xdb, 0xf0+arg.idx);
+}
+
+void x87_fcomip( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ emit_2ub(p, 0xdf, 0xf0+arg.idx);   /* FCOMIP is DF F0+i */
+ note_x87_pop(p);
+}
+
+
+void x87_fnstsw( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_REG32);
+
+ if (dst.idx == reg_AX &&
+ dst.mod == mod_REG)
+ emit_2ub(p, 0xdf, 0xe0);
+ else {
+ emit_1ub(p, 0xdd);
+ emit_modrm_noreg(p, 7, dst);
+ }
+}
+
+
+void x87_fnstcw( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_REG32);
+
+ emit_1ub(p, 0x9b); /* WAIT -- needed? */
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 7, dst);
+}
+
+
+
+
+/***********************************************************************
+ * MMX instructions
+ */
+
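+/* Each MMX emitter sets p->need_emms so callers know an EMMS must be
+ * issued (see mmx_emms()) before any following x87 code runs.
+ */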
+void mmx_emms( struct x86_function *p )
+{
+ DUMP();
+ assert(p->need_emms);
+ emit_2ub(p, 0x0f, 0x77);
+ p->need_emms = 0;
+}
+
+void mmx_packssdw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_MMX || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x6b);
+ emit_modrm( p, dst, src );
+}
+
+void mmx_packuswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_MMX || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x67);
+ emit_modrm( p, dst, src );
+}
+
+void mmx_movd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ p->need_emms = 1;
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x6e, 0x7e, dst, src );
+}
+
+void mmx_movq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ p->need_emms = 1;
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x6f, 0x7f, dst, src );
+}
+
+
+/***********************************************************************
+ * Helper functions
+ */
+
+
+void x86_cdecl_caller_push_regs( struct x86_function *p )
+{
+ x86_push(p, x86_make_reg(file_REG32, reg_AX));
+ x86_push(p, x86_make_reg(file_REG32, reg_CX));
+ x86_push(p, x86_make_reg(file_REG32, reg_DX));
+}
+
+void x86_cdecl_caller_pop_regs( struct x86_function *p )
+{
+ x86_pop(p, x86_make_reg(file_REG32, reg_DX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_CX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_AX));
+}
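+
+/* Call-out sketch (illustrative; helper_fn is a hypothetical cdecl C
+ * function taking no arguments): preserving the caller-saved registers
+ * around a call made from generated code:
+ *
+ *    x86_cdecl_caller_push_regs(p);
+ *    x86_mov_reg_imm(p, x86_make_reg(file_REG32, reg_AX), (int) helper_fn);
+ *    x86_call(p, x86_make_reg(file_REG32, reg_AX));
+ *    x86_cdecl_caller_pop_regs(p);
+ */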
+
+
+/* Retrieve a reference to one of the function arguments, taking into
+ * account any push/pop activity:
+ */
+struct x86_reg x86_fn_arg( struct x86_function *p,
+ unsigned arg )
+{
+ return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
+ p->stack_offset + arg * 4); /* ??? */
+}
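+
+/* For example (illustrative), in a freshly initialized function with no
+ * pushes yet, x86_fn_arg(p, 1) resolves to [ESP + 4], the first cdecl
+ * argument sitting just above the return address; stack_offset keeps the
+ * same call valid after subsequent x86_push()/x86_pop() activity.
+ */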
+
+
+void x86_init_func( struct x86_function *p )
+{
+ p->size = 0;
+ p->store = NULL;
+ p->csr = p->store;
+ DUMP_START();
+}
+
+void x86_init_func_size( struct x86_function *p, unsigned code_size )
+{
+ p->size = code_size;
+ p->store = rtasm_exec_malloc(code_size);
+ if (p->store == NULL) {
+ p->store = p->error_overflow;
+ }
+ p->csr = p->store;
+ DUMP_START();
+}
+
+void x86_release_func( struct x86_function *p )
+{
+ if (p->store && p->store != p->error_overflow)
+ rtasm_exec_free(p->store);
+
+ p->store = NULL;
+ p->csr = NULL;
+ p->size = 0;
+}
+
+
+void (*x86_get_func( struct x86_function *p ))(void)
+{
+ DUMP_END();
+ if (DISASSEM && p->store)
+ debug_printf("disassemble %p %p\n", p->store, p->csr);
+
+ if (p->store == p->error_overflow)
+ return (void (*)(void)) NULL;
+ else
+ return (void (*)(void)) p->store;
+}
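+
+/* End-to-end sketch (illustrative, not part of the original code): build
+ * a tiny function that returns its first argument, then call it.
+ *
+ *    struct x86_function f;
+ *    int (*fn)( int );
+ *
+ *    x86_init_func( &f );
+ *    x86_mov( &f, x86_make_reg( file_REG32, reg_AX ), x86_fn_arg( &f, 1 ) );
+ *    x86_ret( &f );
+ *
+ *    fn = (int (*)( int )) x86_get_func( &f );
+ *    if (fn)
+ *       assert( fn( 42 ) == 42 );
+ *    x86_release_func( &f );
+ */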
+
+#else
+
+void x86sse_dummy( void )
+{
+}
+
+#endif