author     Keith Whitwell <[email protected]>    2007-05-24 11:37:08 +0100
committer  Keith Whitwell <[email protected]>    2007-05-24 11:37:08 +0100
commit     b1dc66b7bd9b13498204c32182d0935d3a7d3eec (patch)
tree       f2ca79880665d0b8bb19a831e7c2b6206e520a9d
parent     b939adfa155f2b3ca5c5226e86da85629654d79b (diff)
Add the vf module.
This is a cleaned-up version of the code in tnl/t_vertex*.
-rw-r--r--   src/mesa/vf/vf.c           372
-rw-r--r--   src/mesa/vf/vf.h           226
-rw-r--r--   src/mesa/vf/vf_generic.c   981
-rw-r--r--   src/mesa/vf/vf_sse.c       664
4 files changed, 2243 insertions, 0 deletions
diff --git a/src/mesa/vf/vf.c b/src/mesa/vf/vf.c
new file mode 100644
index 00000000000..cb25f2e1131
--- /dev/null
+++ b/src/mesa/vf/vf.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2003 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <[email protected]>
+ */
+
+#include "glheader.h"
+#include "context.h"
+#include "colormac.h"
+
+#include "vf.h"
+
+#define DBG 0
+
+
+
+static GLboolean match_fastpath( struct vertex_fetch *vf,
+ const struct vf_fastpath *fp)
+{
+ GLuint j;
+
+ if (vf->attr_count != fp->attr_count)
+ return GL_FALSE;
+
+ for (j = 0; j < vf->attr_count; j++)
+ if (vf->attr[j].format != fp->attr[j].format ||
+ vf->attr[j].inputsize != fp->attr[j].size ||
+ vf->attr[j].vertoffset != fp->attr[j].offset)
+ return GL_FALSE;
+
+ if (fp->match_strides) {
+ if (vf->vertex_stride != fp->vertex_stride)
+ return GL_FALSE;
+
+ for (j = 0; j < vf->attr_count; j++)
+ if (vf->attr[j].inputstride != fp->attr[j].stride)
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+static GLboolean search_fastpath_emit( struct vertex_fetch *vf )
+{
+ struct vf_fastpath *fp = vf->fastpath;
+
+ for ( ; fp ; fp = fp->next) {
+ if (match_fastpath(vf, fp)) {
+ vf->emit = fp->func;
+ return GL_TRUE;
+ }
+ }
+
+ return GL_FALSE;
+}
+
+void vf_register_fastpath( struct vertex_fetch *vf,
+ GLboolean match_strides )
+{
+ struct vf_fastpath *fastpath = CALLOC_STRUCT(vf_fastpath);
+ GLuint i;
+
+ fastpath->vertex_stride = vf->vertex_stride;
+ fastpath->attr_count = vf->attr_count;
+ fastpath->match_strides = match_strides;
+ fastpath->func = vf->emit;
+ fastpath->attr = (struct vf_attr_type *)
+ _mesa_malloc(vf->attr_count * sizeof(fastpath->attr[0]));
+
+ for (i = 0; i < vf->attr_count; i++) {
+ fastpath->attr[i].format = vf->attr[i].format;
+ fastpath->attr[i].stride = vf->attr[i].inputstride;
+ fastpath->attr[i].size = vf->attr[i].inputsize;
+ fastpath->attr[i].offset = vf->attr[i].vertoffset;
+ }
+
+ fastpath->next = vf->fastpath;
+ vf->fastpath = fastpath;
+}
+
+
+
+
+/***********************************************************************
+ * Build codegen functions or return generic ones:
+ */
+static void choose_emit_func( struct vertex_fetch *vf,
+ GLuint count,
+ GLubyte *dest)
+{
+ vf->emit = NULL;
+
+ /* Does this match an existing (hardwired, codegen or known-bad)
+ * fastpath?
+ */
+ if (search_fastpath_emit(vf)) {
+ /* Use this result. If it is null, then it is already known
+ * that the current state will fail for codegen and there is no
+ * point trying again.
+ */
+ }
+ else if (vf->codegen_emit) {
+ vf->codegen_emit( vf );
+ }
+
+ if (!vf->emit) {
+ vf_generate_hardwired_emit(vf);
+ }
+
+ /* Otherwise use the generic version:
+ */
+ if (!vf->emit)
+ vf->emit = vf_generic_emit;
+
+ vf->emit( vf, count, dest );
+}
+
+
+
+
+
+/***********************************************************************
+ * Public entrypoints, mostly dispatch to the above:
+ */
+
+
+
+GLuint vf_set_vertex_attributes( struct vertex_fetch *vf,
+ const struct vf_attr_map *map,
+ GLuint nr,
+ GLuint vertex_stride )
+{
+ GLuint offset = 0;
+ GLuint i, j;
+
+ assert(nr < VF_ATTRIB_MAX);
+
+ memset(vf->lookup, 0, sizeof(vf->lookup));
+
+ for (j = 0, i = 0; i < nr; i++) {
+ const GLuint format = map[i].format;
+ if (format == EMIT_PAD) {
+ if (DBG)
+ _mesa_printf("%d: pad %d, offset %d\n", i,
+ map[i].offset, offset);
+
+ offset += map[i].offset;
+
+ }
+ else {
+ assert(vf->lookup[map[i].attrib] == 0);
+ vf->lookup[map[i].attrib] = &vf->attr[j];
+
+ vf->attr[j].attrib = map[i].attrib;
+ vf->attr[j].format = format;
+ vf->attr[j].insert = vf_format_info[format].insert;
+ vf->attr[j].extract = vf_format_info[format].extract;
+ vf->attr[j].vertattrsize = vf_format_info[format].attrsize;
+ vf->attr[j].vertoffset = offset;
+
+ if (DBG)
+ _mesa_printf("%d: %s, offset %d\n", i,
+ vf_format_info[format].name,
+ vf->attr[j].vertoffset);
+
+ offset += vf_format_info[format].attrsize;
+ j++;
+ }
+ }
+
+ vf->attr_count = j;
+ vf->vertex_stride = vertex_stride ? vertex_stride : offset;
+ vf->emit = choose_emit_func;
+
+ assert(vf->vertex_stride >= offset);
+ return vf->vertex_stride;
+}
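
For illustration, a minimal sketch (not part of the patch) of how a caller might drive this entry point; the attribute selection and the helper name are hypothetical, and only the API declared in vf.h below is assumed:

    #include "vf/vf.h"

    /* Describe a hardware vertex as window-space position + packed BGRA
     * color + one texcoord, and let vf compute the packed layout.
     */
    static GLuint setup_example_layout( struct vertex_fetch *vf )
    {
       static const struct vf_attr_map attrs[] = {
          { VF_ATTRIB_POS,    EMIT_4F_VIEWPORT, 0 },  /* 16 bytes */
          { VF_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, 0 },  /*  4 bytes */
          { VF_ATTRIB_TEX0,   EMIT_2F,          0 },  /*  8 bytes */
       };

       /* The .offset field is only consulted for EMIT_PAD entries.
        * Passing 0 for vertex_stride requests tight packing; the return
        * value is the resulting stride (28 bytes here).
        */
       return vf_set_vertex_attributes( vf, attrs, 3, 0 );
    }
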
+
+
+
+void vf_set_vp_matrix( struct vertex_fetch *vf,
+ const GLfloat *viewport )
+{
+ assert(vf->allow_viewport_emits);
+
+ /* scale */
+ vf->vp[0] = viewport[MAT_SX];
+ vf->vp[1] = viewport[MAT_SY];
+ vf->vp[2] = viewport[MAT_SZ];
+ vf->vp[3] = 1.0;
+
+ /* translate */
+ vf->vp[4] = viewport[MAT_TX];
+ vf->vp[5] = viewport[MAT_TY];
+ vf->vp[6] = viewport[MAT_TZ];
+ vf->vp[7] = 0.0;
+}
+
+void vf_set_vp_scale_translate( struct vertex_fetch *vf,
+ const GLfloat *scale,
+ const GLfloat *translate )
+{
+ assert(vf->allow_viewport_emits);
+
+ vf->vp[0] = scale[0];
+ vf->vp[1] = scale[1];
+ vf->vp[2] = scale[2];
+ vf->vp[3] = scale[3];
+
+ vf->vp[4] = translate[0];
+ vf->vp[5] = translate[1];
+ vf->vp[6] = translate[2];
+ vf->vp[7] = translate[3];
+}
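
The eight floats stored here are consumed by the *_VIEWPORT insert functions in vf_generic.c as a per-component scale (vp[0..3]) and translate (vp[4..7]). A scalar sketch of the mapping those emit paths apply to each position (the helper name is hypothetical):

    /* window = scale * ndc + translate, per component; w passes through,
     * matching insert_4f_viewport_4() in vf_generic.c.
     */
    static void apply_vp_sketch( const GLfloat vp[8],
                                 const GLfloat in[4], GLfloat out[4] )
    {
       out[0] = vp[0] * in[0] + vp[4];   /* x */
       out[1] = vp[1] * in[1] + vp[5];   /* y */
       out[2] = vp[2] * in[2] + vp[6];   /* z */
       out[3] = in[3];                   /* w */
    }
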
+
+
+/* Set attribute pointers, adjusted for start position:
+ */
+void vf_set_sources( struct vertex_fetch *vf,
+ GLvector4f * const sources[],
+ GLuint start )
+{
+ struct vf_attr *a = vf->attr;
+ GLuint j;
+
+ for (j = 0; j < vf->attr_count; j++) {
+ const GLvector4f *vptr = sources[a[j].attrib];
+
+ if ((a[j].inputstride != vptr->stride) ||
+ (a[j].inputsize != vptr->size))
+ vf->emit = choose_emit_func;
+
+ a[j].inputstride = vptr->stride;
+ a[j].inputsize = vptr->size;
+ a[j].do_insert = a[j].insert[vptr->size - 1];
+ a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride;
+ }
+}
+
+
+
+/* Emit count VB vertices to dest.
+ */
+void vf_emit_vertices( struct vertex_fetch *vf,
+ GLuint count,
+ void *dest )
+{
+ vf->emit( vf, count, (GLubyte*) dest );
+}
+
+
+/* Extract a named attribute from a hardware vertex. Will have to
+ * reverse any viewport transformation, swizzling or other conversions
+ * which may have been applied.
+ *
+ * This is mainly required for on-the-fly vertex translations to
+ * swrast format.
+ */
+void vf_get_attr( struct vertex_fetch *vf,
+ const void *vertex,
+ GLenum attr,
+ const GLfloat *dflt,
+ GLfloat *dest )
+{
+ const struct vf_attr *a = vf->attr;
+ const GLuint attr_count = vf->attr_count;
+ GLuint j;
+
+ for (j = 0; j < attr_count; j++) {
+ if (a[j].attrib == attr) {
+ a[j].extract( &a[j], dest, (GLubyte *)vertex + a[j].vertoffset );
+ return;
+ }
+ }
+
+ /* Else return the value from ctx->Current.
+ */
+ _mesa_memcpy( dest, dflt, 4*sizeof(GLfloat));
+}
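
A small, hypothetical example of this read-back path: recover the position attribute from an emitted hardware vertex (the extract functions undo any viewport transform), falling back to a caller-supplied default when the vertex layout has no such attribute:

    static void read_back_pos( struct vertex_fetch *vf,
                               const void *hw_vertex, GLfloat pos[4] )
    {
       static const GLfloat dflt[4] = { 0.0F, 0.0F, 0.0F, 1.0F };
       vf_get_attr( vf, hw_vertex, VF_ATTRIB_POS, dflt, pos );
    }
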
+
+
+
+
+struct vertex_fetch *vf_create( GLboolean allow_viewport_emits )
+{
+ struct vertex_fetch *vf = CALLOC_STRUCT(vertex_fetch);
+ GLuint i;
+
+ for (i = 0; i < VF_ATTRIB_MAX; i++)
+ vf->attr[i].vf = vf;
+
+ vf->allow_viewport_emits = allow_viewport_emits;
+
+ switch(CHAN_TYPE) {
+ case GL_UNSIGNED_BYTE:
+ vf->chan_scale[0] = 255.0;
+ vf->chan_scale[1] = 255.0;
+ vf->chan_scale[2] = 255.0;
+ vf->chan_scale[3] = 255.0;
+ break;
+ case GL_UNSIGNED_SHORT:
+ vf->chan_scale[0] = 65535.0;
+ vf->chan_scale[1] = 65535.0;
+ vf->chan_scale[2] = 65535.0;
+ vf->chan_scale[3] = 65535.0;
+ break;
+ default:
+ vf->chan_scale[0] = 1.0;
+ vf->chan_scale[1] = 1.0;
+ vf->chan_scale[2] = 1.0;
+ vf->chan_scale[3] = 1.0;
+ break;
+ }
+
+ vf->identity[0] = 0.0;
+ vf->identity[1] = 0.0;
+ vf->identity[2] = 0.0;
+ vf->identity[3] = 1.0;
+
+ vf->codegen_emit = NULL;
+
+#ifdef USE_SSE_ASM
+ if (!_mesa_getenv("MESA_NO_CODEGEN"))
+ vf->codegen_emit = vf_generate_sse_emit;
+#endif
+
+ return vf;
+}
+
+
+void vf_destroy( struct vertex_fetch *vf )
+{
+ struct vf_fastpath *fp, *tmp;
+
+ for (fp = vf->fastpath ; fp ; fp = tmp) {
+ tmp = fp->next;
+ FREE(fp->attr);
+
+ /* KW: At the moment, fp->func is constrained to be allocated by
+ * _mesa_exec_alloc(), as the hardwired fastpaths in vf_generic.c
+ * are handled specially and never registered here. It would be nice
+ * to unify them, but this probably won't change until this
+ * module gets another overhaul.
+ */
+ _mesa_exec_free((void *) fp->func);
+ FREE(fp);
+ }
+
+ vf->fastpath = NULL;
+ FREE(vf);
+}
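
Taken together, the public entrypoints are meant to be used roughly as in the hedged sketch below (everything other than the vf_* calls is illustrative). Note the lazy dispatch: vf_set_vertex_attributes() and vf_set_sources() install choose_emit_func as vf->emit, so the fastpath/codegen/generic decision is made on the next vf_emit_vertices() call:

    #include "vf/vf.h"

    static void emit_example( GLvector4f * const inputs[VF_ATTRIB_MAX],
                              GLuint start, GLuint count, void *hw_buffer )
    {
       static const struct vf_attr_map attrs[] = {
          { VF_ATTRIB_POS,    EMIT_4F_VIEWPORT, 0 },
          { VF_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA, 0 },
       };
       /* Example 640x480 viewport with a y flip and [0,1] depth range. */
       const GLfloat scale[4]     = { 320.0F, -240.0F, 0.5F, 1.0F };
       const GLfloat translate[4] = { 320.0F,  240.0F, 0.5F, 0.0F };

       struct vertex_fetch *vf = vf_create( GL_TRUE );

       vf_set_vertex_attributes( vf, attrs, 2, 0 );
       vf_set_vp_scale_translate( vf, scale, translate );

       /* Point each active attribute at its source vector (indexed by
        * VF_ATTRIB_*), offset by 'start', then convert 'count' vertices
        * into the packed hardware layout.
        */
       vf_set_sources( vf, inputs, start );
       vf_emit_vertices( vf, count, hw_buffer );

       vf_destroy( vf );
    }
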
diff --git a/src/mesa/vf/vf.h b/src/mesa/vf/vf.h
new file mode 100644
index 00000000000..fcbf490ce14
--- /dev/null
+++ b/src/mesa/vf/vf.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2003 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <[email protected]>
+ */
+
+#ifndef VF_VERTEX_H
+#define VF_VERTEX_H
+
+#include "mtypes.h"
+#include "m_vector.h"
+
+enum {
+ VF_ATTRIB_POS = 0,
+ VF_ATTRIB_WEIGHT = 1,
+ VF_ATTRIB_NORMAL = 2,
+ VF_ATTRIB_COLOR0 = 3,
+ VF_ATTRIB_COLOR1 = 4,
+ VF_ATTRIB_FOG = 5,
+ VF_ATTRIB_COLOR_INDEX = 6,
+ VF_ATTRIB_EDGEFLAG = 7,
+ VF_ATTRIB_TEX0 = 8,
+ VF_ATTRIB_TEX1 = 9,
+ VF_ATTRIB_TEX2 = 10,
+ VF_ATTRIB_TEX3 = 11,
+ VF_ATTRIB_TEX4 = 12,
+ VF_ATTRIB_TEX5 = 13,
+ VF_ATTRIB_TEX6 = 14,
+ VF_ATTRIB_TEX7 = 15,
+ VF_ATTRIB_POINTSIZE = 16,
+ VF_ATTRIB_BFC0 = 17,
+ VF_ATTRIB_BFC1 = 18,
+ VF_ATTRIB_CLIP_POS = 19,
+ VF_ATTRIB_VERTEX_HEADER = 20,
+ VF_ATTRIB_MAX = 21
+};
+
+
+enum vf_attr_format {
+ EMIT_1F,
+ EMIT_2F,
+ EMIT_3F,
+ EMIT_4F,
+ EMIT_2F_VIEWPORT, /* do viewport transform and emit */
+ EMIT_3F_VIEWPORT, /* do viewport transform and emit */
+ EMIT_4F_VIEWPORT, /* do viewport transform and emit */
+ EMIT_3F_XYW, /* for projective texture */
+ EMIT_1UB_1F, /* for fog coordinate */
+ EMIT_3UB_3F_RGB, /* for specular color */
+ EMIT_3UB_3F_BGR, /* for specular color */
+ EMIT_4UB_4F_RGBA, /* for color */
+ EMIT_4UB_4F_BGRA, /* for color */
+ EMIT_4UB_4F_ARGB, /* for color */
+ EMIT_4UB_4F_ABGR, /* for color */
+ EMIT_4CHAN_4F_RGBA, /* for swrast color */
+ EMIT_PAD, /* leave a hole of 'offset' bytes */
+ EMIT_MAX
+};
+
+struct vf_attr_map {
+ GLuint attrib;
+ enum vf_attr_format format;
+ GLuint offset;
+};
+
+struct vertex_fetch;
+
+void vf_set_vp_matrix( struct vertex_fetch *vf,
+ const GLfloat *viewport );
+
+void vf_set_vp_scale_translate( struct vertex_fetch *vf,
+ const GLfloat *scale,
+ const GLfloat *translate );
+
+GLuint vf_set_vertex_attributes( struct vertex_fetch *vf,
+ const struct vf_attr_map *map,
+ GLuint nr,
+ GLuint vertex_stride );
+
+void vf_set_sources( struct vertex_fetch *vf,
+ GLvector4f * const attrib[],
+ GLuint start );
+
+void vf_emit_vertices( struct vertex_fetch *vf,
+ GLuint count,
+ void *dest );
+
+void vf_get_attr( struct vertex_fetch *vf,
+ const void *vertex,
+ GLenum attr,
+ const GLfloat *dflt,
+ GLfloat *dest );
+
+struct vertex_fetch *vf_create( GLboolean allow_viewport_emits );
+
+void vf_destroy( struct vertex_fetch *vf );
+
+
+
+/***********************************************************************
+ * Internal functions and structs:
+ */
+
+struct vf_attr;
+
+typedef void (*vf_extract_func)( const struct vf_attr *a,
+ GLfloat *out,
+ const GLubyte *v );
+
+typedef void (*vf_insert_func)( const struct vf_attr *a,
+ GLubyte *v,
+ const GLfloat *in );
+
+typedef void (*vf_emit_func)( struct vertex_fetch *vf,
+ GLuint count,
+ GLubyte *dest );
+
+
+
+/* Describes how to convert/move a vertex attribute from a vertex
+ * array to a vertex structure.
+ */
+struct vf_attr
+{
+ struct vertex_fetch *vf;
+
+ GLuint format;
+ GLuint inputsize;
+ GLuint inputstride;
+ GLuint vertoffset; /* position of the attrib in the vertex struct */
+
+ GLuint attrib; /* which vertex attrib (0=position, etc) */
+ GLuint vertattrsize; /* size of the attribute in bytes */
+
+ GLubyte *inputptr;
+ const vf_insert_func *insert;
+ vf_insert_func do_insert;
+ vf_extract_func extract;
+};
+
+struct vertex_fetch
+{
+ struct vf_attr attr[VF_ATTRIB_MAX];
+ GLuint attr_count;
+ GLuint vertex_stride;
+
+ struct vf_attr *lookup[VF_ATTRIB_MAX];
+
+ vf_emit_func emit;
+
+ /* Parameters and constants for codegen:
+ */
+ GLboolean allow_viewport_emits;
+ GLfloat vp[8];
+ GLfloat chan_scale[4];
+ GLfloat identity[4];
+
+ struct vf_fastpath *fastpath;
+
+ void (*codegen_emit)( struct vertex_fetch *vf );
+};
+
+
+struct vf_attr_type {
+ GLuint format;
+ GLuint size;
+ GLuint stride;
+ GLuint offset;
+};
+
+struct vf_fastpath {
+ GLuint vertex_stride;
+ GLuint attr_count;
+ GLboolean match_strides;
+
+ struct vf_attr_type *attr;
+
+ vf_emit_func func;
+ struct vf_fastpath *next;
+};
+
+
+void vf_register_fastpath( struct vertex_fetch *vtx,
+ GLboolean match_strides );
+
+void vf_generic_emit( struct vertex_fetch *vf,
+ GLuint count,
+ GLubyte *v );
+
+void vf_generate_hardwired_emit( struct vertex_fetch *vf );
+
+void vf_generate_sse_emit( struct vertex_fetch *vf );
+
+
+struct vf_format_info {
+ const char *name;
+ vf_extract_func extract;
+ vf_insert_func insert[4];
+ const GLuint attrsize;
+};
+
+extern const struct vf_format_info vf_format_info[EMIT_MAX];
+
+
+#endif
diff --git a/src/mesa/vf/vf_generic.c b/src/mesa/vf/vf_generic.c
new file mode 100644
index 00000000000..68d8d0897b8
--- /dev/null
+++ b/src/mesa/vf/vf_generic.c
@@ -0,0 +1,981 @@
+
+/*
+ * Copyright 2003 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <[email protected]>
+ */
+
+#include "glheader.h"
+#include "context.h"
+#include "colormac.h"
+#include "simple_list.h"
+
+#include "vf/vf.h"
+
+
+/*
+ * These functions take the NDC coordinates pointed to by 'in', apply the
+ * NDC->Viewport mapping and store the results at 'v'.
+ */
+
+static INLINE void insert_4f_viewport_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+ out[2] = scale[2] * in[2] + trans[2];
+ out[3] = in[3];
+}
+
+static INLINE void insert_4f_viewport_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+ out[2] = scale[2] * in[2] + trans[2];
+ out[3] = 1;
+}
+
+static INLINE void insert_4f_viewport_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+ out[2] = trans[2];
+ out[3] = 1;
+}
+
+static INLINE void insert_4f_viewport_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = trans[1];
+ out[2] = trans[2];
+ out[3] = 1;
+}
+
+static INLINE void insert_3f_viewport_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+ out[2] = scale[2] * in[2] + trans[2];
+}
+
+static INLINE void insert_3f_viewport_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+ out[2] = trans[2]; /* only 2 input components; z is treated as 0 */
+}
+
+static INLINE void insert_3f_viewport_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = trans[1];
+ out[2] = trans[2];
+}
+
+static INLINE void insert_2f_viewport_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = scale[1] * in[1] + trans[1];
+}
+
+static INLINE void insert_2f_viewport_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = scale[0] * in[0] + trans[0];
+ out[1] = trans[1];
+}
+
+
+/*
+ * These functions do the same as above, but without the viewport mapping.
+ */
+
+static INLINE void insert_4f_4( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = in[3];
+}
+
+static INLINE void insert_4f_3( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = 1;
+}
+
+static INLINE void insert_4f_2( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = 0;
+ out[3] = 1;
+}
+
+static INLINE void insert_4f_1( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = 0;
+ out[2] = 0;
+ out[3] = 1;
+}
+
+static INLINE void insert_3f_xyw_4( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[3];
+}
+
+static INLINE void insert_3f_xyw_err( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ (void) a; (void) v; (void) in;
+ _mesa_exit(1);
+}
+
+static INLINE void insert_3f_3( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+}
+
+static INLINE void insert_3f_2( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = 0;
+}
+
+static INLINE void insert_3f_1( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = 0;
+ out[2] = 0;
+}
+
+
+static INLINE void insert_2f_2( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+}
+
+static INLINE void insert_2f_1( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = 0;
+}
+
+static INLINE void insert_1f_1( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ GLfloat *out = (GLfloat *)(v);
+ (void) a;
+
+ out[0] = in[0];
+}
+
+static INLINE void insert_null( const struct vf_attr *a, GLubyte *v, const GLfloat *in )
+{
+ (void) a; (void) v; (void) in;
+}
+
+static INLINE void insert_4chan_4f_rgba_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLchan *c = (GLchan *)v;
+ (void) a;
+ UNCLAMPED_FLOAT_TO_CHAN(c[0], in[0]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[1], in[1]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[2], in[2]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[3], in[3]);
+}
+
+static INLINE void insert_4chan_4f_rgba_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLchan *c = (GLchan *)v;
+ (void) a;
+ UNCLAMPED_FLOAT_TO_CHAN(c[0], in[0]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[1], in[1]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[2], in[2]);
+ c[3] = CHAN_MAX;
+}
+
+static INLINE void insert_4chan_4f_rgba_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLchan *c = (GLchan *)v;
+ (void) a;
+ UNCLAMPED_FLOAT_TO_CHAN(c[0], in[0]);
+ UNCLAMPED_FLOAT_TO_CHAN(c[1], in[1]);
+ c[2] = 0;
+ c[3] = CHAN_MAX;
+}
+
+static INLINE void insert_4chan_4f_rgba_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ GLchan *c = (GLchan *)v;
+ (void) a;
+ UNCLAMPED_FLOAT_TO_CHAN(c[0], in[0]);
+ c[1] = 0;
+ c[2] = 0;
+ c[3] = CHAN_MAX;
+}
+
+static INLINE void insert_4ub_4f_rgba_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[3]);
+}
+
+static INLINE void insert_4ub_4f_rgba_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]);
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_rgba_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ v[2] = 0;
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_rgba_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ v[1] = 0;
+ v[2] = 0;
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_bgra_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[3]);
+}
+
+static INLINE void insert_4ub_4f_bgra_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[2]);
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_bgra_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ v[0] = 0;
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_bgra_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ v[1] = 0;
+ v[0] = 0;
+ v[3] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_argb_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[3]);
+}
+
+static INLINE void insert_4ub_4f_argb_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[2]);
+ v[0] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_argb_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ v[3] = 0x00;
+ v[0] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_argb_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[0]);
+ v[2] = 0x00;
+ v[3] = 0x00;
+ v[0] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_abgr_4( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[3]);
+}
+
+static INLINE void insert_4ub_4f_abgr_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[2]);
+ v[0] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_abgr_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[1]);
+ v[1] = 0x00;
+ v[0] = 0xff;
+}
+
+static INLINE void insert_4ub_4f_abgr_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[3], in[0]);
+ v[2] = 0x00;
+ v[1] = 0x00;
+ v[0] = 0xff;
+}
+
+static INLINE void insert_3ub_3f_rgb_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[2]);
+}
+
+static INLINE void insert_3ub_3f_rgb_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ v[2] = 0;
+}
+
+static INLINE void insert_3ub_3f_rgb_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+ v[1] = 0;
+ v[2] = 0;
+}
+
+static INLINE void insert_3ub_3f_bgr_3( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[2]);
+}
+
+static INLINE void insert_3ub_3f_bgr_2( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(v[1], in[1]);
+ v[0] = 0;
+}
+
+static INLINE void insert_3ub_3f_bgr_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[2], in[0]);
+ v[1] = 0;
+ v[0] = 0;
+}
+
+
+static INLINE void insert_1ub_1f_1( const struct vf_attr *a, GLubyte *v,
+ const GLfloat *in )
+{
+ (void) a;
+ UNCLAMPED_FLOAT_TO_UBYTE(v[0], in[0]);
+}
+
+
+/***********************************************************************
+ * Functions to perform the reverse operations to the above, for
+ * swrast translation and clip-interpolation.
+ *
+ * Currently always extracts a full 4 floats.
+ */
+
+static void extract_4f_viewport( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ /* Although included for completeness, the position coordinate is
+ * usually handled differently during clipping.
+ */
+ out[0] = (in[0] - trans[0]) / scale[0];
+ out[1] = (in[1] - trans[1]) / scale[1];
+ out[2] = (in[2] - trans[2]) / scale[2];
+ out[3] = in[3];
+}
+
+static void extract_3f_viewport( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = (in[0] - trans[0]) / scale[0];
+ out[1] = (in[1] - trans[1]) / scale[1];
+ out[2] = (in[2] - trans[2]) / scale[2];
+ out[3] = 1;
+}
+
+
+static void extract_2f_viewport( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ const GLfloat *scale = a->vf->vp;
+ const GLfloat *trans = a->vf->vp + 4;
+
+ out[0] = (in[0] - trans[0]) / scale[0];
+ out[1] = (in[1] - trans[1]) / scale[1];
+ out[2] = 0;
+ out[3] = 1;
+}
+
+
+static void extract_4f( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = in[3];
+}
+
+static void extract_3f_xyw( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = 0;
+ out[3] = in[2];
+}
+
+
+static void extract_3f( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = 1;
+}
+
+
+static void extract_2f( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = 0;
+ out[3] = 1;
+}
+
+static void extract_1f( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ const GLfloat *in = (const GLfloat *)v;
+ (void) a;
+
+ out[0] = in[0];
+ out[1] = 0;
+ out[2] = 0;
+ out[3] = 1;
+}
+
+static void extract_4chan_4f_rgba( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ GLchan *c = (GLchan *)v;
+ (void) a;
+
+ out[0] = CHAN_TO_FLOAT(c[0]);
+ out[1] = CHAN_TO_FLOAT(c[1]);
+ out[2] = CHAN_TO_FLOAT(c[2]);
+ out[3] = CHAN_TO_FLOAT(c[3]);
+}
+
+static void extract_4ub_4f_rgba( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[0] = UBYTE_TO_FLOAT(v[0]);
+ out[1] = UBYTE_TO_FLOAT(v[1]);
+ out[2] = UBYTE_TO_FLOAT(v[2]);
+ out[3] = UBYTE_TO_FLOAT(v[3]);
+}
+
+static void extract_4ub_4f_bgra( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[2] = UBYTE_TO_FLOAT(v[0]);
+ out[1] = UBYTE_TO_FLOAT(v[1]);
+ out[0] = UBYTE_TO_FLOAT(v[2]);
+ out[3] = UBYTE_TO_FLOAT(v[3]);
+}
+
+static void extract_4ub_4f_argb( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[3] = UBYTE_TO_FLOAT(v[0]);
+ out[0] = UBYTE_TO_FLOAT(v[1]);
+ out[1] = UBYTE_TO_FLOAT(v[2]);
+ out[2] = UBYTE_TO_FLOAT(v[3]);
+}
+
+static void extract_4ub_4f_abgr( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[3] = UBYTE_TO_FLOAT(v[0]);
+ out[2] = UBYTE_TO_FLOAT(v[1]);
+ out[1] = UBYTE_TO_FLOAT(v[2]);
+ out[0] = UBYTE_TO_FLOAT(v[3]);
+}
+
+static void extract_3ub_3f_rgb( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[0] = UBYTE_TO_FLOAT(v[0]);
+ out[1] = UBYTE_TO_FLOAT(v[1]);
+ out[2] = UBYTE_TO_FLOAT(v[2]);
+ out[3] = 1;
+}
+
+static void extract_3ub_3f_bgr( const struct vf_attr *a, GLfloat *out,
+ const GLubyte *v )
+{
+ (void) a;
+ out[2] = UBYTE_TO_FLOAT(v[0]);
+ out[1] = UBYTE_TO_FLOAT(v[1]);
+ out[0] = UBYTE_TO_FLOAT(v[2]);
+ out[3] = 1;
+}
+
+static void extract_1ub_1f( const struct vf_attr *a, GLfloat *out, const GLubyte *v )
+{
+ (void) a;
+ out[0] = UBYTE_TO_FLOAT(v[0]);
+ out[1] = 0;
+ out[2] = 0;
+ out[3] = 1;
+}
+
+
+const struct vf_format_info vf_format_info[EMIT_MAX] =
+{
+ { "1f",
+ extract_1f,
+ { insert_1f_1, insert_1f_1, insert_1f_1, insert_1f_1 },
+ sizeof(GLfloat) },
+
+ { "2f",
+ extract_2f,
+ { insert_2f_1, insert_2f_2, insert_2f_2, insert_2f_2 },
+ 2 * sizeof(GLfloat) },
+
+ { "3f",
+ extract_3f,
+ { insert_3f_1, insert_3f_2, insert_3f_3, insert_3f_3 },
+ 3 * sizeof(GLfloat) },
+
+ { "4f",
+ extract_4f,
+ { insert_4f_1, insert_4f_2, insert_4f_3, insert_4f_4 },
+ 4 * sizeof(GLfloat) },
+
+ { "2f_viewport",
+ extract_2f_viewport,
+ { insert_2f_viewport_1, insert_2f_viewport_2, insert_2f_viewport_2,
+ insert_2f_viewport_2 },
+ 2 * sizeof(GLfloat) },
+
+ { "3f_viewport",
+ extract_3f_viewport,
+ { insert_3f_viewport_1, insert_3f_viewport_2, insert_3f_viewport_3,
+ insert_3f_viewport_3 },
+ 3 * sizeof(GLfloat) },
+
+ { "4f_viewport",
+ extract_4f_viewport,
+ { insert_4f_viewport_1, insert_4f_viewport_2, insert_4f_viewport_3,
+ insert_4f_viewport_4 },
+ 4 * sizeof(GLfloat) },
+
+ { "3f_xyw",
+ extract_3f_xyw,
+ { insert_3f_xyw_err, insert_3f_xyw_err, insert_3f_xyw_err,
+ insert_3f_xyw_4 },
+ 3 * sizeof(GLfloat) },
+
+ { "1ub_1f",
+ extract_1ub_1f,
+ { insert_1ub_1f_1, insert_1ub_1f_1, insert_1ub_1f_1, insert_1ub_1f_1 },
+ sizeof(GLubyte) },
+
+ { "3ub_3f_rgb",
+ extract_3ub_3f_rgb,
+ { insert_3ub_3f_rgb_1, insert_3ub_3f_rgb_2, insert_3ub_3f_rgb_3,
+ insert_3ub_3f_rgb_3 },
+ 3 * sizeof(GLubyte) },
+
+ { "3ub_3f_bgr",
+ extract_3ub_3f_bgr,
+ { insert_3ub_3f_bgr_1, insert_3ub_3f_bgr_2, insert_3ub_3f_bgr_3,
+ insert_3ub_3f_bgr_3 },
+ 3 * sizeof(GLubyte) },
+
+ { "4ub_4f_rgba",
+ extract_4ub_4f_rgba,
+ { insert_4ub_4f_rgba_1, insert_4ub_4f_rgba_2, insert_4ub_4f_rgba_3,
+ insert_4ub_4f_rgba_4 },
+ 4 * sizeof(GLubyte) },
+
+ { "4ub_4f_bgra",
+ extract_4ub_4f_bgra,
+ { insert_4ub_4f_bgra_1, insert_4ub_4f_bgra_2, insert_4ub_4f_bgra_3,
+ insert_4ub_4f_bgra_4 },
+ 4 * sizeof(GLubyte) },
+
+ { "4ub_4f_argb",
+ extract_4ub_4f_argb,
+ { insert_4ub_4f_argb_1, insert_4ub_4f_argb_2, insert_4ub_4f_argb_3,
+ insert_4ub_4f_argb_4 },
+ 4 * sizeof(GLubyte) },
+
+ { "4ub_4f_abgr",
+ extract_4ub_4f_abgr,
+ { insert_4ub_4f_abgr_1, insert_4ub_4f_abgr_2, insert_4ub_4f_abgr_3,
+ insert_4ub_4f_abgr_4 },
+ 4 * sizeof(GLubyte) },
+
+ { "4chan_4f_rgba",
+ extract_4chan_4f_rgba,
+ { insert_4chan_4f_rgba_1, insert_4chan_4f_rgba_2, insert_4chan_4f_rgba_3,
+ insert_4chan_4f_rgba_4 },
+ 4 * sizeof(GLchan) },
+
+ { "pad",
+ NULL,
+ { NULL, NULL, NULL, NULL },
+ 0 }
+
+};
+
+
+
+
+/***********************************************************************
+ * Hardwired fastpaths for emitting whole vertices or groups of
+ * vertices
+ */
+#define EMIT5(NR, F0, F1, F2, F3, F4, NAME) \
+static void NAME( struct vertex_fetch *vf, \
+ GLuint count, \
+ GLubyte *v ) \
+{ \
+ struct vf_attr *a = vf->attr; \
+ GLuint i; \
+ \
+ for (i = 0 ; i < count ; i++, v += vf->vertex_stride) { \
+ if (NR > 0) { \
+ F0( &a[0], v + a[0].vertoffset, (GLfloat *)a[0].inputptr ); \
+ a[0].inputptr += a[0].inputstride; \
+ } \
+ \
+ if (NR > 1) { \
+ F1( &a[1], v + a[1].vertoffset, (GLfloat *)a[1].inputptr ); \
+ a[1].inputptr += a[1].inputstride; \
+ } \
+ \
+ if (NR > 2) { \
+ F2( &a[2], v + a[2].vertoffset, (GLfloat *)a[2].inputptr ); \
+ a[2].inputptr += a[2].inputstride; \
+ } \
+ \
+ if (NR > 3) { \
+ F3( &a[3], v + a[3].vertoffset, (GLfloat *)a[3].inputptr ); \
+ a[3].inputptr += a[3].inputstride; \
+ } \
+ \
+ if (NR > 4) { \
+ F4( &a[4], v + a[4].vertoffset, (GLfloat *)a[4].inputptr ); \
+ a[4].inputptr += a[4].inputstride; \
+ } \
+ } \
+}
+
+
+#define EMIT2(F0, F1, NAME) EMIT5(2, F0, F1, insert_null, \
+ insert_null, insert_null, NAME)
+
+#define EMIT3(F0, F1, F2, NAME) EMIT5(3, F0, F1, F2, insert_null, \
+ insert_null, NAME)
+
+#define EMIT4(F0, F1, F2, F3, NAME) EMIT5(4, F0, F1, F2, F3, \
+ insert_null, NAME)
+
+
+EMIT2(insert_3f_viewport_3, insert_4ub_4f_rgba_4, emit_viewport3_rgba4)
+EMIT2(insert_3f_viewport_3, insert_4ub_4f_bgra_4, emit_viewport3_bgra4)
+EMIT2(insert_3f_3, insert_4ub_4f_rgba_4, emit_xyz3_rgba4)
+
+EMIT3(insert_4f_viewport_4, insert_4ub_4f_rgba_4, insert_2f_2, emit_viewport4_rgba4_st2)
+EMIT3(insert_4f_viewport_4, insert_4ub_4f_bgra_4, insert_2f_2, emit_viewport4_bgra4_st2)
+EMIT3(insert_4f_4, insert_4ub_4f_rgba_4, insert_2f_2, emit_xyzw4_rgba4_st2)
+
+EMIT4(insert_4f_viewport_4, insert_4ub_4f_rgba_4, insert_2f_2, insert_2f_2, emit_viewport4_rgba4_st2_st2)
+EMIT4(insert_4f_viewport_4, insert_4ub_4f_bgra_4, insert_2f_2, insert_2f_2, emit_viewport4_bgra4_st2_st2)
+EMIT4(insert_4f_4, insert_4ub_4f_rgba_4, insert_2f_2, insert_2f_2, emit_xyzw4_rgba4_st2_st2)
+
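
To make the macro above concrete: EMIT2(insert_3f_viewport_3, insert_4ub_4f_rgba_4, emit_viewport3_rgba4) expands, once the constant NR tests are folded away, to roughly the hand-written loop below (shown only for illustration; it relies on the static insert functions defined earlier in this file):

    static void emit_viewport3_rgba4_expanded( struct vertex_fetch *vf,
                                               GLuint count, GLubyte *v )
    {
       struct vf_attr *a = vf->attr;
       GLuint i;

       for (i = 0; i < count; i++, v += vf->vertex_stride) {
          insert_3f_viewport_3( &a[0], v + a[0].vertoffset,
                                (GLfloat *)a[0].inputptr );
          a[0].inputptr += a[0].inputstride;

          insert_4ub_4f_rgba_4( &a[1], v + a[1].vertoffset,
                                (GLfloat *)a[1].inputptr );
          a[1].inputptr += a[1].inputstride;
       }
    }
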
+
+/* Select one of a number of hardwired fastpaths by matching the
+ * per-attribute do_insert functions set up in vf_set_sources().
+ */
+void vf_generate_hardwired_emit( struct vertex_fetch *vf )
+{
+ vf_emit_func func = NULL;
+
+ /* Does it fit a hardwired fastpath? Help! this is growing out of
+ * control!
+ */
+ switch (vf->attr_count) {
+ case 2:
+ if (vf->attr[0].do_insert == insert_3f_viewport_3) {
+ if (vf->attr[1].do_insert == insert_4ub_4f_bgra_4)
+ func = emit_viewport3_bgra4;
+ else if (vf->attr[1].do_insert == insert_4ub_4f_rgba_4)
+ func = emit_viewport3_rgba4;
+ }
+ else if (vf->attr[0].do_insert == insert_3f_3 &&
+ vf->attr[1].do_insert == insert_4ub_4f_rgba_4) {
+ func = emit_xyz3_rgba4;
+ }
+ break;
+ case 3:
+ if (vf->attr[2].do_insert == insert_2f_2) {
+ if (vf->attr[1].do_insert == insert_4ub_4f_rgba_4) {
+ if (vf->attr[0].do_insert == insert_4f_viewport_4)
+ func = emit_viewport4_rgba4_st2;
+ else if (vf->attr[0].do_insert == insert_4f_4)
+ func = emit_xyzw4_rgba4_st2;
+ }
+ else if (vf->attr[1].do_insert == insert_4ub_4f_bgra_4 &&
+ vf->attr[0].do_insert == insert_4f_viewport_4)
+ func = emit_viewport4_bgra4_st2;
+ }
+ break;
+ case 4:
+ if (vf->attr[2].do_insert == insert_2f_2 &&
+ vf->attr[3].do_insert == insert_2f_2) {
+ if (vf->attr[1].do_insert == insert_4ub_4f_rgba_4) {
+ if (vf->attr[0].do_insert == insert_4f_viewport_4)
+ func = emit_viewport4_rgba4_st2_st2;
+ else if (vf->attr[0].do_insert == insert_4f_4)
+ func = emit_xyzw4_rgba4_st2_st2;
+ }
+ else if (vf->attr[1].do_insert == insert_4ub_4f_bgra_4 &&
+ vf->attr[0].do_insert == insert_4f_viewport_4)
+ func = emit_viewport4_bgra4_st2_st2;
+ }
+ break;
+ }
+
+ vf->emit = func;
+}
+
+/***********************************************************************
+ * Generic (non-codegen) functions for whole vertices or groups of
+ * vertices
+ */
+
+void vf_generic_emit( struct vertex_fetch *vf,
+ GLuint count,
+ GLubyte *v )
+{
+ struct vf_attr *a = vf->attr;
+ const GLuint attr_count = vf->attr_count;
+ const GLuint stride = vf->vertex_stride;
+ GLuint i, j;
+
+ for (i = 0 ; i < count ; i++, v += stride) {
+ for (j = 0; j < attr_count; j++) {
+ GLfloat *in = (GLfloat *)a[j].inputptr;
+ a[j].inputptr += a[j].inputstride;
+ a[j].do_insert( &a[j], v + a[j].vertoffset, in );
+ }
+ }
+}
+
+
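
For reference, the packed layout vf_generic_emit() (or an equivalent fastpath) produces for the attribute map { POS: EMIT_4F_VIEWPORT, COLOR0: EMIT_4UB_4F_BGRA, TEX0: EMIT_2F } corresponds to the illustrative struct below; the sizes come from vf_format_info, and the 28-byte stride applies only when no larger vertex_stride was requested:

    struct example_hw_vertex {
       GLfloat xyzw[4];   /* offset  0: window-space position (insert_4f_viewport_*) */
       GLubyte bgra[4];   /* offset 16: packed color (insert_4ub_4f_bgra_*) */
       GLfloat st[2];     /* offset 20: texcoord (insert_2f_*) */
    };                    /* sizeof == vertex_stride == 28 when tightly packed */
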
diff --git a/src/mesa/vf/vf_sse.c b/src/mesa/vf/vf_sse.c
new file mode 100644
index 00000000000..a5d143461bf
--- /dev/null
+++ b/src/mesa/vf/vf_sse.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2003 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <[email protected]>
+ */
+
+#include "glheader.h"
+#include "colormac.h"
+#include "simple_list.h"
+#include "enums.h"
+
+#include "vf/vf.h"
+
+#if defined(USE_SSE_ASM)
+
+#include "x86/rtasm/x86sse.h"
+#include "x86/common_x86_asm.h"
+
+
+#define X 0
+#define Y 1
+#define Z 2
+#define W 3
+
+
+struct x86_program {
+ struct x86_function func;
+
+ struct vertex_fetch *vf;
+ GLboolean inputs_safe;
+ GLboolean outputs_safe;
+ GLboolean have_sse2;
+
+ struct x86_reg identity;
+ struct x86_reg chan0;
+};
+
+
+static struct x86_reg get_identity( struct x86_program *p )
+{
+ return p->identity;
+}
+
+static void emit_load4f_4( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movups(&p->func, dest, arg0);
+}
+
+static void emit_load4f_3( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ /* Have to jump through some hoops:
+ *
+ * c 0 0 0
+ * c 0 0 1
+ * 0 0 c 1
+ * a b c 1
+ */
+ sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
+ sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
+ sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
+ sse_movlps(&p->func, dest, arg0);
+}
+
+static void emit_load4f_2( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ /* Initialize from identity, then pull in low two words:
+ */
+ sse_movups(&p->func, dest, get_identity(p));
+ sse_movlps(&p->func, dest, arg0);
+}
+
+static void emit_load4f_1( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ /* Pull in low word, then swizzle in identity */
+ sse_movss(&p->func, dest, arg0);
+ sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
+}
+
+
+
+static void emit_load3f_3( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ /* Over-reads by 1 dword - potential SEGV if input is a vertex
+ * array.
+ */
+ if (p->inputs_safe) {
+ sse_movups(&p->func, dest, arg0);
+ }
+ else {
+ /* c 0 0 0
+ * c c c c
+ * a b c c
+ */
+ sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
+ sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
+ sse_movlps(&p->func, dest, arg0);
+ }
+}
+
+static void emit_load3f_2( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ emit_load4f_2(p, dest, arg0);
+}
+
+static void emit_load3f_1( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ emit_load4f_1(p, dest, arg0);
+}
+
+static void emit_load2f_2( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movlps(&p->func, dest, arg0);
+}
+
+static void emit_load2f_1( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ emit_load4f_1(p, dest, arg0);
+}
+
+static void emit_load1f_1( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movss(&p->func, dest, arg0);
+}
+
+static void (*load[4][4])( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 ) = {
+ { emit_load1f_1,
+ emit_load1f_1,
+ emit_load1f_1,
+ emit_load1f_1 },
+
+ { emit_load2f_1,
+ emit_load2f_2,
+ emit_load2f_2,
+ emit_load2f_2 },
+
+ { emit_load3f_1,
+ emit_load3f_2,
+ emit_load3f_3,
+ emit_load3f_3 },
+
+ { emit_load4f_1,
+ emit_load4f_2,
+ emit_load4f_3,
+ emit_load4f_4 }
+};
+
+static void emit_load( struct x86_program *p,
+ struct x86_reg dest,
+ GLuint sz,
+ struct x86_reg src,
+ GLuint src_sz)
+{
+ load[sz-1][src_sz-1](p, dest, src);
+}
+
+static void emit_store4f( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movups(&p->func, dest, arg0);
+}
+
+static void emit_store3f( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ if (p->outputs_safe) {
+ /* Emit the extra dword anyway. This may hurt writecombining,
+ * may cause other problems.
+ */
+ sse_movups(&p->func, dest, arg0);
+ }
+ else {
+ /* Alternate strategy - emit two, shuffle, emit one.
+ */
+ sse_movlps(&p->func, dest, arg0);
+ sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
+ sse_movss(&p->func, x86_make_disp(dest,8), arg0);
+ }
+}
+
+static void emit_store2f( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movlps(&p->func, dest, arg0);
+}
+
+static void emit_store1f( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 )
+{
+ sse_movss(&p->func, dest, arg0);
+}
+
+
+static void (*store[4])( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg arg0 ) =
+{
+ emit_store1f,
+ emit_store2f,
+ emit_store3f,
+ emit_store4f
+};
+
+static void emit_store( struct x86_program *p,
+ struct x86_reg dest,
+ GLuint sz,
+ struct x86_reg temp )
+
+{
+ store[sz-1](p, dest, temp);
+}
+
+static void emit_pack_store_4ub( struct x86_program *p,
+ struct x86_reg dest,
+ struct x86_reg temp )
+{
+ /* Scale by 255.0
+ */
+ sse_mulps(&p->func, temp, p->chan0);
+
+ if (p->have_sse2) {
+ sse2_cvtps2dq(&p->func, temp, temp);
+ sse2_packssdw(&p->func, temp, temp);
+ sse2_packuswb(&p->func, temp, temp);
+ sse_movss(&p->func, dest, temp);
+ }
+ else {
+ struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
+ struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
+ sse_cvtps2pi(&p->func, mmx0, temp);
+ sse_movhlps(&p->func, temp, temp);
+ sse_cvtps2pi(&p->func, mmx1, temp);
+ mmx_packssdw(&p->func, mmx0, mmx1);
+ mmx_packuswb(&p->func, mmx0, mmx0);
+ mmx_movd(&p->func, dest, mmx0);
+ }
+}
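
Per component, the SSE2 (cvtps2dq/packssdw/packuswb) and MMX (cvtps2pi/packssdw/packuswb) sequences above amount to: multiply by vf->chan_scale (255.0 for byte channels), convert to integer, and pack with saturation to [0,255]. A hypothetical scalar equivalent, ignoring the exact rounding mode:

    static void pack_store_4ub_scalar( GLubyte dest[4], const GLfloat in[4] )
    {
       GLuint i;
       for (i = 0; i < 4; i++) {
          GLint k = (GLint) (in[i] * 255.0F + 0.5F);              /* scale, round */
          dest[i] = (GLubyte) (k < 0 ? 0 : (k > 255 ? 255 : k));  /* saturate */
       }
    }
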
+
+static GLint get_offset( const void *a, const void *b )
+{
+ return (const char *)b - (const char *)a;
+}
+
+/* Not much happens here. Eventually use this function to try and
+ * avoid saving/reloading the source pointers each vertex (if some of
+ * them can fit in registers).
+ */
+static void get_src_ptr( struct x86_program *p,
+ struct x86_reg srcREG,
+ struct x86_reg vfREG,
+ struct vf_attr *a )
+{
+ struct vertex_fetch *vf = p->vf;
+ struct x86_reg ptr_to_src = x86_make_disp(vfREG, get_offset(vf, &a->inputptr));
+
+ /* Load current a[j].inputptr
+ */
+ x86_mov(&p->func, srcREG, ptr_to_src);
+}
+
+static void update_src_ptr( struct x86_program *p,
+ struct x86_reg srcREG,
+ struct x86_reg vfREG,
+ struct vf_attr *a )
+{
+ if (a->inputstride) {
+ struct vertex_fetch *vf = p->vf;
+ struct x86_reg ptr_to_src = x86_make_disp(vfREG, get_offset(vf, &a->inputptr));
+
+ /* add a[j].inputstride (hardcoded value - could just as easily
+ * pull the stride value from memory each time).
+ */
+ x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));
+
+ /* save new value of a[j].inputptr
+ */
+ x86_mov(&p->func, ptr_to_src, srcREG);
+ }
+}
+
+
+/* Lots of hardcoding
+ *
+ * EAX -- pointer to current output vertex
+ * ECX -- pointer to the current attribute's source data
+ * EBP -- remaining vertex count
+ * ESI -- pointer to the vertex_fetch struct
+ *
+ */
+static GLboolean build_vertex_emit( struct x86_program *p )
+{
+ struct vertex_fetch *vf = p->vf;
+ GLuint j = 0;
+
+ struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
+ struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
+ struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
+ struct x86_reg vfESI = x86_make_reg(file_REG32, reg_SI);
+ struct x86_reg temp = x86_make_reg(file_XMM, 0);
+ struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
+ struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
+ GLubyte *fixup, *label;
+
+ x86_init_func(&p->func);
+
+ /* Push a few regs?
+ */
+ x86_push(&p->func, countEBP);
+ x86_push(&p->func, vfESI);
+
+
+ /* Get vertex count, compare to zero
+ */
+ x86_xor(&p->func, srcECX, srcECX);
+ x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
+ x86_cmp(&p->func, countEBP, srcECX);
+ fixup = x86_jcc_forward(&p->func, cc_E);
+
+ /* Initialize destination register.
+ */
+ x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));
+
+ /* Move argument 1 (vf) into a reg:
+ */
+ x86_mov(&p->func, vfESI, x86_fn_arg(&p->func, 1));
+
+
+ /* Possibly load vp0, vp1 for viewport calcs:
+ */
+ if (vf->allow_viewport_emits) {
+ sse_movups(&p->func, vp0, x86_make_disp(vfESI, get_offset(vf, &vf->vp[0])));
+ sse_movups(&p->func, vp1, x86_make_disp(vfESI, get_offset(vf, &vf->vp[4])));
+ }
+
+ /* always load, needed or not:
+ */
+ sse_movups(&p->func, p->chan0, x86_make_disp(vfESI, get_offset(vf, &vf->chan_scale[0])));
+ sse_movups(&p->func, p->identity, x86_make_disp(vfESI, get_offset(vf, &vf->identity[0])));
+
+ /* Note address for loop jump */
+ label = x86_get_label(&p->func);
+
+ /* Emit code for each of the attributes. Currently routes
+ * everything through SSE registers, even when it might be more
+ * efficient to stick with regular old x86. No optimization or
+ * other tricks - enough new ground to cover here just getting
+ * things working.
+ */
+ while (j < vf->attr_count) {
+ struct vf_attr *a = &vf->attr[j];
+ struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);
+
+ /* Now, load an XMM reg from src, perhaps transform, then save.
+ * Could be shortcircuited in specific cases:
+ */
+ switch (a->format) {
+ case EMIT_1F:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 1, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_2F:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 2, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_3F:
+ /* Potentially the worst case - hardcode 2+1 copying:
+ */
+ if (0) {
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 3, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ }
+ else {
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 2, temp);
+ if (a->inputsize > 2) {
+ emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
+ emit_store(p, x86_make_disp(dest,8), 1, temp);
+ }
+ else {
+ sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
+ }
+ update_src_ptr(p, srcECX, vfESI, a);
+ }
+ break;
+ case EMIT_4F:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 4, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_2F_VIEWPORT:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
+ sse_mulps(&p->func, temp, vp0);
+ sse_addps(&p->func, temp, vp1);
+ emit_store(p, dest, 2, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_3F_VIEWPORT:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
+ sse_mulps(&p->func, temp, vp0);
+ sse_addps(&p->func, temp, vp1);
+ emit_store(p, dest, 3, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_4F_VIEWPORT:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ sse_mulps(&p->func, temp, vp0);
+ sse_addps(&p->func, temp, vp1);
+ emit_store(p, dest, 4, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_3F_XYW:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
+ emit_store(p, dest, 3, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+
+ case EMIT_1UB_1F:
+ /* Test for PAD3 + 1UB:
+ */
+ if (j > 0 &&
+ a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3)
+ {
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
+ sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
+ emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
+ update_src_ptr(p, srcECX, vfESI, a);
+ }
+ else {
+ _mesa_printf("Can't emit 1ub %x %x %d\n", a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize );
+ return GL_FALSE;
+ }
+ break;
+ case EMIT_3UB_3F_RGB:
+ case EMIT_3UB_3F_BGR:
+ /* Test for 3UB + PAD1:
+ */
+ if (j == vf->attr_count - 1 ||
+ a[1].vertoffset >= a->vertoffset + 4) {
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
+ if (a->format == EMIT_3UB_3F_BGR)
+ sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ }
+ /* Test for 3UB + 1UB:
+ */
+ else if (j < vf->attr_count - 1 &&
+ a[1].format == EMIT_1UB_1F &&
+ a[1].vertoffset == a->vertoffset + 3) {
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
+ update_src_ptr(p, srcECX, vfESI, a);
+
+ /* Make room for incoming value:
+ */
+ sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
+
+ get_src_ptr(p, srcECX, vfESI, &a[1]);
+ emit_load(p, temp, 1, x86_deref(srcECX), a[1].inputsize);
+ update_src_ptr(p, srcECX, vfESI, &a[1]);
+
+ /* Rearrange and possibly do BGR conversion:
+ */
+ if (a->format == EMIT_3UB_3F_BGR)
+ sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
+ else
+ sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));
+
+ emit_pack_store_4ub(p, dest, temp);
+ j++; /* NOTE: two attrs consumed */
+ }
+ else {
+ _mesa_printf("Can't emit 3ub\n");
+ }
+ return GL_FALSE; /* add this later */
+ break;
+
+ case EMIT_4UB_4F_RGBA:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_4UB_4F_BGRA:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_4UB_4F_ARGB:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_4UB_4F_ABGR:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case EMIT_4CHAN_4F_RGBA:
+ switch (CHAN_TYPE) {
+ case GL_UNSIGNED_BYTE:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ emit_pack_store_4ub(p, dest, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case GL_FLOAT:
+ get_src_ptr(p, srcECX, vfESI, a);
+ emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
+ emit_store(p, dest, 4, temp);
+ update_src_ptr(p, srcECX, vfESI, a);
+ break;
+ case GL_UNSIGNED_SHORT:
+ default:
+ _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
+ return GL_FALSE;
+ }
+ break;
+ default:
+ _mesa_printf("unknown a[%d].format %d\n", j, a->format);
+ return GL_FALSE; /* catch any new opcodes */
+ }
+
+ /* Increment j by at least 1 - may have been incremented above also:
+ */
+ j++;
+ }
+
+ /* Next vertex:
+ */
+ x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vf->vertex_stride));
+
+ /* decr count, loop if not zero
+ */
+ x86_dec(&p->func, countEBP);
+ x86_test(&p->func, countEBP, countEBP);
+ x86_jcc(&p->func, cc_NZ, label);
+
+ /* Exit mmx state?
+ */
+ if (p->func.need_emms)
+ mmx_emms(&p->func);
+
+ /* Land forward jump here:
+ */
+ x86_fixup_fwd_jump(&p->func, fixup);
+
+ /* Pop regs and return
+ */
+ x86_pop(&p->func, x86_get_base_reg(vfESI));
+ x86_pop(&p->func, countEBP);
+ x86_ret(&p->func);
+
+ vf->emit = (vf_emit_func)x86_get_func(&p->func);
+ return GL_TRUE;
+}
+
+
+
+void vf_generate_sse_emit( struct vertex_fetch *vf )
+{
+ struct x86_program p;
+
+ if (!cpu_has_xmm) {
+ vf->codegen_emit = NULL;
+ return;
+ }
+
+ _mesa_memset(&p, 0, sizeof(p));
+
+ p.vf = vf;
+ p.inputs_safe = 0; /* for now */
+ p.outputs_safe = 1; /* for now */
+ p.have_sse2 = cpu_has_xmm2;
+ p.identity = x86_make_reg(file_XMM, 6);
+ p.chan0 = x86_make_reg(file_XMM, 7);
+
+ x86_init_func(&p.func);
+
+ if (build_vertex_emit(&p)) {
+ vf_register_fastpath( vf, GL_TRUE );
+ }
+ else {
+ /* Note the failure so that we don't keep trying to codegen an
+ * impossible state:
+ */
+ vf_register_fastpath( vf, GL_FALSE );
+ x86_release_func(&p.func);
+ }
+}
+
+#else
+
+void vf_generate_sse_emit( struct vertex_fetch *vf )
+{
+ /* Dummy version for when USE_SSE_ASM not defined */
+}
+
+#endif