author    Daniel Borca <[email protected]>  2004-03-29 06:51:41 +0000
committer Daniel Borca <[email protected]>  2004-03-29 06:51:41 +0000
commit    71c7c1feb6ed3453d1f3238ffefa8f7827ad2c8a (patch)
tree      3de79ba26827900ff775689913e6e0bc8a035745 /src/mesa
parent    238693544cc77c53395b8cdade0c5df3b844aaa7 (diff)
code cleanup heheh
Diffstat (limited to 'src/mesa')
-rw-r--r--  src/mesa/drivers/glide/fxapi.c   |  10
-rw-r--r--  src/mesa/drivers/glide/fxdd.c    |   2
-rw-r--r--  src/mesa/drivers/glide/fxddtex.c |  12
-rw-r--r--  src/mesa/drivers/glide/fxdrv.h   |   4
-rw-r--r--  src/mesa/drivers/glide/fxg.c     |  16
-rw-r--r--  src/mesa/drivers/glide/fxg.h     |  12
-rw-r--r--  src/mesa/drivers/glide/fxsetup.c | 150
-rw-r--r--  src/mesa/drivers/glide/fxvb.c    |   3
8 files changed, 96 insertions(+), 113 deletions(-)
diff --git a/src/mesa/drivers/glide/fxapi.c b/src/mesa/drivers/glide/fxapi.c
index 0474de9016a..5d7bcdb2c30 100644
--- a/src/mesa/drivers/glide/fxapi.c
+++ b/src/mesa/drivers/glide/fxapi.c
@@ -386,14 +386,7 @@ fxMesaCreateContext(GLuint win,
Glide->txMipQuantize &&
Glide->txPalToNcc && !getenv("MESA_FX_IGNORE_TEXUS2");
- /*
- * Pixel tables are used during pixel read-back
- * Either initialize them for RGB or BGR order;
- * However, 32bit capable cards have the right order.
- * As a consequence, 32bit read-back is not swizzled!
- * Also determine if we need vertex snapping.
- */
- /* number of SLI units and AA Samples per chip */
+ /* Determine if we need vertex swapping, RGB order and SLI/AA */
sliaa = 0;
switch (fxMesa->type) {
case GR_SSTTYPE_VOODOO:
@@ -408,6 +401,7 @@ fxMesaCreateContext(GLuint win,
break;
case GR_SSTTYPE_Voodoo4:
case GR_SSTTYPE_Voodoo5:
+ /* number of SLI units and AA Samples per chip */
if ((str = Glide->grGetRegistryOrEnvironmentStringExt("SSTH3_SLI_AA_CONFIGURATION")) != NULL) {
sliaa = atoi(str);
}
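
For context: the hunk above moves the SLI/AA comment down to the only case that uses it, the Voodoo4/5 path that reads a packed selector from the registry or environment. A minimal standalone sketch of that lookup pattern, with plain getenv() standing in for Glide's grGetRegistryOrEnvironmentStringExt (which also consults the Windows registry); how the selector splits into SLI units and AA samples is left to the driver's mode tables and is not reproduced here.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int sliaa = 0;   /* 0 = driver default */
        const char *str = getenv("SSTH3_SLI_AA_CONFIGURATION");

        if (str != NULL)
            sliaa = atoi(str);   /* packed SLI-unit / AA-sample selector */

        printf("sliaa selector = %d\n", sliaa);
        return 0;
    }
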
diff --git a/src/mesa/drivers/glide/fxdd.c b/src/mesa/drivers/glide/fxdd.c
index 245654ca4c7..510b9ce43bc 100644
--- a/src/mesa/drivers/glide/fxdd.c
+++ b/src/mesa/drivers/glide/fxdd.c
@@ -1362,7 +1362,7 @@ fxDDInitFxMesaContext(fxMesaContext fxMesa)
textureLevels++;
} while ((textureSize >>= 0x1) & 0x7ff);
ctx->Const.MaxTextureLevels = textureLevels;
-#if 1||FX_RESCALE_BIG_TEXURES
+#if FX_RESCALE_BIG_TEXURES_HACK
fxMesa->textureMaxLod = textureLevels - 1;
if ((env = getenv("MESA_FX_MAXLOD")) != NULL) {
int maxLevels = atoi(env) + 1;
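
The context lines above show how fxDDInitFxMesaContext derives ctx->Const.MaxTextureLevels: shift the hardware's maximum texture size down one bit per iteration, counting levels until the 11-bit size field empties, then optionally clamp via MESA_FX_MAXLOD. A standalone sketch with 2048 as an assumed maximum; the clamp condition is illustrative, since the original applies the override to fxMesa->textureMaxLod:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int textureSize = 2048;   /* assumed hardware maximum */
        int textureLevels = 0;
        char *env;

        /* 2048, 1024, ..., 1 -> 12 levels */
        do {
            textureLevels++;
        } while ((textureSize >>= 0x1) & 0x7ff);

        /* user override, mirroring the MESA_FX_MAXLOD hunk above */
        if ((env = getenv("MESA_FX_MAXLOD")) != NULL) {
            int maxLevels = atoi(env) + 1;
            if (maxLevels > 0 && maxLevels < textureLevels)
                textureLevels = maxLevels;
        }

        printf("MaxTextureLevels = %d\n", textureLevels);   /* 12 */
        return 0;
    }
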
diff --git a/src/mesa/drivers/glide/fxddtex.c b/src/mesa/drivers/glide/fxddtex.c
index bbf7dfad793..b8e5d814058 100644
--- a/src/mesa/drivers/glide/fxddtex.c
+++ b/src/mesa/drivers/glide/fxddtex.c
@@ -1233,7 +1233,7 @@ fxDDTexImage2D(GLcontext * ctx, GLenum target, GLint level,
mml->width = width * mml->wScale;
mml->height = height * mml->hScale;
-#if 0 && FX_COMPRESS_S3TC_AS_FXT1_HACK
+#if FX_COMPRESS_S3TC_AS_FXT1_HACK
/* [koolsmoky] substitute FXT1 for DXTn and Legacy S3TC */
/* [dBorca] we should update texture's attribute, then,
* because if the application asks us to decompress, we
@@ -1257,16 +1257,6 @@ fxDDTexImage2D(GLcontext * ctx, GLenum target, GLint level,
}
}
#endif
-#if 0 && FX_COMPRESS_DXT5_AS_DXT3_HACK
- /* [dBorca] either VSA is stupid at DXT5,
- * or our compression tool is broken. See
- * above for caveats.
- */
- if ((texImage->IsCompressed) &&
- (internalFormat == GL_COMPRESSED_RGBA_S3TC_DXT5_EXT)) {
- internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
- }
-#endif
/* choose the texture format */
assert(ctx->Driver.ChooseTextureFormat);
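
This hunk enables FX_COMPRESS_S3TC_AS_FXT1_HACK, substituting FXT1 (which the 3dfx hardware compresses natively) for requested DXTn/S3TC formats, and drops the old DXT5-as-DXT3 workaround entirely. A hedged sketch of the substitution using the standard S3TC and FXT1 enums from glext.h; the real code must also keep the texture's attributes consistent afterwards, as the [dBorca] comment warns:

    #include <GL/gl.h>
    #include <GL/glext.h>

    static GLenum s3tc_to_fxt1(GLenum internalFormat)
    {
        switch (internalFormat) {
        case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
            return GL_COMPRESSED_RGB_FXT1_3DFX;
        case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
        case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
        case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
            return GL_COMPRESSED_RGBA_FXT1_3DFX;
        default:
            return internalFormat;   /* not S3TC: leave untouched */
        }
    }
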
diff --git a/src/mesa/drivers/glide/fxdrv.h b/src/mesa/drivers/glide/fxdrv.h
index a619cf10148..ff5601f21b4 100644
--- a/src/mesa/drivers/glide/fxdrv.h
+++ b/src/mesa/drivers/glide/fxdrv.h
@@ -730,4 +730,8 @@ extern int TDFX_DEBUG;
#define TDFX_DEBUG 0
#endif
+/* dirty hacks */
+#define FX_RESCALE_BIG_TEXURES_HACK 1
+#define FX_COMPRESS_S3TC_AS_FXT1_HACK 0
+
#endif
diff --git a/src/mesa/drivers/glide/fxg.c b/src/mesa/drivers/glide/fxg.c
index d49878ae18d..b7408a70f98 100644
--- a/src/mesa/drivers/glide/fxg.c
+++ b/src/mesa/drivers/glide/fxg.c
@@ -38,7 +38,7 @@
#include <stdarg.h>
#include <assert.h>
-#define DEBUG_TRAP_internal
+#define FX_TRAP_GLIDE_internal
#include "fxg.h"
@@ -46,7 +46,7 @@
/****************************************************************************\
* logging *
\****************************************************************************/
-#if DEBUG_TRAP
+#if FX_TRAP_GLIDE
#define TRAP_LOG trp_printf
#ifdef __GNUC__
__attribute__ ((format(printf, 1, 2)))
@@ -66,17 +66,17 @@ int trp_printf (const char *format, ...)
va_end(arg);
return n;
}
-#else /* DEBUG_TRAP */
+#else /* FX_TRAP_GLIDE */
#ifdef __GNUC__
#define TRAP_LOG(format, ...) do {} while (0)
#else /* __GNUC__ */
#define TRAP_LOG 0 && (unsigned long)
#endif /* __GNUC__ */
-#endif /* DEBUG_TRAP */
+#endif /* FX_TRAP_GLIDE */
-#if DEBUG_TRAP
+#if FX_TRAP_GLIDE
/****************************************************************************\
* helpers *
\****************************************************************************/
@@ -2242,15 +2242,15 @@ void FX_CALL fake_grTexNCCTableExt (GrChipID_t tmu,
\****************************************************************************/
void tdfx_hook_glide (struct tdfx_glide *Glide)
{
-#if DEBUG_TRAP
+#if FX_TRAP_GLIDE
#define GET_EXT_ADDR(name) *(GrProc *)&real_##name = grGetProcAddress(#name), Glide->name = trap_##name
#define GET_EXT_FAKE(name) GET_EXT_ADDR(name); if (real_##name == NULL) real_##name = fake_##name
#define GET_EXT_NULL(name) GET_EXT_ADDR(name); if (real_##name == NULL) Glide->name = NULL
-#else /* DEBUG_TRAP */
+#else /* FX_TRAP_GLIDE */
#define GET_EXT_ADDR(name) *(GrProc *)&Glide->name = grGetProcAddress(#name)
#define GET_EXT_FAKE(name) GET_EXT_ADDR(name); if (Glide->name == NULL) Glide->name = fake_##name
#define GET_EXT_NULL(name) GET_EXT_ADDR(name)
-#endif /* DEBUG_TRAP */
+#endif /* FX_TRAP_GLIDE */
/*
** glide extensions
diff --git a/src/mesa/drivers/glide/fxg.h b/src/mesa/drivers/glide/fxg.h
index 57a891b6a99..f2822967158 100644
--- a/src/mesa/drivers/glide/fxg.h
+++ b/src/mesa/drivers/glide/fxg.h
@@ -37,9 +37,11 @@
#include <glide.h>
#include <g3ext.h>
-#define DEBUG_TRAP 0
+#ifndef FX_TRAP_GLIDE
+#define FX_TRAP_GLIDE 0
+#endif
-#if DEBUG_TRAP
+#if FX_TRAP_GLIDE
/*
** rendering functions
*/
@@ -171,7 +173,7 @@ void FX_CALL trap_guFogGenerateExp (GrFog_t *fogtable, float density);
void FX_CALL trap_guFogGenerateExp2 (GrFog_t *fogtable, float density);
void FX_CALL trap_guFogGenerateLinear (GrFog_t *fogtable, float nearZ, float farZ);
-#ifndef DEBUG_TRAP_internal
+#ifndef FX_TRAP_GLIDE_internal
/*
** rendering functions
*/
@@ -302,8 +304,8 @@ void FX_CALL trap_guFogGenerateLinear (GrFog_t *fogtable, float nearZ, float far
#define guFogGenerateExp trap_guFogGenerateExp
#define guFogGenerateExp2 trap_guFogGenerateExp2
#define guFogGenerateLinear trap_guFogGenerateLinear
-#endif /* DEBUG_TRAP_internal */
-#endif /* DEBUG_TRAP */
+#endif /* FX_TRAP_GLIDE_internal */
+#endif /* FX_TRAP_GLIDE */
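
The fxg.c and fxg.h hunks above rename the tracing switch from DEBUG_TRAP to FX_TRAP_GLIDE and guard it with #ifndef, so a build can now turn on Glide call tracing with -DFX_TRAP_GLIDE=1 instead of editing the header. The GET_EXT_* macros build a three-layer dispatch per entry point: a real_ pointer resolved via grGetProcAddress, a trap_ wrapper that logs and forwards, and a fake_ fallback when the extension is absent. A self-contained miniature of that pattern, with lookup_symbol() as a hypothetical stand-in for grGetProcAddress and an invented grSomething entry point:

    #include <stdio.h>
    #include <stddef.h>

    typedef void (*GrProc)(void);

    static void real_impl(void) { /* would touch hardware */ }

    static GrProc lookup_symbol(const char *name)
    {
        (void)name;          /* grGetProcAddress stand-in */
        return real_impl;    /* pretend the symbol resolved */
    }

    /* resolved pointer, logging trap, and no-op fake */
    static void (*real_grSomething)(void);
    static void trap_grSomething(void)
    {
        fprintf(stderr, "grSomething()\n");   /* TRAP_LOG */
        real_grSomething();
    }
    static void fake_grSomething(void) { /* emulate missing ext */ }

    struct tdfx_glide { void (*grSomething)(void); };

    int main(void)
    {
        struct tdfx_glide Glide;

        /* what GET_EXT_FAKE(grSomething) expands to when tracing */
        real_grSomething = lookup_symbol("grSomething");
        Glide.grSomething = trap_grSomething;
        if (real_grSomething == NULL)
            real_grSomething = fake_grSomething;

        Glide.grSomething();   /* logs, then runs real (or fake) */
        return 0;
    }
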
diff --git a/src/mesa/drivers/glide/fxsetup.c b/src/mesa/drivers/glide/fxsetup.c
index 7714e3cf80c..12a525a1e8e 100644
--- a/src/mesa/drivers/glide/fxsetup.c
+++ b/src/mesa/drivers/glide/fxsetup.c
@@ -65,85 +65,79 @@ fxTexValidate(GLcontext * ctx, struct gl_texture_object *tObj)
minl = ti->minLevel = tObj->BaseLevel;
maxl = ti->maxLevel = MIN2(tObj->MaxLevel, tObj->Image[0][0]->MaxLog2);
-#if 1||FX_RESCALE_BIG_TEXURES
+#if FX_RESCALE_BIG_TEXURES_HACK
{
- extern void _mesa_rescale_teximage2d( GLuint bytesPerPixel,
- GLuint dstRowStride,
- GLint srcWidth, GLint srcHeight,
- GLint dstWidth, GLint dstHeight,
- const GLvoid *srcImage, GLvoid *dstImage );
- fxMesaContext fxMesa = FX_CONTEXT(ctx);
- if (maxl - minl > fxMesa->textureMaxLod) {
- /* [dBorca]
- * Ooooooook! Here's a(nother) long story.
- * We get here because we need to handle a texture larger
- * than hardware can support. Two cases:
- * 1) we have mipmaps. Then we just push up to the first supported
- * LOD. A possible drawback is that Mesa will ignore the skipped
- * LODs on further texture handling.
- * Will this interfere with GL_TEXTURE_[MIN|BASE]_LEVEL? How?
- * 2) we don't have mipmaps. We need to rescale texture; two ways:
- * a) create a new LOD and push up ti->minLevel and tObj->BaseLevel
- * but this means we need to rescale on both axes, which
- * yield unnecessary ugly texture. Also, same issues as 1)
- * b) rescale the biggest LOD in place and go two ways:
- * - update texImage->Width and texImage->Height, then
- * decrease maxLevel, so we won't rescale again on the
- * next validation. Changing texImage-> parameters is
- * not quite legal here (see convolution), but...
- * - leaving texImage-> parameters alone, while rescaling
- * texture and decreasing maxLevel makes Mesa puke. Also
- * this approach requires that mml->[wh]Scale go below 1,
- * otherwise bad ju-ju will be in our future (see fetch_texel)
- * Will this interfere with GL_TEXTURE_MAX_LEVEL? How?
- * The above approach is somehow dumb! we might have rescaled
- * once in TexImage2D to accomodate aspect ratio, and now we
- * are rescaling again. The thing is, in TexImage2D we don't
- * know whether we'll hit 1) or 2) by the time of validation.
- * NB: we could handle mml->[wh]Scale nicely, using (biased) shifts.
- *
- * Which brings me to another issue. How can we handle NPOT textures?
- * - rescaling NPOT to the next bigger POT (mml->[wh]Scale can't shift)
- * - upping the max LOD to the next power-of-two, in fxTexGetInfo; then
- * choosing non-power-of-two values for ti->[st]Scale... Anyhow, we
- * still need to align mipmaps correctly in texture memory!
- */
- if ((tObj->MinFilter == GL_NEAREST) || (tObj->MinFilter == GL_LINEAR)) {
- /* no mipmaps! need to rescale */
- struct gl_texture_image *texImage = tObj->Image[0][minl];
- tfxMipMapLevel *mml = FX_MIPMAP_DATA(texImage);
- GLint texelBytes = texImage->TexFormat->TexelBytes;
- GLvoid *texImage_Data = texImage->Data;
- GLint _w = MIN2(mml->width, 1 << fxMesa->textureMaxLod);
- GLint _h = MIN2(mml->height, 1 << fxMesa->textureMaxLod);
- if (TDFX_DEBUG & VERBOSE_TEXTURE) {
- fprintf(stderr, "fxTexValidate: rescaling %d x %d -> %d x %d\n",
- mml->width, mml->height,
- _w, _h);
- }
- fxTexGetInfo(_w, _h, NULL, NULL, NULL, NULL,
- &(mml->wScale), &(mml->hScale));
- texImage->Width = _w / mml->wScale;
- texImage->Height = _h / mml->hScale;
- texImage->Data = MESA_PBUFFER_ALLOC(_w * _h * texelBytes);
- _mesa_rescale_teximage2d(texelBytes,
- _w * texelBytes, /* dst stride */
- mml->width, mml->height, /* src */
- _w, _h, /* dst */
- texImage_Data /*src*/, texImage->Data /*dst*/ );
- MESA_PBUFFER_FREE(texImage_Data);
- mml->width = _w;
- mml->height = _h;
- maxl = ti->maxLevel = tObj->Image[0][0]->MaxLog2 = minl + fxMesa->textureMaxLod;
- } else {
- /* skip a certain number of LODs */
- minl += maxl - fxMesa->textureMaxLod;
- if (TDFX_DEBUG & VERBOSE_TEXTURE) {
- fprintf(stderr, "fxTexValidate: skipping %d LODs\n", minl - ti->minLevel);
- }
- ti->minLevel = tObj->BaseLevel = minl;
- }
- }
+ extern void _mesa_rescale_teximage2d( GLuint bytesPerPixel,
+ GLuint dstRowStride,
+ GLint srcWidth, GLint srcHeight,
+ GLint dstWidth, GLint dstHeight,
+ const GLvoid *srcImage, GLvoid *dstImage );
+ fxMesaContext fxMesa = FX_CONTEXT(ctx);
+ /* [dBorca]
+ * Ooooooook! Here's a(nother) long story.
+ * We get here because we need to handle a texture larger
+ * than hardware can support. Two cases:
+ * 1) we have mipmaps. Then we just push up to the first supported
+ * LOD. A possible drawback is that Mesa will ignore the skipped
+ * LODs on further texture handling.
+ * Will this interfere with GL_TEXTURE_[MIN|BASE]_LEVEL? How?
+ * 2) we don't have mipmaps. We need to rescale the big LOD in place.
+ * The above approach is somehow dumb! we might have rescaled
+ * once in TexImage2D to accomodate aspect ratio, and now we
+ * are rescaling again. The thing is, in TexImage2D we don't
+ * know whether we'll hit 1) or 2) by the time of validation.
+ * NB: we could handle mml->[wh]Scale nicely, using (biased) shifts.
+ *
+ * Which brings me to another issue. How can we handle NPOT textures?
+ * - rescaling NPOT to the next bigger POT (mml->[wh]Scale can't shift)
+ * - upping the max LOD to the next power-of-two, in fxTexGetInfo; then
+ * choosing non-power-of-two values for ti->[st]Scale... Anyhow, we
+ * still need to align mipmaps correctly in texture memory!
+ */
+ if ((tObj->MinFilter == GL_NEAREST) || (tObj->MinFilter == GL_LINEAR)) {
+ /* no mipmaps! */
+ struct gl_texture_image *texImage = tObj->Image[0][minl];
+ tfxMipMapLevel *mml = FX_MIPMAP_DATA(texImage);
+ GLint _w, _h, maxSize = 1 << fxMesa->textureMaxLod;
+ if ((mml->width > maxSize) || (mml->height > maxSize)) {
+ /* need to rescale */
+ GLint texelBytes = texImage->TexFormat->TexelBytes;
+ GLvoid *texImage_Data = texImage->Data;
+ _w = MIN2(texImage->Width, maxSize);
+ _h = MIN2(texImage->Height, maxSize);
+ if (TDFX_DEBUG & VERBOSE_TEXTURE) {
+ fprintf(stderr, "fxTexValidate: rescaling %d x %d -> %d x %d\n",
+ texImage->Width, texImage->Height, _w, _h);
+ }
+ /* we should leave these as is and... (!) */
+ texImage->Width = _w;
+ texImage->Height = _h;
+ fxTexGetInfo(_w, _h, NULL, NULL, NULL, NULL,
+ &(mml->wScale), &(mml->hScale));
+ _w *= mml->wScale;
+ _h *= mml->hScale;
+ texImage->Data = MESA_PBUFFER_ALLOC(_w * _h * texelBytes);
+ _mesa_rescale_teximage2d(texelBytes,
+ _w * texelBytes, /* dst stride */
+ mml->width, mml->height, /* src */
+ _w, _h, /* dst */
+ texImage_Data /*src*/, texImage->Data /*dst*/ );
+ MESA_PBUFFER_FREE(texImage_Data);
+ mml->width = _w;
+ mml->height = _h;
+ /* (!) ... and set mml->wScale = _w / texImage->Width */
+ }
+ } else {
+ /* mipmapping */
+ if (maxl - minl > fxMesa->textureMaxLod) {
+ /* skip a certain number of LODs */
+ minl += maxl - fxMesa->textureMaxLod;
+ if (TDFX_DEBUG & VERBOSE_TEXTURE) {
+ fprintf(stderr, "fxTexValidate: skipping %d LODs\n", minl - ti->minLevel);
+ }
+ ti->minLevel = tObj->BaseLevel = minl;
+ }
+ }
}
#endif
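
The restructured block above separates the two cases the old code interleaved: without mipmaps, clamp the single LOD to 2^textureMaxLod per axis and rescale it in place; with mipmaps, push the base level up so only hardware-supported LODs remain. A minimal sketch of just that decision, with illustrative sizes; the real code also fixes up mml->wScale/hScale and Mesa's texImage bookkeeping:

    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int textureMaxLod = 8;           /* 256x256 hardware cap */
        int maxSize = 1 << textureMaxLod;
        int width = 1024, height = 512;  /* oversized source LOD */
        int mipmapped = 0;

        if (!mipmapped) {
            /* no mipmaps: rescale the big LOD in place */
            int w = MIN2(width, maxSize);
            int h = MIN2(height, maxSize);
            printf("rescale %d x %d -> %d x %d\n", width, height, w, h);
        } else {
            /* mipmaps: skip LODs the hardware cannot address */
            int minl = 0, maxl = 10;     /* log2(1024) == 10 */
            if (maxl - minl > textureMaxLod)
                minl += maxl - textureMaxLod;
            printf("skipping %d LODs, new base level %d\n", minl, minl);
        }
        return 0;
    }
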
diff --git a/src/mesa/drivers/glide/fxvb.c b/src/mesa/drivers/glide/fxvb.c
index 7a275ad1c9f..ac2574a6b5b 100644
--- a/src/mesa/drivers/glide/fxvb.c
+++ b/src/mesa/drivers/glide/fxvb.c
@@ -481,9 +481,8 @@ void fxBuildVertices( GLcontext *ctx, GLuint start, GLuint count,
if (newinputs & VERT_BIT_COLOR0)
ind |= SETUP_RGBA;
- if (newinputs & VERT_BIT_COLOR1) {
+ if (newinputs & VERT_BIT_COLOR1)
ind |= SETUP_SPEC;
- }
if (newinputs & VERT_BIT_TEX0)
ind |= SETUP_TMU0;