author     Mathias Fröhlich <[email protected]>   2018-11-17 07:13:11 +0100
committer  Mathias Fröhlich <[email protected]>   2018-11-21 06:27:19 +0100
commit     0a7020b4e60ef69e0e4b38aee31bfce385e594d8 (patch)
tree       64e68add5e2e22579c6ef715707c1dd99c1ecde1 /src/mesa/tnl/t_draw.c
parent     2da7b0a2fbf0dbc5e89f19622cf3bbfa346ed0f1 (diff)
mesa: Factor out struct gl_vertex_format.
Factor out struct gl_vertex_format from array attributes. The data type
is supposed to describe the type of a vertex element. At this current
stage the data type is only used with the VAO, but actually is useful in
various other places. Due to the bitfields being used, special care
needs to be taken for the glGet code paths.

v2: Change unsigned char -> GLubyte.
    Use struct assignment for struct gl_vertex_format.

Reviewed-by: Brian Paul <[email protected]>
Reviewed-by: Marek Olšák <[email protected]>
Signed-off-by: Mathias Fröhlich <[email protected]>
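For orientation while reading the diff below, here is a minimal sketch of what
the factored-out struct might look like. Only the members actually dereferenced
in this diff (Type, Format, Size, Normalized) are taken from the page; their
order, widths, and any further members are assumptions based on the commit
message's mention of bitfields.

#include <GL/gl.h>

/* Sketch only: member set beyond Type/Format/Size/Normalized, field order
 * and bitfield widths are assumptions, not taken from this commit page. */
struct gl_vertex_format
{
   GLenum Type;            /* e.g. GL_FLOAT, GL_BYTE, GL_FIXED */
   GLenum Format;          /* GL_RGBA, or GL_BGRA (GL_EXT_vertex_array_bgra) */
   GLubyte Size:5;         /* components per element, 1..4 */
   GLubyte Normalized:1;   /* normalize fixed-point data to [0,1] / [-1,1]? */
};

Because Size and Normalized are bitfields, code cannot take their address,
which is the "special care" the glGet code paths need.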
Diffstat (limited to 'src/mesa/tnl/t_draw.c')
-rw-r--r--   src/mesa/tnl/t_draw.c   22
1 file changed, 11 insertions, 11 deletions
diff --git a/src/mesa/tnl/t_draw.c b/src/mesa/tnl/t_draw.c
index 1fe2d405cb6..009a0bf3626 100644
--- a/src/mesa/tnl/t_draw.c
+++ b/src/mesa/tnl/t_draw.c
@@ -70,7 +70,7 @@ static void free_space(struct gl_context *ctx)
*/
#define CONVERT( TYPE, MACRO ) do { \
GLuint i, j; \
- if (attrib->Normalized) { \
+ if (attrib->Format.Normalized) { \
for (i = 0; i < count; i++) { \
const TYPE *in = (TYPE *)ptr; \
for (j = 0; j < sz; j++) { \
@@ -104,8 +104,8 @@ convert_bgra_to_float(const struct gl_vertex_buffer_binding *binding,
GLuint count )
{
GLuint i;
- assert(attrib->Normalized);
- assert(attrib->Size == 4);
+ assert(attrib->Format.Normalized);
+ assert(attrib->Format.Size == 4);
for (i = 0; i < count; i++) {
const GLubyte *in = (GLubyte *) ptr; /* in is in BGRA order */
*fptr++ = UBYTE_TO_FLOAT(in[2]); /* red */
@@ -152,9 +152,9 @@ convert_fixed_to_float(const struct gl_vertex_buffer_binding *binding,
{
GLuint i;
GLint j;
- const GLint size = attrib->Size;
+ const GLint size = attrib->Format.Size;

- if (attrib->Normalized) {
+ if (attrib->Format.Normalized) {
for (i = 0; i < count; ++i) {
const GLfixed *in = (GLfixed *) ptr;
for (j = 0; j < size; ++j) {
@@ -187,17 +187,17 @@ static void _tnl_import_array( struct gl_context *ctx,
struct vertex_buffer *VB = &tnl->vb;
GLuint stride = binding->Stride;

- if (attrib->Type != GL_FLOAT) {
- const GLuint sz = attrib->Size;
+ if (attrib->Format.Type != GL_FLOAT) {
+ const GLuint sz = attrib->Format.Size;
GLubyte *buf = get_space(ctx, count * sz * sizeof(GLfloat));
GLfloat *fptr = (GLfloat *)buf;

- switch (attrib->Type) {
+ switch (attrib->Format.Type) {
case GL_BYTE:
CONVERT(GLbyte, BYTE_TO_FLOAT);
break;
case GL_UNSIGNED_BYTE:
- if (attrib->Format == GL_BGRA) {
+ if (attrib->Format.Format == GL_BGRA) {
/* See GL_EXT_vertex_array_bgra */
convert_bgra_to_float(binding, attrib, ptr, fptr, count);
}
@@ -240,11 +240,11 @@ static void _tnl_import_array( struct gl_context *ctx,
VB->AttribPtr[attr]->start = (GLfloat *)ptr;
VB->AttribPtr[attr]->count = count;
VB->AttribPtr[attr]->stride = stride;
- VB->AttribPtr[attr]->size = attrib->Size;
+ VB->AttribPtr[attr]->size = attrib->Format.Size;

/* This should die, but so should the whole GLvector4f concept:
*/
- VB->AttribPtr[attr]->flags = (((1<<attrib->Size)-1) |
+ VB->AttribPtr[attr]->flags = (((1<<attrib->Format.Size)-1) |
VEC_NOT_WRITEABLE |
(stride == 4*sizeof(GLfloat) ? 0 : VEC_BAD_STRIDE));
}
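As an aside on the flags expression in the last hunk: a small worked example of
the mask arithmetic, assuming the low bits of the flags word mark which vector
components are populated (the VEC_* names come from the surrounding code; that
interpretation of the low bits is an assumption).

/* Worked example (assumption about intent): for a 3-component attribute,
 * Format.Size == 3, so (1 << 3) - 1 == 0x7 sets the low three bits;
 * VEC_NOT_WRITEABLE and, for non-4-float strides, VEC_BAD_STRIDE are
 * then OR'ed on top of that mask. */
GLuint size = 3;
GLuint component_mask = (1u << size) - 1u;   /* == 0x7 */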