Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--  src/gallium/auxiliary/Makefile | 1
-rw-r--r--  src/gallium/auxiliary/SConscript | 1
-rw-r--r--  src/gallium/auxiliary/draw/draw_context.c | 1
-rw-r--r--  src/gallium/auxiliary/draw/draw_context.h | 8
-rw-r--r--  src/gallium/auxiliary/draw/draw_gs.c | 9
-rw-r--r--  src/gallium/auxiliary/draw/draw_pipe_clip.c | 38
-rw-r--r--  src/gallium/auxiliary/draw/draw_pipe_vbuf.c | 6
-rw-r--r--  src/gallium/auxiliary/draw/draw_private.h | 5
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt.c | 29
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt.h | 3
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt_emit.c | 6
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt_fetch.c | 45
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt_fetch_emit.c | 5
-rw-r--r--  src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c | 14
-rw-r--r--  src/gallium/auxiliary/draw/draw_vs.c | 8
-rw-r--r--  src/gallium/auxiliary/draw/draw_vs.h | 1
-rw-r--r--  src/gallium/auxiliary/draw/draw_vs_ppc.c | 6
-rw-r--r--  src/gallium/auxiliary/draw/draw_vs_varient.c | 10
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c | 282
-rw-r--r--  src/gallium/auxiliary/rtasm/rtasm_x86sse.c | 7
-rw-r--r--  src/gallium/auxiliary/rtasm/rtasm_x86sse.h | 1
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_dump.c | 3
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_exec.c | 8
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_ppc.c | 3
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_sanity.c | 6
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_sse2.c | 50
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_ureg.c | 84
-rw-r--r--  src/gallium/auxiliary/tgsi/tgsi_ureg.h | 34
-rw-r--r--  src/gallium/auxiliary/translate/translate.h | 18
-rw-r--r--  src/gallium/auxiliary/translate/translate_generic.c | 40
-rw-r--r--  src/gallium/auxiliary/translate/translate_sse.c | 186
-rw-r--r--  src/gallium/auxiliary/util/u_blitter.c | 168
-rw-r--r--  src/gallium/auxiliary/util/u_draw_quad.c | 1
-rw-r--r--  src/gallium/auxiliary/util/u_format.csv | 8
-rw-r--r--  src/gallium/auxiliary/util/u_pack_color.h | 2
-rw-r--r--  src/gallium/auxiliary/util/u_ringbuffer.c | 145
-rw-r--r--  src/gallium/auxiliary/util/u_ringbuffer.h | 29
-rw-r--r--  src/gallium/auxiliary/util/u_tile.c | 5
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.c | 22
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.h | 2
-rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c | 28
-rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h | 4
42 files changed, 969 insertions, 363 deletions
diff --git a/src/gallium/auxiliary/Makefile b/src/gallium/auxiliary/Makefile
index e3af41c6e04..8f937e3b4e9 100644
--- a/src/gallium/auxiliary/Makefile
+++ b/src/gallium/auxiliary/Makefile
@@ -111,6 +111,7 @@ C_SOURCES = \
util/u_math.c \
util/u_mm.c \
util/u_rect.c \
+ util/u_ringbuffer.c \
util/u_simple_shaders.c \
util/u_snprintf.c \
util/u_stream_stdc.c \
diff --git a/src/gallium/auxiliary/SConscript b/src/gallium/auxiliary/SConscript
index 782eb533863..f957090b5fb 100644
--- a/src/gallium/auxiliary/SConscript
+++ b/src/gallium/auxiliary/SConscript
@@ -147,6 +147,7 @@ source = [
'util/u_math.c',
'util/u_mm.c',
'util/u_rect.c',
+ 'util/u_ringbuffer.c',
'util/u_simple_shaders.c',
'util/u_snprintf.c',
'util/u_stream_stdc.c',
diff --git a/src/gallium/auxiliary/draw/draw_context.c b/src/gallium/auxiliary/draw/draw_context.c
index 667aa46b208..e90dfc5aec4 100644
--- a/src/gallium/auxiliary/draw/draw_context.c
+++ b/src/gallium/auxiliary/draw/draw_context.c
@@ -95,6 +95,7 @@ void draw_destroy( struct draw_context *draw )
draw_pipeline_destroy( draw );
draw_pt_destroy( draw );
draw_vs_destroy( draw );
+ draw_gs_destroy( draw );
FREE( draw );
}
diff --git a/src/gallium/auxiliary/draw/draw_context.h b/src/gallium/auxiliary/draw/draw_context.h
index b716209df29..8a64c06efcd 100644
--- a/src/gallium/auxiliary/draw/draw_context.h
+++ b/src/gallium/auxiliary/draw/draw_context.h
@@ -164,6 +164,14 @@ void draw_set_mapped_constant_buffer(struct draw_context *draw,
void draw_arrays(struct draw_context *draw, unsigned prim,
unsigned start, unsigned count);
+void
+draw_arrays_instanced(struct draw_context *draw,
+ unsigned mode,
+ unsigned start,
+ unsigned count,
+ unsigned startInstance,
+ unsigned instanceCount);
+
void draw_flush(struct draw_context *draw);
diff --git a/src/gallium/auxiliary/draw/draw_gs.c b/src/gallium/auxiliary/draw/draw_gs.c
index 5db2e755423..daf8d071f12 100644
--- a/src/gallium/auxiliary/draw/draw_gs.c
+++ b/src/gallium/auxiliary/draw/draw_gs.c
@@ -59,6 +59,15 @@ draw_gs_init( struct draw_context *draw )
return TRUE;
}
+void draw_gs_destroy( struct draw_context *draw )
+{
+ if (!draw->gs.machine)
+ return;
+
+ align_free(draw->gs.machine->Primitives);
+
+ tgsi_exec_machine_destroy(draw->gs.machine);
+}
void draw_gs_set_constants( struct draw_context *draw,
const float (*constants)[4],
diff --git a/src/gallium/auxiliary/draw/draw_pipe_clip.c b/src/gallium/auxiliary/draw/draw_pipe_clip.c
index 205cda5eabe..51a6115ebf5 100644
--- a/src/gallium/auxiliary/draw/draw_pipe_clip.c
+++ b/src/gallium/auxiliary/draw/draw_pipe_clip.c
@@ -55,7 +55,7 @@
-struct clipper {
+struct clip_stage {
struct draw_stage stage; /**< base class */
/* Basically duplicate some of the flatshading logic here:
@@ -70,9 +70,9 @@ struct clipper {
/* This is a bit confusing:
*/
-static INLINE struct clipper *clipper_stage( struct draw_stage *stage )
+static INLINE struct clip_stage *clip_stage( struct draw_stage *stage )
{
- return (struct clipper *)stage;
+ return (struct clip_stage *)stage;
}
@@ -92,11 +92,12 @@ static void interp_attr( float *fdst,
fdst[3] = LINTERP( t, fout[3], fin[3] );
}
+
static void copy_colors( struct draw_stage *stage,
struct vertex_header *dst,
const struct vertex_header *src )
{
- const struct clipper *clipper = clipper_stage(stage);
+ const struct clip_stage *clipper = clip_stage(stage);
uint i;
for (i = 0; i < clipper->num_color_attribs; i++) {
const uint attr = clipper->color_attribs[i];
@@ -108,7 +109,7 @@ static void copy_colors( struct draw_stage *stage,
/* Interpolate between two vertices to produce a third.
*/
-static void interp( const struct clipper *clip,
+static void interp( const struct clip_stage *clip,
struct vertex_header *dst,
float t,
const struct vertex_header *out,
@@ -179,7 +180,7 @@ static void emit_poly( struct draw_stage *stage,
header.v[2] = inlist[0]; /* keep in v[2] for flatshading */
if (i == n-1)
- header.flags |= edge_last;
+ header.flags |= edge_last;
if (0) {
const struct draw_vertex_shader *vs = stage->draw->vs.vertex_shader;
@@ -200,13 +201,14 @@ static void emit_poly( struct draw_stage *stage,
}
}
+
static INLINE float
dot4(const float *a, const float *b)
{
- return (a[0]*b[0] +
- a[1]*b[1] +
- a[2]*b[2] +
- a[3]*b[3]);
+ return (a[0] * b[0] +
+ a[1] * b[1] +
+ a[2] * b[2] +
+ a[3] * b[3]);
}
@@ -217,7 +219,7 @@ do_clip_tri( struct draw_stage *stage,
struct prim_header *header,
unsigned clipmask )
{
- struct clipper *clipper = clipper_stage( stage );
+ struct clip_stage *clipper = clip_stage( stage );
struct vertex_header *a[MAX_CLIPPED_VERTICES];
struct vertex_header *b[MAX_CLIPPED_VERTICES];
struct vertex_header **inlist = a;
@@ -280,6 +282,7 @@ do_clip_tri( struct draw_stage *stage,
dp_prev = dp;
}
+ /* swap in/out lists */
{
struct vertex_header **tmp = inlist;
inlist = outlist;
@@ -291,15 +294,11 @@ do_clip_tri( struct draw_stage *stage,
/* If flat-shading, copy color to new provoking vertex.
*/
if (clipper->flat && inlist[0] != header->v[2]) {
- if (1) {
- inlist[0] = dup_vert(stage, inlist[0], tmpnr++);
- }
+ inlist[0] = dup_vert(stage, inlist[0], tmpnr++);
copy_colors(stage, inlist[0], header->v[2]);
}
-
-
/* Emit the polygon as triangles to the setup stage:
*/
if (n >= 3)
@@ -314,7 +313,7 @@ do_clip_line( struct draw_stage *stage,
struct prim_header *header,
unsigned clipmask )
{
- const struct clipper *clipper = clipper_stage( stage );
+ const struct clip_stage *clipper = clip_stage( stage );
struct vertex_header *v0 = header->v[0];
struct vertex_header *v1 = header->v[1];
const float *pos0 = v0->clip;
@@ -416,13 +415,14 @@ clip_tri( struct draw_stage *stage,
}
}
+
/* Update state. Could further delay this until we hit the first
* primitive that really requires clipping.
*/
static void
clip_init_state( struct draw_stage *stage )
{
- struct clipper *clipper = clipper_stage( stage );
+ struct clip_stage *clipper = clip_stage( stage );
clipper->flat = stage->draw->rasterizer->flatshade ? TRUE : FALSE;
@@ -488,7 +488,7 @@ static void clip_destroy( struct draw_stage *stage )
*/
struct draw_stage *draw_clip_stage( struct draw_context *draw )
{
- struct clipper *clipper = CALLOC_STRUCT(clipper);
+ struct clip_stage *clipper = CALLOC_STRUCT(clip_stage);
if (clipper == NULL)
goto fail;
diff --git a/src/gallium/auxiliary/draw/draw_pipe_vbuf.c b/src/gallium/auxiliary/draw/draw_pipe_vbuf.c
index 1a5269c0de9..d40c0352401 100644
--- a/src/gallium/auxiliary/draw/draw_pipe_vbuf.c
+++ b/src/gallium/auxiliary/draw/draw_pipe_vbuf.c
@@ -138,7 +138,7 @@ emit_vertex( struct vbuf_stage *vbuf,
/* Note: we really do want data[0] here, not data[pos]:
*/
vbuf->translate->set_buffer(vbuf->translate, 0, vertex->data[0], 0);
- vbuf->translate->run(vbuf->translate, 0, 1, vbuf->vertex_ptr);
+ vbuf->translate->run(vbuf->translate, 0, 1, 0, vbuf->vertex_ptr);
if (0) draw_dump_emitted_vertex(vbuf->vinfo, (uint8_t *)vbuf->vertex_ptr);
@@ -271,10 +271,12 @@ vbuf_start_prim( struct vbuf_stage *vbuf, uint prim )
emit_sz = 0;
break;
}
-
+
+ hw_key.element[i].type = TRANSLATE_ELEMENT_NORMAL;
hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
hw_key.element[i].input_buffer = src_buffer;
hw_key.element[i].input_offset = src_offset;
+ hw_key.element[i].instance_divisor = 0;
hw_key.element[i].output_format = output_format;
hw_key.element[i].output_offset = dst_offset;
diff --git a/src/gallium/auxiliary/draw/draw_private.h b/src/gallium/auxiliary/draw/draw_private.h
index e49041556bd..ef49e575366 100644
--- a/src/gallium/auxiliary/draw/draw_private.h
+++ b/src/gallium/auxiliary/draw/draw_private.h
@@ -172,6 +172,8 @@ struct draw_context
boolean force_passthrough; /**< never clip or shade */
+ boolean dump_vs;
+
double mrd; /**< minimum resolvable depth value, for polygon offset */
/* pipe state that we need: */
@@ -239,6 +241,8 @@ struct draw_context
unsigned reduced_prim;
+ unsigned instance_id;
+
void *driver_private;
};
@@ -265,6 +269,7 @@ boolean draw_gs_init( struct draw_context *draw );
void draw_gs_set_constants( struct draw_context *,
const float (*constants)[4],
unsigned size );
+void draw_gs_destroy( struct draw_context *draw );
/*******************************************************************************
* Common shading code:
diff --git a/src/gallium/auxiliary/draw/draw_pt.c b/src/gallium/auxiliary/draw/draw_pt.c
index 2801dbafe47..a5ddec52863 100644
--- a/src/gallium/auxiliary/draw/draw_pt.c
+++ b/src/gallium/auxiliary/draw/draw_pt.c
@@ -280,20 +280,33 @@ void
draw_arrays(struct draw_context *draw, unsigned prim,
unsigned start, unsigned count)
{
- unsigned reduced_prim = u_reduced_prim(prim);
+ draw_arrays_instanced(draw, prim, start, count, 0, 1);
+}
+
+void
+draw_arrays_instanced(struct draw_context *draw,
+ unsigned mode,
+ unsigned start,
+ unsigned count,
+ unsigned startInstance,
+ unsigned instanceCount)
+{
+ unsigned reduced_prim = u_reduced_prim(mode);
+ unsigned instance;
+
if (reduced_prim != draw->reduced_prim) {
- draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw_do_flush(draw, DRAW_FLUSH_STATE_CHANGE);
draw->reduced_prim = reduced_prim;
}
if (0)
- draw_print_arrays(draw, prim, start, MIN2(count, 20));
+ draw_print_arrays(draw, mode, start, MIN2(count, 20));
#if 0
{
int i;
- debug_printf("draw_arrays(prim=%u start=%u count=%u):\n",
- prim, start, count);
+ debug_printf("draw_arrays(mode=%u start=%u count=%u):\n",
+ mode, start, count);
tgsi_dump(draw->vs.vertex_shader->state.tokens, 0);
debug_printf("Elements:\n");
for (i = 0; i < draw->pt.nr_vertex_elements; i++) {
@@ -311,6 +324,8 @@ draw_arrays(struct draw_context *draw, unsigned prim,
}
#endif
- /* drawing done here: */
- draw_pt_arrays(draw, prim, start, count);
+ for (instance = 0; instance < instanceCount; instance++) {
+ draw->instance_id = instance + startInstance;
+ draw_pt_arrays(draw, mode, start, count);
+ }
}
diff --git a/src/gallium/auxiliary/draw/draw_pt.h b/src/gallium/auxiliary/draw/draw_pt.h
index 20edf7a227e..d5e0d92a605 100644
--- a/src/gallium/auxiliary/draw/draw_pt.h
+++ b/src/gallium/auxiliary/draw/draw_pt.h
@@ -183,7 +183,8 @@ struct pt_emit *draw_pt_emit_create( struct draw_context *draw );
struct pt_fetch;
void draw_pt_fetch_prepare( struct pt_fetch *fetch,
unsigned vertex_input_count,
- unsigned vertex_size );
+ unsigned vertex_size,
+ unsigned instance_id_index );
void draw_pt_fetch_run( struct pt_fetch *fetch,
const unsigned *elts,
diff --git a/src/gallium/auxiliary/draw/draw_pt_emit.c b/src/gallium/auxiliary/draw/draw_pt_emit.c
index 064e16c295c..4fb53276bbe 100644
--- a/src/gallium/auxiliary/draw/draw_pt_emit.c
+++ b/src/gallium/auxiliary/draw/draw_pt_emit.c
@@ -121,10 +121,12 @@ void draw_pt_emit_prepare( struct pt_emit *emit,
emit_sz = 0;
break;
}
-
+
+ hw_key.element[i].type = TRANSLATE_ELEMENT_NORMAL;
hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
hw_key.element[i].input_buffer = src_buffer;
hw_key.element[i].input_offset = src_offset;
+ hw_key.element[i].instance_divisor = 0;
hw_key.element[i].output_format = output_format;
hw_key.element[i].output_offset = dst_offset;
@@ -204,6 +206,7 @@ void draw_pt_emit( struct pt_emit *emit,
translate->run( translate,
0,
vertex_count,
+ draw->instance_id,
hw_verts );
render->unmap_vertices( render,
@@ -263,6 +266,7 @@ void draw_pt_emit_linear(struct pt_emit *emit,
translate->run(translate,
0,
count,
+ draw->instance_id,
hw_verts);
if (0) {
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch.c b/src/gallium/auxiliary/draw/draw_pt_fetch.c
index 305bfef4352..55e7a7b81ad 100644
--- a/src/gallium/auxiliary/draw/draw_pt_fetch.c
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch.c
@@ -58,12 +58,14 @@ struct pt_fetch {
*/
void draw_pt_fetch_prepare( struct pt_fetch *fetch,
unsigned vs_input_count,
- unsigned vertex_size )
+ unsigned vertex_size,
+ unsigned instance_id_index )
{
struct draw_context *draw = fetch->draw;
unsigned nr_inputs;
- unsigned i, nr = 0;
+ unsigned i, nr = 0, ei = 0;
unsigned dst_offset = 0;
+ unsigned num_extra_inputs = 0;
struct translate_key key;
fetch->vertex_size = vertex_size;
@@ -78,9 +80,11 @@ void draw_pt_fetch_prepare( struct pt_fetch *fetch,
{
/* Need to set header->vertex_id = 0xffff somehow.
*/
+ key.element[nr].type = TRANSLATE_ELEMENT_NORMAL;
key.element[nr].input_format = PIPE_FORMAT_R32_FLOAT;
key.element[nr].input_buffer = draw->pt.nr_vertex_buffers;
key.element[nr].input_offset = 0;
+ key.element[nr].instance_divisor = 0;
key.element[nr].output_format = PIPE_FORMAT_R32_FLOAT;
key.element[nr].output_offset = dst_offset;
dst_offset += 1 * sizeof(float);
@@ -91,19 +95,36 @@ void draw_pt_fetch_prepare( struct pt_fetch *fetch,
*/
dst_offset += 4 * sizeof(float);
}
-
- assert( draw->pt.nr_vertex_elements >= vs_input_count );
- nr_inputs = MIN2( vs_input_count, draw->pt.nr_vertex_elements );
+ if (instance_id_index != ~0) {
+ num_extra_inputs++;
+ }
+
+ assert(draw->pt.nr_vertex_elements + num_extra_inputs >= vs_input_count);
+
+ nr_inputs = MIN2(vs_input_count, draw->pt.nr_vertex_elements + num_extra_inputs);
for (i = 0; i < nr_inputs; i++) {
- key.element[nr].input_format = draw->pt.vertex_element[i].src_format;
- key.element[nr].input_buffer = draw->pt.vertex_element[i].vertex_buffer_index;
- key.element[nr].input_offset = draw->pt.vertex_element[i].src_offset;
- key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
- key.element[nr].output_offset = dst_offset;
+ if (i == instance_id_index) {
+ key.element[nr].type = TRANSLATE_ELEMENT_INSTANCE_ID;
+ key.element[nr].input_format = PIPE_FORMAT_R32_USCALED;
+ key.element[nr].output_format = PIPE_FORMAT_R32_USCALED;
+ key.element[nr].output_offset = dst_offset;
+
+ dst_offset += sizeof(uint);
+ } else {
+ key.element[nr].type = TRANSLATE_ELEMENT_NORMAL;
+ key.element[nr].input_format = draw->pt.vertex_element[ei].src_format;
+ key.element[nr].input_buffer = draw->pt.vertex_element[ei].vertex_buffer_index;
+ key.element[nr].input_offset = draw->pt.vertex_element[ei].src_offset;
+ key.element[nr].instance_divisor = draw->pt.vertex_element[ei].instance_divisor;
+ key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ key.element[nr].output_offset = dst_offset;
+
+ ei++;
+ dst_offset += 4 * sizeof(float);
+ }
- dst_offset += 4 * sizeof(float);
nr++;
}
@@ -158,6 +179,7 @@ void draw_pt_fetch_run( struct pt_fetch *fetch,
translate->run_elts( translate,
elts,
count,
+ draw->instance_id,
verts );
}
@@ -183,6 +205,7 @@ void draw_pt_fetch_run_linear( struct pt_fetch *fetch,
translate->run( translate,
start,
count,
+ draw->instance_id,
verts );
}
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c b/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c
index e7fe6b3b768..2a604470e9a 100644
--- a/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c
@@ -166,9 +166,11 @@ static void fetch_emit_prepare( struct draw_pt_middle_end *middle,
continue;
}
+ key.element[i].type = TRANSLATE_ELEMENT_NORMAL;
key.element[i].input_format = input_format;
key.element[i].input_buffer = input_buffer;
key.element[i].input_offset = input_offset;
+ key.element[i].instance_divisor = src->instance_divisor;
key.element[i].output_format = output_format;
key.element[i].output_offset = dst_offset;
@@ -256,6 +258,7 @@ static void fetch_emit_run( struct draw_pt_middle_end *middle,
feme->translate->run_elts( feme->translate,
fetch_elts,
fetch_count,
+ draw->instance_id,
hw_verts );
if (0) {
@@ -314,6 +317,7 @@ static void fetch_emit_run_linear( struct draw_pt_middle_end *middle,
feme->translate->run( feme->translate,
start,
count,
+ draw->instance_id,
hw_verts );
if (0) {
@@ -374,6 +378,7 @@ static boolean fetch_emit_run_linear_elts( struct draw_pt_middle_end *middle,
feme->translate->run( feme->translate,
start,
count,
+ draw->instance_id,
hw_verts );
draw->render->unmap_vertices( draw->render, 0, (ushort)(count - 1) );
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c
index 1a9df4cac5d..23da556f797 100644
--- a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c
@@ -59,6 +59,8 @@ static void fetch_pipeline_prepare( struct draw_pt_middle_end *middle,
struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
struct draw_context *draw = fpme->draw;
struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ unsigned i;
+ unsigned instance_id_index = ~0;
/* Add one to num_outputs because the pipeline occasionally tags on
* an additional texcoord, eg for AA lines.
@@ -66,6 +68,15 @@ static void fetch_pipeline_prepare( struct draw_pt_middle_end *middle,
unsigned nr = MAX2( vs->info.num_inputs,
vs->info.num_outputs + 1 );
+ /* Scan for instanceID system value.
+ */
+ for (i = 0; i < vs->info.num_inputs; i++) {
+ if (vs->info.input_semantic_name[i] == TGSI_SEMANTIC_INSTANCEID) {
+ instance_id_index = i;
+ break;
+ }
+ }
+
fpme->prim = prim;
fpme->opt = opt;
@@ -79,7 +90,8 @@ static void fetch_pipeline_prepare( struct draw_pt_middle_end *middle,
draw_pt_fetch_prepare( fpme->fetch,
vs->info.num_inputs,
- fpme->vertex_size );
+ fpme->vertex_size,
+ instance_id_index );
/* XXX: it's not really gl rasterization rules we care about here,
* but gl vs dx9 clip spaces.
*/
diff --git a/src/gallium/auxiliary/draw/draw_vs.c b/src/gallium/auxiliary/draw/draw_vs.c
index 35536895326..e03ac8c2291 100644
--- a/src/gallium/auxiliary/draw/draw_vs.c
+++ b/src/gallium/auxiliary/draw/draw_vs.c
@@ -43,11 +43,11 @@
#include "translate/translate.h"
#include "translate/translate_cache.h"
+#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_exec.h"
-
void draw_vs_set_constants( struct draw_context *draw,
const float (*constants)[4],
unsigned size )
@@ -83,6 +83,10 @@ draw_create_vertex_shader(struct draw_context *draw,
{
struct draw_vertex_shader *vs;
+ if (draw->dump_vs) {
+ tgsi_dump(shader->tokens, 0);
+ }
+
vs = draw_create_vs_llvm( draw, shader );
if (!vs) {
vs = draw_create_vs_sse( draw, shader );
@@ -152,6 +156,8 @@ draw_delete_vertex_shader(struct draw_context *draw,
boolean
draw_vs_init( struct draw_context *draw )
{
+ draw->dump_vs = debug_get_bool_option("GALLIUM_DUMP_VS", FALSE);
+
draw->vs.machine = tgsi_exec_machine_create();
if (!draw->vs.machine)
return FALSE;
diff --git a/src/gallium/auxiliary/draw/draw_vs.h b/src/gallium/auxiliary/draw/draw_vs.h
index e3b807ebd0e..00036cfe68b 100644
--- a/src/gallium/auxiliary/draw/draw_vs.h
+++ b/src/gallium/auxiliary/draw/draw_vs.h
@@ -43,6 +43,7 @@ struct draw_varient_input
enum pipe_format format;
unsigned buffer;
unsigned offset;
+ unsigned instance_divisor;
};
struct draw_varient_output
diff --git a/src/gallium/auxiliary/draw/draw_vs_ppc.c b/src/gallium/auxiliary/draw/draw_vs_ppc.c
index ad184bd696d..da9f3e3d35c 100644
--- a/src/gallium/auxiliary/draw/draw_vs_ppc.c
+++ b/src/gallium/auxiliary/draw/draw_vs_ppc.c
@@ -98,9 +98,9 @@ vs_ppc_run_linear( struct draw_vertex_shader *base,
/* loop over verts */
for (i = 0; i < count; i += MAX_VERTICES) {
const uint max_vertices = MIN2(MAX_VERTICES, count - i);
- float inputs_soa[PIPE_MAX_SHADER_INPUTS][4][4] ALIGN16_ATTRIB;
- float outputs_soa[PIPE_MAX_SHADER_OUTPUTS][4][4] ALIGN16_ATTRIB;
- float temps_soa[TGSI_EXEC_NUM_TEMPS][4][4] ALIGN16_ATTRIB;
+ PIPE_ALIGN_VAR(16) float inputs_soa[PIPE_MAX_SHADER_INPUTS][4][4];
+ PIPE_ALIGN_VAR(16) float outputs_soa[PIPE_MAX_SHADER_OUTPUTS][4][4];
+ PIPE_ALIGN_VAR(16) float temps_soa[TGSI_EXEC_NUM_TEMPS][4][4];
uint attr;
/* convert (up to) four input verts to SoA format */
diff --git a/src/gallium/auxiliary/draw/draw_vs_varient.c b/src/gallium/auxiliary/draw/draw_vs_varient.c
index d16692584e5..9f40030f39f 100644
--- a/src/gallium/auxiliary/draw/draw_vs_varient.c
+++ b/src/gallium/auxiliary/draw/draw_vs_varient.c
@@ -142,6 +142,7 @@ static void PIPE_CDECL vsvg_run_elts( struct draw_vs_varient *varient,
vsvg->fetch->run_elts( vsvg->fetch,
elts,
count,
+ vsvg->draw->instance_id,
temp_buffer );
vsvg->base.vs->run_linear( vsvg->base.vs,
@@ -181,6 +182,7 @@ static void PIPE_CDECL vsvg_run_elts( struct draw_vs_varient *varient,
vsvg->emit->run( vsvg->emit,
0, count,
+ vsvg->draw->instance_id,
output_buffer );
FREE(temp_buffer);
@@ -203,6 +205,7 @@ static void PIPE_CDECL vsvg_run_linear( struct draw_vs_varient *varient,
vsvg->fetch->run( vsvg->fetch,
start,
count,
+ vsvg->draw->instance_id,
temp_buffer );
vsvg->base.vs->run_linear( vsvg->base.vs,
@@ -239,6 +242,7 @@ static void PIPE_CDECL vsvg_run_linear( struct draw_vs_varient *varient,
vsvg->emit->run( vsvg->emit,
0, count,
+ vsvg->draw->instance_id,
output_buffer );
FREE(temp_buffer);
@@ -281,9 +285,11 @@ struct draw_vs_varient *draw_vs_varient_generic( struct draw_vertex_shader *vs,
fetch.nr_elements = key->nr_inputs;
fetch.output_stride = vsvg->temp_vertex_stride;
for (i = 0; i < key->nr_inputs; i++) {
+ fetch.element[i].type = TRANSLATE_ELEMENT_NORMAL;
fetch.element[i].input_format = key->element[i].in.format;
fetch.element[i].input_buffer = key->element[i].in.buffer;
fetch.element[i].input_offset = key->element[i].in.offset;
+ fetch.element[i].instance_divisor = 0;
fetch.element[i].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
fetch.element[i].output_offset = i * 4 * sizeof(float);
assert(fetch.element[i].output_offset < fetch.output_stride);
@@ -295,17 +301,21 @@ struct draw_vs_varient *draw_vs_varient_generic( struct draw_vertex_shader *vs,
for (i = 0; i < key->nr_outputs; i++) {
if (key->element[i].out.format != EMIT_1F_PSIZE)
{
+ emit.element[i].type = TRANSLATE_ELEMENT_NORMAL;
emit.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
emit.element[i].input_buffer = 0;
emit.element[i].input_offset = key->element[i].out.vs_output * 4 * sizeof(float);
+ emit.element[i].instance_divisor = 0;
emit.element[i].output_format = draw_translate_vinfo_format(key->element[i].out.format);
emit.element[i].output_offset = key->element[i].out.offset;
assert(emit.element[i].input_offset <= fetch.output_stride);
}
else {
+ emit.element[i].type = TRANSLATE_ELEMENT_NORMAL;
emit.element[i].input_format = PIPE_FORMAT_R32_FLOAT;
emit.element[i].input_buffer = 1;
emit.element[i].input_offset = 0;
+ emit.element[i].instance_divisor = 0;
emit.element[i].output_format = PIPE_FORMAT_R32_FLOAT;
emit.element[i].output_offset = key->element[i].out.offset;
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index ba6f7b15f9e..a4b78f14943 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -80,27 +80,11 @@ struct fenced_buffer_list
*/
struct fenced_buffer
{
- /*
- * Immutable members.
- */
-
struct pb_buffer base;
+
struct pb_buffer *buffer;
- struct fenced_buffer_list *list;
-
- /**
- * Protected by fenced_buffer_list::mutex
- */
- struct list_head head;
- /**
- * Following members are mutable and protected by this mutex.
- *
- * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
- * held, but in order to prevent deadlocks you must never lock
- * fenced_buffer_list::mutex with this mutex held.
- */
- pipe_mutex mutex;
+ /* FIXME: protect access with mutex */
/**
* A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
@@ -112,6 +96,9 @@ struct fenced_buffer
struct pb_validate *vl;
unsigned validation_flags;
struct pipe_fence_handle *fence;
+
+ struct list_head head;
+ struct fenced_buffer_list *list;
};
@@ -123,24 +110,15 @@ fenced_buffer(struct pb_buffer *buf)
}
-/**
- * Add the buffer to the fenced list.
- *
- * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
- * order, before calling this function.
- *
- * Reference count should be incremented before calling this function.
- */
static INLINE void
-fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
- struct fenced_buffer *fenced_buf)
+_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
assert(pipe_is_referenced(&fenced_buf->base.base.reference));
assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
assert(fenced_buf->fence);
- /* TODO: Move the reference count increment here */
-
#ifdef DEBUG
LIST_DEL(&fenced_buf->head);
assert(fenced_list->numUnfenced);
@@ -152,16 +130,32 @@ fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
/**
- * Remove the buffer from the fenced list.
- *
- * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
- * order before calling this function.
- *
- * Reference count should be decremented after calling this function.
+ * Actually destroy the buffer.
*/
static INLINE void
-fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
- struct fenced_buffer *fenced_buf)
+_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(!fenced_buf->fence);
+#ifdef DEBUG
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#else
+ (void)fenced_list;
+#endif
+ pb_reference(&fenced_buf->buffer, NULL);
+ FREE(fenced_buf);
+}
+
+
+static INLINE void
+_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
{
struct pb_fence_ops *ops = fenced_list->ops;
@@ -183,56 +177,37 @@ fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
++fenced_list->numUnfenced;
#endif
- /* TODO: Move the reference count decrement and destruction here */
+ /**
+ * FIXME!!!
+ */
+
+ if(!pipe_is_referenced(&fenced_buf->base.base.reference))
+ _fenced_buffer_destroy(fenced_buf);
}
-/**
- * Wait for the fence to expire, and remove it from the fenced list.
- *
- * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
- * held -- it will be acquired internally.
- */
static INLINE enum pipe_error
-fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
- struct fenced_buffer *fenced_buf)
+_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
struct pb_fence_ops *ops = fenced_list->ops;
- enum pipe_error ret = PIPE_ERROR;
#if 0
debug_warning("waiting for GPU");
#endif
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
assert(fenced_buf->fence);
-
- /*
- * Acquire the global lock. Must release buffer mutex first to preserve
- * lock order.
- */
- pipe_mutex_unlock(fenced_buf->mutex);
- pipe_mutex_lock(fenced_list->mutex);
- pipe_mutex_lock(fenced_buf->mutex);
-
if(fenced_buf->fence) {
- if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
- /* Remove from the fenced list */
- /* TODO: remove consequents */
- fenced_buffer_remove_locked(fenced_list, fenced_buf);
-
- p_atomic_dec(&fenced_buf->base.base.reference.count);
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
-
- fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
- ret = PIPE_OK;
+ if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
+ return PIPE_ERROR;
}
+ /* Remove from the fenced list */
+ /* TODO: remove consequents */
+ _fenced_buffer_remove(fenced_list, fenced_buf);
}
- pipe_mutex_unlock(fenced_list->mutex);
-
- return ret;
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ return PIPE_OK;
}
@@ -240,8 +215,8 @@ fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
* Free as many fenced buffers from the list head as possible.
*/
static void
-fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
- int wait)
+_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
{
struct pb_fence_ops *ops = fenced_list->ops;
struct list_head *curr, *next;
@@ -254,29 +229,21 @@ fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
while(curr != &fenced_list->delayed) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- pipe_mutex_lock(fenced_buf->mutex);
-
if(fenced_buf->fence != prev_fence) {
int signaled;
if (wait)
signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
else
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- if (signaled != 0) {
- pipe_mutex_unlock(fenced_buf->mutex);
+ if (signaled != 0)
break;
- }
prev_fence = fenced_buf->fence;
}
else {
assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
}
- fenced_buffer_remove_locked(fenced_list, fenced_buf);
- pipe_mutex_unlock(fenced_buf->mutex);
-
- pb_buf = &fenced_buf->base;
- pb_reference(&pb_buf, NULL);
+ _fenced_buffer_remove(fenced_list, fenced_buf);
curr = next;
next = curr->next;
@@ -290,25 +257,30 @@ fenced_buffer_destroy(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_buffer_list *fenced_list = fenced_buf->list;
- assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
- assert(!fenced_buf->fence);
-
-#ifdef DEBUG
pipe_mutex_lock(fenced_list->mutex);
- assert(fenced_buf->head.prev);
- assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
- assert(fenced_list->numUnfenced);
- --fenced_list->numUnfenced;
+ assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ if (fenced_buf->fence) {
+ struct pb_fence_ops *ops = fenced_list->ops;
+ if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
+ struct list_head *curr, *prev;
+ curr = &fenced_buf->head;
+ prev = curr->prev;
+ do {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ curr = prev;
+ prev = curr->prev;
+ } while (curr != &fenced_list->delayed);
+ }
+ else {
+ /* delay destruction */
+ }
+ }
+ else {
+ _fenced_buffer_destroy(fenced_buf);
+ }
pipe_mutex_unlock(fenced_list->mutex);
-#else
- (void)fenced_list;
-#endif
-
- pb_reference(&fenced_buf->buffer, NULL);
-
- pipe_mutex_destroy(fenced_buf->mutex);
- FREE(fenced_buf);
}
@@ -319,23 +291,24 @@ fenced_buffer_map(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_buffer_list *fenced_list = fenced_buf->list;
struct pb_fence_ops *ops = fenced_list->ops;
- void *map = NULL;
-
- pipe_mutex_lock(fenced_buf->mutex);
+ void *map;
assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
/* Serialize writes */
if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
- if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
- ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
+ if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
/* Don't wait for the GPU to finish writing */
- goto done;
+ if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ else
+ return NULL;
+ }
+ else {
+ /* Wait for the GPU to finish writing */
+ _fenced_buffer_finish(fenced_buf);
}
-
- /* Wait for the GPU to finish writing */
- fenced_buffer_finish_locked(fenced_list, fenced_buf);
}
#if 0
@@ -352,9 +325,6 @@ fenced_buffer_map(struct pb_buffer *buf,
fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
-done:
- pipe_mutex_unlock(fenced_buf->mutex);
-
return map;
}
@@ -363,9 +333,6 @@ static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-
- pipe_mutex_lock(fenced_buf->mutex);
-
assert(fenced_buf->mapcount);
if(fenced_buf->mapcount) {
pb_unmap(fenced_buf->buffer);
@@ -373,8 +340,6 @@ fenced_buffer_unmap(struct pb_buffer *buf)
if(!fenced_buf->mapcount)
fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
-
- pipe_mutex_unlock(fenced_buf->mutex);
}
@@ -386,14 +351,11 @@ fenced_buffer_validate(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
enum pipe_error ret;
- pipe_mutex_lock(fenced_buf->mutex);
-
if(!vl) {
/* invalidate */
fenced_buf->vl = NULL;
fenced_buf->validation_flags = 0;
- ret = PIPE_OK;
- goto done;
+ return PIPE_OK;
}
assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
@@ -401,17 +363,14 @@ fenced_buffer_validate(struct pb_buffer *buf,
flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
/* Buffer cannot be validated in two different lists */
- if(fenced_buf->vl && fenced_buf->vl != vl) {
- ret = PIPE_ERROR_RETRY;
- goto done;
- }
+ if(fenced_buf->vl && fenced_buf->vl != vl)
+ return PIPE_ERROR_RETRY;
#if 0
/* Do not validate if buffer is still mapped */
if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
/* TODO: wait for the thread that mapped the buffer to unmap it */
- ret = PIPE_ERROR_RETRY;
- goto done;
+ return PIPE_ERROR_RETRY;
}
/* Final sanity checking */
assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
@@ -421,21 +380,17 @@ fenced_buffer_validate(struct pb_buffer *buf,
if(fenced_buf->vl == vl &&
(fenced_buf->validation_flags & flags) == flags) {
/* Nothing to do -- buffer already validated */
- ret = PIPE_OK;
- goto done;
+ return PIPE_OK;
}
ret = pb_validate(fenced_buf->buffer, vl, flags);
if (ret != PIPE_OK)
- goto done;
+ return ret;
fenced_buf->vl = vl;
fenced_buf->validation_flags |= flags;
-done:
- pipe_mutex_unlock(fenced_buf->mutex);
-
- return ret;
+ return PIPE_OK;
}
@@ -450,36 +405,29 @@ fenced_buffer_fence(struct pb_buffer *buf,
fenced_buf = fenced_buffer(buf);
fenced_list = fenced_buf->list;
ops = fenced_list->ops;
-
- pipe_mutex_lock(fenced_list->mutex);
- pipe_mutex_lock(fenced_buf->mutex);
-
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
-
- if(fence != fenced_buf->fence) {
- assert(fenced_buf->vl);
- assert(fenced_buf->validation_flags);
-
- if (fenced_buf->fence) {
- fenced_buffer_remove_locked(fenced_list, fenced_buf);
- p_atomic_dec(&fenced_buf->base.base.reference.count);
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
- }
- if (fence) {
- ops->fence_reference(ops, &fenced_buf->fence, fence);
- fenced_buf->flags |= fenced_buf->validation_flags;
- p_atomic_inc(&fenced_buf->base.base.reference.count);
- fenced_buffer_add_locked(fenced_list, fenced_buf);
- }
-
- pb_fence(fenced_buf->buffer, fence);
- fenced_buf->vl = NULL;
- fenced_buf->validation_flags = 0;
+ if(fence == fenced_buf->fence) {
+ /* Nothing to do */
+ return;
}
- pipe_mutex_unlock(fenced_buf->mutex);
+ assert(fenced_buf->vl);
+ assert(fenced_buf->validation_flags);
+
+ pipe_mutex_lock(fenced_list->mutex);
+ if (fenced_buf->fence)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ if (fence) {
+ ops->fence_reference(ops, &fenced_buf->fence, fence);
+ fenced_buf->flags |= fenced_buf->validation_flags;
+ _fenced_buffer_add(fenced_buf);
+ }
pipe_mutex_unlock(fenced_list->mutex);
+
+ pb_fence(fenced_buf->buffer, fence);
+
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
}
@@ -489,7 +437,6 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
pb_size *offset)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- /* NOTE: accesses immutable members only -- mutex not necessary */
pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}
@@ -529,8 +476,6 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
buf->buffer = buffer;
buf->list = fenced_list;
- pipe_mutex_init(buf->mutex);
-
#ifdef DEBUG
pipe_mutex_lock(fenced_list->mutex);
LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
@@ -572,7 +517,7 @@ fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
int wait)
{
pipe_mutex_lock(fenced_list->mutex);
- fenced_buffer_list_check_free_locked(fenced_list, wait);
+ _fenced_buffer_list_check_free(fenced_list, wait);
pipe_mutex_unlock(fenced_list->mutex);
}
@@ -594,13 +539,11 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
next = curr->next;
while(curr != &fenced_list->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- pipe_mutex_lock(fenced_buf->mutex);
assert(!fenced_buf->fence);
debug_printf("%10p %7u %7u\n",
(void *) fenced_buf,
fenced_buf->base.base.size,
p_atomic_read(&fenced_buf->base.base.reference.count));
- pipe_mutex_unlock(fenced_buf->mutex);
curr = next;
next = curr->next;
}
@@ -610,7 +553,6 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
while(curr != &fenced_list->delayed) {
int signaled;
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- pipe_mutex_lock(fenced_buf->mutex);
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
debug_printf("%10p %7u %7u %10p %s\n",
(void *) fenced_buf,
@@ -618,7 +560,6 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
p_atomic_read(&fenced_buf->base.base.reference.count),
(void *) fenced_buf->fence,
signaled == 0 ? "y" : "n");
- pipe_mutex_unlock(fenced_buf->mutex);
curr = next;
next = curr->next;
}
@@ -639,8 +580,8 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
+ _fenced_buffer_list_check_free(fenced_list, 1);
pipe_mutex_lock(fenced_list->mutex);
- fenced_buffer_list_check_free_locked(fenced_list, 1);
}
#ifdef DEBUG
@@ -648,7 +589,6 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
#endif
pipe_mutex_unlock(fenced_list->mutex);
- pipe_mutex_destroy(fenced_list->mutex);
fenced_list->ops->destroy(fenced_list->ops);
diff --git a/src/gallium/auxiliary/rtasm/rtasm_x86sse.c b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
index 1acf3c373eb..f675427d987 100644
--- a/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
+++ b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
@@ -673,6 +673,13 @@ void x86_and( struct x86_function *p,
emit_op_modrm( p, 0x23, 0x21, dst, src );
}
+void x86_div( struct x86_function *p,
+ struct x86_reg src )
+{
+ assert(src.file == file_REG32 && src.mod == mod_REG);
+ emit_op_modrm(p, 0xf7, 0, x86_make_reg(file_REG32, 6), src);
+}
+
/***********************************************************************
diff --git a/src/gallium/auxiliary/rtasm/rtasm_x86sse.h b/src/gallium/auxiliary/rtasm/rtasm_x86sse.h
index 731a6517968..f7612d416a0 100644
--- a/src/gallium/auxiliary/rtasm/rtasm_x86sse.h
+++ b/src/gallium/auxiliary/rtasm/rtasm_x86sse.h
@@ -244,6 +244,7 @@ void x86_sub( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_test( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_xor( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_sahf( struct x86_function *p );
+void x86_div( struct x86_function *p, struct x86_reg src );
void x86_cdecl_caller_push_regs( struct x86_function *p );
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump.c b/src/gallium/auxiliary/tgsi/tgsi_dump.c
index e2e5394f86f..d7ff262f30a 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_dump.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump.c
@@ -123,7 +123,8 @@ static const char *semantic_names[] =
"NORMAL",
"FACE",
"EDGEFLAG",
- "PRIM_ID"
+ "PRIM_ID",
+ "INSTANCEID"
};
static const char *immediate_type_names[] =
diff --git a/src/gallium/auxiliary/tgsi/tgsi_exec.c b/src/gallium/auxiliary/tgsi/tgsi_exec.c
index 2bcb33392a8..118a638ab48 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_exec.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_exec.c
@@ -1041,11 +1041,19 @@ fetch_src_file_channel(
default:
assert( 0 );
+ chan->u[0] = 0;
+ chan->u[1] = 0;
+ chan->u[2] = 0;
+ chan->u[3] = 0;
}
break;
default:
assert( 0 );
+ chan->u[0] = 0;
+ chan->u[1] = 0;
+ chan->u[2] = 0;
+ chan->u[3] = 0;
}
}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ppc.c b/src/gallium/auxiliary/tgsi/tgsi_ppc.c
index 138d2d095bb..ad553c71a57 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_ppc.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_ppc.c
@@ -51,7 +51,8 @@
* Since it's pretty much impossible to form PPC vector immediates, load
* them from memory here:
*/
-const float ppc_builtin_constants[] ALIGN16_ATTRIB = {
+PIPE_ALIGN_VAR(16) const float
+ppc_builtin_constants[] = {
1.0f, -128.0f, 128.0, 0.0
};
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sanity.c b/src/gallium/auxiliary/tgsi/tgsi_sanity.c
index 7f1c8e5dd68..e1e4f97967d 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_sanity.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_sanity.c
@@ -335,13 +335,13 @@ iter_instruction(
fill_scan_register1d(ind_reg,
inst->Src[i].Indirect.File,
inst->Src[i].Indirect.Index);
- if (!(reg->file == TGSI_FILE_ADDRESS || reg->file == TGSI_FILE_LOOP) ||
- reg->indices[0] != 0) {
+ if (!(ind_reg->file == TGSI_FILE_ADDRESS || ind_reg->file == TGSI_FILE_LOOP) ||
+ ind_reg->indices[0] != 0) {
report_warning(ctx, "Indirect register neither ADDR[0] nor LOOP[0]");
}
check_register_usage(
ctx,
- reg,
+ ind_reg,
"indirect",
FALSE );
}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sse2.c b/src/gallium/auxiliary/tgsi/tgsi_sse2.c
index 2e13a7aaf9d..a85cc4659e0 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_sse2.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_sse2.c
@@ -2146,40 +2146,50 @@ emit_instruction(
break;
case TGSI_OPCODE_XPD:
+ /* Note: we do all stores after all operands have been fetched
+ * to avoid src/dst register aliasing issues for an instruction
+ * such as: XPD TEMP[2].xyz, TEMP[0], TEMP[2];
+ */
if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
- FETCH( func, *inst, 1, 1, CHAN_Z );
- FETCH( func, *inst, 3, 0, CHAN_Z );
+ FETCH( func, *inst, 1, 1, CHAN_Z ); /* xmm[1] = src[1].z */
+ FETCH( func, *inst, 3, 0, CHAN_Z ); /* xmm[3] = src[0].z */
}
if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
- FETCH( func, *inst, 0, 0, CHAN_Y );
- FETCH( func, *inst, 4, 1, CHAN_Y );
+ FETCH( func, *inst, 0, 0, CHAN_Y ); /* xmm[0] = src[0].y */
+ FETCH( func, *inst, 4, 1, CHAN_Y ); /* xmm[4] = src[1].y */
}
IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) {
- emit_MOV( func, 2, 0 );
- emit_mul( func, 2, 1 );
- emit_MOV( func, 5, 3 );
- emit_mul( func, 5, 4 );
- emit_sub( func, 2, 5 );
- STORE( func, *inst, 2, 0, CHAN_X );
+ emit_MOV( func, 7, 0 ); /* xmm[7] = xmm[0] */
+ emit_mul( func, 7, 1 ); /* xmm[7] = xmm[2] * xmm[1] */
+ emit_MOV( func, 5, 3 ); /* xmm[5] = xmm[3] */
+ emit_mul( func, 5, 4 ); /* xmm[5] = xmm[5] * xmm[4] */
+ emit_sub( func, 7, 5 ); /* xmm[7] = xmm[2] - xmm[5] */
+ /* store xmm[7] in dst.x below */
}
if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ||
IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
- FETCH( func, *inst, 2, 1, CHAN_X );
- FETCH( func, *inst, 5, 0, CHAN_X );
+ FETCH( func, *inst, 2, 1, CHAN_X ); /* xmm[2] = src[1].x */
+ FETCH( func, *inst, 5, 0, CHAN_X ); /* xmm[5] = src[0].x */
}
IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) {
- emit_mul( func, 3, 2 );
- emit_mul( func, 1, 5 );
- emit_sub( func, 3, 1 );
- STORE( func, *inst, 3, 0, CHAN_Y );
+ emit_mul( func, 3, 2 ); /* xmm[3] = xmm[3] * xmm[2] */
+ emit_mul( func, 1, 5 ); /* xmm[1] = xmm[1] * xmm[5] */
+ emit_sub( func, 3, 1 ); /* xmm[3] = xmm[3] - xmm[1] */
+ /* store xmm[3] in dst.y below */
}
IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) {
- emit_mul( func, 5, 4 );
- emit_mul( func, 0, 2 );
- emit_sub( func, 5, 0 );
- STORE( func, *inst, 5, 0, CHAN_Z );
+ emit_mul( func, 5, 4 ); /* xmm[5] = xmm[5] * xmm[4] */
+ emit_mul( func, 0, 2 ); /* xmm[0] = xmm[0] * xmm[2] */
+ emit_sub( func, 5, 0 ); /* xmm[5] = xmm[5] - xmm[0] */
+ STORE( func, *inst, 5, 0, CHAN_Z ); /* dst.z = xmm[5] */
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) {
+ STORE( func, *inst, 7, 0, CHAN_X ); /* dst.x = xmm[7] */
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) {
+ STORE( func, *inst, 3, 0, CHAN_Y ); /* dst.y = xmm[3] */
}
IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) {
emit_tempf(
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.c b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
index e64e2b731df..8bd6f68dcc0 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_ureg.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
@@ -40,6 +40,8 @@ union tgsi_any_token {
struct tgsi_header header;
struct tgsi_processor processor;
struct tgsi_token token;
+ struct tgsi_property prop;
+ struct tgsi_property_data prop_data;
struct tgsi_declaration decl;
struct tgsi_declaration_range decl_range;
struct tgsi_declaration_semantic decl_semantic;
@@ -64,6 +66,7 @@ struct ureg_tokens {
};
#define UREG_MAX_INPUT PIPE_MAX_ATTRIBS
+#define UREG_MAX_SYSTEM_VALUE PIPE_MAX_ATTRIBS
#define UREG_MAX_OUTPUT PIPE_MAX_ATTRIBS
#define UREG_MAX_CONSTANT_RANGE 32
#define UREG_MAX_IMMEDIATE 32
@@ -95,6 +98,13 @@ struct ureg_program
unsigned nr_gs_inputs;
struct {
+ unsigned index;
+ unsigned semantic_name;
+ unsigned semantic_index;
+ } system_value[UREG_MAX_SYSTEM_VALUE];
+ unsigned nr_system_values;
+
+ struct {
unsigned semantic_name;
unsigned semantic_index;
} output[UREG_MAX_OUTPUT];
@@ -123,6 +133,8 @@ struct ureg_program
} constant_range[UREG_MAX_CONSTANT_RANGE];
unsigned nr_constant_ranges;
+ unsigned property_gs_input_prim;
+
unsigned nr_addrs;
unsigned nr_preds;
unsigned nr_loops;
@@ -234,19 +246,28 @@ ureg_src_register( unsigned file,
src.SwizzleY = TGSI_SWIZZLE_Y;
src.SwizzleZ = TGSI_SWIZZLE_Z;
src.SwizzleW = TGSI_SWIZZLE_W;
- src.Pad = 0;
src.Indirect = 0;
src.IndirectIndex = 0;
src.IndirectSwizzle = 0;
src.Absolute = 0;
src.Index = index;
src.Negate = 0;
+ src.Dimension = 0;
+ src.DimensionIndex = 0;
return src;
}
+void
+ureg_property_gs_input_prim(struct ureg_program *ureg,
+ unsigned gs_input_prim)
+{
+ ureg->property_gs_input_prim = gs_input_prim;
+}
+
+
struct ureg_src
ureg_DECL_fs_input( struct ureg_program *ureg,
@@ -304,6 +325,25 @@ ureg_DECL_gs_input(struct ureg_program *ureg,
}
+struct ureg_src
+ureg_DECL_system_value(struct ureg_program *ureg,
+ unsigned index,
+ unsigned semantic_name,
+ unsigned semantic_index)
+{
+ if (ureg->nr_system_values < UREG_MAX_SYSTEM_VALUE) {
+ ureg->system_value[ureg->nr_system_values].index = index;
+ ureg->system_value[ureg->nr_system_values].semantic_name = semantic_name;
+ ureg->system_value[ureg->nr_system_values].semantic_index = semantic_index;
+ ureg->nr_system_values++;
+ } else {
+ set_bad(ureg);
+ }
+
+ return ureg_src_register(TGSI_FILE_SYSTEM_VALUE, index);
+}
+
+
struct ureg_dst
ureg_DECL_output( struct ureg_program *ureg,
unsigned name,
@@ -628,7 +668,7 @@ void
ureg_emit_src( struct ureg_program *ureg,
struct ureg_src src )
{
- unsigned size = 1 + (src.Indirect ? 1 : 0);
+ unsigned size = 1 + (src.Indirect ? 1 : 0) + (src.Dimension ? 1 : 0);
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
unsigned n = 0;
@@ -660,6 +700,15 @@ ureg_emit_src( struct ureg_program *ureg,
n++;
}
+ if (src.Dimension) {
+ out[0].src.Dimension = 1;
+ out[n].dim.Indirect = 0;
+ out[n].dim.Dimension = 0;
+ out[n].dim.Padding = 0;
+ out[n].dim.Index = src.DimensionIndex;
+ n++;
+ }
+
assert(n == size);
}
@@ -1027,13 +1076,34 @@ emit_immediate( struct ureg_program *ureg,
out[4].imm_data.Uint = v[3];
}
+static void
+emit_property(struct ureg_program *ureg,
+ unsigned name,
+ unsigned data)
+{
+ union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);
+ out[0].value = 0;
+ out[0].prop.Type = TGSI_TOKEN_TYPE_PROPERTY;
+ out[0].prop.NrTokens = 2;
+ out[0].prop.PropertyName = name;
+
+ out[1].prop_data.Data = data;
+}
static void emit_decls( struct ureg_program *ureg )
{
unsigned i;
+ if (ureg->property_gs_input_prim != ~0) {
+ assert(ureg->processor == TGSI_PROCESSOR_GEOMETRY);
+
+ emit_property(ureg,
+ TGSI_PROPERTY_GS_INPUT_PRIM,
+ ureg->property_gs_input_prim);
+ }
+
if (ureg->processor == TGSI_PROCESSOR_VERTEX) {
for (i = 0; i < UREG_MAX_INPUT; i++) {
if (ureg->vs_inputs[i/32] & (1 << (i%32))) {
@@ -1058,6 +1128,15 @@ static void emit_decls( struct ureg_program *ureg )
}
}
+ for (i = 0; i < ureg->nr_system_values; i++) {
+ emit_decl(ureg,
+ TGSI_FILE_SYSTEM_VALUE,
+ ureg->system_value[i].index,
+ ureg->system_value[i].semantic_name,
+ ureg->system_value[i].semantic_index,
+ TGSI_INTERPOLATE_CONSTANT);
+ }
+
for (i = 0; i < ureg->nr_outputs; i++) {
emit_decl( ureg,
TGSI_FILE_OUTPUT,
@@ -1234,6 +1313,7 @@ struct ureg_program *ureg_create( unsigned processor )
return NULL;
ureg->processor = processor;
+ ureg->property_gs_input_prim = ~0;
return ureg;
}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.h b/src/gallium/auxiliary/tgsi/tgsi_ureg.h
index 6f11273320a..03eaf24854c 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_ureg.h
+++ b/src/gallium/auxiliary/tgsi/tgsi_ureg.h
@@ -47,13 +47,14 @@ struct ureg_src
unsigned SwizzleY : 2; /* TGSI_SWIZZLE_ */
unsigned SwizzleZ : 2; /* TGSI_SWIZZLE_ */
unsigned SwizzleW : 2; /* TGSI_SWIZZLE_ */
- unsigned Pad : 1; /* BOOL */
unsigned Indirect : 1; /* BOOL */
+ unsigned Dimension : 1; /* BOOL */
unsigned Absolute : 1; /* BOOL */
- int Index : 16; /* SINT */
unsigned Negate : 1; /* BOOL */
+ int Index : 16; /* SINT */
int IndirectIndex : 16; /* SINT */
int IndirectSwizzle : 2; /* TGSI_SWIZZLE_ */
+ int DimensionIndex : 16; /* SINT */
};
/* Very similar to a tgsi_dst_register, removing unsupported fields
@@ -118,6 +119,14 @@ ureg_create_shader_and_destroy( struct ureg_program *p,
}
+/***********************************************************************
+ * Build shader properties:
+ */
+
+void
+ureg_property_gs_input_prim(struct ureg_program *ureg,
+ unsigned gs_input_prim);
+
/***********************************************************************
* Build shader declarations:
@@ -137,6 +146,12 @@ struct ureg_src
ureg_DECL_gs_input(struct ureg_program *,
unsigned index);
+struct ureg_src
+ureg_DECL_system_value(struct ureg_program *,
+ unsigned index,
+ unsigned semantic_name,
+ unsigned semantic_index);
+
struct ureg_dst
ureg_DECL_output( struct ureg_program *,
unsigned semantic_name,
@@ -760,6 +775,15 @@ ureg_src_indirect( struct ureg_src reg, struct ureg_src addr )
return reg;
}
+static INLINE struct ureg_src
+ureg_src_dimension( struct ureg_src reg, int index )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ reg.Dimension = 1;
+ reg.DimensionIndex = index;
+ return reg;
+}
+
static INLINE struct ureg_dst
ureg_dst( struct ureg_src src )
{
@@ -792,13 +816,14 @@ ureg_src( struct ureg_dst dst )
src.SwizzleY = TGSI_SWIZZLE_Y;
src.SwizzleZ = TGSI_SWIZZLE_Z;
src.SwizzleW = TGSI_SWIZZLE_W;
- src.Pad = 0;
src.Indirect = dst.Indirect;
src.IndirectIndex = dst.IndirectIndex;
src.IndirectSwizzle = dst.IndirectSwizzle;
src.Absolute = 0;
src.Index = dst.Index;
src.Negate = 0;
+ src.Dimension = 0;
+ src.DimensionIndex = 0;
return src;
}
@@ -837,13 +862,14 @@ ureg_src_undef( void )
src.SwizzleY = 0;
src.SwizzleZ = 0;
src.SwizzleW = 0;
- src.Pad = 0;
src.Indirect = 0;
src.IndirectIndex = 0;
src.IndirectSwizzle = 0;
src.Absolute = 0;
src.Index = 0;
src.Negate = 0;
+ src.Dimension = 0;
+ src.DimensionIndex = 0;
return src;
}
diff --git a/src/gallium/auxiliary/translate/translate.h b/src/gallium/auxiliary/translate/translate.h
index 34526eb0617..54ed2c1a4be 100644
--- a/src/gallium/auxiliary/translate/translate.h
+++ b/src/gallium/auxiliary/translate/translate.h
@@ -44,12 +44,19 @@
#include "pipe/p_format.h"
#include "pipe/p_state.h"
+enum translate_element_type {
+ TRANSLATE_ELEMENT_NORMAL,
+ TRANSLATE_ELEMENT_INSTANCE_ID
+};
+
struct translate_element
{
+ enum translate_element_type type;
enum pipe_format input_format;
enum pipe_format output_format;
unsigned input_buffer:8;
unsigned input_offset:24;
+ unsigned instance_divisor;
unsigned output_offset;
};
@@ -74,11 +81,13 @@ struct translate {
void (PIPE_CDECL *run_elts)( struct translate *,
const unsigned *elts,
unsigned count,
+ unsigned instance_id,
void *output_buffer);
void (PIPE_CDECL *run)( struct translate *,
unsigned start,
unsigned count,
+ unsigned instance_id,
void *output_buffer);
};
@@ -103,8 +112,13 @@ static INLINE int translate_keysize( const struct translate_key *key )
static INLINE int translate_key_compare( const struct translate_key *a,
const struct translate_key *b )
{
- int keysize = translate_keysize(a);
- return memcmp(a, b, keysize);
+ int keysize_a = translate_keysize(a);
+ int keysize_b = translate_keysize(b);
+
+ if (keysize_a != keysize_b) {
+ return keysize_a - keysize_b;
+ }
+ return memcmp(a, b, keysize_a);
}
diff --git a/src/gallium/auxiliary/translate/translate_generic.c b/src/gallium/auxiliary/translate/translate_generic.c
index 266e7ee81e6..24727d49888 100644
--- a/src/gallium/auxiliary/translate/translate_generic.c
+++ b/src/gallium/auxiliary/translate/translate_generic.c
@@ -46,9 +46,12 @@ struct translate_generic {
struct translate translate;
struct {
+ enum translate_element_type type;
+
fetch_func fetch;
unsigned buffer;
unsigned input_offset;
+ unsigned instance_divisor;
emit_func emit;
unsigned output_offset;
@@ -568,6 +571,7 @@ static emit_func get_emit_func( enum pipe_format format )
static void PIPE_CDECL generic_run_elts( struct translate *translate,
const unsigned *elts,
unsigned count,
+ unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
@@ -583,13 +587,20 @@ static void PIPE_CDECL generic_run_elts( struct translate *translate,
for (attr = 0; attr < nr_attrs; attr++) {
float data[4];
-
- const char *src = (tg->attrib[attr].input_ptr +
- tg->attrib[attr].input_stride * elt);
+ const char *src;
char *dst = (vert +
tg->attrib[attr].output_offset);
+ if (tg->attrib[attr].instance_divisor) {
+ src = tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride *
+ (instance_id / tg->attrib[attr].instance_divisor);
+ } else {
+ src = tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride * elt;
+ }
+
tg->attrib[attr].fetch( src, data );
if (0) debug_printf("vert %d/%d attr %d: %f %f %f %f\n",
@@ -607,6 +618,7 @@ static void PIPE_CDECL generic_run_elts( struct translate *translate,
static void PIPE_CDECL generic_run( struct translate *translate,
unsigned start,
unsigned count,
+ unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
@@ -623,13 +635,25 @@ static void PIPE_CDECL generic_run( struct translate *translate,
for (attr = 0; attr < nr_attrs; attr++) {
float data[4];
- const char *src = (tg->attrib[attr].input_ptr +
- tg->attrib[attr].input_stride * elt);
-
char *dst = (vert +
tg->attrib[attr].output_offset);
- tg->attrib[attr].fetch( src, data );
+ if (tg->attrib[attr].type == TRANSLATE_ELEMENT_NORMAL) {
+ const char *src;
+
+ if (tg->attrib[attr].instance_divisor) {
+ src = tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride *
+ (instance_id / tg->attrib[attr].instance_divisor);
+ } else {
+ src = tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride * elt;
+ }
+
+ tg->attrib[attr].fetch( src, data );
+ } else {
+ data[0] = (float)instance_id;
+ }
if (0) debug_printf("vert %d attr %d: %f %f %f %f\n",
i, attr, data[0], data[1], data[2], data[3]);
@@ -683,10 +707,12 @@ struct translate *translate_generic_create( const struct translate_key *key )
tg->translate.run = generic_run;
for (i = 0; i < key->nr_elements; i++) {
+ tg->attrib[i].type = key->element[i].type;
tg->attrib[i].fetch = get_fetch_func(key->element[i].input_format);
tg->attrib[i].buffer = key->element[i].input_buffer;
tg->attrib[i].input_offset = key->element[i].input_offset;
+ tg->attrib[i].instance_divisor = key->element[i].instance_divisor;
tg->attrib[i].emit = get_emit_func(key->element[i].output_format);
tg->attrib[i].output_offset = key->element[i].output_offset;
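
Condensed into plain C, the rule the generic path now applies per attribute is the following; the helper below is a restatement for clarity, not a function in the patch.

/* Sketch: which source element an attribute reads from.  A divisor of 0
 * means ordinary per-vertex data; a divisor of N means the attribute
 * advances once every N instances.
 */
static unsigned
attrib_source_index(unsigned elt, unsigned instance_id, unsigned instance_divisor)
{
   if (instance_divisor)
      return instance_id / instance_divisor;   /* per-instance fetch */
   return elt;                                 /* per-vertex fetch */
}

/* src = input_ptr + input_stride * attrib_source_index(elt, instance_id,
 *                                                      instance_divisor);
 * TRANSLATE_ELEMENT_INSTANCE_ID elements skip the fetch entirely and emit
 * (float)instance_id instead.
 */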
diff --git a/src/gallium/auxiliary/translate/translate_sse.c b/src/gallium/auxiliary/translate/translate_sse.c
index b62db8d8f33..c13e7427387 100644
--- a/src/gallium/auxiliary/translate/translate_sse.c
+++ b/src/gallium/auxiliary/translate/translate_sse.c
@@ -49,19 +49,29 @@
typedef void (PIPE_CDECL *run_func)( struct translate *translate,
unsigned start,
unsigned count,
- void *output_buffer );
+ unsigned instance_id,
+ void *output_buffer);
typedef void (PIPE_CDECL *run_elts_func)( struct translate *translate,
const unsigned *elts,
unsigned count,
- void *output_buffer );
+ unsigned instance_id,
+ void *output_buffer);
struct translate_buffer {
const void *base_ptr;
unsigned stride;
- void *ptr; /* updated per vertex */
};
+struct translate_buffer_varient {
+ unsigned buffer_index;
+ unsigned instance_divisor;
+ void *ptr; /* updated either per vertex or per instance */
+};
+
+
+#define ELEMENT_BUFFER_INSTANCE_ID 1001
+
struct translate_sse {
struct translate translate;
@@ -81,6 +91,16 @@ struct translate_sse {
struct translate_buffer buffer[PIPE_MAX_ATTRIBS];
unsigned nr_buffers;
+ /* Multiple buffer variants can map to a single buffer. */
+ struct translate_buffer_varient buffer_varient[PIPE_MAX_ATTRIBS];
+ unsigned nr_buffer_varients;
+
+ /* Multiple elements can map to a single buffer variant. */
+ unsigned element_to_buffer_varient[PIPE_MAX_ATTRIBS];
+
+ boolean use_instancing;
+ unsigned instance_id;
+
run_func gen_run;
run_elts_func gen_run_elts;
@@ -359,32 +379,61 @@ static boolean init_inputs( struct translate_sse *p,
boolean linear )
{
unsigned i;
- if (linear) {
- for (i = 0; i < p->nr_buffers; i++) {
+ struct x86_reg instance_id = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->instance_id));
+
+ for (i = 0; i < p->nr_buffer_varients; i++) {
+ struct translate_buffer_varient *varient = &p->buffer_varient[i];
+ struct translate_buffer *buffer = &p->buffer[varient->buffer_index];
+
+ if (linear || varient->instance_divisor) {
struct x86_reg buf_stride = x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[i].stride));
+ get_offset(p, &buffer->stride));
struct x86_reg buf_ptr = x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[i].ptr));
+ get_offset(p, &varient->ptr));
struct x86_reg buf_base_ptr = x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[i].base_ptr));
+ get_offset(p, &buffer->base_ptr));
struct x86_reg elt = p->idx_EBX;
- struct x86_reg tmp = p->tmp_EAX;
-
+ struct x86_reg tmp_EAX = p->tmp_EAX;
/* Calculate pointer to first attrib:
+ * base_ptr + stride * index, where index depends on instance divisor
*/
- x86_mov(p->func, tmp, buf_stride);
- x86_imul(p->func, tmp, elt);
- x86_add(p->func, tmp, buf_base_ptr);
+ if (varient->instance_divisor) {
+ /* Our index is instance ID divided by instance divisor.
+ */
+ x86_mov(p->func, tmp_EAX, instance_id);
+
+ if (varient->instance_divisor != 1) {
+ struct x86_reg tmp_EDX = p->machine_EDX;
+ struct x86_reg tmp_ECX = p->outbuf_ECX;
+
+ /* TODO: Add x86_shr() to rtasm and use it whenever
+ * instance divisor is power of two.
+ */
+
+ x86_push(p->func, tmp_EDX);
+ x86_push(p->func, tmp_ECX);
+ x86_xor(p->func, tmp_EDX, tmp_EDX);
+ x86_mov_reg_imm(p->func, tmp_ECX, varient->instance_divisor);
+ x86_div(p->func, tmp_ECX); /* EAX = EDX:EAX / ECX */
+ x86_pop(p->func, tmp_ECX);
+ x86_pop(p->func, tmp_EDX);
+ }
+ } else {
+ x86_mov(p->func, tmp_EAX, elt);
+ }
+ x86_imul(p->func, tmp_EAX, buf_stride);
+ x86_add(p->func, tmp_EAX, buf_base_ptr);
/* In the linear case, keep the buffer pointer instead of the
* index number.
*/
- if (p->nr_buffers == 1)
- x86_mov( p->func, elt, tmp );
+ if (linear && p->nr_buffer_varients == 1)
+ x86_mov(p->func, elt, tmp_EAX);
else
- x86_mov( p->func, buf_ptr, tmp );
+ x86_mov(p->func, buf_ptr, tmp_EAX);
}
}
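
Read as C, the code init_inputs() now emits per buffer variant does roughly the following (the EDX/ECX push/pop around the divide exists only because x86 DIV takes its divisor in a register and clobbers EDX); this is a restatement of the generated code, not a function in the patch.

/* Pseudo-C for the emitted prologue; 'varient' follows the patch's spelling. */
for (i = 0; i < p->nr_buffer_varients; i++) {
   struct translate_buffer_varient *varient = &p->buffer_varient[i];
   struct translate_buffer *buffer = &p->buffer[varient->buffer_index];
   unsigned index;

   if (!linear && !varient->instance_divisor)
      continue;   /* indexed per-vertex attributes are resolved per element */

   index = varient->instance_divisor
      ? instance_id / varient->instance_divisor   /* unsigned DIV, EDX zeroed */
      : elt;

   /* With a single linear varient the result stays in the index register;
    * otherwise it is written back to varient->ptr.
    */
   varient->ptr = (char *)buffer->base_ptr + buffer->stride * index;
}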
@@ -394,31 +443,36 @@ static boolean init_inputs( struct translate_sse *p,
static struct x86_reg get_buffer_ptr( struct translate_sse *p,
boolean linear,
- unsigned buf_idx,
+ unsigned var_idx,
struct x86_reg elt )
{
- if (linear && p->nr_buffers == 1) {
+ if (var_idx == ELEMENT_BUFFER_INSTANCE_ID) {
+ return x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->instance_id));
+ }
+ if (linear && p->nr_buffer_varients == 1) {
return p->idx_EBX;
}
- else if (linear) {
+ else if (linear || p->buffer_varient[var_idx].instance_divisor) {
struct x86_reg ptr = p->tmp_EAX;
struct x86_reg buf_ptr =
x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[buf_idx].ptr));
+ get_offset(p, &p->buffer_varient[var_idx].ptr));
x86_mov(p->func, ptr, buf_ptr);
return ptr;
}
else {
struct x86_reg ptr = p->tmp_EAX;
+ const struct translate_buffer_varient *varient = &p->buffer_varient[var_idx];
struct x86_reg buf_stride =
x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[buf_idx].stride));
+ get_offset(p, &p->buffer[varient->buffer_index].stride));
struct x86_reg buf_base_ptr =
x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[buf_idx].base_ptr));
+ get_offset(p, &p->buffer[varient->buffer_index].base_ptr));
@@ -436,28 +490,33 @@ static struct x86_reg get_buffer_ptr( struct translate_sse *p,
static boolean incr_inputs( struct translate_sse *p,
boolean linear )
{
- if (linear && p->nr_buffers == 1) {
+ if (linear && p->nr_buffer_varients == 1) {
struct x86_reg stride = x86_make_disp(p->machine_EDX,
get_offset(p, &p->buffer[0].stride));
- x86_add(p->func, p->idx_EBX, stride);
- sse_prefetchnta(p->func, x86_make_disp(p->idx_EBX, 192));
+ if (p->buffer_varient[0].instance_divisor == 0) {
+ x86_add(p->func, p->idx_EBX, stride);
+ sse_prefetchnta(p->func, x86_make_disp(p->idx_EBX, 192));
+ }
}
else if (linear) {
unsigned i;
/* Is this worthwhile??
*/
- for (i = 0; i < p->nr_buffers; i++) {
+ for (i = 0; i < p->nr_buffer_varients; i++) {
+ struct translate_buffer_varient *varient = &p->buffer_varient[i];
struct x86_reg buf_ptr = x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[i].ptr));
+ get_offset(p, &varient->ptr));
struct x86_reg buf_stride = x86_make_disp(p->machine_EDX,
- get_offset(p, &p->buffer[i].stride));
+ get_offset(p, &p->buffer[varient->buffer_index].stride));
- x86_mov(p->func, p->tmp_EAX, buf_ptr);
- x86_add(p->func, p->tmp_EAX, buf_stride);
- if (i == 0) sse_prefetchnta(p->func, x86_make_disp(p->tmp_EAX, 192));
- x86_mov(p->func, buf_ptr, p->tmp_EAX);
+ if (varient->instance_divisor == 0) {
+ x86_mov(p->func, p->tmp_EAX, buf_ptr);
+ x86_add(p->func, p->tmp_EAX, buf_stride);
+ if (i == 0) sse_prefetchnta(p->func, x86_make_disp(p->tmp_EAX, 192));
+ x86_mov(p->func, buf_ptr, p->tmp_EAX);
+ }
}
}
else {
@@ -514,7 +573,18 @@ static boolean build_vertex_emit( struct translate_sse *p,
x86_mov(p->func, p->machine_EDX, x86_fn_arg(p->func, 1));
x86_mov(p->func, p->idx_EBX, x86_fn_arg(p->func, 2));
x86_mov(p->func, p->count_ESI, x86_fn_arg(p->func, 3));
- x86_mov(p->func, p->outbuf_ECX, x86_fn_arg(p->func, 4));
+ x86_mov(p->func, p->outbuf_ECX, x86_fn_arg(p->func, 5));
+
+ /* Load instance ID.
+ */
+ if (p->use_instancing) {
+ x86_mov(p->func,
+ p->tmp_EAX,
+ x86_fn_arg(p->func, 4));
+ x86_mov(p->func,
+ x86_make_disp(p->machine_EDX, get_offset(p, &p->instance_id)),
+ p->tmp_EAX);
+ }
/* Get vertex count, compare to zero
*/
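
With the extra parameter, the register loads above line up with the widened run_func/run_elts_func prototypes declared earlier in this patch; summarising the argument slots (a restatement, not new API):

/* Generated function arguments after this change:
 *   arg 1 -> EDX: struct translate * (machine state base pointer)
 *   arg 2 -> EBX: start index (linear) or elts pointer (indexed)
 *   arg 3 -> ESI: count
 *   arg 4 ------: instance_id, spilled to p->instance_id when instancing is used
 *   arg 5 -> ECX: output_buffer
 */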
@@ -531,17 +601,18 @@ static boolean build_vertex_emit( struct translate_sse *p,
label = x86_get_label(p->func);
{
struct x86_reg elt = linear ? p->idx_EBX : x86_deref(p->idx_EBX);
- int last_vb = -1;
+ int last_varient = -1;
struct x86_reg vb;
for (j = 0; j < p->translate.key.nr_elements; j++) {
const struct translate_element *a = &p->translate.key.element[j];
+ unsigned varient = p->element_to_buffer_varient[j];
/* Figure out source pointer address:
*/
- if (a->input_buffer != last_vb) {
- last_vb = a->input_buffer;
- vb = get_buffer_ptr(p, linear, a->input_buffer, elt);
+ if (varient != last_varient) {
+ last_varient = varient;
+ vb = get_buffer_ptr(p, linear, varient, elt);
}
if (!translate_attr( p, a,
@@ -624,6 +695,7 @@ static void translate_sse_release( struct translate *translate )
static void PIPE_CDECL translate_sse_run_elts( struct translate *translate,
const unsigned *elts,
unsigned count,
+ unsigned instance_id,
void *output_buffer )
{
struct translate_sse *p = (struct translate_sse *)translate;
@@ -631,12 +703,14 @@ static void PIPE_CDECL translate_sse_run_elts( struct translate *translate,
p->gen_run_elts( translate,
elts,
count,
- output_buffer );
+ instance_id,
+ output_buffer);
}
static void PIPE_CDECL translate_sse_run( struct translate *translate,
unsigned start,
unsigned count,
+ unsigned instance_id,
void *output_buffer )
{
struct translate_sse *p = (struct translate_sse *)translate;
@@ -644,7 +718,8 @@ static void PIPE_CDECL translate_sse_run( struct translate *translate,
p->gen_run( translate,
start,
count,
- output_buffer );
+ instance_id,
+ output_buffer);
}
@@ -666,8 +741,37 @@ struct translate *translate_sse2_create( const struct translate_key *key )
p->translate.run_elts = translate_sse_run_elts;
p->translate.run = translate_sse_run;
- for (i = 0; i < key->nr_elements; i++)
- p->nr_buffers = MAX2( p->nr_buffers, key->element[i].input_buffer + 1 );
+ for (i = 0; i < key->nr_elements; i++) {
+ if (key->element[i].type == TRANSLATE_ELEMENT_NORMAL) {
+ unsigned j;
+
+ p->nr_buffers = MAX2(p->nr_buffers, key->element[i].input_buffer + 1);
+
+ if (key->element[i].instance_divisor) {
+ p->use_instancing = TRUE;
+ }
+
+ /*
+ * Map vertex element to vertex buffer variant.
+ */
+ for (j = 0; j < p->nr_buffer_varients; j++) {
+ if (p->buffer_varient[j].buffer_index == key->element[i].input_buffer &&
+ p->buffer_varient[j].instance_divisor == key->element[i].instance_divisor) {
+ break;
+ }
+ }
+ if (j == p->nr_buffer_varients) {
+ p->buffer_varient[j].buffer_index = key->element[i].input_buffer;
+ p->buffer_varient[j].instance_divisor = key->element[i].instance_divisor;
+ p->nr_buffer_varients++;
+ }
+ p->element_to_buffer_varient[i] = j;
+ } else {
+ assert(key->element[i].type == TRANSLATE_ELEMENT_INSTANCE_ID);
+
+ p->element_to_buffer_varient[i] = ELEMENT_BUFFER_INSTANCE_ID;
+ }
+ }
if (0) debug_printf("nr_buffers: %d\n", p->nr_buffers);
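
The mapping loop above deduplicates (input_buffer, instance_divisor) pairs so that each distinct pair gets one runtime pointer; a worked example with a hypothetical element layout:

/* Hypothetical key:
 *   element 0: buffer 0, divisor 0  -> buffer_varient[0]
 *   element 1: buffer 0, divisor 1  -> buffer_varient[1] (same buffer, new divisor)
 *   element 2: buffer 0, divisor 0  -> buffer_varient[0] (reuses the first pair)
 *   element 3: TRANSLATE_ELEMENT_INSTANCE_ID
 *              -> element_to_buffer_varient[3] = ELEMENT_BUFFER_INSTANCE_ID
 * Any element with a non-zero divisor also sets p->use_instancing.
 */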
diff --git a/src/gallium/auxiliary/util/u_blitter.c b/src/gallium/auxiliary/util/u_blitter.c
index cef3b69e46d..249a0375fc5 100644
--- a/src/gallium/auxiliary/util/u_blitter.c
+++ b/src/gallium/auxiliary/util/u_blitter.c
@@ -163,6 +163,7 @@ struct blitter_context *util_blitter_create(struct pipe_context *pipe)
rs_state.cull_mode = PIPE_WINDING_NONE;
rs_state.bypass_vs_clip_and_viewport = 1;
rs_state.gl_rasterization_rules = 1;
+ rs_state.flatshade = 1;
ctx->rs_state = pipe->create_rasterizer_state(pipe, &rs_state);
/* fragment shaders are created on-demand */
@@ -378,9 +379,16 @@ static void blitter_set_texcoords_cube(struct blitter_context_priv *ctx,
float t1 = y1 / (float)surf->height;
float s2 = x2 / (float)surf->width;
float t2 = y2 / (float)surf->height;
- const float st[4][2] = {
- {s1, t1}, {s2, t1}, {s2, t2}, {s1, t2}
- };
+ float st[4][2];
+
+ st[0][0] = s1;
+ st[0][1] = t1;
+ st[1][0] = s2;
+ st[1][1] = t1;
+ st[2][0] = s2;
+ st[2][1] = t2;
+ st[3][0] = s1;
+ st[3][1] = t2;
util_map_texcoords2d_onto_cubemap(surf->face,
/* pointer, stride in floats */
@@ -567,45 +575,29 @@ void util_blitter_clear(struct blitter_context *blitter,
blitter_restore_CSOs(ctx);
}
-void util_blitter_copy(struct blitter_context *blitter,
- struct pipe_surface *dst,
- unsigned dstx, unsigned dsty,
- struct pipe_surface *src,
- unsigned srcx, unsigned srcy,
- unsigned width, unsigned height,
- boolean ignore_stencil)
+static boolean
+is_overlap(unsigned sx1, unsigned sx2, unsigned sy1, unsigned sy2,
+ unsigned dx1, unsigned dx2, unsigned dy1, unsigned dy2)
+{
+ if (sx1 >= dx2 || sx2 <= dx1 || sy1 >= dy2 || sy2 <= dy1) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+}
+
+static void util_blitter_do_copy(struct blitter_context *blitter,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height,
+ boolean is_depth)
{
struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter;
struct pipe_context *pipe = ctx->pipe;
- struct pipe_screen *screen = pipe->screen;
struct pipe_framebuffer_state fb_state;
- boolean is_stencil, is_depth;
- unsigned dst_tex_usage;
-
- /* give up if textures are not set */
- assert(dst->texture && src->texture);
- if (!dst->texture || !src->texture)
- return;
-
- is_depth = util_format_get_component_bits(src->format, UTIL_FORMAT_COLORSPACE_ZS, 0) != 0;
- is_stencil = util_format_get_component_bits(src->format, UTIL_FORMAT_COLORSPACE_ZS, 1) != 0;
- dst_tex_usage = is_depth || is_stencil ? PIPE_TEXTURE_USAGE_DEPTH_STENCIL :
- PIPE_TEXTURE_USAGE_RENDER_TARGET;
- /* check if we can sample from and render to the surfaces */
- /* (assuming copying a stencil buffer is not possible) */
- if ((!ignore_stencil && is_stencil) ||
- !screen->is_format_supported(screen, dst->format, dst->texture->target,
- dst_tex_usage, 0) ||
- !screen->is_format_supported(screen, src->format, src->texture->target,
- PIPE_TEXTURE_USAGE_SAMPLER, 0)) {
- util_surface_copy(pipe, FALSE, dst, dstx, dsty, src, srcx, srcy,
- width, height);
- return;
- }
-
- /* check whether the states are properly saved */
- blitter_check_saved_CSOs(ctx);
assert(blitter->saved_fb_state.nr_cbufs != ~0);
assert(blitter->saved_num_textures != ~0);
assert(blitter->saved_num_sampler_states != ~0);
@@ -663,6 +655,108 @@ void util_blitter_copy(struct blitter_context *blitter,
blitter_set_rectangle(ctx, dstx, dsty, dstx+width, dsty+height, 0);
blitter_draw_quad(ctx);
+
+}
+
+static void util_blitter_overlap_copy(struct blitter_context *blitter,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter;
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+
+ struct pipe_texture texTemp;
+ struct pipe_texture *texture;
+ struct pipe_surface *tex_surf;
+
+ /* check whether the states are properly saved */
+ blitter_check_saved_CSOs(ctx);
+
+ memset(&texTemp, 0, sizeof(texTemp));
+ texTemp.target = PIPE_TEXTURE_2D;
+ texTemp.format = dst->texture->format; /* XXX verify supported by driver! */
+ texTemp.last_level = 0;
+ texTemp.width0 = width;
+ texTemp.height0 = height;
+ texTemp.depth0 = 1;
+
+ texture = screen->texture_create(screen, &texTemp);
+ if (!texture)
+ return;
+
+ tex_surf = screen->get_tex_surface(screen, texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* blit from the src to the temp */
+ util_blitter_do_copy(blitter, tex_surf, 0, 0,
+ src, srcx, srcy,
+ width, height,
+ FALSE);
+ util_blitter_do_copy(blitter, dst, dstx, dsty,
+ tex_surf, 0, 0,
+ width, height,
+ FALSE);
+ pipe_surface_reference(&tex_surf, NULL);
+ pipe_texture_reference(&texture, NULL);
+ blitter_restore_CSOs(ctx);
+}
+
+void util_blitter_copy(struct blitter_context *blitter,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height,
+ boolean ignore_stencil)
+{
+ struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter;
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ boolean is_stencil, is_depth;
+ unsigned dst_tex_usage;
+
+ /* give up if textures are not set */
+ assert(dst->texture && src->texture);
+ if (!dst->texture || !src->texture)
+ return;
+
+ if (dst->texture == src->texture) {
+ if (is_overlap(srcx, srcx + width, srcy, srcy + height,
+ dstx, dstx + width, dsty, dsty + height)) {
+ util_blitter_overlap_copy(blitter, dst, dstx, dsty, src, srcx, srcy,
+ width, height);
+ return;
+ }
+ }
+
+ is_depth = util_format_get_component_bits(src->format, UTIL_FORMAT_COLORSPACE_ZS, 0) != 0;
+ is_stencil = util_format_get_component_bits(src->format, UTIL_FORMAT_COLORSPACE_ZS, 1) != 0;
+ dst_tex_usage = is_depth || is_stencil ? PIPE_TEXTURE_USAGE_DEPTH_STENCIL :
+ PIPE_TEXTURE_USAGE_RENDER_TARGET;
+
+ /* check if we can sample from and render to the surfaces */
+ /* (assuming copying a stencil buffer is not possible) */
+ if ((!ignore_stencil && is_stencil) ||
+ !screen->is_format_supported(screen, dst->format, dst->texture->target,
+ dst_tex_usage, 0) ||
+ !screen->is_format_supported(screen, src->format, src->texture->target,
+ PIPE_TEXTURE_USAGE_SAMPLER, 0)) {
+ util_surface_copy(pipe, FALSE, dst, dstx, dsty, src, srcx, srcy,
+ width, height);
+ return;
+ }
+
+ /* check whether the states are properly saved */
+ blitter_check_saved_CSOs(ctx);
+ util_blitter_do_copy(blitter,
+ dst, dstx, dsty,
+ src, srcx, srcy,
+ width, height, is_depth);
blitter_restore_CSOs(ctx);
}
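
Taken together, the refactor leaves util_blitter_copy() as a thin dispatcher; in outline (a restatement of the control flow above, not new behaviour):

/* util_blitter_copy() after this patch:
 *   1. src and dst are the same texture and the rectangles overlap
 *        -> util_blitter_overlap_copy(): blit src into a temporary
 *           PIPE_TEXTURE_2D, then blit the temporary into dst.
 *   2. stencil copy that cannot be ignored, or either format is not
 *      renderable/samplable
 *        -> util_surface_copy() fallback.
 *   3. otherwise
 *        -> util_blitter_do_copy() directly from src to dst.
 */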
diff --git a/src/gallium/auxiliary/util/u_draw_quad.c b/src/gallium/auxiliary/util/u_draw_quad.c
index 4110485fb19..e2e23c3cdd8 100644
--- a/src/gallium/auxiliary/util/u_draw_quad.c
+++ b/src/gallium/auxiliary/util/u_draw_quad.c
@@ -61,6 +61,7 @@ util_draw_vertex_buffer(struct pipe_context *pipe,
/* tell pipe about the vertex attributes */
for (i = 0; i < num_attribs; i++) {
velements[i].src_offset = i * 4 * sizeof(float);
+ velements[i].instance_divisor = 0;
velements[i].vertex_buffer_index = 0;
velements[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
velements[i].nr_components = 4;
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
index 9f16b42944e..01f7931aed1 100644
--- a/src/gallium/auxiliary/util/u_format.csv
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -62,10 +62,10 @@ PIPE_FORMAT_R16G16_SSCALED , array , 1, 1, s16 , s16 , , , xy01,
PIPE_FORMAT_R16G16B16_SSCALED , array , 1, 1, s16 , s16 , s16 , , xyz1, rgb
PIPE_FORMAT_R16G16B16A16_SSCALED , array , 1, 1, s16 , s16 , s16 , s16 , xyzw, rgb
PIPE_FORMAT_R8_UNORM , array , 1, 1, un8 , , , , x001, rgb
-PIPE_FORMAT_R8G8_UNORM , array , 1, 1, un8 , un8 , , , xy01, rgb
-PIPE_FORMAT_R8G8B8_UNORM , array , 1, 1, un8 , un8 , un8 , , xyz1, rgb
-PIPE_FORMAT_R8G8B8A8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , xyzw, rgb
-PIPE_FORMAT_R8G8B8X8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , xyz1, rgb
+PIPE_FORMAT_R8G8_UNORM , array , 1, 1, un8 , un8 , , , yx01, rgb
+PIPE_FORMAT_R8G8B8_UNORM , array , 1, 1, un8 , un8 , un8 , , zyx1, rgb
+PIPE_FORMAT_R8G8B8A8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , wzyx, rgb
+PIPE_FORMAT_R8G8B8X8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , wzy1, rgb
PIPE_FORMAT_R8_USCALED , array , 1, 1, u8 , , , , x001, rgb
PIPE_FORMAT_R8G8_USCALED , array , 1, 1, u8 , u8 , , , xy01, rgb
PIPE_FORMAT_R8G8B8_USCALED , array , 1, 1, u8 , u8 , u8 , , xyz1, rgb
diff --git a/src/gallium/auxiliary/util/u_pack_color.h b/src/gallium/auxiliary/util/u_pack_color.h
index 43eb0153ee7..0ab53c75dd6 100644
--- a/src/gallium/auxiliary/util/u_pack_color.h
+++ b/src/gallium/auxiliary/util/u_pack_color.h
@@ -425,6 +425,8 @@ util_pack_z(enum pipe_format format, double z)
if (z == 1.0)
return 0xffffffff;
return (uint) (z * 0xffffffff);
+ case PIPE_FORMAT_Z32_FLOAT:
+ return (uint)z;
case PIPE_FORMAT_S8Z24_UNORM:
case PIPE_FORMAT_X8Z24_UNORM:
if (z == 1.0)
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.c b/src/gallium/auxiliary/util/u_ringbuffer.c
new file mode 100644
index 00000000000..3f43a19e018
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_ringbuffer.c
@@ -0,0 +1,145 @@
+
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_ringbuffer.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+/* Generic ringbuffer:
+ */
+struct util_ringbuffer
+{
+ struct util_packet *buf;
+ unsigned mask;
+
+ /* Can this be done with atomic variables??
+ */
+ unsigned head;
+ unsigned tail;
+ pipe_condvar change;
+ pipe_mutex mutex;
+};
+
+
+struct util_ringbuffer *util_ringbuffer_create( unsigned dwords )
+{
+ struct util_ringbuffer *ring = CALLOC_STRUCT(util_ringbuffer);
+ if (ring == NULL)
+ return NULL;
+
+ assert(util_is_power_of_two(dwords));
+
+ ring->buf = MALLOC( dwords * sizeof(unsigned) );
+ if (ring->buf == NULL)
+ goto fail;
+
+ ring->mask = dwords - 1;
+
+ pipe_condvar_init(ring->change);
+ pipe_mutex_init(ring->mutex);
+ return ring;
+
+fail:
+ FREE(ring->buf);
+ FREE(ring);
+ return NULL;
+}
+
+void util_ringbuffer_destroy( struct util_ringbuffer *ring )
+{
+ pipe_condvar_destroy(ring->change);
+ pipe_mutex_destroy(ring->mutex);
+ FREE(ring->buf);
+ FREE(ring);
+}
+
+static INLINE unsigned util_ringbuffer_space( const struct util_ringbuffer *ring )
+{
+ return (ring->tail - (ring->head + 1)) & ring->mask;
+}
+
+void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
+ const struct util_packet *packet )
+{
+ unsigned i;
+
+ /* XXX: over-reliance on mutexes, etc:
+ */
+ pipe_mutex_lock(ring->mutex);
+
+ /* Wait for free space:
+ */
+ while (util_ringbuffer_space(ring) < packet->dwords)
+ pipe_condvar_wait(ring->change, ring->mutex);
+
+ /* Copy data to ring:
+ */
+ for (i = 0; i < packet->dwords; i++) {
+
+ /* Copy all dwords of the packet. Note we're abusing the
+ * typesystem a little - we're being passed a pointer to
+ * something, but probably not an array of packet structs:
+ */
+ ring->buf[ring->head] = packet[i];
+ ring->head++;
+ ring->head &= ring->mask;
+ }
+
+ /* Signal change:
+ */
+ pipe_condvar_signal(ring->change);
+ pipe_mutex_unlock(ring->mutex);
+}
+
+enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
+ struct util_packet *packet,
+ unsigned max_dwords,
+ boolean wait )
+{
+ const struct util_packet *ring_packet;
+ unsigned i;
+ int ret = PIPE_OK;
+
+ /* XXX: over-reliance on mutexes, etc:
+ */
+ pipe_mutex_lock(ring->mutex);
+
+ /* Wait for free space:
+ */
+ if (wait) {
+ while (util_ringbuffer_space(ring) == 0)
+ pipe_condvar_wait(ring->change, ring->mutex);
+ }
+ else {
+ if (util_ringbuffer_space(ring) == 0) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+ }
+
+ ring_packet = &ring->buf[ring->tail];
+
+ /* Both of these are considered bugs. Raise an assert on debug builds.
+ */
+ if (ring_packet->dwords > ring->mask + 1 - util_ringbuffer_space(ring) ||
+ ring_packet->dwords > max_dwords) {
+ assert(0);
+ ret = PIPE_ERROR_BAD_INPUT;
+ goto out;
+ }
+
+ /* Copy data from ring:
+ */
+ for (i = 0; i < ring_packet->dwords; i++) {
+ packet[i] = ring->buf[ring->tail];
+ ring->tail++;
+ ring->tail &= ring->mask;
+ }
+
+out:
+ /* Signal change:
+ */
+ pipe_condvar_signal(ring->change);
+ pipe_mutex_unlock(ring->mutex);
+ return ret;
+}
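
Note that util_ringbuffer_space() keeps one slot in reserve so that a full ring is distinguishable from an empty one; a quick worked example with dwords = 8 (mask = 7):

/* head == tail:            space = (tail - tail - 1) & 7 = 7   (empty)
 * head == ((tail + 7) & 7): space = 0                          (full)
 * A ring created with N dwords therefore holds at most N-1 dwords of packets.
 */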
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.h b/src/gallium/auxiliary/util/u_ringbuffer.h
new file mode 100644
index 00000000000..85f0ad6c1f6
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_ringbuffer.h
@@ -0,0 +1,29 @@
+
+#ifndef UTIL_RINGBUFFER_H
+#define UTIL_RINGBUFFER_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_defines.h" /* only for pipe_error! */
+
+/* Generic header
+ */
+struct util_packet {
+ unsigned dwords:8;
+ unsigned data24:24;
+};
+
+struct util_ringbuffer;
+
+struct util_ringbuffer *util_ringbuffer_create( unsigned dwords );
+
+void util_ringbuffer_destroy( struct util_ringbuffer *ring );
+
+void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
+ const struct util_packet *packet );
+
+enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
+ struct util_packet *packet,
+ unsigned max_dwords,
+ boolean wait );
+
+#endif
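
As a usage illustration only: the message layout below is invented for this sketch, and it assumes struct util_packet packs into a single 32-bit word and that a message is laid out as a contiguous run of such words, which is what the implementation's copy loop relies on. Only the util_ringbuffer_* entry points and struct util_packet come from this patch.

/* Hypothetical two-dword message: the util_packet header plus one payload word. */
struct my_msg {
   struct util_packet header;   /* header.dwords counts the whole message */
   unsigned payload;
};

static void my_msg_send( struct util_ringbuffer *ring, unsigned value )
{
   struct my_msg msg;
   msg.header.dwords = sizeof(msg) / sizeof(unsigned);
   msg.header.data24 = 0;
   msg.payload = value;
   util_ringbuffer_enqueue(ring, &msg.header);   /* blocks until space is free */
}

static enum pipe_error my_msg_recv( struct util_ringbuffer *ring, unsigned *value )
{
   struct my_msg msg;
   enum pipe_error ret =
      util_ringbuffer_dequeue(ring, &msg.header,
                              sizeof(msg) / sizeof(unsigned), TRUE);
   if (ret == PIPE_OK)
      *value = msg.payload;
   return ret;
}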
diff --git a/src/gallium/auxiliary/util/u_tile.c b/src/gallium/auxiliary/util/u_tile.c
index 1ba82bb21f0..f9936eb1cb2 100644
--- a/src/gallium/auxiliary/util/u_tile.c
+++ b/src/gallium/auxiliary/util/u_tile.c
@@ -1357,7 +1357,10 @@ pipe_put_tile_rgba(struct pipe_transfer *pt,
/*z24s8_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);*/
break;
default:
- debug_printf("%s: unsupported format %s\n", __FUNCTION__, pf_name(format));
+ util_format_write_4f(format,
+ p, src_stride * sizeof(float),
+ packed, util_format_get_stride(format, w),
+ 0, 0, w, h);
}
pipe_put_tile_raw(pt, x, y, w, h, packed, 0);
diff --git a/src/gallium/auxiliary/vl/vl_compositor.c b/src/gallium/auxiliary/vl/vl_compositor.c
index fc2a1c59a6b..a524e2fdfb3 100644
--- a/src/gallium/auxiliary/vl/vl_compositor.c
+++ b/src/gallium/auxiliary/vl/vl_compositor.c
@@ -316,6 +316,7 @@ init_buffers(struct vl_compositor *c)
pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[0].buffer);
c->vertex_elems[0].src_offset = 0;
+ c->vertex_elems[0].instance_divisor = 0;
c->vertex_elems[0].vertex_buffer_index = 0;
c->vertex_elems[0].nr_components = 2;
c->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
@@ -345,6 +346,7 @@ init_buffers(struct vl_compositor *c)
pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[1].buffer);
c->vertex_elems[1].src_offset = 0;
+ c->vertex_elems[1].instance_divisor = 0;
c->vertex_elems[1].vertex_buffer_index = 1;
c->vertex_elems[1].nr_components = 2;
c->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
@@ -353,7 +355,7 @@ init_buffers(struct vl_compositor *c)
* Create our vertex shader's constant buffer
* Const buffer contains scaling and translation vectors
*/
- c->vs_const_buf.buffer = pipe_buffer_create
+ c->vs_const_buf = pipe_buffer_create
(
c->pipe->screen,
1,
@@ -365,7 +367,7 @@ init_buffers(struct vl_compositor *c)
* Create our fragment shader's constant buffer
* Const buffer contains the color conversion matrix and bias vectors
*/
- c->fs_const_buf.buffer = pipe_buffer_create
+ c->fs_const_buf = pipe_buffer_create
(
c->pipe->screen,
1,
@@ -390,8 +392,8 @@ cleanup_buffers(struct vl_compositor *c)
for (i = 0; i < 2; ++i)
pipe_buffer_reference(&c->vertex_bufs[i].buffer, NULL);
- pipe_buffer_reference(&c->vs_const_buf.buffer, NULL);
- pipe_buffer_reference(&c->fs_const_buf.buffer, NULL);
+ pipe_buffer_reference(&c->vs_const_buf, NULL);
+ pipe_buffer_reference(&c->fs_const_buf, NULL);
}
bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
@@ -483,13 +485,13 @@ void vl_compositor_render(struct vl_compositor *compositor,
compositor->pipe->bind_fs_state(compositor->pipe, compositor->fragment_shader);
compositor->pipe->set_vertex_buffers(compositor->pipe, 2, compositor->vertex_bufs);
compositor->pipe->set_vertex_elements(compositor->pipe, 2, compositor->vertex_elems);
- compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_VERTEX, 0, &compositor->vs_const_buf);
- compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, &compositor->fs_const_buf);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_VERTEX, 0, compositor->vs_const_buf);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
vs_consts = pipe_buffer_map
(
compositor->pipe->screen,
- compositor->vs_const_buf.buffer,
+ compositor->vs_const_buf,
PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
);
@@ -511,7 +513,7 @@ void vl_compositor_render(struct vl_compositor *compositor,
vs_consts->src_trans.z = 0;
vs_consts->src_trans.w = 0;
- pipe_buffer_unmap(compositor->pipe->screen, compositor->vs_const_buf.buffer);
+ pipe_buffer_unmap(compositor->pipe->screen, compositor->vs_const_buf);
compositor->pipe->draw_arrays(compositor->pipe, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
@@ -525,10 +527,10 @@ void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float
memcpy
(
- pipe_buffer_map(compositor->pipe->screen, compositor->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ pipe_buffer_map(compositor->pipe->screen, compositor->fs_const_buf, PIPE_BUFFER_USAGE_CPU_WRITE),
mat,
sizeof(struct fragment_shader_consts)
);
- pipe_buffer_unmap(compositor->pipe->screen, compositor->fs_const_buf.buffer);
+ pipe_buffer_unmap(compositor->pipe->screen, compositor->fs_const_buf);
}
diff --git a/src/gallium/auxiliary/vl/vl_compositor.h b/src/gallium/auxiliary/vl/vl_compositor.h
index f441901a751..6a9a3fd7af1 100644
--- a/src/gallium/auxiliary/vl/vl_compositor.h
+++ b/src/gallium/auxiliary/vl/vl_compositor.h
@@ -47,7 +47,7 @@ struct vl_compositor
struct pipe_scissor_state scissor;
struct pipe_vertex_buffer vertex_bufs[2];
struct pipe_vertex_element vertex_elems[2];
- struct pipe_constant_buffer vs_const_buf, fs_const_buf;
+ struct pipe_buffer *vs_const_buf, *fs_const_buf;
};
bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe);
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
index caf581aca60..e43187545c5 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
@@ -891,53 +891,61 @@ init_buffers(struct vl_mpeg12_mc_renderer *r)
/* Position element */
r->vertex_elems[0].src_offset = 0;
+ r->vertex_elems[0].instance_divisor = 0;
r->vertex_elems[0].vertex_buffer_index = 0;
r->vertex_elems[0].nr_components = 2;
r->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* Luma, texcoord element */
r->vertex_elems[1].src_offset = sizeof(struct vertex2f);
+ r->vertex_elems[1].instance_divisor = 0;
r->vertex_elems[1].vertex_buffer_index = 0;
r->vertex_elems[1].nr_components = 2;
r->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* Chroma Cr texcoord element */
r->vertex_elems[2].src_offset = sizeof(struct vertex2f) * 2;
+ r->vertex_elems[2].instance_divisor = 0;
r->vertex_elems[2].vertex_buffer_index = 0;
r->vertex_elems[2].nr_components = 2;
r->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* Chroma Cb texcoord element */
r->vertex_elems[3].src_offset = sizeof(struct vertex2f) * 3;
+ r->vertex_elems[3].instance_divisor = 0;
r->vertex_elems[3].vertex_buffer_index = 0;
r->vertex_elems[3].nr_components = 2;
r->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* First ref surface top field texcoord element */
r->vertex_elems[4].src_offset = 0;
+ r->vertex_elems[4].instance_divisor = 0;
r->vertex_elems[4].vertex_buffer_index = 1;
r->vertex_elems[4].nr_components = 2;
r->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* First ref surface bottom field texcoord element */
r->vertex_elems[5].src_offset = sizeof(struct vertex2f);
+ r->vertex_elems[5].instance_divisor = 0;
r->vertex_elems[5].vertex_buffer_index = 1;
r->vertex_elems[5].nr_components = 2;
r->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* Second ref surface top field texcoord element */
r->vertex_elems[6].src_offset = 0;
+ r->vertex_elems[6].instance_divisor = 0;
r->vertex_elems[6].vertex_buffer_index = 2;
r->vertex_elems[6].nr_components = 2;
r->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
/* Second ref surface bottom field texcoord element */
r->vertex_elems[7].src_offset = sizeof(struct vertex2f);
+ r->vertex_elems[7].instance_divisor = 0;
r->vertex_elems[7].vertex_buffer_index = 2;
r->vertex_elems[7].nr_components = 2;
r->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
- r->vs_const_buf.buffer = pipe_buffer_create
+ r->vs_const_buf = pipe_buffer_create
(
r->pipe->screen,
DEFAULT_BUF_ALIGNMENT,
@@ -945,7 +953,7 @@ init_buffers(struct vl_mpeg12_mc_renderer *r)
sizeof(struct vertex_shader_consts)
);
- r->fs_const_buf.buffer = pipe_buffer_create
+ r->fs_const_buf = pipe_buffer_create
(
r->pipe->screen,
DEFAULT_BUF_ALIGNMENT,
@@ -954,11 +962,11 @@ init_buffers(struct vl_mpeg12_mc_renderer *r)
memcpy
(
- pipe_buffer_map(r->pipe->screen, r->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ pipe_buffer_map(r->pipe->screen, r->fs_const_buf, PIPE_BUFFER_USAGE_CPU_WRITE),
&fs_consts, sizeof(struct fragment_shader_consts)
);
- pipe_buffer_unmap(r->pipe->screen, r->fs_const_buf.buffer);
+ pipe_buffer_unmap(r->pipe->screen, r->fs_const_buf);
return true;
}
@@ -970,8 +978,8 @@ cleanup_buffers(struct vl_mpeg12_mc_renderer *r)
assert(r);
- pipe_buffer_reference(&r->vs_const_buf.buffer, NULL);
- pipe_buffer_reference(&r->fs_const_buf.buffer, NULL);
+ pipe_buffer_reference(&r->vs_const_buf, NULL);
+ pipe_buffer_reference(&r->fs_const_buf, NULL);
for (i = 0; i < 3; ++i)
pipe_buffer_reference(&r->vertex_bufs.all[i].buffer, NULL);
@@ -1284,19 +1292,19 @@ flush(struct vl_mpeg12_mc_renderer *r)
vs_consts = pipe_buffer_map
(
- r->pipe->screen, r->vs_const_buf.buffer,
+ r->pipe->screen, r->vs_const_buf,
PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
);
vs_consts->denorm.x = r->surface->width0;
vs_consts->denorm.y = r->surface->height0;
- pipe_buffer_unmap(r->pipe->screen, r->vs_const_buf.buffer);
+ pipe_buffer_unmap(r->pipe->screen, r->vs_const_buf);
r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_VERTEX, 0,
- &r->vs_const_buf);
+ r->vs_const_buf);
r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_FRAGMENT, 0,
- &r->fs_const_buf);
+ r->fs_const_buf);
if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0) {
r->pipe->set_vertex_buffers(r->pipe, 1, r->vertex_bufs.all);
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
index 64184337a06..f00b8c7b8b1 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
@@ -63,8 +63,8 @@ struct vl_mpeg12_mc_renderer
struct pipe_viewport_state viewport;
struct pipe_scissor_state scissor;
- struct pipe_constant_buffer vs_const_buf;
- struct pipe_constant_buffer fs_const_buf;
+ struct pipe_buffer *vs_const_buf;
+ struct pipe_buffer *fs_const_buf;
struct pipe_framebuffer_state fb_state;
struct pipe_vertex_element vertex_elems[8];