Diffstat (limited to 'src/mesa/drivers/dri/i965')
59 files changed, 3756 insertions, 1065 deletions
diff --git a/src/mesa/drivers/dri/i965/Makefile b/src/mesa/drivers/dri/i965/Makefile index bea48e13138..917d39061d2 100644 --- a/src/mesa/drivers/dri/i965/Makefile +++ b/src/mesa/drivers/dri/i965/Makefile @@ -105,6 +105,7 @@ C_SOURCES = \ $(DRIVER_SOURCES) CXX_SOURCES = \ + brw_cubemap_normalize.cpp \ brw_fs.cpp \ brw_fs_channel_expressions.cpp \ brw_fs_vector_splitting.cpp diff --git a/src/mesa/drivers/dri/i965/brw_cc.c b/src/mesa/drivers/dri/i965/brw_cc.c index cfce5d31405..00418760da3 100644 --- a/src/mesa/drivers/dri/i965/brw_cc.c +++ b/src/mesa/drivers/dri/i965/brw_cc.c @@ -39,7 +39,7 @@ void brw_update_cc_vp(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_cc_viewport ccv; memset(&ccv, 0, sizeof(ccv)); @@ -90,7 +90,8 @@ static void prepare_cc_unit(struct brw_context *brw) */ static void upload_cc_unit(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct intel_context *intel = &brw->intel; + struct gl_context *ctx = &brw->intel.ctx; struct brw_cc_unit_state cc; void *map; @@ -203,12 +204,12 @@ static void upload_cc_unit(struct brw_context *brw) cc.cc2.depth_write_enable = ctx->Depth.Mask; } + if (intel->stats_wm || (INTEL_DEBUG & DEBUG_STATS)) + cc.cc5.statistics_enable = 1; + /* CACHE_NEW_CC_VP */ cc.cc4.cc_viewport_state_offset = brw->cc.vp_bo->offset >> 5; /* reloc */ - if (INTEL_DEBUG & DEBUG_STATS) - cc.cc5.statistics_enable = 1; - map = brw_state_batch(brw, sizeof(cc), 64, &brw->cc.state_bo, &brw->cc.state_offset); memcpy(map, &cc, sizeof(cc)); diff --git a/src/mesa/drivers/dri/i965/brw_clip.c b/src/mesa/drivers/dri/i965/brw_clip.c index a1e9dae9154..15e60bf3ce3 100644 --- a/src/mesa/drivers/dri/i965/brw_clip.c +++ b/src/mesa/drivers/dri/i965/brw_clip.c @@ -159,7 +159,7 @@ static void compile_clip_prog( struct brw_context *brw, static void upload_clip_prog(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; struct brw_clip_prog_key key; memset(&key, 0, sizeof(key)); diff --git a/src/mesa/drivers/dri/i965/brw_clip_state.c b/src/mesa/drivers/dri/i965/brw_clip_state.c index 856d8f0c6c0..885167da908 100644 --- a/src/mesa/drivers/dri/i965/brw_clip_state.c +++ b/src/mesa/drivers/dri/i965/brw_clip_state.c @@ -49,7 +49,7 @@ struct brw_clip_unit_key { static void clip_unit_populate_key(struct brw_context *brw, struct brw_clip_unit_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); /* CACHE_NEW_CLIP_PROG */ diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c index d08538ec20c..3c4ae8a7a4f 100644 --- a/src/mesa/drivers/dri/i965/brw_context.c +++ b/src/mesa/drivers/dri/i965/brw_context.c @@ -41,7 +41,6 @@ #include "intel_span.h" #include "tnl/t_pipeline.h" - /*************************************** * Mesa's Driver Functions ***************************************/ @@ -58,14 +57,14 @@ static void brwInitDriverFunctions( struct dd_function_table *functions ) } GLboolean brwCreateContext( int api, - const __GLcontextModes *mesaVis, + const struct gl_config *mesaVis, __DRIcontext *driContextPriv, void *sharedContextPrivate) { struct dd_function_table functions; struct brw_context *brw = (struct brw_context *) CALLOC_STRUCT(brw_context); struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; unsigned i; if (!brw) { @@ -123,6 +122,9 @@ GLboolean 
brwCreateContext( int api, (i == MESA_SHADER_FRAGMENT); ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp = (i == MESA_SHADER_FRAGMENT); + + if (intel->gen == 6) + ctx->ShaderCompilerOptions[i].EmitNoIfs = GL_TRUE; } ctx->Const.VertexProgram.MaxNativeInstructions = (16 * 1024); diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h index 703a7de78d1..f205c07a727 100644 --- a/src/mesa/drivers/dri/i965/brw_context.h +++ b/src/mesa/drivers/dri/i965/brw_context.h @@ -719,7 +719,7 @@ void brwInitVtbl( struct brw_context *brw ); * brw_context.c */ GLboolean brwCreateContext( int api, - const __GLcontextModes *mesaVis, + const struct gl_config *mesaVis, __DRIcontext *driContextPriv, void *sharedContextPrivate); @@ -763,15 +763,15 @@ void brw_upload_cs_urb_state(struct brw_context *brw); int brw_disasm (FILE *file, struct brw_instruction *inst, int gen); /* brw_state.c */ -void brw_enable(GLcontext * ctx, GLenum cap, GLboolean state); -void brw_depth_range(GLcontext *ctx, GLclampd nearval, GLclampd farval); +void brw_enable(struct gl_context * ctx, GLenum cap, GLboolean state); +void brw_depth_range(struct gl_context *ctx, GLclampd nearval, GLclampd farval); /*====================================================================== * Inline conversion functions. These are better-typed than the * macros used previously: */ static INLINE struct brw_context * -brw_context( GLcontext *ctx ) +brw_context( struct gl_context *ctx ) { return (struct brw_context *)ctx; } @@ -800,5 +800,7 @@ brw_fragment_program_const(const struct gl_fragment_program *p) return (const struct brw_fragment_program *) p; } +GLboolean brw_do_cubemap_normalize(struct exec_list *instructions); + #endif diff --git a/src/mesa/drivers/dri/i965/brw_cubemap_normalize.cpp b/src/mesa/drivers/dri/i965/brw_cubemap_normalize.cpp new file mode 100644 index 00000000000..35bea681214 --- /dev/null +++ b/src/mesa/drivers/dri/i965/brw_cubemap_normalize.cpp @@ -0,0 +1,110 @@ +/* + * Copyright © 2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * \file brw_cubemap_normalize.cpp + * + * IR lower pass to perform the normalization of the cubemap coordinates to + * have the largest magnitude component be -1.0 or 1.0. 
+ * + * \author Eric Anholt <[email protected]> + */ + +#include "../glsl/glsl_types.h" +#include "../glsl/ir.h" + +class brw_cubemap_normalize_visitor : public ir_hierarchical_visitor { +public: + brw_cubemap_normalize_visitor() + { + progress = false; + } + + ir_visitor_status visit_leave(ir_texture *ir); + + bool progress; +}; + +ir_visitor_status +brw_cubemap_normalize_visitor::visit_leave(ir_texture *ir) +{ + if (ir->sampler->type->sampler_dimensionality != GLSL_SAMPLER_DIM_CUBE) + return visit_continue; + + void *mem_ctx = talloc_parent(ir); + + ir_variable *var = new(mem_ctx) ir_variable(ir->coordinate->type, + "coordinate", ir_var_auto); + base_ir->insert_before(var); + ir_dereference *deref = new(mem_ctx) ir_dereference_variable(var); + ir_assignment *assign = new(mem_ctx) ir_assignment(deref, ir->coordinate, + NULL); + base_ir->insert_before(assign); + + deref = new(mem_ctx) ir_dereference_variable(var); + ir_rvalue *swiz0 = new(mem_ctx) ir_swizzle(deref, 0, 0, 0, 0, 1); + deref = new(mem_ctx) ir_dereference_variable(var); + ir_rvalue *swiz1 = new(mem_ctx) ir_swizzle(deref, 1, 0, 0, 0, 1); + deref = new(mem_ctx) ir_dereference_variable(var); + ir_rvalue *swiz2 = new(mem_ctx) ir_swizzle(deref, 2, 0, 0, 0, 1); + + swiz0 = new(mem_ctx) ir_expression(ir_unop_abs, swiz0->type, swiz0, NULL); + swiz1 = new(mem_ctx) ir_expression(ir_unop_abs, swiz1->type, swiz1, NULL); + swiz2 = new(mem_ctx) ir_expression(ir_unop_abs, swiz2->type, swiz2, NULL); + + ir_expression *expr; + expr = new(mem_ctx) ir_expression(ir_binop_max, + glsl_type::float_type, + swiz0, swiz1); + + expr = new(mem_ctx) ir_expression(ir_binop_max, + glsl_type::float_type, + expr, swiz2); + + expr = new(mem_ctx) ir_expression(ir_unop_rcp, + glsl_type::float_type, + expr, NULL); + + deref = new(mem_ctx) ir_dereference_variable(var); + ir->coordinate = new(mem_ctx) ir_expression(ir_binop_mul, + ir->coordinate->type, + deref, + expr); + + progress = true; + return visit_continue; +} + +extern "C" { + +GLboolean +brw_do_cubemap_normalize(exec_list *instructions) +{ + brw_cubemap_normalize_visitor v; + + visit_list_elements(&v, instructions); + + return v.progress; +} + +} diff --git a/src/mesa/drivers/dri/i965/brw_curbe.c b/src/mesa/drivers/dri/i965/brw_curbe.c index 8196d8ca625..9ce0d8decdc 100644 --- a/src/mesa/drivers/dri/i965/brw_curbe.c +++ b/src/mesa/drivers/dri/i965/brw_curbe.c @@ -55,7 +55,7 @@ */ static void calculate_curbe_offsets( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; /* CACHE_NEW_WM_PROG */ const GLuint nr_fp_regs = (brw->wm.prog_data->nr_params + 15) / 16; @@ -179,7 +179,7 @@ static GLfloat fixed_plane[6][4] = { */ static void prepare_constant_buffer(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const struct brw_vertex_program *vp = brw_vertex_program_const(brw->vertex_program); const GLuint sz = brw->curbe.total_size; diff --git a/src/mesa/drivers/dri/i965/brw_defines.h b/src/mesa/drivers/dri/i965/brw_defines.h index 6b8e9e05d08..9633c95ff50 100644 --- a/src/mesa/drivers/dri/i965/brw_defines.h +++ b/src/mesa/drivers/dri/i965/brw_defines.h @@ -501,9 +501,26 @@ #define BRW_MASK_ENABLE 0 #define BRW_MASK_DISABLE 1 -/* Sandybridge is WECtrl (Write enable control) */ +/** @{ + * + * Gen6 has replaced "mask enable/disable" with WECtrl, which is + * effectively the same but much simpler to think about. 
Now, there + * are two contributors ANDed together to whether channels are + * executed: The predication on the instruction, and the channel write + * enable. + */ +/** + * This is the default value. It means that a channel's write enable is set + * if the per-channel IP is pointing at this instruction. + */ #define BRW_WE_NORMAL 0 -#define BRW_WE_KILL_PRED 1 +/** + * This is used like BRW_MASK_DISABLE, and causes all channels to have + * their write enable set. Note that predication still contributes to + * whether the channel actually gets written. + */ +#define BRW_WE_ALL 1 +/** @} */ #define BRW_OPCODE_MOV 1 #define BRW_OPCODE_SEL 2 @@ -930,6 +947,7 @@ /* DW3 */ # define GEN6_CLIP_MIN_POINT_WIDTH_SHIFT 17 # define GEN6_CLIP_MAX_POINT_WIDTH_SHIFT 6 +# define GEN6_CLIP_FORCE_ZERO_RTAINDEX (1 << 5) #define CMD_3D_SF_STATE 0x7813 /* GEN6+ */ /* DW1 */ diff --git a/src/mesa/drivers/dri/i965/brw_disasm.c b/src/mesa/drivers/dri/i965/brw_disasm.c index f74a236834b..12b8f2e4678 100644 --- a/src/mesa/drivers/dri/i965/brw_disasm.c +++ b/src/mesa/drivers/dri/i965/brw_disasm.c @@ -164,6 +164,11 @@ char *accwr[2] = { [1] = "AccWrEnable" }; +char *wectrl[2] = { + [0] = "WE_normal", + [1] = "WE_all" +}; + char *exec_size[8] = { [0] = "1", [1] = "2", @@ -651,6 +656,7 @@ static int src_da16 (FILE *file, err |= control (file, "channel select", chan_sel, swz_z, NULL); err |= control (file, "channel select", chan_sel, swz_w, NULL); } + err |= control (file, "src da16 reg type", reg_encoding, _reg_type, NULL); return err; } @@ -804,6 +810,44 @@ static int src1 (FILE *file, struct brw_instruction *inst) } } +int esize[6] = { + [0] = 1, + [1] = 2, + [2] = 4, + [3] = 8, + [4] = 16, + [5] = 32, +}; + +static int qtr_ctrl(FILE *file, struct brw_instruction *inst) +{ + int qtr_ctl = inst->header.compression_control; + int exec_size = esize[inst->header.execution_size]; + + if (exec_size == 8) { + switch (qtr_ctl) { + case 0: + string (file, " 1Q"); + break; + case 1: + string (file, " 2Q"); + break; + case 2: + string (file, " 3Q"); + break; + case 3: + string (file, " 4Q"); + break; + } + } else if (exec_size == 16){ + if (qtr_ctl < 2) + string (file, " 1H"); + else + string (file, " 2H"); + } + return 0; +} + int brw_disasm (FILE *file, struct brw_instruction *inst, int gen) { int err = 0; @@ -842,13 +886,18 @@ int brw_disasm (FILE *file, struct brw_instruction *inst, int gen) string (file, ")"); } - if (inst->header.opcode == BRW_OPCODE_SEND) + if (inst->header.opcode == BRW_OPCODE_SEND && gen < 6) format (file, " %d", inst->header.destreg__conditionalmod); if (opcode[inst->header.opcode].ndst > 0) { pad (file, 16); err |= dest (file, inst); + } else if (gen >= 6 && (inst->header.opcode == BRW_OPCODE_IF || + inst->header.opcode == BRW_OPCODE_ELSE || + inst->header.opcode == BRW_OPCODE_ENDIF)) { + format (file, " %d", inst->bits1.branch_gen6.jump_count); } + if (opcode[inst->header.opcode].nsrc > 0) { pad (file, 32); err |= src0 (file, inst); @@ -998,18 +1047,26 @@ int brw_disasm (FILE *file, struct brw_instruction *inst, int gen) string (file, "{"); space = 1; err |= control(file, "access mode", access_mode, inst->header.access_mode, &space); - err |= control (file, "mask control", mask_ctrl, inst->header.mask_control, &space); + if (gen >= 6) + err |= control (file, "write enable control", wectrl, inst->header.mask_control, &space); + else + err |= control (file, "mask control", mask_ctrl, inst->header.mask_control, &space); err |= control (file, "dependency control", dep_ctrl, 
inst->header.dependency_control, &space); - if (inst->header.compression_control == BRW_COMPRESSION_COMPRESSED && - opcode[inst->header.opcode].ndst > 0 && - inst->bits1.da1.dest_reg_file == BRW_MESSAGE_REGISTER_FILE && - inst->bits1.da1.dest_reg_nr & (1 << 7)) { - format (file, " compr4"); - } else { - err |= control (file, "compression control", compr_ctrl, - inst->header.compression_control, &space); + if (gen >= 6) + err |= qtr_ctrl (file, inst); + else { + if (inst->header.compression_control == BRW_COMPRESSION_COMPRESSED && + opcode[inst->header.opcode].ndst > 0 && + inst->bits1.da1.dest_reg_file == BRW_MESSAGE_REGISTER_FILE && + inst->bits1.da1.dest_reg_nr & (1 << 7)) { + format (file, " compr4"); + } else { + err |= control (file, "compression control", compr_ctrl, + inst->header.compression_control, &space); + } } + err |= control (file, "thread control", thread_ctrl, inst->header.thread_control, &space); if (gen >= 6) err |= control (file, "acc write control", accwr, inst->header.acc_wr_control, &space); diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c index 16331cc3ac0..04bc8cb2db0 100644 --- a/src/mesa/drivers/dri/i965/brw_draw.c +++ b/src/mesa/drivers/dri/i965/brw_draw.c @@ -80,7 +80,7 @@ static const GLenum reduced_prim[GL_POLYGON+1] = { static GLuint brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; GLenum mode = prim->mode; if (INTEL_DEBUG & DEBUG_PRIMS) @@ -201,7 +201,7 @@ static GLboolean check_fallbacks( struct brw_context *brw, const struct _mesa_prim *prim, GLuint nr_prims ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; GLuint i; /* If we don't require strict OpenGL conformance, never @@ -293,7 +293,7 @@ static GLboolean check_fallbacks( struct brw_context *brw, /* May fail if out of video memory for texture or vbo upload, or on * fallback conditions. */ -static GLboolean brw_try_draw_prims( GLcontext *ctx, +static GLboolean brw_try_draw_prims( struct gl_context *ctx, const struct gl_client_array *arrays[], const struct _mesa_prim *prim, GLuint nr_prims, @@ -416,7 +416,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx, return retval; } -void brw_draw_prims( GLcontext *ctx, +void brw_draw_prims( struct gl_context *ctx, const struct gl_client_array *arrays[], const struct _mesa_prim *prim, GLuint nr_prims, @@ -434,7 +434,7 @@ void brw_draw_prims( GLcontext *ctx, /* Decide if we want to rebase. If so we end up recursing once * only into this function. */ - if (min_index != 0) { + if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) { vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, @@ -460,7 +460,7 @@ void brw_draw_prims( GLcontext *ctx, void brw_draw_init( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct vbo_context *vbo = vbo_context(ctx); /* Register our drawing function: diff --git a/src/mesa/drivers/dri/i965/brw_draw.h b/src/mesa/drivers/dri/i965/brw_draw.h index 2a14db217fc..1fe417296f6 100644 --- a/src/mesa/drivers/dri/i965/brw_draw.h +++ b/src/mesa/drivers/dri/i965/brw_draw.h @@ -28,13 +28,13 @@ #ifndef BRW_DRAW_H #define BRW_DRAW_H -#include "main/mtypes.h" /* for GLcontext... */ +#include "main/mtypes.h" /* for struct gl_context... 
*/ #include "vbo/vbo.h" struct brw_context; -void brw_draw_prims( GLcontext *ctx, +void brw_draw_prims( struct gl_context *ctx, const struct gl_client_array *arrays[], const struct _mesa_prim *prims, GLuint nr_prims, @@ -48,7 +48,7 @@ void brw_draw_destroy( struct brw_context *brw ); /* brw_draw_current.c */ -void brw_init_current_values(GLcontext *ctx, +void brw_init_current_values(struct gl_context *ctx, struct gl_client_array *arrays); #endif diff --git a/src/mesa/drivers/dri/i965/brw_draw_upload.c b/src/mesa/drivers/dri/i965/brw_draw_upload.c index 249e874ab1a..c4654360d46 100644 --- a/src/mesa/drivers/dri/i965/brw_draw_upload.c +++ b/src/mesa/drivers/dri/i965/brw_draw_upload.c @@ -313,7 +313,7 @@ copy_array_to_vbo_array( struct brw_context *brw, static void brw_prepare_vertices(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = intel_context(ctx); GLbitfield vs_inputs = brw->vs.prog_data->inputs_read; GLuint i; @@ -383,7 +383,7 @@ static void brw_prepare_vertices(struct brw_context *brw) */ assert(input->offset < input->bo->size); } else { - input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1; + input->count = input->glarray->StrideB ? max_index + 1 : 1; if (input->bo != NULL) { /* Already-uploaded vertex data is present from a previous * prepare_vertices, but we had to re-validate state due to @@ -414,15 +414,6 @@ static void brw_prepare_vertices(struct brw_context *brw) } upload[nr_uploads++] = input; - - /* We rebase drawing to start at element zero only when - * varyings are not in vbos, which means we can end up - * uploading non-varying arrays (stride != 0) when min_index - * is zero. This doesn't matter as the amount to upload is - * the same for these arrays whether the draw call is rebased - * or not - we just have to upload the one element. 
- */ - assert(min_index == 0 || input->glarray->StrideB == 0); } } @@ -460,7 +451,7 @@ static void brw_prepare_vertices(struct brw_context *brw) static void brw_emit_vertices(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = intel_context(ctx); GLuint i; @@ -592,7 +583,7 @@ const struct brw_tracked_state brw_vertices = { static void brw_prepare_indices(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; const struct _mesa_index_buffer *index_buffer = brw->ib.ib; GLuint ib_size; diff --git a/src/mesa/drivers/dri/i965/brw_eu.h b/src/mesa/drivers/dri/i965/brw_eu.h index c63db164609..7cac4a72e4a 100644 --- a/src/mesa/drivers/dri/i965/brw_eu.h +++ b/src/mesa/drivers/dri/i965/brw_eu.h @@ -789,6 +789,10 @@ struct brw_instruction *brw_##OP(struct brw_compile *p, \ struct brw_reg src0, \ struct brw_reg src1); +#define ROUND(OP) \ +void brw_##OP(struct brw_compile *p, struct brw_reg dest, struct brw_reg src0); + + ALU1(MOV) ALU2(SEL) ALU1(NOT) @@ -805,7 +809,6 @@ ALU2(ADD) ALU2(MUL) ALU1(FRC) ALU1(RNDD) -ALU1(RNDZ) ALU2(MAC) ALU2(MACH) ALU1(LZD) @@ -816,9 +819,12 @@ ALU2(DP2) ALU2(LINE) ALU2(PLN) +ROUND(RNDZ) +ROUND(RNDE) + #undef ALU1 #undef ALU2 - +#undef ROUND /* Helpers for SEND instruction: @@ -885,6 +891,12 @@ void brw_math( struct brw_compile *p, GLuint data_type, GLuint precision ); +void brw_math2(struct brw_compile *p, + struct brw_reg dest, + GLuint function, + struct brw_reg src0, + struct brw_reg src1); + void brw_dp_READ_16( struct brw_compile *p, struct brw_reg dest, GLuint scratch_offset ); diff --git a/src/mesa/drivers/dri/i965/brw_eu_emit.c b/src/mesa/drivers/dri/i965/brw_eu_emit.c index ddd3a94eb07..a1fead0a4fa 100644 --- a/src/mesa/drivers/dri/i965/brw_eu_emit.c +++ b/src/mesa/drivers/dri/i965/brw_eu_emit.c @@ -448,6 +448,7 @@ static void brw_set_dp_write_message( struct brw_context *brw, GLuint msg_control, GLuint msg_type, GLuint msg_length, + GLboolean header_present, GLuint pixel_scoreboard_clear, GLuint response_length, GLuint end_of_thread, @@ -462,7 +463,7 @@ static void brw_set_dp_write_message( struct brw_context *brw, insn->bits3.dp_render_cache.pixel_scoreboard_clear = pixel_scoreboard_clear; insn->bits3.dp_render_cache.msg_type = msg_type; insn->bits3.dp_render_cache.send_commit_msg = send_commit_msg; - insn->bits3.dp_render_cache.header_present = 0; /* XXX */ + insn->bits3.dp_render_cache.header_present = header_present; insn->bits3.dp_render_cache.response_length = response_length; insn->bits3.dp_render_cache.msg_length = msg_length; insn->bits3.dp_render_cache.end_of_thread = end_of_thread; @@ -476,7 +477,7 @@ static void brw_set_dp_write_message( struct brw_context *brw, insn->bits3.dp_write_gen5.pixel_scoreboard_clear = pixel_scoreboard_clear; insn->bits3.dp_write_gen5.msg_type = msg_type; insn->bits3.dp_write_gen5.send_commit_msg = send_commit_msg; - insn->bits3.dp_write_gen5.header_present = 1; + insn->bits3.dp_write_gen5.header_present = header_present; insn->bits3.dp_write_gen5.response_length = response_length; insn->bits3.dp_write_gen5.msg_length = msg_length; insn->bits3.dp_write_gen5.end_of_thread = end_of_thread; @@ -548,7 +549,7 @@ static void brw_set_sampler_message(struct brw_context *brw, assert(eot == 0); brw_set_src1(insn, brw_imm_d(0)); - if (intel->gen == 5) { + if (intel->gen >= 5) { insn->bits3.sampler_gen5.binding_table_index = binding_table_index; 
insn->bits3.sampler_gen5.sampler = sampler; insn->bits3.sampler_gen5.msg_type = msg_type; @@ -557,8 +558,12 @@ static void brw_set_sampler_message(struct brw_context *brw, insn->bits3.sampler_gen5.response_length = response_length; insn->bits3.sampler_gen5.msg_length = msg_length; insn->bits3.sampler_gen5.end_of_thread = eot; - insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_SAMPLER; - insn->bits2.send_gen5.end_of_thread = eot; + if (intel->gen >= 6) + insn->header.destreg__conditionalmod = BRW_MESSAGE_TARGET_SAMPLER; + else { + insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_SAMPLER; + insn->bits2.send_gen5.end_of_thread = eot; + } } else if (intel->is_g4x) { insn->bits3.sampler_g4x.binding_table_index = binding_table_index; insn->bits3.sampler_g4x.sampler = sampler; @@ -649,6 +654,26 @@ struct brw_instruction *brw_##OP(struct brw_compile *p, \ return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \ } +/* Rounding operations (other than RNDD) require two instructions - the first + * stores a rounded value (possibly the wrong way) in the dest register, but + * also sets a per-channel "increment bit" in the flag register. A predicated + * add of 1.0 fixes dest to contain the desired result. + */ +#define ROUND(OP) \ +void brw_##OP(struct brw_compile *p, \ + struct brw_reg dest, \ + struct brw_reg src) \ +{ \ + struct brw_instruction *rnd, *add; \ + rnd = next_insn(p, BRW_OPCODE_##OP); \ + brw_set_dest(rnd, dest); \ + brw_set_src0(rnd, src); \ + rnd->header.destreg__conditionalmod = 0x7; /* turn on round-increments */ \ + \ + add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \ + add->header.predicate_control = BRW_PREDICATE_NORMAL; \ +} + ALU1(MOV) ALU2(SEL) @@ -663,7 +688,6 @@ ALU2(RSL) ALU2(ASR) ALU1(FRC) ALU1(RNDD) -ALU1(RNDZ) ALU2(MAC) ALU2(MACH) ALU1(LZD) @@ -674,6 +698,11 @@ ALU2(DP2) ALU2(LINE) ALU2(PLN) + +ROUND(RNDZ) +ROUND(RNDE) + + struct brw_instruction *brw_ADD(struct brw_compile *p, struct brw_reg dest, struct brw_reg src0, @@ -782,6 +811,7 @@ struct brw_instruction *brw_JMPI(struct brw_compile *p, */ struct brw_instruction *brw_IF(struct brw_compile *p, GLuint execute_size) { + struct intel_context *intel = &p->brw->intel; struct brw_instruction *insn; if (p->single_program_flow) { @@ -795,9 +825,15 @@ struct brw_instruction *brw_IF(struct brw_compile *p, GLuint execute_size) /* Override the defaults for this instruction: */ - brw_set_dest(insn, brw_ip_reg()); - brw_set_src0(insn, brw_ip_reg()); - brw_set_src1(insn, brw_imm_d(0x0)); + if (intel->gen < 6) { + brw_set_dest(insn, brw_ip_reg()); + brw_set_src0(insn, brw_ip_reg()); + brw_set_src1(insn, brw_imm_d(0x0)); + } else { + brw_set_dest(insn, brw_imm_w(0)); + brw_set_src0(insn, brw_null_reg()); + brw_set_src1(insn, brw_null_reg()); + } insn->header.execution_size = execute_size; insn->header.compression_control = BRW_COMPRESSION_NONE; @@ -819,7 +855,9 @@ struct brw_instruction *brw_ELSE(struct brw_compile *p, struct brw_instruction *insn; GLuint br = 1; - if (intel->gen == 5) + /* jump count is for 64bit data chunk each, so one 128bit + instruction requires 2 chunks. 
*/ + if (intel->gen >= 5) br = 2; if (p->single_program_flow) { @@ -828,9 +866,15 @@ struct brw_instruction *brw_ELSE(struct brw_compile *p, insn = next_insn(p, BRW_OPCODE_ELSE); } - brw_set_dest(insn, brw_ip_reg()); - brw_set_src0(insn, brw_ip_reg()); - brw_set_src1(insn, brw_imm_d(0x0)); + if (intel->gen < 6) { + brw_set_dest(insn, brw_ip_reg()); + brw_set_src0(insn, brw_ip_reg()); + brw_set_src1(insn, brw_imm_d(0x0)); + } else { + brw_set_dest(insn, brw_imm_w(0)); + brw_set_src0(insn, brw_null_reg()); + brw_set_src1(insn, brw_null_reg()); + } insn->header.compression_control = BRW_COMPRESSION_NONE; insn->header.execution_size = if_insn->header.execution_size; @@ -847,9 +891,13 @@ struct brw_instruction *brw_ELSE(struct brw_compile *p, } else { assert(if_insn->header.opcode == BRW_OPCODE_IF); - if_insn->bits3.if_else.jump_count = br * (insn - if_insn); - if_insn->bits3.if_else.pop_count = 0; - if_insn->bits3.if_else.pad0 = 0; + if (intel->gen < 6) { + if_insn->bits3.if_else.jump_count = br * (insn - if_insn); + if_insn->bits3.if_else.pop_count = 0; + if_insn->bits3.if_else.pad0 = 0; + } else { + if_insn->bits1.branch_gen6.jump_count = br * (insn - if_insn + 1); + } } return insn; @@ -861,7 +909,7 @@ void brw_ENDIF(struct brw_compile *p, struct intel_context *intel = &p->brw->intel; GLuint br = 1; - if (intel->gen == 5) + if (intel->gen >= 5) br = 2; if (p->single_program_flow) { @@ -877,9 +925,15 @@ void brw_ENDIF(struct brw_compile *p, } else { struct brw_instruction *insn = next_insn(p, BRW_OPCODE_ENDIF); - brw_set_dest(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); - brw_set_src0(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); - brw_set_src1(insn, brw_imm_d(0x0)); + if (intel->gen < 6) { + brw_set_dest(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); + brw_set_src0(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD)); + brw_set_src1(insn, brw_imm_d(0x0)); + } else { + brw_set_dest(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_W)); + brw_set_src0(insn, brw_null_reg()); + brw_set_src1(insn, brw_null_reg()); + } insn->header.compression_control = BRW_COMPRESSION_NONE; insn->header.execution_size = patch_insn->header.execution_size; @@ -892,25 +946,42 @@ void brw_ENDIF(struct brw_compile *p, * instruction respectively. */ if (patch_insn->header.opcode == BRW_OPCODE_IF) { - /* Automagically turn it into an IFF: - */ - patch_insn->header.opcode = BRW_OPCODE_IFF; - patch_insn->bits3.if_else.jump_count = br * (insn - patch_insn + 1); - patch_insn->bits3.if_else.pop_count = 0; - patch_insn->bits3.if_else.pad0 = 0; - } else if (patch_insn->header.opcode == BRW_OPCODE_ELSE) { - patch_insn->bits3.if_else.jump_count = br * (insn - patch_insn + 1); - patch_insn->bits3.if_else.pop_count = 1; - patch_insn->bits3.if_else.pad0 = 0; + if (intel->gen < 6) { + /* Turn it into an IFF, which means no mask stack operations for + * all-false and jumping past the ENDIF. + */ + patch_insn->header.opcode = BRW_OPCODE_IFF; + patch_insn->bits3.if_else.jump_count = br * (insn - patch_insn + 1); + patch_insn->bits3.if_else.pop_count = 0; + patch_insn->bits3.if_else.pad0 = 0; + } else { + /* As of gen6, there is no IFF and IF must point to the ENDIF. */ + patch_insn->bits1.branch_gen6.jump_count = br * (insn - patch_insn); + } } else { - assert(0); + assert(patch_insn->header.opcode == BRW_OPCODE_ELSE); + if (intel->gen < 6) { + /* BRW_OPCODE_ELSE pre-gen6 should point just past the + * matching ENDIF. 
+ */ + patch_insn->bits3.if_else.jump_count = br * (insn - patch_insn + 1); + patch_insn->bits3.if_else.pop_count = 1; + patch_insn->bits3.if_else.pad0 = 0; + } else { + /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */ + patch_insn->bits1.branch_gen6.jump_count = br * (insn - patch_insn); + } } /* Also pop item off the stack in the endif instruction: */ - insn->bits3.if_else.jump_count = 0; - insn->bits3.if_else.pop_count = 1; - insn->bits3.if_else.pad0 = 0; + if (intel->gen < 6) { + insn->bits3.if_else.jump_count = 0; + insn->bits3.if_else.pop_count = 1; + insn->bits3.if_else.pad0 = 0; + } else { + insn->bits1.branch_gen6.jump_count = 2; + } } } @@ -978,7 +1049,7 @@ struct brw_instruction *brw_WHILE(struct brw_compile *p, struct brw_instruction *insn; GLuint br = 1; - if (intel->gen == 5) + if (intel->gen >= 5) br = 2; if (p->single_program_flow) @@ -1022,7 +1093,7 @@ void brw_land_fwd_jump(struct brw_compile *p, struct brw_instruction *landing = &p->store[p->nr_insn]; GLuint jmpi = 1; - if (intel->gen == 5) + if (intel->gen >= 5) jmpi = 2; assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI); @@ -1100,6 +1171,17 @@ void brw_math( struct brw_compile *p, if (intel->gen >= 6) { struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH); + assert(dest.file == BRW_GENERAL_REGISTER_FILE); + assert(src.file == BRW_GENERAL_REGISTER_FILE); + + assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1); + assert(src.hstride == BRW_HORIZONTAL_STRIDE_1); + + if (function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT && + function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) { + assert(src.type == BRW_REGISTER_TYPE_F); + } + /* Math is the same ISA format as other opcodes, except that CondModifier * becomes FC[3:0] and ThreadCtrl becomes FC[5:4]. */ @@ -1131,6 +1213,45 @@ void brw_math( struct brw_compile *p, } } +/** Extended math function, float[8]. + */ +void brw_math2(struct brw_compile *p, + struct brw_reg dest, + GLuint function, + struct brw_reg src0, + struct brw_reg src1) +{ + struct intel_context *intel = &p->brw->intel; + struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH); + + assert(intel->gen >= 6); + (void) intel; + + + assert(dest.file == BRW_GENERAL_REGISTER_FILE); + assert(src0.file == BRW_GENERAL_REGISTER_FILE); + assert(src1.file == BRW_GENERAL_REGISTER_FILE); + + assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1); + assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1); + assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1); + + if (function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT && + function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) { + assert(src0.type == BRW_REGISTER_TYPE_F); + assert(src1.type == BRW_REGISTER_TYPE_F); + } + + /* Math is the same ISA format as other opcodes, except that CondModifier + * becomes FC[3:0] and ThreadCtrl becomes FC[5:4]. + */ + insn->header.destreg__conditionalmod = function; + + brw_set_dest(insn, dest); + brw_set_src0(insn, src0); + brw_set_src1(insn, src1); +} + /** * Extended math function, float[16]. * Use 2 send instructions. 
@@ -1264,6 +1385,7 @@ void brw_dp_WRITE_16( struct brw_compile *p, BRW_DATAPORT_OWORD_BLOCK_4_OWORDS, /* msg_control */ BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE, /* msg_type */ msg_length, + GL_TRUE, /* header_present */ 0, /* pixel scoreboard */ send_commit_msg, /* response_length */ 0, /* eot */ @@ -1501,12 +1623,16 @@ void brw_fb_WRITE(struct brw_compile *p, struct intel_context *intel = &p->brw->intel; struct brw_instruction *insn; GLuint msg_control, msg_type; + GLboolean header_present = GL_TRUE; insn = next_insn(p, BRW_OPCODE_SEND); insn->header.predicate_control = 0; /* XXX */ insn->header.compression_control = BRW_COMPRESSION_NONE; if (intel->gen >= 6) { + if (msg_length == 4) + header_present = GL_FALSE; + /* headerless version, just submit color payload */ src0 = brw_message_reg(msg_reg_nr); @@ -1530,6 +1656,7 @@ void brw_fb_WRITE(struct brw_compile *p, msg_control, msg_type, msg_length, + header_present, 1, /* pixel scoreboard */ response_length, eot, @@ -1556,6 +1683,7 @@ void brw_SAMPLE(struct brw_compile *p, GLuint header_present, GLuint simd_mode) { + struct intel_context *intel = &p->brw->intel; GLboolean need_stall = 0; if (writemask == 0) { @@ -1627,11 +1755,25 @@ void brw_SAMPLE(struct brw_compile *p, } { - struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND); + struct brw_instruction *insn; + /* Sandybridge doesn't have the implied move for SENDs, + * and the first message register index comes from src0. + */ + if (intel->gen >= 6) { + brw_push_insn_state(p); + brw_set_mask_control( p, BRW_MASK_DISABLE ); + /* m1 contains header? */ + brw_MOV(p, brw_message_reg(msg_reg_nr), src0); + brw_pop_insn_state(p); + src0 = brw_message_reg(msg_reg_nr); + } + + insn = next_insn(p, BRW_OPCODE_SEND); insn->header.predicate_control = 0; /* XXX */ insn->header.compression_control = BRW_COMPRESSION_NONE; - insn->header.destreg__conditionalmod = msg_reg_nr; + if (intel->gen < 6) + insn->header.destreg__conditionalmod = msg_reg_nr; brw_set_dest(insn, dest); brw_set_src0(insn, src0); @@ -1721,13 +1863,28 @@ void brw_ff_sync(struct brw_compile *p, GLuint response_length, GLboolean eot) { - struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND); + struct intel_context *intel = &p->brw->intel; + struct brw_instruction *insn; + /* Sandybridge doesn't have the implied move for SENDs, + * and the first message register index comes from src0. 
+ */ + if (intel->gen >= 6) { + brw_push_insn_state(p); + brw_set_mask_control( p, BRW_MASK_DISABLE ); + brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD), + retype(src0, BRW_REGISTER_TYPE_UD)); + brw_pop_insn_state(p); + src0 = brw_message_reg(msg_reg_nr); + } + + insn = next_insn(p, BRW_OPCODE_SEND); brw_set_dest(insn, dest); brw_set_src0(insn, src0); brw_set_src1(insn, brw_imm_d(0)); - insn->header.destreg__conditionalmod = msg_reg_nr; + if (intel->gen < 6) + insn->header.destreg__conditionalmod = msg_reg_nr; brw_set_ff_sync_message(p->brw, insn, diff --git a/src/mesa/drivers/dri/i965/brw_fallback.c b/src/mesa/drivers/dri/i965/brw_fallback.c index ba401c215cb..6796fb208dc 100644 --- a/src/mesa/drivers/dri/i965/brw_fallback.c +++ b/src/mesa/drivers/dri/i965/brw_fallback.c @@ -43,7 +43,7 @@ static GLboolean do_check_fallback(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; GLuint i; if (brw->intel.no_rast) { diff --git a/src/mesa/drivers/dri/i965/brw_fallback.h b/src/mesa/drivers/dri/i965/brw_fallback.h index 50dcdacd17a..13b18b52e65 100644 --- a/src/mesa/drivers/dri/i965/brw_fallback.h +++ b/src/mesa/drivers/dri/i965/brw_fallback.h @@ -28,15 +28,15 @@ #ifndef BRW_FALLBACK_H #define BRW_FALLBACK_H -#include "main/mtypes.h" /* for GLcontext... */ +#include "main/mtypes.h" /* for struct gl_context... */ struct brw_context; struct vbo_prim; -void brw_fallback( GLcontext *ctx ); -void brw_unfallback( GLcontext *ctx ); +void brw_fallback( struct gl_context *ctx ); +void brw_unfallback( struct gl_context *ctx ); -void brw_loopback_vertex_list( GLcontext *ctx, +void brw_loopback_vertex_list( struct gl_context *ctx, const GLfloat *buffer, const GLubyte *attrsz, const struct vbo_prim *prim, diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp index cf5c52119a5..554ba39a0cd 100644 --- a/src/mesa/drivers/dri/i965/brw_fs.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs.cpp @@ -31,52 +31,27 @@ extern "C" { #include "main/macros.h" #include "main/shaderobj.h" +#include "main/uniforms.h" #include "program/prog_parameter.h" #include "program/prog_print.h" #include "program/prog_optimize.h" +#include "program/register_allocate.h" +#include "program/sampler.h" #include "program/hash_table.h" #include "brw_context.h" #include "brw_eu.h" #include "brw_wm.h" #include "talloc.h" } +#include "brw_fs.h" #include "../glsl/glsl_types.h" #include "../glsl/ir_optimization.h" #include "../glsl/ir_print_visitor.h" -enum register_file { - ARF = BRW_ARCHITECTURE_REGISTER_FILE, - GRF = BRW_GENERAL_REGISTER_FILE, - MRF = BRW_MESSAGE_REGISTER_FILE, - IMM = BRW_IMMEDIATE_VALUE, - FIXED_HW_REG, /* a struct brw_reg */ - UNIFORM, /* prog_data->params[hw_reg] */ - BAD_FILE -}; - -enum fs_opcodes { - FS_OPCODE_FB_WRITE = 256, - FS_OPCODE_RCP, - FS_OPCODE_RSQ, - FS_OPCODE_SQRT, - FS_OPCODE_EXP2, - FS_OPCODE_LOG2, - FS_OPCODE_POW, - FS_OPCODE_SIN, - FS_OPCODE_COS, - FS_OPCODE_DDX, - FS_OPCODE_DDY, - FS_OPCODE_LINTERP, - FS_OPCODE_TEX, - FS_OPCODE_TXB, - FS_OPCODE_TXL, - FS_OPCODE_DISCARD, -}; - -static int using_new_fs = -1; +static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg); struct gl_shader * -brw_new_shader(GLcontext *ctx, GLuint name, GLuint type) +brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type) { struct brw_shader *shader; @@ -91,7 +66,7 @@ brw_new_shader(GLcontext *ctx, GLuint name, GLuint type) } struct gl_shader_program * -brw_new_shader_program(GLcontext *ctx, GLuint name) 
+brw_new_shader_program(struct gl_context *ctx, GLuint name) { struct brw_shader_program *prog; prog = talloc_zero(NULL, struct brw_shader_program); @@ -103,7 +78,7 @@ brw_new_shader_program(GLcontext *ctx, GLuint name) } GLboolean -brw_compile_shader(GLcontext *ctx, struct gl_shader *shader) +brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader) { if (!_mesa_ir_compile_shader(ctx, shader)) return GL_FALSE; @@ -112,43 +87,60 @@ brw_compile_shader(GLcontext *ctx, struct gl_shader *shader) } GLboolean -brw_link_shader(GLcontext *ctx, struct gl_shader_program *prog) +brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog) { - if (using_new_fs == -1) - using_new_fs = getenv("INTEL_NEW_FS") != NULL; - - for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) { - struct brw_shader *shader = (struct brw_shader *)prog->_LinkedShaders[i]; - - if (using_new_fs && shader->base.Type == GL_FRAGMENT_SHADER) { - void *mem_ctx = talloc_new(NULL); - bool progress; - - if (shader->ir) - talloc_free(shader->ir); - shader->ir = new(shader) exec_list; - clone_ir_list(mem_ctx, shader->ir, shader->base.ir); - - do_mat_op_to_vec(shader->ir); - do_mod_to_fract(shader->ir); - do_div_to_mul_rcp(shader->ir); - do_sub_to_add_neg(shader->ir); - do_explog_to_explog2(shader->ir); + struct intel_context *intel = intel_context(ctx); + + struct brw_shader *shader = + (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; + if (shader != NULL) { + void *mem_ctx = talloc_new(NULL); + bool progress; + + if (shader->ir) + talloc_free(shader->ir); + shader->ir = new(shader) exec_list; + clone_ir_list(mem_ctx, shader->ir, shader->base.ir); + + do_mat_op_to_vec(shader->ir); + do_mod_to_fract(shader->ir); + do_div_to_mul_rcp(shader->ir); + do_sub_to_add_neg(shader->ir); + do_explog_to_explog2(shader->ir); + do_lower_texture_projection(shader->ir); + brw_do_cubemap_normalize(shader->ir); + + do { + progress = false; brw_do_channel_expressions(shader->ir); brw_do_vector_splitting(shader->ir); - do { - progress = false; - - progress = do_common_optimization(shader->ir, true, 32) || progress; - } while (progress); + progress = do_lower_jumps(shader->ir, true, true, + true, /* main return */ + false, /* continue */ + false /* loops */ + ) || progress; + + progress = do_common_optimization(shader->ir, true, 32) || progress; + + progress = lower_noise(shader->ir) || progress; + progress = + lower_variable_index_to_cond_assign(shader->ir, + GL_TRUE, /* input */ + GL_TRUE, /* output */ + GL_TRUE, /* temp */ + GL_TRUE /* uniform */ + ) || progress; + if (intel->gen == 6) { + progress = do_if_to_cond_assign(shader->ir) || progress; + } + } while (progress); - validate_ir_tree(shader->ir); + validate_ir_tree(shader->ir); - reparent_ir(shader->ir, shader->ir); - talloc_free(mem_ctx); - } + reparent_ir(shader->ir, shader->ir); + talloc_free(mem_ctx); } if (!_mesa_ir_link_shader(ctx, prog)) @@ -169,7 +161,6 @@ type_size(const struct glsl_type *type) case GLSL_TYPE_BOOL: return type->components(); case GLSL_TYPE_ARRAY: - /* FINISHME: uniform/varying arrays. */ return type_size(type->fields.array) * type->length; case GLSL_TYPE_STRUCT: size = 0; @@ -188,331 +179,415 @@ type_size(const struct glsl_type *type) } } -class fs_reg { -public: - /* Callers of this talloc-based new need not call delete. It's - * easier to just talloc_free 'ctx' (or any of its ancestors). 
*/ - static void* operator new(size_t size, void *ctx) - { - void *node; - - node = talloc_size(ctx, size); - assert(node != NULL); - - return node; - } - - void init() - { - this->reg = 0; - this->reg_offset = 0; - this->negate = 0; - this->abs = 0; - this->hw_reg = -1; - } - - /** Generic unset register constructor. */ - fs_reg() - { - init(); - this->file = BAD_FILE; - } - - /** Immediate value constructor. */ - fs_reg(float f) - { - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_F; - this->imm.f = f; - } - - /** Immediate value constructor. */ - fs_reg(int32_t i) - { - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_D; - this->imm.i = i; - } - - /** Immediate value constructor. */ - fs_reg(uint32_t u) - { - init(); - this->file = IMM; - this->type = BRW_REGISTER_TYPE_UD; - this->imm.u = u; - } - - /** Fixed brw_reg Immediate value constructor. */ - fs_reg(struct brw_reg fixed_hw_reg) - { - init(); - this->file = FIXED_HW_REG; - this->fixed_hw_reg = fixed_hw_reg; - this->type = fixed_hw_reg.type; - } - - fs_reg(enum register_file file, int hw_reg); - fs_reg(class fs_visitor *v, const struct glsl_type *type); - - /** Register file: ARF, GRF, MRF, IMM. */ - enum register_file file; - /** Abstract register number. 0 = fixed hw reg */ - int reg; - /** Offset within the abstract register. */ - int reg_offset; - /** HW register number. Generally unset until register allocation. */ - int hw_reg; - /** Register type. BRW_REGISTER_TYPE_* */ - int type; - bool negate; - bool abs; - struct brw_reg fixed_hw_reg; - - /** Value for file == BRW_IMMMEDIATE_FILE */ - union { - int32_t i; - uint32_t u; - float f; - } imm; -}; - static const fs_reg reg_undef; -static const fs_reg reg_null(ARF, BRW_ARF_NULL); +static const fs_reg reg_null_f(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_F); +static const fs_reg reg_null_d(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_D); + +int +fs_visitor::virtual_grf_alloc(int size) +{ + if (virtual_grf_array_size <= virtual_grf_next) { + if (virtual_grf_array_size == 0) + virtual_grf_array_size = 16; + else + virtual_grf_array_size *= 2; + virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes, + int, virtual_grf_array_size); + + /* This slot is always unused. */ + virtual_grf_sizes[0] = 0; + } + virtual_grf_sizes[virtual_grf_next] = size; + return virtual_grf_next++; +} + +/** Fixed HW reg constructor. */ +fs_reg::fs_reg(enum register_file file, int hw_reg) +{ + init(); + this->file = file; + this->hw_reg = hw_reg; + this->type = BRW_REGISTER_TYPE_F; +} + +/** Fixed HW reg constructor. */ +fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type) +{ + init(); + this->file = file; + this->hw_reg = hw_reg; + this->type = type; +} + +int +brw_type_for_base_type(const struct glsl_type *type) +{ + switch (type->base_type) { + case GLSL_TYPE_FLOAT: + return BRW_REGISTER_TYPE_F; + case GLSL_TYPE_INT: + case GLSL_TYPE_BOOL: + return BRW_REGISTER_TYPE_D; + case GLSL_TYPE_UINT: + return BRW_REGISTER_TYPE_UD; + case GLSL_TYPE_ARRAY: + case GLSL_TYPE_STRUCT: + /* These should be overridden with the type of the member when + * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely + * way to trip up if we don't. + */ + return BRW_REGISTER_TYPE_UD; + default: + assert(!"not reached"); + return BRW_REGISTER_TYPE_F; + } +} + +/** Automatic reg constructor. */ +fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type) +{ + init(); -class fs_inst : public exec_node { -public: - /* Callers of this talloc-based new need not call delete. 
It's - * easier to just talloc_free 'ctx' (or any of its ancestors). */ - static void* operator new(size_t size, void *ctx) - { - void *node; + this->file = GRF; + this->reg = v->virtual_grf_alloc(type_size(type)); + this->reg_offset = 0; + this->type = brw_type_for_base_type(type); +} + +fs_reg * +fs_visitor::variable_storage(ir_variable *var) +{ + return (fs_reg *)hash_table_find(this->variable_ht, var); +} + +/* Our support for uniforms is piggy-backed on the struct + * gl_fragment_program, because that's where the values actually + * get stored, rather than in some global gl_shader_program uniform + * store. + */ +int +fs_visitor::setup_uniform_values(int loc, const glsl_type *type) +{ + unsigned int offset = 0; + float *vec_values; - node = talloc_zero_size(ctx, size); - assert(node != NULL); + if (type->is_matrix()) { + const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT, + type->vector_elements, + 1); - return node; + for (unsigned int i = 0; i < type->matrix_columns; i++) { + offset += setup_uniform_values(loc + offset, column); + } + + return offset; } - void init() - { - this->opcode = BRW_OPCODE_NOP; - this->saturate = false; - this->conditional_mod = BRW_CONDITIONAL_NONE; - this->predicated = false; - this->sampler = 0; - this->shadow_compare = false; + switch (type->base_type) { + case GLSL_TYPE_FLOAT: + case GLSL_TYPE_UINT: + case GLSL_TYPE_INT: + case GLSL_TYPE_BOOL: + vec_values = fp->Base.Parameters->ParameterValues[loc]; + for (unsigned int i = 0; i < type->vector_elements; i++) { + c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i]; + } + return 1; + + case GLSL_TYPE_STRUCT: + for (unsigned int i = 0; i < type->length; i++) { + offset += setup_uniform_values(loc + offset, + type->fields.structure[i].type); + } + return offset; + + case GLSL_TYPE_ARRAY: + for (unsigned int i = 0; i < type->length; i++) { + offset += setup_uniform_values(loc + offset, type->fields.array); + } + return offset; + + case GLSL_TYPE_SAMPLER: + /* The sampler takes up a slot, but we don't use any values from it. */ + return 1; + + default: + assert(!"not reached"); + return 0; } +} + - fs_inst() - { - init(); +/* Our support for builtin uniforms is even scarier than non-builtin. + * It sits on top of the PROG_STATE_VAR parameters that are + * automatically updated from GL context state. 
+ */ +void +fs_visitor::setup_builtin_uniform_values(ir_variable *ir) +{ + const struct gl_builtin_uniform_desc *statevar = NULL; + + for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) { + statevar = &_mesa_builtin_uniform_desc[i]; + if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0) + break; } - fs_inst(int opcode) - { - init(); - this->opcode = opcode; + if (!statevar->name) { + this->fail = true; + printf("Failed to find builtin uniform `%s'\n", ir->name); + return; } - fs_inst(int opcode, fs_reg dst, fs_reg src0) - { - init(); - this->opcode = opcode; - this->dst = dst; - this->src[0] = src0; + int array_count; + if (ir->type->is_array()) { + array_count = ir->type->length; + } else { + array_count = 1; } - fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1) - { - init(); - this->opcode = opcode; - this->dst = dst; - this->src[0] = src0; - this->src[1] = src1; + for (int a = 0; a < array_count; a++) { + for (unsigned int i = 0; i < statevar->num_elements; i++) { + struct gl_builtin_uniform_element *element = &statevar->elements[i]; + int tokens[STATE_LENGTH]; + + memcpy(tokens, element->tokens, sizeof(element->tokens)); + if (ir->type->is_array()) { + tokens[1] = a; + } + + /* This state reference has already been setup by ir_to_mesa, + * but we'll get the same index back here. + */ + int index = _mesa_add_state_reference(this->fp->Base.Parameters, + (gl_state_index *)tokens); + float *vec_values = this->fp->Base.Parameters->ParameterValues[index]; + + /* Add each of the unique swizzles of the element as a + * parameter. This'll end up matching the expected layout of + * the array/matrix/structure we're trying to fill in. + */ + int last_swiz = -1; + for (unsigned int i = 0; i < 4; i++) { + int swiz = GET_SWZ(element->swizzle, i); + if (swiz == last_swiz) + break; + last_swiz = swiz; + + c->prog_data.param[c->prog_data.nr_params++] = &vec_values[swiz]; + } + } } +} - fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2) - { - init(); - this->opcode = opcode; - this->dst = dst; - this->src[0] = src0; - this->src[1] = src1; - this->src[2] = src2; +fs_reg * +fs_visitor::emit_fragcoord_interpolation(ir_variable *ir) +{ + fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type); + fs_reg wpos = *reg; + fs_reg neg_y = this->pixel_y; + neg_y.negate = true; + + /* gl_FragCoord.x */ + if (ir->pixel_center_integer) { + emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x)); + } else { + emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f))); } + wpos.reg_offset++; - int opcode; /* BRW_OPCODE_* or FS_OPCODE_* */ - fs_reg dst; - fs_reg src[3]; - bool saturate; - bool predicated; - int conditional_mod; /**< BRW_CONDITIONAL_* */ + /* gl_FragCoord.y */ + if (ir->origin_upper_left && ir->pixel_center_integer) { + emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y)); + } else { + fs_reg pixel_y = this->pixel_y; + float offset = (ir->pixel_center_integer ? 0.0 : 0.5); - int mlen; /** SEND message length */ - int sampler; - bool shadow_compare; + if (!ir->origin_upper_left) { + pixel_y.negate = true; + offset += c->key.drawable_height - 1.0; + } - /** @{ - * Annotation for the generated IR. One of the two can be set. 
- */ - ir_instruction *ir; - const char *annotation; - /** @} */ -}; - -class fs_visitor : public ir_visitor -{ -public: - - fs_visitor(struct brw_wm_compile *c, struct brw_shader *shader) - { - this->c = c; - this->p = &c->func; - this->brw = p->brw; - this->intel = &brw->intel; - this->ctx = &intel->ctx; - this->mem_ctx = talloc_new(NULL); - this->shader = shader; - this->fail = false; - this->next_abstract_grf = 1; - this->variable_ht = hash_table_ctor(0, - hash_table_pointer_hash, - hash_table_pointer_compare); - - this->frag_color = NULL; - this->frag_data = NULL; - this->frag_depth = NULL; - this->first_non_payload_grf = 0; - - this->current_annotation = NULL; - this->annotation_string = NULL; - this->annotation_ir = NULL; - } - ~fs_visitor() - { - talloc_free(this->mem_ctx); - hash_table_dtor(this->variable_ht); - } - - fs_reg *variable_storage(ir_variable *var); - - void visit(ir_variable *ir); - void visit(ir_assignment *ir); - void visit(ir_dereference_variable *ir); - void visit(ir_dereference_record *ir); - void visit(ir_dereference_array *ir); - void visit(ir_expression *ir); - void visit(ir_texture *ir); - void visit(ir_if *ir); - void visit(ir_constant *ir); - void visit(ir_swizzle *ir); - void visit(ir_return *ir); - void visit(ir_loop *ir); - void visit(ir_loop_jump *ir); - void visit(ir_discard *ir); - void visit(ir_call *ir); - void visit(ir_function *ir); - void visit(ir_function_signature *ir); - - fs_inst *emit(fs_inst inst); - void assign_curb_setup(); - void assign_urb_setup(); - void assign_regs(); - void generate_code(); - void generate_fb_write(fs_inst *inst); - void generate_linterp(fs_inst *inst, struct brw_reg dst, - struct brw_reg *src); - void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src); - void generate_math(fs_inst *inst, struct brw_reg dst, struct brw_reg *src); - void generate_discard(fs_inst *inst); - - void emit_dummy_fs(); - void emit_interpolation(); - void emit_pinterp(int location); - void emit_fb_writes(); - - struct brw_reg interp_reg(int location, int channel); - - struct brw_context *brw; - struct intel_context *intel; - GLcontext *ctx; - struct brw_wm_compile *c; - struct brw_compile *p; - struct brw_shader *shader; - void *mem_ctx; - exec_list instructions; - int next_abstract_grf; - struct hash_table *variable_ht; - ir_variable *frag_color, *frag_data, *frag_depth; - int first_non_payload_grf; - - /** @{ debug annotation info */ - const char *current_annotation; - ir_instruction *base_ir; - const char **annotation_string; - ir_instruction **annotation_ir; - /** @} */ - - bool fail; - - /* Result of last visit() method. */ - fs_reg result; + emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset))); + } + wpos.reg_offset++; - fs_reg pixel_x; - fs_reg pixel_y; - fs_reg pixel_w; - fs_reg delta_x; - fs_reg delta_y; - fs_reg interp_attrs[64]; + /* gl_FragCoord.z */ + emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y, + interp_reg(FRAG_ATTRIB_WPOS, 2))); + wpos.reg_offset++; - int grf_used; + /* gl_FragCoord.w: Already set up in emit_interpolation */ + emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w)); -}; + return reg; +} -/** Fixed HW reg constructor. */ -fs_reg::fs_reg(enum register_file file, int hw_reg) +fs_reg * +fs_visitor::emit_general_interpolation(ir_variable *ir) { - init(); - this->file = file; - this->hw_reg = hw_reg; - this->type = BRW_REGISTER_TYPE_F; + fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type); + /* Interpolation is always in floating point regs. 
*/ + reg->type = BRW_REGISTER_TYPE_F; + fs_reg attr = *reg; + + unsigned int array_elements; + const glsl_type *type; + + if (ir->type->is_array()) { + array_elements = ir->type->length; + if (array_elements == 0) { + this->fail = true; + } + type = ir->type->fields.array; + } else { + array_elements = 1; + type = ir->type; + } + + int location = ir->location; + for (unsigned int i = 0; i < array_elements; i++) { + for (unsigned int j = 0; j < type->matrix_columns; j++) { + if (urb_setup[location] == -1) { + /* If there's no incoming setup data for this slot, don't + * emit interpolation for it. + */ + attr.reg_offset += type->vector_elements; + location++; + continue; + } + + for (unsigned int c = 0; c < type->vector_elements; c++) { + struct brw_reg interp = interp_reg(location, c); + emit(fs_inst(FS_OPCODE_LINTERP, + attr, + this->delta_x, + this->delta_y, + fs_reg(interp))); + attr.reg_offset++; + } + + if (intel->gen < 6) { + attr.reg_offset -= type->vector_elements; + for (unsigned int c = 0; c < type->vector_elements; c++) { + emit(fs_inst(BRW_OPCODE_MUL, + attr, + attr, + this->pixel_w)); + attr.reg_offset++; + } + } + location++; + } + } + + return reg; } -/** Automatic reg constructor. */ -fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type) +fs_reg * +fs_visitor::emit_frontfacing_interpolation(ir_variable *ir) { - init(); + fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type); + + /* The frontfacing comes in as a bit in the thread payload. */ + if (intel->gen >= 6) { + emit(fs_inst(BRW_OPCODE_ASR, + *reg, + fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)), + fs_reg(15))); + emit(fs_inst(BRW_OPCODE_NOT, + *reg, + *reg)); + emit(fs_inst(BRW_OPCODE_AND, + *reg, + *reg, + fs_reg(1))); + } else { + fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type); + struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD); + /* bit 31 is "primitive is back face", so checking < (1 << 31) gives + * us front face + */ + fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, + *reg, + fs_reg(r1_6ud), + fs_reg(1u << 31))); + inst->conditional_mod = BRW_CONDITIONAL_L; + emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u))); + } - this->file = GRF; - this->reg = v->next_abstract_grf; - this->reg_offset = 0; - v->next_abstract_grf += type_size(type); + return reg; +} - switch (type->base_type) { - case GLSL_TYPE_FLOAT: - this->type = BRW_REGISTER_TYPE_F; - break; - case GLSL_TYPE_INT: - case GLSL_TYPE_BOOL: - this->type = BRW_REGISTER_TYPE_D; - break; - case GLSL_TYPE_UINT: - this->type = BRW_REGISTER_TYPE_UD; +fs_inst * +fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src) +{ + switch (opcode) { + case FS_OPCODE_RCP: + case FS_OPCODE_RSQ: + case FS_OPCODE_SQRT: + case FS_OPCODE_EXP2: + case FS_OPCODE_LOG2: + case FS_OPCODE_SIN: + case FS_OPCODE_COS: break; default: - assert(!"not reached"); - this->type = BRW_REGISTER_TYPE_F; - break; + assert(!"not reached: bad math opcode"); + return NULL; } + + /* Can't do hstride == 0 args to gen6 math, so expand it out. We + * might be able to do better by doing execsize = 1 math and then + * expanding that result out, but we would need to be careful with + * masking. 
+ */ + if (intel->gen >= 6 && src.file == UNIFORM) { + fs_reg expanded = fs_reg(this, glsl_type::float_type); + emit(fs_inst(BRW_OPCODE_MOV, expanded, src)); + src = expanded; + } + + fs_inst *inst = emit(fs_inst(opcode, dst, src)); + + if (intel->gen < 6) { + inst->base_mrf = 2; + inst->mlen = 1; + } + + return inst; } -fs_reg * -fs_visitor::variable_storage(ir_variable *var) +fs_inst * +fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1) { - return (fs_reg *)hash_table_find(this->variable_ht, var); + int base_mrf = 2; + fs_inst *inst; + + assert(opcode == FS_OPCODE_POW); + + if (intel->gen >= 6) { + /* Can't do hstride == 0 args to gen6 math, so expand it out. */ + if (src0.file == UNIFORM) { + fs_reg expanded = fs_reg(this, glsl_type::float_type); + emit(fs_inst(BRW_OPCODE_MOV, expanded, src0)); + src0 = expanded; + } + + if (src1.file == UNIFORM) { + fs_reg expanded = fs_reg(this, glsl_type::float_type); + emit(fs_inst(BRW_OPCODE_MOV, expanded, src1)); + src1 = expanded; + } + + inst = emit(fs_inst(opcode, dst, src0, src1)); + } else { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1)); + inst = emit(fs_inst(opcode, dst, src0, reg_null_f)); + + inst->base_mrf = base_mrf; + inst->mlen = 2; + } + return inst; } void @@ -520,36 +595,37 @@ fs_visitor::visit(ir_variable *ir) { fs_reg *reg = NULL; + if (variable_storage(ir)) + return; + if (strcmp(ir->name, "gl_FragColor") == 0) { this->frag_color = ir; } else if (strcmp(ir->name, "gl_FragData") == 0) { this->frag_data = ir; } else if (strcmp(ir->name, "gl_FragDepth") == 0) { this->frag_depth = ir; - assert(!"FINISHME: this hangs currently."); } if (ir->mode == ir_var_in) { - reg = &this->interp_attrs[ir->location]; + if (!strcmp(ir->name, "gl_FragCoord")) { + reg = emit_fragcoord_interpolation(ir); + } else if (!strcmp(ir->name, "gl_FrontFacing")) { + reg = emit_frontfacing_interpolation(ir); + } else { + reg = emit_general_interpolation(ir); + } + assert(reg); + hash_table_insert(this->variable_ht, reg, ir); + return; } if (ir->mode == ir_var_uniform) { - const float *vec_values; int param_index = c->prog_data.nr_params; - /* FINISHME: This is wildly incomplete. */ - assert(ir->type->is_scalar() || ir->type->is_vector() || - ir->type->is_sampler()); - - const struct gl_program *fp = &this->brw->fragment_program->Base; - /* Our support for uniforms is piggy-backed on the struct - * gl_fragment_program, because that's where the values actually - * get stored, rather than in some global gl_shader_program uniform - * store. 
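The check that opens emit_math() above works around the gen6 restriction its comment describes: the math instruction cannot take a stride-0 (uniform) source, so such an operand is first MOVed into an ordinary GRF temporary and the math reads the copy, while pre-gen6 parts pass operands through message registers instead. A minimal sketch with stand-in types, not the driver's fs_reg:

struct xreg { int is_uniform; int nr; };

static struct xreg fixup_gen6_math_src(struct xreg src, int gen, struct xreg temp_grf,
                                       void (*emit_mov)(struct xreg dst, struct xreg src))
{
   if (gen >= 6 && src.is_uniform) {
      emit_mov(temp_grf, src);   /* broadcast the uniform into a stride-1 GRF */
      return temp_grf;           /* the math instruction reads the copy */
   }
   return src;
}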
- */ - vec_values = fp->Parameters->ParameterValues[ir->location]; - for (unsigned int i = 0; i < ir->type->vector_elements; i++) { - c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i]; + if (!strncmp(ir->name, "gl_", 3)) { + setup_builtin_uniform_values(ir); + } else { + setup_uniform_values(ir->location, ir->type); } reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index); @@ -571,7 +647,18 @@ fs_visitor::visit(ir_dereference_variable *ir) void fs_visitor::visit(ir_dereference_record *ir) { - assert(!"FINISHME"); + const glsl_type *struct_type = ir->record->type; + + ir->record->accept(this); + + unsigned int offset = 0; + for (unsigned int i = 0; i < struct_type->length; i++) { + if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) + break; + offset += type_size(struct_type->fields.structure[i].type); + } + this->result.reg_offset += offset; + this->result.type = brw_type_for_base_type(ir->type); } void @@ -583,11 +670,8 @@ fs_visitor::visit(ir_dereference_array *ir) ir->array->accept(this); index = ir->array_index->as_constant(); - if (ir->type->is_matrix()) { - element_size = ir->type->vector_elements; - } else { - element_size = type_size(ir->type); - } + element_size = type_size(ir->type); + this->result.type = brw_type_for_base_type(ir->type); if (index) { assert(this->result.file == UNIFORM || @@ -595,7 +679,7 @@ fs_visitor::visit(ir_dereference_array *ir) this->result.reg != 0)); this->result.reg_offset += index->value.i[0] * element_size; } else { - assert(!"FINISHME: non-constant matrix column"); + assert(!"FINISHME: non-constant array element"); } } @@ -633,10 +717,13 @@ fs_visitor::visit(ir_expression *ir) switch (ir->operation) { case ir_unop_logic_not: + /* Note that BRW_OPCODE_NOT is not appropriate here, since it is + * ones complement of the whole register, not just bit 0. 
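The ir_dereference_record visitor above locates a field by summing the register sizes of the fields that precede it and bumping reg_offset by that amount. The same computation over a simplified field array (illustrative types; strcmp as in the hunk):

#include <string.h>

struct field { const char *name; int size_in_regs; };

static int field_reg_offset(const struct field *fields, int num_fields,
                            const char *wanted)
{
   int offset = 0;
   for (int i = 0; i < num_fields; i++) {
      if (strcmp(fields[i].name, wanted) == 0)
         break;
      offset += fields[i].size_in_regs;   /* type_size() of each earlier field */
   }
   return offset;
}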
+ */ emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1))); break; case ir_unop_neg: - op[0].negate = ~op[0].negate; + op[0].negate = !op[0].negate; this->result = op[0]; break; case ir_unop_abs: @@ -648,36 +735,36 @@ fs_visitor::visit(ir_expression *ir) emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f))); - inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f))); + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f))); inst->conditional_mod = BRW_CONDITIONAL_G; inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f))); inst->predicated = true; - inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f))); + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f))); inst->conditional_mod = BRW_CONDITIONAL_L; inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f))); inst->predicated = true; break; case ir_unop_rcp: - emit(fs_inst(FS_OPCODE_RCP, this->result, op[0])); + emit_math(FS_OPCODE_RCP, this->result, op[0]); break; case ir_unop_exp2: - emit(fs_inst(FS_OPCODE_EXP2, this->result, op[0])); + emit_math(FS_OPCODE_EXP2, this->result, op[0]); break; case ir_unop_log2: - emit(fs_inst(FS_OPCODE_LOG2, this->result, op[0])); + emit_math(FS_OPCODE_LOG2, this->result, op[0]); break; case ir_unop_exp: case ir_unop_log: assert(!"not reached: should be handled by ir_explog_to_explog2"); break; case ir_unop_sin: - emit(fs_inst(FS_OPCODE_SIN, this->result, op[0])); + emit_math(FS_OPCODE_SIN, this->result, op[0]); break; case ir_unop_cos: - emit(fs_inst(FS_OPCODE_COS, this->result, op[0])); + emit_math(FS_OPCODE_COS, this->result, op[0]); break; case ir_unop_dFdx: @@ -725,11 +812,13 @@ fs_visitor::visit(ir_expression *ir) emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1))); break; case ir_binop_equal: + case ir_binop_all_equal: /* same as nequal for scalars */ inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1])); inst->conditional_mod = BRW_CONDITIONAL_Z; emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1))); break; case ir_binop_nequal: + case ir_binop_any_nequal: /* same as nequal for scalars */ inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1])); inst->conditional_mod = BRW_CONDITIONAL_NZ; emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1))); @@ -750,7 +839,7 @@ fs_visitor::visit(ir_expression *ir) case ir_binop_dot: case ir_binop_cross: case ir_unop_any: - assert(!"not reached: should be handled by brw_channel_expressions"); + assert(!"not reached: should be handled by brw_fs_channel_expressions"); break; case ir_unop_noise: @@ -758,18 +847,16 @@ fs_visitor::visit(ir_expression *ir) break; case ir_unop_sqrt: - emit(fs_inst(FS_OPCODE_SQRT, this->result, op[0])); + emit_math(FS_OPCODE_SQRT, this->result, op[0]); break; case ir_unop_rsq: - emit(fs_inst(FS_OPCODE_RSQ, this->result, op[0])); + emit_math(FS_OPCODE_RSQ, this->result, op[0]); break; case ir_unop_i2f: case ir_unop_b2f: case ir_unop_b2i: - emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0])); - break; case ir_unop_f2i: emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0])); break; @@ -777,12 +864,15 @@ fs_visitor::visit(ir_expression *ir) case ir_unop_i2b: inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f))); inst->conditional_mod = BRW_CONDITIONAL_NZ; + inst = emit(fs_inst(BRW_OPCODE_AND, this->result, + this->result, fs_reg(1))); + break; case ir_unop_trunc: - emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0])); + emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0])); 
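The CMP + AND pairs above are how scalar comparisons become canonical GLSL booleans: the comparison leaves a non-zero bit pattern in the destination when the condition holds (modeled as all-ones here, which is an assumption of this sketch), and masking with 1 reduces that to 0 or 1. C equivalent for ir_binop_less:

static int lowered_less(float a, float b)
{
   int cmp_dst = (a < b) ? ~0 : 0;   /* CMP with BRW_CONDITIONAL_L */
   return cmp_dst & 1;               /* AND dst, dst, 1  ->  0 or 1 */
}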
break; case ir_unop_ceil: - op[0].negate = ~op[0].negate; + op[0].negate = !op[0].negate; inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0])); this->result.negate = true; break; @@ -792,6 +882,9 @@ fs_visitor::visit(ir_expression *ir) case ir_unop_fract: inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0])); break; + case ir_unop_round_even: + emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0])); + break; case ir_binop_min: inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1])); @@ -809,7 +902,7 @@ fs_visitor::visit(ir_expression *ir) break; case ir_binop_pow: - inst = emit(fs_inst(FS_OPCODE_POW, this->result, op[0], op[1])); + emit_math(FS_OPCODE_POW, this->result, op[0], op[1]); break; case ir_unop_bit_not: @@ -825,11 +918,50 @@ fs_visitor::visit(ir_expression *ir) } void +fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r, + const glsl_type *type, bool predicated) +{ + switch (type->base_type) { + case GLSL_TYPE_FLOAT: + case GLSL_TYPE_UINT: + case GLSL_TYPE_INT: + case GLSL_TYPE_BOOL: + for (unsigned int i = 0; i < type->components(); i++) { + l.type = brw_type_for_base_type(type); + r.type = brw_type_for_base_type(type); + + fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, l, r)); + inst->predicated = predicated; + + l.reg_offset++; + r.reg_offset++; + } + break; + case GLSL_TYPE_ARRAY: + for (unsigned int i = 0; i < type->length; i++) { + emit_assignment_writes(l, r, type->fields.array, predicated); + } + + case GLSL_TYPE_STRUCT: + for (unsigned int i = 0; i < type->length; i++) { + emit_assignment_writes(l, r, type->fields.structure[i].type, + predicated); + } + break; + + case GLSL_TYPE_SAMPLER: + break; + + default: + assert(!"not reached"); + break; + } +} + +void fs_visitor::visit(ir_assignment *ir) { struct fs_reg l, r; - int i; - int write_mask; fs_inst *inst; /* FINISHME: arrays on the lhs */ @@ -839,118 +971,300 @@ fs_visitor::visit(ir_assignment *ir) ir->rhs->accept(this); r = this->result; - /* FINISHME: This should really set to the correct maximal writemask for each - * FINISHME: component written (in the loops below). This case can only - * FINISHME: occur for matrices, arrays, and structures. - */ - if (ir->write_mask == 0) { - assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector()); - write_mask = WRITEMASK_XYZW; - } else { - assert(ir->lhs->type->is_vector() || ir->lhs->type->is_scalar()); - write_mask = ir->write_mask; - } - assert(l.file != BAD_FILE); assert(r.file != BAD_FILE); if (ir->condition) { - /* Get the condition bool into the predicate. 
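The ir_unop_ceil case above relies on the identity ceil(x) = -floor(-x), since the hardware only provides a round-down instruction (RNDD): negate the source, round down, negate the result. In plain C:

#include <math.h>

static float ceil_via_rndd(float x)
{
   return -floorf(-x);   /* negate, RNDD, negate */
}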
*/ - ir->condition->accept(this); - inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, fs_reg(0))); - inst->conditional_mod = BRW_CONDITIONAL_NZ; + emit_bool_to_cond_code(ir->condition); } - for (i = 0; i < type_size(ir->lhs->type); i++) { - if (i >= 4 || (write_mask & (1 << i))) { - inst = emit(fs_inst(BRW_OPCODE_MOV, l, r)); - if (ir->condition) - inst->predicated = true; + if (ir->lhs->type->is_scalar() || + ir->lhs->type->is_vector()) { + for (int i = 0; i < ir->lhs->type->vector_elements; i++) { + if (ir->write_mask & (1 << i)) { + inst = emit(fs_inst(BRW_OPCODE_MOV, l, r)); + if (ir->condition) + inst->predicated = true; + r.reg_offset++; + } + l.reg_offset++; } - l.reg_offset++; - r.reg_offset++; + } else { + emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL); } } -void -fs_visitor::visit(ir_texture *ir) +fs_inst * +fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate) { - int base_mrf = 2; - fs_inst *inst = NULL; - unsigned int mlen = 0; + int mlen; + int base_mrf = 1; + bool simd16 = false; + fs_reg orig_dst; - ir->coordinate->accept(this); - fs_reg coordinate = this->result; + /* g0 header. */ + mlen = 1; - if (ir->projector) { - fs_reg inv_proj = fs_reg(this, glsl_type::float_type); + if (ir->shadow_comparitor) { + for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), + coordinate)); + coordinate.reg_offset++; + } + /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ + mlen += 3; - ir->projector->accept(this); - emit(fs_inst(FS_OPCODE_RCP, inv_proj, this->result)); + if (ir->op == ir_tex) { + /* There's no plain shadow compare message, so we use shadow + * compare with a bias of 0.0. + */ + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), + fs_reg(0.0f))); + mlen++; + } else if (ir->op == ir_txb) { + ir->lod_info.bias->accept(this); + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), + this->result)); + mlen++; + } else { + assert(ir->op == ir_txl); + ir->lod_info.lod->accept(this); + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), + this->result)); + mlen++; + } - fs_reg proj_coordinate = fs_reg(this, ir->coordinate->type); - for (unsigned int i = 0; i < ir->coordinate->type->vector_elements; i++) { - emit(fs_inst(BRW_OPCODE_MUL, proj_coordinate, coordinate, inv_proj)); + ir->shadow_comparitor->accept(this); + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result)); + mlen++; + } else if (ir->op == ir_tex) { + for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), + coordinate)); coordinate.reg_offset++; - proj_coordinate.reg_offset++; } - proj_coordinate.reg_offset = 0; + /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ + mlen += 3; + } else { + /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod + * instructions. We'll need to do SIMD16 here. + */ + assert(ir->op == ir_txb || ir->op == ir_txl); - coordinate = proj_coordinate; + for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), + coordinate)); + coordinate.reg_offset++; + } + + /* lod/bias appears after u/v/r. 
*/ + mlen += 6; + + if (ir->op == ir_txb) { + ir->lod_info.bias->accept(this); + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), + this->result)); + mlen++; + } else { + ir->lod_info.lod->accept(this); + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), + this->result)); + mlen++; + } + + /* The unused upper half. */ + mlen++; + + /* Now, since we're doing simd16, the return is 2 interleaved + * vec4s where the odd-indexed ones are junk. We'll need to move + * this weirdness around to the expected layout. + */ + simd16 = true; + orig_dst = dst; + dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, + 2)); + dst.type = BRW_REGISTER_TYPE_F; } - for (mlen = 0; mlen < ir->coordinate->type->vector_elements; mlen++) { - emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate)); - coordinate.reg_offset++; + fs_inst *inst = NULL; + switch (ir->op) { + case ir_tex: + inst = emit(fs_inst(FS_OPCODE_TEX, dst)); + break; + case ir_txb: + inst = emit(fs_inst(FS_OPCODE_TXB, dst)); + break; + case ir_txl: + inst = emit(fs_inst(FS_OPCODE_TXL, dst)); + break; + case ir_txd: + case ir_txf: + assert(!"GLSL 1.30 features unsupported"); + break; } + inst->base_mrf = base_mrf; + inst->mlen = mlen; - /* Pre-Ironlake, the 8-wide sampler always took u,v,r. */ - if (intel->gen < 5) - mlen = 3; + if (simd16) { + for (int i = 0; i < 4; i++) { + emit(fs_inst(BRW_OPCODE_MOV, orig_dst, dst)); + orig_dst.reg_offset++; + dst.reg_offset += 2; + } + } + + return inst; +} + +fs_inst * +fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate) +{ + /* gen5's SIMD8 sampler has slots for u, v, r, array index, then + * optional parameters like shadow comparitor or LOD bias. If + * optional parameters aren't present, those base slots are + * optional and don't need to be included in the message. + * + * We don't fill in the unnecessary slots regardless, which may + * look surprising in the disassembly. + */ + int mlen = 1; /* g0 header always present. */ + int base_mrf = 1; + + for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), + coordinate)); + coordinate.reg_offset++; + } + mlen += ir->coordinate->type->vector_elements; if (ir->shadow_comparitor) { - /* For shadow comparisons, we have to supply u,v,r. */ - mlen = 3; + mlen = MAX2(mlen, 5); ir->shadow_comparitor->accept(this); emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result)); mlen++; } - /* Do we ever want to handle writemasking on texture samples? Is it - * performance relevant? 
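When the gen4 bias/lod path above has to fall back to a SIMD16 sampler message, the return comes back as interleaved vec4 pairs in which the odd-numbered registers hold the unused upper pixels, and the fix-up loop copies the four useful registers back into a packed SIMD8 vec4. The same index arithmetic over flat arrays (assumes 8 floats per SIMD8 register, illustrative only):

static void deinterleave_simd16_result(const float *ret /* 8 regs */,
                                       float *dst /* 4 regs */)
{
   const int reg_floats = 8;   /* one SIMD8 register worth of floats */
   for (int i = 0; i < 4; i++)
      for (int f = 0; f < reg_floats; f++)
         dst[i * reg_floats + f] = ret[2 * i * reg_floats + f];   /* skip the junk reg */
}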
- */ - fs_reg dst = fs_reg(this, glsl_type::vec4_type); - + fs_inst *inst = NULL; switch (ir->op) { case ir_tex: - inst = emit(fs_inst(FS_OPCODE_TEX, dst, fs_reg(MRF, base_mrf))); + inst = emit(fs_inst(FS_OPCODE_TEX, dst)); break; case ir_txb: ir->lod_info.bias->accept(this); + mlen = MAX2(mlen, 5); emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result)); mlen++; - inst = emit(fs_inst(FS_OPCODE_TXB, dst, fs_reg(MRF, base_mrf))); + inst = emit(fs_inst(FS_OPCODE_TXB, dst)); break; case ir_txl: ir->lod_info.lod->accept(this); + mlen = MAX2(mlen, 5); emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result)); mlen++; - inst = emit(fs_inst(FS_OPCODE_TXL, dst, fs_reg(MRF, base_mrf))); + inst = emit(fs_inst(FS_OPCODE_TXL, dst)); break; case ir_txd: case ir_txf: assert(!"GLSL 1.30 features unsupported"); break; } + inst->base_mrf = base_mrf; + inst->mlen = mlen; + + return inst; +} + +void +fs_visitor::visit(ir_texture *ir) +{ + int sampler; + fs_inst *inst = NULL; + + ir->coordinate->accept(this); + fs_reg coordinate = this->result; + + /* Should be lowered by do_lower_texture_projection */ + assert(!ir->projector); + + sampler = _mesa_get_sampler_uniform_value(ir->sampler, + ctx->Shader.CurrentProgram, + &brw->fragment_program->Base); + sampler = c->fp->program.Base.SamplerUnits[sampler]; + + /* The 965 requires the EU to do the normalization of GL rectangle + * texture coordinates. We use the program parameter state + * tracking to get the scaling factor. + */ + if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { + struct gl_program_parameter_list *params = c->fp->program.Base.Parameters; + int tokens[STATE_LENGTH] = { + STATE_INTERNAL, + STATE_TEXRECT_SCALE, + sampler, + 0, + 0 + }; + + fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params); + fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1); + GLuint index = _mesa_add_state_reference(params, + (gl_state_index *)tokens); + float *vec_values = this->fp->Base.Parameters->ParameterValues[index]; + + c->prog_data.param[c->prog_data.nr_params++] = &vec_values[0]; + c->prog_data.param[c->prog_data.nr_params++] = &vec_values[1]; + + fs_reg dst = fs_reg(this, ir->coordinate->type); + fs_reg src = coordinate; + coordinate = dst; + + emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x)); + dst.reg_offset++; + src.reg_offset++; + emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y)); + } + + /* Writemasking doesn't eliminate channels on SIMD8 texture + * samples, so don't worry about them. 
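The rectangle-texture fix-up above fetches two uniforms through STATE_TEXRECT_SCALE and multiplies them into the incoming coordinate before the sample message; the sketch below assumes those values are 1/width and 1/height, turning texel-space coordinates into the normalized range the sampler expects:

static void normalize_rect_coord(float coord[2], float scale_x, float scale_y)
{
   coord[0] *= scale_x;   /* u: texels -> [0, 1], scale_x assumed to be 1/width */
   coord[1] *= scale_y;   /* v: texels -> [0, 1], scale_y assumed to be 1/height */
}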
+ */ + fs_reg dst = fs_reg(this, glsl_type::vec4_type); + + if (intel->gen < 5) { + inst = emit_texture_gen4(ir, dst, coordinate); + } else { + inst = emit_texture_gen5(ir, dst, coordinate); + } + + inst->sampler = sampler; this->result = dst; if (ir->shadow_comparitor) inst->shadow_compare = true; - inst->mlen = mlen; + + if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) { + fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type); + + for (int i = 0; i < 4; i++) { + int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i); + fs_reg l = swizzle_dst; + l.reg_offset += i; + + if (swiz == SWIZZLE_ZERO) { + emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(0.0f))); + } else if (swiz == SWIZZLE_ONE) { + emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(1.0f))); + } else { + fs_reg r = dst; + r.reg_offset += GET_SWZ(c->key.tex_swizzles[inst->sampler], i); + emit(fs_inst(BRW_OPCODE_MOV, l, r)); + } + } + this->result = swizzle_dst; + } } void @@ -959,6 +1273,11 @@ fs_visitor::visit(ir_swizzle *ir) ir->val->accept(this); fs_reg val = this->result; + if (ir->type->vector_elements == 1) { + this->result.reg_offset += ir->mask.x; + return; + } + fs_reg result = fs_reg(this, ir->type); this->result = result; @@ -990,9 +1309,13 @@ fs_visitor::visit(ir_swizzle *ir) void fs_visitor::visit(ir_discard *ir) { + fs_reg temp = fs_reg(this, glsl_type::uint_type); + assert(ir->condition == NULL); /* FINISHME */ - emit(fs_inst(FS_OPCODE_DISCARD)); + emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d)); + emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp)); + kill_emitted = true; } void @@ -1023,6 +1346,108 @@ fs_visitor::visit(ir_constant *ir) } void +fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) +{ + ir_expression *expr = ir->as_expression(); + + if (expr) { + fs_reg op[2]; + fs_inst *inst; + + for (unsigned int i = 0; i < expr->get_num_operands(); i++) { + assert(expr->operands[i]->type->is_scalar()); + + expr->operands[i]->accept(this); + op[i] = this->result; + } + + switch (expr->operation) { + case ir_unop_logic_not: + inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1))); + inst->conditional_mod = BRW_CONDITIONAL_Z; + break; + + case ir_binop_logic_xor: + inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + + case ir_binop_logic_or: + inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + + case ir_binop_logic_and: + inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + + case ir_unop_f2b: + if (intel->gen >= 6) { + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, + op[0], fs_reg(0.0f))); + } else { + inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0])); + } + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + + case ir_unop_i2b: + if (intel->gen >= 6) { + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0))); + } else { + inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0])); + } + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + + case ir_binop_greater: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_G; + break; + case ir_binop_gequal: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_GE; + break; + case ir_binop_less: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_L; + break; + case 
ir_binop_lequal: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_LE; + break; + case ir_binop_equal: + case ir_binop_all_equal: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_Z; + break; + case ir_binop_nequal: + case ir_binop_any_nequal: + inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1])); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + default: + assert(!"not reached"); + this->fail = true; + break; + } + return; + } + + ir->accept(this); + + if (intel->gen >= 6) { + fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, + this->result, fs_reg(1))); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + } else { + fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result)); + inst->conditional_mod = BRW_CONDITIONAL_NZ; + } +} + +void fs_visitor::visit(ir_if *ir) { fs_inst *inst; @@ -1032,10 +1457,7 @@ fs_visitor::visit(ir_if *ir) */ this->base_ir = ir->condition; - /* Generate the condition into the condition code. */ - ir->condition->accept(this); - inst = emit(fs_inst(BRW_OPCODE_MOV, fs_reg(brw_null_reg()), this->result)); - inst->conditional_mod = BRW_CONDITIONAL_NZ; + emit_bool_to_cond_code(ir->condition); inst = emit(fs_inst(BRW_OPCODE_IF)); inst->predicated = true; @@ -1064,32 +1486,69 @@ fs_visitor::visit(ir_if *ir) void fs_visitor::visit(ir_loop *ir) { - assert(!ir->from); - assert(!ir->to); - assert(!ir->increment); - assert(!ir->counter); + fs_reg counter = reg_undef; + + if (ir->counter) { + this->base_ir = ir->counter; + ir->counter->accept(this); + counter = *(variable_storage(ir->counter)); + + if (ir->from) { + this->base_ir = ir->from; + ir->from->accept(this); + + emit(fs_inst(BRW_OPCODE_MOV, counter, this->result)); + } + } emit(fs_inst(BRW_OPCODE_DO)); - /* Start a safety counter. If the user messed up their loop - * counting, we don't want to hang the GPU. - */ - fs_reg max_iter = fs_reg(this, glsl_type::int_type); - emit(fs_inst(BRW_OPCODE_MOV, max_iter, fs_reg(10000))); + if (ir->to) { + this->base_ir = ir->to; + ir->to->accept(this); + + fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, + counter, this->result)); + switch (ir->cmp) { + case ir_binop_equal: + inst->conditional_mod = BRW_CONDITIONAL_Z; + break; + case ir_binop_nequal: + inst->conditional_mod = BRW_CONDITIONAL_NZ; + break; + case ir_binop_gequal: + inst->conditional_mod = BRW_CONDITIONAL_GE; + break; + case ir_binop_lequal: + inst->conditional_mod = BRW_CONDITIONAL_LE; + break; + case ir_binop_greater: + inst->conditional_mod = BRW_CONDITIONAL_G; + break; + case ir_binop_less: + inst->conditional_mod = BRW_CONDITIONAL_L; + break; + default: + assert(!"not reached: unknown loop condition"); + this->fail = true; + break; + } + + inst = emit(fs_inst(BRW_OPCODE_BREAK)); + inst->predicated = true; + } foreach_iter(exec_list_iterator, iter, ir->body_instructions) { ir_instruction *ir = (ir_instruction *)iter.get(); - fs_inst *inst; this->base_ir = ir; ir->accept(this); + } - /* Check the maximum loop iters counter. 
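The ir_loop handling that begins above lowers a counted GLSL loop into DO/WHILE with a predicated BREAK: initialize the counter from `from`, test the exit comparison at the top of each iteration, run the body, then apply the increment. A C model of that control flow (exit_test stands in for ir->cmp; names are illustrative):

static int run_counted_loop(int from, int to, int increment,
                            int (*exit_test)(int counter, int to),
                            void (*body)(int counter))
{
   int counter = from;                 /* MOV counter, from */
   for (;;) {                          /* BRW_OPCODE_DO */
      if (exit_test(counter, to))      /* CMP + predicated BREAK */
         break;
      body(counter);                   /* loop body instructions */
      counter += increment;            /* ADD counter, counter, increment */
   }                                   /* BRW_OPCODE_WHILE */
   return counter;
}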
*/ - inst = emit(fs_inst(BRW_OPCODE_ADD, max_iter, max_iter, fs_reg(-1))); - inst->conditional_mod = BRW_CONDITIONAL_Z; - - inst = emit(fs_inst(BRW_OPCODE_BREAK)); - inst->predicated = true; + if (ir->increment) { + this->base_ir = ir->increment; + ir->increment->accept(this); + emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result)); } emit(fs_inst(BRW_OPCODE_WHILE)); @@ -1186,6 +1645,7 @@ fs_visitor::emit_dummy_fs() write = emit(fs_inst(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0))); + write->base_mrf = 0; } /* The register location here is relative to the start of the URB @@ -1195,22 +1655,19 @@ fs_visitor::emit_dummy_fs() struct brw_reg fs_visitor::interp_reg(int location, int channel) { - int regnr = location * 2 + channel / 2; + int regnr = urb_setup[location] * 2 + channel / 2; int stride = (channel & 1) * 4; + assert(urb_setup[location] != -1); + return brw_vec1_grf(regnr, stride); } /** Emits the interpolation for the varying inputs. */ void -fs_visitor::emit_interpolation() +fs_visitor::emit_interpolation_setup_gen4() { struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW); - /* For now, the source regs for the setup URB data will be unset, - * since we don't know until codegen how many push constants we'll - * use, and therefore what the setup URB offset is. - */ - fs_reg src_reg = reg_undef; this->current_annotation = "compute pixel centers"; this->pixel_x = fs_reg(this, glsl_type::uint_type); @@ -1227,8 +1684,14 @@ fs_visitor::emit_interpolation() fs_reg(brw_imm_v(0x11001100)))); this->current_annotation = "compute pixel deltas from v0"; - this->delta_x = fs_reg(this, glsl_type::float_type); - this->delta_y = fs_reg(this, glsl_type::float_type); + if (brw->has_pln) { + this->delta_x = fs_reg(this, glsl_type::vec2_type); + this->delta_y = this->delta_x; + this->delta_y.reg_offset++; + } else { + this->delta_x = fs_reg(this, glsl_type::float_type); + this->delta_y = fs_reg(this, glsl_type::float_type); + } emit(fs_inst(BRW_OPCODE_ADD, this->delta_x, this->pixel_x, @@ -1236,97 +1699,148 @@ fs_visitor::emit_interpolation() emit(fs_inst(BRW_OPCODE_ADD, this->delta_y, this->pixel_y, - fs_reg(brw_vec1_grf(1, 1)))); + fs_reg(negate(brw_vec1_grf(1, 1))))); this->current_annotation = "compute pos.w and 1/pos.w"; - /* Compute wpos. Unlike many other varying inputs, we usually need it - * to produce 1/w, and the varying variable wouldn't show up. + /* Compute wpos.w. It's always in our setup, since it's needed to + * interpolate the other attributes. */ - fs_reg wpos = fs_reg(this, glsl_type::vec4_type); - this->interp_attrs[FRAG_ATTRIB_WPOS] = wpos; - emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x)); /* FINISHME: ARB_fcc */ - wpos.reg_offset++; - emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y)); /* FINISHME: ARB_fcc */ - wpos.reg_offset++; - emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y, - interp_reg(FRAG_ATTRIB_WPOS, 2))); - wpos.reg_offset++; - emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y, + this->wpos_w = fs_reg(this, glsl_type::float_type); + emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y, interp_reg(FRAG_ATTRIB_WPOS, 3))); - /* Compute the pixel W value from wpos.w. */ + /* Compute the pixel 1/W value from wpos.w. */ this->pixel_w = fs_reg(this, glsl_type::float_type); - emit(fs_inst(FS_OPCODE_RCP, this->pixel_w, wpos)); + emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w); + this->current_annotation = NULL; +} - /* FINISHME: gl_FrontFacing */ +/** Emits the interpolation for the varying inputs. 
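A model of the pixel-center computation in emit_interpolation_setup_gen4() above: g1 carries the X/Y origins of the two 2x2 subspans, and the immediate vectors 0x10101010 / 0x11001100 add the per-pixel offsets within each subspan (this sketch assumes brw_imm_v packs its eight 4-bit values lowest nibble first, giving X offsets 0,1,0,1,... and Y offsets 0,0,1,1,...):

static void pixel_centers(const int origin_x[2], const int origin_y[2],
                          int px[8], int py[8])
{
   static const int xoff[8] = { 0, 1, 0, 1, 0, 1, 0, 1 };   /* brw_imm_v(0x10101010) */
   static const int yoff[8] = { 0, 0, 1, 1, 0, 0, 1, 1 };   /* brw_imm_v(0x11001100) */
   for (int i = 0; i < 8; i++) {
      px[i] = origin_x[i / 4] + xoff[i];   /* subspan origin replicated 4x by the region */
      py[i] = origin_y[i / 4] + yoff[i];
   }
}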
*/ +void +fs_visitor::emit_interpolation_setup_gen6() +{ + struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW); - foreach_iter(exec_list_iterator, iter, *this->shader->ir) { - ir_instruction *ir = (ir_instruction *)iter.get(); - ir_variable *var = ir->as_variable(); + /* If the pixel centers end up used, the setup is the same as for gen4. */ + this->current_annotation = "compute pixel centers"; + fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type); + fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type); + int_pixel_x.type = BRW_REGISTER_TYPE_UW; + int_pixel_y.type = BRW_REGISTER_TYPE_UW; + emit(fs_inst(BRW_OPCODE_ADD, + int_pixel_x, + fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)), + fs_reg(brw_imm_v(0x10101010)))); + emit(fs_inst(BRW_OPCODE_ADD, + int_pixel_y, + fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)), + fs_reg(brw_imm_v(0x11001100)))); - if (!var) - continue; + /* As of gen6, we can no longer mix float and int sources. We have + * to turn the integer pixel centers into floats for their actual + * use. + */ + this->pixel_x = fs_reg(this, glsl_type::float_type); + this->pixel_y = fs_reg(this, glsl_type::float_type); + emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x)); + emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y)); - if (var->mode != ir_var_in) - continue; + this->current_annotation = "compute 1/pos.w"; + this->wpos_w = fs_reg(brw_vec8_grf(c->key.source_w_reg, 0)); + this->pixel_w = fs_reg(this, glsl_type::float_type); + emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w); - /* If it's already set up (WPOS), skip. */ - if (var->location == 0) - continue; + this->delta_x = fs_reg(brw_vec8_grf(2, 0)); + this->delta_y = fs_reg(brw_vec8_grf(3, 0)); - this->current_annotation = talloc_asprintf(this->mem_ctx, - "interpolate %s " - "(FRAG_ATTRIB[%d])", - var->name, - var->location); - emit_pinterp(var->location); - } this->current_annotation = NULL; } void -fs_visitor::emit_pinterp(int location) +fs_visitor::emit_fb_writes() { - fs_reg interp_attr = fs_reg(this, glsl_type::vec4_type); - this->interp_attrs[location] = interp_attr; + this->current_annotation = "FB write header"; + GLboolean header_present = GL_TRUE; + int nr = 0; + + if (intel->gen >= 6 && + !this->kill_emitted && + c->key.nr_color_regions == 1) { + header_present = false; + } - for (unsigned int i = 0; i < 4; i++) { - struct brw_reg interp = interp_reg(location, i); - emit(fs_inst(FS_OPCODE_LINTERP, - interp_attr, - this->delta_x, - this->delta_y, - fs_reg(interp))); - interp_attr.reg_offset++; + if (header_present) { + /* m0, m1 header */ + nr += 2; } - interp_attr.reg_offset -= 4; - for (unsigned int i = 0; i < 4; i++) { - emit(fs_inst(BRW_OPCODE_MUL, - interp_attr, - interp_attr, - this->pixel_w)); - interp_attr.reg_offset++; + if (c->key.aa_dest_stencil_reg) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), + fs_reg(brw_vec8_grf(c->key.aa_dest_stencil_reg, 0)))); } -} -void -fs_visitor::emit_fb_writes() -{ - this->current_annotation = "FB write"; + /* Reserve space for color. It'll be filled in per MRT below. */ + int color_mrf = nr; + nr += 4; + + if (c->key.source_depth_to_render_target) { + if (c->key.computes_depth) { + /* Hand over gl_FragDepth. */ + assert(this->frag_depth); + fs_reg depth = *(variable_storage(this->frag_depth)); + + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth)); + } else { + /* Pass through the payload depth. 
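The message-length bookkeeping that emit_fb_writes() starts above is a running count of the MRFs filled in: an optional two-register header, optional antialias/stencil data, four registers of color per render target, then optional source and destination depth. Condensed into one helper (booleans as ints, illustrative only):

static int fb_write_mlen(int header_present, int aa_dest_stencil,
                         int source_depth, int dest_depth)
{
   int nr = 0;
   if (header_present) nr += 2;   /* m0/m1 header */
   if (aa_dest_stencil) nr += 1;
   nr += 4;                       /* color, rewritten per MRT */
   if (source_depth)   nr += 1;   /* gl_FragDepth or payload depth */
   if (dest_depth)     nr += 1;
   return nr;
}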
*/ + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), + fs_reg(brw_vec8_grf(c->key.source_depth_reg, 0)))); + } + } + + if (c->key.dest_depth_reg) { + emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), + fs_reg(brw_vec8_grf(c->key.dest_depth_reg, 0)))); + } + + fs_reg color = reg_undef; + if (this->frag_color) + color = *(variable_storage(this->frag_color)); + else if (this->frag_data) + color = *(variable_storage(this->frag_data)); - assert(this->frag_color || !"FINISHME: MRT"); - fs_reg color = *(variable_storage(this->frag_color)); + for (int target = 0; target < c->key.nr_color_regions; target++) { + this->current_annotation = talloc_asprintf(this->mem_ctx, + "FB write target %d", + target); + if (this->frag_color || this->frag_data) { + for (int i = 0; i < 4; i++) { + emit(fs_inst(BRW_OPCODE_MOV, + fs_reg(MRF, color_mrf + i), + color)); + color.reg_offset++; + } + } - for (int i = 0; i < 4; i++) { - emit(fs_inst(BRW_OPCODE_MOV, - fs_reg(MRF, 2 + i), - color)); - color.reg_offset++; + if (this->frag_color) + color.reg_offset -= 4; + + fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE, + reg_undef, reg_undef)); + inst->target = target; + inst->base_mrf = 0; + inst->mlen = nr; + if (target == c->key.nr_color_regions - 1) + inst->eot = true; + inst->header_present = header_present; } - emit(fs_inst(FS_OPCODE_FB_WRITE, - fs_reg(0), - fs_reg(0))); + if (c->key.nr_color_regions == 0) { + fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE, + reg_undef, reg_undef)); + inst->base_mrf = 0; + inst->mlen = nr; + inst->eot = true; + inst->header_present = header_present; + } this->current_annotation = NULL; } @@ -1334,8 +1848,8 @@ fs_visitor::emit_fb_writes() void fs_visitor::generate_fb_write(fs_inst *inst) { - GLboolean eot = 1; /* FINISHME: MRT */ - /* FINISHME: AADS */ + GLboolean eot = inst->eot; + struct brw_reg implied_header; /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied * move, here's g1. @@ -1343,20 +1857,33 @@ fs_visitor::generate_fb_write(fs_inst *inst) brw_push_insn_state(p); brw_set_mask_control(p, BRW_MASK_DISABLE); brw_set_compression_control(p, BRW_COMPRESSION_NONE); - brw_MOV(p, - brw_message_reg(1), - brw_vec8_grf(1, 0)); - brw_pop_insn_state(p); - int nr = 2 + 4; + if (inst->header_present) { + if (intel->gen >= 6) { + brw_MOV(p, + brw_message_reg(inst->base_mrf), + brw_vec8_grf(0, 0)); + implied_header = brw_null_reg(); + } else { + implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW); + } + + brw_MOV(p, + brw_message_reg(inst->base_mrf + 1), + brw_vec8_grf(1, 0)); + } else { + implied_header = brw_null_reg(); + } + + brw_pop_insn_state(p); brw_fb_WRITE(p, 8, /* dispatch_width */ retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW), - 0, /* base MRF */ - retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW), - 0, /* FINISHME: MRT target */ - nr, + inst->base_mrf, + implied_header, + inst->target, + inst->mlen, 0, eot); } @@ -1416,26 +1943,41 @@ fs_visitor::generate_math(fs_inst *inst, break; } - if (inst->opcode == FS_OPCODE_POW) { - brw_MOV(p, brw_message_reg(3), src[1]); + if (intel->gen >= 6) { + assert(inst->mlen == 0); + + if (inst->opcode == FS_OPCODE_POW) { + brw_math2(p, dst, op, src[0], src[1]); + } else { + brw_math(p, dst, + op, + inst->saturate ? BRW_MATH_SATURATE_SATURATE : + BRW_MATH_SATURATE_NONE, + 0, src[0], + BRW_MATH_DATA_VECTOR, + BRW_MATH_PRECISION_FULL); + } + } else { + assert(inst->mlen >= 1); + + brw_math(p, dst, + op, + inst->saturate ? 
BRW_MATH_SATURATE_SATURATE : + BRW_MATH_SATURATE_NONE, + inst->base_mrf, src[0], + BRW_MATH_DATA_VECTOR, + BRW_MATH_PRECISION_FULL); } - - brw_math(p, dst, - op, - inst->saturate ? BRW_MATH_SATURATE_SATURATE : - BRW_MATH_SATURATE_NONE, - 2, src[0], - BRW_MATH_DATA_VECTOR, - BRW_MATH_PRECISION_FULL); } void -fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src) +fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst) { int msg_type = -1; int rlen = 4; + uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8; - if (intel->gen == 5) { + if (intel->gen >= 5) { switch (inst->opcode) { case FS_OPCODE_TEX: if (inst->shadow_compare) { @@ -1458,60 +2000,129 @@ fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src) /* Note that G45 and older determines shadow compare and dispatch width * from message length for most messages. */ + msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE; if (inst->shadow_compare) { - msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE; + assert(inst->mlen == 6); } else { - msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE; + assert(inst->mlen <= 4); } + break; case FS_OPCODE_TXB: if (inst->shadow_compare) { - assert(!"FINISHME: shadow compare with bias."); - msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS; + assert(inst->mlen == 6); + msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE; } else { + assert(inst->mlen == 9); msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS; - rlen = 8; + simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16; } break; } } assert(msg_type != -1); - /* g0 header. */ - src.nr--; + if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) { + rlen = 8; + dst = vec16(dst); + } brw_SAMPLE(p, retype(dst, BRW_REGISTER_TYPE_UW), - src.nr, + inst->base_mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW), SURF_INDEX_TEXTURE(inst->sampler), inst->sampler, WRITEMASK_XYZW, msg_type, rlen, - inst->mlen + 1, + inst->mlen, 0, 1, - BRW_SAMPLER_SIMD_MODE_SIMD8); + simd_mode); } + +/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input + * looking like: + * + * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br + * + * and we're trying to produce: + * + * DDX DDY + * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl) + * (ss0.tr - ss0.tl) (ss0.tr - ss0.br) + * (ss0.br - ss0.bl) (ss0.tl - ss0.bl) + * (ss0.br - ss0.bl) (ss0.tr - ss0.br) + * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl) + * (ss1.tr - ss1.tl) (ss1.tr - ss1.br) + * (ss1.br - ss1.bl) (ss1.tl - ss1.bl) + * (ss1.br - ss1.bl) (ss1.tr - ss1.br) + * + * and add another set of two more subspans if in 16-pixel dispatch mode. + * + * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result + * for each pair, and vertstride = 2 jumps us 2 elements after processing a + * pair. But for DDY, it's harder, as we want to produce the pairs swizzled + * between each other. 
We could probably do it like ddx and swizzle the right + * order later, but bail for now and just produce + * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4) + */ void -fs_visitor::generate_discard(fs_inst *inst) +fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src) +{ + struct brw_reg src0 = brw_reg(src.file, src.nr, 1, + BRW_REGISTER_TYPE_F, + BRW_VERTICAL_STRIDE_2, + BRW_WIDTH_2, + BRW_HORIZONTAL_STRIDE_0, + BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); + struct brw_reg src1 = brw_reg(src.file, src.nr, 0, + BRW_REGISTER_TYPE_F, + BRW_VERTICAL_STRIDE_2, + BRW_WIDTH_2, + BRW_HORIZONTAL_STRIDE_0, + BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); + brw_ADD(p, dst, src0, negate(src1)); +} + +void +fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src) +{ + struct brw_reg src0 = brw_reg(src.file, src.nr, 0, + BRW_REGISTER_TYPE_F, + BRW_VERTICAL_STRIDE_4, + BRW_WIDTH_4, + BRW_HORIZONTAL_STRIDE_0, + BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); + struct brw_reg src1 = brw_reg(src.file, src.nr, 2, + BRW_REGISTER_TYPE_F, + BRW_VERTICAL_STRIDE_4, + BRW_WIDTH_4, + BRW_HORIZONTAL_STRIDE_0, + BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); + brw_ADD(p, dst, src0, negate(src1)); +} + +void +fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask) { - struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); brw_push_insn_state(p); brw_set_mask_control(p, BRW_MASK_DISABLE); - brw_NOT(p, c->emit_mask_reg, brw_mask_reg(1)); /* IMASK */ - brw_AND(p, g0, c->emit_mask_reg, g0); + brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */ brw_pop_insn_state(p); } -static void -trivial_assign_reg(int header_size, fs_reg *reg) +void +fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask) { - if (reg->file == GRF && reg->reg != 0) { - reg->hw_reg = header_size + reg->reg - 1 + reg->reg_offset; - reg->reg = 0; - } + struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); + mask = brw_uw1_reg(mask.file, mask.nr, 0); + + brw_push_insn_state(p); + brw_set_mask_control(p, BRW_MASK_DISABLE); + brw_AND(p, g0, mask, g0); + brw_pop_insn_state(p); } void @@ -1520,14 +2131,6 @@ fs_visitor::assign_curb_setup() c->prog_data.first_curbe_grf = c->key.nr_payload_regs; c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8; - if (intel->gen == 5 && (c->prog_data.first_curbe_grf + - c->prog_data.curb_read_length) & 1) { - /* Align the start of the interpolation coefficients so that we can use - * the PLN instruction. - */ - c->prog_data.first_curbe_grf++; - } - /* Map the offsets in the UNIFORM file to fixed HW regs. */ foreach_iter(exec_list_iterator, iter, this->instructions) { fs_inst *inst = (fs_inst *)iter.get(); @@ -1547,28 +2150,50 @@ fs_visitor::assign_curb_setup() } void -fs_visitor::assign_urb_setup() +fs_visitor::calculate_urb_setup() { - int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length; - int interp_reg_nr[FRAG_ATTRIB_MAX]; - - c->prog_data.urb_read_length = 0; + for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) { + urb_setup[i] = -1; + } + int urb_next = 0; /* Figure out where each of the incoming setup attributes lands. */ - for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) { - interp_reg_nr[i] = -1; + if (intel->gen >= 6) { + for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) { + if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) { + urb_setup[i] = urb_next++; + } + } + } else { + /* FINISHME: The sf doesn't map VS->FS inputs for us very well. 
*/ + for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) { + if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) { + int fp_index; + + if (i >= VERT_RESULT_VAR0) + fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0); + else if (i <= VERT_RESULT_TEX7) + fp_index = i; + else + fp_index = -1; + + if (fp_index >= 0) + urb_setup[fp_index] = urb_next++; + } + } + } - if (i != FRAG_ATTRIB_WPOS && - !(brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i))) - continue; + /* Each attribute is 4 setup channels, each of which is half a reg. */ + c->prog_data.urb_read_length = urb_next * 2; +} - /* Each attribute is 4 setup channels, each of which is half a reg. */ - interp_reg_nr[i] = urb_start + c->prog_data.urb_read_length; - c->prog_data.urb_read_length += 2; - } +void +fs_visitor::assign_urb_setup() +{ + int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length; - /* Map the register numbers for FS_OPCODE_LINTERP so that it uses - * the correct setup input. + /* Offset all the urb_setup[] index by the actual position of the + * setup regs, now that the location of the constants has been chosen. */ foreach_iter(exec_list_iterator, iter, this->instructions) { fs_inst *inst = (fs_inst *)iter.get(); @@ -1578,35 +2203,740 @@ fs_visitor::assign_urb_setup() assert(inst->src[2].file == FIXED_HW_REG); - int location = inst->src[2].fixed_hw_reg.nr / 2; - assert(interp_reg_nr[location] != -1); - inst->src[2].fixed_hw_reg.nr = (interp_reg_nr[location] + - (inst->src[2].fixed_hw_reg.nr & 1)); + inst->src[2].fixed_hw_reg.nr += urb_start; } this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length; } +static void +assign_reg(int *reg_hw_locations, fs_reg *reg) +{ + if (reg->file == GRF && reg->reg != 0) { + assert(reg->reg_offset >= 0); + reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset; + reg->reg = 0; + } +} + void -fs_visitor::assign_regs() +fs_visitor::assign_regs_trivial() { - int header_size = this->first_non_payload_grf; int last_grf = 0; + int hw_reg_mapping[this->virtual_grf_next]; + int i; + + hw_reg_mapping[0] = 0; + hw_reg_mapping[1] = this->first_non_payload_grf; + for (i = 2; i < this->virtual_grf_next; i++) { + hw_reg_mapping[i] = (hw_reg_mapping[i - 1] + + this->virtual_grf_sizes[i - 1]); + } + last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1]; - /* FINISHME: trivial assignment of register numbers */ foreach_iter(exec_list_iterator, iter, this->instructions) { fs_inst *inst = (fs_inst *)iter.get(); - trivial_assign_reg(header_size, &inst->dst); - trivial_assign_reg(header_size, &inst->src[0]); - trivial_assign_reg(header_size, &inst->src[1]); + assign_reg(hw_reg_mapping, &inst->dst); + assign_reg(hw_reg_mapping, &inst->src[0]); + assign_reg(hw_reg_mapping, &inst->src[1]); + } + + this->grf_used = last_grf + 1; +} + +void +fs_visitor::assign_regs() +{ + int last_grf = 0; + int hw_reg_mapping[this->virtual_grf_next + 1]; + int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf; + int class_sizes[base_reg_count]; + int class_count = 0; + int aligned_pair_class = -1; + + /* Set up the register classes. + * + * The base registers store a scalar value. For texture samples, + * we get virtual GRFs composed of 4 contiguous hw register. For + * structures and arrays, we store them as contiguous larger things + * than that, though we should be able to do better most of the + * time. 
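The pre-gen6 branch of calculate_urb_setup() above maps vertex results onto fragment inputs by hand: generic varyings are shifted from VERT_RESULT_VAR0 down onto FRAG_ATTRIB_VAR0, fixed-function slots up to TEX7 keep their index, and anything else has no FS input. The same mapping with stand-in enum values (the numbers are placeholders, not Mesa's):

enum { VR_TEX7 = 14, VR_VAR0 = 17, FA_VAR0 = 10 };   /* placeholders */

static int vert_result_to_frag_attrib(int vert_result)
{
   if (vert_result >= VR_VAR0)
      return vert_result - (VR_VAR0 - FA_VAR0);   /* generic varyings */
   else if (vert_result <= VR_TEX7)
      return vert_result;                         /* fixed-function slots */
   else
      return -1;                                  /* no corresponding FS input */
}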
+ */ + class_sizes[class_count++] = 1; + if (brw->has_pln && intel->gen < 6) { + /* Always set up the (unaligned) pairs for gen5, so we can find + * them for making the aligned pair class. + */ + class_sizes[class_count++] = 2; + } + for (int r = 1; r < this->virtual_grf_next; r++) { + int i; + + for (i = 0; i < class_count; i++) { + if (class_sizes[i] == this->virtual_grf_sizes[r]) + break; + } + if (i == class_count) { + if (this->virtual_grf_sizes[r] >= base_reg_count) { + fprintf(stderr, "Object too large to register allocate.\n"); + this->fail = true; + } + + class_sizes[class_count++] = this->virtual_grf_sizes[r]; + } + } + + int ra_reg_count = 0; + int class_base_reg[class_count]; + int class_reg_count[class_count]; + int classes[class_count + 1]; + + for (int i = 0; i < class_count; i++) { + class_base_reg[i] = ra_reg_count; + class_reg_count[i] = base_reg_count - (class_sizes[i] - 1); + ra_reg_count += class_reg_count[i]; + } + + struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count); + for (int i = 0; i < class_count; i++) { + classes[i] = ra_alloc_reg_class(regs); + + for (int i_r = 0; i_r < class_reg_count[i]; i_r++) { + ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r); + } + + /* Add conflicts between our contiguous registers aliasing + * base regs and other register classes' contiguous registers + * that alias base regs, or the base regs themselves for classes[0]. + */ + for (int c = 0; c <= i; c++) { + for (int i_r = 0; i_r < class_reg_count[i]; i_r++) { + for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1)); + c_r < MIN2(class_reg_count[c], i_r + class_sizes[i]); + c_r++) { + + if (0) { + printf("%d/%d conflicts %d/%d\n", + class_sizes[i], this->first_non_payload_grf + i_r, + class_sizes[c], this->first_non_payload_grf + c_r); + } + + ra_add_reg_conflict(regs, + class_base_reg[i] + i_r, + class_base_reg[c] + c_r); + } + } + } + } + + /* Add a special class for aligned pairs, which we'll put delta_x/y + * in on gen5 so that we can do PLN. + */ + if (brw->has_pln && intel->gen < 6) { + int reg_count = (base_reg_count - 1) / 2; + int unaligned_pair_class = 1; + assert(class_sizes[unaligned_pair_class] == 2); + + aligned_pair_class = class_count; + classes[aligned_pair_class] = ra_alloc_reg_class(regs); + class_sizes[aligned_pair_class] = 2; + class_base_reg[aligned_pair_class] = 0; + class_reg_count[aligned_pair_class] = 0; + int start = (this->first_non_payload_grf & 1) ? 1 : 0; + + for (int i = 0; i < reg_count; i++) { + ra_class_add_reg(regs, classes[aligned_pair_class], + class_base_reg[unaligned_pair_class] + i * 2 + start); + } + class_count++; + } + + ra_set_finalize(regs); + + struct ra_graph *g = ra_alloc_interference_graph(regs, + this->virtual_grf_next); + /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1 + * with nodes. 
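The nested conflict loops in assign_regs() above encode one relation: a class register occupying class_sizes[i] contiguous GRFs conflicts with any other class register whose GRF range intersects it. The interval test those loop bounds are expressing, written directly:

static int contiguous_regs_conflict(int start_a, int size_a,
                                    int start_b, int size_b)
{
   return start_a < start_b + size_b &&
          start_b < start_a + size_a;   /* GRF ranges [start, start+size) overlap */
}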
+ */ + ra_set_node_class(g, 0, classes[0]); + + for (int i = 1; i < this->virtual_grf_next; i++) { + for (int c = 0; c < class_count; c++) { + if (class_sizes[c] == this->virtual_grf_sizes[i]) { + if (aligned_pair_class >= 0 && + this->delta_x.reg == i) { + ra_set_node_class(g, i, classes[aligned_pair_class]); + } else { + ra_set_node_class(g, i, classes[c]); + } + break; + } + } + + for (int j = 1; j < i; j++) { + if (virtual_grf_interferes(i, j)) { + ra_add_node_interference(g, i, j); + } + } + } + + /* FINISHME: Handle spilling */ + if (!ra_allocate_no_spills(g)) { + fprintf(stderr, "Failed to allocate registers.\n"); + this->fail = true; + return; + } + + /* Get the chosen virtual registers for each node, and map virtual + * regs in the register classes back down to real hardware reg + * numbers. + */ + hw_reg_mapping[0] = 0; /* unused */ + for (int i = 1; i < this->virtual_grf_next; i++) { + int reg = ra_get_node_reg(g, i); + int hw_reg = -1; + + for (int c = 0; c < class_count; c++) { + if (reg >= class_base_reg[c] && + reg < class_base_reg[c] + class_reg_count[c]) { + hw_reg = reg - class_base_reg[c]; + break; + } + } + + assert(hw_reg >= 0); + hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg; + last_grf = MAX2(last_grf, + hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1); + } + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); - last_grf = MAX2(last_grf, inst->dst.hw_reg); - last_grf = MAX2(last_grf, inst->src[0].hw_reg); - last_grf = MAX2(last_grf, inst->src[1].hw_reg); + assign_reg(hw_reg_mapping, &inst->dst); + assign_reg(hw_reg_mapping, &inst->src[0]); + assign_reg(hw_reg_mapping, &inst->src[1]); } this->grf_used = last_grf + 1; + + talloc_free(g); + talloc_free(regs); +} + +/** + * Split large virtual GRFs into separate components if we can. + * + * This is mostly duplicated with what brw_fs_vector_splitting does, + * but that's really conservative because it's afraid of doing + * splitting that doesn't result in real progress after the rest of + * the optimization phases, which would cause infinite looping in + * optimization. We can do it once here, safely. This also has the + * opportunity to split interpolated values, or maybe even uniforms, + * which we don't have at the IR level. + * + * We want to split, because virtual GRFs are what we register + * allocate and spill (due to contiguousness requirements for some + * instructions), and they're what we naturally generate in the + * codegen process, but most virtual GRFs don't actually need to be + * contiguous sets of GRFs. If we split, we'll end up with reduced + * live intervals and better dead code elimination and coalescing. + */ +void +fs_visitor::split_virtual_grfs() +{ + int num_vars = this->virtual_grf_next; + bool split_grf[num_vars]; + int new_virtual_grf[num_vars]; + + /* Try to split anything > 0 sized. */ + for (int i = 0; i < num_vars; i++) { + if (this->virtual_grf_sizes[i] != 1) + split_grf[i] = true; + else + split_grf[i] = false; + } + + if (brw->has_pln) { + /* PLN opcodes rely on the delta_xy being contiguous. */ + split_grf[this->delta_x.reg] = false; + } + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + /* Texturing produces 4 contiguous registers, so no splitting. 
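Once the allocator has run, the tail of assign_regs() above converts each chosen ra register back into a hardware GRF: find the class whose contiguous block contains it, subtract that class's base, and offset past the payload and setup registers. The same lookup in isolation:

static int ra_reg_to_hw_grf(int ra_reg, int class_count,
                            const int *class_base_reg, const int *class_reg_count,
                            int first_non_payload_grf)
{
   for (int c = 0; c < class_count; c++) {
      if (ra_reg >= class_base_reg[c] &&
          ra_reg < class_base_reg[c] + class_reg_count[c])
         return first_non_payload_grf + (ra_reg - class_base_reg[c]);
   }
   return -1;   /* not reached when the allocator returned a valid register */
}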
*/ + if ((inst->opcode == FS_OPCODE_TEX || + inst->opcode == FS_OPCODE_TXB || + inst->opcode == FS_OPCODE_TXL) && + inst->dst.file == GRF) { + split_grf[inst->dst.reg] = false; + } + } + + /* Allocate new space for split regs. Note that the virtual + * numbers will be contiguous. + */ + for (int i = 0; i < num_vars; i++) { + if (split_grf[i]) { + new_virtual_grf[i] = virtual_grf_alloc(1); + for (int j = 2; j < this->virtual_grf_sizes[i]; j++) { + int reg = virtual_grf_alloc(1); + assert(reg == new_virtual_grf[i] + j - 1); + } + this->virtual_grf_sizes[i] = 1; + } + } + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + if (inst->dst.file == GRF && + split_grf[inst->dst.reg] && + inst->dst.reg_offset != 0) { + inst->dst.reg = (new_virtual_grf[inst->dst.reg] + + inst->dst.reg_offset - 1); + inst->dst.reg_offset = 0; + } + for (int i = 0; i < 3; i++) { + if (inst->src[i].file == GRF && + split_grf[inst->src[i].reg] && + inst->src[i].reg_offset != 0) { + inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] + + inst->src[i].reg_offset - 1); + inst->src[i].reg_offset = 0; + } + } + } +} + +void +fs_visitor::calculate_live_intervals() +{ + int num_vars = this->virtual_grf_next; + int *def = talloc_array(mem_ctx, int, num_vars); + int *use = talloc_array(mem_ctx, int, num_vars); + int loop_depth = 0; + int loop_start = 0; + + for (int i = 0; i < num_vars; i++) { + def[i] = 1 << 30; + use[i] = -1; + } + + int ip = 0; + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + if (inst->opcode == BRW_OPCODE_DO) { + if (loop_depth++ == 0) + loop_start = ip; + } else if (inst->opcode == BRW_OPCODE_WHILE) { + loop_depth--; + + if (loop_depth == 0) { + /* FINISHME: + * + * Patches up any vars marked for use within the loop as + * live until the end. This is conservative, as there + * will often be variables defined and used inside the + * loop but dead at the end of the loop body. + */ + for (int i = 0; i < num_vars; i++) { + if (use[i] == loop_start) { + use[i] = ip; + } + } + } + } else { + int eip = ip; + + if (loop_depth) + eip = loop_start; + + for (unsigned int i = 0; i < 3; i++) { + if (inst->src[i].file == GRF && inst->src[i].reg != 0) { + use[inst->src[i].reg] = MAX2(use[inst->src[i].reg], eip); + } + } + if (inst->dst.file == GRF && inst->dst.reg != 0) { + def[inst->dst.reg] = MIN2(def[inst->dst.reg], eip); + } + } + + ip++; + } + + talloc_free(this->virtual_grf_def); + talloc_free(this->virtual_grf_use); + this->virtual_grf_def = def; + this->virtual_grf_use = use; +} + +/** + * Attempts to move immediate constants into the immediate + * constant slot of following instructions. + * + * Immediate constants are a bit tricky -- they have to be in the last + * operand slot, you can't do abs/negate on them, + */ + +bool +fs_visitor::propagate_constants() +{ + bool progress = false; + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + if (inst->opcode != BRW_OPCODE_MOV || + inst->predicated || + inst->dst.file != GRF || inst->src[0].file != IMM || + inst->dst.type != inst->src[0].type) + continue; + + /* Don't bother with cases where we should have had the + * operation on the constant folded in GLSL already. + */ + if (inst->saturate) + continue; + + /* Found a move of a constant to a GRF. Find anything else using the GRF + * before it's written, and replace it with the constant if we can. 
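calculate_live_intervals() above boils down to one pass that records the first write (def) and last read (use) of every virtual GRF, with reads inside a loop pinned to the loop start and then stretched to the loop end at the closing WHILE, exactly the conservative patch-up its FINISHME comment describes. A compact model over a flat instruction array (stand-in types):

struct simple_inst { int dst; int src[3]; int is_do, is_while; };

static void live_intervals(const struct simple_inst *insts, int n,
                           int *def, int *use, int num_vars)
{
   int loop_depth = 0, loop_start = 0;

   for (int i = 0; i < num_vars; i++) { def[i] = 1 << 30; use[i] = -1; }

   for (int ip = 0; ip < n; ip++) {
      const struct simple_inst *inst = &insts[ip];
      if (inst->is_do) {
         if (loop_depth++ == 0)
            loop_start = ip;
      } else if (inst->is_while) {
         if (--loop_depth == 0) {
            for (int i = 0; i < num_vars; i++)
               if (use[i] == loop_start)
                  use[i] = ip;                    /* keep loop values live to the end */
         }
      } else {
         int eip = loop_depth ? loop_start : ip;
         for (int s = 0; s < 3; s++)
            if (inst->src[s] > 0 && eip > use[inst->src[s]])
               use[inst->src[s]] = eip;           /* last read so far */
         if (inst->dst > 0 && eip < def[inst->dst])
            def[inst->dst] = eip;                 /* first write */
      }
   }
}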
+ */ + exec_list_iterator scan_iter = iter; + scan_iter.next(); + for (; scan_iter.has_next(); scan_iter.next()) { + fs_inst *scan_inst = (fs_inst *)scan_iter.get(); + + if (scan_inst->opcode == BRW_OPCODE_DO || + scan_inst->opcode == BRW_OPCODE_WHILE || + scan_inst->opcode == BRW_OPCODE_ELSE || + scan_inst->opcode == BRW_OPCODE_ENDIF) { + break; + } + + for (int i = 2; i >= 0; i--) { + if (scan_inst->src[i].file != GRF || + scan_inst->src[i].reg != inst->dst.reg || + scan_inst->src[i].reg_offset != inst->dst.reg_offset) + continue; + + /* Don't bother with cases where we should have had the + * operation on the constant folded in GLSL already. + */ + if (scan_inst->src[i].negate || scan_inst->src[i].abs) + continue; + + switch (scan_inst->opcode) { + case BRW_OPCODE_MOV: + scan_inst->src[i] = inst->src[0]; + progress = true; + break; + + case BRW_OPCODE_MUL: + case BRW_OPCODE_ADD: + if (i == 1) { + scan_inst->src[i] = inst->src[0]; + progress = true; + } else if (i == 0 && scan_inst->src[1].file != IMM) { + /* Fit this constant in by commuting the operands */ + scan_inst->src[0] = scan_inst->src[1]; + scan_inst->src[1] = inst->src[0]; + } + break; + case BRW_OPCODE_CMP: + if (i == 1) { + scan_inst->src[i] = inst->src[0]; + progress = true; + } + } + } + + if (scan_inst->dst.file == GRF && + scan_inst->dst.reg == inst->dst.reg && + (scan_inst->dst.reg_offset == inst->dst.reg_offset || + scan_inst->opcode == FS_OPCODE_TEX)) { + break; + } + } + } + + return progress; +} +/** + * Must be called after calculate_live_intervales() to remove unused + * writes to registers -- register allocation will fail otherwise + * because something deffed but not used won't be considered to + * interfere with other regs. + */ +bool +fs_visitor::dead_code_eliminate() +{ + bool progress = false; + int num_vars = this->virtual_grf_next; + bool dead[num_vars]; + + for (int i = 0; i < num_vars; i++) { + dead[i] = this->virtual_grf_def[i] >= this->virtual_grf_use[i]; + + if (dead[i]) { + /* Mark off its interval so it won't interfere with anything. */ + this->virtual_grf_def[i] = -1; + this->virtual_grf_use[i] = -1; + } + } + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + if (inst->dst.file == GRF && dead[inst->dst.reg]) { + inst->remove(); + progress = true; + } + } + + return progress; +} + +bool +fs_visitor::register_coalesce() +{ + bool progress = false; + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + if (inst->opcode != BRW_OPCODE_MOV || + inst->predicated || + inst->saturate || + inst->dst.file != GRF || inst->src[0].file != GRF || + inst->dst.type != inst->src[0].type) + continue; + + /* Found a move of a GRF to a GRF. Let's see if we can coalesce + * them: check for no writes to either one until the exit of the + * program. 
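The ADD/MUL case of propagate_constants() above depends on commutativity: an immediate can only sit in the last source slot, so when the constant would land in src0 the two operands are swapped first, and the propagation is skipped if src1 is already an immediate. That slot juggling in isolation (stand-in operand type):

struct operand { int is_imm; int reg; float imm; };

static int put_constant_in_src1(struct operand src[2], float constant,
                                int slot_that_read_the_grf /* 0 or 1 */)
{
   if (slot_that_read_the_grf == 0) {
      if (src[1].is_imm)
         return 0;           /* both sources can't be immediates */
      src[0] = src[1];       /* commute the commutative operation */
   }
   src[1].is_imm = 1;        /* the immediate must be the last operand */
   src[1].imm = constant;
   return 1;
}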
+ */ + bool interfered = false; + exec_list_iterator scan_iter = iter; + scan_iter.next(); + for (; scan_iter.has_next(); scan_iter.next()) { + fs_inst *scan_inst = (fs_inst *)scan_iter.get(); + + if (scan_inst->opcode == BRW_OPCODE_DO || + scan_inst->opcode == BRW_OPCODE_WHILE || + scan_inst->opcode == BRW_OPCODE_ENDIF) { + interfered = true; + iter = scan_iter; + break; + } + + if (scan_inst->dst.file == GRF) { + if (scan_inst->dst.reg == inst->dst.reg && + (scan_inst->dst.reg_offset == inst->dst.reg_offset || + scan_inst->opcode == FS_OPCODE_TEX)) { + interfered = true; + break; + } + if (scan_inst->dst.reg == inst->src[0].reg && + (scan_inst->dst.reg_offset == inst->src[0].reg_offset || + scan_inst->opcode == FS_OPCODE_TEX)) { + interfered = true; + break; + } + } + } + if (interfered) { + continue; + } + + /* Update live interval so we don't have to recalculate. */ + this->virtual_grf_use[inst->src[0].reg] = MAX2(virtual_grf_use[inst->src[0].reg], + virtual_grf_use[inst->dst.reg]); + + /* Rewrite the later usage to point at the source of the move to + * be removed. + */ + for (exec_list_iterator scan_iter = iter; scan_iter.has_next(); + scan_iter.next()) { + fs_inst *scan_inst = (fs_inst *)scan_iter.get(); + + for (int i = 0; i < 3; i++) { + if (scan_inst->src[i].file == GRF && + scan_inst->src[i].reg == inst->dst.reg && + scan_inst->src[i].reg_offset == inst->dst.reg_offset) { + scan_inst->src[i].reg = inst->src[0].reg; + scan_inst->src[i].reg_offset = inst->src[0].reg_offset; + scan_inst->src[i].abs |= inst->src[0].abs; + scan_inst->src[i].negate ^= inst->src[0].negate; + } + } + } + + inst->remove(); + progress = true; + } + + return progress; +} + + +bool +fs_visitor::compute_to_mrf() +{ + bool progress = false; + int next_ip = 0; + + foreach_iter(exec_list_iterator, iter, this->instructions) { + fs_inst *inst = (fs_inst *)iter.get(); + + int ip = next_ip; + next_ip++; + + if (inst->opcode != BRW_OPCODE_MOV || + inst->predicated || + inst->dst.file != MRF || inst->src[0].file != GRF || + inst->dst.type != inst->src[0].type || + inst->src[0].abs || inst->src[0].negate) + continue; + + /* Can't compute-to-MRF this GRF if someone else was going to + * read it later. + */ + if (this->virtual_grf_use[inst->src[0].reg] > ip) + continue; + + /* Found a move of a GRF to a MRF. Let's see if we can go + * rewrite the thing that made this GRF to write into the MRF. + */ + bool found = false; + fs_inst *scan_inst; + for (scan_inst = (fs_inst *)inst->prev; + scan_inst->prev != NULL; + scan_inst = (fs_inst *)scan_inst->prev) { + /* We don't handle flow control here. Most computation of + * values that end up in MRFs are shortly before the MRF + * write anyway. + */ + if (scan_inst->opcode == BRW_OPCODE_DO || + scan_inst->opcode == BRW_OPCODE_WHILE || + scan_inst->opcode == BRW_OPCODE_ENDIF) { + break; + } + + /* You can't read from an MRF, so if someone else reads our + * MRF's source GRF that we wanted to rewrite, that stops us. + */ + bool interfered = false; + for (int i = 0; i < 3; i++) { + if (scan_inst->src[i].file == GRF && + scan_inst->src[i].reg == inst->src[0].reg && + scan_inst->src[i].reg_offset == inst->src[0].reg_offset) { + interfered = true; + } + } + if (interfered) + break; + + if (scan_inst->dst.file == MRF && + scan_inst->dst.hw_reg == inst->dst.hw_reg) { + /* Somebody else wrote our MRF here, so we can't can't + * compute-to-MRF before that. 
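The coalescing pass above fires only when neither side of a GRF-to-GRF MOV is written again before the end of the program; later reads of the destination are then redirected at the source, the two live intervals are merged, and the MOV is deleted. A toy version of the rewrite step (simplified registers, no flow control or interval bookkeeping) is sketched below.

// Sketch of coalescing "MOV dst, src": later reads of dst are redirected
// to src and the MOV disappears. Toy representation, not fs_inst.
#include <cstddef>
#include <cstdio>
#include <vector>

struct toy_inst {
   const char *name;
   int dst;
   int src[2];          // -1 when unused
};

int main()
{
   // MOV r4, r2  followed by  ADD r5, r4, r3
   std::vector<toy_inst> insts = {
      { "MOV", 4, { 2, -1 } },
      { "ADD", 5, { 4,  3 } },
   };

   const toy_inst &mov = insts[0];

   // Redirect every later read of mov.dst at mov.src[0], then drop the MOV.
   for (size_t i = 1; i < insts.size(); i++)
      for (int &s : insts[i].src)
         if (s == mov.dst)
            s = mov.src[0];
   insts.erase(insts.begin());

   for (const toy_inst &inst : insts)
      printf("%s r%d, r%d, r%d\n", inst.name, inst.dst,
             inst.src[0], inst.src[1]);
   return 0;
}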
+ */ + break; + } + + if (scan_inst->mlen > 0) { + /* Found a SEND instruction, which will do some amount of + * implied write that may overwrite our MRF that we were + * hoping to compute-to-MRF somewhere above it. Nothing + * we have implied-writes more than 2 MRFs from base_mrf, + * though. + */ + int implied_write_len = MIN2(scan_inst->mlen, 2); + if (inst->dst.hw_reg >= scan_inst->base_mrf && + inst->dst.hw_reg < scan_inst->base_mrf + implied_write_len) { + break; + } + } + + if (scan_inst->dst.file == GRF && + scan_inst->dst.reg == inst->src[0].reg) { + /* Found the last thing to write our reg we want to turn + * into a compute-to-MRF. + */ + + if (scan_inst->opcode == FS_OPCODE_TEX) { + /* texturing writes several continuous regs, so we can't + * compute-to-mrf that. + */ + break; + } + + /* If it's predicated, it (probably) didn't populate all + * the channels. + */ + if (scan_inst->predicated) + break; + + /* SEND instructions can't have MRF as a destination. */ + if (scan_inst->mlen) + break; + + if (intel->gen >= 6) { + /* gen6 math instructions must have the destination be + * GRF, so no compute-to-MRF for them. + */ + if (scan_inst->opcode == FS_OPCODE_RCP || + scan_inst->opcode == FS_OPCODE_RSQ || + scan_inst->opcode == FS_OPCODE_SQRT || + scan_inst->opcode == FS_OPCODE_EXP2 || + scan_inst->opcode == FS_OPCODE_LOG2 || + scan_inst->opcode == FS_OPCODE_SIN || + scan_inst->opcode == FS_OPCODE_COS || + scan_inst->opcode == FS_OPCODE_POW) { + break; + } + } + + if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) { + /* Found the creator of our MRF's source value. */ + found = true; + break; + } + } + } + if (found) { + scan_inst->dst.file = MRF; + scan_inst->dst.hw_reg = inst->dst.hw_reg; + scan_inst->saturate |= inst->saturate; + inst->remove(); + progress = true; + } + } + + return progress; +} + +bool +fs_visitor::virtual_grf_interferes(int a, int b) +{ + int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]); + int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]); + + /* For dead code, just check if the def interferes with the other range. 
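compute_to_mrf() above walks backwards from a GRF-to-MRF MOV looking for the instruction that produced the GRF and, if nothing in between reads that GRF or clobbers the MRF, retargets the producer at the MRF directly. The real pass also bails on flow control, predication, texturing, SEND implied writes and gen6 math; the sketch below keeps only the core retargeting step, with toy instructions in place of fs_inst.

// Sketch of compute-to-MRF: walk backwards from a "MOV mrf, grf" to the
// instruction that produced the GRF and make it write the MRF directly.
#include <cstdio>
#include <vector>

struct toy_inst {
   const char *op;
   bool dst_is_mrf;
   int dst;             // GRF or MRF number
   int src[2];          // GRF numbers, -1 when unused
};

static void print_prog(const std::vector<toy_inst> &insts)
{
   for (const toy_inst &i : insts)
      printf("%-4s %c%d, r%d, r%d\n", i.op, i.dst_is_mrf ? 'm' : 'r',
             i.dst, i.src[0], i.src[1]);
}

int main()
{
   std::vector<toy_inst> insts = {
      { "ADD", false, 7, { 1, 2 } },    // produces r7
      { "MOV", true,  3, { 7, -1 } },   // copies r7 into m3
   };

   const int mov = 1;
   const int grf = insts[mov].src[0], mrf = insts[mov].dst;

   for (int i = mov - 1; i >= 0; i--) {
      if (!insts[i].dst_is_mrf && insts[i].dst == grf) {
         insts[i].dst_is_mrf = true;      // retarget the producer at the MRF
         insts[i].dst = mrf;
         insts.erase(insts.begin() + mov);
         break;
      }
   }

   print_prog(insts);   // prints "ADD  m3, r1, r2"
   return 0;
}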
*/ + if (this->virtual_grf_use[a] == -1) { + return (this->virtual_grf_def[a] >= this->virtual_grf_def[b] && + this->virtual_grf_def[a] < this->virtual_grf_use[b]); + } + if (this->virtual_grf_use[b] == -1) { + return (this->virtual_grf_def[b] >= this->virtual_grf_def[a] && + this->virtual_grf_def[b] < this->virtual_grf_use[a]); + } + + return start < end; } static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg) @@ -1698,6 +3028,9 @@ fs_visitor::generate_code() case BRW_OPCODE_RNDD: brw_RNDD(p, dst, src[0]); break; + case BRW_OPCODE_RNDE: + brw_RNDE(p, dst, src[0]); + break; case BRW_OPCODE_RNDZ: brw_RNDZ(p, dst, src[0]); break; @@ -1711,6 +3044,18 @@ fs_visitor::generate_code() case BRW_OPCODE_XOR: brw_XOR(p, dst, src[0], src[1]); break; + case BRW_OPCODE_NOT: + brw_NOT(p, dst, src[0]); + break; + case BRW_OPCODE_ASR: + brw_ASR(p, dst, src[0], src[1]); + break; + case BRW_OPCODE_SHR: + brw_SHR(p, dst, src[0], src[1]); + break; + case BRW_OPCODE_SHL: + brw_SHL(p, dst, src[0], src[1]); + break; case BRW_OPCODE_CMP: brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]); @@ -1722,6 +3067,7 @@ fs_visitor::generate_code() case BRW_OPCODE_IF: assert(if_stack_depth < 16); if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8); + if_depth_in_loop[loop_stack_depth]++; if_stack_depth++; break; case BRW_OPCODE_ELSE: @@ -1731,6 +3077,7 @@ fs_visitor::generate_code() case BRW_OPCODE_ENDIF: if_stack_depth--; brw_ENDIF(p , if_stack[if_stack_depth]); + if_depth_in_loop[loop_stack_depth]--; break; case BRW_OPCODE_DO: @@ -1751,7 +3098,7 @@ fs_visitor::generate_code() struct brw_instruction *inst0, *inst1; GLuint br = 1; - if (intel->gen == 5) + if (intel->gen >= 5) br = 2; assert(loop_stack_depth > 0); @@ -1788,10 +3135,19 @@ fs_visitor::generate_code() case FS_OPCODE_TEX: case FS_OPCODE_TXB: case FS_OPCODE_TXL: - generate_tex(inst, dst, src[0]); + generate_tex(inst, dst); break; - case FS_OPCODE_DISCARD: - generate_discard(inst); + case FS_OPCODE_DISCARD_NOT: + generate_discard_not(inst, dst); + break; + case FS_OPCODE_DISCARD_AND: + generate_discard_and(inst, src[0]); + break; + case FS_OPCODE_DDX: + generate_ddx(inst, dst, src[0]); + break; + case FS_OPCODE_DDY: + generate_ddy(inst, dst, src[0]); break; case FS_OPCODE_FB_WRITE: generate_fb_write(inst); @@ -1834,22 +3190,14 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c) { struct brw_compile *p = &c->func; struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; - struct brw_shader *shader = NULL; + struct gl_context *ctx = &intel->ctx; struct gl_shader_program *prog = ctx->Shader.CurrentProgram; if (!prog) return GL_FALSE; - if (!using_new_fs) - return GL_FALSE; - - for (unsigned int i = 0; i < prog->_NumLinkedShaders; i++) { - if (prog->_LinkedShaders[i]->Type == GL_FRAGMENT_SHADER) { - shader = (struct brw_shader *)prog->_LinkedShaders[i]; - break; - } - } + struct brw_shader *shader = + (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; if (!shader) return GL_FALSE; @@ -1874,7 +3222,11 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c) if (0) { v.emit_dummy_fs(); } else { - v.emit_interpolation(); + v.calculate_urb_setup(); + if (intel->gen < 6) + v.emit_interpolation_setup_gen4(); + else + v.emit_interpolation_setup_gen6(); /* Generate FS IR for main(). (the visitor only descends into * functions called "main"). 
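The interference test in virtual_grf_interferes() above is an interval-overlap check with a special case for writes that dead-code elimination has marked unused (use == -1): such a register only conflicts if its def point falls inside the other range. A small self-contained version with a few worked cases:

// Interval-overlap interference test, mirroring the [def, use) ranges
// above; a register whose use is -1 only has its def point checked.
#include <algorithm>
#include <cassert>

struct range { int def, use; };

static bool interferes(const range &a, const range &b)
{
   if (a.use == -1)
      return a.def >= b.def && a.def < b.use;
   if (b.use == -1)
      return b.def >= a.def && b.def < a.use;
   return std::max(a.def, b.def) < std::min(a.use, b.use);
}

int main()
{
   assert( interferes({2, 8}, {5, 9}));   // overlapping live ranges
   assert(!interferes({2, 5}, {5, 9}));   // back-to-back ranges don't clash
   assert(!interferes({6, -1}, {0, 4}));  // dead write after b's last use
   assert( interferes({3, -1}, {0, 4}));  // dead write inside b's range
   return 0;
}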
@@ -1886,12 +3238,30 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c) } v.emit_fb_writes(); + + v.split_virtual_grfs(); + v.assign_curb_setup(); v.assign_urb_setup(); - v.assign_regs(); + + bool progress; + do { + progress = false; + v.calculate_live_intervals(); + progress = v.propagate_constants() || progress; + progress = v.register_coalesce() || progress; + progress = v.compute_to_mrf() || progress; + progress = v.dead_code_eliminate() || progress; + } while (progress); + + if (0) + v.assign_regs_trivial(); + else + v.assign_regs(); } - v.generate_code(); + if (!v.fail) + v.generate_code(); assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */ @@ -1918,6 +3288,11 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c) printf(" %s\n", last_annotation_string); } brw_disasm(stdout, &p->store[i], intel->gen); + printf("0x%08x 0x%08x 0x%08x 0x%08x\n", + ((uint32_t *)&p->store[i])[3], + ((uint32_t *)&p->store[i])[2], + ((uint32_t *)&p->store[i])[1], + ((uint32_t *)&p->store[i])[0]); } printf("\n"); } diff --git a/src/mesa/drivers/dri/i965/brw_fs.h b/src/mesa/drivers/dri/i965/brw_fs.h new file mode 100644 index 00000000000..b7f4e15c767 --- /dev/null +++ b/src/mesa/drivers/dri/i965/brw_fs.h @@ -0,0 +1,447 @@ +/* + * Copyright © 2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt <[email protected]> + * + */ + +extern "C" { + +#include <sys/types.h> + +#include "main/macros.h" +#include "main/shaderobj.h" +#include "main/uniforms.h" +#include "program/prog_parameter.h" +#include "program/prog_print.h" +#include "program/prog_optimize.h" +#include "program/register_allocate.h" +#include "program/sampler.h" +#include "program/hash_table.h" +#include "brw_context.h" +#include "brw_eu.h" +#include "brw_wm.h" +#include "talloc.h" +} +#include "../glsl/glsl_types.h" +#include "../glsl/ir.h" + +enum register_file { + ARF = BRW_ARCHITECTURE_REGISTER_FILE, + GRF = BRW_GENERAL_REGISTER_FILE, + MRF = BRW_MESSAGE_REGISTER_FILE, + IMM = BRW_IMMEDIATE_VALUE, + FIXED_HW_REG, /* a struct brw_reg */ + UNIFORM, /* prog_data->params[hw_reg] */ + BAD_FILE +}; + +enum fs_opcodes { + FS_OPCODE_FB_WRITE = 256, + FS_OPCODE_RCP, + FS_OPCODE_RSQ, + FS_OPCODE_SQRT, + FS_OPCODE_EXP2, + FS_OPCODE_LOG2, + FS_OPCODE_POW, + FS_OPCODE_SIN, + FS_OPCODE_COS, + FS_OPCODE_DDX, + FS_OPCODE_DDY, + FS_OPCODE_LINTERP, + FS_OPCODE_TEX, + FS_OPCODE_TXB, + FS_OPCODE_TXL, + FS_OPCODE_DISCARD_NOT, + FS_OPCODE_DISCARD_AND, +}; + + +class fs_reg { +public: + /* Callers of this talloc-based new need not call delete. It's + * easier to just talloc_free 'ctx' (or any of its ancestors). */ + static void* operator new(size_t size, void *ctx) + { + void *node; + + node = talloc_size(ctx, size); + assert(node != NULL); + + return node; + } + + void init() + { + this->reg = 0; + this->reg_offset = 0; + this->negate = 0; + this->abs = 0; + this->hw_reg = -1; + } + + /** Generic unset register constructor. */ + fs_reg() + { + init(); + this->file = BAD_FILE; + } + + /** Immediate value constructor. */ + fs_reg(float f) + { + init(); + this->file = IMM; + this->type = BRW_REGISTER_TYPE_F; + this->imm.f = f; + } + + /** Immediate value constructor. */ + fs_reg(int32_t i) + { + init(); + this->file = IMM; + this->type = BRW_REGISTER_TYPE_D; + this->imm.i = i; + } + + /** Immediate value constructor. */ + fs_reg(uint32_t u) + { + init(); + this->file = IMM; + this->type = BRW_REGISTER_TYPE_UD; + this->imm.u = u; + } + + /** Fixed brw_reg Immediate value constructor. */ + fs_reg(struct brw_reg fixed_hw_reg) + { + init(); + this->file = FIXED_HW_REG; + this->fixed_hw_reg = fixed_hw_reg; + this->type = fixed_hw_reg.type; + } + + fs_reg(enum register_file file, int hw_reg); + fs_reg(enum register_file file, int hw_reg, uint32_t type); + fs_reg(class fs_visitor *v, const struct glsl_type *type); + + /** Register file: ARF, GRF, MRF, IMM. */ + enum register_file file; + /** virtual register number. 0 = fixed hw reg */ + int reg; + /** Offset within the virtual register. */ + int reg_offset; + /** HW register number. Generally unset until register allocation. */ + int hw_reg; + /** Register type. BRW_REGISTER_TYPE_* */ + int type; + bool negate; + bool abs; + struct brw_reg fixed_hw_reg; + + /** Value for file == BRW_IMMMEDIATE_FILE */ + union { + int32_t i; + uint32_t u; + float f; + } imm; +}; + +class fs_inst : public exec_node { +public: + /* Callers of this talloc-based new need not call delete. It's + * easier to just talloc_free 'ctx' (or any of its ancestors). 
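The overloaded operator new on fs_reg (and on fs_inst below) ties every IR node's lifetime to a talloc context, so the visitor never deletes nodes individually: talloc_free(mem_ctx) reclaims them all. The same ownership pattern is sketched here with a trivial arena class standing in for talloc; the arena is purely illustrative and, like talloc, it does not run C++ destructors.

// Context-owned allocation in the style of the talloc-backed operator new
// above, using a trivial arena instead of talloc.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

class arena {
public:
   void *alloc(size_t size)
   {
      void *p = ::operator new(size);
      allocations.push_back(p);
      return p;
   }
   ~arena()
   {
      for (void *p : allocations)
         ::operator delete(p);
   }
private:
   std::vector<void *> allocations;
};

class node {
public:
   // Callers never delete nodes; destroying the arena releases them all.
   static void *operator new(size_t size, arena &ctx)
   {
      void *p = ctx.alloc(size);
      assert(p != NULL);
      return p;
   }
   // Matching form, only reached if a constructor were to throw.
   static void operator delete(void *, arena &) {}

   int value;
};

int main()
{
   arena mem_ctx;
   node *n = new (mem_ctx) node;
   n->value = 42;
   printf("%d\n", n->value);
   return 0;               // mem_ctx's destructor frees every node
}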
*/ + static void* operator new(size_t size, void *ctx) + { + void *node; + + node = talloc_zero_size(ctx, size); + assert(node != NULL); + + return node; + } + + void init() + { + this->opcode = BRW_OPCODE_NOP; + this->saturate = false; + this->conditional_mod = BRW_CONDITIONAL_NONE; + this->predicated = false; + this->sampler = 0; + this->target = 0; + this->eot = false; + this->header_present = false; + this->shadow_compare = false; + this->mlen = 0; + this->base_mrf = 0; + } + + fs_inst() + { + init(); + } + + fs_inst(int opcode) + { + init(); + this->opcode = opcode; + } + + fs_inst(int opcode, fs_reg dst) + { + init(); + this->opcode = opcode; + this->dst = dst; + + if (dst.file == GRF) + assert(dst.reg_offset >= 0); + } + + fs_inst(int opcode, fs_reg dst, fs_reg src0) + { + init(); + this->opcode = opcode; + this->dst = dst; + this->src[0] = src0; + + if (dst.file == GRF) + assert(dst.reg_offset >= 0); + if (src[0].file == GRF) + assert(src[0].reg_offset >= 0); + } + + fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1) + { + init(); + this->opcode = opcode; + this->dst = dst; + this->src[0] = src0; + this->src[1] = src1; + + if (dst.file == GRF) + assert(dst.reg_offset >= 0); + if (src[0].file == GRF) + assert(src[0].reg_offset >= 0); + if (src[1].file == GRF) + assert(src[1].reg_offset >= 0); + } + + fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2) + { + init(); + this->opcode = opcode; + this->dst = dst; + this->src[0] = src0; + this->src[1] = src1; + this->src[2] = src2; + + if (dst.file == GRF) + assert(dst.reg_offset >= 0); + if (src[0].file == GRF) + assert(src[0].reg_offset >= 0); + if (src[1].file == GRF) + assert(src[1].reg_offset >= 0); + if (src[2].file == GRF) + assert(src[2].reg_offset >= 0); + } + + int opcode; /* BRW_OPCODE_* or FS_OPCODE_* */ + fs_reg dst; + fs_reg src[3]; + bool saturate; + bool predicated; + int conditional_mod; /**< BRW_CONDITIONAL_* */ + + int mlen; /**< SEND message length */ + int base_mrf; /**< First MRF in the SEND message, if mlen is nonzero. */ + int sampler; + int target; /**< MRT target. */ + bool eot; + bool header_present; + bool shadow_compare; + + /** @{ + * Annotation for the generated IR. One of the two can be set. 
+ */ + ir_instruction *ir; + const char *annotation; + /** @} */ +}; + +class fs_visitor : public ir_visitor +{ +public: + + fs_visitor(struct brw_wm_compile *c, struct brw_shader *shader) + { + this->c = c; + this->p = &c->func; + this->brw = p->brw; + this->fp = brw->fragment_program; + this->intel = &brw->intel; + this->ctx = &intel->ctx; + this->mem_ctx = talloc_new(NULL); + this->shader = shader; + this->fail = false; + this->variable_ht = hash_table_ctor(0, + hash_table_pointer_hash, + hash_table_pointer_compare); + + this->frag_color = NULL; + this->frag_data = NULL; + this->frag_depth = NULL; + this->first_non_payload_grf = 0; + + this->current_annotation = NULL; + this->annotation_string = NULL; + this->annotation_ir = NULL; + this->base_ir = NULL; + + this->virtual_grf_sizes = NULL; + this->virtual_grf_next = 1; + this->virtual_grf_array_size = 0; + this->virtual_grf_def = NULL; + this->virtual_grf_use = NULL; + + this->kill_emitted = false; + } + + ~fs_visitor() + { + talloc_free(this->mem_ctx); + hash_table_dtor(this->variable_ht); + } + + fs_reg *variable_storage(ir_variable *var); + int virtual_grf_alloc(int size); + + void visit(ir_variable *ir); + void visit(ir_assignment *ir); + void visit(ir_dereference_variable *ir); + void visit(ir_dereference_record *ir); + void visit(ir_dereference_array *ir); + void visit(ir_expression *ir); + void visit(ir_texture *ir); + void visit(ir_if *ir); + void visit(ir_constant *ir); + void visit(ir_swizzle *ir); + void visit(ir_return *ir); + void visit(ir_loop *ir); + void visit(ir_loop_jump *ir); + void visit(ir_discard *ir); + void visit(ir_call *ir); + void visit(ir_function *ir); + void visit(ir_function_signature *ir); + + fs_inst *emit(fs_inst inst); + void assign_curb_setup(); + void calculate_urb_setup(); + void assign_urb_setup(); + void assign_regs(); + void assign_regs_trivial(); + void split_virtual_grfs(); + void calculate_live_intervals(); + bool propagate_constants(); + bool register_coalesce(); + bool compute_to_mrf(); + bool dead_code_eliminate(); + bool virtual_grf_interferes(int a, int b); + void generate_code(); + void generate_fb_write(fs_inst *inst); + void generate_linterp(fs_inst *inst, struct brw_reg dst, + struct brw_reg *src); + void generate_tex(fs_inst *inst, struct brw_reg dst); + void generate_math(fs_inst *inst, struct brw_reg dst, struct brw_reg *src); + void generate_discard_not(fs_inst *inst, struct brw_reg temp); + void generate_discard_and(fs_inst *inst, struct brw_reg temp); + void generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src); + void generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src); + + void emit_dummy_fs(); + fs_reg *emit_fragcoord_interpolation(ir_variable *ir); + fs_reg *emit_frontfacing_interpolation(ir_variable *ir); + fs_reg *emit_general_interpolation(ir_variable *ir); + void emit_interpolation_setup_gen4(); + void emit_interpolation_setup_gen6(); + fs_inst *emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate); + fs_inst *emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate); + fs_inst *emit_math(fs_opcodes op, fs_reg dst, fs_reg src0); + fs_inst *emit_math(fs_opcodes op, fs_reg dst, fs_reg src0, fs_reg src1); + void emit_bool_to_cond_code(ir_rvalue *condition); + + void emit_fb_writes(); + void emit_assignment_writes(fs_reg &l, fs_reg &r, + const glsl_type *type, bool predicated); + + struct brw_reg interp_reg(int location, int channel); + int setup_uniform_values(int loc, const glsl_type *type); + void 
setup_builtin_uniform_values(ir_variable *ir); + + struct brw_context *brw; + const struct gl_fragment_program *fp; + struct intel_context *intel; + struct gl_context *ctx; + struct brw_wm_compile *c; + struct brw_compile *p; + struct brw_shader *shader; + void *mem_ctx; + exec_list instructions; + + int *virtual_grf_sizes; + int virtual_grf_next; + int virtual_grf_array_size; + int *virtual_grf_def; + int *virtual_grf_use; + + struct hash_table *variable_ht; + ir_variable *frag_color, *frag_data, *frag_depth; + int first_non_payload_grf; + int urb_setup[FRAG_ATTRIB_MAX]; + bool kill_emitted; + + /** @{ debug annotation info */ + const char *current_annotation; + ir_instruction *base_ir; + const char **annotation_string; + ir_instruction **annotation_ir; + /** @} */ + + bool fail; + + /* Result of last visit() method. */ + fs_reg result; + + fs_reg pixel_x; + fs_reg pixel_y; + fs_reg wpos_w; + fs_reg pixel_w; + fs_reg delta_x; + fs_reg delta_y; + + int grf_used; +}; + +GLboolean brw_do_channel_expressions(struct exec_list *instructions); +GLboolean brw_do_vector_splitting(struct exec_list *instructions); diff --git a/src/mesa/drivers/dri/i965/brw_fs_channel_expressions.cpp b/src/mesa/drivers/dri/i965/brw_fs_channel_expressions.cpp index 478614090a0..4aac1b5a058 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_channel_expressions.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_channel_expressions.cpp @@ -83,7 +83,6 @@ channel_expressions_predicate(ir_instruction *ir) return false; } -extern "C" { GLboolean brw_do_channel_expressions(exec_list *instructions) { @@ -99,7 +98,6 @@ brw_do_channel_expressions(exec_list *instructions) return v.progress; } -} ir_rvalue * ir_channel_expressions_visitor::get_element(ir_variable *var, unsigned int elem) @@ -119,7 +117,6 @@ ir_channel_expressions_visitor::assign(ir_assignment *ir, int elem, ir_rvalue *v { ir_dereference *lhs = ir->lhs->clone(mem_ctx, NULL); ir_assignment *assign; - ir_swizzle *val_swiz; /* This assign-of-expression should have been generated by the * expression flattening visitor (since we never short circit to @@ -128,10 +125,7 @@ ir_channel_expressions_visitor::assign(ir_assignment *ir, int elem, ir_rvalue *v */ assert(ir->write_mask == (1 << ir->lhs->type->components()) - 1); - /* Smear the float across all the channels for the masked write. 
*/ - val_swiz = new(mem_ctx) ir_swizzle(val, 0, 0, 0, 0, - ir->lhs->type->components()); - assign = new(mem_ctx) ir_assignment(lhs, val_swiz, NULL, (1 << elem)); + assign = new(mem_ctx) ir_assignment(lhs, val, NULL, (1 << elem)); ir->insert_before(assign); } @@ -235,6 +229,12 @@ ir_channel_expressions_visitor::visit_leave(ir_assignment *ir) case ir_binop_bit_and: case ir_binop_bit_xor: case ir_binop_bit_or: + case ir_binop_less: + case ir_binop_greater: + case ir_binop_lequal: + case ir_binop_gequal: + case ir_binop_equal: + case ir_binop_nequal: for (i = 0; i < vector_elements; i++) { ir_rvalue *op0 = get_element(op_var[0], i); ir_rvalue *op1 = get_element(op_var[1], i); @@ -315,10 +315,6 @@ ir_channel_expressions_visitor::visit_leave(ir_assignment *ir) break; } - case ir_binop_less: - case ir_binop_greater: - case ir_binop_lequal: - case ir_binop_gequal: case ir_binop_logic_and: case ir_binop_logic_xor: case ir_binop_logic_or: @@ -326,8 +322,8 @@ ir_channel_expressions_visitor::visit_leave(ir_assignment *ir) printf("\n"); assert(!"not reached: expression operates on scalars only"); break; - case ir_binop_equal: - case ir_binop_nequal: { + case ir_binop_all_equal: + case ir_binop_any_nequal: { ir_expression *last = NULL; for (i = 0; i < vector_elements; i++) { ir_rvalue *op0 = get_element(op_var[0], i); @@ -335,7 +331,7 @@ ir_channel_expressions_visitor::visit_leave(ir_assignment *ir) ir_expression *temp; ir_expression_operation join; - if (expr->operation == ir_binop_equal) + if (expr->operation == ir_binop_all_equal) join = ir_binop_logic_and; else join = ir_binop_logic_or; diff --git a/src/mesa/drivers/dri/i965/brw_fs_vector_splitting.cpp b/src/mesa/drivers/dri/i965/brw_fs_vector_splitting.cpp index 00d5c202485..2be6b08b5c7 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_vector_splitting.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_vector_splitting.cpp @@ -212,7 +212,6 @@ public: struct variable_entry *get_splitting_entry(ir_variable *var); exec_list *variable_list; - void *mem_ctx; }; struct variable_entry * @@ -264,37 +263,43 @@ ir_vector_splitting_visitor::visit_leave(ir_assignment *ir) variable_entry *rhs = rhs_deref ? get_splitting_entry(rhs_deref->var) : NULL; if (lhs_deref && rhs_deref && (lhs || rhs) && !ir->condition) { + unsigned int rhs_chan = 0; + /* Straight assignment of vector variables. */ - for (unsigned int i = 0; i < ir->rhs->type->vector_elements; i++) { + for (unsigned int i = 0; i < ir->lhs->type->vector_elements; i++) { ir_dereference *new_lhs; ir_rvalue *new_rhs; void *mem_ctx = lhs ? lhs->mem_ctx : rhs->mem_ctx; unsigned int writemask; + if (!(ir->write_mask & (1 << i))) + continue; + if (lhs) { new_lhs = new(mem_ctx) ir_dereference_variable(lhs->components[i]); - writemask = (ir->write_mask >> i) & 1; + writemask = 1; } else { new_lhs = ir->lhs->clone(mem_ctx, NULL); - writemask = ir->write_mask & (1 << i); + writemask = 1 << i; } if (rhs) { - new_rhs = new(mem_ctx) ir_dereference_variable(rhs->components[i]); - /* If we're writing into a writemask, smear it out to that channel. 
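The channel-expressions change above now scalarizes the per-channel comparison operators directly and reserves ir_binop_all_equal / ir_binop_any_nequal for the reducing forms, joining the per-channel results with logical AND or OR respectively. The reduction is easy to state outside the IR; the helpers below are an illustrative model, not Mesa functions.

// Scalarizing vector comparisons: all_equal becomes an AND of per-channel
// equals, any_nequal an OR of per-channel not-equals.
#include <cassert>

static bool all_equal(const float *a, const float *b, int n)
{
   bool result = true;
   for (int i = 0; i < n; i++)
      result = result && (a[i] == b[i]);     // join with logical AND
   return result;
}

static bool any_nequal(const float *a, const float *b, int n)
{
   bool result = false;
   for (int i = 0; i < n; i++)
      result = result || (a[i] != b[i]);     // join with logical OR
   return result;
}

int main()
{
   float a[4] = { 1, 2, 3, 4 };
   float b[4] = { 1, 2, 3, 5 };

   assert(!all_equal(a, b, 4));
   assert(any_nequal(a, b, 4));
   assert(all_equal(a, a, 4));
   assert(!any_nequal(b, b, 4));
   return 0;
}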
*/ - if (!lhs) - new_rhs = new(mem_ctx) ir_swizzle(new_rhs, i, i, i, i, i + 1); + new_rhs = + new(mem_ctx) ir_dereference_variable(rhs->components[rhs_chan]); } else { new_rhs = new(mem_ctx) ir_swizzle(ir->rhs->clone(mem_ctx, NULL), - i, i, i, i, 1); + rhs_chan, 0, 0, 0, 1); } ir->insert_before(new(mem_ctx) ir_assignment(new_lhs, new_rhs, NULL, writemask)); + + rhs_chan++; } ir->remove(); } else if (lhs) { + void *mem_ctx = lhs->mem_ctx; int elem = -1; switch (ir->write_mask) { @@ -319,8 +324,6 @@ ir_vector_splitting_visitor::visit_leave(ir_assignment *ir) ir->write_mask = (1 << 0); handle_rvalue(&ir->rhs); - ir->rhs = new(mem_ctx) ir_swizzle(ir->rhs, - elem, elem, elem, elem, 1); } else { handle_rvalue(&ir->rhs); } @@ -330,7 +333,6 @@ ir_vector_splitting_visitor::visit_leave(ir_assignment *ir) return visit_continue; } -extern "C" { bool brw_do_vector_splitting(exec_list *instructions) { @@ -388,4 +390,3 @@ brw_do_vector_splitting(exec_list *instructions) return true; } -} diff --git a/src/mesa/drivers/dri/i965/brw_gs.c b/src/mesa/drivers/dri/i965/brw_gs.c index 5409e557880..cfcc8ea4d6a 100644 --- a/src/mesa/drivers/dri/i965/brw_gs.c +++ b/src/mesa/drivers/dri/i965/brw_gs.c @@ -60,7 +60,7 @@ static void compile_gs_prog( struct brw_context *brw, */ c.nr_attrs = brw_count_bits(c.key.attrs); - if (intel->gen == 5) + if (intel->gen >= 5) c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */ else c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */ @@ -85,9 +85,14 @@ static void compile_gs_prog( struct brw_context *brw, */ switch (key->primitive) { case GL_QUADS: + /* Gen6: VF has already converted into polygon. */ + if (intel->gen == 6) + return; brw_gs_quads( &c, key ); break; case GL_QUAD_STRIP: + if (intel->gen == 6) + return; brw_gs_quad_strip( &c, key ); break; case GL_LINE_LOOP: @@ -160,7 +165,7 @@ static const GLenum gs_prim[GL_POLYGON+1] = { static void populate_key( struct brw_context *brw, struct brw_gs_prog_key *key ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); /* CACHE_NEW_VS_PROG */ diff --git a/src/mesa/drivers/dri/i965/brw_gs.h b/src/mesa/drivers/dri/i965/brw_gs.h index 813b8d447ae..7e3531086f9 100644 --- a/src/mesa/drivers/dri/i965/brw_gs.h +++ b/src/mesa/drivers/dri/i965/brw_gs.h @@ -56,6 +56,7 @@ struct brw_gs_compile { struct { struct brw_reg R0; struct brw_reg vertex[MAX_GS_VERTS]; + struct brw_reg temp; } reg; /* 3 different ways of expressing vertex size: diff --git a/src/mesa/drivers/dri/i965/brw_gs_emit.c b/src/mesa/drivers/dri/i965/brw_gs_emit.c index a01d5576f8c..e1f751fdaa4 100644 --- a/src/mesa/drivers/dri/i965/brw_gs_emit.c +++ b/src/mesa/drivers/dri/i965/brw_gs_emit.c @@ -58,6 +58,8 @@ static void brw_gs_alloc_regs( struct brw_gs_compile *c, i += c->nr_regs; } + c->reg.temp = brw_vec8_grf(i, 0); + c->prog_data.urb_read_length = c->nr_regs; c->prog_data.total_grf = i; } @@ -69,12 +71,22 @@ static void brw_gs_emit_vue(struct brw_gs_compile *c, GLuint header) { struct brw_compile *p = &c->func; + struct intel_context *intel = &c->func.brw->intel; GLboolean allocate = !last; + struct brw_reg temp; + + if (intel->gen < 6) + temp = c->reg.R0; + else { + temp = c->reg.temp; + brw_MOV(p, retype(temp, BRW_REGISTER_TYPE_UD), + retype(c->reg.R0, BRW_REGISTER_TYPE_UD)); + } /* Overwrite PrimType and PrimStart in the message header, for * each vertex in turn: */ - brw_MOV(p, get_element_ud(c->reg.R0, 2), brw_imm_ud(header)); + brw_MOV(p, 
get_element_ud(temp, 2), brw_imm_ud(header)); /* Copy the vertex from vertn into m1..mN+1: */ @@ -87,9 +99,9 @@ static void brw_gs_emit_vue(struct brw_gs_compile *c, * allocated each time. */ brw_urb_WRITE(p, - allocate ? c->reg.R0 : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD), + allocate ? temp : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD), 0, - c->reg.R0, + temp, allocate, 1, /* used */ c->nr_regs + 1, /* msg length */ @@ -98,19 +110,39 @@ static void brw_gs_emit_vue(struct brw_gs_compile *c, 1, /* writes_complete */ 0, /* urb offset */ BRW_URB_SWIZZLE_NONE); + + if (intel->gen >= 6 && allocate) + brw_MOV(p, get_element_ud(c->reg.R0, 0), get_element_ud(temp, 0)); } static void brw_gs_ff_sync(struct brw_gs_compile *c, int num_prim) { struct brw_compile *p = &c->func; - brw_MOV(p, get_element_ud(c->reg.R0, 1), brw_imm_ud(num_prim)); - brw_ff_sync(p, - c->reg.R0, - 0, - c->reg.R0, - 1, /* allocate */ - 1, /* response length */ - 0 /* eot */); + struct intel_context *intel = &c->func.brw->intel; + + if (intel->gen < 6) { + brw_MOV(p, get_element_ud(c->reg.R0, 1), brw_imm_ud(num_prim)); + brw_ff_sync(p, + c->reg.R0, + 0, + c->reg.R0, + 1, /* allocate */ + 1, /* response length */ + 0 /* eot */); + } else { + brw_MOV(p, retype(c->reg.temp, BRW_REGISTER_TYPE_UD), + retype(c->reg.R0, BRW_REGISTER_TYPE_UD)); + brw_MOV(p, get_element_ud(c->reg.temp, 1), brw_imm_ud(num_prim)); + brw_ff_sync(p, + c->reg.temp, + 0, + c->reg.temp, + 1, /* allocate */ + 1, /* response length */ + 0 /* eot */); + brw_MOV(p, get_element_ud(c->reg.R0, 0), + get_element_ud(c->reg.temp, 0)); + } } diff --git a/src/mesa/drivers/dri/i965/brw_misc_state.c b/src/mesa/drivers/dri/i965/brw_misc_state.c index 6eeaba77720..27d161db413 100644 --- a/src/mesa/drivers/dri/i965/brw_misc_state.c +++ b/src/mesa/drivers/dri/i965/brw_misc_state.c @@ -48,7 +48,7 @@ static void upload_blend_constant_color(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_blend_constant_color bcc; memset(&bcc, 0, sizeof(bcc)); @@ -76,7 +76,7 @@ const struct brw_tracked_state brw_blend_constant_color = { static void upload_drawing_rect(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; BEGIN_BATCH(4); OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965); @@ -335,7 +335,7 @@ const struct brw_tracked_state brw_depthbuffer = { static void upload_polygon_stipple(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_polygon_stipple bps; GLuint i; @@ -378,7 +378,7 @@ const struct brw_tracked_state brw_polygon_stipple = { static void upload_polygon_stipple_offset(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_polygon_stipple_offset bpso; memset(&bpso, 0, sizeof(bpso)); @@ -449,7 +449,7 @@ const struct brw_tracked_state brw_aa_line_parameters = { static void upload_line_stipple(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_line_stipple bls; GLfloat tmp; GLint tmpi; diff --git a/src/mesa/drivers/dri/i965/brw_program.c b/src/mesa/drivers/dri/i965/brw_program.c index 3e52be5d4b7..1367d814696 100644 --- a/src/mesa/drivers/dri/i965/brw_program.c +++ b/src/mesa/drivers/dri/i965/brw_program.c @@ -41,7 +41,7 @@ #include "brw_context.h" #include "brw_wm.h" -static void brwBindProgram( GLcontext *ctx, +static void brwBindProgram( 
struct gl_context *ctx, GLenum target, struct gl_program *prog ) { @@ -57,7 +57,7 @@ static void brwBindProgram( GLcontext *ctx, } } -static struct gl_program *brwNewProgram( GLcontext *ctx, +static struct gl_program *brwNewProgram( struct gl_context *ctx, GLenum target, GLuint id ) { @@ -93,14 +93,14 @@ static struct gl_program *brwNewProgram( GLcontext *ctx, } } -static void brwDeleteProgram( GLcontext *ctx, +static void brwDeleteProgram( struct gl_context *ctx, struct gl_program *prog ) { _mesa_delete_program( ctx, prog ); } -static GLboolean brwIsProgramNative( GLcontext *ctx, +static GLboolean brwIsProgramNative( struct gl_context *ctx, GLenum target, struct gl_program *prog ) { @@ -108,7 +108,7 @@ static GLboolean brwIsProgramNative( GLcontext *ctx, } static void -shader_error(GLcontext *ctx, struct gl_program *prog, const char *msg) +shader_error(struct gl_context *ctx, struct gl_program *prog, const char *msg) { struct gl_shader_program *shader; @@ -120,7 +120,7 @@ shader_error(GLcontext *ctx, struct gl_program *prog, const char *msg) } } -static GLboolean brwProgramStringNotify( GLcontext *ctx, +static GLboolean brwProgramStringNotify( struct gl_context *ctx, GLenum target, struct gl_program *prog ) { @@ -148,15 +148,9 @@ static GLboolean brwProgramStringNotify( GLcontext *ctx, * using the new FS backend. */ shader_program = _mesa_lookup_shader_program(ctx, prog->Id); - if (shader_program) { - for (i = 0; i < shader_program->_NumLinkedShaders; i++) { - struct brw_shader *shader; - - shader = (struct brw_shader *)shader_program->_LinkedShaders[i]; - if (shader->base.Type == GL_FRAGMENT_SHADER && shader->ir) { - return GL_TRUE; - } - } + if (shader_program + && shader_program->_LinkedShaders[MESA_SHADER_FRAGMENT]) { + return GL_TRUE; } } else if (target == GL_VERTEX_PROGRAM_ARB) { diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c index f6868c83ac7..f28f28663ea 100644 --- a/src/mesa/drivers/dri/i965/brw_queryobj.c +++ b/src/mesa/drivers/dri/i965/brw_queryobj.c @@ -72,7 +72,7 @@ brw_queryobj_get_results(struct brw_query_object *query) } static struct gl_query_object * -brw_new_query_object(GLcontext *ctx, GLuint id) +brw_new_query_object(struct gl_context *ctx, GLuint id) { struct brw_query_object *query; @@ -87,7 +87,7 @@ brw_new_query_object(GLcontext *ctx, GLuint id) } static void -brw_delete_query(GLcontext *ctx, struct gl_query_object *q) +brw_delete_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_query_object *query = (struct brw_query_object *)q; @@ -96,7 +96,7 @@ brw_delete_query(GLcontext *ctx, struct gl_query_object *q) } static void -brw_begin_query(GLcontext *ctx, struct gl_query_object *q) +brw_begin_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_context *brw = brw_context(ctx); struct intel_context *intel = intel_context(ctx); @@ -107,16 +107,29 @@ brw_begin_query(GLcontext *ctx, struct gl_query_object *q) query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096); - BEGIN_BATCH(4); - OUT_BATCH(_3DSTATE_PIPE_CONTROL | - PIPE_CONTROL_WRITE_TIMESTAMP); - OUT_RELOC(query->bo, - I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, - PIPE_CONTROL_GLOBAL_GTT_WRITE | - 0); - OUT_BATCH(0); - OUT_BATCH(0); - ADVANCE_BATCH(); + if (intel->gen >= 6) { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP); + OUT_RELOC(query->bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + 0); + 
OUT_BATCH(0); + ADVANCE_BATCH(); + + } else { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL | + PIPE_CONTROL_WRITE_TIMESTAMP); + OUT_RELOC(query->bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + 0); + OUT_BATCH(0); + OUT_BATCH(0); + ADVANCE_BATCH(); + } } else { /* Reset our driver's tracking of query state. */ drm_intel_bo_unreference(query->bo); @@ -133,23 +146,36 @@ brw_begin_query(GLcontext *ctx, struct gl_query_object *q) * Begin the ARB_occlusion_query query on a query object. */ static void -brw_end_query(GLcontext *ctx, struct gl_query_object *q) +brw_end_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_context *brw = brw_context(ctx); struct intel_context *intel = intel_context(ctx); struct brw_query_object *query = (struct brw_query_object *)q; if (query->Base.Target == GL_TIME_ELAPSED_EXT) { - BEGIN_BATCH(4); - OUT_BATCH(_3DSTATE_PIPE_CONTROL | - PIPE_CONTROL_WRITE_TIMESTAMP); - OUT_RELOC(query->bo, - I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, - PIPE_CONTROL_GLOBAL_GTT_WRITE | - 8); - OUT_BATCH(0); - OUT_BATCH(0); - ADVANCE_BATCH(); + if (intel->gen >= 6) { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP); + OUT_RELOC(query->bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + 8); + OUT_BATCH(0); + ADVANCE_BATCH(); + + } else { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL | + PIPE_CONTROL_WRITE_TIMESTAMP); + OUT_RELOC(query->bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + 8); + OUT_BATCH(0); + OUT_BATCH(0); + ADVANCE_BATCH(); + } intel_batchbuffer_flush(intel->batch); } else { @@ -171,7 +197,7 @@ brw_end_query(GLcontext *ctx, struct gl_query_object *q) } } -static void brw_wait_query(GLcontext *ctx, struct gl_query_object *q) +static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_query_object *query = (struct brw_query_object *)q; @@ -179,7 +205,7 @@ static void brw_wait_query(GLcontext *ctx, struct gl_query_object *q) query->Base.Ready = GL_TRUE; } -static void brw_check_query(GLcontext *ctx, struct gl_query_object *q) +static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q) { struct brw_query_object *query = (struct brw_query_object *)q; @@ -223,22 +249,43 @@ brw_emit_query_begin(struct brw_context *brw) if (!query || brw->query.active) return; - BEGIN_BATCH(4); - OUT_BATCH(_3DSTATE_PIPE_CONTROL | - PIPE_CONTROL_DEPTH_STALL | - PIPE_CONTROL_WRITE_DEPTH_COUNT); - /* This object could be mapped cacheable, but we don't have an exposed - * mechanism to support that. Since it's going uncached, tell GEM that - * we're writing to it. The usual clflush should be all that's required - * to pick up the results. - */ - OUT_RELOC(brw->query.bo, - I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, - PIPE_CONTROL_GLOBAL_GTT_WRITE | - ((brw->query.index * 2) * sizeof(uint64_t))); - OUT_BATCH(0); - OUT_BATCH(0); - ADVANCE_BATCH(); + if (intel->gen >= 6) { + BEGIN_BATCH(8); + + /* workaround: CS stall required before depth stall. 
*/ + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_CS_STALL); + OUT_BATCH(0); /* write address */ + OUT_BATCH(0); /* write data */ + + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_WRITE_DEPTH_COUNT); + OUT_RELOC(brw->query.bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + ((brw->query.index * 2) * sizeof(uint64_t))); + OUT_BATCH(0); + ADVANCE_BATCH(); + + } else { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL | + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_WRITE_DEPTH_COUNT); + /* This object could be mapped cacheable, but we don't have an exposed + * mechanism to support that. Since it's going uncached, tell GEM that + * we're writing to it. The usual clflush should be all that's required + * to pick up the results. + */ + OUT_RELOC(brw->query.bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + ((brw->query.index * 2) * sizeof(uint64_t))); + OUT_BATCH(0); + OUT_BATCH(0); + ADVANCE_BATCH(); + } if (query->bo != brw->query.bo) { if (query->bo != NULL) @@ -260,17 +307,37 @@ brw_emit_query_end(struct brw_context *brw) if (!brw->query.active) return; - BEGIN_BATCH(4); - OUT_BATCH(_3DSTATE_PIPE_CONTROL | - PIPE_CONTROL_DEPTH_STALL | - PIPE_CONTROL_WRITE_DEPTH_COUNT); - OUT_RELOC(brw->query.bo, - I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, - PIPE_CONTROL_GLOBAL_GTT_WRITE | - ((brw->query.index * 2 + 1) * sizeof(uint64_t))); - OUT_BATCH(0); - OUT_BATCH(0); - ADVANCE_BATCH(); + if (intel->gen >= 6) { + BEGIN_BATCH(8); + /* workaround: CS stall required before depth stall. */ + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_CS_STALL); + OUT_BATCH(0); /* write address */ + OUT_BATCH(0); /* write data */ + + OUT_BATCH(_3DSTATE_PIPE_CONTROL); + OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_WRITE_DEPTH_COUNT); + OUT_RELOC(brw->query.bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + ((brw->query.index * 2 + 1) * sizeof(uint64_t))); + OUT_BATCH(0); + ADVANCE_BATCH(); + + } else { + BEGIN_BATCH(4); + OUT_BATCH(_3DSTATE_PIPE_CONTROL | + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_WRITE_DEPTH_COUNT); + OUT_RELOC(brw->query.bo, + I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, + PIPE_CONTROL_GLOBAL_GTT_WRITE | + ((brw->query.index * 2 + 1) * sizeof(uint64_t))); + OUT_BATCH(0); + OUT_BATCH(0); + ADVANCE_BATCH(); + } brw->query.active = GL_FALSE; brw->query.index++; diff --git a/src/mesa/drivers/dri/i965/brw_sf.c b/src/mesa/drivers/dri/i965/brw_sf.c index 7d005d278fb..7dbd70daaea 100644 --- a/src/mesa/drivers/dri/i965/brw_sf.c +++ b/src/mesa/drivers/dri/i965/brw_sf.c @@ -132,7 +132,7 @@ static void compile_sf_prog( struct brw_context *brw, */ static void upload_sf_prog(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_sf_prog_key key; memset(&key, 0, sizeof(key)); diff --git a/src/mesa/drivers/dri/i965/brw_sf_state.c b/src/mesa/drivers/dri/i965/brw_sf_state.c index 914f275cc67..6ad9e1b48a4 100644 --- a/src/mesa/drivers/dri/i965/brw_sf_state.c +++ b/src/mesa/drivers/dri/i965/brw_sf_state.c @@ -38,7 +38,7 @@ static void upload_sf_vp(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF; struct brw_sf_viewport sfv; GLfloat y_scale, y_bias; @@ -139,7 +139,7 @@ struct brw_sf_unit_key { 
static void sf_unit_populate_key(struct brw_context *brw, struct brw_sf_unit_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); /* CACHE_NEW_SF_PROG */ diff --git a/src/mesa/drivers/dri/i965/brw_state.c b/src/mesa/drivers/dri/i965/brw_state.c index 1e77e427d38..13b231d5cf5 100644 --- a/src/mesa/drivers/dri/i965/brw_state.c +++ b/src/mesa/drivers/dri/i965/brw_state.c @@ -28,7 +28,7 @@ #include "brw_context.h" void -brw_enable(GLcontext *ctx, GLenum cap, GLboolean state) +brw_enable(struct gl_context *ctx, GLenum cap, GLboolean state) { struct brw_context *brw = brw_context(ctx); @@ -40,7 +40,7 @@ brw_enable(GLcontext *ctx, GLenum cap, GLboolean state) } void -brw_depth_range(GLcontext *ctx, GLclampd nearval, GLclampd farval) +brw_depth_range(struct gl_context *ctx, GLclampd nearval, GLclampd farval) { struct brw_context *brw = brw_context(ctx); diff --git a/src/mesa/drivers/dri/i965/brw_state.h b/src/mesa/drivers/dri/i965/brw_state.h index c5d296b1295..3beed16945b 100644 --- a/src/mesa/drivers/dri/i965/brw_state.h +++ b/src/mesa/drivers/dri/i965/brw_state.h @@ -102,6 +102,7 @@ extern const struct brw_tracked_state gen6_depth_stencil_state; extern const struct brw_tracked_state gen6_gs_state; extern const struct brw_tracked_state gen6_sampler_state; extern const struct brw_tracked_state gen6_scissor_state; +extern const struct brw_tracked_state gen6_scissor_state_pointers; extern const struct brw_tracked_state gen6_sf_state; extern const struct brw_tracked_state gen6_sf_vp; extern const struct brw_tracked_state gen6_urb; diff --git a/src/mesa/drivers/dri/i965/brw_state_dump.c b/src/mesa/drivers/dri/i965/brw_state_dump.c index d410861bdf6..b79b33c2e32 100644 --- a/src/mesa/drivers/dri/i965/brw_state_dump.c +++ b/src/mesa/drivers/dri/i965/brw_state_dump.c @@ -161,6 +161,123 @@ static void dump_sf_viewport_state(struct brw_context *brw) drm_intel_bo_unmap(brw->sf.vp_bo); } +static void dump_clip_viewport_state(struct brw_context *brw) +{ + const char *name = "CLIP VP"; + struct brw_clipper_viewport *vp; + uint32_t vp_off; + + if (brw->clip.vp_bo == NULL) + return; + + drm_intel_bo_map(brw->clip.vp_bo, GL_FALSE); + + vp = brw->clip.vp_bo->virtual; + vp_off = brw->clip.vp_bo->offset; + + state_out(name, vp, vp_off, 0, "xmin = %f\n", vp->xmin); + state_out(name, vp, vp_off, 1, "xmax = %f\n", vp->xmax); + state_out(name, vp, vp_off, 2, "ymin = %f\n", vp->ymin); + state_out(name, vp, vp_off, 3, "ymax = %f\n", vp->ymax); + drm_intel_bo_unmap(brw->clip.vp_bo); +} + +static void dump_cc_viewport_state(struct brw_context *brw) +{ + const char *name = "CC VP"; + struct brw_cc_viewport *vp; + uint32_t vp_off; + + if (brw->cc.vp_bo == NULL) + return; + + drm_intel_bo_map(brw->cc.vp_bo, GL_FALSE); + + vp = brw->cc.vp_bo->virtual; + vp_off = brw->cc.vp_bo->offset; + + state_out(name, vp, vp_off, 0, "min_depth = %f\n", vp->min_depth); + state_out(name, vp, vp_off, 1, "max_depth = %f\n", vp->max_depth); + drm_intel_bo_unmap(brw->cc.vp_bo); +} + +static void dump_depth_stencil_state(struct brw_context *brw) +{ + const char *name = "DEPTH STENCIL"; + struct gen6_depth_stencil_state *ds; + uint32_t ds_off; + + if (brw->cc.depth_stencil_state_bo == NULL) + return; + + drm_intel_bo_map(brw->cc.depth_stencil_state_bo, GL_FALSE); + + ds = brw->cc.depth_stencil_state_bo->virtual; + ds_off = brw->cc.depth_stencil_state_bo->offset; + + state_out(name, ds, ds_off, 0, "stencil %sable, func %d, write %sable\n", + ds->ds0.stencil_enable ? 
"en" : "dis", + ds->ds0.stencil_func, + ds->ds0.stencil_write_enable ? "en" : "dis"); + state_out(name, ds, ds_off, 1, "stencil test mask 0x%x, write mask 0x%x\n", + ds->ds1.stencil_test_mask, ds->ds1.stencil_write_mask); + state_out(name, ds, ds_off, 2, "depth test %sable, func %d, write %sable\n", + ds->ds2.depth_test_enable ? "en" : "dis", + ds->ds2.depth_test_func, + ds->ds2.depth_write_enable ? "en" : "dis"); + drm_intel_bo_unmap(brw->cc.depth_stencil_state_bo); +} + +static void dump_cc_state(struct brw_context *brw) +{ + const char *name = "CC"; + struct gen6_color_calc_state *cc; + uint32_t cc_off; + + if (brw->cc.state_bo == NULL) + return; + + drm_intel_bo_map(brw->cc.state_bo, GL_FALSE); + cc = brw->cc.state_bo->virtual; + cc_off = brw->cc.state_bo->offset; + + state_out(name, cc, cc_off, 0, "alpha test format %s, round disable %d, stencil ref %d," + "bf stencil ref %d\n", + cc->cc0.alpha_test_format ? "FLOAT32" : "UNORM8", + cc->cc0.round_disable, + cc->cc0.stencil_ref, + cc->cc0.bf_stencil_ref); + state_out(name, cc, cc_off, 1, "\n"); + state_out(name, cc, cc_off, 2, "constant red %f\n", cc->constant_r); + state_out(name, cc, cc_off, 3, "constant green %f\n", cc->constant_g); + state_out(name, cc, cc_off, 4, "constant blue %f\n", cc->constant_b); + state_out(name, cc, cc_off, 5, "constant alpha %f\n", cc->constant_a); + + drm_intel_bo_unmap(brw->cc.state_bo); + +} + +static void dump_blend_state(struct brw_context *brw) +{ + const char *name = "BLEND"; + struct gen6_blend_state *blend; + uint32_t blend_off; + + if (brw->cc.blend_state_bo == NULL) + return; + + drm_intel_bo_map(brw->cc.blend_state_bo, GL_FALSE); + + blend = brw->cc.blend_state_bo->virtual; + blend_off = brw->cc.blend_state_bo->offset; + + state_out(name, blend, blend_off, 0, "\n"); + state_out(name, blend, blend_off, 1, "\n"); + + drm_intel_bo_unmap(brw->cc.blend_state_bo); + +} + static void brw_debug_prog(const char *name, drm_intel_bo *prog) { unsigned int i; @@ -208,16 +325,29 @@ void brw_debug_batch(struct intel_context *intel) state_struct_out("WM bind", brw->wm.bind_bo, 4 * brw->wm.nr_surfaces); dump_wm_surface_state(brw); - state_struct_out("VS", brw->vs.state_bo, sizeof(struct brw_vs_unit_state)); + if (intel->gen < 6) + state_struct_out("VS", brw->vs.state_bo, sizeof(struct brw_vs_unit_state)); brw_debug_prog("VS prog", brw->vs.prog_bo); - state_struct_out("GS", brw->gs.state_bo, sizeof(struct brw_gs_unit_state)); + if (intel->gen < 6) + state_struct_out("GS", brw->gs.state_bo, sizeof(struct brw_gs_unit_state)); brw_debug_prog("GS prog", brw->gs.prog_bo); - state_struct_out("SF", brw->sf.state_bo, sizeof(struct brw_sf_unit_state)); + if (intel->gen < 6) { + state_struct_out("SF", brw->sf.state_bo, sizeof(struct brw_sf_unit_state)); + brw_debug_prog("SF prog", brw->sf.prog_bo); + } dump_sf_viewport_state(brw); - brw_debug_prog("SF prog", brw->sf.prog_bo); - state_struct_out("WM", brw->wm.state_bo, sizeof(struct brw_wm_unit_state)); + if (intel->gen < 6) + state_struct_out("WM", brw->wm.state_bo, sizeof(struct brw_wm_unit_state)); brw_debug_prog("WM prog", brw->wm.prog_bo); + + if (intel->gen >= 6) { + dump_cc_viewport_state(brw); + dump_clip_viewport_state(brw); + dump_depth_stencil_state(brw); + dump_cc_state(brw); + dump_blend_state(brw); + } } diff --git a/src/mesa/drivers/dri/i965/brw_state_upload.c b/src/mesa/drivers/dri/i965/brw_state_upload.c index a0c130557e3..73940a51569 100644 --- a/src/mesa/drivers/dri/i965/brw_state_upload.c +++ b/src/mesa/drivers/dri/i965/brw_state_upload.c @@ -145,6 
+145,7 @@ const struct brw_tracked_state *gen6_atoms[] = &gen6_wm_state, &gen6_scissor_state, + &gen6_scissor_state_pointers, &brw_state_base_address, @@ -231,7 +232,6 @@ static struct dirty_bit_map mesa_bits[] = { DEFINE_BIT(_NEW_MODELVIEW), DEFINE_BIT(_NEW_PROJECTION), DEFINE_BIT(_NEW_TEXTURE_MATRIX), - DEFINE_BIT(_NEW_COLOR_MATRIX), DEFINE_BIT(_NEW_ACCUM), DEFINE_BIT(_NEW_COLOR), DEFINE_BIT(_NEW_DEPTH), @@ -336,7 +336,7 @@ brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits) */ void brw_validate_state( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; struct brw_state_flags *state = &brw->state.dirty; GLuint i; diff --git a/src/mesa/drivers/dri/i965/brw_structs.h b/src/mesa/drivers/dri/i965/brw_structs.h index 2a118e01c53..8ce9af9c4fe 100644 --- a/src/mesa/drivers/dri/i965/brw_structs.h +++ b/src/mesa/drivers/dri/i965/brw_structs.h @@ -909,10 +909,12 @@ struct brw_sf_unit_state }; -struct gen6_scissor_state +struct gen6_scissor_rect { - GLuint ymin, xmin; - GLuint ymax, xmax; + GLuint xmin:16; + GLuint ymin:16; + GLuint xmax:16; + GLuint ymax:16; }; struct brw_gs_unit_state @@ -1073,7 +1075,7 @@ struct brw_sampler_state GLuint mag_filter:3; GLuint mip_filter:2; GLuint base_level:5; - GLuint pad:1; + GLuint min_mag_neq:1; GLuint lod_preclamp:1; GLuint default_color_mode:1; GLuint pad0:1; @@ -1085,7 +1087,8 @@ struct brw_sampler_state GLuint r_wrap_mode:3; GLuint t_wrap_mode:3; GLuint s_wrap_mode:3; - GLuint pad:3; + GLuint cube_control_mode:1; + GLuint pad:2; GLuint max_lod:10; GLuint min_lod:10; } ss1; @@ -1099,7 +1102,9 @@ struct brw_sampler_state struct { - GLuint pad:19; + GLuint non_normalized_coord:1; + GLuint pad:12; + GLuint address_round:6; GLuint max_aniso:3; GLuint chroma_key_mode:1; GLuint chroma_key_index:2; @@ -1210,10 +1215,9 @@ struct brw_surface_state struct { GLuint pad1:16; - GLuint llc_mapping:1; - GLuint mlc_mapping:1; + GLuint cache_control:2; GLuint gfdt:1; - GLuint gfdt_src:1; + GLuint encrypt:1; GLuint y_offset:4; GLuint pad0:1; GLuint x_offset:7; @@ -1377,6 +1381,18 @@ struct brw_instruction GLuint dest_horiz_stride:2; GLuint dest_address_mode:1; } ia16; + + struct { + GLuint dest_reg_file:2; + GLuint dest_reg_type:3; + GLuint src0_reg_file:2; + GLuint src0_reg_type:3; + GLuint src1_reg_file:2; + GLuint src1_reg_type:3; + GLuint pad:1; + + GLint jump_count:16; + } branch_gen6; } bits1; diff --git a/src/mesa/drivers/dri/i965/brw_tex.c b/src/mesa/drivers/dri/i965/brw_tex.c index e911b105b23..39dfd34f4c9 100644 --- a/src/mesa/drivers/dri/i965/brw_tex.c +++ b/src/mesa/drivers/dri/i965/brw_tex.c @@ -45,7 +45,7 @@ */ void brw_validate_textures( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; int i; diff --git a/src/mesa/drivers/dri/i965/brw_tex_layout.c b/src/mesa/drivers/dri/i965/brw_tex_layout.c index 768ccfd79c4..9ac0713a1d3 100644 --- a/src/mesa/drivers/dri/i965/brw_tex_layout.c +++ b/src/mesa/drivers/dri/i965/brw_tex_layout.c @@ -48,7 +48,7 @@ GLboolean brw_miptree_layout(struct intel_context *intel, switch (mt->target) { case GL_TEXTURE_CUBE_MAP: - if (intel->gen == 5) { + if (intel->gen >= 5) { GLuint align_h = 2; GLuint level; GLuint qpitch = 0; diff --git a/src/mesa/drivers/dri/i965/brw_vs.c b/src/mesa/drivers/dri/i965/brw_vs.c index 9f90e1e5e5c..4a41c7a5176 100644 --- a/src/mesa/drivers/dri/i965/brw_vs.c +++ b/src/mesa/drivers/dri/i965/brw_vs.c 
@@ -43,7 +43,7 @@ static void do_vs_prog( struct brw_context *brw, struct brw_vertex_program *vp, struct brw_vs_prog_key *key ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; GLuint program_size; const GLuint *program; struct brw_vs_compile c; @@ -96,6 +96,7 @@ static void do_vs_prog( struct brw_context *brw, sizeof(c.prog_data)); assert(ctx->Const.VertexProgram.MaxNativeParameters == ARRAY_SIZE(c.constant_map)); + (void) ctx; aux_size = sizeof(c.prog_data); if (c.vp->use_const_buffer) @@ -114,7 +115,7 @@ static void do_vs_prog( struct brw_context *brw, static void brw_upload_vs_prog(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct brw_vs_prog_key key; struct brw_vertex_program *vp = (struct brw_vertex_program *)brw->vertex_program; diff --git a/src/mesa/drivers/dri/i965/brw_vs_constval.c b/src/mesa/drivers/dri/i965/brw_vs_constval.c index 249a800bf4b..47cc0a7da7a 100644 --- a/src/mesa/drivers/dri/i965/brw_vs_constval.c +++ b/src/mesa/drivers/dri/i965/brw_vs_constval.c @@ -190,7 +190,7 @@ static GLuint get_input_size(struct brw_context *brw, */ static void calc_wm_input_sizes( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; /* BRW_NEW_VERTEX_PROGRAM */ const struct brw_vertex_program *vp = brw_vertex_program_const(brw->vertex_program); diff --git a/src/mesa/drivers/dri/i965/brw_vs_emit.c b/src/mesa/drivers/dri/i965/brw_vs_emit.c index ad0d00b87c4..e2bff1386b8 100644 --- a/src/mesa/drivers/dri/i965/brw_vs_emit.c +++ b/src/mesa/drivers/dri/i965/brw_vs_emit.c @@ -254,7 +254,7 @@ static void brw_vs_alloc_regs( struct brw_vs_compile *c ) c->first_overflow_output = 0; if (intel->gen >= 6) - mrf = 4; + mrf = 3; /* no more pos store in attribute */ else if (intel->gen == 5) mrf = 8; else @@ -593,11 +593,15 @@ static void emit_math1( struct brw_vs_compile *c, struct brw_compile *p = &c->func; struct intel_context *intel = &p->brw->intel; struct brw_reg tmp = dst; - GLboolean need_tmp = (intel->gen < 6 && - (dst.dw1.bits.writemask != 0xf || - dst.file != BRW_GENERAL_REGISTER_FILE)); + GLboolean need_tmp = GL_FALSE; - if (need_tmp) + if (dst.file != BRW_GENERAL_REGISTER_FILE) + need_tmp = GL_TRUE; + + if (intel->gen < 6 && dst.dw1.bits.writemask != 0xf) + need_tmp = GL_TRUE; + + if (need_tmp) tmp = get_tmp(c); brw_math(p, @@ -626,9 +630,13 @@ static void emit_math2( struct brw_vs_compile *c, struct brw_compile *p = &c->func; struct intel_context *intel = &p->brw->intel; struct brw_reg tmp = dst; - GLboolean need_tmp = (intel->gen < 6 && - (dst.dw1.bits.writemask != 0xf || - dst.file != BRW_GENERAL_REGISTER_FILE)); + GLboolean need_tmp = GL_FALSE; + + if (dst.file != BRW_GENERAL_REGISTER_FILE) + need_tmp = GL_TRUE; + + if (intel->gen < 6 && dst.dw1.bits.writemask != 0xf) + need_tmp = GL_TRUE; if (need_tmp) tmp = get_tmp(c); @@ -1392,8 +1400,11 @@ static void emit_vertex_write( struct brw_vs_compile *c) if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) { struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ]; - brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11)); - brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8)); + if (intel->gen < 6) { + brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11)); + brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8)); + } else + brw_MOV(p, brw_writemask(header1, 
WRITEMASK_W), brw_swizzle1(psiz, 0)); } for (i = 0; i < c->key.nr_userclip; i++) { @@ -1451,8 +1462,7 @@ static void emit_vertex_write( struct brw_vs_compile *c) * position. */ brw_MOV(p, brw_message_reg(2), pos); - brw_MOV(p, brw_message_reg(3), pos); - len_vertex_header = 2; + len_vertex_header = 1; } else if (intel->gen == 5) { /* There are 20 DWs (D0-D19) in VUE header on Ironlake: * dword 0-3 (m1) of the header is indices, point width, clip flags. @@ -1633,6 +1643,10 @@ void brw_vs_emit(struct brw_vs_compile *c ) printf("\n"); } + /* FIXME Need to fix conditional instruction to remove this */ + if (intel->gen >= 6) + p->single_program_flow = GL_TRUE; + brw_set_compression_control(p, BRW_COMPRESSION_NONE); brw_set_access_mode(p, BRW_ALIGN_16); if_depth_in_loop[loop_depth] = 0; diff --git a/src/mesa/drivers/dri/i965/brw_vs_state.c b/src/mesa/drivers/dri/i965/brw_vs_state.c index 9b2dd5b3d1c..ebae94269f9 100644 --- a/src/mesa/drivers/dri/i965/brw_vs_state.c +++ b/src/mesa/drivers/dri/i965/brw_vs_state.c @@ -51,7 +51,7 @@ struct brw_vs_unit_key { static void vs_unit_populate_key(struct brw_context *brw, struct brw_vs_unit_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); diff --git a/src/mesa/drivers/dri/i965/brw_vs_surface_state.c b/src/mesa/drivers/dri/i965/brw_vs_surface_state.c index 0250a68d292..eabac511602 100644 --- a/src/mesa/drivers/dri/i965/brw_vs_surface_state.c +++ b/src/mesa/drivers/dri/i965/brw_vs_surface_state.c @@ -45,7 +45,7 @@ static void prepare_vs_constants(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; struct brw_vertex_program *vp = (struct brw_vertex_program *) brw->vertex_program; @@ -101,7 +101,7 @@ const struct brw_tracked_state brw_vs_constants = { * Sets brw->vs.surf_bo[surf] and brw->vp->const_buffer. */ static void -brw_update_vs_constant_surface( GLcontext *ctx, +brw_update_vs_constant_surface( struct gl_context *ctx, GLuint surf) { struct brw_context *brw = brw_context(ctx); @@ -151,7 +151,7 @@ prepare_vs_surfaces(struct brw_context *brw) */ static void upload_vs_surfaces(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; uint32_t *bind; int i; diff --git a/src/mesa/drivers/dri/i965/brw_vtbl.c b/src/mesa/drivers/dri/i965/brw_vtbl.c index 8f1601d10f1..3d7a98c9812 100644 --- a/src/mesa/drivers/dri/i965/brw_vtbl.c +++ b/src/mesa/drivers/dri/i965/brw_vtbl.c @@ -43,7 +43,6 @@ #include "brw_defines.h" #include "brw_state.h" #include "brw_draw.h" -#include "brw_state.h" #include "brw_vs.h" #include "brw_wm.h" diff --git a/src/mesa/drivers/dri/i965/brw_wm.c b/src/mesa/drivers/dri/i965/brw_wm.c index d70be7bda28..7aad6caf719 100644 --- a/src/mesa/drivers/dri/i965/brw_wm.c +++ b/src/mesa/drivers/dri/i965/brw_wm.c @@ -215,7 +215,8 @@ static void do_wm_prog( struct brw_context *brw, static void brw_wm_populate_key( struct brw_context *brw, struct brw_wm_prog_key *key ) { - GLcontext *ctx = &brw->intel.ctx; + struct intel_context *intel = &brw->intel; + struct gl_context *ctx = &brw->intel.ctx; /* BRW_NEW_FRAGMENT_PROGRAM */ const struct brw_fragment_program *fp = (struct brw_fragment_program *)brw->fragment_program; @@ -278,12 +279,52 @@ static void brw_wm_populate_key( struct brw_context *brw, } } } - - brw_wm_lookup_iz(line_aa, - lookup, - uses_depth, - key); + if (intel->gen >= 6) { + /* R0-1: masks, pixel X/Y coordinates. 
*/ + key->nr_payload_regs = 2; + /* R2: only for 32-pixel dispatch.*/ + /* R3-4: perspective pixel location barycentric */ + key->nr_payload_regs += 2; + /* R5-6: perspective pixel location bary for dispatch width != 8 */ + if (!fp->isGLSL) { /* dispatch_width != 8 */ + key->nr_payload_regs += 2; + } + /* R7-10: perspective centroid barycentric */ + /* R11-14: perspective sample barycentric */ + /* R15-18: linear pixel location barycentric */ + /* R19-22: linear centroid barycentric */ + /* R23-26: linear sample barycentric */ + + /* R27: interpolated depth if uses source depth */ + if (uses_depth) { + key->source_depth_reg = key->nr_payload_regs; + key->nr_payload_regs++; + if (!fp->isGLSL) { /* dispatch_width != 8 */ + /* R28: interpolated depth if not 8-wide. */ + key->nr_payload_regs++; + } + } + /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. + */ + if (uses_depth) { + key->source_w_reg = key->nr_payload_regs; + key->nr_payload_regs++; + if (!fp->isGLSL) { /* dispatch_width != 8 */ + /* R30: interpolated W if not 8-wide. */ + key->nr_payload_regs++; + } + } + /* R31: MSAA position offsets. */ + /* R32-: bary for 32-pixel. */ + /* R58-59: interp W for 32-pixel. */ + } else { + brw_wm_lookup_iz(intel, + line_aa, + lookup, + uses_depth, + key); + } /* BRW_NEW_WM_INPUT_DIMENSIONS */ key->proj_attrib_mask = brw->wm.input_size_masks[4-1]; @@ -301,13 +342,44 @@ static void brw_wm_populate_key( struct brw_context *brw, if (unit->_ReallyEnabled) { const struct gl_texture_object *t = unit->_Current; const struct gl_texture_image *img = t->Image[0][t->BaseLevel]; + int swizzles[SWIZZLE_NIL + 1] = { + SWIZZLE_X, + SWIZZLE_Y, + SWIZZLE_Z, + SWIZZLE_W, + SWIZZLE_ZERO, + SWIZZLE_ONE, + SWIZZLE_NIL + }; + + key->tex_swizzles[i] = SWIZZLE_NOOP; + + /* GL_DEPTH_TEXTURE_MODE is normally handled through + * brw_wm_surface_state, but it applies to shadow compares as + * well and our shadow compares always return the result in + * all 4 channels. 
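The register comments above describe how the gen6 path sizes the WM thread payload before the push constants: two GRFs of masks and pixel X/Y, the perspective pixel barycentrics, an extra pair when dispatch is wider than 8, and then optional interpolated depth and W. A minimal standalone sketch of that bookkeeping; the struct and the wide_dispatch/uses_depth parameters are illustrative stand-ins, not driver fields:

#include <stdbool.h>

/* Illustrative mirror of the key fields set in the hunk above. */
struct wm_payload_layout {
   unsigned nr_payload_regs;
   unsigned source_depth_reg;
   unsigned source_w_reg;
};

static struct wm_payload_layout
gen6_wm_payload_layout(bool wide_dispatch, bool uses_depth)
{
   struct wm_payload_layout l = { 0, 0, 0 };

   l.nr_payload_regs = 2;            /* masks and pixel X/Y coordinates */
   l.nr_payload_regs += 2;           /* perspective pixel barycentrics */
   if (wide_dispatch)
      l.nr_payload_regs += 2;        /* second pair when dispatch width != 8 */

   if (uses_depth) {
      l.source_depth_reg = l.nr_payload_regs;      /* interpolated depth */
      l.nr_payload_regs += wide_dispatch ? 2 : 1;

      l.source_w_reg = l.nr_payload_regs;          /* interpolated W */
      l.nr_payload_regs += wide_dispatch ? 2 : 1;
   }
   return l;
}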
+ */ + if (t->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB) { + if (t->DepthMode == GL_ALPHA) { + swizzles[0] = SWIZZLE_ZERO; + swizzles[1] = SWIZZLE_ZERO; + swizzles[2] = SWIZZLE_ZERO; + } else if (t->DepthMode == GL_LUMINANCE) { + swizzles[3] = SWIZZLE_ONE; + } + } + if (img->InternalFormat == GL_YCBCR_MESA) { key->yuvtex_mask |= 1 << i; if (img->TexFormat == MESA_FORMAT_YCBCR) key->yuvtex_swap_mask |= 1 << i; } - key->tex_swizzles[i] = t->_Swizzle; + key->tex_swizzles[i] = + MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)], + swizzles[GET_SWZ(t->_Swizzle, 1)], + swizzles[GET_SWZ(t->_Swizzle, 2)], + swizzles[GET_SWZ(t->_Swizzle, 3)]); } else { key->tex_swizzles[i] = SWIZZLE_NOOP; diff --git a/src/mesa/drivers/dri/i965/brw_wm.h b/src/mesa/drivers/dri/i965/brw_wm.h index 2639d4f26b3..a094c45a7e0 100644 --- a/src/mesa/drivers/dri/i965/brw_wm.h +++ b/src/mesa/drivers/dri/i965/brw_wm.h @@ -59,6 +59,7 @@ struct brw_wm_prog_key { GLuint source_depth_reg:3; + GLuint source_w_reg:3; GLuint aa_dest_stencil_reg:3; GLuint dest_depth_reg:3; GLuint nr_payload_regs:4; @@ -181,7 +182,11 @@ struct brw_wm_instruction { #define MAX_WM_OPCODE (MAX_OPCODE + 9) #define PROGRAM_PAYLOAD (PROGRAM_FILE_MAX) +#define NUM_FILES (PROGRAM_PAYLOAD + 1) + #define PAYLOAD_DEPTH (FRAG_ATTRIB_MAX) +#define PAYLOAD_W (FRAG_ATTRIB_MAX + 1) +#define PAYLOAD_FP_REG_MAX (FRAG_ATTRIB_MAX + 2) struct brw_wm_compile { struct brw_compile func; @@ -224,7 +229,7 @@ struct brw_wm_compile { } payload; - const struct brw_wm_ref *pass0_fp_reg[PROGRAM_PAYLOAD+1][256][4]; + const struct brw_wm_ref *pass0_fp_reg[NUM_FILES][256][4]; struct brw_wm_ref undef_ref; struct brw_wm_value undef_value; @@ -252,7 +257,7 @@ struct brw_wm_compile { struct { GLboolean inited; struct brw_reg reg; - } wm_regs[PROGRAM_PAYLOAD+1][256][4]; + } wm_regs[NUM_FILES][256][4]; GLboolean used_grf[BRW_WM_MAX_GRF]; GLuint first_free_grf; @@ -299,7 +304,8 @@ void brw_wm_print_insn( struct brw_wm_compile *c, void brw_wm_print_program( struct brw_wm_compile *c, const char *stage ); -void brw_wm_lookup_iz( GLuint line_aa, +void brw_wm_lookup_iz( struct intel_context *intel, + GLuint line_aa, GLuint lookup, GLboolean ps_uses_depth, struct brw_wm_prog_key *key ); @@ -373,6 +379,7 @@ void emit_fb_write(struct brw_wm_compile *c, void emit_frontfacing(struct brw_compile *p, const struct brw_reg *dst, GLuint mask); +void emit_kil_nv(struct brw_wm_compile *c); void emit_linterp(struct brw_compile *p, const struct brw_reg *dst, GLuint mask, @@ -460,13 +467,10 @@ void emit_xpd(struct brw_compile *p, const struct brw_reg *arg0, const struct brw_reg *arg1); -GLboolean brw_compile_shader(GLcontext *ctx, +GLboolean brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader); -GLboolean brw_link_shader(GLcontext *ctx, struct gl_shader_program *prog); -struct gl_shader *brw_new_shader(GLcontext *ctx, GLuint name, GLuint type); -struct gl_shader_program *brw_new_shader_program(GLcontext *ctx, GLuint name); - -GLboolean brw_do_channel_expressions(struct exec_list *instructions); -GLboolean brw_do_vector_splitting(struct exec_list *instructions); +GLboolean brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog); +struct gl_shader *brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type); +struct gl_shader_program *brw_new_shader_program(struct gl_context *ctx, GLuint name); #endif diff --git a/src/mesa/drivers/dri/i965/brw_wm_emit.c b/src/mesa/drivers/dri/i965/brw_wm_emit.c index f3ad01b3fec..cb71c665b47 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_emit.c 
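The brw_wm.c hunk above layers the GL_DEPTH_TEXTURE_MODE handling on top of the texture object's own swizzle: for each output channel it reads the object's selector and remaps it through the depth-mode table before packing the result. A self-contained sketch of that composition; the 3-bit packing and the helper names below are assumptions standing in for Mesa's MAKE_SWIZZLE4/GET_SWZ macros:

#include <stdio.h>

enum { SWZ_X, SWZ_Y, SWZ_Z, SWZ_W, SWZ_ZERO, SWZ_ONE };

/* Pack/unpack four 3-bit component selectors, component 0 in the low bits. */
#define MAKE_SWZ4(a, b, c, d)  ((a) | (b) << 3 | (c) << 6 | (d) << 9)
#define GET_SWZ_COMP(swz, i)   (((swz) >> ((i) * 3)) & 0x7)

/* Apply "outer" after "inner": for every result component, take the inner
 * selector and remap it through the outer table.  This is how the
 * depth-mode table is folded into t->_Swizzle in the hunk above. */
static unsigned compose_swizzle(const unsigned outer[6], unsigned inner)
{
   return MAKE_SWZ4(outer[GET_SWZ_COMP(inner, 0)],
                    outer[GET_SWZ_COMP(inner, 1)],
                    outer[GET_SWZ_COMP(inner, 2)],
                    outer[GET_SWZ_COMP(inner, 3)]);
}

int main(void)
{
   /* GL_LUMINANCE depth mode: leave xyz alone, force alpha to one. */
   unsigned luminance[6] = { SWZ_X, SWZ_Y, SWZ_Z, SWZ_ONE, SWZ_ZERO, SWZ_ONE };
   unsigned identity = MAKE_SWZ4(SWZ_X, SWZ_Y, SWZ_Z, SWZ_W);

   printf("%#x\n", compose_swizzle(luminance, identity));
   return 0;
}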
+++ b/src/mesa/drivers/dri/i965/brw_wm_emit.c @@ -173,6 +173,7 @@ void emit_delta_xy(struct brw_compile *p, GLuint mask, const struct brw_reg *arg0) { + struct intel_context *intel = &p->brw->intel; struct brw_reg r1 = brw_vec1_grf(1, 0); if (mask == 0) @@ -180,6 +181,21 @@ void emit_delta_xy(struct brw_compile *p, assert(mask == WRITEMASK_XY); + if (intel->gen >= 6) { + /* XXX Gen6 WM doesn't have Xstart/Ystart in payload r1.0/r1.1. + Just add them with 0.0 for dst reg.. */ + r1 = brw_imm_v(0x00000000); + brw_ADD(p, + dst[0], + retype(arg0[0], BRW_REGISTER_TYPE_UW), + r1); + brw_ADD(p, + dst[1], + retype(arg0[1], BRW_REGISTER_TYPE_UW), + r1); + return; + } + /* Calc delta X,Y by subtracting origin in r1 from the pixel * centers produced by emit_pixel_xy(). */ @@ -253,6 +269,15 @@ void emit_pixel_w(struct brw_wm_compile *c, { struct brw_compile *p = &c->func; struct intel_context *intel = &p->brw->intel; + struct brw_reg src; + struct brw_reg temp_dst; + + if (intel->gen >= 6) + temp_dst = dst[3]; + else + temp_dst = brw_message_reg(2); + + assert(intel->gen < 6); /* Don't need this if all you are doing is interpolating color, for * instance. @@ -264,31 +289,35 @@ void emit_pixel_w(struct brw_wm_compile *c, * result straight into a message reg. */ if (can_do_pln(intel, deltas)) { - brw_PLN(p, brw_message_reg(2), interp3, deltas[0]); + brw_PLN(p, temp_dst, interp3, deltas[0]); } else { brw_LINE(p, brw_null_reg(), interp3, deltas[0]); - brw_MAC(p, brw_message_reg(2), suboffset(interp3, 1), deltas[1]); + brw_MAC(p, temp_dst, suboffset(interp3, 1), deltas[1]); } /* Calc w */ + if (intel->gen >= 6) + src = temp_dst; + else + src = brw_null_reg(); + if (c->dispatch_width == 16) { brw_math_16(p, dst[3], BRW_MATH_FUNCTION_INV, BRW_MATH_SATURATE_NONE, - 2, brw_null_reg(), + 2, src, BRW_MATH_PRECISION_FULL); } else { brw_math(p, dst[3], BRW_MATH_FUNCTION_INV, BRW_MATH_SATURATE_NONE, - 2, brw_null_reg(), + 2, src, BRW_MATH_DATA_VECTOR, BRW_MATH_PRECISION_FULL); } } } - void emit_linterp(struct brw_compile *p, const struct brw_reg *dst, GLuint mask, @@ -307,7 +336,9 @@ void emit_linterp(struct brw_compile *p, for (i = 0; i < 4; i++) { if (mask & (1<<i)) { - if (can_do_pln(intel, deltas)) { + if (intel->gen >= 6) { + brw_PLN(p, dst[i], interp[i], brw_vec8_grf(2, 0)); + } else if (can_do_pln(intel, deltas)) { brw_PLN(p, dst[i], interp[i], deltas[0]); } else { brw_LINE(p, brw_null_reg(), interp[i], deltas[0]); @@ -330,6 +361,11 @@ void emit_pinterp(struct brw_compile *p, GLuint nr = arg0[0].nr; GLuint i; + if (intel->gen >= 6) { + emit_linterp(p, dst, mask, arg0, interp); + return; + } + interp[0] = brw_vec1_grf(nr, 0); interp[1] = brw_vec1_grf(nr, 4); interp[2] = brw_vec1_grf(nr+1, 0); @@ -910,10 +946,8 @@ void emit_math2(struct brw_wm_compile *c, const struct brw_reg *arg1) { struct brw_compile *p = &c->func; + struct intel_context *intel = &p->brw->intel; int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1; - GLuint saturate = ((mask & SATURATE) ? 
- BRW_MATH_SATURATE_SATURATE : - BRW_MATH_SATURATE_NONE); if (!(mask & WRITEMASK_XYZW)) return; /* Do not emit dead code */ @@ -922,35 +956,103 @@ void emit_math2(struct brw_wm_compile *c, brw_push_insn_state(p); - brw_set_compression_control(p, BRW_COMPRESSION_NONE); - brw_MOV(p, brw_message_reg(3), arg1[0]); - if (c->dispatch_width == 16) { - brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF); - brw_MOV(p, brw_message_reg(5), sechalf(arg1[0])); - } + /* math can only operate on up to a vec8 at a time, so in + * dispatch_width==16 we have to do the second half manually. + */ + if (intel->gen >= 6) { + struct brw_reg src0 = arg0[0]; + struct brw_reg src1 = arg1[0]; + struct brw_reg temp_dst = dst[dst_chan]; + + if (arg0[0].hstride == BRW_HORIZONTAL_STRIDE_0) { + if (arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) { + /* Both scalar arguments. Do scalar calc. */ + src0.hstride = BRW_HORIZONTAL_STRIDE_1; + src1.hstride = BRW_HORIZONTAL_STRIDE_1; + temp_dst.hstride = BRW_HORIZONTAL_STRIDE_1; + temp_dst.width = BRW_WIDTH_1; + + if (arg0[0].subnr != 0) { + brw_MOV(p, temp_dst, src0); + src0 = temp_dst; + + /* Ouch. We've used the temp as a dst, and we still + * need a temp to store arg1 in, because src and dst + * offsets have to be equal. Leaving this up to + * glsl2-965 to handle correctly. + */ + assert(arg1[0].subnr == 0); + } else if (arg1[0].subnr != 0) { + brw_MOV(p, temp_dst, src1); + src1 = temp_dst; + } + } else { + brw_MOV(p, temp_dst, src0); + src0 = temp_dst; + } + } else if (arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) { + brw_MOV(p, temp_dst, src1); + src1 = temp_dst; + } - brw_set_compression_control(p, BRW_COMPRESSION_NONE); - brw_math(p, - dst[dst_chan], - function, - saturate, - 2, - arg0[0], - BRW_MATH_DATA_VECTOR, - BRW_MATH_PRECISION_FULL); + brw_set_saturate(p, (mask & SATURATE) ? 1 : 0); + brw_set_compression_control(p, BRW_COMPRESSION_NONE); + brw_math2(p, + temp_dst, + function, + src0, + src1); + if (c->dispatch_width == 16) { + brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF); + brw_math2(p, + sechalf(temp_dst), + function, + sechalf(src0), + sechalf(src1)); + } - /* Send two messages to perform all 16 operations: - */ - if (c->dispatch_width == 16) { - brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF); + /* Splat a scalar result into all the channels. */ + if (arg0[0].hstride == BRW_HORIZONTAL_STRIDE_0 && + arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) { + temp_dst.hstride = BRW_HORIZONTAL_STRIDE_0; + temp_dst.vstride = BRW_VERTICAL_STRIDE_0; + brw_MOV(p, dst[dst_chan], temp_dst); + } + } else { + GLuint saturate = ((mask & SATURATE) ? 
+ BRW_MATH_SATURATE_SATURATE : + BRW_MATH_SATURATE_NONE); + + brw_set_compression_control(p, BRW_COMPRESSION_NONE); + brw_MOV(p, brw_message_reg(3), arg1[0]); + if (c->dispatch_width == 16) { + brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF); + brw_MOV(p, brw_message_reg(5), sechalf(arg1[0])); + } + + brw_set_compression_control(p, BRW_COMPRESSION_NONE); brw_math(p, - offset(dst[dst_chan],1), + dst[dst_chan], function, saturate, - 4, - sechalf(arg0[0]), + 2, + arg0[0], BRW_MATH_DATA_VECTOR, BRW_MATH_PRECISION_FULL); + + /* Send two messages to perform all 16 operations: + */ + if (c->dispatch_width == 16) { + brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF); + brw_math(p, + offset(dst[dst_chan],1), + function, + saturate, + 4, + sechalf(arg0[0]), + BRW_MATH_DATA_VECTOR, + BRW_MATH_PRECISION_FULL); + } } brw_pop_insn_state(p); } @@ -1028,7 +1130,7 @@ void emit_tex(struct brw_wm_compile *c, /* Fill in the shadow comparison reference value. */ if (shadow) { - if (intel->gen == 5) { + if (intel->gen >= 5) { /* Fill in the cube map array index value. */ brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0)); cur_mrf += mrf_per_channel; @@ -1041,7 +1143,7 @@ void emit_tex(struct brw_wm_compile *c, cur_mrf += mrf_per_channel; } - if (intel->gen == 5) { + if (intel->gen >= 5) { if (shadow) msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5; else @@ -1094,7 +1196,7 @@ void emit_txb(struct brw_wm_compile *c, * from mattering. */ if (c->dispatch_width == 16 || intel->gen < 5) { - if (intel->gen == 5) + if (intel->gen >= 5) msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5; else msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS; @@ -1223,7 +1325,7 @@ static void emit_kil( struct brw_wm_compile *c, /* KIL_NV kills the pixels that are currently executing, not based on a test * of the arguments. */ -static void emit_kil_nv( struct brw_wm_compile *c ) +void emit_kil_nv( struct brw_wm_compile *c ) { struct brw_compile *p = &c->func; struct brw_reg r0uw = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); @@ -1553,9 +1655,12 @@ static void spill_values( struct brw_wm_compile *c, void brw_wm_emit( struct brw_wm_compile *c ) { struct brw_compile *p = &c->func; + struct intel_context *intel = &p->brw->intel; GLuint insn; brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED); + if (intel->gen >= 6) + brw_set_acc_write_control(p, 1); /* Check if any of the payload regs need to be spilled: */ @@ -1667,7 +1772,11 @@ void brw_wm_emit( struct brw_wm_compile *c ) break; case OPCODE_TRUNC: - emit_alu1(p, brw_RNDZ, dst, dst_flags, args[0]); + for (i = 0; i < 4; i++) { + if (dst_flags & (1<<i)) { + brw_RNDZ(p, dst[i], args[0][i]); + } + } break; case OPCODE_LRP: diff --git a/src/mesa/drivers/dri/i965/brw_wm_fp.c b/src/mesa/drivers/dri/i965/brw_wm_fp.c index 3870bf10fcb..15a238cda62 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_fp.c +++ b/src/mesa/drivers/dri/i965/brw_wm_fp.c @@ -89,6 +89,8 @@ static struct prog_src_register src_reg(GLuint file, GLuint idx) reg.Negate = NEGATE_NONE; reg.Abs = 0; reg.HasIndex2 = 0; + reg.RelAddr2 = 0; + reg.Index2 = 0; return reg; } @@ -336,6 +338,12 @@ static struct prog_src_register get_delta_xy( struct brw_wm_compile *c ) static struct prog_src_register get_pixel_w( struct brw_wm_compile *c ) { + /* This is only called for producing 1/w in pre-gen6 interp. for + * gen6, the interp opcodes don't use this argument. 
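The comment above notes that 1/W only has to be produced for pre-gen6 interpolation. The underlying math is the usual perspective-correction identity: attr/w and 1/w are linear in screen space, so they are interpolated there and divided per pixel, which is what the PINTERP path reconstructs. A toy standalone sketch of that identity (plain C, not the EU instruction sequence); the 1-D lerp factor t stands in for the plane equations the hardware evaluates:

#include <stdio.h>

static float lerp(float a, float b, float t) { return a + (b - a) * t; }

/* Perspective-correct interpolation between two vertices: interpolate
 * attr/w and 1/w linearly in screen space, then divide at the pixel. */
static float persp_interp(float attr0, float w0,
                          float attr1, float w1, float t)
{
   float attr_over_w = lerp(attr0 / w0, attr1 / w1, t);
   float one_over_w  = lerp(1.0f / w0, 1.0f / w1, t);
   return attr_over_w / one_over_w;
}

int main(void)
{
   /* Halfway in screen space is not halfway in the attribute when the
    * endpoints have different W. */
   printf("%f\n", persp_interp(0.0f, 1.0f, 1.0f, 4.0f, 0.5f));
   return 0;
}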
+ */ + if (c->func.brw->intel.gen >= 6) + return src_undef(); + if (src_is_undef(c->pixel_w)) { struct prog_dst_register pixel_w = get_temp(c); struct prog_src_register deltas = get_delta_xy(c); @@ -363,7 +371,13 @@ static void emit_interp( struct brw_wm_compile *c, { struct prog_dst_register dst = dst_reg(PROGRAM_INPUT, idx); struct prog_src_register interp = src_reg(PROGRAM_PAYLOAD, idx); - struct prog_src_register deltas = get_delta_xy(c); + struct prog_src_register deltas; + + if (c->func.brw->intel.gen < 6) { + deltas = get_delta_xy(c); + } else { + deltas = src_undef(); + } /* Need to use PINTERP on attributes which have been * multiplied by 1/W in the SF program, and LINTERP on those @@ -520,12 +534,6 @@ static struct prog_src_register search_or_add_param5(struct brw_wm_compile *c, tokens[2] = s2; tokens[3] = s3; tokens[4] = s4; - - for (idx = 0; idx < paramList->NumParameters; idx++) { - if (paramList->Parameters[idx].Type == PROGRAM_STATE_VAR && - memcmp(paramList->Parameters[idx].StateIndexes, tokens, sizeof(tokens)) == 0) - return src_reg(PROGRAM_STATE_VAR, idx); - } idx = _mesa_add_state_reference( paramList, tokens ); @@ -543,28 +551,18 @@ static struct prog_src_register search_or_add_const4f( struct brw_wm_compile *c, GLfloat values[4]; GLuint idx; GLuint swizzle; + struct prog_src_register reg; values[0] = s0; values[1] = s1; values[2] = s2; values[3] = s3; - /* Have to search, otherwise multiple compilations will each grow - * the parameter list. - */ - for (idx = 0; idx < paramList->NumParameters; idx++) { - if (paramList->Parameters[idx].Type == PROGRAM_CONSTANT && - memcmp(paramList->ParameterValues[idx], values, sizeof(values)) == 0) - - /* XXX: this mimics the mesa bug which puts all constants and - * parameters into the "PROGRAM_STATE_VAR" category: - */ - return src_reg(PROGRAM_STATE_VAR, idx); - } - idx = _mesa_add_unnamed_constant( paramList, values, 4, &swizzle ); - assert(swizzle == SWIZZLE_NOOP); /* Need to handle swizzle in reg setup */ - return src_reg(PROGRAM_STATE_VAR, idx); + reg = src_reg(PROGRAM_STATE_VAR, idx); + reg.Swizzle = swizzle; + + return reg; } @@ -1056,6 +1054,7 @@ static void print_insns( const struct prog_instruction *insn, */ void brw_wm_pass_fp( struct brw_wm_compile *c ) { + struct intel_context *intel = &c->func.brw->intel; struct brw_fragment_program *fp = c->fp; GLuint insn; @@ -1067,7 +1066,14 @@ void brw_wm_pass_fp( struct brw_wm_compile *c ) } c->pixel_xy = src_undef(); - c->delta_xy = src_undef(); + if (intel->gen >= 6) { + /* The interpolation deltas come in as the perspective pixel + * location barycentric params. 
+ */ + c->delta_xy = src_reg(PROGRAM_PAYLOAD, PAYLOAD_DEPTH); + } else { + c->delta_xy = src_undef(); + } c->pixel_w = src_undef(); c->nr_fp_insns = 0; c->fp->tex_units_used = 0x0; diff --git a/src/mesa/drivers/dri/i965/brw_wm_glsl.c b/src/mesa/drivers/dri/i965/brw_wm_glsl.c index 7d6724dc1c1..55aceea9b5c 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_glsl.c +++ b/src/mesa/drivers/dri/i965/brw_wm_glsl.c @@ -296,6 +296,8 @@ static void prealloc_reg(struct brw_wm_compile *c) reg = brw_vec8_grf(0, 0); set_reg(c, PROGRAM_PAYLOAD, PAYLOAD_DEPTH, i, reg); } + set_reg(c, PROGRAM_PAYLOAD, PAYLOAD_W, 0, + brw_vec8_grf(c->key.source_w_reg, 0)); reg_index += c->key.nr_payload_regs; /* constants */ @@ -340,31 +342,47 @@ static void prealloc_reg(struct brw_wm_compile *c) } } /* number of constant regs used (each reg is float[8]) */ - c->nr_creg = 2 * ((4 * nr_params + 15) / 16); - reg_index += c->nr_creg; + c->nr_creg = ALIGN(nr_params, 2) / 2; + reg_index += c->nr_creg; } } - /* fragment shader inputs */ - for (i = 0; i < VERT_RESULT_MAX; i++) { - int fp_input; - - if (i >= VERT_RESULT_VAR0) - fp_input = i - VERT_RESULT_VAR0 + FRAG_ATTRIB_VAR0; - else if (i <= VERT_RESULT_TEX7) - fp_input = i; - else - fp_input = -1; + /* fragment shader inputs: One 2-reg pair of interpolation + * coefficients for each vec4 to be set up. + */ + if (intel->gen >= 6) { + for (i = 0; i < FRAG_ATTRIB_MAX; i++) { + if (!(c->fp->program.Base.InputsRead & BITFIELD64_BIT(i))) + continue; - if (fp_input >= 0 && inputs & (1 << fp_input)) { - urb_read_length = reg_index; reg = brw_vec8_grf(reg_index, 0); - for (j = 0; j < 4; j++) - set_reg(c, PROGRAM_PAYLOAD, fp_input, j, reg); - } - if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) { + for (j = 0; j < 4; j++) { + set_reg(c, PROGRAM_PAYLOAD, i, j, reg); + } reg_index += 2; } + urb_read_length = reg_index; + } else { + for (i = 0; i < VERT_RESULT_MAX; i++) { + int fp_input; + + if (i >= VERT_RESULT_VAR0) + fp_input = i - VERT_RESULT_VAR0 + FRAG_ATTRIB_VAR0; + else if (i <= VERT_RESULT_TEX7) + fp_input = i; + else + fp_input = -1; + + if (fp_input >= 0 && inputs & (1 << fp_input)) { + urb_read_length = reg_index; + reg = brw_vec8_grf(reg_index, 0); + for (j = 0; j < 4; j++) + set_reg(c, PROGRAM_PAYLOAD, fp_input, j, reg); + } + if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) { + reg_index += 2; + } + } } c->prog_data.first_curbe_grf = c->key.nr_payload_regs; @@ -614,21 +632,6 @@ static void emit_arl(struct brw_wm_compile *c, brw_set_saturate(p, 0); } -/** - * For GLSL shaders, this KIL will be unconditional. - * It may be contained inside an IF/ENDIF structure of course. 
- */ -static void emit_kil(struct brw_wm_compile *c) -{ - struct brw_compile *p = &c->func; - struct brw_reg depth = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW); - brw_push_insn_state(p); - brw_set_mask_control(p, BRW_MASK_DISABLE); - brw_NOT(p, c->emit_mask_reg, brw_mask_reg(1)); /* IMASK */ - brw_AND(p, depth, c->emit_mask_reg, depth); - brw_pop_insn_state(p); -} - static INLINE struct brw_reg high_words( struct brw_reg reg ) { return stride( suboffset( retype( reg, BRW_REGISTER_TYPE_W ), 1 ), @@ -708,6 +711,9 @@ static void brw_wm_emit_glsl(struct brw_context *brw, struct brw_wm_compile *c) brw_set_compression_control(p, BRW_COMPRESSION_NONE); brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack)); + if (intel->gen >= 6) + brw_set_acc_write_control(p, 1); + for (i = 0; i < c->nr_fp_insns; i++) { const struct prog_instruction *inst = &c->prog_instructions[i]; int dst_flags; @@ -898,7 +904,7 @@ static void brw_wm_emit_glsl(struct brw_context *brw, struct brw_wm_compile *c) c->fp->program.Base.SamplerUnits[inst->TexSrcUnit]); break; case OPCODE_KIL_NV: - emit_kil(c); + emit_kil_nv(c); break; case OPCODE_IF: assert(if_depth < MAX_IF_DEPTH); diff --git a/src/mesa/drivers/dri/i965/brw_wm_iz.c b/src/mesa/drivers/dri/i965/brw_wm_iz.c index 8505ef19510..62e556698ba 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_iz.c +++ b/src/mesa/drivers/dri/i965/brw_wm_iz.c @@ -120,24 +120,38 @@ const struct { * \param line_aa AA_NEVER, AA_ALWAYS or AA_SOMETIMES * \param lookup bitmask of IZ_* flags */ -void brw_wm_lookup_iz( GLuint line_aa, +void brw_wm_lookup_iz( struct intel_context *intel, + GLuint line_aa, GLuint lookup, GLboolean ps_uses_depth, struct brw_wm_prog_key *key ) { GLuint reg = 2; + GLboolean kill_stats_promoted_workaround = GL_FALSE; assert (lookup < IZ_BIT_MAX); - + + /* Crazy workaround in the windowizer, which we need to track in + * our register allocation and render target writes. See the "If + * statistics are enabled..." paragraph of 11.5.3.2: Early Depth + * Test Cases [Pre-DevGT] of the 3D Pipeline - Windower B-Spec. + */ + if (intel->stats_wm && + (lookup & IZ_PS_KILL_ALPHATEST_BIT) && + wm_iz_table[lookup].mode == P) { + kill_stats_promoted_workaround = GL_TRUE; + } + if (lookup & IZ_PS_COMPUTES_DEPTH_BIT) key->computes_depth = 1; - if (wm_iz_table[lookup].sd_present || ps_uses_depth) { + if (wm_iz_table[lookup].sd_present || ps_uses_depth || + kill_stats_promoted_workaround) { key->source_depth_reg = reg; reg += 2; } - if (wm_iz_table[lookup].sd_to_rt) + if (wm_iz_table[lookup].sd_to_rt || kill_stats_promoted_workaround) key->source_depth_to_render_target = 1; if (wm_iz_table[lookup].ds_present || line_aa != AA_NEVER) { diff --git a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c index 1fc802cfa65..fea96d35381 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_sampler_state.c @@ -100,10 +100,13 @@ struct wm_sampler_key { * Sets the sampler state for a single unit based off of the sampler key * entry. 
*/ -static void brw_update_sampler_state(struct wm_sampler_entry *key, +static void brw_update_sampler_state(struct brw_context *brw, + struct wm_sampler_entry *key, drm_intel_bo *sdc_bo, struct brw_sampler_state *sampler) { + struct intel_context *intel = &brw->intel; + memset(sampler, 0, sizeof(*sampler)); switch (key->minfilter) { @@ -163,6 +166,10 @@ static void brw_update_sampler_state(struct wm_sampler_entry *key, sampler->ss1.s_wrap_mode = translate_wrap_mode(key->wrap_s); sampler->ss1.t_wrap_mode = translate_wrap_mode(key->wrap_t); + if (intel->gen >= 6 && + sampler->ss0.min_filter != sampler->ss0.mag_filter) + sampler->ss0.min_mag_neq = 1; + /* Cube-maps on 965 and later must use the same wrap mode for all 3 * coordinate dimensions. Futher, only CUBE and CLAMP are valid. */ @@ -226,7 +233,7 @@ static void brw_wm_sampler_populate_key(struct brw_context *brw, struct wm_sampler_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; int unit; char *last_entry_end = ((char*)&key->sampler_count) + sizeof(key->sampler_count); @@ -294,7 +301,7 @@ brw_wm_sampler_populate_key(struct brw_context *brw, */ static void upload_wm_samplers( struct brw_context *brw ) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct wm_sampler_key key; int i, sampler_key_size; @@ -329,7 +336,7 @@ static void upload_wm_samplers( struct brw_context *brw ) if (brw->wm.sdc_bo[i] == NULL) continue; - brw_update_sampler_state(&key.sampler[i], brw->wm.sdc_bo[i], + brw_update_sampler_state(brw, &key.sampler[i], brw->wm.sdc_bo[i], &sampler[i]); } diff --git a/src/mesa/drivers/dri/i965/brw_wm_state.c b/src/mesa/drivers/dri/i965/brw_wm_state.c index 6699d0a73e6..817adefb0cc 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_state.c @@ -58,7 +58,7 @@ struct brw_wm_unit_key { static void wm_unit_populate_key(struct brw_context *brw, struct brw_wm_unit_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const struct gl_fragment_program *fp = brw->fragment_program; const struct brw_fragment_program *bfp = (struct brw_fragment_program *) fp; struct intel_context *intel = &brw->intel; @@ -108,16 +108,11 @@ wm_unit_populate_key(struct brw_context *brw, struct brw_wm_unit_key *key) * 8-wide. 
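brw_wm_sampler_populate_key, wm_unit_populate_key and the gen6 *_populate_key helpers that follow all use the same approach: collect the GL state that a piece of hardware state depends on into a flat key struct, and only rebuild the uploaded object when the key changes. A generic sketch of that pattern with a made-up single-entry cache; none of these names are the driver's:

#include <stdlib.h>
#include <string.h>

struct sampler_key {             /* everything the HW sampler depends on */
   unsigned min_filter, mag_filter;
   unsigned wrap_s, wrap_t;
};

struct cached_sampler {          /* zero-initialize before first use */
   struct sampler_key key;
   void *hw_state;               /* stands in for the GPU buffer object */
   int valid;
};

/* Rebuild the hardware packet only when the key actually changed. */
static void *get_sampler_state(struct cached_sampler *cache,
                               const struct sampler_key *key)
{
   if (cache->valid && memcmp(&cache->key, key, sizeof(*key)) == 0)
      return cache->hw_state;    /* hit: reuse the previous upload */

   free(cache->hw_state);
   cache->hw_state = calloc(1, 64);    /* pretend: fill the HW packet here */
   cache->key = *key;
   cache->valid = 1;
   return cache->hw_state;
}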
*/ if (ctx->Shader.CurrentProgram) { - int i; + struct brw_shader *shader = (struct brw_shader *) + ctx->Shader.CurrentProgram->_LinkedShaders[MESA_SHADER_FRAGMENT]; - for (i = 0; i < ctx->Shader.CurrentProgram->_NumLinkedShaders; i++) { - struct brw_shader *shader = - (struct brw_shader *)ctx->Shader.CurrentProgram->_LinkedShaders[i];; - - if (shader->base.Type == GL_FRAGMENT_SHADER && - shader->ir != NULL) { - key->is_glsl = GL_TRUE; - } + if (shader != NULL && shader->ir != NULL) { + key->is_glsl = GL_TRUE; } } diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c index 17b016b569b..5588702afc3 100644 --- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c +++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c @@ -89,6 +89,18 @@ static GLuint translate_tex_format( gl_format mesa_format, case MESA_FORMAT_AL1616: return BRW_SURFACEFORMAT_L16A16_UNORM; + case MESA_FORMAT_R8: + return BRW_SURFACEFORMAT_R8_UNORM; + + case MESA_FORMAT_R16: + return BRW_SURFACEFORMAT_R16_UNORM; + + case MESA_FORMAT_RG88: + return BRW_SURFACEFORMAT_R8G8_UNORM; + + case MESA_FORMAT_RG1616: + return BRW_SURFACEFORMAT_R16G16_UNORM; + case MESA_FORMAT_RGB888: assert(0); /* not supported for sampling */ return BRW_SURFACEFORMAT_R8G8B8_UNORM; @@ -197,7 +209,7 @@ brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling) } static void -brw_update_texture_surface( GLcontext *ctx, GLuint unit ) +brw_update_texture_surface( struct gl_context *ctx, GLuint unit ) { struct brw_context *brw = brw_context(ctx); struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current; @@ -303,7 +315,7 @@ brw_create_constant_surface(struct brw_context *brw, static void prepare_wm_constants(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; struct brw_fragment_program *fp = (struct brw_fragment_program *) brw->fragment_program; @@ -395,7 +407,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw, unsigned int unit) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; drm_intel_bo *region_bo = NULL; struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_region *region = irb ? 
irb->region : NULL; @@ -429,6 +441,9 @@ brw_update_renderbuffer_surface(struct brw_context *brw, case MESA_FORMAT_XRGB8888: key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM; break; + case MESA_FORMAT_SARGB8: + key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB; + break; case MESA_FORMAT_RGB565: key.surface_format = BRW_SURFACEFORMAT_B5G6R5_UNORM; break; @@ -441,6 +456,18 @@ brw_update_renderbuffer_surface(struct brw_context *brw, case MESA_FORMAT_A8: key.surface_format = BRW_SURFACEFORMAT_A8_UNORM; break; + case MESA_FORMAT_R8: + key.surface_format = BRW_SURFACEFORMAT_R8_UNORM; + break; + case MESA_FORMAT_R16: + key.surface_format = BRW_SURFACEFORMAT_R16_UNORM; + break; + case MESA_FORMAT_RG88: + key.surface_format = BRW_SURFACEFORMAT_R8G8_UNORM; + break; + case MESA_FORMAT_RG1616: + key.surface_format = BRW_SURFACEFORMAT_R16G16_UNORM; + break; default: _mesa_problem(ctx, "Bad renderbuffer format: %d\n", irb->Base.Format); } @@ -545,7 +572,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw, static void prepare_wm_surfaces(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; int i; int nr_surfaces = 0; @@ -592,7 +619,7 @@ prepare_wm_surfaces(struct brw_context *brw) static void upload_wm_surfaces(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; GLuint i; /* _NEW_BUFFERS | _NEW_COLOR */ diff --git a/src/mesa/drivers/dri/i965/gen6_cc.c b/src/mesa/drivers/dri/i965/gen6_cc.c index 26f1070a164..4a98e268624 100644 --- a/src/mesa/drivers/dri/i965/gen6_cc.c +++ b/src/mesa/drivers/dri/i965/gen6_cc.c @@ -49,7 +49,7 @@ static void blend_state_populate_key(struct brw_context *brw, struct gen6_blend_state_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); @@ -181,7 +181,7 @@ static void color_calc_state_populate_key(struct brw_context *brw, struct gen6_color_calc_state_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; memset(key, 0, sizeof(*key)); diff --git a/src/mesa/drivers/dri/i965/gen6_clip_state.c b/src/mesa/drivers/dri/i965/gen6_clip_state.c index acc4b7f1019..bf53146f11f 100644 --- a/src/mesa/drivers/dri/i965/gen6_clip_state.c +++ b/src/mesa/drivers/dri/i965/gen6_clip_state.c @@ -34,7 +34,7 @@ static void upload_clip_state(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; uint32_t depth_clamp = 0; uint32_t provoking; @@ -59,7 +59,7 @@ upload_clip_state(struct brw_context *brw) GEN6_CLIP_XY_TEST | depth_clamp | provoking); - OUT_BATCH(0); + OUT_BATCH(GEN6_CLIP_FORCE_ZERO_RTAINDEX); ADVANCE_BATCH(); intel_batchbuffer_emit_mi_flush(intel->batch); diff --git a/src/mesa/drivers/dri/i965/gen6_depthstencil.c b/src/mesa/drivers/dri/i965/gen6_depthstencil.c index d9eca9af354..96e6eade6b7 100644 --- a/src/mesa/drivers/dri/i965/gen6_depthstencil.c +++ b/src/mesa/drivers/dri/i965/gen6_depthstencil.c @@ -41,7 +41,7 @@ static void depth_stencil_state_populate_key(struct brw_context *brw, struct brw_depth_stencil_state_key *key) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const unsigned back = ctx->Stencil._BackFace; memset(key, 0, sizeof(*key)); diff --git a/src/mesa/drivers/dri/i965/gen6_scissor_state.c b/src/mesa/drivers/dri/i965/gen6_scissor_state.c index 34a9dc234c2..5684c2e44c8 100644 --- 
a/src/mesa/drivers/dri/i965/gen6_scissor_state.c +++ b/src/mesa/drivers/dri/i965/gen6_scissor_state.c @@ -33,9 +33,9 @@ static void prepare_scissor_state(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const GLboolean render_to_fbo = (ctx->DrawBuffer->Name != 0); - struct gen6_scissor_state scissor; + struct gen6_scissor_rect scissor; /* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */ @@ -84,7 +84,6 @@ static void upload_scissor_state_pointers(struct brw_context *brw) OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0); ADVANCE_BATCH(); - intel_batchbuffer_emit_mi_flush(intel->batch); } diff --git a/src/mesa/drivers/dri/i965/gen6_sf_state.c b/src/mesa/drivers/dri/i965/gen6_sf_state.c index 6820ca3abf4..377b3a41bdd 100644 --- a/src/mesa/drivers/dri/i965/gen6_sf_state.c +++ b/src/mesa/drivers/dri/i965/gen6_sf_state.c @@ -33,20 +33,29 @@ #include "intel_batchbuffer.h" static uint32_t -get_attr_override(struct brw_context *brw, int attr) +get_attr_override(struct brw_context *brw, int fs_attr) { - uint32_t attr_override; - int attr_index = 0, i; + int attr_index = 0, i, vs_attr; + + if (fs_attr <= FRAG_ATTRIB_TEX7) + vs_attr = fs_attr; + else if (fs_attr == FRAG_ATTRIB_FACE) + vs_attr = 0; /* XXX */ + else if (fs_attr == FRAG_ATTRIB_PNTC) + vs_attr = 0; /* XXX */ + else { + assert(fs_attr >= FRAG_ATTRIB_VAR0); + vs_attr = fs_attr - FRAG_ATTRIB_VAR0 + VERT_RESULT_VAR0; + } /* Find the source index (0 = first attribute after the 4D position) * for this output attribute. attr is currently a VERT_RESULT_* but should * be FRAG_ATTRIB_*. */ - for (i = 0; i < attr; i++) { + for (i = 1; i < vs_attr; i++) { if (brw->vs.prog_data->outputs_written & BITFIELD64_BIT(i)) attr_index++; } - attr_override = attr_index; return attr_index; } @@ -55,18 +64,18 @@ static void upload_sf_state(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; /* CACHE_NEW_VS_PROG */ uint32_t num_inputs = brw_count_bits(brw->vs.prog_data->outputs_written); - /* This should probably be FS inputs read */ - uint32_t num_outputs = brw_count_bits(brw->vs.prog_data->outputs_written); - uint32_t dw1, dw2, dw3, dw4; + uint32_t num_outputs = brw_count_bits(brw->fragment_program->Base.InputsRead); + uint32_t dw1, dw2, dw3, dw4, dw16; int i; /* _NEW_BUFFER */ GLboolean render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0; int attr = 0; dw1 = + GEN6_SF_SWIZZLE_ENABLE | num_outputs << GEN6_SF_NUM_OUTPUTS_SHIFT | (num_inputs + 1) / 2 << GEN6_SF_URB_ENTRY_READ_LENGTH_SHIFT | 1 << GEN6_SF_URB_ENTRY_READ_OFFSET_SHIFT; @@ -74,11 +83,15 @@ upload_sf_state(struct brw_context *brw) GEN6_SF_STATISTICS_ENABLE; dw3 = 0; dw4 = 0; + dw16 = 0; /* _NEW_POLYGON */ if ((ctx->Polygon.FrontFace == GL_CCW) ^ render_to_fbo) dw2 |= GEN6_SF_WINDING_CCW; + if (ctx->Polygon.OffsetFill) + dw2 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_SOLID; + /* _NEW_SCISSOR */ if (ctx->Scissor.Enabled) dw3 |= GEN6_SF_SCISSOR_ENABLE; @@ -113,12 +126,13 @@ upload_sf_state(struct brw_context *brw) } /* _NEW_POINT */ - if (ctx->Point._Attenuated) + if (!(ctx->VertexProgram.PointSizeEnabled || + ctx->Point._Attenuated)) dw4 |= GEN6_SF_USE_STATE_POINT_WIDTH; dw4 |= U_FIXED(CLAMP(ctx->Point.Size, 0.125, 225.875), 3) << GEN6_SF_POINT_WIDTH_SHIFT; - if (render_to_fbo) + if (ctx->Point.SpriteOrigin == GL_LOWER_LEFT) dw1 |= GEN6_SF_POINT_SPRITE_LOWERLEFT; /* _NEW_LIGHT */ @@ -132,6 +146,13 @@ upload_sf_state(struct brw_context *brw) (1 << 
GEN6_SF_TRIFAN_PROVOKE_SHIFT); } + if (ctx->Point.PointSprite) { + for (i = 0; i < 8; i++) { + if (ctx->Point.CoordReplace[i]) + dw16 |= (1 << i); + } + } + BEGIN_BATCH(20); OUT_BATCH(CMD_3D_SF_STATE << 16 | (20 - 2)); OUT_BATCH(dw1); @@ -144,11 +165,8 @@ upload_sf_state(struct brw_context *brw) for (i = 0; i < 8; i++) { uint32_t attr_overrides = 0; - /* These should be generating FS inputs read instead of VS - * outputs written - */ for (; attr < 64; attr++) { - if (brw->vs.prog_data->outputs_written & BITFIELD64_BIT(attr)) { + if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(attr)) { attr_overrides |= get_attr_override(brw, attr); attr++; break; @@ -156,7 +174,7 @@ upload_sf_state(struct brw_context *brw) } for (; attr < 64; attr++) { - if (brw->vs.prog_data->outputs_written & BITFIELD64_BIT(attr)) { + if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(attr)) { attr_overrides |= get_attr_override(brw, attr) << 16; attr++; break; @@ -164,7 +182,7 @@ upload_sf_state(struct brw_context *brw) } OUT_BATCH(attr_overrides); } - OUT_BATCH(0); /* point sprite texcoord bitmask */ + OUT_BATCH(dw16); /* point sprite texcoord bitmask */ OUT_BATCH(0); /* constant interp bitmask */ OUT_BATCH(0); /* wrapshortest enables 0-7 */ OUT_BATCH(0); /* wrapshortest enables 8-15 */ diff --git a/src/mesa/drivers/dri/i965/gen6_viewport_state.c b/src/mesa/drivers/dri/i965/gen6_viewport_state.c index 301c68e7f9e..b515e7712ed 100644 --- a/src/mesa/drivers/dri/i965/gen6_viewport_state.c +++ b/src/mesa/drivers/dri/i965/gen6_viewport_state.c @@ -65,7 +65,7 @@ const struct brw_tracked_state gen6_clip_vp = { static void prepare_sf_vp(struct brw_context *brw) { - GLcontext *ctx = &brw->intel.ctx; + struct gl_context *ctx = &brw->intel.ctx; const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF; struct brw_sf_viewport sfv; GLfloat y_scale, y_bias; @@ -107,7 +107,9 @@ const struct brw_tracked_state gen6_sf_vp = { static void prepare_viewport_state_pointers(struct brw_context *brw) { - brw_add_validated_bo(brw, brw->sf.state_bo); + brw_add_validated_bo(brw, brw->clip.vp_bo); + brw_add_validated_bo(brw, brw->sf.vp_bo); + brw_add_validated_bo(brw, brw->cc.vp_bo); } static void upload_viewport_state_pointers(struct brw_context *brw) diff --git a/src/mesa/drivers/dri/i965/gen6_vs_state.c b/src/mesa/drivers/dri/i965/gen6_vs_state.c index 4080a9dedfd..3eca4e971b1 100644 --- a/src/mesa/drivers/dri/i965/gen6_vs_state.c +++ b/src/mesa/drivers/dri/i965/gen6_vs_state.c @@ -37,7 +37,7 @@ static void upload_vs_state(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; const struct brw_vertex_program *vp = brw_vertex_program_const(brw->vertex_program); unsigned int nr_params = vp->program.Base.Parameters->NumParameters; @@ -93,7 +93,7 @@ upload_vs_state(struct brw_context *brw) BEGIN_BATCH(6); OUT_BATCH(CMD_3D_VS_STATE << 16 | (6 - 2)); OUT_RELOC(brw->vs.prog_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0); - OUT_BATCH((0 << GEN6_VS_SAMPLER_COUNT_SHIFT) | + OUT_BATCH(GEN6_VS_SPF_MODE | (0 << GEN6_VS_SAMPLER_COUNT_SHIFT) | (brw->vs.nr_surfaces << GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT)); OUT_BATCH(0); /* scratch space base offset */ OUT_BATCH((1 << GEN6_VS_DISPATCH_START_GRF_SHIFT) | diff --git a/src/mesa/drivers/dri/i965/gen6_wm_state.c b/src/mesa/drivers/dri/i965/gen6_wm_state.c index 2cd640de175..58102666354 100644 --- a/src/mesa/drivers/dri/i965/gen6_wm_state.c +++ b/src/mesa/drivers/dri/i965/gen6_wm_state.c @@ -37,7 +37,7 @@ 
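In the gen6_sf_state.c changes above, get_attr_override finds the source slot for each fragment input by mapping it to the matching vertex result and counting how many lower-numbered VS outputs were actually written, starting at 1 so the 4D position is skipped. The same count can be expressed with a popcount over the outputs_written bitmask; a short sketch, where BIT64 is a local helper and the popcount builtin assumes GCC/Clang:

#include <stdint.h>

#define BIT64(b) ((uint64_t)1 << (b))

/* Index of vs_attr among the VS outputs that were written, not counting
 * slot 0 (position), which the SF unit consumes itself. */
static int attr_source_index(uint64_t outputs_written, int vs_attr)
{
   uint64_t lower = outputs_written & (BIT64(vs_attr) - 1);

   lower &= ~BIT64(0);                       /* skip the position slot */
   return __builtin_popcountll(lower);       /* GCC/Clang builtin */
}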
static void prepare_wm_constants(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; const struct brw_fragment_program *fp = brw_fragment_program_const(brw->fragment_program); @@ -81,7 +81,7 @@ static void upload_wm_state(struct brw_context *brw) { struct intel_context *intel = &brw->intel; - GLcontext *ctx = &intel->ctx; + struct gl_context *ctx = &intel->ctx; const struct brw_fragment_program *fp = brw_fragment_program_const(brw->fragment_program); uint32_t dw2, dw4, dw5, dw6; @@ -151,8 +151,9 @@ upload_wm_state(struct brw_context *brw) if (fp->program.UsesKill || ctx->Color.AlphaEnabled) dw5 |= GEN6_WM_KILL_ENABLE; - /* This should probably be FS inputs read */ - dw6 |= brw_count_bits(brw->vs.prog_data->outputs_written) << + dw6 |= GEN6_WM_PERSPECTIVE_PIXEL_BARYCENTRIC; + + dw6 |= brw_count_bits(brw->fragment_program->Base.InputsRead) << GEN6_WM_NUM_SF_OUTPUTS_SHIFT; BEGIN_BATCH(9); @@ -172,7 +173,8 @@ upload_wm_state(struct brw_context *brw) const struct brw_tracked_state gen6_wm_state = { .dirty = { - .mesa = _NEW_LINE | _NEW_POLYGONSTIPPLE | _NEW_COLOR, + .mesa = (_NEW_LINE | _NEW_POLYGONSTIPPLE | _NEW_COLOR | + _NEW_PROGRAM_CONSTANTS), .brw = (BRW_NEW_CURBE_OFFSETS | BRW_NEW_FRAGMENT_PROGRAM | BRW_NEW_NR_WM_SURFACES | |
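The gen6_wm_state atom at the end of this hunk, like the other brw_tracked_state declarations touched above, ties its upload function to a set of dirty bits so it is only re-emitted when relevant Mesa or driver state changes (here _NEW_PROGRAM_CONSTANTS was added to the mesa mask). A simplified sketch of that dispatch idea; the real driver also separates prepare/emit phases and tracks cache flags, which this leaves out:

#include <stdint.h>
#include <stddef.h>

struct tracked_state {
   uint32_t mesa_dirty;              /* _NEW_* flags the atom depends on */
   uint32_t brw_dirty;               /* BRW_NEW_* / CACHE_NEW_* flags */
   void (*emit)(void *brw);
};

/* Run every atom whose dirty mask intersects the currently dirty bits. */
static void run_atoms(void *brw, uint32_t mesa_flags, uint32_t brw_flags,
                      const struct tracked_state *atoms, size_t n)
{
   for (size_t i = 0; i < n; i++) {
      if ((atoms[i].mesa_dirty & mesa_flags) ||
          (atoms[i].brw_dirty & brw_flags))
         atoms[i].emit(brw);
   }
}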