author     Eric Anholt <[email protected]>   2011-05-24 16:34:27 -0700
committer  Eric Anholt <[email protected]>   2011-05-27 08:51:39 -0700
commit     11dd9e9c0fcf9985b90ff4b63b2833345fece027 (patch)
tree       1c21b7988cd8d3e3b99a490e0d70829071c78dd2 /src/mesa/drivers/dri/i965
parent     b7b700aeb0eab2cae26a01d9db42feea969333c7 (diff)
i965/fs: Split the BRW native code emit to brw_fs_emit.cpp
This is all separate from the visitor and the optimization passes which feed into it.

Reviewed-by: Kenneth Graunke <[email protected]>
Diffstat (limited to 'src/mesa/drivers/dri/i965')
-rw-r--r--  src/mesa/drivers/dri/i965/Makefile          |   1
-rw-r--r--  src/mesa/drivers/dri/i965/brw_fs.cpp        | 839
-rw-r--r--  src/mesa/drivers/dri/i965/brw_fs_emit.cpp   | 875
3 files changed, 876 insertions(+), 839 deletions(-)
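For orientation, a rough sketch of how the fs_visitor methods divide between the two files after this split. The declarations below are illustrative only (the real ones presumably live in brw_fs.h, which this commit does not touch), but the method names are taken from the diff:

/* Illustrative sketch, not the real brw_fs.h. */
struct fs_inst;    /* one instruction of the FS LIR */
struct brw_reg;    /* a hardware register region */

class fs_visitor {
public:
   /* brw_fs.cpp: GLSL IR visiting and the optimization passes. */
   bool run();

   /* brw_fs_emit.cpp: lowering the fs_inst list to native EU code. */
   void generate_code();
   void generate_fb_write(fs_inst *inst);
   void generate_math(fs_inst *inst, struct brw_reg dst, struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src);
   /* ...plus generate_pixel_xy, generate_linterp, generate_ddx/ddy,
    * generate_discard, generate_spill/unspill and
    * generate_pull_constant_load, all moved verbatim below.
    */
};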
diff --git a/src/mesa/drivers/dri/i965/Makefile b/src/mesa/drivers/dri/i965/Makefile
index 32e96165994..9f27b8f98ec 100644
--- a/src/mesa/drivers/dri/i965/Makefile
+++ b/src/mesa/drivers/dri/i965/Makefile
@@ -116,6 +116,7 @@ C_SOURCES = \
CXX_SOURCES = \
brw_cubemap_normalize.cpp \
brw_fs.cpp \
+ brw_fs_emit.cpp \
brw_fs_channel_expressions.cpp \
brw_fs_reg_allocate.cpp \
brw_fs_schedule_instructions.cpp \
diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp
index cac5f48cc0c..9bc7b444657 100644
--- a/src/mesa/drivers/dri/i965/brw_fs.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs.cpp
@@ -47,7 +47,6 @@ extern "C" {
#include "../glsl/ir_print_visitor.h"
#define MAX_INSTRUCTION (1 << 30)
-static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
static int
type_size(const struct glsl_type *type)
@@ -2256,495 +2255,6 @@ fs_visitor::emit_fb_writes()
this->current_annotation = NULL;
}
-void
-fs_visitor::generate_fb_write(fs_inst *inst)
-{
- GLboolean eot = inst->eot;
- struct brw_reg implied_header;
-
- /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
- * move, here's g1.
- */
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-
- if (inst->header_present) {
- if (intel->gen >= 6) {
- brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
- brw_MOV(p,
- retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
- retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-
- if (inst->target > 0) {
- /* Set the render target index for choosing BLEND_STATE. */
- brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
- BRW_REGISTER_TYPE_UD),
- brw_imm_ud(inst->target));
- }
-
- implied_header = brw_null_reg();
- } else {
- implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
-
- brw_MOV(p,
- brw_message_reg(inst->base_mrf + 1),
- brw_vec8_grf(1, 0));
- }
- } else {
- implied_header = brw_null_reg();
- }
-
- brw_pop_insn_state(p);
-
- brw_fb_WRITE(p,
- c->dispatch_width,
- inst->base_mrf,
- implied_header,
- inst->target,
- inst->mlen,
- 0,
- eot,
- inst->header_present);
-}
-
-/* Computes the integer pixel x,y values from the origin.
- *
- * This is the basis of gl_FragCoord computation, but is also used
- * pre-gen6 for computing the deltas from v0 for computing
- * interpolation.
- */
-void
-fs_visitor::generate_pixel_xy(struct brw_reg dst, bool is_x)
-{
- struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
- struct brw_reg src;
- struct brw_reg deltas;
-
- if (is_x) {
- src = stride(suboffset(g1_uw, 4), 2, 4, 0);
- deltas = brw_imm_v(0x10101010);
- } else {
- src = stride(suboffset(g1_uw, 5), 2, 4, 0);
- deltas = brw_imm_v(0x11001100);
- }
-
- if (c->dispatch_width == 16) {
- dst = vec16(dst);
- }
-
- /* We do this 8 or 16-wide, but since the destination is UW we
- * don't do compression in the 16-wide case.
- */
- brw_push_insn_state(p);
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
- brw_ADD(p, dst, src, deltas);
- brw_pop_insn_state(p);
-}
-
-void
-fs_visitor::generate_linterp(fs_inst *inst,
- struct brw_reg dst, struct brw_reg *src)
-{
- struct brw_reg delta_x = src[0];
- struct brw_reg delta_y = src[1];
- struct brw_reg interp = src[2];
-
- if (brw->has_pln &&
- delta_y.nr == delta_x.nr + 1 &&
- (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
- brw_PLN(p, dst, interp, delta_x);
- } else {
- brw_LINE(p, brw_null_reg(), interp, delta_x);
- brw_MAC(p, dst, suboffset(interp, 1), delta_y);
- }
-}
-
-void
-fs_visitor::generate_math(fs_inst *inst,
- struct brw_reg dst, struct brw_reg *src)
-{
- int op;
-
- switch (inst->opcode) {
- case FS_OPCODE_RCP:
- op = BRW_MATH_FUNCTION_INV;
- break;
- case FS_OPCODE_RSQ:
- op = BRW_MATH_FUNCTION_RSQ;
- break;
- case FS_OPCODE_SQRT:
- op = BRW_MATH_FUNCTION_SQRT;
- break;
- case FS_OPCODE_EXP2:
- op = BRW_MATH_FUNCTION_EXP;
- break;
- case FS_OPCODE_LOG2:
- op = BRW_MATH_FUNCTION_LOG;
- break;
- case FS_OPCODE_POW:
- op = BRW_MATH_FUNCTION_POW;
- break;
- case FS_OPCODE_SIN:
- op = BRW_MATH_FUNCTION_SIN;
- break;
- case FS_OPCODE_COS:
- op = BRW_MATH_FUNCTION_COS;
- break;
- default:
- assert(!"not reached: unknown math function");
- op = 0;
- break;
- }
-
- if (intel->gen >= 6) {
- assert(inst->mlen == 0);
-
- if (inst->opcode == FS_OPCODE_POW) {
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
- brw_math2(p, dst, op, src[0], src[1]);
-
- if (c->dispatch_width == 16) {
- brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
- brw_math2(p, sechalf(dst), op, sechalf(src[0]), sechalf(src[1]));
- brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
- }
- } else {
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
- brw_math(p, dst,
- op,
- inst->saturate ? BRW_MATH_SATURATE_SATURATE :
- BRW_MATH_SATURATE_NONE,
- 0, src[0],
- BRW_MATH_DATA_VECTOR,
- BRW_MATH_PRECISION_FULL);
-
- if (c->dispatch_width == 16) {
- brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
- brw_math(p, sechalf(dst),
- op,
- inst->saturate ? BRW_MATH_SATURATE_SATURATE :
- BRW_MATH_SATURATE_NONE,
- 0, sechalf(src[0]),
- BRW_MATH_DATA_VECTOR,
- BRW_MATH_PRECISION_FULL);
- brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
- }
- }
- } else /* gen <= 5 */{
- assert(inst->mlen >= 1);
-
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
- brw_math(p, dst,
- op,
- inst->saturate ? BRW_MATH_SATURATE_SATURATE :
- BRW_MATH_SATURATE_NONE,
- inst->base_mrf, src[0],
- BRW_MATH_DATA_VECTOR,
- BRW_MATH_PRECISION_FULL);
-
- if (c->dispatch_width == 16) {
- brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
- brw_math(p, sechalf(dst),
- op,
- inst->saturate ? BRW_MATH_SATURATE_SATURATE :
- BRW_MATH_SATURATE_NONE,
- inst->base_mrf + 1, sechalf(src[0]),
- BRW_MATH_DATA_VECTOR,
- BRW_MATH_PRECISION_FULL);
-
- brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
- }
- }
-}
-
-void
-fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
-{
- int msg_type = -1;
- int rlen = 4;
- uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
-
- if (c->dispatch_width == 16)
- simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
-
- if (intel->gen >= 5) {
- switch (inst->opcode) {
- case FS_OPCODE_TEX:
- if (inst->shadow_compare) {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
- } else {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
- }
- break;
- case FS_OPCODE_TXB:
- if (inst->shadow_compare) {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
- } else {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
- }
- break;
- case FS_OPCODE_TXL:
- if (inst->shadow_compare) {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
- } else {
- msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
- }
- break;
- case FS_OPCODE_TXD:
- assert(!"TXD isn't supported on gen5+ yet.");
- break;
- }
- } else {
- switch (inst->opcode) {
- case FS_OPCODE_TEX:
- /* Note that G45 and older determines shadow compare and dispatch width
- * from message length for most messages.
- */
- assert(c->dispatch_width == 8);
- msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
- if (inst->shadow_compare) {
- assert(inst->mlen == 6);
- } else {
- assert(inst->mlen <= 4);
- }
- break;
- case FS_OPCODE_TXB:
- if (inst->shadow_compare) {
- assert(inst->mlen == 6);
- msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
- } else {
- assert(inst->mlen == 9);
- msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
- simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
- }
- break;
- case FS_OPCODE_TXL:
- if (inst->shadow_compare) {
- assert(inst->mlen == 6);
- msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
- } else {
- assert(inst->mlen == 9);
- msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
- simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
- }
- break;
- case FS_OPCODE_TXD:
- assert(!"TXD isn't supported on gen4 yet.");
- break;
- }
- }
- assert(msg_type != -1);
-
- if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
- rlen = 8;
- dst = vec16(dst);
- }
-
- brw_SAMPLE(p,
- retype(dst, BRW_REGISTER_TYPE_UW),
- inst->base_mrf,
- src,
- SURF_INDEX_TEXTURE(inst->sampler),
- inst->sampler,
- WRITEMASK_XYZW,
- msg_type,
- rlen,
- inst->mlen,
- 0,
- inst->header_present,
- simd_mode);
-}
-
-
-/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
- * looking like:
- *
- * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
- *
- * and we're trying to produce:
- *
- * DDX DDY
- * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
- * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
- * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
- * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
- * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
- * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
- * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
- * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
- *
- * and add another set of two more subspans if in 16-pixel dispatch mode.
- *
- * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
- * for each pair, and vertstride = 2 jumps us 2 elements after processing a
- * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
- * between each other. We could probably do it like ddx and swizzle the right
- * order later, but bail for now and just produce
- * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
- */
-void
-fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
-{
- struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
- BRW_REGISTER_TYPE_F,
- BRW_VERTICAL_STRIDE_2,
- BRW_WIDTH_2,
- BRW_HORIZONTAL_STRIDE_0,
- BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
- struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
- BRW_REGISTER_TYPE_F,
- BRW_VERTICAL_STRIDE_2,
- BRW_WIDTH_2,
- BRW_HORIZONTAL_STRIDE_0,
- BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
- brw_ADD(p, dst, src0, negate(src1));
-}
-
-void
-fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
-{
- struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
- BRW_REGISTER_TYPE_F,
- BRW_VERTICAL_STRIDE_4,
- BRW_WIDTH_4,
- BRW_HORIZONTAL_STRIDE_0,
- BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
- struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
- BRW_REGISTER_TYPE_F,
- BRW_VERTICAL_STRIDE_4,
- BRW_WIDTH_4,
- BRW_HORIZONTAL_STRIDE_0,
- BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
- brw_ADD(p, dst, src0, negate(src1));
-}
-
-void
-fs_visitor::generate_discard(fs_inst *inst)
-{
- struct brw_reg f0 = brw_flag_reg();
-
- if (intel->gen >= 6) {
- struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
- struct brw_reg some_register;
-
- /* As of gen6, we no longer have the mask register to look at,
- * so life gets a bit more complicated.
- */
-
- /* Load the flag register with all ones. */
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_MOV(p, f0, brw_imm_uw(0xffff));
- brw_pop_insn_state(p);
-
- /* Do a comparison that should always fail, to produce 0s in the flag
- * reg where we have active channels.
- */
- some_register = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
- brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
- BRW_CONDITIONAL_NZ, some_register, some_register);
-
- /* Undo CMP's whacking of predication*/
- brw_set_predicate_control(p, BRW_PREDICATE_NONE);
-
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_AND(p, g1, f0, g1);
- brw_pop_insn_state(p);
- } else {
- struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
-
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
-
- /* Unlike the 965, we have the mask reg, so we just need
- * somewhere to invert that (containing channels to be disabled)
- * so it can be ANDed with the mask of pixels still to be
- * written. Use the flag reg for consistency with gen6+.
- */
- brw_NOT(p, f0, brw_mask_reg(1)); /* IMASK */
- brw_AND(p, g0, f0, g0);
-
- brw_pop_insn_state(p);
- }
-}
-
-void
-fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
-{
- assert(inst->mlen != 0);
-
- brw_MOV(p,
- retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
- retype(src, BRW_REGISTER_TYPE_UD));
- brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
- inst->offset);
-}
-
-void
-fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
-{
- assert(inst->mlen != 0);
-
- /* Clear any post destination dependencies that would be ignored by
- * the block read. See the B-Spec for pre-gen5 send instruction.
- *
- * This could use a better solution, since texture sampling and
- * math reads could potentially run into it as well -- anywhere
- * that we have a SEND with a destination that is a register that
- * was written but not read within the last N instructions (what's
- * N? unsure). This is rare because of dead code elimination, but
- * not impossible.
- */
- if (intel->gen == 4 && !intel->is_g4x)
- brw_MOV(p, brw_null_reg(), dst);
-
- brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
- inst->offset);
-
- if (intel->gen == 4 && !intel->is_g4x) {
- /* gen4 errata: destination from a send can't be used as a
- * destination until it's been read. Just read it so we don't
- * have to worry.
- */
- brw_MOV(p, brw_null_reg(), dst);
- }
-}
-
-
-void
-fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
-{
- assert(inst->mlen != 0);
-
- /* Clear any post destination dependencies that would be ignored by
- * the block read. See the B-Spec for pre-gen5 send instruction.
- *
- * This could use a better solution, since texture sampling and
- * math reads could potentially run into it as well -- anywhere
- * that we have a SEND with a destination that is a register that
- * was written but not read within the last N instructions (what's
- * N? unsure). This is rare because of dead code elimination, but
- * not impossible.
- */
- if (intel->gen == 4 && !intel->is_g4x)
- brw_MOV(p, brw_null_reg(), dst);
-
- brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
- inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
-
- if (intel->gen == 4 && !intel->is_g4x) {
- /* gen4 errata: destination from a send can't be used as a
- * destination until it's been read. Just read it so we don't
- * have to worry.
- */
- brw_MOV(p, brw_null_reg(), dst);
- }
-}
-
/**
* To be called after the last _mesa_add_state_reference() call, to
* set up prog_data.param[] for assign_curb_setup() and
@@ -3645,355 +3155,6 @@ fs_visitor::virtual_grf_interferes(int a, int b)
return start < end;
}
-static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg)
-{
- struct brw_reg brw_reg;
-
- switch (reg->file) {
- case GRF:
- case ARF:
- case MRF:
- if (reg->smear == -1) {
- brw_reg = brw_vec8_reg(reg->file,
- reg->hw_reg, 0);
- } else {
- brw_reg = brw_vec1_reg(reg->file,
- reg->hw_reg, reg->smear);
- }
- brw_reg = retype(brw_reg, reg->type);
- if (reg->sechalf)
- brw_reg = sechalf(brw_reg);
- break;
- case IMM:
- switch (reg->type) {
- case BRW_REGISTER_TYPE_F:
- brw_reg = brw_imm_f(reg->imm.f);
- break;
- case BRW_REGISTER_TYPE_D:
- brw_reg = brw_imm_d(reg->imm.i);
- break;
- case BRW_REGISTER_TYPE_UD:
- brw_reg = brw_imm_ud(reg->imm.u);
- break;
- default:
- assert(!"not reached");
- brw_reg = brw_null_reg();
- break;
- }
- break;
- case FIXED_HW_REG:
- brw_reg = reg->fixed_hw_reg;
- break;
- case BAD_FILE:
- /* Probably unused. */
- brw_reg = brw_null_reg();
- break;
- case UNIFORM:
- assert(!"not reached");
- brw_reg = brw_null_reg();
- break;
- default:
- assert(!"not reached");
- brw_reg = brw_null_reg();
- break;
- }
- if (reg->abs)
- brw_reg = brw_abs(brw_reg);
- if (reg->negate)
- brw_reg = negate(brw_reg);
-
- return brw_reg;
-}
-
-void
-fs_visitor::generate_code()
-{
- int last_native_inst = p->nr_insn;
- const char *last_annotation_string = NULL;
- ir_instruction *last_annotation_ir = NULL;
-
- int loop_stack_array_size = 16;
- int loop_stack_depth = 0;
- brw_instruction **loop_stack =
- rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
- int *if_depth_in_loop =
- rzalloc_array(this->mem_ctx, int, loop_stack_array_size);
-
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- printf("Native code for fragment shader %d (%d-wide dispatch):\n",
- ctx->Shader.CurrentFragmentProgram->Name, c->dispatch_width);
- }
-
- foreach_iter(exec_list_iterator, iter, this->instructions) {
- fs_inst *inst = (fs_inst *)iter.get();
- struct brw_reg src[3], dst;
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- if (last_annotation_ir != inst->ir) {
- last_annotation_ir = inst->ir;
- if (last_annotation_ir) {
- printf(" ");
- last_annotation_ir->print();
- printf("\n");
- }
- }
- if (last_annotation_string != inst->annotation) {
- last_annotation_string = inst->annotation;
- if (last_annotation_string)
- printf(" %s\n", last_annotation_string);
- }
- }
-
- for (unsigned int i = 0; i < 3; i++) {
- src[i] = brw_reg_from_fs_reg(&inst->src[i]);
- }
- dst = brw_reg_from_fs_reg(&inst->dst);
-
- brw_set_conditionalmod(p, inst->conditional_mod);
- brw_set_predicate_control(p, inst->predicated);
- brw_set_predicate_inverse(p, inst->predicate_inverse);
- brw_set_saturate(p, inst->saturate);
-
- if (inst->force_uncompressed || c->dispatch_width == 8) {
- brw_set_compression_control(p, BRW_COMPRESSION_NONE);
- } else if (inst->force_sechalf) {
- brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
- } else {
- brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
- }
-
- switch (inst->opcode) {
- case BRW_OPCODE_MOV:
- brw_MOV(p, dst, src[0]);
- break;
- case BRW_OPCODE_ADD:
- brw_ADD(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_MUL:
- brw_MUL(p, dst, src[0], src[1]);
- break;
-
- case BRW_OPCODE_FRC:
- brw_FRC(p, dst, src[0]);
- break;
- case BRW_OPCODE_RNDD:
- brw_RNDD(p, dst, src[0]);
- break;
- case BRW_OPCODE_RNDE:
- brw_RNDE(p, dst, src[0]);
- break;
- case BRW_OPCODE_RNDZ:
- brw_RNDZ(p, dst, src[0]);
- break;
-
- case BRW_OPCODE_AND:
- brw_AND(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_OR:
- brw_OR(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_XOR:
- brw_XOR(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_NOT:
- brw_NOT(p, dst, src[0]);
- break;
- case BRW_OPCODE_ASR:
- brw_ASR(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_SHR:
- brw_SHR(p, dst, src[0], src[1]);
- break;
- case BRW_OPCODE_SHL:
- brw_SHL(p, dst, src[0], src[1]);
- break;
-
- case BRW_OPCODE_CMP:
- brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
- break;
- case BRW_OPCODE_SEL:
- brw_SEL(p, dst, src[0], src[1]);
- break;
-
- case BRW_OPCODE_IF:
- if (inst->src[0].file != BAD_FILE) {
- /* The instruction has an embedded compare (only allowed on gen6) */
- assert(intel->gen == 6);
- gen6_IF(p, inst->conditional_mod, src[0], src[1]);
- } else {
- brw_IF(p, c->dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
- }
- if_depth_in_loop[loop_stack_depth]++;
- break;
-
- case BRW_OPCODE_ELSE:
- brw_ELSE(p);
- break;
- case BRW_OPCODE_ENDIF:
- brw_ENDIF(p);
- if_depth_in_loop[loop_stack_depth]--;
- break;
-
- case BRW_OPCODE_DO:
- loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
- if (loop_stack_array_size <= loop_stack_depth) {
- loop_stack_array_size *= 2;
- loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
- loop_stack_array_size);
- if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
- loop_stack_array_size);
- }
- if_depth_in_loop[loop_stack_depth] = 0;
- break;
-
- case BRW_OPCODE_BREAK:
- brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
- brw_set_predicate_control(p, BRW_PREDICATE_NONE);
- break;
- case BRW_OPCODE_CONTINUE:
- /* FINISHME: We need to write the loop instruction support still. */
- if (intel->gen >= 6)
- gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
- else
- brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
- brw_set_predicate_control(p, BRW_PREDICATE_NONE);
- break;
-
- case BRW_OPCODE_WHILE: {
- struct brw_instruction *inst0, *inst1;
- GLuint br = 1;
-
- if (intel->gen >= 5)
- br = 2;
-
- assert(loop_stack_depth > 0);
- loop_stack_depth--;
- inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
- if (intel->gen < 6) {
- /* patch all the BREAK/CONT instructions from last BGNLOOP */
- while (inst0 > loop_stack[loop_stack_depth]) {
- inst0--;
- if (inst0->header.opcode == BRW_OPCODE_BREAK &&
- inst0->bits3.if_else.jump_count == 0) {
- inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
- }
- else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
- inst0->bits3.if_else.jump_count == 0) {
- inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
- }
- }
- }
- }
- break;
-
- case FS_OPCODE_RCP:
- case FS_OPCODE_RSQ:
- case FS_OPCODE_SQRT:
- case FS_OPCODE_EXP2:
- case FS_OPCODE_LOG2:
- case FS_OPCODE_POW:
- case FS_OPCODE_SIN:
- case FS_OPCODE_COS:
- generate_math(inst, dst, src);
- break;
- case FS_OPCODE_PIXEL_X:
- generate_pixel_xy(dst, true);
- break;
- case FS_OPCODE_PIXEL_Y:
- generate_pixel_xy(dst, false);
- break;
- case FS_OPCODE_CINTERP:
- brw_MOV(p, dst, src[0]);
- break;
- case FS_OPCODE_LINTERP:
- generate_linterp(inst, dst, src);
- break;
- case FS_OPCODE_TEX:
- case FS_OPCODE_TXB:
- case FS_OPCODE_TXD:
- case FS_OPCODE_TXL:
- generate_tex(inst, dst, src[0]);
- break;
- case FS_OPCODE_DISCARD:
- generate_discard(inst);
- break;
- case FS_OPCODE_DDX:
- generate_ddx(inst, dst, src[0]);
- break;
- case FS_OPCODE_DDY:
- generate_ddy(inst, dst, src[0]);
- break;
-
- case FS_OPCODE_SPILL:
- generate_spill(inst, src[0]);
- break;
-
- case FS_OPCODE_UNSPILL:
- generate_unspill(inst, dst);
- break;
-
- case FS_OPCODE_PULL_CONSTANT_LOAD:
- generate_pull_constant_load(inst, dst);
- break;
-
- case FS_OPCODE_FB_WRITE:
- generate_fb_write(inst);
- break;
- default:
- if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
- _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
- brw_opcodes[inst->opcode].name);
- } else {
- _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
- }
- fail("unsupported opcode in FS\n");
- }
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
- if (0) {
- printf("0x%08x 0x%08x 0x%08x 0x%08x ",
- ((uint32_t *)&p->store[i])[3],
- ((uint32_t *)&p->store[i])[2],
- ((uint32_t *)&p->store[i])[1],
- ((uint32_t *)&p->store[i])[0]);
- }
- brw_disasm(stdout, &p->store[i], intel->gen);
- }
- }
-
- last_native_inst = p->nr_insn;
- }
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- printf("\n");
- }
-
- ralloc_free(loop_stack);
- ralloc_free(if_depth_in_loop);
-
- brw_set_uip_jip(p);
-
- /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
- * emit issues, it doesn't get the jump distances into the output,
- * which is often something we want to debug. So this is here in
- * case you're doing that.
- */
- if (0) {
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- for (unsigned int i = 0; i < p->nr_insn; i++) {
- printf("0x%08x 0x%08x 0x%08x 0x%08x ",
- ((uint32_t *)&p->store[i])[3],
- ((uint32_t *)&p->store[i])[2],
- ((uint32_t *)&p->store[i])[1],
- ((uint32_t *)&p->store[i])[0]);
- brw_disasm(stdout, &p->store[i], intel->gen);
- }
- }
- }
-}
-
bool
fs_visitor::run()
{
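A side note on the two brw_imm_v() constants that generate_pixel_xy() (moved verbatim into the new file below) adds to the subspan origins held in g1: each packs eight 4-bit values, lowest nibble first, giving the per-pixel x and y offsets within each 2x2 subspan (tl, tr, bl, br). A standalone decode, illustrative only and not driver code (the values are all 0 or 1, so treating the nibbles as unsigned is fine here):

/* Decode the packed vector immediates used by generate_pixel_xy(). */
#include <cstdio>
#include <cstdint>

static void decode_imm_v(const char *label, uint32_t v)
{
   printf("%s:", label);
   for (int i = 0; i < 8; i++)
      printf(" %d", (int)((v >> (4 * i)) & 0xf));   /* lowest nibble first */
   printf("\n");
}

int main()
{
   decode_imm_v("x deltas (0x10101010)", 0x10101010);   /* 0 1 0 1 0 1 0 1 */
   decode_imm_v("y deltas (0x11001100)", 0x11001100);   /* 0 0 1 1 0 0 1 1 */
   return 0;
}

This matches the tl, tr, bl, br pixel order within a subspan: x alternates 0,1 and y goes 0,0,1,1.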
diff --git a/src/mesa/drivers/dri/i965/brw_fs_emit.cpp b/src/mesa/drivers/dri/i965/brw_fs_emit.cpp
new file mode 100644
index 00000000000..8958aeb8153
--- /dev/null
+++ b/src/mesa/drivers/dri/i965/brw_fs_emit.cpp
@@ -0,0 +1,875 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/** @file brw_fs_emit.cpp
+ *
+ * This file supports emitting code from the FS LIR to the actual
+ * native instructions.
+ */
+
+extern "C" {
+#include "main/macros.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+} /* extern "C" */
+
+#include "brw_fs.h"
+#include "../glsl/ir_print_visitor.h"
+
+void
+fs_visitor::generate_fb_write(fs_inst *inst)
+{
+ GLboolean eot = inst->eot;
+ struct brw_reg implied_header;
+
+ /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
+ * move, here's g1.
+ */
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ if (inst->header_present) {
+ if (intel->gen >= 6) {
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ brw_MOV(p,
+ retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ if (inst->target > 0) {
+ /* Set the render target index for choosing BLEND_STATE. */
+ brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
+ BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(inst->target));
+ }
+
+ implied_header = brw_null_reg();
+ } else {
+ implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
+
+ brw_MOV(p,
+ brw_message_reg(inst->base_mrf + 1),
+ brw_vec8_grf(1, 0));
+ }
+ } else {
+ implied_header = brw_null_reg();
+ }
+
+ brw_pop_insn_state(p);
+
+ brw_fb_WRITE(p,
+ c->dispatch_width,
+ inst->base_mrf,
+ implied_header,
+ inst->target,
+ inst->mlen,
+ 0,
+ eot,
+ inst->header_present);
+}
+
+/* Computes the integer pixel x,y values from the origin.
+ *
+ * This is the basis of gl_FragCoord computation, but is also used
+ * pre-gen6 for computing the deltas from v0 for computing
+ * interpolation.
+ */
+void
+fs_visitor::generate_pixel_xy(struct brw_reg dst, bool is_x)
+{
+ struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
+ struct brw_reg src;
+ struct brw_reg deltas;
+
+ if (is_x) {
+ src = stride(suboffset(g1_uw, 4), 2, 4, 0);
+ deltas = brw_imm_v(0x10101010);
+ } else {
+ src = stride(suboffset(g1_uw, 5), 2, 4, 0);
+ deltas = brw_imm_v(0x11001100);
+ }
+
+ if (c->dispatch_width == 16) {
+ dst = vec16(dst);
+ }
+
+ /* We do this 8 or 16-wide, but since the destination is UW we
+ * don't do compression in the 16-wide case.
+ */
+ brw_push_insn_state(p);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_ADD(p, dst, src, deltas);
+ brw_pop_insn_state(p);
+}
+
+void
+fs_visitor::generate_linterp(fs_inst *inst,
+ struct brw_reg dst, struct brw_reg *src)
+{
+ struct brw_reg delta_x = src[0];
+ struct brw_reg delta_y = src[1];
+ struct brw_reg interp = src[2];
+
+ if (brw->has_pln &&
+ delta_y.nr == delta_x.nr + 1 &&
+ (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
+ brw_PLN(p, dst, interp, delta_x);
+ } else {
+ brw_LINE(p, brw_null_reg(), interp, delta_x);
+ brw_MAC(p, dst, suboffset(interp, 1), delta_y);
+ }
+}
+
+void
+fs_visitor::generate_math(fs_inst *inst,
+ struct brw_reg dst, struct brw_reg *src)
+{
+ int op;
+
+ switch (inst->opcode) {
+ case FS_OPCODE_RCP:
+ op = BRW_MATH_FUNCTION_INV;
+ break;
+ case FS_OPCODE_RSQ:
+ op = BRW_MATH_FUNCTION_RSQ;
+ break;
+ case FS_OPCODE_SQRT:
+ op = BRW_MATH_FUNCTION_SQRT;
+ break;
+ case FS_OPCODE_EXP2:
+ op = BRW_MATH_FUNCTION_EXP;
+ break;
+ case FS_OPCODE_LOG2:
+ op = BRW_MATH_FUNCTION_LOG;
+ break;
+ case FS_OPCODE_POW:
+ op = BRW_MATH_FUNCTION_POW;
+ break;
+ case FS_OPCODE_SIN:
+ op = BRW_MATH_FUNCTION_SIN;
+ break;
+ case FS_OPCODE_COS:
+ op = BRW_MATH_FUNCTION_COS;
+ break;
+ default:
+ assert(!"not reached: unknown math function");
+ op = 0;
+ break;
+ }
+
+ if (intel->gen >= 6) {
+ assert(inst->mlen == 0);
+
+ if (inst->opcode == FS_OPCODE_POW) {
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_math2(p, dst, op, src[0], src[1]);
+
+ if (c->dispatch_width == 16) {
+ brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+ brw_math2(p, sechalf(dst), op, sechalf(src[0]), sechalf(src[1]));
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ }
+ } else {
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_math(p, dst,
+ op,
+ inst->saturate ? BRW_MATH_SATURATE_SATURATE :
+ BRW_MATH_SATURATE_NONE,
+ 0, src[0],
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+
+ if (c->dispatch_width == 16) {
+ brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+ brw_math(p, sechalf(dst),
+ op,
+ inst->saturate ? BRW_MATH_SATURATE_SATURATE :
+ BRW_MATH_SATURATE_NONE,
+ 0, sechalf(src[0]),
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ }
+ }
+ } else /* gen <= 5 */{
+ assert(inst->mlen >= 1);
+
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_math(p, dst,
+ op,
+ inst->saturate ? BRW_MATH_SATURATE_SATURATE :
+ BRW_MATH_SATURATE_NONE,
+ inst->base_mrf, src[0],
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+
+ if (c->dispatch_width == 16) {
+ brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+ brw_math(p, sechalf(dst),
+ op,
+ inst->saturate ? BRW_MATH_SATURATE_SATURATE :
+ BRW_MATH_SATURATE_NONE,
+ inst->base_mrf + 1, sechalf(src[0]),
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ }
+ }
+}
+
+void
+fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
+{
+ int msg_type = -1;
+ int rlen = 4;
+ uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
+
+ if (c->dispatch_width == 16)
+ simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
+
+ if (intel->gen >= 5) {
+ switch (inst->opcode) {
+ case FS_OPCODE_TEX:
+ if (inst->shadow_compare) {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
+ } else {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
+ }
+ break;
+ case FS_OPCODE_TXB:
+ if (inst->shadow_compare) {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
+ } else {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
+ }
+ break;
+ case FS_OPCODE_TXL:
+ if (inst->shadow_compare) {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
+ } else {
+ msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
+ }
+ break;
+ case FS_OPCODE_TXD:
+ assert(!"TXD isn't supported on gen5+ yet.");
+ break;
+ }
+ } else {
+ switch (inst->opcode) {
+ case FS_OPCODE_TEX:
+ /* Note that G45 and older determines shadow compare and dispatch width
+ * from message length for most messages.
+ */
+ assert(c->dispatch_width == 8);
+ msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
+ if (inst->shadow_compare) {
+ assert(inst->mlen == 6);
+ } else {
+ assert(inst->mlen <= 4);
+ }
+ break;
+ case FS_OPCODE_TXB:
+ if (inst->shadow_compare) {
+ assert(inst->mlen == 6);
+ msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
+ } else {
+ assert(inst->mlen == 9);
+ msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
+ simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
+ }
+ break;
+ case FS_OPCODE_TXL:
+ if (inst->shadow_compare) {
+ assert(inst->mlen == 6);
+ msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
+ } else {
+ assert(inst->mlen == 9);
+ msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
+ simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
+ }
+ break;
+ case FS_OPCODE_TXD:
+ assert(!"TXD isn't supported on gen4 yet.");
+ break;
+ }
+ }
+ assert(msg_type != -1);
+
+ if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
+ rlen = 8;
+ dst = vec16(dst);
+ }
+
+ brw_SAMPLE(p,
+ retype(dst, BRW_REGISTER_TYPE_UW),
+ inst->base_mrf,
+ src,
+ SURF_INDEX_TEXTURE(inst->sampler),
+ inst->sampler,
+ WRITEMASK_XYZW,
+ msg_type,
+ rlen,
+ inst->mlen,
+ 0,
+ inst->header_present,
+ simd_mode);
+}
+
+
+/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
+ * looking like:
+ *
+ * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
+ *
+ * and we're trying to produce:
+ *
+ * DDX DDY
+ * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
+ * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
+ * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
+ * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
+ * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
+ * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
+ * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
+ * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
+ *
+ * and add another set of two more subspans if in 16-pixel dispatch mode.
+ *
+ * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
+ * for each pair, and vertstride = 2 jumps us 2 elements after processing a
+ * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
+ * between each other. We could probably do it like ddx and swizzle the right
+ * order later, but bail for now and just produce
+ * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
+ */
+void
+fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
+{
+ struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_2,
+ BRW_WIDTH_2,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+ struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_2,
+ BRW_WIDTH_2,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+ brw_ADD(p, dst, src0, negate(src1));
+}
+
+void
+fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
+{
+ struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_4,
+ BRW_WIDTH_4,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+ struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_4,
+ BRW_WIDTH_4,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+ brw_ADD(p, dst, src0, negate(src1));
+}
+
+void
+fs_visitor::generate_discard(fs_inst *inst)
+{
+ struct brw_reg f0 = brw_flag_reg();
+
+ if (intel->gen >= 6) {
+ struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
+ struct brw_reg some_register;
+
+ /* As of gen6, we no longer have the mask register to look at,
+ * so life gets a bit more complicated.
+ */
+
+ /* Load the flag register with all ones. */
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_MOV(p, f0, brw_imm_uw(0xffff));
+ brw_pop_insn_state(p);
+
+ /* Do a comparison that should always fail, to produce 0s in the flag
+ * reg where we have active channels.
+ */
+ some_register = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
+ brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
+ BRW_CONDITIONAL_NZ, some_register, some_register);
+
+ /* Undo CMP's whacking of predication*/
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_AND(p, g1, f0, g1);
+ brw_pop_insn_state(p);
+ } else {
+ struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
+
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ /* Unlike the 965, we have the mask reg, so we just need
+ * somewhere to invert that (containing channels to be disabled)
+ * so it can be ANDed with the mask of pixels still to be
+ * written. Use the flag reg for consistency with gen6+.
+ */
+ brw_NOT(p, f0, brw_mask_reg(1)); /* IMASK */
+ brw_AND(p, g0, f0, g0);
+
+ brw_pop_insn_state(p);
+ }
+}
+
+void
+fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
+{
+ assert(inst->mlen != 0);
+
+ brw_MOV(p,
+ retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
+ retype(src, BRW_REGISTER_TYPE_UD));
+ brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
+ inst->offset);
+}
+
+void
+fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
+{
+ assert(inst->mlen != 0);
+
+ /* Clear any post destination dependencies that would be ignored by
+ * the block read. See the B-Spec for pre-gen5 send instruction.
+ *
+ * This could use a better solution, since texture sampling and
+ * math reads could potentially run into it as well -- anywhere
+ * that we have a SEND with a destination that is a register that
+ * was written but not read within the last N instructions (what's
+ * N? unsure). This is rare because of dead code elimination, but
+ * not impossible.
+ */
+ if (intel->gen == 4 && !intel->is_g4x)
+ brw_MOV(p, brw_null_reg(), dst);
+
+ brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
+ inst->offset);
+
+ if (intel->gen == 4 && !intel->is_g4x) {
+ /* gen4 errata: destination from a send can't be used as a
+ * destination until it's been read. Just read it so we don't
+ * have to worry.
+ */
+ brw_MOV(p, brw_null_reg(), dst);
+ }
+}
+
+void
+fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
+{
+ assert(inst->mlen != 0);
+
+ /* Clear any post destination dependencies that would be ignored by
+ * the block read. See the B-Spec for pre-gen5 send instruction.
+ *
+ * This could use a better solution, since texture sampling and
+ * math reads could potentially run into it as well -- anywhere
+ * that we have a SEND with a destination that is a register that
+ * was written but not read within the last N instructions (what's
+ * N? unsure). This is rare because of dead code elimination, but
+ * not impossible.
+ */
+ if (intel->gen == 4 && !intel->is_g4x)
+ brw_MOV(p, brw_null_reg(), dst);
+
+ brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
+ inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
+
+ if (intel->gen == 4 && !intel->is_g4x) {
+ /* gen4 errata: destination from a send can't be used as a
+ * destination until it's been read. Just read it so we don't
+ * have to worry.
+ */
+ brw_MOV(p, brw_null_reg(), dst);
+ }
+}
+
+static struct brw_reg
+brw_reg_from_fs_reg(fs_reg *reg)
+{
+ struct brw_reg brw_reg;
+
+ switch (reg->file) {
+ case GRF:
+ case ARF:
+ case MRF:
+ if (reg->smear == -1) {
+ brw_reg = brw_vec8_reg(reg->file,
+ reg->hw_reg, 0);
+ } else {
+ brw_reg = brw_vec1_reg(reg->file,
+ reg->hw_reg, reg->smear);
+ }
+ brw_reg = retype(brw_reg, reg->type);
+ if (reg->sechalf)
+ brw_reg = sechalf(brw_reg);
+ break;
+ case IMM:
+ switch (reg->type) {
+ case BRW_REGISTER_TYPE_F:
+ brw_reg = brw_imm_f(reg->imm.f);
+ break;
+ case BRW_REGISTER_TYPE_D:
+ brw_reg = brw_imm_d(reg->imm.i);
+ break;
+ case BRW_REGISTER_TYPE_UD:
+ brw_reg = brw_imm_ud(reg->imm.u);
+ break;
+ default:
+ assert(!"not reached");
+ brw_reg = brw_null_reg();
+ break;
+ }
+ break;
+ case FIXED_HW_REG:
+ brw_reg = reg->fixed_hw_reg;
+ break;
+ case BAD_FILE:
+ /* Probably unused. */
+ brw_reg = brw_null_reg();
+ break;
+ case UNIFORM:
+ assert(!"not reached");
+ brw_reg = brw_null_reg();
+ break;
+ default:
+ assert(!"not reached");
+ brw_reg = brw_null_reg();
+ break;
+ }
+ if (reg->abs)
+ brw_reg = brw_abs(brw_reg);
+ if (reg->negate)
+ brw_reg = negate(brw_reg);
+
+ return brw_reg;
+}
+
+void
+fs_visitor::generate_code()
+{
+ int last_native_inst = p->nr_insn;
+ const char *last_annotation_string = NULL;
+ ir_instruction *last_annotation_ir = NULL;
+
+ int loop_stack_array_size = 16;
+ int loop_stack_depth = 0;
+ brw_instruction **loop_stack =
+ rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
+ int *if_depth_in_loop =
+ rzalloc_array(this->mem_ctx, int, loop_stack_array_size);
+
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ printf("Native code for fragment shader %d (%d-wide dispatch):\n",
+ ctx->Shader.CurrentFragmentProgram->Name, c->dispatch_width);
+ }
+
+ foreach_iter(exec_list_iterator, iter, this->instructions) {
+ fs_inst *inst = (fs_inst *)iter.get();
+ struct brw_reg src[3], dst;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ if (last_annotation_ir != inst->ir) {
+ last_annotation_ir = inst->ir;
+ if (last_annotation_ir) {
+ printf(" ");
+ last_annotation_ir->print();
+ printf("\n");
+ }
+ }
+ if (last_annotation_string != inst->annotation) {
+ last_annotation_string = inst->annotation;
+ if (last_annotation_string)
+ printf(" %s\n", last_annotation_string);
+ }
+ }
+
+ for (unsigned int i = 0; i < 3; i++) {
+ src[i] = brw_reg_from_fs_reg(&inst->src[i]);
+ }
+ dst = brw_reg_from_fs_reg(&inst->dst);
+
+ brw_set_conditionalmod(p, inst->conditional_mod);
+ brw_set_predicate_control(p, inst->predicated);
+ brw_set_predicate_inverse(p, inst->predicate_inverse);
+ brw_set_saturate(p, inst->saturate);
+
+ if (inst->force_uncompressed || c->dispatch_width == 8) {
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ } else if (inst->force_sechalf) {
+ brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+ } else {
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ }
+
+ switch (inst->opcode) {
+ case BRW_OPCODE_MOV:
+ brw_MOV(p, dst, src[0]);
+ break;
+ case BRW_OPCODE_ADD:
+ brw_ADD(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_MUL:
+ brw_MUL(p, dst, src[0], src[1]);
+ break;
+
+ case BRW_OPCODE_FRC:
+ brw_FRC(p, dst, src[0]);
+ break;
+ case BRW_OPCODE_RNDD:
+ brw_RNDD(p, dst, src[0]);
+ break;
+ case BRW_OPCODE_RNDE:
+ brw_RNDE(p, dst, src[0]);
+ break;
+ case BRW_OPCODE_RNDZ:
+ brw_RNDZ(p, dst, src[0]);
+ break;
+
+ case BRW_OPCODE_AND:
+ brw_AND(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_OR:
+ brw_OR(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_XOR:
+ brw_XOR(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_NOT:
+ brw_NOT(p, dst, src[0]);
+ break;
+ case BRW_OPCODE_ASR:
+ brw_ASR(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_SHR:
+ brw_SHR(p, dst, src[0], src[1]);
+ break;
+ case BRW_OPCODE_SHL:
+ brw_SHL(p, dst, src[0], src[1]);
+ break;
+
+ case BRW_OPCODE_CMP:
+ brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
+ break;
+ case BRW_OPCODE_SEL:
+ brw_SEL(p, dst, src[0], src[1]);
+ break;
+
+ case BRW_OPCODE_IF:
+ if (inst->src[0].file != BAD_FILE) {
+ /* The instruction has an embedded compare (only allowed on gen6) */
+ assert(intel->gen == 6);
+ gen6_IF(p, inst->conditional_mod, src[0], src[1]);
+ } else {
+ brw_IF(p, c->dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
+ }
+ if_depth_in_loop[loop_stack_depth]++;
+ break;
+
+ case BRW_OPCODE_ELSE:
+ brw_ELSE(p);
+ break;
+ case BRW_OPCODE_ENDIF:
+ brw_ENDIF(p);
+ if_depth_in_loop[loop_stack_depth]--;
+ break;
+
+ case BRW_OPCODE_DO:
+ loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
+ if (loop_stack_array_size <= loop_stack_depth) {
+ loop_stack_array_size *= 2;
+ loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
+ loop_stack_array_size);
+ if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
+ loop_stack_array_size);
+ }
+ if_depth_in_loop[loop_stack_depth] = 0;
+ break;
+
+ case BRW_OPCODE_BREAK:
+ brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ break;
+ case BRW_OPCODE_CONTINUE:
+ /* FINISHME: We need to write the loop instruction support still. */
+ if (intel->gen >= 6)
+ gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
+ else
+ brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ break;
+
+ case BRW_OPCODE_WHILE: {
+ struct brw_instruction *inst0, *inst1;
+ GLuint br = 1;
+
+ if (intel->gen >= 5)
+ br = 2;
+
+ assert(loop_stack_depth > 0);
+ loop_stack_depth--;
+ inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
+ if (intel->gen < 6) {
+ /* patch all the BREAK/CONT instructions from last BGNLOOP */
+ while (inst0 > loop_stack[loop_stack_depth]) {
+ inst0--;
+ if (inst0->header.opcode == BRW_OPCODE_BREAK &&
+ inst0->bits3.if_else.jump_count == 0) {
+ inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
+ }
+ else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
+ inst0->bits3.if_else.jump_count == 0) {
+ inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
+ }
+ }
+ }
+ }
+ break;
+
+ case FS_OPCODE_RCP:
+ case FS_OPCODE_RSQ:
+ case FS_OPCODE_SQRT:
+ case FS_OPCODE_EXP2:
+ case FS_OPCODE_LOG2:
+ case FS_OPCODE_POW:
+ case FS_OPCODE_SIN:
+ case FS_OPCODE_COS:
+ generate_math(inst, dst, src);
+ break;
+ case FS_OPCODE_PIXEL_X:
+ generate_pixel_xy(dst, true);
+ break;
+ case FS_OPCODE_PIXEL_Y:
+ generate_pixel_xy(dst, false);
+ break;
+ case FS_OPCODE_CINTERP:
+ brw_MOV(p, dst, src[0]);
+ break;
+ case FS_OPCODE_LINTERP:
+ generate_linterp(inst, dst, src);
+ break;
+ case FS_OPCODE_TEX:
+ case FS_OPCODE_TXB:
+ case FS_OPCODE_TXD:
+ case FS_OPCODE_TXL:
+ generate_tex(inst, dst, src[0]);
+ break;
+ case FS_OPCODE_DISCARD:
+ generate_discard(inst);
+ break;
+ case FS_OPCODE_DDX:
+ generate_ddx(inst, dst, src[0]);
+ break;
+ case FS_OPCODE_DDY:
+ generate_ddy(inst, dst, src[0]);
+ break;
+
+ case FS_OPCODE_SPILL:
+ generate_spill(inst, src[0]);
+ break;
+
+ case FS_OPCODE_UNSPILL:
+ generate_unspill(inst, dst);
+ break;
+
+ case FS_OPCODE_PULL_CONSTANT_LOAD:
+ generate_pull_constant_load(inst, dst);
+ break;
+
+ case FS_OPCODE_FB_WRITE:
+ generate_fb_write(inst);
+ break;
+ default:
+ if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
+ _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
+ brw_opcodes[inst->opcode].name);
+ } else {
+ _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
+ }
+ fail("unsupported opcode in FS\n");
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
+ if (0) {
+ printf("0x%08x 0x%08x 0x%08x 0x%08x ",
+ ((uint32_t *)&p->store[i])[3],
+ ((uint32_t *)&p->store[i])[2],
+ ((uint32_t *)&p->store[i])[1],
+ ((uint32_t *)&p->store[i])[0]);
+ }
+ brw_disasm(stdout, &p->store[i], intel->gen);
+ }
+ }
+
+ last_native_inst = p->nr_insn;
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ printf("\n");
+ }
+
+ ralloc_free(loop_stack);
+ ralloc_free(if_depth_in_loop);
+
+ brw_set_uip_jip(p);
+
+ /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
+ * emit issues, it doesn't get the jump distances into the output,
+ * which is often something we want to debug. So this is here in
+ * case you're doing that.
+ */
+ if (0) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ for (unsigned int i = 0; i < p->nr_insn; i++) {
+ printf("0x%08x 0x%08x 0x%08x 0x%08x ",
+ ((uint32_t *)&p->store[i])[3],
+ ((uint32_t *)&p->store[i])[2],
+ ((uint32_t *)&p->store[i])[1],
+ ((uint32_t *)&p->store[i])[0]);
+ brw_disasm(stdout, &p->store[i], intel->gen);
+ }
+ }
+ }
+}
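As a sanity check of the region arithmetic described in the generate_ddx() comment above, here is a standalone sketch (illustrative only, not driver code) that evaluates the <vstride; width, hstride> addressing for the two source regions generate_ddx() builds: subregister offsets 1 and 0, both with vstride 2, width 2, hstride 0:

/* Element i of a <vstride; width, hstride> region starting at subregister
 * offset `base` reads source element:
 *    base + (i / width) * vstride + (i % width) * hstride
 */
#include <cstdio>

static int region_elem(int base, int vstride, int width, int hstride, int i)
{
   return base + (i / width) * vstride + (i % width) * hstride;
}

int main()
{
   const char *name[8] = { "ss0.tl", "ss0.tr", "ss0.bl", "ss0.br",
                           "ss1.tl", "ss1.tr", "ss1.bl", "ss1.br" };

   /* generate_ddx(): src0 = offset 1, <2;2,0>; src1 = offset 0, <2;2,0>. */
   for (int i = 0; i < 8; i++) {
      int a = region_elem(1, 2, 2, 0, i);
      int b = region_elem(0, 2, 2, 0, i);
      printf("ddx dst[%d] = %s - %s\n", i, name[a], name[b]);
   }
   return 0;
}

The output is (ss0.tr - ss0.tl) twice, (ss0.br - ss0.bl) twice, then the same for ss1, which is exactly the DDX column of the comment. generate_ddy() instead uses <4;4,0> regions at offsets 0 and 2, which yields (ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4 as the comment says.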