author    | Chia-I Wu <[email protected]> | 2012-12-13 05:48:46 +0800
committer | Chia-I Wu <[email protected]> | 2013-04-26 16:20:52 +0800
commit    | 825aa60707d620745ff3c1b6e43976977c81c2a9 (patch)
tree      | e48b8e9e2d7c11262a06715442c1d35ae19bb9f7 /src/gallium/drivers/ilo/shader
parent    | 7118ff8bb02046bb2f440e2a5c48d9a41bb057b1 (diff)
ilo: compile VS/GS/FS with the toy compiler
Diffstat (limited to 'src/gallium/drivers/ilo/shader')
-rw-r--r-- | src/gallium/drivers/ilo/shader/ilo_shader_cs.c | 38
-rw-r--r-- | src/gallium/drivers/ilo/shader/ilo_shader_fs.c | 1694
-rw-r--r-- | src/gallium/drivers/ilo/shader/ilo_shader_gs.c | 1437
-rw-r--r-- | src/gallium/drivers/ilo/shader/ilo_shader_vs.c | 1273
4 files changed, 4442 insertions, 0 deletions
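Each of the four files exports one compile entry point (`ilo_shader_compile_cs` in the diff below is still a stub returning NULL). A minimal sketch of how a caller could dispatch over them — only the `ilo_shader_compile_*` names come from this commit; the `compile_shader` wrapper and `enum shader_type` are illustrative assumptions:

```c
#include <stddef.h>

/* opaque types from ilo_shader.h */
struct ilo_shader;
struct ilo_shader_state;
struct ilo_shader_variant;

/* entry points added by this commit, one per file */
struct ilo_shader *ilo_shader_compile_vs(const struct ilo_shader_state *,
                                         const struct ilo_shader_variant *);
struct ilo_shader *ilo_shader_compile_gs(const struct ilo_shader_state *,
                                         const struct ilo_shader_variant *);
struct ilo_shader *ilo_shader_compile_fs(const struct ilo_shader_state *,
                                         const struct ilo_shader_variant *);
struct ilo_shader *ilo_shader_compile_cs(const struct ilo_shader_state *,
                                         const struct ilo_shader_variant *);

enum shader_type { SHADER_VS, SHADER_GS, SHADER_FS, SHADER_CS };

/* hypothetical dispatcher, not part of the commit */
static struct ilo_shader *
compile_shader(enum shader_type type,
               const struct ilo_shader_state *state,
               const struct ilo_shader_variant *variant)
{
   switch (type) {
   case SHADER_VS: return ilo_shader_compile_vs(state, variant);
   case SHADER_GS: return ilo_shader_compile_gs(state, variant);
   case SHADER_FS: return ilo_shader_compile_fs(state, variant);
   case SHADER_CS: return ilo_shader_compile_cs(state, variant); /* stub: NULL */
   }
   return NULL;
}
```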
diff --git a/src/gallium/drivers/ilo/shader/ilo_shader_cs.c b/src/gallium/drivers/ilo/shader/ilo_shader_cs.c new file mode 100644 index 00000000000..85182f2a1b7 --- /dev/null +++ b/src/gallium/drivers/ilo/shader/ilo_shader_cs.c @@ -0,0 +1,38 @@ +/* + * Mesa 3-D graphics library + * + * Copyright (C) 2012-2013 LunarG, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chia-I Wu <[email protected]> + */ + +#include "ilo_shader.h" + +/** + * Compile the compute shader. + */ +struct ilo_shader * +ilo_shader_compile_cs(const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant) +{ + return NULL; +} diff --git a/src/gallium/drivers/ilo/shader/ilo_shader_fs.c b/src/gallium/drivers/ilo/shader/ilo_shader_fs.c new file mode 100644 index 00000000000..d5f365cb4d7 --- /dev/null +++ b/src/gallium/drivers/ilo/shader/ilo_shader_fs.c @@ -0,0 +1,1694 @@ +/* + * Mesa 3-D graphics library + * + * Copyright (C) 2012-2013 LunarG, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Chia-I Wu <[email protected]> + */ + +#include "tgsi/tgsi_dump.h" +#include "toy_compiler.h" +#include "toy_tgsi.h" +#include "toy_legalize.h" +#include "toy_optimize.h" +#include "toy_helpers.h" +#include "ilo_context.h" +#include "ilo_shader.h" + +struct fs_compile_context { + struct ilo_shader *shader; + const struct ilo_shader_variant *variant; + + struct toy_compiler tc; + struct toy_tgsi tgsi; + + enum brw_message_target const_cache; + int dispatch_mode; + + struct { + int barycentric_interps[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT]; + int source_depth; + int source_w; + int pos_offset; + } payloads[2]; + + int first_const_grf; + int first_attr_grf; + int first_free_grf; + int last_free_grf; + + int num_grf_per_vrf; + + int first_free_mrf; + int last_free_mrf; +}; + +static void +fetch_position(struct fs_compile_context *fcc, struct toy_dst dst) +{ + struct toy_compiler *tc = &fcc->tc; + const struct toy_src src_z = + tsrc(TOY_FILE_GRF, fcc->payloads[0].source_depth, 0); + const struct toy_src src_w = + tsrc(TOY_FILE_GRF, fcc->payloads[0].source_w, 0); + const int fb_height = + (fcc->variant->u.fs.fb_height) ? fcc->variant->u.fs.fb_height : 1; + const bool origin_upper_left = + (fcc->tgsi.props.fs_coord_origin == TGSI_FS_COORD_ORIGIN_UPPER_LEFT); + const bool pixel_center_integer = + (fcc->tgsi.props.fs_coord_pixel_center == + TGSI_FS_COORD_PIXEL_CENTER_INTEGER); + struct toy_src subspan_x, subspan_y; + struct toy_dst tmp, tmp_uw; + struct toy_dst real_dst[4]; + + tdst_transpose(dst, real_dst); + + subspan_x = tsrc_uw(tsrc(TOY_FILE_GRF, 1, 2 * 4)); + subspan_x = tsrc_rect(subspan_x, TOY_RECT_240); + + subspan_y = tsrc_offset(subspan_x, 0, 1); + + tmp_uw = tdst_uw(tc_alloc_tmp(tc)); + tmp = tc_alloc_tmp(tc); + + /* X */ + tc_ADD(tc, tmp_uw, subspan_x, tsrc_imm_v(0x10101010)); + tc_MOV(tc, tmp, tsrc_from(tmp_uw)); + if (pixel_center_integer) + tc_MOV(tc, real_dst[0], tsrc_from(tmp)); + else + tc_ADD(tc, real_dst[0], tsrc_from(tmp), tsrc_imm_f(0.5f)); + + /* Y */ + tc_ADD(tc, tmp_uw, subspan_y, tsrc_imm_v(0x11001100)); + tc_MOV(tc, tmp, tsrc_from(tmp_uw)); + if (origin_upper_left && pixel_center_integer) { + tc_MOV(tc, real_dst[1], tsrc_from(tmp)); + } + else { + struct toy_src y = tsrc_from(tmp); + float offset = 0.0f; + + if (!pixel_center_integer) + offset += 0.5f; + + if (!origin_upper_left) { + offset += (float) (fb_height - 1); + y = tsrc_negate(y); + } + + tc_ADD(tc, real_dst[1], y, tsrc_imm_f(offset)); + } + + /* Z and W */ + tc_MOV(tc, real_dst[2], src_z); + tc_INV(tc, real_dst[3], src_w); +} + +static void +fetch_face(struct fs_compile_context *fcc, struct toy_dst dst) +{ + struct toy_compiler *tc = &fcc->tc; + const struct toy_src r0 = tsrc_d(tsrc(TOY_FILE_GRF, 0, 0)); + struct toy_dst tmp_f, tmp; + struct toy_dst real_dst[4]; + + tdst_transpose(dst, real_dst); + + tmp_f = tc_alloc_tmp(tc); + tmp = tdst_d(tmp_f); + tc_SHR(tc, tmp, tsrc_rect(r0, TOY_RECT_010), tsrc_imm_d(15)); + tc_AND(tc, tmp, tsrc_from(tmp), tsrc_imm_d(1)); + tc_MOV(tc, tmp_f, tsrc_from(tmp)); + + /* convert to 1.0 and -1.0 */ + tc_MUL(tc, tmp_f, tsrc_from(tmp_f), tsrc_imm_f(-2.0f)); + tc_ADD(tc, real_dst[0], tsrc_from(tmp_f), tsrc_imm_f(1.0f)); + + tc_MOV(tc, real_dst[1], tsrc_imm_f(0.0f)); + tc_MOV(tc, real_dst[2], tsrc_imm_f(0.0f)); + tc_MOV(tc, real_dst[3], tsrc_imm_f(1.0f)); +} + +static void +fetch_attr(struct fs_compile_context *fcc, struct toy_dst dst, int slot) +{ + struct toy_compiler *tc = &fcc->tc; + struct toy_dst real_dst[4]; + bool is_const = false; + int grf, mode, ch; + + 
tdst_transpose(dst, real_dst); + + grf = fcc->first_attr_grf + slot * 2; + + switch (fcc->tgsi.inputs[slot].interp) { + case TGSI_INTERPOLATE_CONSTANT: + is_const = true; + break; + case TGSI_INTERPOLATE_LINEAR: + if (fcc->tgsi.inputs[slot].centroid) + mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC; + else + mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC; + break; + case TGSI_INTERPOLATE_COLOR: + if (fcc->variant->u.fs.flatshade) { + is_const = true; + break; + } + /* fall through */ + case TGSI_INTERPOLATE_PERSPECTIVE: + if (fcc->tgsi.inputs[slot].centroid) + mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC; + else + mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC; + break; + default: + assert(!"unexpected FS interpolation"); + mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC; + break; + } + + if (is_const) { + struct toy_src a0[4]; + + a0[0] = tsrc(TOY_FILE_GRF, grf + 0, 3 * 4); + a0[1] = tsrc(TOY_FILE_GRF, grf + 0, 7 * 4); + a0[2] = tsrc(TOY_FILE_GRF, grf + 1, 3 * 4); + a0[3] = tsrc(TOY_FILE_GRF, grf + 1, 7 * 4); + + for (ch = 0; ch < 4; ch++) + tc_MOV(tc, real_dst[ch], tsrc_rect(a0[ch], TOY_RECT_010)); + } + else { + struct toy_src attr[4], uv; + + attr[0] = tsrc(TOY_FILE_GRF, grf + 0, 0); + attr[1] = tsrc(TOY_FILE_GRF, grf + 0, 4 * 4); + attr[2] = tsrc(TOY_FILE_GRF, grf + 1, 0); + attr[3] = tsrc(TOY_FILE_GRF, grf + 1, 4 * 4); + + uv = tsrc(TOY_FILE_GRF, fcc->payloads[0].barycentric_interps[mode], 0); + + for (ch = 0; ch < 4; ch++) { + tc_add2(tc, BRW_OPCODE_PLN, real_dst[ch], + tsrc_rect(attr[ch], TOY_RECT_010), uv); + } + } + + if (fcc->tgsi.inputs[slot].semantic_name == TGSI_SEMANTIC_FOG) { + tc_MOV(tc, real_dst[1], tsrc_imm_f(0.0f)); + tc_MOV(tc, real_dst[2], tsrc_imm_f(0.0f)); + tc_MOV(tc, real_dst[3], tsrc_imm_f(1.0f)); + } +} + +static void +fs_lower_opcode_tgsi_in(struct fs_compile_context *fcc, + struct toy_dst dst, int dim, int idx) +{ + int slot; + + assert(!dim); + + slot = toy_tgsi_find_input(&fcc->tgsi, idx); + if (slot < 0) + return; + + switch (fcc->tgsi.inputs[slot].semantic_name) { + case TGSI_SEMANTIC_POSITION: + fetch_position(fcc, dst); + break; + case TGSI_SEMANTIC_FACE: + fetch_face(fcc, dst); + break; + default: + fetch_attr(fcc, dst, slot); + break; + } +} + +static void +fs_lower_opcode_tgsi_const_gen6(struct fs_compile_context *fcc, + struct toy_dst dst, int dim, struct toy_src idx) +{ + const struct toy_dst header = + tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 0)); + const struct toy_dst global_offset = + tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 2 * 4)); + const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0)); + struct toy_compiler *tc = &fcc->tc; + unsigned msg_type, msg_ctrl, msg_len; + struct toy_inst *inst; + struct toy_src desc; + struct toy_dst tmp, real_dst[4]; + int i; + + /* set message header */ + inst = tc_MOV(tc, header, r0); + inst->mask_ctrl = BRW_MASK_DISABLE; + + /* set global offset */ + inst = tc_MOV(tc, global_offset, idx); + inst->mask_ctrl = BRW_MASK_DISABLE; + inst->exec_size = BRW_EXECUTE_1; + inst->src[0].rect = TOY_RECT_010; + + msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ; + msg_ctrl = BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW << 8; + msg_len = 1; + + desc = tsrc_imm_mdesc_data_port(tc, false, msg_len, 1, true, false, + msg_type, msg_ctrl, ILO_WM_CONST_SURFACE(dim)); + + tmp = tc_alloc_tmp(tc); + + tc_SEND(tc, tmp, tsrc_from(header), desc, fcc->const_cache); + + tdst_transpose(dst, real_dst); + for (i = 0; i < 4; i++) { + const struct toy_src src = + tsrc_offset(tsrc_rect(tsrc_from(tmp), TOY_RECT_010), 0, i); + + /* 
cast to type D to make sure these are raw moves */ + tc_MOV(tc, tdst_d(real_dst[i]), tsrc_d(src)); + } +} + +static void +fs_lower_opcode_tgsi_const_gen7(struct fs_compile_context *fcc, + struct toy_dst dst, int dim, struct toy_src idx) +{ + struct toy_compiler *tc = &fcc->tc; + const struct toy_dst offset = + tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 0)); + struct toy_src desc; + struct toy_inst *inst; + struct toy_dst tmp, real_dst[4]; + int i; + + /* + * In 4c1fdae0a01b3f92ec03b61aac1d3df500d51fc6, pull constant load was + * changed from OWord Block Read to ld to increase performance in the + * classic driver. Since we use the constant cache instead of the data + * cache, I wonder if we still want to follow the classic driver. + */ + + /* set offset */ + inst = tc_MOV(tc, offset, tsrc_rect(idx, TOY_RECT_010)); + inst->exec_size = BRW_EXECUTE_8; + inst->mask_ctrl = BRW_MASK_DISABLE; + + desc = tsrc_imm_mdesc_sampler(tc, 1, 1, false, + BRW_SAMPLER_SIMD_MODE_SIMD4X2, + GEN5_SAMPLER_MESSAGE_SAMPLE_LD, + 0, + ILO_WM_CONST_SURFACE(dim)); + + tmp = tc_alloc_tmp(tc); + inst = tc_SEND(tc, tmp, tsrc_from(offset), desc, BRW_SFID_SAMPLER); + inst->exec_size = BRW_EXECUTE_8; + inst->mask_ctrl = BRW_MASK_DISABLE; + + tdst_transpose(dst, real_dst); + for (i = 0; i < 4; i++) { + const struct toy_src src = + tsrc_offset(tsrc_rect(tsrc_from(tmp), TOY_RECT_010), 0, i); + + /* cast to type D to make sure these are raw moves */ + tc_MOV(tc, tdst_d(real_dst[i]), tsrc_d(src)); + } +} + +static void +fs_lower_opcode_tgsi_imm(struct fs_compile_context *fcc, + struct toy_dst dst, int idx) +{ + const uint32_t *imm; + struct toy_dst real_dst[4]; + int ch; + + imm = toy_tgsi_get_imm(&fcc->tgsi, idx, NULL); + + tdst_transpose(dst, real_dst); + /* raw moves */ + for (ch = 0; ch < 4; ch++) + tc_MOV(&fcc->tc, tdst_ud(real_dst[ch]), tsrc_imm_ud(imm[ch])); +} + +static void +fs_lower_opcode_tgsi_sv(struct fs_compile_context *fcc, + struct toy_dst dst, int dim, int idx) +{ + struct toy_compiler *tc = &fcc->tc; + const struct toy_tgsi *tgsi = &fcc->tgsi; + int slot; + + assert(!dim); + + slot = toy_tgsi_find_system_value(tgsi, idx); + if (slot < 0) + return; + + switch (tgsi->system_values[slot].semantic_name) { + case TGSI_SEMANTIC_PRIMID: + case TGSI_SEMANTIC_INSTANCEID: + case TGSI_SEMANTIC_VERTEXID: + default: + tc_fail(tc, "unhandled system value"); + tc_MOV(tc, dst, tsrc_imm_d(0)); + break; + } +} + +static void +fs_lower_opcode_tgsi_direct(struct fs_compile_context *fcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &fcc->tc; + int dim, idx; + + assert(inst->src[0].file == TOY_FILE_IMM); + dim = inst->src[0].val32; + + assert(inst->src[1].file == TOY_FILE_IMM); + idx = inst->src[1].val32; + + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + fs_lower_opcode_tgsi_in(fcc, inst->dst, dim, idx); + break; + case TOY_OPCODE_TGSI_CONST: + if (tc->gen >= ILO_GEN(7)) + fs_lower_opcode_tgsi_const_gen7(fcc, inst->dst, dim, inst->src[1]); + else + fs_lower_opcode_tgsi_const_gen6(fcc, inst->dst, dim, inst->src[1]); + break; + case TOY_OPCODE_TGSI_SV: + fs_lower_opcode_tgsi_sv(fcc, inst->dst, dim, idx); + break; + case TOY_OPCODE_TGSI_IMM: + assert(!dim); + fs_lower_opcode_tgsi_imm(fcc, inst->dst, idx); + break; + default: + tc_fail(tc, "unhandled TGSI fetch"); + break; + } + + tc_discard_inst(tc, inst); +} + +static void +fs_lower_opcode_tgsi_indirect(struct fs_compile_context *fcc, + struct toy_inst *inst) +{ + tc_fail(&fcc->tc, "no TGSI indirection support"); +} + +/** + * Emit instructions to move sampling 
parameters to the message registers. + */ +static int +fs_add_sampler_params_gen6(struct toy_compiler *tc, int msg_type, + int base_mrf, int param_size, + struct toy_src *coords, int num_coords, + struct toy_src bias_or_lod, struct toy_src ref_or_si, + struct toy_src *ddx, struct toy_src *ddy, + int num_derivs) +{ + int num_params, i; + + assert(num_coords <= 4); + assert(num_derivs <= 3 && num_derivs <= num_coords); + +#define SAMPLER_PARAM(p) (tdst(TOY_FILE_MRF, base_mrf + (p) * param_size, 0)) + switch (msg_type) { + case GEN5_SAMPLER_MESSAGE_SAMPLE: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + num_params = num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS: + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + tc_MOV(tc, SAMPLER_PARAM(4), bias_or_lod); + num_params = 5; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + tc_MOV(tc, SAMPLER_PARAM(4), ref_or_si); + num_params = 5; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + for (i = 0; i < num_derivs; i++) { + tc_MOV(tc, SAMPLER_PARAM(4 + i * 2), ddx[i]); + tc_MOV(tc, SAMPLER_PARAM(5 + i * 2), ddy[i]); + } + num_params = 4 + num_derivs * 2; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE: + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + tc_MOV(tc, SAMPLER_PARAM(4), ref_or_si); + tc_MOV(tc, SAMPLER_PARAM(5), bias_or_lod); + num_params = 6; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_LD: + assert(num_coords <= 3); + + for (i = 0; i < num_coords; i++) + tc_MOV(tc, tdst_d(SAMPLER_PARAM(i)), coords[i]); + tc_MOV(tc, tdst_d(SAMPLER_PARAM(3)), bias_or_lod); + tc_MOV(tc, tdst_d(SAMPLER_PARAM(4)), ref_or_si); + num_params = 5; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO: + tc_MOV(tc, tdst_d(SAMPLER_PARAM(0)), bias_or_lod); + num_params = 1; + break; + default: + tc_fail(tc, "unknown sampler opcode"); + num_params = 0; + break; + } +#undef SAMPLER_PARAM + + return num_params * param_size; +} + +static int +fs_add_sampler_params_gen7(struct toy_compiler *tc, int msg_type, + int base_mrf, int param_size, + struct toy_src *coords, int num_coords, + struct toy_src bias_or_lod, struct toy_src ref_or_si, + struct toy_src *ddx, struct toy_src *ddy, + int num_derivs) +{ + int num_params, i; + + assert(num_coords <= 4); + assert(num_derivs <= 3 && num_derivs <= num_coords); + +#define SAMPLER_PARAM(p) (tdst(TOY_FILE_MRF, base_mrf + (p) * param_size, 0)) + switch (msg_type) { + case GEN5_SAMPLER_MESSAGE_SAMPLE: + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(i), coords[i]); + num_params = num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS: + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD: + tc_MOV(tc, SAMPLER_PARAM(0), bias_or_lod); + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(1 + i), coords[i]); + num_params = 1 + num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE: + tc_MOV(tc, SAMPLER_PARAM(0), ref_or_si); + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(1 + i), coords[i]); + num_params = 1 + num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS: + for (i = 0; i < num_coords; i++) { + tc_MOV(tc, SAMPLER_PARAM(i * 3), coords[i]); + if (i < num_derivs) { + tc_MOV(tc, SAMPLER_PARAM(i * 3 + 1), ddx[i]); + tc_MOV(tc, 
SAMPLER_PARAM(i * 3 + 2), ddy[i]); + } + } + num_params = num_coords * 3 - ((num_coords > num_derivs) ? 2 : 0); + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE: + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE: + tc_MOV(tc, SAMPLER_PARAM(0), ref_or_si); + tc_MOV(tc, SAMPLER_PARAM(1), bias_or_lod); + for (i = 0; i < num_coords; i++) + tc_MOV(tc, SAMPLER_PARAM(2 + i), coords[i]); + num_params = 2 + num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_LD: + assert(num_coords >= 1 && num_coords <= 3); + + tc_MOV(tc, tdst_d(SAMPLER_PARAM(0)), coords[0]); + tc_MOV(tc, tdst_d(SAMPLER_PARAM(1)), bias_or_lod); + for (i = 1; i < num_coords; i++) + tc_MOV(tc, tdst_d(SAMPLER_PARAM(1 + i)), coords[i]); + num_params = 1 + num_coords; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO: + tc_MOV(tc, tdst_d(SAMPLER_PARAM(0)), bias_or_lod); + num_params = 1; + break; + default: + tc_fail(tc, "unknown sampler opcode"); + num_params = 0; + break; + } +#undef SAMPLER_PARAM + + return num_params * param_size; +} + +/** + * Set up message registers and return the message descriptor for sampling. + */ +static struct toy_src +fs_prepare_tgsi_sampling(struct toy_compiler *tc, const struct toy_inst *inst, + int base_mrf, const uint32_t *saturate_coords, + unsigned *ret_sampler_index) +{ + unsigned simd_mode, msg_type, msg_len, sampler_index, binding_table_index; + struct toy_src coords[4], ddx[4], ddy[4], bias_or_lod, ref_or_si; + int num_coords, ref_pos, num_derivs; + int sampler_src, param_size, i; + + switch (inst->exec_size) { + case BRW_EXECUTE_8: + simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8; + param_size = 1; + break; + case BRW_EXECUTE_16: + simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16; + param_size = 2; + break; + default: + tc_fail(tc, "unsupported execute size for sampling"); + return tsrc_null(); + break; + } + + num_coords = toy_tgsi_get_texture_coord_dim(inst->tex.target, &ref_pos); + tsrc_transpose(inst->src[0], coords); + bias_or_lod = tsrc_null(); + ref_or_si = tsrc_null(); + num_derivs = 0; + sampler_src = 1; + + /* + * For TXD, + * + * src0 := (x, y, z, w) + * src1 := ddx + * src2 := ddy + * src3 := sampler + * + * For TEX2, TXB2, and TXL2, + * + * src0 := (x, y, z, w) + * src1 := (v or bias or lod, ...) + * src2 := sampler + * + * For TEX, TXB, TXL, and TXP, + * + * src0 := (x, y, z, w or bias or lod or projection) + * src1 := sampler + * + * For TXQ, + * + * src0 := (lod, ...) + * src1 := sampler + * + * For TXQ_LZ, + * + * src0 := sampler + * + * And for TXF, + * + * src0 := (x, y, z, w or lod) + * src1 := sampler + * + * State trackers should not generate opcode+texture combinations with + * which the two definitions conflict (e.g., TXB with SHADOW2DARRAY). 
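The operand layouts listed above determine which source register carries the sampler immediate. Condensed into a standalone table function — the `enum tgsi_tex_op` values are placeholders standing in for the corresponding `TOY_OPCODE_TGSI_*` opcodes:

```c
enum tgsi_tex_op { OP_TEX, OP_TXB, OP_TXL, OP_TXP, OP_TXF,
                   OP_TXD, OP_TXQ, OP_TXQ_LZ, OP_TEX2, OP_TXB2, OP_TXL2 };

/* which inst->src[] holds the sampler, per the layouts above */
static int
sampler_src_index(enum tgsi_tex_op op)
{
   switch (op) {
   case OP_TXD:                              return 3; /* src3 := sampler */
   case OP_TEX2: case OP_TXB2: case OP_TXL2: return 2; /* src2 := sampler */
   case OP_TXQ_LZ:                           return 0; /* src0 := sampler */
   default:                                  return 1; /* src1 := sampler */
   }
}
```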
+ */ + switch (inst->opcode) { + case TOY_OPCODE_TGSI_TEX: + if (ref_pos >= 0) { + assert(ref_pos < 4); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE; + ref_or_si = coords[ref_pos]; + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE; + } + break; + case TOY_OPCODE_TGSI_TXD: + if (ref_pos >= 0) + tc_fail(tc, "TXD with shadow sampler not supported"); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS; + tsrc_transpose(inst->src[1], ddx); + tsrc_transpose(inst->src[2], ddy); + num_derivs = num_coords; + sampler_src = 3; + break; + case TOY_OPCODE_TGSI_TXP: + if (ref_pos >= 0) { + assert(ref_pos < 3); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE; + ref_or_si = coords[ref_pos]; + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE; + } + + /* project the coordinates */ + { + struct toy_dst tmp[4]; + + tc_alloc_tmp4(tc, tmp); + + tc_INV(tc, tmp[3], coords[3]); + for (i = 0; i < num_coords && i < 3; i++) { + tc_MUL(tc, tmp[i], coords[i], tsrc_from(tmp[3])); + coords[i] = tsrc_from(tmp[i]); + } + + if (ref_pos >= i) { + tc_MUL(tc, tmp[ref_pos], ref_or_si, tsrc_from(tmp[3])); + ref_or_si = tsrc_from(tmp[ref_pos]); + } + } + break; + case TOY_OPCODE_TGSI_TXB: + if (ref_pos >= 0) { + assert(ref_pos < 3); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE; + ref_or_si = coords[ref_pos]; + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS; + } + + bias_or_lod = coords[3]; + break; + case TOY_OPCODE_TGSI_TXL: + if (ref_pos >= 0) { + assert(ref_pos < 3); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE; + ref_or_si = coords[ref_pos]; + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD; + } + + bias_or_lod = coords[3]; + break; + case TOY_OPCODE_TGSI_TXF: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; + + switch (inst->tex.target) { + case TGSI_TEXTURE_2D_MSAA: + case TGSI_TEXTURE_2D_ARRAY_MSAA: + assert(ref_pos >= 0 && ref_pos < 4); + /* lod is always 0 */ + bias_or_lod = tsrc_imm_d(0); + ref_or_si = coords[ref_pos]; + break; + default: + bias_or_lod = coords[3]; + break; + } + + /* offset the coordinates */ + if (!tsrc_is_null(inst->tex.offsets[0])) { + struct toy_dst tmp[4]; + struct toy_src offsets[4]; + + tc_alloc_tmp4(tc, tmp); + tsrc_transpose(inst->tex.offsets[0], offsets); + + for (i = 0; i < num_coords; i++) { + tc_ADD(tc, tmp[i], coords[i], offsets[i]); + coords[i] = tsrc_from(tmp[i]); + } + } + + sampler_src = 1; + break; + case TOY_OPCODE_TGSI_TXQ: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO; + num_coords = 0; + bias_or_lod = coords[0]; + break; + case TOY_OPCODE_TGSI_TXQ_LZ: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO; + num_coords = 0; + sampler_src = 0; + break; + case TOY_OPCODE_TGSI_TEX2: + if (ref_pos >= 0) { + assert(ref_pos < 5); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE; + + if (ref_pos >= 4) { + struct toy_src src1[4]; + tsrc_transpose(inst->src[1], src1); + ref_or_si = src1[ref_pos - 4]; + } + else { + ref_or_si = coords[ref_pos]; + } + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE; + } + + sampler_src = 2; + break; + case TOY_OPCODE_TGSI_TXB2: + if (ref_pos >= 0) { + assert(ref_pos < 4); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE; + ref_or_si = coords[ref_pos]; + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS; + } + + { + struct toy_src src1[4]; + tsrc_transpose(inst->src[1], src1); + bias_or_lod = src1[0]; + } + + sampler_src = 2; + break; + case TOY_OPCODE_TGSI_TXL2: + if (ref_pos >= 0) { + assert(ref_pos < 4); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE; + ref_or_si = coords[ref_pos]; 
+ } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD; + } + + { + struct toy_src src1[4]; + tsrc_transpose(inst->src[1], src1); + bias_or_lod = src1[0]; + } + + sampler_src = 2; + break; + default: + assert(!"unhandled sampling opcode"); + return tsrc_null(); + break; + } + + assert(inst->src[sampler_src].file == TOY_FILE_IMM); + sampler_index = inst->src[sampler_src].val32; + binding_table_index = ILO_WM_TEXTURE_SURFACE(sampler_index); + + /* + * From the Sandy Bridge PRM, volume 4 part 1, page 18: + * + * "Note that the (cube map) coordinates delivered to the sampling + * engine must already have been divided by the component with the + * largest absolute value." + */ + switch (inst->tex.target) { + case TGSI_TEXTURE_CUBE: + case TGSI_TEXTURE_SHADOWCUBE: + case TGSI_TEXTURE_CUBE_ARRAY: + case TGSI_TEXTURE_SHADOWCUBE_ARRAY: + /* TXQ does not need coordinates */ + if (num_coords >= 3) { + struct toy_dst tmp[4]; + + tc_alloc_tmp4(tc, tmp); + + tc_SEL(tc, tmp[3], tsrc_absolute(coords[0]), + tsrc_absolute(coords[1]), BRW_CONDITIONAL_GE); + tc_SEL(tc, tmp[3], tsrc_from(tmp[3]), + tsrc_absolute(coords[2]), BRW_CONDITIONAL_GE); + tc_INV(tc, tmp[3], tsrc_from(tmp[3])); + + for (i = 0; i < 3; i++) { + tc_MUL(tc, tmp[i], coords[i], tsrc_from(tmp[3])); + coords[i] = tsrc_from(tmp[i]); + } + } + break; + } + + /* + * Saturate (s, t, r). saturate_coords is set for sampler and coordinate + * that uses linear filtering and PIPE_TEX_WRAP_CLAMP respectively. It is + * so that sampling outside the border gets the correct colors. + */ + for (i = 0; i < MIN2(num_coords, 3); i++) { + bool is_rect; + + if (!(saturate_coords[i] & (1 << sampler_index))) + continue; + + switch (inst->tex.target) { + case TGSI_TEXTURE_RECT: + case TGSI_TEXTURE_SHADOWRECT: + is_rect = true; + break; + default: + is_rect = false; + break; + } + + if (is_rect) { + struct toy_src min, max; + struct toy_dst tmp; + + tc_fail(tc, "GL_CLAMP with rectangle texture unsupported"); + tmp = tc_alloc_tmp(tc); + + /* saturate to [0, width] or [0, height] */ + /* TODO TXQ? */ + min = tsrc_imm_f(0.0f); + max = tsrc_imm_f(2048.0f); + + tc_SEL(tc, tmp, coords[i], min, BRW_CONDITIONAL_G); + tc_SEL(tc, tmp, tsrc_from(tmp), max, BRW_CONDITIONAL_L); + + coords[i] = tsrc_from(tmp); + } + else { + struct toy_dst tmp; + struct toy_inst *inst2; + + tmp = tc_alloc_tmp(tc); + + /* saturate to [0.0f, 1.0f] */ + inst2 = tc_MOV(tc, tmp, coords[i]); + inst2->saturate = true; + + coords[i] = tsrc_from(tmp); + } + } + + /* set up sampler parameters */ + if (tc->gen >= ILO_GEN(7)) { + msg_len = fs_add_sampler_params_gen7(tc, msg_type, base_mrf, param_size, + coords, num_coords, bias_or_lod, ref_or_si, ddx, ddy, num_derivs); + } + else { + msg_len = fs_add_sampler_params_gen6(tc, msg_type, base_mrf, param_size, + coords, num_coords, bias_or_lod, ref_or_si, ddx, ddy, num_derivs); + } + + /* + * From the Sandy Bridge PRM, volume 4 part 1, page 136: + * + * "The maximum message length allowed to the sampler is 11. This would + * disallow sample_d, sample_b_c, and sample_l_c with a SIMD Mode of + * SIMD16." 
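The 11-register cap quoted here is what rules out SIMD16 `sample_d`: each parameter occupies `param_size` MRFs (1 for SIMD8, 2 for SIMD16). A runnable sketch of that arithmetic, using the gen6 `sample_d` parameter count from `fs_add_sampler_params_gen6()`:

```c
#include <stdio.h>

/* message length, excluding any header, as computed above */
static int
sampler_msg_len(int num_params, int simd16)
{
   const int param_size = simd16 ? 2 : 1;
   return num_params * param_size;
}

int main(void)
{
   const int txd_params = 4 + 3 * 2; /* 4 coords + 3 (ddx, ddy) pairs */

   printf("sample_d SIMD8:  %d MRFs\n", sampler_msg_len(txd_params, 0)); /* 10 */
   printf("sample_d SIMD16: %d MRFs\n", sampler_msg_len(txd_params, 1)); /* 20 > 11 */
   return 0;
}
```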
+ */ + if (msg_len > 11) + tc_fail(tc, "maximum length for messages to the sampler is 11"); + + if (ret_sampler_index) + *ret_sampler_index = sampler_index; + + return tsrc_imm_mdesc_sampler(tc, msg_len, 4 * param_size, + false, simd_mode, msg_type, sampler_index, binding_table_index); +} + +static void +fs_lower_opcode_tgsi_sampling(struct fs_compile_context *fcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &fcc->tc; + struct toy_dst dst[4], tmp[4]; + struct toy_src desc; + unsigned sampler_index; + int swizzles[4], i; + bool need_filter; + + desc = fs_prepare_tgsi_sampling(tc, inst, + fcc->first_free_mrf, + fcc->variant->saturate_tex_coords, + &sampler_index); + + switch (inst->opcode) { + case TOY_OPCODE_TGSI_TXF: + case TOY_OPCODE_TGSI_TXQ: + case TOY_OPCODE_TGSI_TXQ_LZ: + need_filter = false; + break; + default: + need_filter = true; + break; + } + + toy_compiler_lower_to_send(tc, inst, false, BRW_SFID_SAMPLER); + inst->src[0] = tsrc(TOY_FILE_MRF, fcc->first_free_mrf, 0); + inst->src[1] = desc; + for (i = 2; i < Elements(inst->src); i++) + inst->src[i] = tsrc_null(); + + /* write to temps first */ + tc_alloc_tmp4(tc, tmp); + tdst_transpose(inst->dst, dst); + inst->dst = tmp[0]; + + tc_move_inst(tc, inst); + + if (need_filter) { + assert(sampler_index < fcc->variant->num_sampler_views); + swizzles[0] = fcc->variant->sampler_view_swizzles[sampler_index].r; + swizzles[1] = fcc->variant->sampler_view_swizzles[sampler_index].g; + swizzles[2] = fcc->variant->sampler_view_swizzles[sampler_index].b; + swizzles[3] = fcc->variant->sampler_view_swizzles[sampler_index].a; + } + else { + swizzles[0] = PIPE_SWIZZLE_RED; + swizzles[1] = PIPE_SWIZZLE_GREEN; + swizzles[2] = PIPE_SWIZZLE_BLUE; + swizzles[3] = PIPE_SWIZZLE_ALPHA; + } + + /* swizzle the results */ + for (i = 0; i < 4; i++) { + switch (swizzles[i]) { + case PIPE_SWIZZLE_ZERO: + tc_MOV(tc, dst[i], tsrc_imm_f(0.0f)); + break; + case PIPE_SWIZZLE_ONE: + tc_MOV(tc, dst[i], tsrc_imm_f(1.0f)); + break; + default: + tc_MOV(tc, dst[i], tsrc_from(tmp[swizzles[i]])); + break; + } + } +} + +static void +fs_lower_opcode_derivative(struct toy_compiler *tc, struct toy_inst *inst) +{ + struct toy_dst dst[4]; + struct toy_src src[4]; + int i; + + tdst_transpose(inst->dst, dst); + tsrc_transpose(inst->src[0], src); + + /* + * Every four fragments are from a 2x2 subspan, with + * + * fragment 1 on the top-left, + * fragment 2 on the top-right, + * fragment 3 on the bottom-left, + * fragment 4 on the bottom-right. + * + * DDX should thus produce + * + * dst = src.yyww - src.xxzz + * + * and DDY should produce + * + * dst = src.zzww - src.xxyy + * + * But since we are in BRW_ALIGN_1, swizzling does not work and we have to + * play with the region parameters. 
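The 2x2-subspan arithmetic described above is easier to see in scalar form. A minimal model, assuming the four fragments of a subspan are laid out [top-left, top-right, bottom-left, bottom-right]:

```c
/* scalar model of the DDX/DDY lowering that follows */
static void
subspan_derivatives(const float src[4], float ddx[4], float ddy[4])
{
   /* DDX: dst = src.yyww - src.xxzz (right neighbor minus left) */
   ddx[0] = ddx[1] = src[1] - src[0];
   ddx[2] = ddx[3] = src[3] - src[2];

   /* DDY, approximated as dst = src.zzzz - src.xxxx below
    * (bottom-left minus top-left, reused for all four fragments) */
   ddy[0] = ddy[1] = ddy[2] = ddy[3] = src[2] - src[0];
}
```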
+ */ + if (inst->opcode == TOY_OPCODE_DDX) { + for (i = 0; i < 4; i++) { + struct toy_src left, right; + + left = tsrc_rect(src[i], TOY_RECT_220); + right = tsrc_offset(left, 0, 1); + + tc_ADD(tc, dst[i], right, tsrc_negate(left)); + } + } + else { + for (i = 0; i < 4; i++) { + struct toy_src top, bottom; + + /* approximate with dst = src.zzzz - src.xxxx */ + top = tsrc_rect(src[i], TOY_RECT_440); + bottom = tsrc_offset(top, 0, 2); + + tc_ADD(tc, dst[i], bottom, tsrc_negate(top)); + } + } + + tc_discard_inst(tc, inst); +} + +static void +fs_lower_opcode_fb_write(struct toy_compiler *tc, struct toy_inst *inst) +{ + /* fs_write_fb() has set up the message registers */ + toy_compiler_lower_to_send(tc, inst, true, + GEN6_SFID_DATAPORT_RENDER_CACHE); +} + +static void +fs_lower_opcode_kil(struct toy_compiler *tc, struct toy_inst *inst) +{ + struct toy_dst pixel_mask_dst; + struct toy_src f0, pixel_mask; + struct toy_inst *tmp; + + /* lower half of r1.7:ud */ + pixel_mask_dst = tdst_uw(tdst(TOY_FILE_GRF, 1, 7 * 4)); + pixel_mask = tsrc_rect(tsrc_from(pixel_mask_dst), TOY_RECT_010); + + f0 = tsrc_rect(tsrc_uw(tsrc(TOY_FILE_ARF, BRW_ARF_FLAG, 0)), TOY_RECT_010); + + /* KILP or KIL */ + if (tsrc_is_null(inst->src[0])) { + struct toy_src dummy = tsrc_uw(tsrc(TOY_FILE_GRF, 0, 0)); + struct toy_dst f0_dst = tdst_uw(tdst(TOY_FILE_ARF, BRW_ARF_FLAG, 0)); + + /* create a mask that masks out all pixels */ + tmp = tc_MOV(tc, f0_dst, tsrc_rect(tsrc_imm_uw(0xffff), TOY_RECT_010)); + tmp->exec_size = BRW_EXECUTE_1; + tmp->mask_ctrl = BRW_MASK_DISABLE; + + tc_CMP(tc, tdst_null(), dummy, dummy, BRW_CONDITIONAL_NEQ); + + /* swapping the two src operands breaks glBitmap()!? */ + tmp = tc_AND(tc, pixel_mask_dst, f0, pixel_mask); + tmp->exec_size = BRW_EXECUTE_1; + tmp->mask_ctrl = BRW_MASK_DISABLE; + } + else { + struct toy_src src[4]; + int i; + + tsrc_transpose(inst->src[0], src); + /* mask out killed pixels */ + for (i = 0; i < 4; i++) { + tc_CMP(tc, tdst_null(), src[i], tsrc_imm_f(0.0f), + BRW_CONDITIONAL_GE); + + /* swapping the two src operands breaks glBitmap()!? 
*/ + tmp = tc_AND(tc, pixel_mask_dst, f0, pixel_mask); + tmp->exec_size = BRW_EXECUTE_1; + tmp->mask_ctrl = BRW_MASK_DISABLE; + } + } + + tc_discard_inst(tc, inst); +} + +static void +fs_lower_virtual_opcodes(struct fs_compile_context *fcc) +{ + struct toy_compiler *tc = &fcc->tc; + struct toy_inst *inst; + + /* lower TGSI's first, as they might be lowered to other virtual opcodes */ + tc_head(tc); + while ((inst = tc_next(tc)) != NULL) { + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + case TOY_OPCODE_TGSI_CONST: + case TOY_OPCODE_TGSI_SV: + case TOY_OPCODE_TGSI_IMM: + fs_lower_opcode_tgsi_direct(fcc, inst); + break; + case TOY_OPCODE_TGSI_INDIRECT_FETCH: + case TOY_OPCODE_TGSI_INDIRECT_STORE: + fs_lower_opcode_tgsi_indirect(fcc, inst); + break; + case TOY_OPCODE_TGSI_TEX: + case TOY_OPCODE_TGSI_TXB: + case TOY_OPCODE_TGSI_TXD: + case TOY_OPCODE_TGSI_TXL: + case TOY_OPCODE_TGSI_TXP: + case TOY_OPCODE_TGSI_TXF: + case TOY_OPCODE_TGSI_TXQ: + case TOY_OPCODE_TGSI_TXQ_LZ: + case TOY_OPCODE_TGSI_TEX2: + case TOY_OPCODE_TGSI_TXB2: + case TOY_OPCODE_TGSI_TXL2: + case TOY_OPCODE_TGSI_SAMPLE: + case TOY_OPCODE_TGSI_SAMPLE_I: + case TOY_OPCODE_TGSI_SAMPLE_I_MS: + case TOY_OPCODE_TGSI_SAMPLE_B: + case TOY_OPCODE_TGSI_SAMPLE_C: + case TOY_OPCODE_TGSI_SAMPLE_C_LZ: + case TOY_OPCODE_TGSI_SAMPLE_D: + case TOY_OPCODE_TGSI_SAMPLE_L: + case TOY_OPCODE_TGSI_GATHER4: + case TOY_OPCODE_TGSI_SVIEWINFO: + case TOY_OPCODE_TGSI_SAMPLE_POS: + case TOY_OPCODE_TGSI_SAMPLE_INFO: + fs_lower_opcode_tgsi_sampling(fcc, inst); + break; + } + } + + tc_head(tc); + while ((inst = tc_next(tc)) != NULL) { + switch (inst->opcode) { + case TOY_OPCODE_INV: + case TOY_OPCODE_LOG: + case TOY_OPCODE_EXP: + case TOY_OPCODE_SQRT: + case TOY_OPCODE_RSQ: + case TOY_OPCODE_SIN: + case TOY_OPCODE_COS: + case TOY_OPCODE_FDIV: + case TOY_OPCODE_POW: + case TOY_OPCODE_INT_DIV_QUOTIENT: + case TOY_OPCODE_INT_DIV_REMAINDER: + toy_compiler_lower_math(tc, inst); + break; + case TOY_OPCODE_DDX: + case TOY_OPCODE_DDY: + fs_lower_opcode_derivative(tc, inst); + break; + case TOY_OPCODE_FB_WRITE: + fs_lower_opcode_fb_write(tc, inst); + break; + case TOY_OPCODE_KIL: + fs_lower_opcode_kil(tc, inst); + break; + default: + if (inst->opcode > 127) + tc_fail(tc, "unhandled virtual opcode"); + break; + } + } +} + +/** + * Compile the shader. 
+ */ +static bool +fs_compile(struct fs_compile_context *fcc) +{ + struct toy_compiler *tc = &fcc->tc; + struct ilo_shader *sh = fcc->shader; + + fs_lower_virtual_opcodes(fcc); + toy_compiler_legalize_for_ra(tc); + toy_compiler_optimize(tc); + toy_compiler_allocate_registers(tc, + fcc->first_free_grf, + fcc->last_free_grf, + fcc->num_grf_per_vrf); + toy_compiler_legalize_for_asm(tc); + + if (tc->fail) { + ilo_err("failed to legalize FS instructions: %s\n", tc->reason); + return false; + } + + if (ilo_debug & ILO_DEBUG_FS) { + ilo_printf("legalized instructions:\n"); + toy_compiler_dump(tc); + ilo_printf("\n"); + } + + if (true) { + sh->kernel = toy_compiler_assemble(tc, &sh->kernel_size); + } + else { + static const uint32_t microcode[] = { + /* fill in the microcode here */ + 0x0, 0x0, 0x0, 0x0, + }; + const bool swap = true; + + sh->kernel_size = sizeof(microcode); + sh->kernel = MALLOC(sh->kernel_size); + + if (sh->kernel) { + const int num_dwords = sizeof(microcode) / 4; + const uint32_t *src = microcode; + uint32_t *dst = (uint32_t *) sh->kernel; + int i; + + for (i = 0; i < num_dwords; i += 4) { + if (swap) { + dst[i + 0] = src[i + 3]; + dst[i + 1] = src[i + 2]; + dst[i + 2] = src[i + 1]; + dst[i + 3] = src[i + 0]; + } + else { + memcpy(dst, src, 16); + } + } + } + } + + if (!sh->kernel) { + ilo_err("failed to compile FS: %s\n", tc->reason); + return false; + } + + if (ilo_debug & ILO_DEBUG_FS) { + ilo_printf("disassembly:\n"); + toy_compiler_disassemble(tc, sh->kernel, sh->kernel_size); + ilo_printf("\n"); + } + + return true; +} + +/** + * Emit instructions to write the color buffers (and the depth buffer). + */ +static void +fs_write_fb(struct fs_compile_context *fcc) +{ + struct toy_compiler *tc = &fcc->tc; + int base_mrf = fcc->first_free_mrf; + const struct toy_dst header = tdst_ud(tdst(TOY_FILE_MRF, base_mrf, 0)); + bool header_present = false; + struct toy_src desc; + unsigned msg_type, ctrl; + int color_slots[ILO_MAX_DRAW_BUFFERS], num_cbufs; + int pos_slot = -1, cbuf, i; + + for (i = 0; i < Elements(color_slots); i++) + color_slots[i] = -1; + + for (i = 0; i < fcc->tgsi.num_outputs; i++) { + if (fcc->tgsi.outputs[i].semantic_name == TGSI_SEMANTIC_COLOR) { + assert(fcc->tgsi.outputs[i].semantic_index < Elements(color_slots)); + color_slots[fcc->tgsi.outputs[i].semantic_index] = i; + } + else if (fcc->tgsi.outputs[i].semantic_name == TGSI_SEMANTIC_POSITION) { + pos_slot = i; + } + } + + num_cbufs = fcc->variant->u.fs.num_cbufs; + /* still need to send EOT (and probably depth) */ + if (!num_cbufs) + num_cbufs = 1; + + /* we need the header to specify the pixel mask or render target */ + if (fcc->tgsi.uses_kill || num_cbufs > 1) { + const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0)); + struct toy_inst *inst; + + inst = tc_MOV(tc, header, r0); + inst->mask_ctrl = BRW_MASK_DISABLE; + base_mrf += fcc->num_grf_per_vrf; + + /* this is a two-register header */ + if (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) { + inst = tc_MOV(tc, tdst_offset(header, 1, 0), tsrc_offset(r0, 1, 0)); + inst->mask_ctrl = BRW_MASK_DISABLE; + base_mrf += fcc->num_grf_per_vrf; + } + + header_present = true; + } + + for (cbuf = 0; cbuf < num_cbufs; cbuf++) { + const int slot = + color_slots[(fcc->tgsi.props.fs_color0_writes_all_cbufs) ? 
0 : cbuf]; + int mrf = base_mrf, vrf; + struct toy_src src[4]; + + if (slot >= 0) { + const unsigned undefined_mask = + fcc->tgsi.outputs[slot].undefined_mask; + const int index = fcc->tgsi.outputs[slot].index; + + vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index); + if (vrf >= 0) { + const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0); + tsrc_transpose(tmp, src); + } + else { + /* use (0, 0, 0, 0) */ + tsrc_transpose(tsrc_imm_f(0.0f), src); + } + + for (i = 0; i < 4; i++) { + const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0); + + if (undefined_mask & (1 << i)) + src[i] = tsrc_imm_f(0.0f); + + tc_MOV(tc, dst, src[i]); + + mrf += fcc->num_grf_per_vrf; + } + } + else { + /* use (0, 0, 0, 0) */ + for (i = 0; i < 4; i++) { + const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0); + + tc_MOV(tc, dst, tsrc_imm_f(0.0f)); + mrf += fcc->num_grf_per_vrf; + } + } + + /* select BLEND_STATE[rt] */ + if (cbuf > 0) { + struct toy_inst *inst; + + inst = tc_MOV(tc, tdst_offset(header, 0, 2), tsrc_imm_ud(cbuf)); + inst->mask_ctrl = BRW_MASK_DISABLE; + inst->exec_size = BRW_EXECUTE_1; + inst->src[0].rect = TOY_RECT_010; + } + + if (cbuf == 0 && pos_slot >= 0) { + const int index = fcc->tgsi.outputs[pos_slot].index; + const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0); + struct toy_src src[4]; + int vrf; + + vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index); + if (vrf >= 0) { + const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0); + tsrc_transpose(tmp, src); + } + else { + /* use (0, 0, 0, 0) */ + tsrc_transpose(tsrc_imm_f(0.0f), src); + } + + /* only Z */ + tc_MOV(tc, dst, src[2]); + + mrf += fcc->num_grf_per_vrf; + } + + msg_type = (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE) ? + BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE : + BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01; + + ctrl = (cbuf == num_cbufs - 1) << 12 | + msg_type << 8; + + desc = tsrc_imm_mdesc_data_port(tc, cbuf == num_cbufs - 1, + mrf - fcc->first_free_mrf, 0, + header_present, false, + GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE, + ctrl, ILO_WM_DRAW_SURFACE(cbuf)); + + tc_add2(tc, TOY_OPCODE_FB_WRITE, tdst_null(), + tsrc(TOY_FILE_MRF, fcc->first_free_mrf, 0), desc); + } +} + +/** + * Set up shader outputs for fixed-function units. + */ +static void +fs_setup_shader_out(struct ilo_shader *sh, const struct toy_tgsi *tgsi) +{ + int i; + + sh->out.count = tgsi->num_outputs; + for (i = 0; i < tgsi->num_outputs; i++) { + sh->out.semantic_names[i] = tgsi->outputs[i].semantic_name; + sh->out.semantic_indices[i] = tgsi->outputs[i].semantic_index; + + if (tgsi->outputs[i].semantic_name == TGSI_SEMANTIC_POSITION) + sh->out.has_pos = true; + } +} + +/** + * Set up shader inputs for fixed-function units. 
+ */ +static void +fs_setup_shader_in(struct ilo_shader *sh, const struct toy_tgsi *tgsi, + bool flatshade) +{ + int i; + + sh->in.count = tgsi->num_inputs; + for (i = 0; i < tgsi->num_inputs; i++) { + sh->in.semantic_names[i] = tgsi->inputs[i].semantic_name; + sh->in.semantic_indices[i] = tgsi->inputs[i].semantic_index; + sh->in.interp[i] = tgsi->inputs[i].interp; + sh->in.centroid[i] = tgsi->inputs[i].centroid; + + if (tgsi->inputs[i].semantic_name == TGSI_SEMANTIC_POSITION) { + sh->in.has_pos = true; + continue; + } + else if (tgsi->inputs[i].semantic_name == TGSI_SEMANTIC_FACE) { + continue; + } + + switch (tgsi->inputs[i].interp) { + case TGSI_INTERPOLATE_LINEAR: + sh->in.has_linear_interp = true; + + if (tgsi->inputs[i].centroid) { + sh->in.barycentric_interpolation_mode |= + 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC; + } + else { + sh->in.barycentric_interpolation_mode |= + 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC; + } + break; + case TGSI_INTERPOLATE_COLOR: + if (flatshade) + break; + /* fall through */ + case TGSI_INTERPOLATE_PERSPECTIVE: + if (tgsi->inputs[i].centroid) { + sh->in.barycentric_interpolation_mode |= + 1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC; + } + else { + sh->in.barycentric_interpolation_mode |= + 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC; + } + break; + default: + break; + } + } +} + +static int +fs_setup_payloads(struct fs_compile_context *fcc) +{ + const struct ilo_shader *sh = fcc->shader; + int grf, i; + + grf = 0; + + /* r0: header */ + grf++; + + /* r1-r2: coordinates and etc. */ + grf += (fcc->dispatch_mode == GEN6_WM_32_DISPATCH_ENABLE) ? 2 : 1; + + for (i = 0; i < Elements(fcc->payloads); i++) { + int interp; + + /* r3-r26 or r32-r55: barycentric interpolation parameters */ + for (interp = 0; interp < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; interp++) { + if (!(sh->in.barycentric_interpolation_mode & (1 << interp))) + continue; + + fcc->payloads[i].barycentric_interps[interp] = grf; + grf += (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) ? 2 : 4; + } + + /* r27-r28 or r56-r57: interpoloated depth */ + if (sh->in.has_pos) { + fcc->payloads[i].source_depth = grf; + grf += (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) ? 1 : 2; + } + + /* r29-r30 or r58-r59: interpoloated w */ + if (sh->in.has_pos) { + fcc->payloads[i].source_w = grf; + grf += (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) ? 1 : 2; + } + + /* r31 or r60: position offset */ + if (false) { + fcc->payloads[i].pos_offset = grf; + grf++; + } + + if (fcc->dispatch_mode != GEN6_WM_32_DISPATCH_ENABLE) + break; + } + + return grf; +} + +/** + * Translate the TGSI tokens. + */ +static bool +fs_setup_tgsi(struct toy_compiler *tc, const struct tgsi_token *tokens, + struct toy_tgsi *tgsi) +{ + if (ilo_debug & ILO_DEBUG_FS) { + ilo_printf("dumping fragment shader\n"); + ilo_printf("\n"); + + tgsi_dump(tokens, 0); + ilo_printf("\n"); + } + + toy_compiler_translate_tgsi(tc, tokens, false, tgsi); + if (tc->fail) { + ilo_err("failed to translate FS TGSI tokens: %s\n", tc->reason); + return false; + } + + if (ilo_debug & ILO_DEBUG_FS) { + ilo_printf("TGSI translator:\n"); + toy_tgsi_dump(tgsi); + ilo_printf("\n"); + toy_compiler_dump(tc); + ilo_printf("\n"); + } + + return true; +} + +/** + * Set up FS compile context. This includes translating the TGSI tokens. 
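The payload layout walked through in `fs_setup_payloads()` above can be summarized as a small GRF-accounting helper. A sketch covering the SIMD8/SIMD16 cases (SIMD32 omitted); `barycentric_mask` stands in for `sh->in.barycentric_interpolation_mode`:

```c
/* returns the first GRF free for push constants, per the layout above */
static int
fs_payload_grf_count(int simd16, unsigned barycentric_mask, int has_pos)
{
   int grf = 1;                       /* r0: header */

   grf += 1;                          /* r1: coordinates etc. (r1-r2 for SIMD32) */

   /* one block of barycentric parameters per enabled interpolation mode */
   for (; barycentric_mask; barycentric_mask &= barycentric_mask - 1)
      grf += simd16 ? 4 : 2;

   if (has_pos) {
      grf += simd16 ? 2 : 1;          /* interpolated depth */
      grf += simd16 ? 2 : 1;          /* interpolated w */
   }

   return grf;
}
```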
+ */ +static bool +fs_setup(struct fs_compile_context *fcc, + const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant) +{ + int num_consts; + + memset(fcc, 0, sizeof(*fcc)); + + fcc->shader = CALLOC_STRUCT(ilo_shader); + if (!fcc->shader) + return false; + + fcc->variant = variant; + + toy_compiler_init(&fcc->tc, state->info.gen); + + fcc->dispatch_mode = GEN6_WM_8_DISPATCH_ENABLE; + + fcc->tc.templ.access_mode = BRW_ALIGN_1; + if (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE) { + fcc->tc.templ.qtr_ctrl = GEN6_COMPRESSION_1H; + fcc->tc.templ.exec_size = BRW_EXECUTE_16; + } + else { + fcc->tc.templ.qtr_ctrl = GEN6_COMPRESSION_1Q; + fcc->tc.templ.exec_size = BRW_EXECUTE_8; + } + + fcc->tc.rect_linear_width = 8; + + /* + * The classic driver uses the sampler cache (gen6) or the data cache + * (gen7). Why? + */ + fcc->const_cache = GEN6_SFID_DATAPORT_CONSTANT_CACHE; + + if (!fs_setup_tgsi(&fcc->tc, state->info.tokens, &fcc->tgsi)) { + toy_compiler_cleanup(&fcc->tc); + FREE(fcc->shader); + return false; + } + + fs_setup_shader_in(fcc->shader, &fcc->tgsi, fcc->variant->u.fs.flatshade); + fs_setup_shader_out(fcc->shader, &fcc->tgsi); + + /* we do not make use of push constant buffers yet */ + num_consts = 0; + + fcc->first_const_grf = fs_setup_payloads(fcc); + fcc->first_attr_grf = fcc->first_const_grf + num_consts; + fcc->first_free_grf = fcc->first_attr_grf + fcc->shader->in.count * 2; + fcc->last_free_grf = 127; + + /* m0 is reserved for system routines */ + fcc->first_free_mrf = 1; + fcc->last_free_mrf = 15; + + /* instructions are compressed with BRW_EXECUTE_16 */ + fcc->num_grf_per_vrf = + (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE) ? 2 : 1; + + if (fcc->tc.gen >= ILO_GEN(7)) { + fcc->last_free_grf -= 15; + fcc->first_free_mrf = fcc->last_free_grf + 1; + fcc->last_free_mrf = fcc->first_free_mrf + 14; + } + + fcc->shader->in.start_grf = fcc->first_const_grf; + fcc->shader->has_kill = fcc->tgsi.uses_kill; + fcc->shader->dispatch_16 = + (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE); + + return true; +} + +/** + * Compile the fragment shader. + */ +struct ilo_shader * +ilo_shader_compile_fs(const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant) +{ + struct fs_compile_context fcc; + + if (!fs_setup(&fcc, state, variant)) + return NULL; + + fs_write_fb(&fcc); + + if (!fs_compile(&fcc)) { + FREE(fcc.shader); + fcc.shader = NULL; + } + + toy_tgsi_cleanup(&fcc.tgsi); + toy_compiler_cleanup(&fcc.tc); + + return fcc.shader; +} diff --git a/src/gallium/drivers/ilo/shader/ilo_shader_gs.c b/src/gallium/drivers/ilo/shader/ilo_shader_gs.c new file mode 100644 index 00000000000..620e0bffc15 --- /dev/null +++ b/src/gallium/drivers/ilo/shader/ilo_shader_gs.c @@ -0,0 +1,1437 @@ +/* + * Mesa 3-D graphics library + * + * Copyright (C) 2012-2013 LunarG, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chia-I Wu <[email protected]> + */ + +#include "tgsi/tgsi_dump.h" +#include "toy_compiler.h" +#include "toy_tgsi.h" +#include "toy_legalize.h" +#include "toy_optimize.h" +#include "toy_helpers.h" +#include "ilo_shader.h" + +/* XXX Below is proof-of-concept code. Skip this file! */ + +/* + * TODO + * - primitive id is in r0.1. FS receives PID as a flat attribute. + * - set VUE header m0.1 for layered rendering + */ +struct gs_compile_context { + struct ilo_shader *shader; + const struct ilo_shader_variant *variant; + const struct pipe_stream_output_info *so_info; + + struct toy_compiler tc; + struct toy_tgsi tgsi; + int output_map[PIPE_MAX_SHADER_OUTPUTS]; + + bool write_so; + bool write_vue; + + int in_vue_size; + int in_vue_count; + + int out_vue_size; + int out_vue_min_count; + + bool is_static; + + struct { + struct toy_src header; + struct toy_src svbi; + struct toy_src vues[6]; + } payload; + + struct { + struct toy_dst urb_write_header; + bool prim_start; + bool prim_end; + int prim_type; + + struct toy_dst tmp; + + /* buffered tgsi_outs */ + struct toy_dst buffers[3]; + int buffer_needed, buffer_cur; + + struct toy_dst so_written; + struct toy_dst so_index; + + struct toy_src tgsi_outs[PIPE_MAX_SHADER_OUTPUTS]; + } vars; + + struct { + struct toy_dst total_vertices; + struct toy_dst total_prims; + + struct toy_dst num_vertices; + struct toy_dst num_vertices_in_prim; + } dynamic_data; + + struct { + int total_vertices; + int total_prims; + /* this limits the max vertice count to be 256 */ + uint32_t last_vertex[8]; + + int num_vertices; + int num_vertices_in_prim; + } static_data; + + int first_free_grf; + int last_free_grf; + int first_free_mrf; + int last_free_mrf; +}; + +static void +gs_COPY8(struct toy_compiler *tc, struct toy_dst dst, struct toy_src src) +{ + struct toy_inst *inst; + + inst = tc_MOV(tc, dst, src); + inst->exec_size = BRW_EXECUTE_8; + inst->mask_ctrl = BRW_MASK_DISABLE; +} + +static void +gs_COPY4(struct toy_compiler *tc, + struct toy_dst dst, int dst_ch, + struct toy_src src, int src_ch) +{ + struct toy_inst *inst; + + inst = tc_MOV(tc, + tdst_offset(dst, 0, dst_ch), + tsrc_offset(src, 0, src_ch)); + inst->exec_size = BRW_EXECUTE_4; + inst->mask_ctrl = BRW_MASK_DISABLE; +} + +static void +gs_COPY1(struct toy_compiler *tc, + struct toy_dst dst, int dst_ch, + struct toy_src src, int src_ch) +{ + struct toy_inst *inst; + + inst = tc_MOV(tc, + tdst_offset(dst, 0, dst_ch), + tsrc_rect(tsrc_offset(src, 0, src_ch), TOY_RECT_010)); + inst->exec_size = BRW_EXECUTE_1; + inst->mask_ctrl = BRW_MASK_DISABLE; +} + +static void +gs_init_vars(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_dst dst; + + /* init URB_WRITE header */ + dst = gcc->vars.urb_write_header; + + gs_COPY8(tc, dst, gcc->payload.header); + + gcc->vars.prim_start = true; + gcc->vars.prim_end = false; + switch (gcc->out_vue_min_count) { + case 1: + gcc->vars.prim_type = _3DPRIM_POINTLIST; + break; + case 2: + gcc->vars.prim_type = _3DPRIM_LINESTRIP; + break; + case 3: 
+ gcc->vars.prim_type = _3DPRIM_TRISTRIP; + break; + } + + if (gcc->write_so) + tc_MOV(tc, gcc->vars.so_written, tsrc_imm_d(0)); +} + +static void +gs_save_output(struct gs_compile_context *gcc, const struct toy_src *outs) +{ + struct toy_compiler *tc = &gcc->tc; + const struct toy_dst buf = gcc->vars.buffers[gcc->vars.buffer_cur]; + int i; + + for (i = 0; i < gcc->shader->out.count; i++) + tc_MOV(tc, tdst_offset(buf, i, 0), outs[i]); + + /* advance the cursor */ + gcc->vars.buffer_cur++; + gcc->vars.buffer_cur %= gcc->vars.buffer_needed; +} + +static void +gs_write_so(struct gs_compile_context *gcc, + struct toy_dst dst, + struct toy_src index, struct toy_src out, + bool send_write_commit_message, + int binding_table_index) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_dst mrf_header; + struct toy_src desc; + + mrf_header = tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0)); + + /* m0.5: destination index */ + gs_COPY1(tc, mrf_header, 5, index, 0); + + /* m0.0 - m0.3: RGBA */ + gs_COPY4(tc, mrf_header, 0, tsrc_type(out, mrf_header.type), 0); + + desc = tsrc_imm_mdesc_data_port(tc, false, + 1, send_write_commit_message, + true, send_write_commit_message, + GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE, 0, + binding_table_index); + + tc_SEND(tc, dst, tsrc_from(mrf_header), desc, + GEN6_SFID_DATAPORT_RENDER_CACHE); +} + +static void +gs_write_vue(struct gs_compile_context *gcc, + struct toy_dst dst, struct toy_src msg_header, + const struct toy_src *outs, int num_outs, + bool eot) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_dst mrf_header; + struct toy_src desc; + int sent = 0; + + mrf_header = tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0)); + gs_COPY8(tc, mrf_header, msg_header); + + while (sent < num_outs) { + int mrf = gcc->first_free_mrf + 1; + const int mrf_avail = gcc->last_free_mrf - mrf + 1; + int msg_len, num_entries, i; + bool complete; + + num_entries = (num_outs - sent + 1) / 2; + complete = true; + if (num_entries > mrf_avail) { + num_entries = mrf_avail; + complete = false; + } + + for (i = 0; i < num_entries; i++) { + gs_COPY4(tc, tdst(TOY_FILE_MRF, mrf + i / 2, 0), 0, + outs[sent + 2 * i], 0); + if (sent + i * 2 + 1 < gcc->shader->out.count) { + gs_COPY4(tc, tdst(TOY_FILE_MRF, mrf + i / 2, 0), 4, + outs[sent + 2 * i + 1], 0); + } + mrf++; + } + + /* do not forget the header */ + msg_len = num_entries + 1; + + if (complete) { + desc = tsrc_imm_mdesc_urb(tc, + eot, msg_len, !eot, true, true, !eot, + BRW_URB_SWIZZLE_NONE, sent, 0); + } + else { + desc = tsrc_imm_mdesc_urb(tc, + false, msg_len, 0, false, true, false, + BRW_URB_SWIZZLE_NONE, sent, 0); + } + + tc_add2(tc, TOY_OPCODE_URB_WRITE, + (complete) ? 
dst : tdst_null(), tsrc_from(mrf_header), desc); + + sent += num_entries * 2; + } +} + +static void +gs_ff_sync(struct gs_compile_context *gcc, struct toy_dst dst, + struct toy_src num_prims) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_dst mrf_header = + tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0)); + struct toy_src desc; + bool allocate; + + gs_COPY8(tc, mrf_header, gcc->payload.header); + + /* set NumSOVertsToWrite and NumSOPrimsNeeded */ + if (gcc->write_so) { + if (num_prims.file == TOY_FILE_IMM) { + const uint32_t v = + (num_prims.val32 * gcc->in_vue_count) << 16 | num_prims.val32; + + gs_COPY1(tc, mrf_header, 0, tsrc_imm_d(v), 0); + } + else { + struct toy_dst m0_0 = tdst_d(gcc->vars.tmp); + + tc_MUL(tc, m0_0, num_prims, tsrc_imm_d(gcc->in_vue_count << 16)); + tc_OR(tc, m0_0, tsrc_from(m0_0), num_prims); + + gs_COPY1(tc, mrf_header, 0, tsrc_from(m0_0), 0); + } + } + + /* set NumGSPrimsGenerated */ + if (gcc->write_vue) + gs_COPY1(tc, mrf_header, 1, num_prims, 0); + + /* + * From the Sandy Bridge PRM, volume 2 part 1, page 173: + * + * "Programming Note: If the GS stage is enabled, software must always + * allocate at least one GS URB Entry. This is true even if the GS + * thread never needs to output vertices to the pipeline, e.g., when + * only performing stream output. This is an artifact of the need to + * pass the GS thread an initial destination URB handle." + */ + allocate = true; + desc = tsrc_imm_mdesc_urb(tc, false, 1, 1, + false, false, allocate, + BRW_URB_SWIZZLE_NONE, 0, 1); + + tc_SEND(tc, dst, tsrc_from(mrf_header), desc, BRW_SFID_URB); +} + +static void +gs_discard(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_dst mrf_header; + struct toy_src desc; + + mrf_header = tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0)); + + gs_COPY8(tc, mrf_header, tsrc_from(gcc->vars.urb_write_header)); + + desc = tsrc_imm_mdesc_urb(tc, + true, 1, 0, true, false, false, + BRW_URB_SWIZZLE_NONE, 0, 0); + + tc_add2(tc, TOY_OPCODE_URB_WRITE, + tdst_null(), tsrc_from(mrf_header), desc); +} + +static void +gs_lower_opcode_endprim(struct gs_compile_context *gcc, struct toy_inst *inst) +{ + /* if has control flow, set PrimEnd on the last vertex and URB_WRITE */ +} + +static void +gs_lower_opcode_emit_vue_dynamic(struct gs_compile_context *gcc) +{ + /* TODO similar to the static version */ + + /* + * When SO is enabled and the inputs are lines or triangles, vertices are + * always buffered. we can defer the emission of the current vertex until + * the next EMIT or ENDPRIM. Or, we can emit two URB_WRITEs with the later + * patching the former. 
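When the primitive count is a compile-time immediate, `gs_ff_sync()` above packs two counters into dword m0.0 of the FF_SYNC header. The same packing as a plain helper:

```c
#include <stdint.h>

/* m0.0 of FF_SYNC: NumSOVertsToWrite in the high word,
 * NumSOPrimsNeeded in the low word, as set in gs_ff_sync() above */
static uint32_t
pack_so_counts(uint32_t num_prims, uint32_t in_vue_count)
{
   return ((num_prims * in_vue_count) << 16) | num_prims;
}
```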
+ */ +} + +static void +gs_lower_opcode_emit_so_dynamic(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + + tc_IF(tc, tdst_null(), + tsrc_from(gcc->dynamic_data.num_vertices_in_prim), + tsrc_imm_d(gcc->out_vue_min_count), + BRW_CONDITIONAL_GE); + + { + tc_ADD(tc, gcc->vars.tmp, tsrc_from(gcc->vars.so_index), tsrc_imm_d(0x03020100)); + + /* TODO same as static version */ + } + + tc_ENDIF(tc); + + tc_ADD(tc, gcc->vars.so_index, + tsrc_from(gcc->vars.so_index), tsrc_imm_d(gcc->out_vue_min_count)); +} + +static void +gs_lower_opcode_emit_vue_static(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_inst *inst2; + bool eot; + + eot = (gcc->static_data.num_vertices == gcc->static_data.total_vertices); + + gcc->vars.prim_end = + ((gcc->static_data.last_vertex[(gcc->static_data.num_vertices - 1) / 32] & + 1 << ((gcc->static_data.num_vertices - 1) % 32)) != 0); + + if (eot && gcc->write_so) { + inst2 = tc_OR(tc, tdst_offset(gcc->vars.urb_write_header, 0, 2), + tsrc_from(gcc->vars.so_written), + tsrc_imm_d(gcc->vars.prim_type << 2 | + gcc->vars.prim_start << 1 | + gcc->vars.prim_end)); + inst2->exec_size = BRW_EXECUTE_1; + inst2->src[0] = tsrc_rect(inst2->src[0], TOY_RECT_010); + inst2->src[1] = tsrc_rect(inst2->src[1], TOY_RECT_010); + } + else { + gs_COPY1(tc, gcc->vars.urb_write_header, 2, + tsrc_imm_d(gcc->vars.prim_type << 2 | + gcc->vars.prim_start << 1 | + gcc->vars.prim_end), 0); + } + + gs_write_vue(gcc, tdst_d(gcc->vars.tmp), + tsrc_from(gcc->vars.urb_write_header), + gcc->vars.tgsi_outs, + gcc->shader->out.count, eot); + + if (!eot) { + gs_COPY1(tc, gcc->vars.urb_write_header, 0, + tsrc_from(tdst_d(gcc->vars.tmp)), 0); + } + + gcc->vars.prim_start = gcc->vars.prim_end; + gcc->vars.prim_end = false; +} + +static void +gs_lower_opcode_emit_so_static(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_inst *inst; + int i, j; + + if (gcc->static_data.num_vertices_in_prim < gcc->out_vue_min_count) + return; + + inst = tc_MOV(tc, tdst_w(gcc->vars.tmp), tsrc_imm_v(0x03020100)); + inst->exec_size = BRW_EXECUTE_8; + inst->mask_ctrl = BRW_MASK_DISABLE; + + tc_ADD(tc, tdst_d(gcc->vars.tmp), tsrc_from(tdst_d(gcc->vars.tmp)), + tsrc_rect(tsrc_from(gcc->vars.so_index), TOY_RECT_010)); + + tc_IF(tc, tdst_null(), + tsrc_rect(tsrc_offset(tsrc_from(tdst_d(gcc->vars.tmp)), 0, gcc->out_vue_min_count - 1), TOY_RECT_010), + tsrc_rect(tsrc_offset(gcc->payload.svbi, 0, 4), TOY_RECT_010), + BRW_CONDITIONAL_LE); + { + for (i = 0; i < gcc->out_vue_min_count; i++) { + for (j = 0; j < gcc->so_info->num_outputs; j++) { + const int idx = gcc->so_info->output[j].register_index; + struct toy_src index, out; + int binding_table_index; + bool write_commit; + + index = tsrc_d(tsrc_offset(tsrc_from(gcc->vars.tmp), 0, i)); + + if (i == gcc->out_vue_min_count - 1) { + out = gcc->vars.tgsi_outs[idx]; + } + else { + /* gcc->vars.buffer_cur also points to the first vertex */ + const int buf = + (gcc->vars.buffer_cur + i) % gcc->vars.buffer_needed; + + out = tsrc_offset(tsrc_from(gcc->vars.buffers[buf]), idx, 0); + } + + out = tsrc_offset(out, 0, gcc->so_info->output[j].start_component); + + /* + * From the Sandy Bridge PRM, volume 4 part 2, page 19: + * + * "The Kernel must do a write commit on the last write to DAP + * prior to a URB_WRITE with End of Thread." 
+ */ + write_commit = + (gcc->static_data.num_vertices == gcc->static_data.total_vertices && + i == gcc->out_vue_min_count - 1 && + j == gcc->so_info->num_outputs - 1); + + + binding_table_index = ILO_GS_SO_SURFACE(j); + + gs_write_so(gcc, gcc->vars.tmp, index, + out, write_commit, binding_table_index); + + /* + * From the Sandy Bridge PRM, volume 4 part 1, page 168: + * + * "The write commit does not modify the destination register, but + * merely clears the dependency associated with the destination + * register. Thus, a simple "mov" instruction using the register as a + * source is sufficient to wait for the write commit to occur." + */ + if (write_commit) + tc_MOV(tc, gcc->vars.tmp, tsrc_from(gcc->vars.tmp)); + } + } + + /* SONumPrimsWritten occupies the higher word of m0.2 of URB_WRITE */ + tc_ADD(tc, gcc->vars.so_written, + tsrc_from(gcc->vars.so_written), tsrc_imm_d(1 << 16)); + tc_ADD(tc, gcc->vars.so_index, + tsrc_from(gcc->vars.so_index), tsrc_imm_d(gcc->out_vue_min_count)); + } + tc_ENDIF(tc); +} + +static void +gs_lower_opcode_emit_static(struct gs_compile_context *gcc, + struct toy_inst *inst) +{ + gcc->static_data.num_vertices++; + gcc->static_data.num_vertices_in_prim++; + + if (gcc->write_so) { + gs_lower_opcode_emit_so_static(gcc); + + if (gcc->out_vue_min_count > 1 && + gcc->static_data.num_vertices != gcc->static_data.total_vertices) + gs_save_output(gcc, gcc->vars.tgsi_outs); + } + + if (gcc->write_vue) + gs_lower_opcode_emit_vue_static(gcc); +} + +static void +gs_lower_opcode_emit_dynamic(struct gs_compile_context *gcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &gcc->tc; + + tc_ADD(tc, gcc->dynamic_data.num_vertices, + tsrc_from(gcc->dynamic_data.num_vertices), tsrc_imm_d(1)); + tc_ADD(tc, gcc->dynamic_data.num_vertices_in_prim, + tsrc_from(gcc->dynamic_data.num_vertices_in_prim), tsrc_imm_d(1)); + + if (gcc->write_so) { + gs_lower_opcode_emit_so_dynamic(gcc); + + if (gcc->out_vue_min_count > 1) + gs_save_output(gcc, gcc->vars.tgsi_outs); + } + + if (gcc->write_vue) + gs_lower_opcode_emit_vue_dynamic(gcc); +} + +static void +gs_lower_opcode_emit(struct gs_compile_context *gcc, struct toy_inst *inst) +{ + if (gcc->is_static) + gs_lower_opcode_emit_static(gcc, inst); + else + gs_lower_opcode_emit_dynamic(gcc, inst); +} + +static void +gs_lower_opcode_tgsi_in(struct gs_compile_context *gcc, + struct toy_dst dst, int dim, int idx) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_src attr; + int slot, reg = -1, subreg; + + slot = toy_tgsi_find_input(&gcc->tgsi, idx); + if (slot >= 0) { + int i; + + for (i = 0; i < gcc->variant->u.gs.num_inputs; i++) { + if (gcc->variant->u.gs.semantic_names[i] == + gcc->tgsi.inputs[slot].semantic_name && + gcc->variant->u.gs.semantic_indices[i] == + gcc->tgsi.inputs[slot].semantic_index) { + reg = i / 2; + subreg = (i % 2) * 4; + break; + } + } + } + + if (reg < 0) { + tc_MOV(tc, dst, tsrc_imm_f(0.0f)); + return; + } + + /* fix vertex ordering for _3DPRIM_TRISTRIP_REVERSE */ + if (gcc->in_vue_count == 3 && dim < 2) { + struct toy_inst *inst; + + /* get PrimType */ + inst = tc_AND(tc, tdst_d(gcc->vars.tmp), + tsrc_offset(gcc->payload.header, 0, 2), tsrc_imm_d(0x1f)); + inst->exec_size = BRW_EXECUTE_1; + inst->src[0] = tsrc_rect(inst->src[0], TOY_RECT_010); + inst->src[1] = tsrc_rect(inst->src[1], TOY_RECT_010); + + inst = tc_CMP(tc, tdst_null(), tsrc_from(tdst_d(gcc->vars.tmp)), + tsrc_imm_d(_3DPRIM_TRISTRIP_REVERSE), BRW_CONDITIONAL_NEQ); + inst->src[0] = tsrc_rect(inst->src[0], TOY_RECT_010); + + attr = 
tsrc_offset(gcc->payload.vues[dim], reg, subreg); + inst = tc_MOV(tc, dst, attr); + inst->pred_ctrl = BRW_PREDICATE_NORMAL; + + /* swap IN[0] and IN[1] for _3DPRIM_TRISTRIP_REVERSE */ + dim = !dim; + + attr = tsrc_offset(gcc->payload.vues[dim], reg, subreg); + inst = tc_MOV(tc, dst, attr); + inst->pred_ctrl = BRW_PREDICATE_NORMAL; + inst->pred_inv = true; + } + else { + attr = tsrc_offset(gcc->payload.vues[dim], reg, subreg); + tc_MOV(tc, dst, attr); + } + + +} + +static void +gs_lower_opcode_tgsi_imm(struct gs_compile_context *gcc, + struct toy_dst dst, int idx) +{ + const uint32_t *imm; + int ch; + + imm = toy_tgsi_get_imm(&gcc->tgsi, idx, NULL); + + for (ch = 0; ch < 4; ch++) { + struct toy_inst *inst; + + /* raw moves */ + inst = tc_MOV(&gcc->tc, + tdst_writemask(tdst_ud(dst), 1 << ch), + tsrc_imm_ud(imm[ch])); + inst->access_mode = BRW_ALIGN_16; + } +} + +static void +gs_lower_opcode_tgsi_direct(struct gs_compile_context *gcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &gcc->tc; + int dim, idx; + + assert(inst->src[0].file == TOY_FILE_IMM); + dim = inst->src[0].val32; + + assert(inst->src[1].file == TOY_FILE_IMM); + idx = inst->src[1].val32; + + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + gs_lower_opcode_tgsi_in(gcc, inst->dst, dim, idx); + /* fetch all dimensions */ + if (dim == 0) { + int i; + + for (i = 1; i < gcc->in_vue_count; i++) { + const int vrf = toy_tgsi_get_vrf(&gcc->tgsi, TGSI_FILE_INPUT, i, idx); + struct toy_dst dst; + + if (vrf < 0) + continue; + + dst = tdst(TOY_FILE_VRF, vrf, 0); + gs_lower_opcode_tgsi_in(gcc, dst, i, idx); + } + } + break; + case TOY_OPCODE_TGSI_IMM: + assert(!dim); + gs_lower_opcode_tgsi_imm(gcc, inst->dst, idx); + break; + case TOY_OPCODE_TGSI_CONST: + case TOY_OPCODE_TGSI_SV: + default: + tc_fail(tc, "unhandled TGSI fetch"); + break; + } + + tc_discard_inst(tc, inst); +} + +static void +gs_lower_virtual_opcodes(struct gs_compile_context *gcc) +{ + struct toy_compiler *tc = &gcc->tc; + struct toy_inst *inst; + + tc_head(tc); + while ((inst = tc_next(tc)) != NULL) { + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + case TOY_OPCODE_TGSI_CONST: + case TOY_OPCODE_TGSI_SV: + case TOY_OPCODE_TGSI_IMM: + gs_lower_opcode_tgsi_direct(gcc, inst); + break; + case TOY_OPCODE_TGSI_INDIRECT_FETCH: + case TOY_OPCODE_TGSI_INDIRECT_STORE: + /* TODO similar to VS */ + tc_fail(tc, "no indirection support"); + tc_discard_inst(tc, inst); + break; + case TOY_OPCODE_TGSI_TEX: + case TOY_OPCODE_TGSI_TXB: + case TOY_OPCODE_TGSI_TXD: + case TOY_OPCODE_TGSI_TXL: + case TOY_OPCODE_TGSI_TXP: + case TOY_OPCODE_TGSI_TXF: + case TOY_OPCODE_TGSI_TXQ: + case TOY_OPCODE_TGSI_TXQ_LZ: + case TOY_OPCODE_TGSI_TEX2: + case TOY_OPCODE_TGSI_TXB2: + case TOY_OPCODE_TGSI_TXL2: + case TOY_OPCODE_TGSI_SAMPLE: + case TOY_OPCODE_TGSI_SAMPLE_I: + case TOY_OPCODE_TGSI_SAMPLE_I_MS: + case TOY_OPCODE_TGSI_SAMPLE_B: + case TOY_OPCODE_TGSI_SAMPLE_C: + case TOY_OPCODE_TGSI_SAMPLE_C_LZ: + case TOY_OPCODE_TGSI_SAMPLE_D: + case TOY_OPCODE_TGSI_SAMPLE_L: + case TOY_OPCODE_TGSI_GATHER4: + case TOY_OPCODE_TGSI_SVIEWINFO: + case TOY_OPCODE_TGSI_SAMPLE_POS: + case TOY_OPCODE_TGSI_SAMPLE_INFO: + /* TODO similar to VS */ + tc_fail(tc, "no sampling support"); + tc_discard_inst(tc, inst); + break; + case TOY_OPCODE_EMIT: + gs_lower_opcode_emit(gcc, inst); + tc_discard_inst(tc, inst); + break; + case TOY_OPCODE_ENDPRIM: + gs_lower_opcode_endprim(gcc, inst); + tc_discard_inst(tc, inst); + break; + default: + break; + } + } + + tc_head(tc); + while ((inst = tc_next(tc)) != NULL) { + switch 
(inst->opcode) {
+      case TOY_OPCODE_INV:
+      case TOY_OPCODE_LOG:
+      case TOY_OPCODE_EXP:
+      case TOY_OPCODE_SQRT:
+      case TOY_OPCODE_RSQ:
+      case TOY_OPCODE_SIN:
+      case TOY_OPCODE_COS:
+      case TOY_OPCODE_FDIV:
+      case TOY_OPCODE_POW:
+      case TOY_OPCODE_INT_DIV_QUOTIENT:
+      case TOY_OPCODE_INT_DIV_REMAINDER:
+         toy_compiler_lower_math(tc, inst);
+         break;
+      case TOY_OPCODE_URB_WRITE:
+         toy_compiler_lower_to_send(tc, inst, false, BRW_SFID_URB);
+         break;
+      default:
+         if (inst->opcode > 127)
+            tc_fail(tc, "unhandled virtual opcode");
+         break;
+      }
+   }
+}
+
+/**
+ * Statically count the vertices and (tessellated) primitives generated by
+ * this shader. Set gcc->is_static to false if the counts are unknown until
+ * runtime.
+ */
+static void
+get_num_prims_static(struct gs_compile_context *gcc)
+{
+   struct toy_compiler *tc = &gcc->tc;
+   const struct toy_inst *inst;
+   int num_vertices_in_prim = 0, if_depth = 0, do_depth = 0;
+   bool is_static = true;
+
+   tc_head(tc);
+   while ((inst = tc_next_no_skip(tc)) != NULL) {
+      switch (inst->opcode) {
+      case BRW_OPCODE_IF:
+         if_depth++;
+         break;
+      case BRW_OPCODE_ENDIF:
+         if_depth--;
+         break;
+      case BRW_OPCODE_DO:
+         do_depth++;
+         break;
+      case BRW_OPCODE_WHILE:
+         do_depth--;
+         break;
+      case TOY_OPCODE_EMIT:
+         if (if_depth || do_depth) {
+            is_static = false;
+         }
+         else {
+            gcc->static_data.total_vertices++;
+
+            num_vertices_in_prim++;
+            if (num_vertices_in_prim >= gcc->out_vue_min_count)
+               gcc->static_data.total_prims++;
+         }
+         break;
+      case TOY_OPCODE_ENDPRIM:
+         if (if_depth || do_depth) {
+            is_static = false;
+         }
+         else {
+            const int vertidx = gcc->static_data.total_vertices - 1;
+            const int idx = vertidx / 32;
+            const int subidx = vertidx % 32;
+
+            gcc->static_data.last_vertex[idx] |= 1 << subidx;
+            num_vertices_in_prim = 0;
+         }
+         break;
+      default:
+         break;
+      }
+
+      if (!is_static)
+         break;
+   }
+
+   gcc->is_static = is_static;
+}
+
+/**
+ * Compile the shader.
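+ *
+ * This emits the FF_SYNC message and the URB write header setup ahead of
+ * the translated body, then lowers the virtual opcodes, allocates
+ * registers, and assembles the kernel. Shaders with control flow are
+ * rejected for now.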
+ */
+static bool
+gs_compile(struct gs_compile_context *gcc)
+{
+   struct toy_compiler *tc = &gcc->tc;
+   struct ilo_shader *sh = gcc->shader;
+
+   get_num_prims_static(gcc);
+
+   if (gcc->is_static) {
+      tc_head(tc);
+
+      gs_init_vars(gcc);
+      gs_ff_sync(gcc, tdst_d(gcc->vars.tmp), tsrc_imm_d(gcc->static_data.total_prims));
+      gs_COPY1(tc, gcc->vars.urb_write_header, 0, tsrc_from(tdst_d(gcc->vars.tmp)), 0);
+      if (gcc->write_so)
+         gs_COPY4(tc, gcc->vars.so_index, 0, tsrc_from(tdst_d(gcc->vars.tmp)), 1);
+
+      tc_tail(tc);
+   }
+   else {
+      tc_fail(tc, "no control flow support");
+      return false;
+   }
+
+   if (!gcc->write_vue)
+      gs_discard(gcc);
+
+   gs_lower_virtual_opcodes(gcc);
+   toy_compiler_legalize_for_ra(tc);
+   toy_compiler_optimize(tc);
+   toy_compiler_allocate_registers(tc,
+         gcc->first_free_grf,
+         gcc->last_free_grf,
+         1);
+   toy_compiler_legalize_for_asm(tc);
+
+   if (tc->fail) {
+      ilo_err("failed to legalize GS instructions: %s\n", tc->reason);
+      return false;
+   }
+
+   if (ilo_debug & ILO_DEBUG_GS) {
+      ilo_printf("legalized instructions:\n");
+      toy_compiler_dump(tc);
+      ilo_printf("\n");
+   }
+
+   sh->kernel = toy_compiler_assemble(tc, &sh->kernel_size);
+   if (!sh->kernel)
+      return false;
+
+   if (ilo_debug & ILO_DEBUG_GS) {
+      ilo_printf("disassembly:\n");
+      toy_compiler_disassemble(tc, sh->kernel, sh->kernel_size);
+      ilo_printf("\n");
+   }
+
+   return true;
+}
+
+static bool
+gs_compile_passthrough(struct gs_compile_context *gcc)
+{
+   struct toy_compiler *tc = &gcc->tc;
+   struct ilo_shader *sh = gcc->shader;
+
+   gcc->is_static = true;
+   gcc->static_data.total_vertices = gcc->in_vue_count;
+   gcc->static_data.total_prims = 1;
+   gcc->static_data.last_vertex[0] = 1 << (gcc->in_vue_count - 1);
+
+   gs_init_vars(gcc);
+   gs_ff_sync(gcc, tdst_d(gcc->vars.tmp), tsrc_imm_d(gcc->static_data.total_prims));
+   gs_COPY1(tc, gcc->vars.urb_write_header, 0, tsrc_from(tdst_d(gcc->vars.tmp)), 0);
+   if (gcc->write_so)
+      gs_COPY4(tc, gcc->vars.so_index, 0, tsrc_from(tdst_d(gcc->vars.tmp)), 1);
+
+   {
+      int vert, attr;
+
+      for (vert = 0; vert < gcc->out_vue_min_count; vert++) {
+         for (attr = 0; attr < gcc->shader->out.count; attr++) {
+            tc_MOV(tc, tdst_from(gcc->vars.tgsi_outs[attr]),
+                  tsrc_offset(gcc->payload.vues[vert], attr / 2, (attr % 2) * 4));
+         }
+
+         gs_lower_opcode_emit(gcc, NULL);
+      }
+
+      gs_lower_opcode_endprim(gcc, NULL);
+   }
+
+   if (!gcc->write_vue)
+      gs_discard(gcc);
+
+   gs_lower_virtual_opcodes(gcc);
+
+   toy_compiler_legalize_for_ra(tc);
+   toy_compiler_optimize(tc);
+   toy_compiler_allocate_registers(tc,
+         gcc->first_free_grf,
+         gcc->last_free_grf,
+         1);
+
+   toy_compiler_legalize_for_asm(tc);
+
+   if (tc->fail) {
+      ilo_err("failed to legalize GS instructions: %s\n", tc->reason);
+      return false;
+   }
+
+   if (ilo_debug & ILO_DEBUG_GS) {
+      int i;
+
+      ilo_printf("VUE count %d, VUE size %d\n",
+            gcc->in_vue_count, gcc->in_vue_size);
+      ilo_printf("%srasterizer discard\n",
+            (gcc->variant->u.gs.rasterizer_discard) ? "" : "no ");
+
+      for (i = 0; i < gcc->so_info->num_outputs; i++) {
+         ilo_printf("SO[%d] = OUT[%d]\n", i,
+               gcc->so_info->output[i].register_index);
+      }
+
+      ilo_printf("legalized instructions:\n");
+      toy_compiler_dump(tc);
+      ilo_printf("\n");
+   }
+
+   sh->kernel = toy_compiler_assemble(tc, &sh->kernel_size);
+   if (!sh->kernel) {
+      ilo_err("failed to compile GS: %s\n", tc->reason);
+      return false;
+   }
+
+   if (ilo_debug & ILO_DEBUG_GS) {
+      ilo_printf("disassembly:\n");
+      toy_compiler_disassemble(tc, sh->kernel, sh->kernel_size);
+      ilo_printf("\n");
+   }
+
+   return true;
+}
+
+/**
+ * Translate the TGSI tokens.
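+ *
+ * Return false when the tokens cannot be translated. When ILO_DEBUG_GS is
+ * set, dump both the TGSI tokens and the translated instructions.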
+ */ +static bool +gs_setup_tgsi(struct toy_compiler *tc, const struct tgsi_token *tokens, + struct toy_tgsi *tgsi) +{ + if (ilo_debug & ILO_DEBUG_GS) { + ilo_printf("dumping geometry shader\n"); + ilo_printf("\n"); + + tgsi_dump(tokens, 0); + ilo_printf("\n"); + } + + toy_compiler_translate_tgsi(tc, tokens, true, tgsi); + if (tc->fail) + return false; + + if (ilo_debug & ILO_DEBUG_GS) { + ilo_printf("TGSI translator:\n"); + toy_tgsi_dump(tgsi); + ilo_printf("\n"); + toy_compiler_dump(tc); + ilo_printf("\n"); + } + + return true; +} + +/** + * Set up shader inputs for fixed-function units. + */ +static void +gs_setup_shader_in(struct ilo_shader *sh, + const struct ilo_shader_variant *variant) +{ + int i; + + for (i = 0; i < variant->u.gs.num_inputs; i++) { + sh->in.semantic_names[i] = variant->u.gs.semantic_names[i]; + sh->in.semantic_indices[i] = variant->u.gs.semantic_indices[i]; + sh->in.interp[i] = TGSI_INTERPOLATE_CONSTANT; + sh->in.centroid[i] = false; + } + + sh->in.count = variant->u.gs.num_inputs; + + sh->in.has_pos = false; + sh->in.has_linear_interp = false; + sh->in.barycentric_interpolation_mode = 0; +} + +/** + * Set up shader outputs for fixed-function units. + * + * XXX share the code with VS + */ +static void +gs_setup_shader_out(struct ilo_shader *sh, const struct toy_tgsi *tgsi, + bool output_clipdist, int *output_map) +{ + int psize_slot = -1, pos_slot = -1; + int clipdist_slot[2] = { -1, -1 }; + int color_slot[4] = { -1, -1, -1, -1 }; + int num_outs, i; + + /* find out the slots of outputs that need special care */ + for (i = 0; i < tgsi->num_outputs; i++) { + switch (tgsi->outputs[i].semantic_name) { + case TGSI_SEMANTIC_PSIZE: + psize_slot = i; + break; + case TGSI_SEMANTIC_POSITION: + pos_slot = i; + break; + case TGSI_SEMANTIC_CLIPDIST: + if (tgsi->outputs[i].semantic_index) + clipdist_slot[1] = i; + else + clipdist_slot[0] = i; + break; + case TGSI_SEMANTIC_COLOR: + if (tgsi->outputs[i].semantic_index) + color_slot[2] = i; + else + color_slot[0] = i; + break; + case TGSI_SEMANTIC_BCOLOR: + if (tgsi->outputs[i].semantic_index) + color_slot[3] = i; + else + color_slot[1] = i; + break; + default: + break; + } + } + + /* the first two VUEs are always PSIZE and POSITION */ + num_outs = 2; + sh->out.semantic_names[0] = TGSI_SEMANTIC_PSIZE; + sh->out.semantic_indices[0] = 0; + sh->out.semantic_names[1] = TGSI_SEMANTIC_POSITION; + sh->out.semantic_indices[1] = 0; + + sh->out.has_pos = true; + output_map[0] = psize_slot; + output_map[1] = pos_slot; + + /* followed by optional clip distances */ + if (output_clipdist) { + sh->out.semantic_names[num_outs] = TGSI_SEMANTIC_CLIPDIST; + sh->out.semantic_indices[num_outs] = 0; + output_map[num_outs++] = clipdist_slot[0]; + + sh->out.semantic_names[num_outs] = TGSI_SEMANTIC_CLIPDIST; + sh->out.semantic_indices[num_outs] = 1; + output_map[num_outs++] = clipdist_slot[1]; + } + + /* + * make BCOLOR follow COLOR so that we can make use of + * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING in 3DSTATE_SF + */ + for (i = 0; i < 4; i++) { + const int slot = color_slot[i]; + + if (slot < 0) + continue; + + sh->out.semantic_names[num_outs] = tgsi->outputs[slot].semantic_name; + sh->out.semantic_indices[num_outs] = tgsi->outputs[slot].semantic_index; + + output_map[num_outs++] = slot; + } + + /* add the rest of the outputs */ + for (i = 0; i < tgsi->num_outputs; i++) { + switch (tgsi->outputs[i].semantic_name) { + case TGSI_SEMANTIC_PSIZE: + case TGSI_SEMANTIC_POSITION: + case TGSI_SEMANTIC_CLIPDIST: + case TGSI_SEMANTIC_COLOR: + case 
TGSI_SEMANTIC_BCOLOR: + break; + default: + sh->out.semantic_names[num_outs] = tgsi->outputs[i].semantic_name; + sh->out.semantic_indices[num_outs] = tgsi->outputs[i].semantic_index; + output_map[num_outs++] = i; + break; + } + } + + sh->out.count = num_outs; +} + +static void +gs_setup_vars(struct gs_compile_context *gcc) +{ + int grf = gcc->first_free_grf; + int i; + + gcc->vars.urb_write_header = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); + grf++; + + gcc->vars.tmp = tdst(TOY_FILE_GRF, grf, 0); + grf++; + + if (gcc->write_so) { + gcc->vars.buffer_needed = gcc->out_vue_min_count - 1; + for (i = 0; i < gcc->vars.buffer_needed; i++) { + gcc->vars.buffers[i] = tdst(TOY_FILE_GRF, grf, 0); + grf += gcc->shader->out.count; + } + + gcc->vars.so_written = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); + grf++; + + gcc->vars.so_index = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); + grf++; + } + + gcc->first_free_grf = grf; + + if (!gcc->tgsi.reg_mapping) { + for (i = 0; i < gcc->shader->out.count; i++) + gcc->vars.tgsi_outs[i] = tsrc(TOY_FILE_GRF, grf++, 0); + + gcc->first_free_grf = grf; + return; + } + + for (i = 0; i < gcc->shader->out.count; i++) { + const int slot = gcc->output_map[i]; + const int vrf = (slot >= 0) ? toy_tgsi_get_vrf(&gcc->tgsi, + TGSI_FILE_OUTPUT, 0, gcc->tgsi.outputs[slot].index) : -1; + + if (vrf >= 0) + gcc->vars.tgsi_outs[i] = tsrc(TOY_FILE_VRF, vrf, 0); + else + gcc->vars.tgsi_outs[i] = (i == 0) ? tsrc_imm_d(0) : tsrc_imm_f(0.0f); + } +} + +static void +gs_setup_payload(struct gs_compile_context *gcc) +{ + int grf, i; + + grf = 0; + + /* r0: payload header */ + gcc->payload.header = tsrc_d(tsrc(TOY_FILE_GRF, grf, 0)); + grf++; + + /* r1: SVBI */ + if (gcc->write_so) { + gcc->payload.svbi = tsrc_ud(tsrc(TOY_FILE_GRF, grf, 0)); + grf++; + } + + /* URB data */ + gcc->shader->in.start_grf = grf; + + /* no pull constants */ + + /* VUEs */ + for (i = 0; i < gcc->in_vue_count; i++) { + gcc->payload.vues[i] = tsrc(TOY_FILE_GRF, grf, 0); + grf += gcc->in_vue_size; + } + + gcc->first_free_grf = grf; + gcc->last_free_grf = 127; +} + +/** + * Set up GS compile context. This includes translating the TGSI tokens. 
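+ *
+ * This also determines the input/output VUE layouts, lays out the thread
+ * payload, and reserves GRFs for internal variables as well as m0 for
+ * system routines.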
+ */ +static bool +gs_setup(struct gs_compile_context *gcc, + const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant, + int num_verts) +{ + memset(gcc, 0, sizeof(*gcc)); + + gcc->shader = CALLOC_STRUCT(ilo_shader); + if (!gcc->shader) + return false; + + gcc->variant = variant; + gcc->so_info = &state->info.stream_output; + + toy_compiler_init(&gcc->tc, state->info.gen); + + gcc->write_so = (state->info.stream_output.num_outputs > 0); + gcc->write_vue = !gcc->variant->u.gs.rasterizer_discard; + + gcc->tc.templ.access_mode = BRW_ALIGN_16; + gcc->tc.templ.exec_size = BRW_EXECUTE_4; + gcc->tc.rect_linear_width = 4; + + if (state->info.tokens) { + if (!gs_setup_tgsi(&gcc->tc, state->info.tokens, &gcc->tgsi)) { + toy_compiler_cleanup(&gcc->tc); + FREE(gcc->shader); + return false; + } + + switch (gcc->tgsi.props.gs_input_prim) { + case PIPE_PRIM_POINTS: + gcc->in_vue_count = 1; + break; + case PIPE_PRIM_LINES: + gcc->in_vue_count = 2; + gcc->shader->in.discard_adj = true; + break; + case PIPE_PRIM_TRIANGLES: + gcc->in_vue_count = 3; + gcc->shader->in.discard_adj = true; + break; + case PIPE_PRIM_LINES_ADJACENCY: + gcc->in_vue_count = 4; + break; + case PIPE_PRIM_TRIANGLES_ADJACENCY: + gcc->in_vue_count = 6; + break; + default: + tc_fail(&gcc->tc, "unsupported GS input type"); + gcc->in_vue_count = 0; + break; + } + + switch (gcc->tgsi.props.gs_output_prim) { + case PIPE_PRIM_POINTS: + gcc->out_vue_min_count = 1; + break; + case PIPE_PRIM_LINE_STRIP: + gcc->out_vue_min_count = 2; + break; + case PIPE_PRIM_TRIANGLE_STRIP: + gcc->out_vue_min_count = 3; + break; + default: + tc_fail(&gcc->tc, "unsupported GS output type"); + gcc->out_vue_min_count = 0; + break; + } + } + else { + int i; + + gcc->in_vue_count = num_verts; + gcc->out_vue_min_count = num_verts; + + gcc->tgsi.num_outputs = gcc->variant->u.gs.num_inputs; + for (i = 0; i < gcc->variant->u.gs.num_inputs; i++) { + gcc->tgsi.outputs[i].semantic_name = + gcc->variant->u.gs.semantic_names[i]; + gcc->tgsi.outputs[i].semantic_index = + gcc->variant->u.gs.semantic_indices[i]; + } + } + + gcc->tc.templ.access_mode = BRW_ALIGN_1; + + gs_setup_shader_in(gcc->shader, gcc->variant); + gs_setup_shader_out(gcc->shader, &gcc->tgsi, false, gcc->output_map); + + gcc->in_vue_size = (gcc->shader->in.count + 1) / 2; + + gcc->out_vue_size = (gcc->shader->out.count + 1) / 2; + + gs_setup_payload(gcc); + gs_setup_vars(gcc); + + /* m0 is reserved for system routines */ + gcc->first_free_mrf = 1; + gcc->last_free_mrf = 15; + + return true; +} + +/** + * Compile the geometry shader. 
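+ *
+ * Return NULL on failure. The resulting ilo_shader is to be released with
+ * ilo_shader_destroy().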
+ */
+struct ilo_shader *
+ilo_shader_compile_gs(const struct ilo_shader_state *state,
+                      const struct ilo_shader_variant *variant)
+{
+   struct gs_compile_context gcc;
+
+   if (!gs_setup(&gcc, state, variant, 0))
+      return NULL;
+
+   if (!gs_compile(&gcc)) {
+      FREE(gcc.shader);
+      gcc.shader = NULL;
+   }
+
+   toy_tgsi_cleanup(&gcc.tgsi);
+   toy_compiler_cleanup(&gcc.tc);
+
+   return gcc.shader;
+}
+
+static bool
+append_gs_to_vs(struct ilo_shader *vs, struct ilo_shader *gs, int num_verts)
+{
+   void *combined;
+   int gs_offset;
+
+   if (!gs)
+      return false;
+
+   /* kernels must be aligned to 64 bytes */
+   gs_offset = align(vs->kernel_size, 64);
+   combined = REALLOC(vs->kernel, vs->kernel_size,
+         gs_offset + gs->kernel_size);
+   if (!combined)
+      return false;
+
+   memcpy(combined + gs_offset, gs->kernel, gs->kernel_size);
+
+   vs->kernel = combined;
+   vs->kernel_size = gs_offset + gs->kernel_size;
+
+   vs->stream_output = true;
+   vs->gs_offsets[num_verts - 1] = gs_offset;
+   vs->gs_start_grf = gs->in.start_grf;
+
+   ilo_shader_destroy(gs);
+
+   return true;
+}
+
+bool
+ilo_shader_compile_gs_passthrough(const struct ilo_shader_state *vs_state,
+                                  const struct ilo_shader_variant *vs_variant,
+                                  const int *so_mapping,
+                                  struct ilo_shader *vs)
+{
+   struct gs_compile_context gcc;
+   struct ilo_shader_state state;
+   struct ilo_shader_variant variant;
+   const int num_verts = 3;
+   int i;
+
+   /* init GS state and variant */
+   state = *vs_state;
+   state.info.tokens = NULL;
+   for (i = 0; i < state.info.stream_output.num_outputs; i++) {
+      const int reg = state.info.stream_output.output[i].register_index;
+
+      state.info.stream_output.output[i].register_index = so_mapping[reg];
+   }
+
+   variant = *vs_variant;
+   variant.u.gs.rasterizer_discard = vs_variant->u.vs.rasterizer_discard;
+   variant.u.gs.num_inputs = vs->out.count;
+   for (i = 0; i < vs->out.count; i++) {
+      variant.u.gs.semantic_names[i] =
+         vs->out.semantic_names[i];
+      variant.u.gs.semantic_indices[i] =
+         vs->out.semantic_indices[i];
+   }
+
+   if (!gs_setup(&gcc, &state, &variant, num_verts))
+      return false;
+
+   if (!gs_compile_passthrough(&gcc)) {
+      FREE(gcc.shader);
+      gcc.shader = NULL;
+   }
+
+   /* no need to call toy_tgsi_cleanup() */
+   toy_compiler_cleanup(&gcc.tc);
+
+   return append_gs_to_vs(vs, gcc.shader, num_verts);
+}
diff --git a/src/gallium/drivers/ilo/shader/ilo_shader_vs.c b/src/gallium/drivers/ilo/shader/ilo_shader_vs.c
new file mode 100644
index 00000000000..09a7b0424e7
--- /dev/null
+++ b/src/gallium/drivers/ilo/shader/ilo_shader_vs.c
@@ -0,0 +1,1273 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2012-2013 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Chia-I Wu <[email protected]>
+ */
+
+#include "tgsi/tgsi_dump.h"
+#include "toy_compiler.h"
+#include "toy_tgsi.h"
+#include "toy_legalize.h"
+#include "toy_optimize.h"
+#include "toy_helpers.h"
+#include "ilo_context.h"
+#include "ilo_shader.h"
+
+struct vs_compile_context {
+   struct ilo_shader *shader;
+   const struct ilo_shader_variant *variant;
+
+   struct toy_compiler tc;
+   struct toy_tgsi tgsi;
+   enum brw_message_target const_cache;
+
+   int output_map[PIPE_MAX_SHADER_OUTPUTS];
+
+   int num_grf_per_vrf;
+   int first_const_grf;
+   int first_vue_grf;
+   int first_free_grf;
+   int last_free_grf;
+
+   int first_free_mrf;
+   int last_free_mrf;
+};
+
+static void
+vs_lower_opcode_tgsi_in(struct vs_compile_context *vcc,
+                        struct toy_dst dst, int dim, int idx)
+{
+   struct toy_compiler *tc = &vcc->tc;
+   int slot;
+
+   assert(!dim);
+
+   slot = toy_tgsi_find_input(&vcc->tgsi, idx);
+   if (slot >= 0) {
+      const int first_in_grf = vcc->first_vue_grf +
+         (vcc->shader->in.count - vcc->tgsi.num_inputs);
+      const int grf = first_in_grf + vcc->tgsi.inputs[slot].semantic_index;
+      const struct toy_src src = tsrc(TOY_FILE_GRF, grf, 0);
+
+      tc_MOV(tc, dst, src);
+   }
+   else {
+      /* undeclared input */
+      tc_MOV(tc, dst, tsrc_imm_f(0.0f));
+   }
+}
+
+static void
+vs_lower_opcode_tgsi_const_gen6(struct vs_compile_context *vcc,
+                                struct toy_dst dst, int dim,
+                                struct toy_src idx)
+{
+   const struct toy_dst header =
+      tdst_ud(tdst(TOY_FILE_MRF, vcc->first_free_mrf, 0));
+   const struct toy_dst block_offsets =
+      tdst_ud(tdst(TOY_FILE_MRF, vcc->first_free_mrf + 1, 0));
+   const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0));
+   struct toy_compiler *tc = &vcc->tc;
+   unsigned msg_type, msg_ctrl, msg_len;
+   struct toy_inst *inst;
+   struct toy_src desc;
+
+   /* set message header */
+   inst = tc_MOV(tc, header, r0);
+   inst->mask_ctrl = BRW_MASK_DISABLE;
+
+   /* set block offsets */
+   tc_MOV(tc, block_offsets, idx);
+
+   msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
+   msg_ctrl = BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD << 8;
+   msg_len = 2;
+
+   desc = tsrc_imm_mdesc_data_port(tc, false, msg_len, 1, true, false,
+         msg_type, msg_ctrl, ILO_VS_CONST_SURFACE(dim));
+
+   tc_SEND(tc, dst, tsrc_from(header), desc, vcc->const_cache);
+}
+
+static void
+vs_lower_opcode_tgsi_const_gen7(struct vs_compile_context *vcc,
+                                struct toy_dst dst, int dim,
+                                struct toy_src idx)
+{
+   struct toy_compiler *tc = &vcc->tc;
+   const struct toy_dst offset =
+      tdst_ud(tdst(TOY_FILE_MRF, vcc->first_free_mrf, 0));
+   struct toy_src desc;
+
+   /*
+    * In 259b65e2e7938de4aab323033cfe2b33369ddb07, pull constant load was
+    * changed from OWord Dual Block Read to ld to increase performance in the
+    * classic driver. Since we use the constant cache instead of the data
+    * cache, I wonder if we still want to follow the classic driver.
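+    *
+    * For reference, the ld path taken below is a SIMD4x2 message sent to
+    * the sampler: a single MRF holding the offsets and a single register
+    * of response.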
+ */ + + /* set offset */ + tc_MOV(tc, offset, idx); + + desc = tsrc_imm_mdesc_sampler(tc, 1, 1, false, + BRW_SAMPLER_SIMD_MODE_SIMD4X2, + GEN5_SAMPLER_MESSAGE_SAMPLE_LD, + 0, + ILO_VS_CONST_SURFACE(dim)); + + tc_SEND(tc, dst, tsrc_from(offset), desc, BRW_SFID_SAMPLER); +} + +static void +vs_lower_opcode_tgsi_imm(struct vs_compile_context *vcc, + struct toy_dst dst, int idx) +{ + const uint32_t *imm; + int ch; + + imm = toy_tgsi_get_imm(&vcc->tgsi, idx, NULL); + + for (ch = 0; ch < 4; ch++) { + /* raw moves */ + tc_MOV(&vcc->tc, + tdst_writemask(tdst_ud(dst), 1 << ch), + tsrc_imm_ud(imm[ch])); + } +} + + +static void +vs_lower_opcode_tgsi_sv(struct vs_compile_context *vcc, + struct toy_dst dst, int dim, int idx) +{ + struct toy_compiler *tc = &vcc->tc; + const struct toy_tgsi *tgsi = &vcc->tgsi; + int slot; + + assert(!dim); + + slot = toy_tgsi_find_system_value(tgsi, idx); + if (slot < 0) + return; + + switch (tgsi->system_values[slot].semantic_name) { + case TGSI_SEMANTIC_INSTANCEID: + case TGSI_SEMANTIC_VERTEXID: + /* + * In 3DSTATE_VERTEX_ELEMENTS, we prepend an extra vertex element for + * the generated IDs, with VID in the X channel and IID in the Y + * channel. + */ + { + const int grf = vcc->first_vue_grf; + const struct toy_src src = tsrc(TOY_FILE_GRF, grf, 0); + const enum toy_swizzle swizzle = + (tgsi->system_values[slot].semantic_name == + TGSI_SEMANTIC_INSTANCEID) ? TOY_SWIZZLE_Y : TOY_SWIZZLE_X; + + tc_MOV(tc, tdst_d(dst), tsrc_d(tsrc_swizzle1(src, swizzle))); + } + break; + case TGSI_SEMANTIC_PRIMID: + default: + tc_fail(tc, "unhandled system value"); + tc_MOV(tc, dst, tsrc_imm_d(0)); + break; + } +} + +static void +vs_lower_opcode_tgsi_direct(struct vs_compile_context *vcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &vcc->tc; + int dim, idx; + + assert(inst->src[0].file == TOY_FILE_IMM); + dim = inst->src[0].val32; + + assert(inst->src[1].file == TOY_FILE_IMM); + idx = inst->src[1].val32; + + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + vs_lower_opcode_tgsi_in(vcc, inst->dst, dim, idx); + break; + case TOY_OPCODE_TGSI_CONST: + if (tc->gen >= ILO_GEN(7)) + vs_lower_opcode_tgsi_const_gen7(vcc, inst->dst, dim, inst->src[1]); + else + vs_lower_opcode_tgsi_const_gen6(vcc, inst->dst, dim, inst->src[1]); + break; + case TOY_OPCODE_TGSI_SV: + vs_lower_opcode_tgsi_sv(vcc, inst->dst, dim, idx); + break; + case TOY_OPCODE_TGSI_IMM: + assert(!dim); + vs_lower_opcode_tgsi_imm(vcc, inst->dst, idx); + break; + default: + tc_fail(tc, "unhandled TGSI fetch"); + break; + } + + tc_discard_inst(tc, inst); +} + +static void +vs_lower_opcode_tgsi_indirect(struct vs_compile_context *vcc, + struct toy_inst *inst) +{ + struct toy_compiler *tc = &vcc->tc; + enum tgsi_file_type file; + int dim, idx; + struct toy_src indirect_dim, indirect_idx; + + assert(inst->src[0].file == TOY_FILE_IMM); + file = inst->src[0].val32; + + assert(inst->src[1].file == TOY_FILE_IMM); + dim = inst->src[1].val32; + indirect_dim = inst->src[2]; + + assert(inst->src[3].file == TOY_FILE_IMM); + idx = inst->src[3].val32; + indirect_idx = inst->src[4]; + + /* no dimension indirection */ + assert(indirect_dim.file == TOY_FILE_IMM); + dim += indirect_dim.val32; + + switch (inst->opcode) { + case TOY_OPCODE_TGSI_INDIRECT_FETCH: + if (file == TGSI_FILE_CONSTANT) { + if (idx) { + struct toy_dst tmp = tc_alloc_tmp(tc); + + tc_ADD(tc, tmp, indirect_idx, tsrc_imm_d(idx)); + indirect_idx = tsrc_from(tmp); + } + + if (tc->gen >= ILO_GEN(7)) + vs_lower_opcode_tgsi_const_gen7(vcc, inst->dst, dim, indirect_idx); + else 
+ vs_lower_opcode_tgsi_const_gen6(vcc, inst->dst, dim, indirect_idx); + break; + } + /* fall through */ + case TOY_OPCODE_TGSI_INDIRECT_STORE: + default: + tc_fail(tc, "unhandled TGSI indirection"); + break; + } + + tc_discard_inst(tc, inst); +} + +/** + * Emit instructions to move sampling parameters to the message registers. + */ +static int +vs_add_sampler_params(struct toy_compiler *tc, int msg_type, int base_mrf, + struct toy_src coords, int num_coords, + struct toy_src bias_or_lod, struct toy_src ref_or_si, + struct toy_src ddx, struct toy_src ddy, int num_derivs) +{ + const unsigned coords_writemask = (1 << num_coords) - 1; + struct toy_dst m[3]; + int num_params, i; + + assert(num_coords <= 4); + assert(num_derivs <= 3 && num_derivs <= num_coords); + + for (i = 0; i < Elements(m); i++) + m[i] = tdst(TOY_FILE_MRF, base_mrf + i, 0); + + switch (msg_type) { + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD: + tc_MOV(tc, tdst_writemask(m[0], coords_writemask), coords); + tc_MOV(tc, tdst_writemask(m[1], TOY_WRITEMASK_X), bias_or_lod); + num_params = 5; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS: + tc_MOV(tc, tdst_writemask(m[0], coords_writemask), coords); + tc_MOV(tc, tdst_writemask(m[1], TOY_WRITEMASK_XZ), + tsrc_swizzle(ddx, 0, 0, 1, 1)); + tc_MOV(tc, tdst_writemask(m[1], TOY_WRITEMASK_YW), + tsrc_swizzle(ddy, 0, 0, 1, 1)); + if (num_derivs > 2) { + tc_MOV(tc, tdst_writemask(m[2], TOY_WRITEMASK_X), + tsrc_swizzle1(ddx, 2)); + tc_MOV(tc, tdst_writemask(m[2], TOY_WRITEMASK_Y), + tsrc_swizzle1(ddy, 2)); + } + num_params = 4 + num_derivs * 2; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE: + tc_MOV(tc, tdst_writemask(m[0], coords_writemask), coords); + tc_MOV(tc, tdst_writemask(m[1], TOY_WRITEMASK_X), ref_or_si); + tc_MOV(tc, tdst_writemask(m[1], TOY_WRITEMASK_Y), bias_or_lod); + num_params = 6; + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_LD: + assert(num_coords <= 3); + tc_MOV(tc, tdst_writemask(tdst_d(m[0]), coords_writemask), coords); + tc_MOV(tc, tdst_writemask(tdst_d(m[0]), TOY_WRITEMASK_W), bias_or_lod); + if (tc->gen >= ILO_GEN(7)) { + num_params = 4; + } + else { + tc_MOV(tc, tdst_writemask(tdst_d(m[1]), TOY_WRITEMASK_X), ref_or_si); + num_params = 5; + } + break; + case GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO: + tc_MOV(tc, tdst_writemask(tdst_d(m[0]), TOY_WRITEMASK_X), bias_or_lod); + num_params = 1; + break; + default: + tc_fail(tc, "unknown sampler opcode"); + num_params = 0; + break; + } + + return (num_params + 3) / 4; +} + +/** + * Set up message registers and return the message descriptor for sampling. 
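+ *
+ * VS sampling always uses the SIMD4x2 mode; the message length is derived
+ * from the number of parameters and the response is a single register.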
+ */ +static struct toy_src +vs_prepare_tgsi_sampling(struct toy_compiler *tc, const struct toy_inst *inst, + int base_mrf, unsigned *ret_sampler_index) +{ + unsigned simd_mode, msg_type, msg_len, sampler_index, binding_table_index; + struct toy_src coords, ddx, ddy, bias_or_lod, ref_or_si; + int num_coords, ref_pos, num_derivs; + int sampler_src; + + simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD4X2; + + coords = inst->src[0]; + ddx = tsrc_null(); + ddy = tsrc_null(); + bias_or_lod = tsrc_null(); + ref_or_si = tsrc_null(); + num_derivs = 0; + sampler_src = 1; + + num_coords = toy_tgsi_get_texture_coord_dim(inst->tex.target, &ref_pos); + + /* extract the parameters */ + switch (inst->opcode) { + case TOY_OPCODE_TGSI_TXD: + if (ref_pos >= 0) + tc_fail(tc, "TXD with shadow sampler not supported"); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS; + ddx = inst->src[1]; + ddy = inst->src[2]; + num_derivs = num_coords; + sampler_src = 3; + break; + case TOY_OPCODE_TGSI_TXL: + if (ref_pos >= 0) { + assert(ref_pos < 3); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE; + ref_or_si = tsrc_swizzle1(coords, ref_pos); + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD; + } + + bias_or_lod = tsrc_swizzle1(coords, TOY_SWIZZLE_W); + break; + case TOY_OPCODE_TGSI_TXF: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; + + switch (inst->tex.target) { + case TGSI_TEXTURE_2D_MSAA: + case TGSI_TEXTURE_2D_ARRAY_MSAA: + assert(ref_pos >= 0 && ref_pos < 4); + /* lod is always 0 */ + bias_or_lod = tsrc_imm_d(0); + ref_or_si = tsrc_swizzle1(coords, ref_pos); + break; + default: + bias_or_lod = tsrc_swizzle1(coords, TOY_SWIZZLE_W); + break; + } + + /* offset the coordinates */ + if (!tsrc_is_null(inst->tex.offsets[0])) { + struct toy_dst tmp; + + tmp = tc_alloc_tmp(tc); + tc_ADD(tc, tmp, coords, inst->tex.offsets[0]); + coords = tsrc_from(tmp); + } + + sampler_src = 1; + break; + case TOY_OPCODE_TGSI_TXQ: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO; + num_coords = 0; + bias_or_lod = tsrc_swizzle1(coords, TOY_SWIZZLE_X); + break; + case TOY_OPCODE_TGSI_TXQ_LZ: + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO; + num_coords = 0; + sampler_src = 0; + break; + case TOY_OPCODE_TGSI_TXL2: + if (ref_pos >= 0) { + assert(ref_pos < 4); + + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE; + ref_or_si = tsrc_swizzle1(coords, ref_pos); + } + else { + msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD; + } + + bias_or_lod = tsrc_swizzle1(inst->src[1], TOY_SWIZZLE_X); + sampler_src = 2; + break; + default: + assert(!"unhandled sampling opcode"); + return tsrc_null(); + break; + } + + assert(inst->src[sampler_src].file == TOY_FILE_IMM); + sampler_index = inst->src[sampler_src].val32; + binding_table_index = ILO_VS_TEXTURE_SURFACE(sampler_index); + + /* + * From the Sandy Bridge PRM, volume 4 part 1, page 18: + * + * "Note that the (cube map) coordinates delivered to the sampling + * engine must already have been divided by the component with the + * largest absolute value." 
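+    *
+    * For example, a cube direction like (-2.0, 1.0, 0.5) must reach the
+    * sampler scaled by 1 / max(2.0, 1.0, 0.5) = 0.5, that is, as
+    * (-1.0, 0.5, 0.25). The SEL/INV/MUL sequence below computes exactly
+    * that, channel by channel.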
+    */
+   switch (inst->tex.target) {
+   case TGSI_TEXTURE_CUBE:
+   case TGSI_TEXTURE_SHADOWCUBE:
+   case TGSI_TEXTURE_CUBE_ARRAY:
+   case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
+      /* TXQ does not need coordinates */
+      if (num_coords >= 3) {
+         struct toy_dst tmp, max;
+         struct toy_src abs_coords[3];
+         int i;
+
+         tmp = tc_alloc_tmp(tc);
+         max = tdst_writemask(tmp, TOY_WRITEMASK_W);
+
+         for (i = 0; i < 3; i++)
+            abs_coords[i] = tsrc_absolute(tsrc_swizzle1(coords, i));
+
+         /* scale by the reciprocal of the largest absolute component */
+         tc_SEL(tc, max, abs_coords[0], abs_coords[1], BRW_CONDITIONAL_GE);
+         tc_SEL(tc, max, tsrc_from(max), abs_coords[2], BRW_CONDITIONAL_GE);
+         tc_INV(tc, max, tsrc_from(max));
+
+         for (i = 0; i < 3; i++)
+            tc_MUL(tc, tdst_writemask(tmp, 1 << i), coords, tsrc_from(max));
+
+         coords = tsrc_from(tmp);
+      }
+      break;
+   }
+
+   /* set up sampler parameters */
+   msg_len = vs_add_sampler_params(tc, msg_type, base_mrf,
+         coords, num_coords, bias_or_lod, ref_or_si, ddx, ddy, num_derivs);
+
+   /*
+    * From the Sandy Bridge PRM, volume 4 part 1, page 136:
+    *
+    *     "The maximum message length allowed to the sampler is 11. This would
+    *      disallow sample_d, sample_b_c, and sample_l_c with a SIMD Mode of
+    *      SIMD16."
+    */
+   if (msg_len > 11)
+      tc_fail(tc, "maximum length for messages to the sampler is 11");
+
+   if (ret_sampler_index)
+      *ret_sampler_index = sampler_index;
+
+   return tsrc_imm_mdesc_sampler(tc, msg_len, 1,
+         false, simd_mode, msg_type, sampler_index, binding_table_index);
+}
+
+static void
+vs_lower_opcode_tgsi_sampling(struct vs_compile_context *vcc,
+                              struct toy_inst *inst)
+{
+   struct toy_compiler *tc = &vcc->tc;
+   struct toy_src desc;
+   struct toy_dst dst, tmp;
+   unsigned sampler_index;
+   int swizzles[4], i;
+   unsigned swizzle_zero_mask, swizzle_one_mask, swizzle_normal_mask;
+   bool need_filter;
+
+   desc = vs_prepare_tgsi_sampling(tc, inst,
+         vcc->first_free_mrf, &sampler_index);
+
+   switch (inst->opcode) {
+   case TOY_OPCODE_TGSI_TXF:
+   case TOY_OPCODE_TGSI_TXQ:
+   case TOY_OPCODE_TGSI_TXQ_LZ:
+      need_filter = false;
+      break;
+   default:
+      need_filter = true;
+      break;
+   }
+
+   toy_compiler_lower_to_send(tc, inst, false, BRW_SFID_SAMPLER);
+   inst->src[0] = tsrc(TOY_FILE_MRF, vcc->first_free_mrf, 0);
+   inst->src[1] = desc;
+
+   /* write to a temp first */
+   tmp = tc_alloc_tmp(tc);
+   dst = inst->dst;
+   inst->dst = tmp;
+
+   tc_move_inst(tc, inst);
+
+   if (need_filter) {
+      assert(sampler_index < vcc->variant->num_sampler_views);
+      swizzles[0] = vcc->variant->sampler_view_swizzles[sampler_index].r;
+      swizzles[1] = vcc->variant->sampler_view_swizzles[sampler_index].g;
+      swizzles[2] = vcc->variant->sampler_view_swizzles[sampler_index].b;
+      swizzles[3] = vcc->variant->sampler_view_swizzles[sampler_index].a;
+   }
+   else {
+      swizzles[0] = PIPE_SWIZZLE_RED;
+      swizzles[1] = PIPE_SWIZZLE_GREEN;
+      swizzles[2] = PIPE_SWIZZLE_BLUE;
+      swizzles[3] = PIPE_SWIZZLE_ALPHA;
+   }
+
+   swizzle_zero_mask = 0;
+   swizzle_one_mask = 0;
+   swizzle_normal_mask = 0;
+   for (i = 0; i < 4; i++) {
+      switch (swizzles[i]) {
+      case PIPE_SWIZZLE_ZERO:
+         swizzle_zero_mask |= 1 << i;
+         swizzles[i] = i;
+         break;
+      case PIPE_SWIZZLE_ONE:
+         swizzle_one_mask |= 1 << i;
+         swizzles[i] = i;
+         break;
+      default:
+         swizzle_normal_mask |= 1 << i;
+         break;
+      }
+   }
+
+   /* swizzle the results */
+   if (swizzle_normal_mask) {
+      tc_MOV(tc, tdst_writemask(dst, swizzle_normal_mask),
+            tsrc_swizzle(tsrc_from(tmp), swizzles[0],
+               swizzles[1], swizzles[2], swizzles[3]));
+   }
+   if (swizzle_zero_mask)
+      tc_MOV(tc, tdst_writemask(dst, swizzle_zero_mask), tsrc_imm_f(0.0f));
+   if (swizzle_one_mask)
+      tc_MOV(tc,
tdst_writemask(dst, swizzle_one_mask), tsrc_imm_f(1.0f)); +} + +static void +vs_lower_opcode_urb_write(struct toy_compiler *tc, struct toy_inst *inst) +{ + /* vs_write_vue() has set up the message registers */ + toy_compiler_lower_to_send(tc, inst, false, BRW_SFID_URB); +} + +static void +vs_lower_virtual_opcodes(struct vs_compile_context *vcc) +{ + struct toy_compiler *tc = &vcc->tc; + struct toy_inst *inst; + + tc_head(tc); + while ((inst = tc_next(tc)) != NULL) { + switch (inst->opcode) { + case TOY_OPCODE_TGSI_IN: + case TOY_OPCODE_TGSI_CONST: + case TOY_OPCODE_TGSI_SV: + case TOY_OPCODE_TGSI_IMM: + vs_lower_opcode_tgsi_direct(vcc, inst); + break; + case TOY_OPCODE_TGSI_INDIRECT_FETCH: + case TOY_OPCODE_TGSI_INDIRECT_STORE: + vs_lower_opcode_tgsi_indirect(vcc, inst); + break; + case TOY_OPCODE_TGSI_TEX: + case TOY_OPCODE_TGSI_TXB: + case TOY_OPCODE_TGSI_TXD: + case TOY_OPCODE_TGSI_TXL: + case TOY_OPCODE_TGSI_TXP: + case TOY_OPCODE_TGSI_TXF: + case TOY_OPCODE_TGSI_TXQ: + case TOY_OPCODE_TGSI_TXQ_LZ: + case TOY_OPCODE_TGSI_TEX2: + case TOY_OPCODE_TGSI_TXB2: + case TOY_OPCODE_TGSI_TXL2: + case TOY_OPCODE_TGSI_SAMPLE: + case TOY_OPCODE_TGSI_SAMPLE_I: + case TOY_OPCODE_TGSI_SAMPLE_I_MS: + case TOY_OPCODE_TGSI_SAMPLE_B: + case TOY_OPCODE_TGSI_SAMPLE_C: + case TOY_OPCODE_TGSI_SAMPLE_C_LZ: + case TOY_OPCODE_TGSI_SAMPLE_D: + case TOY_OPCODE_TGSI_SAMPLE_L: + case TOY_OPCODE_TGSI_GATHER4: + case TOY_OPCODE_TGSI_SVIEWINFO: + case TOY_OPCODE_TGSI_SAMPLE_POS: + case TOY_OPCODE_TGSI_SAMPLE_INFO: + vs_lower_opcode_tgsi_sampling(vcc, inst); + break; + case TOY_OPCODE_INV: + case TOY_OPCODE_LOG: + case TOY_OPCODE_EXP: + case TOY_OPCODE_SQRT: + case TOY_OPCODE_RSQ: + case TOY_OPCODE_SIN: + case TOY_OPCODE_COS: + case TOY_OPCODE_FDIV: + case TOY_OPCODE_POW: + case TOY_OPCODE_INT_DIV_QUOTIENT: + case TOY_OPCODE_INT_DIV_REMAINDER: + toy_compiler_lower_math(tc, inst); + break; + case TOY_OPCODE_URB_WRITE: + vs_lower_opcode_urb_write(tc, inst); + break; + default: + if (inst->opcode > 127) + tc_fail(tc, "unhandled virtual opcode"); + break; + } + } +} + +/** + * Compile the shader. 
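+ *
+ * This lowers the virtual opcodes, legalizes and optimizes the
+ * instructions, allocates registers, and assembles the kernel. The dead
+ * branch below allows hand-written microcode to be substituted for
+ * debugging.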
+ */
+static bool
+vs_compile(struct vs_compile_context *vcc)
+{
+   struct toy_compiler *tc = &vcc->tc;
+   struct ilo_shader *sh = vcc->shader;
+
+   vs_lower_virtual_opcodes(vcc);
+   toy_compiler_legalize_for_ra(tc);
+   toy_compiler_optimize(tc);
+   toy_compiler_allocate_registers(tc,
+         vcc->first_free_grf,
+         vcc->last_free_grf,
+         vcc->num_grf_per_vrf);
+   toy_compiler_legalize_for_asm(tc);
+
+   if (tc->fail) {
+      ilo_err("failed to legalize VS instructions: %s\n", tc->reason);
+      return false;
+   }
+
+   if (ilo_debug & ILO_DEBUG_VS) {
+      ilo_printf("legalized instructions:\n");
+      toy_compiler_dump(tc);
+      ilo_printf("\n");
+   }
+
+   if (true) {
+      sh->kernel = toy_compiler_assemble(tc, &sh->kernel_size);
+   }
+   else {
+      static const uint32_t microcode[] = {
+         /* fill in the microcode here */
+         0x0, 0x0, 0x0, 0x0,
+      };
+      const bool swap = true;
+
+      sh->kernel_size = sizeof(microcode);
+      sh->kernel = MALLOC(sh->kernel_size);
+
+      if (sh->kernel) {
+         const int num_dwords = sizeof(microcode) / 4;
+         const uint32_t *src = microcode;
+         uint32_t *dst = (uint32_t *) sh->kernel;
+         int i;
+
+         for (i = 0; i < num_dwords; i += 4) {
+            if (swap) {
+               dst[i + 0] = src[i + 3];
+               dst[i + 1] = src[i + 2];
+               dst[i + 2] = src[i + 1];
+               dst[i + 3] = src[i + 0];
+            }
+            else {
+               memcpy(&dst[i], &src[i], 16);
+            }
+         }
+      }
+   }
+
+   if (!sh->kernel) {
+      ilo_err("failed to compile VS: %s\n", tc->reason);
+      return false;
+   }
+
+   if (ilo_debug & ILO_DEBUG_VS) {
+      ilo_printf("disassembly:\n");
+      toy_compiler_disassemble(tc, sh->kernel, sh->kernel_size);
+      ilo_printf("\n");
+   }
+
+   return true;
+}
+
+/**
+ * Collect the toy registers to be written to the VUE.
+ */
+static int
+vs_collect_outputs(struct vs_compile_context *vcc, struct toy_src *outs)
+{
+   const struct toy_tgsi *tgsi = &vcc->tgsi;
+   int i;
+
+   for (i = 0; i < vcc->shader->out.count; i++) {
+      const int slot = vcc->output_map[i];
+      const int vrf = (slot >= 0) ? toy_tgsi_get_vrf(tgsi,
+            TGSI_FILE_OUTPUT, 0, tgsi->outputs[slot].index) : -1;
+      struct toy_src src;
+
+      if (vrf >= 0) {
+         struct toy_dst dst;
+
+         dst = tdst(TOY_FILE_VRF, vrf, 0);
+         src = tsrc_from(dst);
+
+         if (i == 0) {
+            /* PSIZE is at channel W */
+            tc_MOV(&vcc->tc, tdst_writemask(dst, TOY_WRITEMASK_W),
+                  tsrc_swizzle1(src, TOY_SWIZZLE_X));
+
+            /* the other channels are for the header */
+            dst = tdst_d(dst);
+            tc_MOV(&vcc->tc, tdst_writemask(dst, TOY_WRITEMASK_XYZ),
+                  tsrc_imm_d(0));
+         }
+         else {
+            /* initialize unused channels to 0.0f */
+            if (tgsi->outputs[slot].undefined_mask) {
+               dst = tdst_writemask(dst, tgsi->outputs[slot].undefined_mask);
+               tc_MOV(&vcc->tc, dst, tsrc_imm_f(0.0f));
+            }
+         }
+      }
+      else {
+         /* XXX this is too ugly */
+         if (vcc->shader->out.semantic_names[i] == TGSI_SEMANTIC_CLIPDIST &&
+             slot < 0) {
+            /* ok, we need to compute clip distance */
+            int clipvert_slot = -1, clipvert_vrf, j;
+
+            for (j = 0; j < tgsi->num_outputs; j++) {
+               if (tgsi->outputs[j].semantic_name ==
+                     TGSI_SEMANTIC_CLIPVERTEX) {
+                  clipvert_slot = j;
+                  break;
+               }
+               else if (tgsi->outputs[j].semantic_name ==
+                     TGSI_SEMANTIC_POSITION) {
+                  /* remember pos, but keep looking */
+                  clipvert_slot = j;
+               }
+            }
+
+            clipvert_vrf = (clipvert_slot >= 0) ?
toy_tgsi_get_vrf(tgsi, + TGSI_FILE_OUTPUT, 0, tgsi->outputs[clipvert_slot].index) : -1; + if (clipvert_vrf >= 0) { + struct toy_dst tmp = tc_alloc_tmp(&vcc->tc); + struct toy_src clipvert = tsrc(TOY_FILE_VRF, clipvert_vrf, 0); + int first_ucp, last_ucp; + + if (vcc->shader->out.semantic_indices[i]) { + first_ucp = 4; + last_ucp = MIN2(7, vcc->variant->u.vs.num_ucps - 1); + } + else { + first_ucp = 0; + last_ucp = MIN2(3, vcc->variant->u.vs.num_ucps - 1); + } + + for (j = first_ucp; j <= last_ucp; j++) { + const int plane_grf = vcc->first_const_grf + j / 2; + const int plane_subreg = (j & 1) * 16; + const struct toy_src plane = tsrc_rect(tsrc(TOY_FILE_GRF, + plane_grf, plane_subreg), TOY_RECT_041); + const unsigned writemask = 1 << ((j >= 4) ? j - 4 : j); + + tc_DP4(&vcc->tc, tdst_writemask(tmp, writemask), + clipvert, plane); + } + + src = tsrc_from(tmp); + } + else { + src = tsrc_imm_f(0.0f); + } + } + else { + src = (i == 0) ? tsrc_imm_d(0) : tsrc_imm_f(0.0f); + } + } + + outs[i] = src; + } + + return i; +} + +/** + * Emit instructions to write the VUE. + */ +static void +vs_write_vue(struct vs_compile_context *vcc) +{ + struct toy_compiler *tc = &vcc->tc; + struct toy_src outs[PIPE_MAX_SHADER_OUTPUTS]; + struct toy_dst header; + struct toy_src r0; + struct toy_inst *inst; + int sent_attrs, total_attrs; + + header = tdst_ud(tdst(TOY_FILE_MRF, vcc->first_free_mrf, 0)); + r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0)); + inst = tc_MOV(tc, header, r0); + inst->mask_ctrl = BRW_MASK_DISABLE; + + if (tc->gen >= ILO_GEN(7)) { + inst = tc_OR(tc, tdst_offset(header, 0, 5), + tsrc_rect(tsrc_offset(r0, 0, 5), TOY_RECT_010), + tsrc_rect(tsrc_imm_ud(0xff00), TOY_RECT_010)); + inst->exec_size = BRW_EXECUTE_1; + inst->access_mode = BRW_ALIGN_1; + inst->mask_ctrl = BRW_MASK_DISABLE; + } + + total_attrs = vs_collect_outputs(vcc, outs); + sent_attrs = 0; + while (sent_attrs < total_attrs) { + struct toy_src desc; + int mrf = vcc->first_free_mrf + 1, avail_mrf_for_attrs; + int num_attrs, msg_len, i; + bool eot; + + num_attrs = total_attrs - sent_attrs; + eot = true; + + /* see if we need another message */ + avail_mrf_for_attrs = vcc->last_free_mrf - mrf + 1; + if (num_attrs > avail_mrf_for_attrs) { + /* + * From the Sandy Bridge PRM, volume 4 part 2, page 22: + * + * "Offset. This field specifies a destination offset (in 256-bit + * units) from the start of the URB entry(s), as referenced by + * URB Return Handle n, at which the data (if any) will be + * written." + * + * As we need to offset the following messages, we must make sure + * this one writes an even number of attributes. + */ + num_attrs = avail_mrf_for_attrs & ~1; + eot = false; + } + + if (tc->gen >= ILO_GEN(7)) { + /* do not forget about the header */ + msg_len = 1 + num_attrs; + } + else { + /* + * From the Sandy Bridge PRM, volume 4 part 2, page 26: + * + * "At least 256 bits per vertex (512 bits total, M1 & M2) must + * be written. Writing only 128 bits per vertex (256 bits + * total, M1 only) results in UNDEFINED operation." + * + * "[DevSNB] Interleave writes must be in multiples of 256 per + * vertex." + * + * That is, we must write or appear to write an even number of + * attributes, starting from two. 
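+          *
+          * For example, if 3 attributes remained and exactly 3 MRFs were
+          * free, we would send 2 attributes now without EOT and leave the
+          * third for the next message; a final lone attribute is instead
+          * padded to an even count by the align() below.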
+ */ + if (num_attrs % 2 && num_attrs == avail_mrf_for_attrs) { + num_attrs--; + eot = false; + } + + msg_len = 1 + align(num_attrs, 2); + } + + for (i = 0; i < num_attrs; i++) + tc_MOV(tc, tdst(TOY_FILE_MRF, mrf++, 0), outs[sent_attrs + i]); + + assert(sent_attrs % 2 == 0); + desc = tsrc_imm_mdesc_urb(tc, eot, msg_len, 0, + eot, true, false, BRW_URB_SWIZZLE_INTERLEAVE, sent_attrs / 2, 0); + + tc_add2(tc, TOY_OPCODE_URB_WRITE, tdst_null(), tsrc_from(header), desc); + + sent_attrs += num_attrs; + } +} + +/** + * Set up shader inputs for fixed-function units. + */ +static void +vs_setup_shader_in(struct ilo_shader *sh, const struct toy_tgsi *tgsi) +{ + int num_attrs, i; + + /* vertex/instance id is the first VE if exists */ + for (i = 0; i < tgsi->num_system_values; i++) { + bool found = false; + + switch (tgsi->system_values[i].semantic_name) { + case TGSI_SEMANTIC_INSTANCEID: + case TGSI_SEMANTIC_VERTEXID: + found = true; + break; + default: + break; + } + + if (found) { + sh->in.semantic_names[sh->in.count] = + tgsi->system_values[i].semantic_name; + sh->in.semantic_indices[sh->in.count] = + tgsi->system_values[i].semantic_index; + sh->in.interp[sh->in.count] = TGSI_INTERPOLATE_CONSTANT; + sh->in.centroid[sh->in.count] = false; + + sh->in.count++; + break; + } + } + + num_attrs = 0; + for (i = 0; i < tgsi->num_inputs; i++) { + assert(tgsi->inputs[i].semantic_name == TGSI_SEMANTIC_GENERIC); + if (tgsi->inputs[i].semantic_index >= num_attrs) + num_attrs = tgsi->inputs[i].semantic_index + 1; + } + assert(num_attrs <= PIPE_MAX_ATTRIBS); + + /* VF cannot remap VEs. VE[i] must be used as GENERIC[i]. */ + for (i = 0; i < num_attrs; i++) { + sh->in.semantic_names[sh->in.count + i] = TGSI_SEMANTIC_GENERIC; + sh->in.semantic_indices[sh->in.count + i] = i; + sh->in.interp[sh->in.count + i] = TGSI_INTERPOLATE_CONSTANT; + sh->in.centroid[sh->in.count + i] = false; + } + + sh->in.count += num_attrs; + + sh->in.has_pos = false; + sh->in.has_linear_interp = false; + sh->in.barycentric_interpolation_mode = 0; +} + +/** + * Set up shader outputs for fixed-function units. 
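+ *
+ * The resulting VUE layout is PSIZE and POSITION first, optionally
+ * followed by the clip distances, then COLOR/BCOLOR pairs, and finally
+ * the remaining outputs. output_map[] maps each VUE slot back to a TGSI
+ * output slot, or to -1.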
+ */ +static void +vs_setup_shader_out(struct ilo_shader *sh, const struct toy_tgsi *tgsi, + bool output_clipdist, int *output_map) +{ + int psize_slot = -1, pos_slot = -1; + int clipdist_slot[2] = { -1, -1 }; + int color_slot[4] = { -1, -1, -1, -1 }; + int num_outs, i; + + /* find out the slots of outputs that need special care */ + for (i = 0; i < tgsi->num_outputs; i++) { + switch (tgsi->outputs[i].semantic_name) { + case TGSI_SEMANTIC_PSIZE: + psize_slot = i; + break; + case TGSI_SEMANTIC_POSITION: + pos_slot = i; + break; + case TGSI_SEMANTIC_CLIPDIST: + if (tgsi->outputs[i].semantic_index) + clipdist_slot[1] = i; + else + clipdist_slot[0] = i; + break; + case TGSI_SEMANTIC_COLOR: + if (tgsi->outputs[i].semantic_index) + color_slot[2] = i; + else + color_slot[0] = i; + break; + case TGSI_SEMANTIC_BCOLOR: + if (tgsi->outputs[i].semantic_index) + color_slot[3] = i; + else + color_slot[1] = i; + break; + default: + break; + } + } + + /* the first two VUEs are always PSIZE and POSITION */ + num_outs = 2; + sh->out.semantic_names[0] = TGSI_SEMANTIC_PSIZE; + sh->out.semantic_indices[0] = 0; + sh->out.semantic_names[1] = TGSI_SEMANTIC_POSITION; + sh->out.semantic_indices[1] = 0; + + sh->out.has_pos = true; + output_map[0] = psize_slot; + output_map[1] = pos_slot; + + /* followed by optional clip distances */ + if (output_clipdist) { + sh->out.semantic_names[num_outs] = TGSI_SEMANTIC_CLIPDIST; + sh->out.semantic_indices[num_outs] = 0; + output_map[num_outs++] = clipdist_slot[0]; + + sh->out.semantic_names[num_outs] = TGSI_SEMANTIC_CLIPDIST; + sh->out.semantic_indices[num_outs] = 1; + output_map[num_outs++] = clipdist_slot[1]; + } + + /* + * make BCOLOR follow COLOR so that we can make use of + * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING in 3DSTATE_SF + */ + for (i = 0; i < 4; i++) { + const int slot = color_slot[i]; + + if (slot < 0) + continue; + + sh->out.semantic_names[num_outs] = tgsi->outputs[slot].semantic_name; + sh->out.semantic_indices[num_outs] = tgsi->outputs[slot].semantic_index; + + output_map[num_outs++] = slot; + } + + /* add the rest of the outputs */ + for (i = 0; i < tgsi->num_outputs; i++) { + switch (tgsi->outputs[i].semantic_name) { + case TGSI_SEMANTIC_PSIZE: + case TGSI_SEMANTIC_POSITION: + case TGSI_SEMANTIC_CLIPDIST: + case TGSI_SEMANTIC_COLOR: + case TGSI_SEMANTIC_BCOLOR: + break; + default: + sh->out.semantic_names[num_outs] = tgsi->outputs[i].semantic_name; + sh->out.semantic_indices[num_outs] = tgsi->outputs[i].semantic_index; + output_map[num_outs++] = i; + break; + } + } + + sh->out.count = num_outs; +} + +/** + * Translate the TGSI tokens. + */ +static bool +vs_setup_tgsi(struct toy_compiler *tc, const struct tgsi_token *tokens, + struct toy_tgsi *tgsi) +{ + if (ilo_debug & ILO_DEBUG_VS) { + ilo_printf("dumping vertex shader\n"); + ilo_printf("\n"); + + tgsi_dump(tokens, 0); + ilo_printf("\n"); + } + + toy_compiler_translate_tgsi(tc, tokens, true, tgsi); + if (tc->fail) { + ilo_err("failed to translate VS TGSI tokens: %s\n", tc->reason); + return false; + } + + if (ilo_debug & ILO_DEBUG_VS) { + ilo_printf("TGSI translator:\n"); + toy_tgsi_dump(tgsi); + ilo_printf("\n"); + toy_compiler_dump(tc); + ilo_printf("\n"); + } + + return true; +} + +/** + * Set up VS compile context. This includes translating the TGSI tokens. 
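+ *
+ * This also fixes the GRF layout: r0 holds the payload header, followed
+ * by the user clip planes (two per register) and then the VUE inputs; m0
+ * is reserved for system routines.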
+ */ +static bool +vs_setup(struct vs_compile_context *vcc, + const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant) +{ + int num_consts; + + memset(vcc, 0, sizeof(*vcc)); + + vcc->shader = CALLOC_STRUCT(ilo_shader); + if (!vcc->shader) + return false; + + vcc->variant = variant; + + toy_compiler_init(&vcc->tc, state->info.gen); + vcc->tc.templ.access_mode = BRW_ALIGN_16; + vcc->tc.templ.exec_size = BRW_EXECUTE_8; + vcc->tc.rect_linear_width = 4; + + /* + * The classic driver uses the sampler cache (gen6) or the data cache + * (gen7). Why? + */ + vcc->const_cache = GEN6_SFID_DATAPORT_CONSTANT_CACHE; + + if (!vs_setup_tgsi(&vcc->tc, state->info.tokens, &vcc->tgsi)) { + toy_compiler_cleanup(&vcc->tc); + FREE(vcc->shader); + return false; + } + + vs_setup_shader_in(vcc->shader, &vcc->tgsi); + vs_setup_shader_out(vcc->shader, &vcc->tgsi, + (vcc->variant->u.vs.num_ucps > 0), vcc->output_map); + + /* fit each pair of user clip planes into a register */ + num_consts = (vcc->variant->u.vs.num_ucps + 1) / 2; + + /* r0 is reserved for payload header */ + vcc->first_const_grf = 1; + vcc->first_vue_grf = vcc->first_const_grf + num_consts; + vcc->first_free_grf = vcc->first_vue_grf + vcc->shader->in.count; + vcc->last_free_grf = 127; + + /* m0 is reserved for system routines */ + vcc->first_free_mrf = 1; + vcc->last_free_mrf = 15; + + vcc->num_grf_per_vrf = 1; + + if (vcc->tc.gen >= ILO_GEN(7)) { + vcc->last_free_grf -= 15; + vcc->first_free_mrf = vcc->last_free_grf + 1; + vcc->last_free_mrf = vcc->first_free_mrf + 14; + } + + vcc->shader->in.start_grf = vcc->first_const_grf; + vcc->shader->pcb.clip_state_size = + vcc->variant->u.vs.num_ucps * (sizeof(float) * 4); + + return true; +} + +/** + * Compile the vertex shader. + */ +struct ilo_shader * +ilo_shader_compile_vs(const struct ilo_shader_state *state, + const struct ilo_shader_variant *variant) +{ + struct vs_compile_context vcc; + bool need_gs; + + if (!vs_setup(&vcc, state, variant)) + return NULL; + + if (vcc.tc.gen >= ILO_GEN(7)) { + need_gs = false; + } + else { + need_gs = variant->u.vs.rasterizer_discard || + state->info.stream_output.num_outputs; + } + + vs_write_vue(&vcc); + + if (!vs_compile(&vcc)) { + FREE(vcc.shader); + vcc.shader = NULL; + } + + toy_tgsi_cleanup(&vcc.tgsi); + toy_compiler_cleanup(&vcc.tc); + + if (need_gs) { + int so_mapping[PIPE_MAX_SHADER_OUTPUTS]; + int i, j; + + for (i = 0; i < vcc.tgsi.num_outputs; i++) { + int attr = 0; + + for (j = 0; j < vcc.shader->out.count; j++) { + if (vcc.tgsi.outputs[i].semantic_name == + vcc.shader->out.semantic_names[j] && + vcc.tgsi.outputs[i].semantic_index == + vcc.shader->out.semantic_indices[j]) { + attr = j; + break; + } + } + + so_mapping[i] = attr; + } + + if (!ilo_shader_compile_gs_passthrough(state, variant, + so_mapping, vcc.shader)) { + ilo_shader_destroy(vcc.shader); + vcc.shader = NULL; + } + } + + return vcc.shader; +} |