diff options
author | Jason Ekstrand <[email protected]> | 2015-11-25 14:14:05 -0800 |
---|---|---|
committer | Jason Ekstrand <[email protected]> | 2015-12-10 12:25:16 -0800 |
commit | 78b81be627734ea7fa50ea246c07b0d4a3a1638a (patch) | |
tree | 10b0b098de5b3a111d076e9d8c5fca440fad45ad /src/mesa | |
parent | f3970fad9e5b04e04de366a65fed5a30da618f9d (diff) |
nir: Get rid of *_indirect variants of input/output load/store intrinsics
There is some special-casing needed in a competent back-end. However, they
can do their special-casing easily enough based on whether or not the
offset is a constant. In the mean time, having the *_indirect variants
adds special cases a number of places where they don't need to be and, in
general, only complicates things. To complicate matters, NIR had no way to
convert an indirect load/store to a direct one in the case that the
indirect was a constant so we would still not really get what the back-ends
wanted. The best solution seems to be to get rid of the *_indirect
variants entirely.
This commit is a bunch of different changes squashed together:
- nir: Get rid of *_indirect variants of input/output load/store intrinsics
- nir/glsl: Stop handling UBO/SSBO load/stores differently depending on indirect
- nir/lower_io: Get rid of load/store_foo_indirect
- i965/fs: Get rid of load/store_foo_indirect
- i965/vec4: Get rid of load/store_foo_indirect
- tgsi_to_nir: Get rid of load/store_foo_indirect
- ir3/nir: Use the new unified io intrinsics
- vc4: Do all uniform loads with byte offsets
- vc4/nir: Use the new unified io intrinsics
- vc4: Fix load_user_clip_plane crash
- vc4: add missing src for store outputs
- vc4: Fix state uniforms
- nir/lower_clip: Update to the new load/store intrinsics
- nir/lower_two_sided_color: Update to the new load intrinsic
NIR and i965 changes are
Reviewed-by: Kenneth Graunke <[email protected]>
NIR indirect declarations and vc4 changes are
Reviewed-by: Eric Anholt <[email protected]>
ir3 changes are
Reviewed-by: Rob Clark <[email protected]>
NIR changes are
Acked-by: Rob Clark <[email protected]>
Diffstat (limited to 'src/mesa')
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_fs.h | 2 | ||||
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_fs_nir.cpp | 128 | ||||
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_nir.c | 45 | ||||
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp | 7 | ||||
-rw-r--r-- | src/mesa/drivers/dri/i965/brw_vec4_nir.cpp | 95 |
5 files changed, 138 insertions, 139 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_fs.h b/src/mesa/drivers/dri/i965/brw_fs.h index cead99155f4..f2e384129cb 100644 --- a/src/mesa/drivers/dri/i965/brw_fs.h +++ b/src/mesa/drivers/dri/i965/brw_fs.h @@ -280,7 +280,7 @@ public: unsigned stream_id); void emit_gs_thread_end(); void emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src, - const fs_reg &indirect_offset, unsigned imm_offset, + unsigned base_offset, const nir_src &offset_src, unsigned num_components); void emit_cs_terminate(); fs_reg *emit_cs_local_invocation_id_setup(); diff --git a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp index 13059999e7d..db38f619272 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp @@ -1603,28 +1603,30 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src, void fs_visitor::emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src, - const fs_reg &indirect_offset, - unsigned imm_offset, + unsigned base_offset, + const nir_src &offset_src, unsigned num_components) { struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data; + nir_const_value *vertex_const = nir_src_as_const_value(vertex_src); + nir_const_value *offset_const = nir_src_as_const_value(offset_src); + const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8; + /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y], * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only * gl_PointSize is available as a GS input, however, so it must be that. 
*/ - const bool is_point_size = - indirect_offset.file == BAD_FILE && imm_offset == 0; - - nir_const_value *vertex_const = nir_src_as_const_value(vertex_src); - const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8; + const bool is_point_size = (base_offset == 0); - if (indirect_offset.file == BAD_FILE && vertex_const != NULL && - 4 * imm_offset < push_reg_count) { - imm_offset = 4 * imm_offset + vertex_const->u[0] * push_reg_count; + if (offset_const != NULL && vertex_const != NULL && + 4 * (base_offset + offset_const->u[0]) < push_reg_count) { + int imm_offset = (base_offset + offset_const->u[0]) * 4 + + vertex_const->u[0] * push_reg_count; /* This input was pushed into registers. */ if (is_point_size) { /* gl_PointSize comes in .w */ + assert(imm_offset == 0); bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type)); } else { for (unsigned i = 0; i < num_components; i++) { @@ -1683,21 +1685,21 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst, } fs_inst *inst; - if (indirect_offset.file == BAD_FILE) { + if (offset_const) { /* Constant indexing - use global offset. */ inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle); - inst->offset = imm_offset; + inst->offset = base_offset + offset_const->u[0]; inst->base_mrf = -1; inst->mlen = 1; inst->regs_written = num_components; } else { /* Indirect indexing - use per-slot offsets as well. 
*/ - const fs_reg srcs[] = { icp_handle, indirect_offset }; + const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) }; fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2); bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0); inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload); - inst->offset = imm_offset; + inst->offset = base_offset; inst->base_mrf = -1; inst->mlen = 2; inst->regs_written = num_components; @@ -1763,17 +1765,12 @@ fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld, retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD)); break; - case nir_intrinsic_load_input_indirect: case nir_intrinsic_load_input: unreachable("load_input intrinsics are invalid for the GS stage"); - case nir_intrinsic_load_per_vertex_input_indirect: - indirect_offset = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_D); - /* fallthrough */ case nir_intrinsic_load_per_vertex_input: - emit_gs_input_load(dest, instr->src[0], - indirect_offset, instr->const_index[0], - instr->num_components); + emit_gs_input_load(dest, instr->src[0], instr->const_index[0], + instr->src[1], instr->num_components); break; case nir_intrinsic_emit_vertex_with_counter: @@ -2137,8 +2134,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr if (nir_intrinsic_infos[instr->intrinsic].has_dest) dest = get_nir_dest(instr->dest); - bool has_indirect = false; - switch (instr->intrinsic) { case nir_intrinsic_atomic_counter_inc: case nir_intrinsic_atomic_counter_dec: @@ -2327,19 +2322,20 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1)); break; - case nir_intrinsic_load_uniform_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_uniform: { /* Offsets are in bytes but they should always be multiples of 4 */ assert(instr->const_index[0] % 4 == 0); - assert(instr->const_index[1] % 4 == 0); fs_reg src(UNIFORM, instr->const_index[0] / 4, 
dest.type); - src.reg_offset = instr->const_index[1] / 4; - if (has_indirect) + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + if (const_offset) { + /* Offsets are in bytes but they should always be multiples of 4 */ + assert(const_offset->u[0] % 4 == 0); + src.reg_offset = const_offset->u[0] / 4; + } else { src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0])); + } for (unsigned j = 0; j < instr->num_components; j++) { bld.MOV(offset(dest, bld, j), offset(src, bld, j)); @@ -2347,9 +2343,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_load_ubo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_ubo: { nir_const_value *const_index = nir_src_as_const_value(instr->src[0]); fs_reg surf_index; @@ -2377,24 +2370,24 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr nir->info.num_ubos - 1); } - if (has_indirect) { + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset == NULL) { fs_reg base_offset = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_D); - unsigned vec4_offset = instr->const_index[0]; for (int i = 0; i < instr->num_components; i++) VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index, - base_offset, vec4_offset + i * 4); + base_offset, i * 4); } else { fs_reg packed_consts = vgrf(glsl_type::float_type); packed_consts.type = dest.type; - struct brw_reg const_offset_reg = brw_imm_ud(instr->const_index[0] & ~15); + struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u[0] & ~15); bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts, surf_index, const_offset_reg); for (unsigned i = 0; i < instr->num_components; i++) { - packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i); + packed_consts.set_smear(const_offset->u[0] % 16 / 4 + i); /* The std140 packing rules don't allow vectors to cross 16-byte * boundaries, and a reg is 32 bytes. 
@@ -2408,9 +2401,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_load_ssbo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_ssbo: { assert(devinfo->gen >= 7); @@ -2436,12 +2426,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr nir->info.num_ssbos - 1); } - /* Get the offset to read from */ fs_reg offset_reg; - if (has_indirect) { - offset_reg = get_nir_src(instr->src[1]); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset) { + offset_reg = brw_imm_ud(const_offset->u[0]); } else { - offset_reg = brw_imm_ud(instr->const_index[0]); + offset_reg = get_nir_src(instr->src[1]); } /* Read the vector */ @@ -2456,9 +2446,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_load_shared_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_shared: { assert(devinfo->gen >= 7); @@ -2466,10 +2453,14 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr /* Get the offset to read from */ fs_reg offset_reg; - if (has_indirect) { - offset_reg = get_nir_src(instr->src[0]); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset) { + offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]); } else { - offset_reg = brw_imm_ud(instr->const_index[0]); + offset_reg = vgrf(glsl_type::uint_type); + bld.ADD(offset_reg, + retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD), + brw_imm_ud(instr->const_index[0])); } /* Read the vector */ @@ -2484,9 +2475,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_store_shared_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_store_shared: { assert(devinfo->gen >= 7); @@ -2509,13 +2497,15 @@ fs_visitor::nir_emit_intrinsic(const 
fs_builder &bld, nir_intrinsic_instr *instr unsigned length = ffs(~(writemask >> first_component)) - 1; fs_reg offset_reg; - if (!has_indirect) { - offset_reg = brw_imm_ud(instr->const_index[0] + 4 * first_component); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset) { + offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] + + 4 * first_component); } else { offset_reg = vgrf(glsl_type::uint_type); bld.ADD(offset_reg, retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD), - brw_imm_ud(4 * first_component)); + brw_imm_ud(instr->const_index[0] + 4 * first_component)); } emit_untyped_write(bld, surf_index, offset_reg, @@ -2532,9 +2522,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_load_input_indirect: - unreachable("Not allowed"); - /* fallthrough */ case nir_intrinsic_load_input: { fs_reg src; if (stage == MESA_SHADER_VERTEX) { @@ -2544,15 +2531,16 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr instr->const_index[0]); } + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + assert(const_offset && "Indirect input loads not allowed"); + src = offset(src, bld, const_offset->u[0]); + for (unsigned j = 0; j < instr->num_components; j++) { bld.MOV(offset(dest, bld, j), offset(src, bld, j)); } break; } - case nir_intrinsic_store_ssbo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_store_ssbo: { assert(devinfo->gen >= 7); @@ -2579,7 +2567,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr fs_reg val_reg = get_nir_src(instr->src[0]); /* Writemask */ - unsigned writemask = instr->const_index[1]; + unsigned writemask = instr->const_index[0]; /* Combine groups of consecutive enabled channels in one write * message. 
We use ffs to find the first enabled channel and then ffs on @@ -2589,10 +2577,11 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr while (writemask) { unsigned first_component = ffs(writemask) - 1; unsigned length = ffs(~(writemask >> first_component)) - 1; - fs_reg offset_reg; - if (!has_indirect) { - offset_reg = brw_imm_ud(instr->const_index[0] + 4 * first_component); + fs_reg offset_reg; + nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]); + if (const_offset) { + offset_reg = brw_imm_ud(const_offset->u[0] + 4 * first_component); } else { offset_reg = vgrf(glsl_type::uint_type); bld.ADD(offset_reg, @@ -2613,14 +2602,15 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr break; } - case nir_intrinsic_store_output_indirect: - unreachable("Not allowed"); - /* fallthrough */ case nir_intrinsic_store_output: { fs_reg src = get_nir_src(instr->src[0]); fs_reg new_dest = offset(retype(nir_outputs, src.type), bld, instr->const_index[0]); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + assert(const_offset && "Indirect output stores not allowed"); + new_dest = offset(new_dest, bld, const_offset->u[0]); + for (unsigned j = 0; j < instr->num_components; j++) { bld.MOV(offset(new_dest, bld, j), offset(src, bld, j)); } diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c index d62470379ee..14ad172a2c3 100644 --- a/src/mesa/drivers/dri/i965/brw_nir.c +++ b/src/mesa/drivers/dri/i965/brw_nir.c @@ -24,31 +24,49 @@ #include "brw_nir.h" #include "brw_shader.h" #include "glsl/nir/glsl_to_nir.h" +#include "glsl/nir/nir_builder.h" #include "program/prog_to_nir.h" +struct remap_vs_attrs_state { + nir_builder b; + uint64_t inputs_read; +}; + static bool -remap_vs_attrs(nir_block *block, void *closure) +remap_vs_attrs(nir_block *block, void *void_state) { - GLbitfield64 inputs_read = *((GLbitfield64 *) closure); + struct remap_vs_attrs_state 
*state = void_state; - nir_foreach_instr(block, instr) { + nir_foreach_instr_safe(block, instr) { if (instr->type != nir_instr_type_intrinsic) continue; nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); - /* We set EmitNoIndirect for VS inputs, so there are no indirects. */ - assert(intrin->intrinsic != nir_intrinsic_load_input_indirect); - if (intrin->intrinsic == nir_intrinsic_load_input) { /* Attributes come in a contiguous block, ordered by their * gl_vert_attrib value. That means we can compute the slot * number for an attribute by masking out the enabled attributes * before it and counting the bits. */ - int attr = intrin->const_index[0]; - int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr)); + nir_const_value *const_offset = nir_src_as_const_value(intrin->src[0]); + + /* We set EmitNoIndirect for VS inputs, so there are no indirects. */ + assert(const_offset); + + int attr = intrin->const_index[0] + const_offset->u[0]; + int slot = _mesa_bitcount_64(state->inputs_read & + BITFIELD64_MASK(attr)); + + /* The NIR -> FS pass will just add the base and offset together, so + * there's no reason to keep them separate. Just put it all in + * const_index[0] and set the offset src[0] to load_const(0). + */ intrin->const_index[0] = 4 * slot; + + state->b.cursor = nir_before_instr(&intrin->instr); + nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], + nir_src_for_ssa(nir_imm_int(&state->b, 0))); } } return true; @@ -79,10 +97,17 @@ brw_nir_lower_inputs(nir_shader *nir, * key->inputs_read since the two are identical aside from Gen4-5 * edge flag differences. 
*/ - GLbitfield64 inputs_read = nir->info.inputs_read; + struct remap_vs_attrs_state remap_state = { + .inputs_read = nir->info.inputs_read, + }; + + /* This pass needs actual constants */ + nir_opt_constant_folding(nir); + nir_foreach_overload(nir, overload) { if (overload->impl) { - nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read); + nir_builder_init(&remap_state.b, overload->impl); + nir_foreach_block(overload->impl, remap_vs_attrs, &remap_state); } } } diff --git a/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp b/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp index e51ef4b37d5..6f66978f8e1 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp @@ -60,19 +60,19 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg src; switch (instr->intrinsic) { - case nir_intrinsic_load_per_vertex_input_indirect: - assert(!"EmitNoIndirectInput should prevent this."); case nir_intrinsic_load_per_vertex_input: { /* The EmitNoIndirectInput flag guarantees our vertex index will * be constant. We should handle indirects someday. */ nir_const_value *vertex = nir_src_as_const_value(instr->src[0]); + nir_const_value *offset = nir_src_as_const_value(instr->src[1]); /* Make up a type...we have no way of knowing... 
*/ const glsl_type *const type = glsl_type::ivec(instr->num_components); src = src_reg(ATTR, BRW_VARYING_SLOT_COUNT * vertex->u[0] + - instr->const_index[0], type); + instr->const_index[0] + offset->u[0], + type); dest = get_nir_dest(instr->dest, src.type); dest.writemask = brw_writemask_for_size(instr->num_components); emit(MOV(dest, src)); @@ -80,7 +80,6 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) } case nir_intrinsic_load_input: - case nir_intrinsic_load_input_indirect: unreachable("nir_lower_io should have produced per_vertex intrinsics"); case nir_intrinsic_emit_vertex_with_counter: { diff --git a/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp b/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp index 50570cd7703..f965b39360f 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_nir.cpp @@ -369,22 +369,17 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) dst_reg dest; src_reg src; - bool has_indirect = false; - switch (instr->intrinsic) { - case nir_intrinsic_load_input_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_input: { - int offset = instr->const_index[0]; - src = src_reg(ATTR, offset, glsl_type::uvec4_type); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + + /* We set EmitNoIndirectInput for VS */ + assert(const_offset); + + src = src_reg(ATTR, instr->const_index[0] + const_offset->u[0], + glsl_type::uvec4_type); - if (has_indirect) { - dest.reladdr = new(mem_ctx) src_reg(get_nir_src(instr->src[0], - BRW_REGISTER_TYPE_D, - 1)); - } dest = get_nir_dest(instr->dest, src.type); dest.writemask = brw_writemask_for_size(instr->num_components); @@ -392,11 +387,11 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) break; } - case nir_intrinsic_store_output_indirect: - unreachable("nir_lower_outputs_to_temporaries should prevent this"); - case nir_intrinsic_store_output: { - int varying = instr->const_index[0]; + 
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + assert(const_offset); + + int varying = instr->const_index[0] + const_offset->u[0]; src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, instr->num_components); @@ -431,9 +426,6 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) break; } - case nir_intrinsic_store_ssbo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_store_ssbo: { assert(devinfo->gen >= 7); @@ -458,20 +450,19 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) } /* Offset */ - src_reg offset_reg = src_reg(this, glsl_type::uint_type); - unsigned const_offset_bytes = 0; - if (has_indirect) { - emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[2], 1))); + src_reg offset_reg; + nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]); + if (const_offset) { + offset_reg = brw_imm_ud(const_offset->u[0]); } else { - const_offset_bytes = instr->const_index[0]; - emit(MOV(dst_reg(offset_reg), brw_imm_ud(const_offset_bytes))); + offset_reg = get_nir_src(instr->src[2], 1); } /* Value */ src_reg val_reg = get_nir_src(instr->src[0], 4); /* Writemask */ - unsigned write_mask = instr->const_index[1]; + unsigned write_mask = instr->const_index[0]; /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped * writes will use SIMD8 mode. In order to hide this and keep symmetry across @@ -537,9 +528,8 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) * write at to skip the channels we skipped, if any. 
*/ if (skipped_channels > 0) { - if (!has_indirect) { - const_offset_bytes += 4 * skipped_channels; - offset_reg = brw_imm_ud(const_offset_bytes); + if (offset_reg.file == IMM) { + offset_reg.ud += 4 * skipped_channels; } else { emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(4 * skipped_channels))); @@ -574,9 +564,6 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) break; } - case nir_intrinsic_load_ssbo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_ssbo: { assert(devinfo->gen >= 7); @@ -604,13 +591,12 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) nir->info.num_ssbos - 1); } - src_reg offset_reg = src_reg(this, glsl_type::uint_type); - unsigned const_offset_bytes = 0; - if (has_indirect) { - emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[1], 1))); + src_reg offset_reg; + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset) { + offset_reg = brw_imm_ud(const_offset->u[0]); } else { - const_offset_bytes = instr->const_index[0]; - emit(MOV(dst_reg(offset_reg), brw_imm_ud((const_offset_bytes)))); + offset_reg = get_nir_src(instr->src[1], 1); } /* Read the vector */ @@ -673,20 +659,21 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) break; } - case nir_intrinsic_load_uniform_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_uniform: { /* Offsets are in bytes but they should always be multiples of 16 */ assert(instr->const_index[0] % 16 == 0); - assert(instr->const_index[1] % 16 == 0); dest = get_nir_dest(instr->dest); src = src_reg(dst_reg(UNIFORM, instr->const_index[0] / 16)); - src.reg_offset = instr->const_index[1] / 16; + src.type = dest.type; - if (has_indirect) { + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + if (const_offset) { + /* Offsets are in bytes but they should always be multiples of 16 */ + assert(const_offset->u[0] % 16 == 0); + src.reg_offset = const_offset->u[0] / 16; + } 
else { src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1); src.reladdr = new(mem_ctx) src_reg(tmp); } @@ -724,9 +711,6 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) break; } - case nir_intrinsic_load_ubo_indirect: - has_indirect = true; - /* fallthrough */ case nir_intrinsic_load_ubo: { nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]); src_reg surf_index; @@ -760,11 +744,10 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) nir->info.num_ubos - 1); } - unsigned const_offset = instr->const_index[0]; src_reg offset; - - if (!has_indirect) { - offset = brw_imm_ud(const_offset & ~15); + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]); + if (const_offset) { + offset = brw_imm_ud(const_offset->u[0] & ~15); } else { offset = get_nir_src(instr->src[1], nir_type_int, 1); } @@ -778,10 +761,12 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) NULL, NULL /* before_block/inst */); packed_consts.swizzle = brw_swizzle_for_size(instr->num_components); - packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4, - const_offset % 16 / 4, - const_offset % 16 / 4, - const_offset % 16 / 4); + if (const_offset) { + packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u[0] % 16 / 4, + const_offset->u[0] % 16 / 4, + const_offset->u[0] % 16 / 4, + const_offset->u[0] % 16 / 4); + } emit(MOV(dest, packed_consts)); break; |