author    Kenneth Graunke <[email protected]>  2015-10-01 00:46:19 -0700
committer Kenneth Graunke <[email protected]>  2015-10-04 14:00:01 -0700
commit    5d7f8cb5a511977e256e773716fac3415d01443e (patch)
tree      8f9a63c314cb8e9ca5880a044534eb803b695512 /src/mesa/drivers/dri/i965
parent    f2a4b40cf15cbc5eaab1776ad275ed8eead3322f (diff)
nir: Introduce new nir_intrinsic_load_per_vertex_input intrinsics.
Geometry and tessellation shaders process multiple vertices; their inputs are arrays indexed by the vertex number. While GLSL makes this look like a normal array, it can be very different behind the scenes.

On Intel hardware, all inputs for a particular vertex are stored together - as if they were grouped into a single struct. This means that consecutive elements of these top-level arrays are not contiguous. In fact, they may sometimes be in completely disjoint memory segments.

NIR's existing load_input intrinsics are awkward for this case, as they distill everything down to a single offset. We'd much rather keep the vertex ID separate, but build up an offset as normal beyond that.

This patch introduces new nir_intrinsic_load_per_vertex_input intrinsics to handle this case. They work like ordinary load_input intrinsics, but have an extra source (src[0]) which represents the outermost array index.

v2: Rebase on earlier refactors.
v3: Use ssa defs instead of nir_srcs, rebase on earlier refactors.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
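For illustration only (not part of the patch): a minimal, self-contained C sketch of the addressing scheme described above, assuming the vertex index stays separate from the intra-vertex offset and is only combined late, mirroring the vec4 GS backend change below (BRW_VARYING_SLOT_COUNT * vertex + const_index[0]). The stride value and function name here are hypothetical stand-ins, not Mesa APIs.

#include <stdio.h>

/* Hypothetical stand-in for the per-vertex stride; in the real i965 backend
 * the stride is BRW_VARYING_SLOT_COUNT, because all inputs belonging to one
 * vertex are packed together as if they were a single struct.
 */
#define PER_VERTEX_STRIDE 64

/* Map (vertex index, slot offset within that vertex) to a flat attribute
 * register index. This is the same arithmetic the new load_per_vertex_input
 * lowering performs, with src[0] supplying the vertex index and
 * const_index[0] supplying the offset within that vertex.
 */
static int
attr_reg_for_per_vertex_input(int vertex, int offset)
{
   return PER_VERTEX_STRIDE * vertex + offset;
}

int
main(void)
{
   /* e.g. a gl_in[2].gl_Position-style access: vertex 2, offset 0 */
   printf("ATTR %d\n", attr_reg_for_per_vertex_input(2, 0));
   return 0;
}

Keeping the two terms separate until this point is the whole motivation for the new intrinsic: a single flattened offset would hide the fact that consecutive array elements are a full per-vertex stride apart.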
Diffstat (limited to 'src/mesa/drivers/dri/i965')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_nir.c            13
-rw-r--r--  src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp  58
2 files changed, 34 insertions, 37 deletions
diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index 12f47ad0ded..80f36dc2399 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
@@ -30,8 +30,17 @@
static void
brw_nir_lower_inputs(nir_shader *nir, bool is_scalar)
{
- nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
- is_scalar ? type_size_scalar : type_size_vec4);
+ switch (nir->stage) {
+ case MESA_SHADER_GEOMETRY:
+ foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ var->data.driver_location = var->data.location;
+ }
+ break;
+ default:
+ nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
+ is_scalar ? type_size_scalar : type_size_vec4);
+ break;
+ }
}
static void
diff --git a/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp b/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp
index af4c102c026..1b929b3df2c 100644
--- a/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp
+++ b/src/mesa/drivers/dri/i965/brw_vec4_gs_nir.cpp
@@ -29,41 +29,6 @@ namespace brw {
void
vec4_gs_visitor::nir_setup_inputs()
{
- nir_inputs = ralloc_array(mem_ctx, src_reg, nir->num_inputs);
-
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
- int offset = var->data.driver_location;
- if (var->type->base_type == GLSL_TYPE_ARRAY) {
- /* Geometry shader inputs are arrays, but they use an unusual array
- * layout: instead of all array elements for a given geometry shader
- * input being stored consecutively, all geometry shader inputs are
- * interleaved into one giant array. At this stage of compilation, we
- * assume that the stride of the array is BRW_VARYING_SLOT_COUNT.
- * Later, setup_attributes() will remap our accesses to the actual
- * input array.
- */
- assert(var->type->length > 0);
- int length = var->type->length;
- int size = type_size_vec4(var->type) / length;
- for (int i = 0; i < length; i++) {
- int location = var->data.location + i * BRW_VARYING_SLOT_COUNT;
- for (int j = 0; j < size; j++) {
- src_reg src = src_reg(ATTR, location + j, var->type);
- src = retype(src, brw_type_for_base_type(var->type));
- nir_inputs[offset] = src;
- offset++;
- }
- }
- } else {
- int size = type_size_vec4(var->type);
- for (int i = 0; i < size; i++) {
- src_reg src = src_reg(ATTR, var->data.location + i, var->type);
- src = retype(src, brw_type_for_base_type(var->type));
- nir_inputs[offset] = src;
- offset++;
- }
- }
- }
}
void
@@ -96,6 +61,29 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
src_reg src;
switch (instr->intrinsic) {
+ case nir_intrinsic_load_per_vertex_input_indirect:
+ assert(!"EmitNoIndirectInput should prevent this.");
+ case nir_intrinsic_load_per_vertex_input: {
+ /* The EmitNoIndirectInput flag guarantees our vertex index will
+ * be constant. We should handle indirects someday.
+ */
+ nir_const_value *vertex = nir_src_as_const_value(instr->src[0]);
+
+ /* Make up a type...we have no way of knowing... */
+ const glsl_type *const type = glsl_type::ivec(instr->num_components);
+
+ src = src_reg(ATTR, BRW_VARYING_SLOT_COUNT * vertex->u[0] +
+ instr->const_index[0], type);
+ dest = get_nir_dest(instr->dest, src.type);
+ dest.writemask = brw_writemask_for_size(instr->num_components);
+ emit(MOV(dest, src));
+ break;
+ }
+
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_input_indirect:
+ unreachable("nir_lower_io should have produced per_vertex intrinsics");
+
case nir_intrinsic_emit_vertex_with_counter: {
this->vertex_count =
retype(get_nir_src(instr->src[0], 1), BRW_REGISTER_TYPE_UD);