author     Kenneth Graunke <[email protected]>    2015-10-01 00:46:19 -0700
committer  Kenneth Graunke <[email protected]>    2015-10-04 14:00:01 -0700
commit     5d7f8cb5a511977e256e773716fac3415d01443e (patch)
tree       8f9a63c314cb8e9ca5880a044534eb803b695512 /src/glsl
parent     f2a4b40cf15cbc5eaab1776ad275ed8eead3322f (diff)
nir: Introduce new nir_intrinsic_load_per_vertex_input intrinsics.
Geometry and tessellation shaders process multiple vertices; their
inputs are arrays indexed by the vertex number.  While GLSL makes this
look like a normal array, it can be very different behind the scenes.

On Intel hardware, all inputs for a particular vertex are stored
together - as if they were grouped into a single struct.  This means
that consecutive elements of these top-level arrays are not contiguous.
In fact, they may sometimes be in completely disjoint memory segments.

NIR's existing load_input intrinsics are awkward for this case, as they
distill everything down to a single offset.  We'd much rather keep the
vertex ID separate, but build up an offset as normal beyond that.

This patch introduces new nir_intrinsic_load_per_vertex_input
intrinsics to handle this case.  They work like ordinary load_input
intrinsics, but have an extra source (src[0]) which represents the
outermost array index.

v2: Rebase on earlier refactors.
v3: Use ssa defs instead of nir_srcs; rebase on earlier refactors.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
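For illustration only (not part of the patch below): after this lowering, a
geometry shader read such as gl_in[i].gl_Position keeps the vertex index i as
src[0] of the new intrinsic instead of folding it into the single offset, and
the *_indirect variant carries any remaining indirect offset in src[1].  A
minimal sketch of how a backend consumer could pick those sources apart; the
setup_* helpers are hypothetical stand-ins for whatever the driver emits:

#include "nir.h"

/* Hypothetical driver hooks -- stand-ins for the backend's input-read
 * emission; these are not real Mesa functions. */
void setup_input_read(unsigned vertex, unsigned base);
void setup_indirect_input_read(nir_ssa_def *vertex, unsigned base);

static void
visit_load_per_vertex_input(nir_intrinsic_instr *intrin)
{
   /* The vertex index is always src[0]; for load_per_vertex_input_indirect
    * the extra indirect offset follows in src[1], matching the lowering in
    * nir_lower_io.c below.
    */
   nir_const_value *vtx = nir_src_as_const_value(intrin->src[0]);
   unsigned base = intrin->const_index[0];

   if (vtx) {
      /* Constant vertex index, e.g. gl_in[2] gives vtx->u[0] == 2. */
      setup_input_read(vtx->u[0], base);
   } else {
      /* Variable vertex index: pass the SSA def through. */
      setup_indirect_input_read(intrin->src[0].ssa, base);
   }
}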
Diffstat (limited to 'src/glsl')
-rw-r--r--  src/glsl/nir/nir_intrinsics.h |  1
-rw-r--r--  src/glsl/nir/nir_lower_io.c   | 55
-rw-r--r--  src/glsl/nir/nir_print.c      |  2
3 files changed, 52 insertions, 6 deletions
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index ac4c2ba0eb2..263d8c14f4a 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -228,6 +228,7 @@ SYSTEM_VALUE(num_work_groups, 3, 0)
LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(ubo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(input, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(per_vertex_input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
LOAD(ssbo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
/*
diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index b1cf7bec6d2..688b48f4675 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -63,8 +63,20 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
*size = location;
}
+/**
+ * Returns true if we're processing a stage whose inputs are arrays indexed
+ * by a vertex number (such as geometry shader inputs).
+ */
+static bool
+stage_uses_per_vertex_inputs(struct lower_io_state *state)
+{
+ gl_shader_stage stage = state->builder.shader->stage;
+ return stage == MESA_SHADER_GEOMETRY;
+}
+
static unsigned
get_io_offset(nir_deref_var *deref, nir_instr *instr,
+ nir_ssa_def **vertex_index,
nir_ssa_def **out_indirect,
struct lower_io_state *state)
{
@@ -75,6 +87,22 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr,
b->cursor = nir_before_instr(instr);
nir_deref *tail = &deref->deref;
+
+ /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
+ * outermost array index separate. Process the rest normally.
+ */
+ if (vertex_index != NULL) {
+ tail = tail->child;
+ assert(tail->deref_type == nir_deref_type_array);
+ nir_deref_array *deref_array = nir_deref_as_array(tail);
+
+ nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
+ }
+ *vertex_index = vtx;
+ }
+
while (tail->child != NULL) {
const struct glsl_type *parent_type = tail->type;
tail = tail->child;
@@ -107,13 +135,19 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr,
}
static nir_intrinsic_op
-load_op(nir_variable_mode mode, bool has_indirect)
+load_op(struct lower_io_state *state,
+ nir_variable_mode mode, bool per_vertex, bool has_indirect)
{
nir_intrinsic_op op;
switch (mode) {
case nir_var_shader_in:
- op = has_indirect ? nir_intrinsic_load_input_indirect :
- nir_intrinsic_load_input;
+ if (per_vertex) {
+ op = has_indirect ? nir_intrinsic_load_per_vertex_input_indirect :
+ nir_intrinsic_load_per_vertex_input;
+ } else {
+ op = has_indirect ? nir_intrinsic_load_input_indirect :
+ nir_intrinsic_load_input;
+ }
break;
case nir_var_uniform:
op = has_indirect ? nir_intrinsic_load_uniform_indirect :
@@ -150,14 +184,20 @@ nir_lower_io_block(nir_block *block, void *void_state)
if (mode != nir_var_shader_in && mode != nir_var_uniform)
continue;
+ bool per_vertex = stage_uses_per_vertex_inputs(state) &&
+ mode == nir_var_shader_in;
+
nir_ssa_def *indirect;
+ nir_ssa_def *vertex_index;
unsigned offset = get_io_offset(intrin->variables[0], &intrin->instr,
+ per_vertex ? &vertex_index : NULL,
&indirect, state);
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(state->mem_ctx,
- load_op(mode, indirect));
+ load_op(state, mode, per_vertex,
+ indirect));
load->num_components = intrin->num_components;
unsigned location = intrin->variables[0]->var->data.driver_location;
@@ -168,8 +208,11 @@ nir_lower_io_block(nir_block *block, void *void_state)
load->const_index[0] = location + offset;
}
+ if (per_vertex)
+ load->src[0] = nir_src_for_ssa(vertex_index);
+
if (indirect)
- load->src[0] = nir_src_for_ssa(indirect);
+ load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(indirect);
if (intrin->dest.is_ssa) {
nir_ssa_dest_init(&load->instr, &load->dest,
@@ -192,7 +235,7 @@ nir_lower_io_block(nir_block *block, void *void_state)
nir_ssa_def *indirect;
unsigned offset = get_io_offset(intrin->variables[0], &intrin->instr,
- &indirect, state);
+ NULL, &indirect, state);
offset += intrin->variables[0]->var->data.driver_location;
nir_intrinsic_op store_op;
diff --git a/src/glsl/nir/nir_print.c b/src/glsl/nir/nir_print.c
index 3936bae078b..09663996869 100644
--- a/src/glsl/nir/nir_print.c
+++ b/src/glsl/nir/nir_print.c
@@ -443,6 +443,8 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_input_indirect:
+ case nir_intrinsic_load_per_vertex_input:
+ case nir_intrinsic_load_per_vertex_input_indirect:
var_list = &state->shader->inputs;
break;
case nir_intrinsic_store_output: