summaryrefslogtreecommitdiffstats
path: root/src/glsl/nir/nir_lower_io.c
diff options
context:
space:
mode:
authorKenneth Graunke <[email protected]>2015-10-19 11:44:28 -0700
committerKenneth Graunke <[email protected]>2015-11-13 15:15:41 -0800
commit134728fdaef9d2a5d072d25b31437ac0fecd9076 (patch)
treee7f64a77d46e769a4872082ddf1a8eba687ed38e /src/glsl/nir/nir_lower_io.c
parentc51d7d5fe3425b0b1cb551f47979a1e41f1f73d8 (diff)
nir: Allow output reads and add the relevant intrinsics.
Normally, we rely on nir_lower_outputs_to_temporaries to create shadow variables for outputs, buffering the results and writing them all out at the end of the program. However, this is infeasible for tessellation control shader outputs. Tessellation control shaders can generate multiple output vertices, and write per-vertex outputs. These are arrays indexed by the vertex number; each thread only writes one element, but can read any other element - including those being concurrently written by other threads. The barrier() intrinsic synchronizes between threads. Even if we tried to shadow every output element (which is of dubious value), we'd have to read updated values in at barrier() time, which means we need to allow output reads. Most stages should continue using nir_lower_outputs_to_temporaries(), but in theory drivers could choose not to if they really wanted. v2: Rebase to accommodate Jason's review feedback. Signed-off-by: Kenneth Graunke <[email protected]> Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/glsl/nir/nir_lower_io.c')
-rw-r--r--src/glsl/nir/nir_lower_io.c23
1 file changed, 17 insertions, 6 deletions
diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index b7b599da6d4..8a4177fb9f0 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -161,6 +161,15 @@ load_op(struct lower_io_state *state,
nir_intrinsic_load_input;
}
break;
+ case nir_var_shader_out:
+ if (per_vertex) {
+ op = has_indirect ? nir_intrinsic_load_per_vertex_output_indirect :
+ nir_intrinsic_load_per_vertex_output;
+ } else {
+ op = has_indirect ? nir_intrinsic_load_output_indirect :
+ nir_intrinsic_load_output;
+ }
+ break;
case nir_var_uniform:
op = has_indirect ? nir_intrinsic_load_uniform_indirect :
nir_intrinsic_load_uniform;
@@ -191,13 +200,16 @@ nir_lower_io_block(nir_block *block, void *void_state)
if (state->mode != -1 && state->mode != mode)
continue;
+ if (mode != nir_var_shader_in &&
+ mode != nir_var_shader_out &&
+ mode != nir_var_uniform)
+ continue;
+
switch (intrin->intrinsic) {
case nir_intrinsic_load_var: {
- if (mode != nir_var_shader_in && mode != nir_var_uniform)
- continue;
-
bool per_vertex =
- is_per_vertex_input(state, intrin->variables[0]->var);
+ is_per_vertex_input(state, intrin->variables[0]->var) ||
+ is_per_vertex_output(state, intrin->variables[0]->var);
nir_ssa_def *indirect;
nir_ssa_def *vertex_index;
@@ -241,8 +253,7 @@ nir_lower_io_block(nir_block *block, void *void_state)
}
case nir_intrinsic_store_var: {
- if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
- continue;
+ assert(mode == nir_var_shader_out);
nir_ssa_def *indirect;
nir_ssa_def *vertex_index;