path: root/src/compiler/nir/nir_lower_io.c
author    Kenneth Graunke <[email protected]>    2016-07-12 02:16:30 -0700
committer Kenneth Graunke <[email protected]>    2016-07-15 17:17:09 -0700
commit    349fe79c9bc9bd20b877f0425763509208179b47 (patch)
tree      f3fedcc7564af5da9891f6ec7e0b59fef3012696 /src/compiler/nir/nir_lower_io.c
parent    7171a9a87d001d4958a8fb14a78c8235ba4e7e26 (diff)
nir: Share get_io_offset handling in nir_lower_io.
The load/store/atomic cases all duplicated the get_io_offset code, with
a few tiny differences: stores didn't bother checking for per-vertex
inputs, because they can't be stored to, and atomics didn't check at
all, since shared variables aren't per-vertex.  However, it's harmless
to check, and allows us to share more code.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
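The refactoring pattern here is hoisting common setup code out of the switch
cases so it is computed once before dispatch. The following minimal,
self-contained C sketch illustrates that pattern only; the names
(is_per_vertex, compute_offset, lower, op_kind) are hypothetical stand-ins and
are not part of NIR or this commit.

#include <stdbool.h>
#include <stdio.h>

typedef enum { OP_LOAD, OP_STORE, OP_ATOMIC } op_kind;

/* Hypothetical stand-ins for is_per_vertex_*() and get_io_offset(); they
 * exist only to make this sketch compile and run on its own. */
static bool is_per_vertex(int var)
{
   return var % 2 == 0;
}

static int compute_offset(int var, bool per_vertex)
{
   return per_vertex ? var * 4 : var;
}

static void lower(op_kind op, int var)
{
   /* Hoisted above the switch, mirroring the commit: every case shares one
    * offset computation instead of duplicating it per case. */
   const bool per_vertex = is_per_vertex(var);
   const int offset = compute_offset(var, per_vertex);

   switch (op) {
   case OP_LOAD:
      printf("load   offset=%d per_vertex=%d\n", offset, per_vertex);
      break;
   case OP_STORE:
      printf("store  offset=%d per_vertex=%d\n", offset, per_vertex);
      break;
   case OP_ATOMIC:
      /* Atomics never take the per-vertex path, but computing the flag
       * anyway is harmless and lets all cases share the setup code. */
      printf("atomic offset=%d\n", offset);
      break;
   }
}

int main(void)
{
   lower(OP_LOAD, 2);
   lower(OP_STORE, 3);
   lower(OP_ATOMIC, 4);
   return 0;
}

In the actual diff below, the per_vertex check and the get_io_offset() call
play the role of the hoisted setup, and the load/store/atomic cases of the
switch consume the shared offset and vertex_index values.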
Diffstat (limited to 'src/compiler/nir/nir_lower_io.c')
-rw-r--r--   src/compiler/nir/nir_lower_io.c   33
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 9b53efffd94..45cc67123a5 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -279,19 +279,18 @@ nir_lower_io_block(nir_block *block,
b->cursor = nir_before_instr(instr);
- switch (intrin->intrinsic) {
- case nir_intrinsic_load_var: {
- const bool per_vertex =
- is_per_vertex_input(state, var) ||
- is_per_vertex_output(state, var);
+ const bool per_vertex =
+ is_per_vertex_input(state, var) || is_per_vertex_output(state, var);
- nir_ssa_def *offset;
- nir_ssa_def *vertex_index;
+ nir_ssa_def *offset;
+ nir_ssa_def *vertex_index;
- offset = get_io_offset(b, intrin->variables[0],
- per_vertex ? &vertex_index : NULL,
- state->type_size);
+ offset = get_io_offset(b, intrin->variables[0],
+ per_vertex ? &vertex_index : NULL,
+ state->type_size);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_var: {
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(state->mem_ctx,
load_op(mode, per_vertex));
@@ -330,15 +329,6 @@ nir_lower_io_block(nir_block *block,
case nir_intrinsic_store_var: {
assert(mode == nir_var_shader_out || mode == nir_var_shared);
- nir_ssa_def *offset;
- nir_ssa_def *vertex_index;
-
- const bool per_vertex = is_per_vertex_output(state, var);
-
- offset = get_io_offset(b, intrin->variables[0],
- per_vertex ? &vertex_index : NULL,
- state->type_size);
-
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(state->mem_ctx,
store_op(state, mode, per_vertex));
@@ -375,11 +365,6 @@ nir_lower_io_block(nir_block *block,
case nir_intrinsic_var_atomic_comp_swap: {
assert(mode == nir_var_shared);
- nir_ssa_def *offset;
-
- offset = get_io_offset(b, intrin->variables[0],
- NULL, state->type_size);
-
nir_intrinsic_instr *atomic =
nir_intrinsic_instr_create(state->mem_ctx,
atomic_op(intrin->intrinsic));