summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorJason Ekstrand <[email protected]>2014-10-22 11:22:53 -0700
committerJason Ekstrand <[email protected]>2015-01-15 07:18:59 -0800
commit2943522d80393d7996f3aeec173df399d8bd9bbc (patch)
tree675902d02826ea5e656f21c390db92f3856b2738 /src
parentff0a9fcf332ce319fae1eb53f3e5d863d0289cbf (diff)
nir: Add a naive from-SSA pass
This pass is implemented rather naively, but it should be enough to get us up and running. Eventually we will probably want something smarter that doesn't generate all of the redundant moves. However, the i965 backend should be able to handle the movs, so I'm not too worried about it in the short term.
Diffstat (limited to 'src')
-rw-r--r--src/glsl/Makefile.sources1
-rw-r--r--src/glsl/nir/nir.h1
-rw-r--r--src/glsl/nir/nir_from_ssa.c193
3 files changed, 195 insertions, 0 deletions
diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
index eb390fb14d4..20bf272569c 100644
--- a/src/glsl/Makefile.sources
+++ b/src/glsl/Makefile.sources
@@ -17,6 +17,7 @@ NIR_FILES = \
$(GLSL_SRCDIR)/nir/nir.c \
$(GLSL_SRCDIR)/nir/nir.h \
$(GLSL_SRCDIR)/nir/nir_dominance.c \
+ $(GLSL_SRCDIR)/nir/nir_from_ssa.c \
$(GLSL_SRCDIR)/nir/nir_intrinsics.c \
$(GLSL_SRCDIR)/nir/nir_intrinsics.h \
$(GLSL_SRCDIR)/nir/nir_lower_atomics.c \
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index 195908d9a13..0ddd44a9905 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -1296,6 +1296,7 @@ void nir_lower_atomics(nir_shader *shader);
void nir_convert_to_ssa_impl(nir_function_impl *impl);
void nir_convert_to_ssa(nir_shader *shader);
+void nir_convert_from_ssa(nir_shader *shader);
bool nir_opt_global_to_local(nir_shader *shader);
diff --git a/src/glsl/nir/nir_from_ssa.c b/src/glsl/nir/nir_from_ssa.c
new file mode 100644
index 00000000000..abfee3577a0
--- /dev/null
+++ b/src/glsl/nir/nir_from_ssa.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "nir.h"
+
+/*
+ * Implements a quick-and-dirty out-of-ssa pass.
+ */
+
+/* State threaded through all of the out-of-SSA callbacks via their
+ * void * user-data pointer.
+ */
+struct from_ssa_state {
+   void *mem_ctx;                   /* ralloc context for newly created instructions */
+   void *dead_ctx;                  /* ralloc context that owns removed (dead) instructions */
+   struct hash_table *ssa_table;    /* maps nir_ssa_def * -> replacement nir_register * */
+   nir_function_impl *current_impl; /* impl in which new local registers are created */
+};
+
+/* nir_foreach_src callback: if the source still refers to an SSA def,
+ * rewrite it to use the register previously created for that def.
+ * Non-SSA sources are left untouched.  Always returns true so iteration
+ * continues over the remaining sources.
+ */
+static bool
+rewrite_ssa_src(nir_src *src, void *void_state)
+{
+   struct from_ssa_state *state = void_state;
+
+   if (src->is_ssa) {
+      struct hash_entry *entry =
+         _mesa_hash_table_search(state->ssa_table, src->ssa);
+      /* The def must already have a register in ssa_table.  NOTE(review):
+       * this relies on every def being visited before its uses during the
+       * block walk (SSA dominance order) -- confirm for all callers.
+       */
+      assert(entry);
+      /* Zero the whole source first so the non-register union fields
+       * (base_offset, indirect, is_ssa) are cleared before we fill in the
+       * register reference.
+       */
+      memset(src, 0, sizeof *src);
+      src->reg.reg = (nir_register *)entry->data;
+   }
+
+   return true;
+}
+
+/* Creates a fresh local register to stand in for the given SSA def and
+ * records the def -> register mapping in state->ssa_table so that later
+ * source/destination rewrites can find it.  Returns the new register.
+ */
+static nir_register *
+reg_create_from_def(nir_ssa_def *def, struct from_ssa_state *state)
+{
+   nir_register *reg = nir_local_reg_create(state->current_impl);
+   reg->name = def->name;
+   reg->num_components = def->num_components;
+   reg->num_array_elems = 0; /* scalar/vector register, not an array */
+
+   /* Might as well steal the use-def information from SSA.  The sets the
+    * register was created with are destroyed and replaced by the def's
+    * existing use sets; the defining instruction is added to reg->defs.
+    */
+   _mesa_set_destroy(reg->uses, NULL);
+   reg->uses = def->uses;
+   _mesa_set_destroy(reg->if_uses, NULL);
+   reg->if_uses = def->if_uses;
+   _mesa_set_add(reg->defs, _mesa_hash_pointer(def->parent_instr),
+                 def->parent_instr);
+
+   /* Record the mapping; the caller is responsible for rewriting the
+    * actual destination (see rewrite_ssa_dest).
+    */
+   _mesa_hash_table_insert(state->ssa_table, def, reg);
+
+   return reg;
+}
+
+/* nir_foreach_dest callback: replaces an SSA destination with a freshly
+ * created register destination (creating the register and registering the
+ * def -> register mapping as a side effect).  Non-SSA destinations are
+ * left untouched.  Always returns true to continue iteration.
+ */
+static bool
+rewrite_ssa_dest(nir_dest *dest, void *void_state)
+{
+   struct from_ssa_state *state = void_state;
+
+   if (dest->is_ssa) {
+      nir_register *reg = reg_create_from_def(&dest->ssa, state);
+      /* Clear the union (including is_ssa) before filling in the register */
+      memset(dest, 0, sizeof *dest);
+      dest->reg.reg = reg;
+   }
+
+   return true;
+}
+
+/* Per-block pass: rewrites every remaining SSA def and use in the block to
+ * register accesses.  ssa_undef instructions are special-cased: a register
+ * is created for the undef's def (so its uses can be rewritten), but the
+ * instruction itself is removed -- reads of the never-written register
+ * yield undefined data, which is exactly what an undef provides.
+ */
+static bool
+convert_from_ssa_block(nir_block *block, void *void_state)
+{
+   struct from_ssa_state *state = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type == nir_instr_type_ssa_undef) {
+         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
+         reg_create_from_def(&undef->def, state);
+         exec_node_remove(&instr->node);
+         /* Keep the removed instruction alive in dead_ctx until the whole
+          * pass finishes, in case anything still points at it.
+          */
+         ralloc_steal(state->dead_ctx, instr);
+      } else {
+         nir_foreach_src(instr, rewrite_ssa_src, state);
+         nir_foreach_dest(instr, rewrite_ssa_dest, state);
+      }
+   }
+
+   /* An if statement following this block may use an SSA value as its
+    * condition; rewrite that too.  NOTE(review): the explicit next != NULL
+    * check and nir_cf_node_is_last() look like they overlap -- confirm
+    * whether one of the two is redundant.
+    */
+   if (block->cf_node.node.next != NULL && /* check that we aren't the end node */
+       !nir_cf_node_is_last(&block->cf_node) &&
+       nir_cf_node_next(&block->cf_node)->type == nir_cf_node_if) {
+      nir_if *if_stmt = nir_cf_node_as_if(nir_cf_node_next(&block->cf_node));
+      rewrite_ssa_src(&if_stmt->condition, state);
+   }
+
+   return true;
+}
+
+/* Lowers all phi nodes at the top of a block: the phi's destination is
+ * rewritten to a register, then for each phi source an imov into that
+ * register is inserted at the end of the corresponding predecessor block.
+ * The phi instruction itself is then removed (kept alive in dead_ctx).
+ * This runs before convert_from_ssa_block (see nir_convert_from_ssa_impl).
+ */
+static bool
+remove_phi_nodes(nir_block *block, void *void_state)
+{
+   struct from_ssa_state *state = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      /* Phi nodes only ever come at the start of a block */
+      if (instr->type != nir_instr_type_phi)
+         break;
+
+      /* Turns the phi's SSA destination into a register destination, so
+       * the nir_dest_copy below copies a register reference.
+       */
+      nir_foreach_dest(instr, rewrite_ssa_dest, state);
+
+      nir_phi_instr *phi = nir_instr_as_phi(instr);
+      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
+         assert(src->src.is_ssa);
+         struct hash_entry *entry =
+            _mesa_hash_table_search(state->ssa_table, src->src.ssa);
+         nir_alu_instr *mov = nir_alu_instr_create(state->mem_ctx, nir_op_imov);
+         mov->dest.dest = nir_dest_copy(phi->dest, state->mem_ctx);
+         if (entry) {
+            /* The source's def already has a register; use it directly. */
+            nir_register *reg = (nir_register *)entry->data;
+            mov->src[0].src.reg.reg = reg;
+            mov->dest.write_mask = (1 << reg->num_components) - 1;
+         } else {
+            /* The def hasn't been converted yet; copy the SSA source as-is.
+             * It gets rewritten to a register by the later
+             * convert_from_ssa_block pass.
+             */
+            mov->src[0].src = nir_src_copy(src->src, state->mem_ctx);
+            mov->dest.write_mask = (1 << src->src.ssa->num_components) - 1;
+         }
+
+         nir_instr *block_end = nir_block_last_instr(src->pred);
+         if (block_end && block_end->type == nir_instr_type_jump) {
+            /* A jump must remain the last instruction in its block, so
+             * place the move immediately before the jump.  (The original
+             * comment said "after the jump", which contradicts the
+             * insert-before call below.)
+             */
+            exec_node_insert_node_before(&block_end->node, &mov->instr.node);
+         } else {
+            /* No jump: simply append the move at the end of the block. */
+            exec_list_push_tail(&src->pred->instr_list, &mov->instr.node);
+         }
+      }
+
+      exec_node_remove(&instr->node);
+      ralloc_steal(state->dead_ctx, instr);
+   }
+
+   return true;
+}
+
+/* Converts a single function implementation out of SSA form.  Phi nodes
+ * are lowered first (so the movs they generate exist before the rewrite
+ * pass runs), then all remaining SSA defs/uses are rewritten to registers.
+ */
+static void
+nir_convert_from_ssa_impl(nir_function_impl *impl)
+{
+   struct from_ssa_state state;
+
+   /* New instructions are allocated out of the same context as the impl */
+   state.mem_ctx = ralloc_parent(impl);
+   state.dead_ctx = ralloc_context(NULL);
+   state.current_impl = impl;
+   state.ssa_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                             _mesa_key_pointer_equal);
+
+   nir_foreach_block(impl, remove_phi_nodes, &state);
+   nir_foreach_block(impl, convert_from_ssa_block, &state);
+
+   /* Clean up dead instructions and the hash table */
+   ralloc_free(state.dead_ctx);
+   _mesa_hash_table_destroy(state.ssa_table, NULL);
+}
+
+/* Public entry point: takes every function overload in the shader out of
+ * SSA form.  Overloads without an implementation are skipped.
+ */
+void
+nir_convert_from_ssa(nir_shader *shader)
+{
+   nir_foreach_overload(shader, overload) {
+      if (overload->impl == NULL)
+         continue;
+
+      nir_convert_from_ssa_impl(overload->impl);
+   }
+}