author     Jason Ekstrand <[email protected]>    2015-01-14 15:19:49 -0800
committer  Jason Ekstrand <[email protected]>    2015-01-15 07:20:24 -0800
commit     d3636da9026dd10493fb57b0d6803e33166abd1b
tree       f2ba2461f378c7bedad73219a9ca3350376b8397
parent     700ba5daaf9ec340dc967bbf1ba355470a573b37
nir: Add a pass for lowering copy instructions
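
As a rough illustration (the textual IR below is approximate, not exact
nir_print output), a copy such as

    copy_var a[*], b[*]        (a and b being vec4[2] variables)

becomes

    ssa_0 = load_var b[0]
    store_var a[0], ssa_0
    ssa_1 = load_var b[1]
    store_var a[1], ssa_1

Wildcard array derefs expand into one load/store pair per array element.
Each copy is expected to ultimately refer to a single vector, which is
what nir_split_var_copies produces.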
Reviewed-by: Connor Abbott <[email protected]>
-rw-r--r--  src/glsl/Makefile.sources            |   1
-rw-r--r--  src/glsl/nir/nir.h                   |   3
-rw-r--r--  src/glsl/nir/nir_lower_var_copies.c  | 223
3 files changed, 227 insertions, 0 deletions
diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
index 4c9aa77ceb6..006e9472188 100644
--- a/src/glsl/Makefile.sources
+++ b/src/glsl/Makefile.sources
@@ -32,6 +32,7 @@ NIR_FILES = \
$(GLSL_SRCDIR)/nir/nir_lower_system_values.c \
$(GLSL_SRCDIR)/nir/nir_lower_to_source_mods.c \
$(GLSL_SRCDIR)/nir/nir_lower_vars_to_ssa.c \
+ $(GLSL_SRCDIR)/nir/nir_lower_var_copies.c \
$(GLSL_SRCDIR)/nir/nir_lower_vec_to_movs.c \
$(GLSL_SRCDIR)/nir/nir_metadata.c \
$(GLSL_SRCDIR)/nir/nir_opcodes.c \
diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
index 12cac728cff..1faeb02156c 100644
--- a/src/glsl/nir/nir.h
+++ b/src/glsl/nir/nir.h
@@ -1461,6 +1461,9 @@ void nir_dump_cfg(nir_shader *shader, FILE *fp);
void nir_split_var_copies(nir_shader *shader);
+void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
+void nir_lower_var_copies(nir_shader *shader);
+
void nir_lower_global_vars_to_local(nir_shader *shader);
void nir_lower_locals_to_regs(nir_shader *shader);
diff --git a/src/glsl/nir/nir_lower_var_copies.c b/src/glsl/nir/nir_lower_var_copies.c
new file mode 100644
index 00000000000..a9d1a47ea69
--- /dev/null
+++ b/src/glsl/nir/nir_lower_var_copies.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand ([email protected])
+ *
+ */
+
+#include "nir.h"
+#include "nir_types.h"
+
+/*
+ * Lowers all copy intrinsics to sequences of load/store intrinsics.
+ */
+
+/* Walks down the deref chain and returns the next deref in the chain whose
+ * child is a wildcard. In other words, given the chain a[1].foo[*].bar,
+ * this function returns the deref to foo. Calling it a second time on the
+ * remaining [*].bar chain returns NULL, since that chain contains no deref
+ * whose child is a wildcard.
+ */
+static nir_deref *
+deref_next_wildcard_parent(nir_deref *deref)
+{
+ for (nir_deref *tail = deref; tail->child; tail = tail->child) {
+ if (tail->child->deref_type != nir_deref_type_array)
+ continue;
+
+ nir_deref_array *arr = nir_deref_as_array(tail->child);
+
+ if (arr->deref_array_type == nir_deref_array_type_wildcard)
+ return tail;
+ }
+
+ return NULL;
+}
+
+/* Returns the last deref in the chain.
+ */
+static nir_deref *
+get_deref_tail(nir_deref *deref)
+{
+ while (deref->child)
+ deref = deref->child;
+
+ return deref;
+}
+
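+/* Returns the number of "elements" at the given level of a deref chain:
+ * the length of an array or struct, the number of columns of a matrix,
+ * or the number of components of a vector. For example (illustrative),
+ * float[4] and a four-member struct both yield 4, mat3 yields 3, and
+ * vec2 yields 2.
+ */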
+static int
+type_get_length(const struct glsl_type *type)
+{
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY:
+ return glsl_get_length(type);
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ if (glsl_type_is_matrix(type))
+ return glsl_get_matrix_columns(type);
+ else
+ return glsl_get_vector_elements(type);
+ default:
+ unreachable("Invalid deref base type");
+ }
+}
+
+/* This function recursively walks the given deref chain and replaces the
+ * given copy instruction with an equivalent sequence of load/store
+ * operations.
+ *
+ * @copy_instr The copy instruction to replace; new instructions will be
+ * inserted before this one
+ *
+ * @dest_head The head of the destination variable deref chain
+ *
+ * @src_head The head of the source variable deref chain
+ *
+ * @dest_tail The current tail of the destination variable deref chain;
+ * this is used for recursion and external callers of this
+ * function should call it with tail == head
+ *
+ * @src_tail The current tail of the source variable deref chain;
+ * this is used for recursion and external callers of this
+ * function should call it with tail == head
+ *
+ * @mem_ctx The ralloc memory context used to allocate the new instructions
+ */
+static void
+emit_copy_load_store(nir_intrinsic_instr *copy_instr,
+ nir_deref_var *dest_head, nir_deref_var *src_head,
+ nir_deref *dest_tail, nir_deref *src_tail, void *mem_ctx)
+{
+ /* Find the next pair of wildcards */
+ nir_deref *src_arr_parent = deref_next_wildcard_parent(src_tail);
+ nir_deref *dest_arr_parent = deref_next_wildcard_parent(dest_tail);
+
+ if (src_arr_parent || dest_arr_parent) {
+ /* Wildcards had better come in matched pairs */
+ assert(src_arr_parent && dest_arr_parent);
+
+ nir_deref_array *src_arr = nir_deref_as_array(src_arr_parent->child);
+ nir_deref_array *dest_arr = nir_deref_as_array(dest_arr_parent->child);
+
+ unsigned length = type_get_length(src_arr_parent->type);
+ /* The wildcards should represent the same number of elements */
+ assert(length == type_get_length(dest_arr_parent->type));
+ assert(length > 0);
+
+ /* Walk over all of the elements that this wildcard refers to and
+ * call emit_copy_load_store on each one of them */
+ src_arr->deref_array_type = nir_deref_array_type_direct;
+ dest_arr->deref_array_type = nir_deref_array_type_direct;
+ for (unsigned i = 0; i < length; i++) {
+ src_arr->base_offset = i;
+ dest_arr->base_offset = i;
+ emit_copy_load_store(copy_instr, dest_head, src_head,
+ &dest_arr->deref, &src_arr->deref, mem_ctx);
+ }
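+ /* Restore the wildcards so that the caller's deref chains are left
+ * exactly as we found them. */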
+ src_arr->deref_array_type = nir_deref_array_type_wildcard;
+ dest_arr->deref_array_type = nir_deref_array_type_wildcard;
+ } else {
+ /* At this point there are no wildcards left, so all that remains is to
+ * emit the load and store operations. */
+ src_tail = get_deref_tail(src_tail);
+ dest_tail = get_deref_tail(dest_tail);
+
+ assert(src_tail->type == dest_tail->type);
+
+ unsigned num_components = glsl_get_vector_elements(src_tail->type);
+
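+ /* Clone the full deref chains for the new instructions; any wildcards
+ * in them have already been replaced with direct indices by the outer
+ * levels of recursion. */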
+ nir_deref *src_deref = nir_copy_deref(mem_ctx, &src_head->deref);
+ nir_deref *dest_deref = nir_copy_deref(mem_ctx, &dest_head->deref);
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_load_var);
+ load->num_components = num_components;
+ load->variables[0] = nir_deref_as_var(src_deref);
+ load->dest.is_ssa = true;
+ nir_ssa_def_init(&load->instr, &load->dest.ssa, num_components, NULL);
+
+ nir_instr_insert_before(&copy_instr->instr, &load->instr);
+
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_store_var);
+ store->num_components = num_components;
+ store->variables[0] = nir_deref_as_var(dest_deref);
+ store->src[0].is_ssa = true;
+ store->src[0].ssa = &load->dest.ssa;
+
+ nir_instr_insert_before(&copy_instr->instr, &store->instr);
+ }
+}
+
+/* Lowers a copy instruction to a sequence of load/store instructions
+ *
+ * The new instructions are placed before the copy instruction in the IR.
+ * The copy instruction itself is not removed; that is left to the caller.
+ */
+void
+nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx)
+{
+ assert(copy->intrinsic == nir_intrinsic_copy_var);
+ emit_copy_load_store(copy, copy->variables[0], copy->variables[1],
+ &copy->variables[0]->deref,
+ &copy->variables[1]->deref, mem_ctx);
+}
+
+static bool
+lower_var_copies_block(nir_block *block, void *mem_ctx)
+{
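+ /* Use the _safe iterator variant because the copy instruction is
+ * removed (and freed) from the block as we go. */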
+ nir_foreach_instr_safe(block, instr) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *copy = nir_instr_as_intrinsic(instr);
+ if (copy->intrinsic != nir_intrinsic_copy_var)
+ continue;
+
+ nir_lower_var_copy_instr(copy, mem_ctx);
+
+ nir_instr_remove(&copy->instr);
+ ralloc_free(copy);
+ }
+
+ return true;
+}
+
+static void
+lower_var_copies_impl(nir_function_impl *impl)
+{
+ nir_foreach_block(impl, lower_var_copies_block, ralloc_parent(impl));
+}
+
+/* Lowers every copy_var instruction in the program to a sequence of
+ * load/store instructions.
+ */
+void
+nir_lower_var_copies(nir_shader *shader)
+{
+ nir_foreach_overload(shader, overload) {
+ if (overload->impl)
+ lower_var_copies_impl(overload->impl);
+ }
+}
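
For reference, a minimal sketch of a typical caller (the wrapper function
below is hypothetical and not part of this patch; only the two NIR entry
points come from the patch):

   static void
   lower_copies(nir_shader *shader)
   {
      /* Split struct/array copies into per-vector copies (possibly with
       * wildcards), then lower each remaining copy_var into
       * load_var/store_var pairs.
       */
      nir_split_var_copies(shader);
      nir_lower_var_copies(shader);
   }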