author     Connor Abbott <[email protected]>  2019-05-10 10:18:12 +0200
committer  Connor Abbott <[email protected]>  2019-07-08 14:15:06 +0200
commit     fd5ed6b9d6ab5e905a10c2033007ca478c33e1af (patch)
tree       305f7923695fbe3580fecaf1725df72dc835d8a8 /src/compiler
parent     27f0c3c15ef753bf5eb089c725341790e9693c53 (diff)
nir: Move st_nir_assign_var_locations() to common code
It isn't really doing anything Gallium-specific, and it's needed for handling
component packing, overlapping, etc.

Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Reviewed-by: Marek Olšák <[email protected]>
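Since the helper is now declared in nir.h, a non-Gallium driver can call it
directly. A minimal hypothetical caller sketch (the shader pointer, its
outputs list and the num_outputs counter are illustrative assumptions, not
part of this commit):

    /* Hypothetical caller: assign consecutive driver_locations to the
     * shader's output variables and record how many slots they consume. */
    unsigned num_outputs = 0;
    nir_assign_io_var_locations(&nir->outputs, &num_outputs, nir->info.stage);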
Diffstat (limited to 'src/compiler')
-rw-r--r--   src/compiler/nir/nir.h                     5
-rw-r--r--   src/compiler/nir/nir_linking_helpers.c   109
2 files changed, 114 insertions, 0 deletions
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 8e5102f53d8..3119e656e24 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3143,6 +3143,11 @@ void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
+
+void nir_assign_io_var_locations(struct exec_list *var_list,
+                                 unsigned *size,
+                                 gl_shader_stage stage);
+
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
* interpolated as if with the "sample" qualifier. This requires
diff --git a/src/compiler/nir/nir_linking_helpers.c b/src/compiler/nir/nir_linking_helpers.c
index faf8a1eaced..d25b350a75e 100644
--- a/src/compiler/nir/nir_linking_helpers.c
+++ b/src/compiler/nir/nir_linking_helpers.c
@@ -970,3 +970,112 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
return progress;
}
+
+/* TODO any better helper somewhere to sort a list? */
+
+static void
+insert_sorted(struct exec_list *var_list, nir_variable *new_var)
+{
+   nir_foreach_variable(var, var_list) {
+      if (var->data.location > new_var->data.location) {
+         exec_node_insert_node_before(&var->node, &new_var->node);
+         return;
+      }
+   }
+   exec_list_push_tail(var_list, &new_var->node);
+}
+
+static void
+sort_varyings(struct exec_list *var_list)
+{
+   struct exec_list new_list;
+   exec_list_make_empty(&new_list);
+   nir_foreach_variable_safe(var, var_list) {
+      exec_node_remove(&var->node);
+      insert_sorted(&new_list, var);
+   }
+   exec_list_move_nodes_to(&new_list, var_list);
+}
+
+void
+nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
+                            gl_shader_stage stage)
+{
+   unsigned location = 0;
+   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
+   uint64_t processed_locs[2] = {0};
+
+   sort_varyings(var_list);
+
+   const int base = stage == MESA_SHADER_FRAGMENT ?
+      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
+
+   int UNUSED last_loc = 0;
+   nir_foreach_variable(var, var_list) {
+
+      const struct glsl_type *type = var->type;
+      if (nir_is_per_vertex_io(var, stage)) {
+         assert(glsl_type_is_array(type));
+         type = glsl_get_array_element(type);
+      }
+
+      unsigned var_size = glsl_count_attribute_slots(type, false);
+
+      /* Builtins don't allow component packing so we only need to worry about
+       * user defined varyings sharing the same location.
+       */
+      bool processed = false;
+      if (var->data.location >= base) {
+         unsigned glsl_location = var->data.location - base;
+
+         for (unsigned i = 0; i < var_size; i++) {
+            if (processed_locs[var->data.index] &
+                ((uint64_t)1 << (glsl_location + i)))
+               processed = true;
+            else
+               processed_locs[var->data.index] |=
+                  ((uint64_t)1 << (glsl_location + i));
+         }
+      }
+
+      /* Because component packing allows varyings to share the same location
+       * we may already have processed this location.
+       */
+      if (processed) {
+         unsigned driver_location = assigned_locations[var->data.location];
+         var->data.driver_location = driver_location;
+         *size += glsl_count_attribute_slots(type, false);
+
+         /* An array may be packed such that it crosses multiple other arrays
+          * or variables; we need to make sure we have allocated the elements
+          * consecutively if the previously processed var was shorter than
+          * the current array we are processing.
+          *
+          * NOTE: The code below assumes the var list is ordered in ascending
+          * location order.
+          */
+         assert(last_loc <= var->data.location);
+         last_loc = var->data.location;
+         unsigned last_slot_location = driver_location + var_size;
+         if (last_slot_location > location) {
+            unsigned num_unallocated_slots = last_slot_location - location;
+            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
+            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
+               assigned_locations[var->data.location + i] = location;
+               location++;
+            }
+         }
+         continue;
+      }
+
+      for (unsigned i = 0; i < var_size; i++) {
+         assigned_locations[var->data.location + i] = location + i;
+      }
+
+      var->data.driver_location = location;
+      location += var_size;
+   }
+
+   *size += location;
+}
+
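The overlap handling in the "processed" branch above can be checked with a
standalone sketch (not part of the commit; plain unsigned arithmetic stands in
for nir_variable and the slot bookkeeping, and the two packed varyings are
hypothetical):

    #include <stdio.h>

    int main(void)
    {
       unsigned assigned_locations[64];
       unsigned location = 0;

       /* Varying A: base slot 0, 2 attribute slots -> fresh assignment. */
       const unsigned a_base = 0, a_size = 2;
       for (unsigned i = 0; i < a_size; i++)
          assigned_locations[a_base + i] = location + i;
       location += a_size;

       /* Varying B: packed into the same base slot, 4 attribute slots.
        * The base slot was already processed, so reuse its driver location
        * and allocate only the slots that extend past 'location'. */
       const unsigned b_base = 0, b_size = 4;
       unsigned driver_location = assigned_locations[b_base];            /* 0 */
       unsigned last_slot_location = driver_location + b_size;           /* 4 */
       if (last_slot_location > location) {
          unsigned num_unallocated_slots = last_slot_location - location;   /* 2 */
          unsigned first_unallocated_slot = b_size - num_unallocated_slots; /* 2 */
          for (unsigned i = first_unallocated_slot; i < b_size; i++)
             assigned_locations[b_base + i] = location++;
       }

       /* Prints: A -> 0, B -> 0, slots used = 4 */
       printf("A -> %u, B -> %u, slots used = %u\n",
              assigned_locations[a_base], driver_location, location);
       return 0;
    }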