Diffstat (limited to 'src')
-rw-r--r--  src/compiler/nir/nir.h                       5
-rw-r--r--  src/compiler/nir/nir_linking_helpers.c     109
-rw-r--r--  src/mesa/state_tracker/st_glsl_to_nir.cpp  120
3 files changed, 119 insertions, 115 deletions
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 8e5102f53d8..3119e656e24 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -3143,6 +3143,11 @@ void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
+
+void nir_assign_io_var_locations(struct exec_list *var_list,
+ unsigned *size,
+ gl_shader_stage stage);
+
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
* interpolated as if with the "sample" qualifier. This requires
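For context, a minimal usage sketch of the helper exported above (hypothetical driver code, not part of this change; it assumes the counters are zeroed first, since the helper accumulates into *size):

    #include "nir.h"

    static void
    example_assign_io_locations(nir_shader *nir)
    {
       /* Sorts each variable list by location, then hands out consecutive
        * driver_location values and adds the total slot count to *size. */
       nir->num_inputs = 0;
       nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
                                   nir->info.stage);

       nir->num_outputs = 0;
       nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs,
                                   nir->info.stage);
    }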
diff --git a/src/compiler/nir/nir_linking_helpers.c b/src/compiler/nir/nir_linking_helpers.c
index faf8a1eaced..d25b350a75e 100644
--- a/src/compiler/nir/nir_linking_helpers.c
+++ b/src/compiler/nir/nir_linking_helpers.c
@@ -970,3 +970,112 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
return progress;
}
+
+/* TODO: is there a better helper somewhere for sorting an exec_list? */
+
+static void
+insert_sorted(struct exec_list *var_list, nir_variable *new_var)
+{
+ nir_foreach_variable(var, var_list) {
+ if (var->data.location > new_var->data.location) {
+ exec_node_insert_node_before(&var->node, &new_var->node);
+ return;
+ }
+ }
+ exec_list_push_tail(var_list, &new_var->node);
+}
+
+static void
+sort_varyings(struct exec_list *var_list)
+{
+ struct exec_list new_list;
+ exec_list_make_empty(&new_list);
+ nir_foreach_variable_safe(var, var_list) {
+ exec_node_remove(&var->node);
+ insert_sorted(&new_list, var);
+ }
+ exec_list_move_nodes_to(&new_list, var_list);
+}
+
+void
+nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
+ gl_shader_stage stage)
+{
+ unsigned location = 0;
+ unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
+ uint64_t processed_locs[2] = {0};
+
+ sort_varyings(var_list);
+
+ const int base = stage == MESA_SHADER_FRAGMENT ?
+ (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
+
+ int UNUSED last_loc = 0;
+ nir_foreach_variable(var, var_list) {
+
+ const struct glsl_type *type = var->type;
+ if (nir_is_per_vertex_io(var, stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ unsigned var_size = glsl_count_attribute_slots(type, false);
+
+ /* Builtins don't allow component packing, so we only need to worry
+ * about user-defined varyings sharing the same location.
+ */
+ bool processed = false;
+ if (var->data.location >= base) {
+ unsigned glsl_location = var->data.location - base;
+
+ for (unsigned i = 0; i < var_size; i++) {
+ if (processed_locs[var->data.index] &
+ ((uint64_t)1 << (glsl_location + i)))
+ processed = true;
+ else
+ processed_locs[var->data.index] |=
+ ((uint64_t)1 << (glsl_location + i));
+ }
+ }
+
+ /* Because component packing allows varyings to share the same location,
+ * we may already have processed this location.
+ */
+ if (processed) {
+ unsigned driver_location = assigned_locations[var->data.location];
+ var->data.driver_location = driver_location;
+ *size += glsl_count_attribute_slots(type, false);
+
+ /* An array may be packed such that it crosses multiple other arrays
+ * or variables, so we need to make sure we have allocated the elements
+ * consecutively if the previously processed var was shorter than
+ * the current array we are processing.
+ *
+ * NOTE: The code below assumes the var list is ordered in ascending
+ * location order.
+ */
+ assert(last_loc <= var->data.location);
+ last_loc = var->data.location;
+ unsigned last_slot_location = driver_location + var_size;
+ if (last_slot_location > location) {
+ unsigned num_unallocated_slots = last_slot_location - location;
+ unsigned first_unallocated_slot = var_size - num_unallocated_slots;
+ for (unsigned i = first_unallocated_slot; i < var_size; i++) {
+ assigned_locations[var->data.location + i] = location;
+ location++;
+ }
+ }
+ continue;
+ }
+
+ for (unsigned i = 0; i < var_size; i++) {
+ assigned_locations[var->data.location + i] = location + i;
+ }
+
+ var->data.driver_location = location;
+ location += var_size;
+ }
+
+ *size += location;
+}
+
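To illustrate the slot-reuse rule in the helper above in isolation, a standalone sketch with made-up data (it deliberately ignores arrays, per-vertex I/O and the data.index handling):

    #include <stdio.h>

    struct toy_var { unsigned location; unsigned driver_location; };

    int main(void)
    {
       /* Hypothetical varyings, already sorted by location; the two entries
        * at location 1 stand for component-packed variables sharing a slot. */
       struct toy_var vars[] = { { 1, 0 }, { 1, 0 }, { 3, 0 } };
       unsigned assigned[64];
       int seen[64] = { 0 };
       unsigned next_slot = 0;

       for (unsigned i = 0; i < 3; i++) {
          unsigned loc = vars[i].location;
          if (seen[loc]) {
             /* Packed sibling: reuse the driver slot handed out earlier. */
             vars[i].driver_location = assigned[loc];
             continue;
          }
          seen[loc] = 1;
          assigned[loc] = next_slot;
          vars[i].driver_location = next_slot++;
       }

       for (unsigned i = 0; i < 3; i++)
          printf("location %u -> driver_location %u\n",
                 vars[i].location, vars[i].driver_location);
       return 0;
    }

The real helper additionally walks arrays slot by slot and, when a packed array extends past the variables processed before it, allocates the remaining elements consecutively.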
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index 97dfe7a54a2..221c8a030b0 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -109,86 +109,6 @@ st_nir_assign_vs_in_locations(nir_shader *nir)
}
}
-static void
-st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- gl_shader_stage stage)
-{
- unsigned location = 0;
- unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
- uint64_t processed_locs[2] = {0};
-
- const int base = stage == MESA_SHADER_FRAGMENT ?
- (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
-
- int UNUSED last_loc = 0;
- nir_foreach_variable(var, var_list) {
-
- const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
- assert(glsl_type_is_array(type));
- type = glsl_get_array_element(type);
- }
-
- unsigned var_size = type_size(type);
-
- /* Builtins don't allow component packing so we only need to worry about
- * user defined varyings sharing the same location.
- */
- bool processed = false;
- if (var->data.location >= base) {
- unsigned glsl_location = var->data.location - base;
-
- for (unsigned i = 0; i < var_size; i++) {
- if (processed_locs[var->data.index] &
- ((uint64_t)1 << (glsl_location + i)))
- processed = true;
- else
- processed_locs[var->data.index] |=
- ((uint64_t)1 << (glsl_location + i));
- }
- }
-
- /* Because component packing allows varyings to share the same location
- * we may have already have processed this location.
- */
- if (processed) {
- unsigned driver_location = assigned_locations[var->data.location];
- var->data.driver_location = driver_location;
- *size += type_size(type);
-
- /* An array may be packed such that is crosses multiple other arrays
- * or variables, we need to make sure we have allocated the elements
- * consecutively if the previously proccessed var was shorter than
- * the current array we are processing.
- *
- * NOTE: The code below assumes the var list is ordered in ascending
- * location order.
- */
- assert(last_loc <= var->data.location);
- last_loc = var->data.location;
- unsigned last_slot_location = driver_location + var_size;
- if (last_slot_location > location) {
- unsigned num_unallocated_slots = last_slot_location - location;
- unsigned first_unallocated_slot = var_size - num_unallocated_slots;
- for (unsigned i = first_unallocated_slot; i < num_unallocated_slots; i++) {
- assigned_locations[var->data.location + i] = location;
- location++;
- }
- }
- continue;
- }
-
- for (unsigned i = 0; i < var_size; i++) {
- assigned_locations[var->data.location + i] = location + i;
- }
-
- var->data.driver_location = location;
- location += var_size;
- }
-
- *size += location;
-}
-
static int
st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
const char *name)
@@ -550,32 +470,6 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
}
}
-/* TODO any better helper somewhere to sort a list? */
-
-static void
-insert_sorted(struct exec_list *var_list, nir_variable *new_var)
-{
- nir_foreach_variable(var, var_list) {
- if (var->data.location > new_var->data.location) {
- exec_node_insert_node_before(&var->node, &new_var->node);
- return;
- }
- }
- exec_list_push_tail(var_list, &new_var->node);
-}
-
-static void
-sort_varyings(struct exec_list *var_list)
-{
- struct exec_list new_list;
- exec_list_make_empty(&new_list);
- nir_foreach_variable_safe(var, var_list) {
- exec_node_remove(&var->node);
- insert_sorted(&new_list, var);
- }
- exec_list_move_nodes_to(&new_list, var_list);
-}
-
static void
set_st_program(struct gl_program *prog,
struct gl_shader_program *shader_program,
@@ -914,32 +808,28 @@ st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
/* Re-lower global vars, to deal with any dead VS inputs. */
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
- sort_varyings(&nir->outputs);
- st_nir_assign_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(&nir->outputs,
&nir->num_outputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->outputs);
} else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL) {
- sort_varyings(&nir->inputs);
- st_nir_assign_var_locations(&nir->inputs,
+ nir_assign_io_var_locations(&nir->inputs,
&nir->num_inputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->inputs);
- sort_varyings(&nir->outputs);
- st_nir_assign_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(&nir->outputs,
&nir->num_outputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->outputs);
} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
- sort_varyings(&nir->inputs);
- st_nir_assign_var_locations(&nir->inputs,
+ nir_assign_io_var_locations(&nir->inputs,
&nir->num_inputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->inputs);
- st_nir_assign_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(&nir->outputs,
&nir->num_outputs,
nir->info.stage);
} else if (nir->info.stage == MESA_SHADER_COMPUTE) {