author    Kenneth Graunke <[email protected]>  2015-08-26 03:07:29 -0700
committer Kenneth Graunke <[email protected]>  2015-10-01 10:58:30 -0700
commit    193d29516ddb76f469fea17119493e2b685bc6b7 (patch)
tree      c668a8c83cc3c5a4c92bfb18de67f704683976db /src
parent    39a1d36a67974dd9fc3c0d834d6a117cdfed8f33 (diff)
i965/nir: Refactor input/output lowering setup into helpers.
The code for input lowering is going to get significantly more complicated shortly, so I wanted to pull it out. Vertex shader inputs are handled nearly identically regardless of vec4/scalar mode, so I opted to not split that.

I thought about having each function actually do the lowering, but one pass through nir_lower_io that handles all types (which weren't handled earlier) is probably more efficient.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Matt Turner <[email protected]>
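[Editor's sketch] As a rough, self-contained illustration of the vec4/scalar split this patch threads through nir_assign_var_locations() and nir_lower_io(): scalar mode gives every component its own location, while vec4 mode charges one vec4 slot per column. The struct and model_* names below are stand-ins for Mesa's glsl_type and the real type_size_scalar/type_size_vec4 callbacks; this is an assumption-laden model, not Mesa code.

#include <assert.h>

/* Stand-in for Mesa's glsl_type: just enough shape to count slots. */
struct slot_type {
   int columns;     /* 1 for scalars/vectors; the column count for matrices */
   int components;  /* components per column, 1..4 */
};

/* Scalar mode: every component gets its own driver location. */
static int
model_type_size_scalar(const struct slot_type *t)
{
   return t->columns * t->components;
}

/* Vec4 mode: each column occupies one vec4 slot, however full it is. */
static int
model_type_size_vec4(const struct slot_type *t)
{
   return t->columns;
}

int main(void)
{
   struct slot_type mat3 = { .columns = 3, .components = 3 };
   assert(model_type_size_scalar(&mat3) == 9);  /* nine scalar locations */
   assert(model_type_size_vec4(&mat3) == 3);    /* three vec4 slots */
   return 0;
}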
Diffstat (limited to 'src')
-rw-r--r--  src/mesa/drivers/dri/i965/brw_nir.c | 46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index 40a83268979..2812fd72fd9 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
@@ -28,6 +28,26 @@
#include "program/prog_to_nir.h"
static void
+brw_nir_lower_inputs(nir_shader *nir,
+                     const struct gl_program *prog,
+                     bool is_scalar)
+{
+   nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
+                            is_scalar ? type_size_scalar : type_size_vec4);
+}
+
+static void
+brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
+{
+   if (is_scalar) {
+      nir_assign_var_locations(&nir->outputs, &nir->num_outputs, type_size_scalar);
+   } else {
+      foreach_list_typed(nir_variable, var, node, &nir->outputs)
+         var->data.driver_location = var->data.location;
+   }
+}
+
+static void
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
@@ -122,26 +142,12 @@ brw_create_nir(struct brw_context *brw,
   /* Get rid of split copies */
   nir_optimize(nir, is_scalar);
-   if (is_scalar) {
-      nir_assign_var_locations(&nir->uniforms,
-                               &nir->num_uniforms,
-                               type_size_scalar);
-      nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
-      nir_assign_var_locations(&nir->outputs, &nir->num_outputs, type_size_scalar);
-      nir_lower_io(nir, -1, type_size_scalar);
-   } else {
-      nir_assign_var_locations(&nir->uniforms,
-                               &nir->num_uniforms,
-                               type_size_vec4);
-
-      nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_vec4);
-
-      foreach_list_typed(nir_variable, var, node, &nir->outputs)
-         var->data.driver_location = var->data.location;
-
-      nir_lower_io(nir, -1, type_size_vec4);
-   }
-
+   brw_nir_lower_inputs(nir, prog, is_scalar);
+   brw_nir_lower_outputs(nir, is_scalar);
+   nir_assign_var_locations(&nir->uniforms,
+                            &nir->num_uniforms,
+                            is_scalar ? type_size_scalar : type_size_vec4);
+   nir_lower_io(nir, -1, is_scalar ? type_size_scalar : type_size_vec4);
   nir_validate_shader(nir);
   nir_remove_dead_variables(nir);
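[Editor's sketch] For context on the call sequence above: nir_assign_var_locations() only hands out packed driver_location values, with the chosen type_size callback setting the stride, which is why one trailing nir_lower_io() pass can then handle every variable mode at once. Below is a minimal, self-contained model of that packing under assumed names (model_var stands in for nir_variable; this is not Mesa's implementation).

#include <stdio.h>

/* Stand-in for nir_variable: size_in_slots is what the selected
 * type_size callback would return for the variable's type. */
struct model_var {
   const char *name;
   int size_in_slots;
   int driver_location;  /* output: packed starting location */
};

/* Walk the variables in list order and hand out consecutive locations;
 * the accumulated total plays the role of num_inputs/num_outputs/
 * num_uniforms in the patch. */
static void
model_assign_var_locations(struct model_var *vars, int count, int *total)
{
   int location = 0;
   for (int i = 0; i < count; i++) {
      vars[i].driver_location = location;
      location += vars[i].size_in_slots;
   }
   *total = location;
}

int main(void)
{
   /* e.g. vec4 mode: a mat4 input (4 slots) followed by a vec3 (1 slot) */
   struct model_var inputs[] = {
      { "mvp",    4, 0 },
      { "normal", 1, 0 },
   };
   int num_inputs;
   model_assign_var_locations(inputs, 2, &num_inputs);
   printf("%s -> %d, %s -> %d, num_inputs = %d\n",
          inputs[0].name, inputs[0].driver_location,
          inputs[1].name, inputs[1].driver_location, num_inputs);
   /* prints: mvp -> 0, normal -> 4, num_inputs = 5 */
   return 0;
}

Note that the vec4 output path in brw_nir_lower_outputs() deliberately skips this packing and copies var->data.location straight into driver_location, presumably because the vec4 backend addresses outputs by their varying locations directly.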