path: root/src/intel/compiler/brw_nir.c
author     Jason Ekstrand <[email protected]>   2019-06-04 18:19:06 -0500
committer  Jason Ekstrand <[email protected]>   2019-06-05 20:07:28 +0000
commit     bb67a99a2dfcf7258498ce868020feacd3d1fc15 (patch)
tree       a6a7a4df193199c519ee9b9027d776d10aa75cae /src/intel/compiler/brw_nir.c
parent     fe2fc30cb559f7ef99c07bd219c057b011242cb4 (diff)
intel/nir: Stop returning the shader from helpers
Now that NIR_TEST_* doesn't swap the shader out from under us, it's sufficient to just modify the shader rather than having to return it in case we're testing serialization or cloning.

Reviewed-by: Kenneth Graunke <[email protected]>
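As a minimal sketch of the calling-convention change (the call site below is illustrative rather than an exact excerpt from the tree):

    /* Before: the optimization helpers returned a nir_shader *, because the
     * NIR_TEST_* paths could hand back a different shader object, so every
     * caller had to capture the result.
     */
    nir = brw_nir_optimize(nir, compiler, is_scalar, false);

    /* After: the shader is only ever modified in place, so the helpers
     * return void and the reassignment disappears at each call site.
     */
    brw_nir_optimize(nir, compiler, is_scalar, false);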
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r--  src/intel/compiler/brw_nir.c  |  36
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index b9642bbb417..b89984b70b7 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -530,7 +530,7 @@ brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
return indirect_mask;
}
-nir_shader *
+void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar, bool allow_copies)
{
@@ -643,8 +643,6 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
* assert in the opt_large_constants pass.
*/
OPT(nir_remove_dead_variables, nir_var_function_temp);
-
- return nir;
}
static unsigned
@@ -691,7 +689,7 @@ lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
* intended for the FS backend as long as nir_optimize is called again with
* is_scalar = true to scalarize everything prior to code gen.
*/
-nir_shader *
+void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
const nir_shader *softfp64)
{
@@ -732,7 +730,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
OPT(nir_split_var_copies);
OPT(nir_split_struct_vars, nir_var_function_temp);
- nir = brw_nir_optimize(nir, compiler, is_scalar, true);
+ brw_nir_optimize(nir, compiler, is_scalar, true);
bool lowered_64bit_ops = false;
do {
@@ -793,9 +791,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
nir_lower_direct_array_deref_of_vec_load);
/* Get rid of split copies */
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
-
- return nir;
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
void
@@ -814,12 +810,12 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
if (p_is_scalar && c_is_scalar) {
NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
}
if (nir_link_opt_varyings(*producer, *consumer))
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
@@ -837,8 +833,8 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
}
NIR_PASS_V(*producer, nir_lower_io_to_vector, nir_var_shader_out);
@@ -867,7 +863,7 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
* called on a shader, it will no longer be in SSA form so most optimizations
* will not work.
*/
-nir_shader *
+void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar)
{
@@ -885,7 +881,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
OPT(nir_opt_algebraic_before_ffma);
} while (progress);
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
@@ -981,11 +977,9 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
_mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
-
- return nir;
}
-nir_shader *
+void
brw_nir_apply_sampler_key(nir_shader *nir,
const struct brw_compiler *compiler,
const struct brw_sampler_prog_key_data *key_tex,
@@ -1034,10 +1028,8 @@ brw_nir_apply_sampler_key(nir_shader *nir,
if (nir_lower_tex(nir, &tex_options)) {
nir_validate_shader(nir, "after nir_lower_tex");
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
-
- return nir;
}
enum brw_reg_type
@@ -1189,7 +1181,7 @@ brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compile
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
return nir;
}