author     Jason Ekstrand <[email protected]>   2019-06-04 18:23:17 -0500
committer  Jason Ekstrand <[email protected]>   2019-06-05 20:07:28 +0000
commit     811c05dfe6189cabc6d8aa547a3463778f2d5249 (patch)
tree       946a13c16f870c762406f7b80b8f08471a0f5f0a /src/intel/compiler/brw_nir.c
parent     bb67a99a2dfcf7258498ce868020feacd3d1fc15 (diff)
intel/nir: Take nir_shader*s in brw_nir_link_shaders
Since NIR_PASS no longer swaps out the NIR pointer when NIR_TEST_* is
enabled, we can just take a single pointer and not a pointer to pointer.
Reviewed-by: Kenneth Graunke <[email protected]>
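
To make the signature change concrete, here is a minimal caller-side sketch; it is not taken from this commit. The helper name link_vs_fs, the variable names, and the include paths are assumptions for illustration; only brw_nir_link_shaders and its argument order come from the patch below.

    #include "compiler/nir/nir.h"
    #include "brw_nir.h"

    /* Hypothetical caller: link a vertex-shader producer to a
     * fragment-shader consumer.  Before this patch the call had to be
     * brw_nir_link_shaders(compiler, &vs_nir, &fs_nir), because NIR_PASS
     * could swap the shader behind the pointer when a NIR_TEST_* mode
     * (e.g. NIR_TEST_CLONE) was enabled.  Shaders are now transformed
     * in place, so plain pointers suffice.
     */
    static void
    link_vs_fs(const struct brw_compiler *compiler,
               nir_shader *vs_nir, nir_shader *fs_nir)
    {
       brw_nir_link_shaders(compiler, vs_nir, fs_nir);
    }

Since the callee can no longer swap out the shader behind the pointer, the caller's vs_nir and fs_nir handles stay valid across the call, which is what makes the pass-by-value signature safe.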
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r--   src/intel/compiler/brw_nir.c   |  66
1 file changed, 32 insertions(+), 34 deletions(-)
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index b89984b70b7..6cce245c10e 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -796,63 +796,61 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
 void
 brw_nir_link_shaders(const struct brw_compiler *compiler,
-                     nir_shader **producer, nir_shader **consumer)
+                     nir_shader *producer, nir_shader *consumer)
 {
-   nir_lower_io_arrays_to_elements(*producer, *consumer);
-   nir_validate_shader(*producer, "after nir_lower_io_arrays_to_elements");
-   nir_validate_shader(*consumer, "after nir_lower_io_arrays_to_elements");
+   nir_lower_io_arrays_to_elements(producer, consumer);
+   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
+   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");

-   const bool p_is_scalar =
-      compiler->scalar_stage[(*producer)->info.stage];
-   const bool c_is_scalar =
-      compiler->scalar_stage[(*consumer)->info.stage];
+   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
+   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];

    if (p_is_scalar && c_is_scalar) {
-      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
-      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
-      brw_nir_optimize(*producer, compiler, p_is_scalar, false);
-      brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+      brw_nir_optimize(producer, compiler, p_is_scalar, false);
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
    }

-   if (nir_link_opt_varyings(*producer, *consumer))
-      brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+   if (nir_link_opt_varyings(producer, consumer))
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);

-   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
-   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);

-   if (nir_remove_unused_varyings(*producer, *consumer)) {
-      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
-      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+   if (nir_remove_unused_varyings(producer, consumer)) {
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

       /* The backend might not be able to handle indirects on
        * temporaries so we need to lower indirects on any of the
        * varyings we have demoted here.
        */
-      NIR_PASS_V(*producer, nir_lower_indirect_derefs,
-                 brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
-      NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
-                 brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
+      NIR_PASS_V(producer, nir_lower_indirect_derefs,
+                 brw_nir_no_indirect_mask(compiler, producer->info.stage));
+      NIR_PASS_V(consumer, nir_lower_indirect_derefs,
+                 brw_nir_no_indirect_mask(compiler, consumer->info.stage));

-      brw_nir_optimize(*producer, compiler, p_is_scalar, false);
-      brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+      brw_nir_optimize(producer, compiler, p_is_scalar, false);
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
    }

-   NIR_PASS_V(*producer, nir_lower_io_to_vector, nir_var_shader_out);
-   NIR_PASS_V(*producer, nir_opt_combine_stores, nir_var_shader_out);
-   NIR_PASS_V(*consumer, nir_lower_io_to_vector, nir_var_shader_in);
+   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
+   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

-   if ((*producer)->info.stage != MESA_SHADER_TESS_CTRL) {
+   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
       /* Calling lower_io_to_vector creates output variable writes with
        * write-masks. On non-TCS outputs, the back-end can't handle it and we
        * need to call nir_lower_io_to_temporaries to get rid of them. This,
        * in turn, creates temporary variables and extra copy_deref intrinsics
        * that we need to clean up.
        */
-      NIR_PASS_V(*producer, nir_lower_io_to_temporaries,
-                 nir_shader_get_entrypoint(*producer), true, false);
-      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
-      NIR_PASS_V(*producer, nir_split_var_copies);
-      NIR_PASS_V(*producer, nir_lower_var_copies);
+      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+                 nir_shader_get_entrypoint(producer), true, false);
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(producer, nir_split_var_copies);
+      NIR_PASS_V(producer, nir_lower_var_copies);
    }
 }