summary refs log tree commit diff stats
path: root/src/intel
diff options
context:
space:
mode:
authorJason Ekstrand <[email protected]>2019-06-04 18:19:06 -0500
committerJason Ekstrand <[email protected]>2019-06-05 20:07:28 +0000
commitbb67a99a2dfcf7258498ce868020feacd3d1fc15 (patch)
treea6a7a4df193199c519ee9b9027d776d10aa75cae /src/intel
parentfe2fc30cb559f7ef99c07bd219c057b011242cb4 (diff)
intel/nir: Stop returning the shader from helpers
Now that NIR_TEST_* doesn't swap the shader out from under us, it's sufficient to just modify the shader rather than having to return in case we're testing serialization or cloning. Reviewed-by: Kenneth Graunke <[email protected]>
Diffstat (limited to 'src/intel')
-rw-r--r--src/intel/blorp/blorp.c4
-rw-r--r--src/intel/compiler/brw_fs.cpp10
-rw-r--r--src/intel/compiler/brw_nir.c36
-rw-r--r--src/intel/compiler/brw_nir.h28
-rw-r--r--src/intel/compiler/brw_shader.cpp4
-rw-r--r--src/intel/compiler/brw_vec4.cpp4
-rw-r--r--src/intel/compiler/brw_vec4_gs_visitor.cpp4
-rw-r--r--src/intel/compiler/brw_vec4_tcs.cpp4
-rw-r--r--src/intel/vulkan/anv_pipeline.c2
9 files changed, 45 insertions, 51 deletions
diff --git a/src/intel/blorp/blorp.c b/src/intel/blorp/blorp.c
index cb5d0f73e77..8f64f1af37c 100644
--- a/src/intel/blorp/blorp.c
+++ b/src/intel/blorp/blorp.c
@@ -192,7 +192,7 @@ blorp_compile_fs(struct blorp_context *blorp, void *mem_ctx,
*/
wm_prog_data->base.binding_table.texture_start = BLORP_TEXTURE_BT_INDEX;
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
nir_remove_dead_variables(nir, nir_var_shader_in);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
@@ -221,7 +221,7 @@ blorp_compile_vs(struct blorp_context *blorp, void *mem_ctx,
nir->options =
compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions;
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
vs_prog_data->inputs_read = nir->info.inputs_read;
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 9b4e030b54f..4151ed7485e 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -7980,7 +7980,7 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
{
const struct gen_device_info *devinfo = compiler->devinfo;
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
+ brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
brw_nir_lower_fs_inputs(shader, devinfo, key);
brw_nir_lower_fs_outputs(shader);
@@ -7990,7 +7990,7 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
if (!key->multisample_fbo)
NIR_PASS_V(shader, demote_sample_qualifiers);
NIR_PASS_V(shader, move_interpolation_to_top);
- shader = brw_postprocess_nir(shader, compiler, true);
+ brw_postprocess_nir(shader, compiler, true);
/* key->alpha_test_func means simulating alpha testing via discards,
* so the shader definitely kills pixels.
@@ -8241,7 +8241,7 @@ compile_cs_to_nir(const struct brw_compiler *compiler,
unsigned dispatch_width)
{
nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
+ brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
@@ -8249,7 +8249,9 @@ compile_cs_to_nir(const struct brw_compiler *compiler,
NIR_PASS_V(shader, nir_opt_constant_folding);
NIR_PASS_V(shader, nir_opt_dce);
- return brw_postprocess_nir(shader, compiler, true);
+ brw_postprocess_nir(shader, compiler, true);
+
+ return shader;
}
const unsigned *
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index b9642bbb417..b89984b70b7 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -530,7 +530,7 @@ brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
return indirect_mask;
}
-nir_shader *
+void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar, bool allow_copies)
{
@@ -643,8 +643,6 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
* assert in the opt_large_constants pass.
*/
OPT(nir_remove_dead_variables, nir_var_function_temp);
-
- return nir;
}
static unsigned
@@ -691,7 +689,7 @@ lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
* intended for the FS backend as long as nir_optimize is called again with
* is_scalar = true to scalarize everything prior to code gen.
*/
-nir_shader *
+void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
const nir_shader *softfp64)
{
@@ -732,7 +730,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
OPT(nir_split_var_copies);
OPT(nir_split_struct_vars, nir_var_function_temp);
- nir = brw_nir_optimize(nir, compiler, is_scalar, true);
+ brw_nir_optimize(nir, compiler, is_scalar, true);
bool lowered_64bit_ops = false;
do {
@@ -793,9 +791,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
nir_lower_direct_array_deref_of_vec_load);
/* Get rid of split copies */
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
-
- return nir;
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
void
@@ -814,12 +810,12 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
if (p_is_scalar && c_is_scalar) {
NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
}
if (nir_link_opt_varyings(*producer, *consumer))
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
@@ -837,8 +833,8 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
- *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
- *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
}
NIR_PASS_V(*producer, nir_lower_io_to_vector, nir_var_shader_out);
@@ -867,7 +863,7 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
* called on a shader, it will no longer be in SSA form so most optimizations
* will not work.
*/
-nir_shader *
+void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar)
{
@@ -885,7 +881,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
OPT(nir_opt_algebraic_before_ffma);
} while (progress);
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
@@ -981,11 +977,9 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
_mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
-
- return nir;
}
-nir_shader *
+void
brw_nir_apply_sampler_key(nir_shader *nir,
const struct brw_compiler *compiler,
const struct brw_sampler_prog_key_data *key_tex,
@@ -1034,10 +1028,8 @@ brw_nir_apply_sampler_key(nir_shader *nir,
if (nir_lower_tex(nir, &tex_options)) {
nir_validate_shader(nir, "after nir_lower_tex");
- nir = brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar, false);
}
-
- return nir;
}
enum brw_reg_type
@@ -1189,7 +1181,7 @@ brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compile
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
return nir;
}
diff --git a/src/intel/compiler/brw_nir.h b/src/intel/compiler/brw_nir.h
index ba450209e51..82c260ae533 100644
--- a/src/intel/compiler/brw_nir.h
+++ b/src/intel/compiler/brw_nir.h
@@ -92,9 +92,9 @@ enum {
void brw_nir_analyze_boolean_resolves(nir_shader *nir);
-nir_shader *brw_preprocess_nir(const struct brw_compiler *compiler,
- nir_shader *nir,
- const nir_shader *softfp64);
+void brw_preprocess_nir(const struct brw_compiler *compiler,
+ nir_shader *nir,
+ const nir_shader *softfp64);
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
@@ -126,9 +126,9 @@ void brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader);
-nir_shader *brw_postprocess_nir(nir_shader *nir,
- const struct brw_compiler *compiler,
- bool is_scalar);
+void brw_postprocess_nir(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar);
bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
const uint8_t *attrib_wa_flags);
@@ -137,10 +137,10 @@ bool brw_nir_apply_trig_workarounds(nir_shader *nir);
void brw_nir_apply_tcs_quads_workaround(nir_shader *nir);
-nir_shader *brw_nir_apply_sampler_key(nir_shader *nir,
- const struct brw_compiler *compiler,
- const struct brw_sampler_prog_key_data *key,
- bool is_scalar);
+void brw_nir_apply_sampler_key(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ const struct brw_sampler_prog_key_data *key,
+ bool is_scalar);
enum brw_reg_type brw_type_for_nir_type(const struct gen_device_info *devinfo,
nir_alu_type type);
@@ -166,10 +166,10 @@ void brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
bool brw_nir_opt_peephole_ffma(nir_shader *shader);
-nir_shader *brw_nir_optimize(nir_shader *nir,
- const struct brw_compiler *compiler,
- bool is_scalar,
- bool allow_copies);
+void brw_nir_optimize(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar,
+ bool allow_copies);
nir_shader *brw_nir_create_passthrough_tcs(void *mem_ctx,
const struct brw_compiler *compiler,
diff --git a/src/intel/compiler/brw_shader.cpp b/src/intel/compiler/brw_shader.cpp
index 2061afc1c24..643765c1b22 100644
--- a/src/intel/compiler/brw_shader.cpp
+++ b/src/intel/compiler/brw_shader.cpp
@@ -1244,10 +1244,10 @@ brw_compile_tes(const struct brw_compiler *compiler,
nir->info.inputs_read = key->inputs_read;
nir->info.patch_inputs_read = key->patch_inputs_read;
- nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
+ brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_tes_inputs(nir, input_vue_map);
brw_nir_lower_vue_outputs(nir);
- nir = brw_postprocess_nir(nir, compiler, is_scalar);
+ brw_postprocess_nir(nir, compiler, is_scalar);
brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
nir->info.outputs_written,
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 7d60665b621..971439fa472 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -2823,7 +2823,7 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
char **error_str)
{
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
+ brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
const unsigned *assembly = NULL;
@@ -2847,7 +2847,7 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
brw_nir_lower_vs_inputs(shader, key->gl_attrib_wa_flags);
brw_nir_lower_vue_outputs(shader);
- shader = brw_postprocess_nir(shader, compiler, is_scalar);
+ brw_postprocess_nir(shader, compiler, is_scalar);
prog_data->base.clip_distance_mask =
((1 << shader->info.clip_distance_array_size) - 1);
diff --git a/src/intel/compiler/brw_vec4_gs_visitor.cpp b/src/intel/compiler/brw_vec4_gs_visitor.cpp
index 09be70d36d5..417daf1b493 100644
--- a/src/intel/compiler/brw_vec4_gs_visitor.cpp
+++ b/src/intel/compiler/brw_vec4_gs_visitor.cpp
@@ -639,10 +639,10 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
&c.input_vue_map, inputs_read,
shader->info.separate_shader);
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
+ brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(shader, &c.input_vue_map);
brw_nir_lower_vue_outputs(shader);
- shader = brw_postprocess_nir(shader, compiler, is_scalar);
+ brw_postprocess_nir(shader, compiler, is_scalar);
prog_data->base.clip_distance_mask =
((1 << shader->info.clip_distance_array_size) - 1);
diff --git a/src/intel/compiler/brw_vec4_tcs.cpp b/src/intel/compiler/brw_vec4_tcs.cpp
index c37f34cbe81..39df2d5054b 100644
--- a/src/intel/compiler/brw_vec4_tcs.cpp
+++ b/src/intel/compiler/brw_vec4_tcs.cpp
@@ -397,14 +397,14 @@ brw_compile_tcs(const struct brw_compiler *compiler,
nir->info.outputs_written,
nir->info.patch_outputs_written);
- nir = brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
+ brw_nir_apply_sampler_key(nir, compiler, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(nir, &input_vue_map);
brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map,
key->tes_primitive_mode);
if (key->quads_workaround)
brw_nir_apply_tcs_quads_workaround(nir);
- nir = brw_postprocess_nir(nir, compiler, is_scalar);
+ brw_postprocess_nir(nir, compiler, is_scalar);
bool has_primitive_id =
nir->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID);
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 2a88b55a391..c2b3814c170 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -246,7 +246,7 @@ anv_shader_compile_to_nir(struct anv_device *device,
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- nir = brw_preprocess_nir(compiler, nir, NULL);
+ brw_preprocess_nir(compiler, nir, NULL);
return nir;
}