author    Jason Ekstrand <[email protected]>  2019-03-04 15:55:19 -0600
committer Jason Ekstrand <[email protected]>  2019-03-06 17:24:57 +0000
commit    e02959f442ed6546fb632a153ffc32848968038f (patch)
tree      7704af3591371b6f8a83765e1cc598c1e1c4e25a /src/intel/compiler/brw_nir.c
parent    f25ca337b40f1d5846ac146f00fba77b1610be37 (diff)
nir/lower_doubles: Inline functions directly in lower_doubles
Instead of trusting the caller to already have created a softfp64 function shader and added all of its functions to our shader, we simply take the softfp64 shader as an argument and do the function inlining ourselves. This means that there are no more nasty functions lying around that the caller needs to worry about cleaning up.

Reviewed-by: Matt Turner <[email protected]>
Reviewed-by: Jordan Justen <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
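For reference, the sequence that brw_preprocess_nir used to run by hand (the hunk removed below) now happens inside nir_lower_doubles itself. The following is a minimal sketch of that sequence built from the pass names in the removed hunk; the actual ordering inside nir_lower_doubles may differ, and inline_softfp64_functions is a name invented here for illustration only:

#include <assert.h>
#include "nir.h"

/* Sketch: once the softfp64 library functions have been linked into
 * the shader, lower them into a form nir_inline_functions can handle,
 * inline every call, then strip all functions but the entrypoint so
 * no stale softfp64 bodies are left for the caller to clean up. */
static void
inline_softfp64_functions(nir_shader *nir)
{
   /* Function-local variables with initializers must be lowered
    * before returns and calls can be flattened. */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Every call is inlined now; drop everything except the entrypoint. */
   const nir_function *entry_point = nir_shader_get_entrypoint(nir)->function;
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
}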
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r--  src/intel/compiler/brw_nir.c  22
1 file changed, 4 insertions, 18 deletions
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 58b89a1bd3c..34aaa29a5cb 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -653,7 +653,8 @@ lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
* is_scalar = true to scalarize everything prior to code gen.
*/
nir_shader *
-brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
+ const nir_shader *softfp64)
{
const struct gen_device_info *devinfo = compiler->devinfo;
UNUSED bool progress; /* Written by OPT */
@@ -677,7 +678,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
progress = false;
OPT(nir_lower_int64, nir->options->lower_int64_options);
- OPT(nir_lower_doubles, nir->options->lower_doubles_options);
+ OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
/* Necessary to lower add -> sub and div -> mul/rcp */
OPT(nir_opt_algebraic);
@@ -685,21 +686,6 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
lowered_64bit_ops |= progress;
} while (progress);
- if (lowered_64bit_ops) {
- OPT(nir_lower_constant_initializers, nir_var_function_temp);
- OPT(nir_lower_returns);
- OPT(nir_inline_functions);
- OPT(nir_opt_deref);
- }
-
- const nir_function *entry_point = nir_shader_get_entrypoint(nir)->function;
- foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
- if (func != entry_point) {
- exec_node_remove(&func->node);
- }
- }
- assert(exec_list_length(&nir->functions) == 1);
-
if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
@@ -1098,7 +1084,7 @@ brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compile
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
- nir = brw_preprocess_nir(compiler, nir);
+ nir = brw_preprocess_nir(compiler, nir, NULL);
return nir;
}
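On the caller side, the softfp64 shader is now passed straight through to brw_preprocess_nir. A rough usage sketch, assuming Mesa's brw_nir.h declarations: brw_nir_create_softfp64_shader() is a hypothetical helper name (in Mesa the softfp64 library comes from compiling the float64 GLSL support code to NIR, e.g. via glsl_float64_funcs_to_nir()), and gating on nir_lower_fp64_full_software is an assumption about when the library is actually needed:

#include "brw_nir.h"

static nir_shader *
preprocess_with_softfp64(const struct brw_compiler *compiler,
                         nir_shader *nir, void *mem_ctx)
{
   const nir_shader *softfp64 = NULL;

   /* Only build the softfp64 library when doubles are lowered to
    * software; NULL stays a valid argument otherwise, as the
    * passthrough-TCS hunk above shows. */
   if (nir->options->lower_doubles_options & nir_lower_fp64_full_software)
      softfp64 = brw_nir_create_softfp64_shader(compiler, mem_ctx); /* hypothetical helper */

   return brw_preprocess_nir(compiler, nir, softfp64);
}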