diff options
author | Jason Ekstrand <[email protected]> | 2019-07-11 16:59:31 -0500 |
---|---|---|
committer | Jason Ekstrand <[email protected]> | 2019-07-16 16:05:16 +0000 |
commit | 110669c85c3fcfed0a4dff42a36fc3f97c5a559a (patch) | |
tree | 951d194b8b8499ba2985b1be477ceb4cc59700ef | |
parent | 548da20b22d43285fd919a4eaab8ef549b36b91e (diff) |
st,i965: Stop looping on 64-bit lowering
Now that the 64-bit lowering passes do a complete lowering in one go, we
don't need to loop anymore. We do, however, have to ensure that int64
lowering happens after double lowering because double lowering can
produce int64 ops.
Reviewed-by: Eric Anholt <[email protected]>
-rw-r--r-- | src/intel/compiler/brw_compiler.c | 4 |
-rw-r--r-- | src/intel/compiler/brw_nir.c | 14 |
-rw-r--r-- | src/mesa/state_tracker/st_glsl_to_nir.cpp | 25 |
3 files changed, 13 insertions, 30 deletions
diff --git a/src/intel/compiler/brw_compiler.c b/src/intel/compiler/brw_compiler.c index 6d9dac6c3ca..7ceeb14c70f 100644 --- a/src/intel/compiler/brw_compiler.c +++ b/src/intel/compiler/brw_compiler.c @@ -133,7 +133,9 @@ brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo) nir_lower_dceil | nir_lower_dfract | nir_lower_dround_even | - nir_lower_dmod; + nir_lower_dmod | + nir_lower_dsub | + nir_lower_ddiv; if (!devinfo->has_64bit_types || (INTEL_DEBUG & DEBUG_SOFT64)) { int64_options |= nir_lower_mov64 | diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c index a0805758160..675fe026695 100644 --- a/src/intel/compiler/brw_nir.c +++ b/src/intel/compiler/brw_nir.c @@ -681,18 +681,8 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir, brw_nir_optimize(nir, compiler, is_scalar, true); - bool lowered_64bit_ops = false; - do { - progress = false; - - OPT(nir_lower_int64, nir->options->lower_int64_options); - OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options); - - /* Necessary to lower add -> sub and div -> mul/rcp */ - OPT(nir_opt_algebraic); - - lowered_64bit_ops |= progress; - } while (progress); + OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options); + OPT(nir_lower_int64, nir->options->lower_int64_options); /* This needs to be run after the first optimization pass but before we * lower indirect derefs away diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp index be1fc3b2484..9bd69383373 100644 --- a/src/mesa/state_tracker/st_glsl_to_nir.cpp +++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp @@ -367,23 +367,14 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog, if (lower_64bit) { bool lowered_64bit_ops = false; - bool progress = false; - - NIR_PASS_V(nir, nir_opt_algebraic); - - do { - progress = false; - if (options->lower_int64_options) { - NIR_PASS(progress, nir, nir_lower_int64, - options->lower_int64_options); - } - if (options->lower_doubles_options) { - NIR_PASS(progress, nir, nir_lower_doubles, - st->ctx->SoftFP64, options->lower_doubles_options); - } - NIR_PASS(progress, nir, nir_opt_algebraic); - lowered_64bit_ops |= progress; - } while (progress); + if (options->lower_doubles_options) { + NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles, + st->ctx->SoftFP64, options->lower_doubles_options); + } + if (options->lower_int64_options) { + NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64, + options->lower_int64_options); + } if (lowered_64bit_ops) st_nir_opts(nir, is_scalar);