author    Jason Ekstrand <[email protected]>  2019-07-11 16:59:31 -0500
committer Jason Ekstrand <[email protected]>  2019-07-16 16:05:16 +0000
commit    110669c85c3fcfed0a4dff42a36fc3f97c5a559a
tree      951d194b8b8499ba2985b1be477ceb4cc59700ef /src/intel/compiler/brw_nir.c
parent    548da20b22d43285fd919a4eaab8ef549b36b91e
st,i965: Stop looping on 64-bit lowering
Now that the 64-bit lowering passes do a complete lowering in one go, we
don't need to loop anymore.  We do, however, have to ensure that int64
lowering happens after double lowering because double lowering can
produce int64 ops.

Reviewed-by: Eric Anholt <[email protected]>
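To see why the ordering matters: softfp64-style double lowering implements
fp64 arithmetic in terms of 64-bit integer bit manipulation, so running
nir_lower_doubles can introduce fresh int64 ops that nir_lower_int64 must
then lower on hardware without native 64-bit integer support.  As a
standalone illustration (plain C, not Mesa code; soft_fneg64 is a
hypothetical name), even negating a double can be expressed as an int64
XOR of the IEEE-754 sign bit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Negate a double using only 64-bit integer operations, the way a
 * soft-float lowering would: reinterpret the bits, flip the sign bit.
 */
static double soft_fneg64(double x)
{
   uint64_t bits;
   memcpy(&bits, &x, sizeof bits);   /* reinterpret double as uint64 */
   bits ^= UINT64_C(1) << 63;        /* flip the IEEE-754 sign bit   */
   memcpy(&x, &bits, sizeof x);
   return x;
}

int main(void)
{
   printf("%f\n", soft_fneg64(3.5)); /* prints -3.500000 */
   return 0;
}

Running the passes in the opposite order would leave int64 ops like that
XOR behind with nothing left to lower them.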
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r--   src/intel/compiler/brw_nir.c   14

1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index a0805758160..675fe026695 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -681,18 +681,8 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
    brw_nir_optimize(nir, compiler, is_scalar, true);
 
-   bool lowered_64bit_ops = false;
-   do {
-      progress = false;
-
-      OPT(nir_lower_int64, nir->options->lower_int64_options);
-      OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
-
-      /* Necessary to lower add -> sub and div -> mul/rcp */
-      OPT(nir_opt_algebraic);
-
-      lowered_64bit_ops |= progress;
-   } while (progress);
+   OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+   OPT(nir_lower_int64, nir->options->lower_int64_options);
 
    /* This needs to be run after the first optimization pass but before we
     * lower indirect derefs away