path: root/src/mesa/state_tracker
author     Jason Ekstrand <[email protected]>    2019-03-04 17:02:39 -0600
committer  Jason Ekstrand <[email protected]>    2019-03-06 17:24:57 +0000
commit     9ab1b1d0227499b7ff6a61fdebe75693212a67f5 (patch)
tree       d4893efdf252c51f7aa01b7c2c4d8b573c4a2116 /src/mesa/state_tracker
parent     656ace3dd85b2eb8c565383763a00d059519df4c (diff)
st/nir: Move 64-bit lowering later
Now that we have a loop unrolling cost function and loop unrolling isn't going to kill us the moment we have a 64-bit op in a loop, we can go ahead and move 64-bit lowering later. This gives us the opportunity to do more optimizations and actually let the full optimizer run even on 64-bit ops rather than hoping one round of opt_algebraic will fix everything. This substantially reduces both fp64 shader compile times and the resulting code size.

Reviewed-by: Matt Turner <[email protected]>
Reviewed-by: Jordan Justen <[email protected]>
Reviewed-by: Kenneth Graunke <[email protected]>
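As a rough, standalone illustration of the pass ordering the message describes (run the general optimizer first, lower 64-bit ops to a fixed point, then re-run the optimizer only if lowering changed anything), here is a minimal C sketch. It is not Mesa code: struct shader, run_optimizer() and lower_some_64bit_ops() are hypothetical stand-ins for the NIR shader, st_nir_opts() and the nir_lower_int64/nir_lower_doubles passes that appear in the diff below.

   #include <stdbool.h>
   #include <stdio.h>

   /* Hypothetical stand-in for a NIR shader: just counts unlowered 64-bit ops. */
   struct shader { int pending_64bit_ops; };

   /* Stand-in for st_nir_opts(): the full optimization loop. */
   static void run_optimizer(struct shader *s)
   {
      printf("optimizer pass, 64-bit ops remaining: %d\n", s->pending_64bit_ops);
   }

   /* Stand-in for nir_lower_int64/nir_lower_doubles: lowers one op per call
    * and reports whether it made progress. */
   static bool lower_some_64bit_ops(struct shader *s)
   {
      if (s->pending_64bit_ops == 0)
         return false;
      s->pending_64bit_ops--;
      return true;
   }

   int main(void)
   {
      struct shader s = { .pending_64bit_ops = 3 };

      /* New ordering: let the full optimizer see the 64-bit ops first. */
      run_optimizer(&s);

      /* Lower 64-bit ops to a fixed point, remembering whether anything changed. */
      bool lowered_64bit_ops = false;
      bool progress;
      do {
         progress = lower_some_64bit_ops(&s);
         lowered_64bit_ops |= progress;
      } while (progress);

      /* Re-run the optimizer only if lowering actually produced new code. */
      if (lowered_64bit_ops)
         run_optimizer(&s);

      return 0;
   }

With three pending 64-bit ops this runs the optimizer twice; with zero pending ops the do/while makes no progress, lowered_64bit_ops stays false, and the second optimizer run is skipped, which is what the conditional added by the patch is for.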
Diffstat (limited to 'src/mesa/state_tracker')
-rw-r--r--   src/mesa/state_tracker/st_glsl_to_nir.cpp | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index 3d01b91f425..fa0cdf771e4 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -410,6 +410,8 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
       NIR_PASS_V(nir, nir_lower_alu_to_scalar);
    }
 
+   st_nir_opts(nir, is_scalar);
+
    if (lower_64bit) {
       bool lowered_64bit_ops = false;
       bool progress = false;
@@ -429,9 +431,10 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
          NIR_PASS(progress, nir, nir_opt_algebraic);
          lowered_64bit_ops |= progress;
       } while (progress);
-   }
 
-   st_nir_opts(nir, is_scalar);
+      if (lowered_64bit_ops)
+         st_nir_opts(nir, is_scalar);
+   }
 
    return nir;
 }