path: root/src/intel/compiler/brw_nir.c
author    Jason Ekstrand <[email protected]>    2019-07-12 18:47:15 -0500
committer Jason Ekstrand <[email protected]>    2019-07-13 02:59:28 +0000
commit    974fabe810cad996cdf0c1acbcd1cba6e04f7357 (patch)
tree      bd3e96c0876c94b794546420ba4ab683b09098b5 /src/intel/compiler/brw_nir.c
parent    7103baf01fcf8ce9424189d4180246b86ab3344e (diff)
intel: Run the optimization loop before and after lowering int64
For bindless SSBO access, we have to do 64-bit address calculations.  On ICL
and above, we don't have 64-bit integer support so we have to lower the
address calculations to 32-bit arithmetic.  If we don't run the optimization
loop before lowering, we won't fold any of the address chain calculations
before lowering 64-bit arithmetic and they aren't really foldable afterwards.

This cuts the size of the generated code in the compute shader in
dEQP-VK.ssbo.phys.layout.random.16bit.scalar.13 by around 30%.

Reviewed-by: Kenneth Graunke <[email protected]>
Reviewed-by: Caio Marcelo de Oliveira Filho <[email protected]>
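To make the motivation concrete, here is a small C sketch (not taken from the
patch; the helper name and the driver code are hypothetical) of roughly what a
single 64-bit add looks like once it has been split into 32-bit arithmetic, the
kind of expansion int64 lowering performs for hardware without native 64-bit
integers.  A chain of such adds, with carries coupling the low and high words,
is much harder for a constant folder to collapse than the same chain was while
it was still plain 64-bit adds, which is why the patch folds the address chains
before lowering.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: one 64-bit add expressed with 32-bit operations.
 * The name is hypothetical and not taken from NIR or the Intel compiler. */
static uint64_t
add64_via_32(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi)
{
   uint32_t lo    = a_lo + b_lo;
   uint32_t carry = lo < a_lo;           /* carry out of the low word */
   uint32_t hi    = a_hi + b_hi + carry; /* carry into the high word */
   return ((uint64_t)hi << 32) | lo;
}

int
main(void)
{
   /* base + 16 + 4: as 64-bit adds this folds to base + 20, but once each
    * add is split into the lo/hi/carry form above, the constants no longer
    * form a simple chain for the optimizer to collapse. */
   uint64_t base = 0x00000001fffffff0ull;
   uint64_t addr = add64_via_32((uint32_t)base, (uint32_t)(base >> 32), 16, 0);
   addr = add64_via_32((uint32_t)addr, (uint32_t)(addr >> 32), 4, 0);
   printf("0x%016llx\n", (unsigned long long)addr);
   return 0;
}
```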
Diffstat (limited to 'src/intel/compiler/brw_nir.c')
-rw-r--r--   src/intel/compiler/brw_nir.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index ef387e51601..a0805758160 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -821,7 +821,6 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
    UNUSED bool progress; /* Written by OPT */
 
    OPT(brw_nir_lower_mem_access_bit_sizes);
-   OPT(nir_lower_int64, nir->options->lower_int64_options);
 
    do {
       progress = false;
@@ -830,6 +829,9 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 
    brw_nir_optimize(nir, compiler, is_scalar, false);
 
+   if (OPT(nir_lower_int64, nir->options->lower_int64_options))
+      brw_nir_optimize(nir, compiler, is_scalar, false);
+
    if (devinfo->gen >= 6) {
       /* Try and fuse multiply-adds */
       OPT(brw_nir_opt_peephole_ffma);