author     Jason Ekstrand <[email protected]>   2019-07-19 17:23:26 -0500
committer  Jason Ekstrand <[email protected]>   2019-07-31 18:14:09 -0500
commit     942c759059eba4d0bcaca532e25700f9311d4b06 (patch)
tree       c70ceddcf4072b835b07f9a327922ba5f20daec5 /src
parent     078dcb7ccd307e8839ffbedddeab0328f6344715 (diff)
intel: Use NIR to lower 64-bit varying access
Reviewed-by: Matt Turner <[email protected]>
Diffstat (limited to 'src')
-rw-r--r--   src/intel/compiler/brw_nir.c   17
1 file changed, 11 insertions, 6 deletions
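Every hunk below makes the same change: the nir_lower_io_options argument to nir_lower_io() goes from 0 to nir_lower_io_lower_64bit_to_32, so NIR splits 64-bit varying loads and stores into pairs of 32-bit ones before the backend sees them. The fragment-shader path keeps a local lower_io_options variable because it may also OR in nir_lower_io_force_sample_interpolation when persample_interp is set. A minimal sketch of the call shape follows; the wrapper function is hypothetical and only mirrors the pattern in brw_nir.c, assuming Mesa's NIR headers and the driver's type_size_vec4() helper are available.

/* Sketch only: mirrors the nir_lower_io() call pattern used in brw_nir.c.
 * The wrapper name is hypothetical; nir_lower_io(), type_size_vec4() and
 * nir_lower_io_lower_64bit_to_32 come from Mesa's NIR/brw compiler code. */
#include "compiler/nir/nir.h"
#include "brw_nir.h"

static void
lower_varyings_64bit_to_32(nir_shader *nir, nir_variable_mode modes)
{
   /* Lay varyings out in vec4 slots and have NIR split any 64-bit
    * (double/dvec) access into 32-bit loads and stores. */
   nir_lower_io(nir, modes, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* brw_nir.c follows most of these calls with constant folding, since
    * the offset-handling passes that run next need actual constants. */
   nir_opt_constant_folding(nir);
}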
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 3260376ad1c..9f56644ce41 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -171,7 +171,8 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
* whether it is a double-precision type or not.
*/
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
@@ -294,7 +295,8 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
}
/* Inputs are stored in vec4 slots, so use type_size_vec4(). */
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
@@ -345,7 +347,8 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
@@ -396,7 +399,7 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
}
}
- nir_lower_io_options lower_io_options = 0;
+ nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
if (key->persample_interp)
lower_io_options |= nir_lower_io_force_sample_interpolation;
@@ -417,7 +420,8 @@ brw_nir_lower_vue_outputs(nir_shader *nir)
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
}
void
@@ -428,7 +432,8 @@ brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
+ nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);