author     Jason Ekstrand <[email protected]>    2018-07-23 09:41:26 -0700
committer  Jason Ekstrand <[email protected]>    2018-07-23 15:28:17 -0700
commit     820d5e51b7060f02d6c12fbb1c349111022ff37a (patch)
tree       6da91c27fc93007d2417c64a883de0dcb75835b4 /src/intel/compiler/brw_nir_analyze_ubo_ranges.c
parent     62024fa775058013a5a75f576f1129239c95de11 (diff)
intel/compiler: Account for built-in uniforms in analyze_ubo_ranges
The original pass only looked for load_uniform intrinsics, but there are a
number of other places that could end up loading a push constant. One obvious
omission was images, which always implicitly use a push constant. Legacy VS
clip planes also get pushed into the shader.

This fixes some new Vulkan CTS tests that test random combinations of
bindings and, in particular, lots of UBOs and images together.

Cc: [email protected]
Cc: Kenneth Graunke <[email protected]>
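In rough terms, the patch widens the question "does this shader use regular push constants?" from a single load_uniform check to the rule sketched below. This is an illustrative distillation only, assuming brw_nir.h provides the relevant declarations; the helper name is made up, and the real change (in the diff that follows) folds these checks directly into analyze_ubos_block() and brw_nir_analyze_ubo_ranges().

#include "brw_nir.h"

/* Hypothetical predicate, for illustration only: the conditions this patch
 * now treats as implicit push-constant use. */
static bool
uses_builtin_push_constants(const nir_shader *nir,
                            const nir_intrinsic_instr *intrin,
                            const struct brw_vs_prog_key *vs_key)
{
   /* Legacy user clip planes get pushed into vertex shaders. */
   if (nir->info.stage == MESA_SHADER_VERTEX &&
       vs_key && vs_key->nr_userclip_plane_consts > 0)
      return true;

   /* Compute shaders read their subgroup ID from push constants. */
   if (nir->info.stage == MESA_SHADER_COMPUTE)
      return true;

   switch (intrin->intrinsic) {
   case nir_intrinsic_load_uniform:
   /* Any image access implies pushed image parameters; the full list of
    * image_deref_* intrinsics is in the diff below. */
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_size:
      return true;
   default:
      return false;
   }
}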
Diffstat (limited to 'src/intel/compiler/brw_nir_analyze_ubo_ranges.c')
-rw-r--r--   src/intel/compiler/brw_nir_analyze_ubo_ranges.c   41
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
index cd5137da06e..cfa531675fc 100644
--- a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
+++ b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
@@ -124,12 +124,29 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-      if (intrin->intrinsic == nir_intrinsic_load_uniform)
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_load_uniform:
+      case nir_intrinsic_image_deref_load:
+      case nir_intrinsic_image_deref_store:
+      case nir_intrinsic_image_deref_atomic_add:
+      case nir_intrinsic_image_deref_atomic_min:
+      case nir_intrinsic_image_deref_atomic_max:
+      case nir_intrinsic_image_deref_atomic_and:
+      case nir_intrinsic_image_deref_atomic_or:
+      case nir_intrinsic_image_deref_atomic_xor:
+      case nir_intrinsic_image_deref_atomic_exchange:
+      case nir_intrinsic_image_deref_atomic_comp_swap:
+      case nir_intrinsic_image_deref_size:
          state->uses_regular_uniforms = true;
-
-      if (intrin->intrinsic != nir_intrinsic_load_ubo)
          continue;
+
+      case nir_intrinsic_load_ubo:
+         break; /* Fall through to the analysis below */
+
+      default:
+         continue; /* Not a uniform or UBO intrinsic */
+      }
 
       nir_const_value *block_const = nir_src_as_const_value(intrin->src[0]);
       nir_const_value *offset_const = nir_src_as_const_value(intrin->src[1]);
 
@@ -179,6 +196,7 @@ print_ubo_entry(FILE *file,
 void
 brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
                            nir_shader *nir,
+                           const struct brw_vs_prog_key *vs_key,
                            struct brw_ubo_range out_ranges[4])
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
@@ -197,6 +215,23 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
          _mesa_hash_table_create(mem_ctx, NULL, _mesa_key_pointer_equal),
    };
 
+   switch (nir->info.stage) {
+   case MESA_SHADER_VERTEX:
+      if (vs_key && vs_key->nr_userclip_plane_consts > 0)
+         state.uses_regular_uniforms = true;
+      break;
+
+   case MESA_SHADER_COMPUTE:
+      /* Compute shaders use push constants to get the subgroup ID so it's
+       * best to just assume some system values are pushed.
+       */
+      state.uses_regular_uniforms = true;
+      break;
+
+   default:
+      break;
+   }
+
    /* Walk the IR, recording how many times each UBO block/offset is used. */
    nir_foreach_function(function, nir) {
       if (function->impl) {
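For context, the callers of this pass (not part of this file's diff) now have to supply the extra argument. Below is a minimal sketch of what a call site looks like after this change, assuming the declaration lives in brw_nir.h; the wrapper function and variable names are made up for illustration.

#include "brw_nir.h"

/* Hypothetical wrapper, for illustration only; the real call sites live in
 * the scalar/vec4 compile paths. */
static void
example_call_sites(const struct brw_compiler *compiler,
                   nir_shader *vs_nir, const struct brw_vs_prog_key *vs_key,
                   nir_shader *fs_nir)
{
   struct brw_ubo_range vs_ranges[4], fs_ranges[4];

   /* Vertex shaders pass their prog key so nr_userclip_plane_consts can
    * force uses_regular_uniforms. */
   brw_nir_analyze_ubo_ranges(compiler, vs_nir, vs_key, vs_ranges);

   /* Other stages have no VS key; passing NULL relies on the "vs_key &&"
    * guard added in this patch. */
   brw_nir_analyze_ubo_ranges(compiler, fs_nir, NULL, fs_ranges);
}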