author     Jason Ekstrand <[email protected]>    2018-10-20 12:21:46 -0500
committer  Jason Ekstrand <[email protected]>    2018-11-08 10:09:25 -0600
commit     52145070c096883959975c8b1767af2da32f9f11 (patch)
tree       d6b5c03b882ef75effcad39b2d950899e9aa9093 /src/intel/compiler
parent     1413512b4c8f8da7514a52029515344145a221d2 (diff)
intel/analyze_ubo_ranges: Use nir_src_is_const and friends
Reviewed-by: Kenneth Graunke <[email protected]>
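
For context, the change swaps the older nir_src_as_const_value() pattern, where the caller receives a nir_const_value pointer and indexes its union by hand, for the nir_src_is_const()/nir_src_as_uint() helpers, which check const-ness and read the value through a typed accessor. A minimal before/after sketch of that pattern, assuming Mesa's NIR headers are on the include path; the helper names read_offset_old/read_offset_new are illustrative only and are not part of the commit:

#include "nir.h"   /* NIR core API from the Mesa tree */

/* Old pattern: fetch the nir_const_value and index its union directly,
 * which bakes in an assumption about the source's bit size. */
static bool
read_offset_old(nir_intrinsic_instr *intrin, unsigned *out_offset)
{
   nir_const_value *offset_const = nir_src_as_const_value(intrin->src[1]);
   if (offset_const == NULL)
      return false;
   *out_offset = offset_const->u32[0];
   return true;
}

/* New pattern: query const-ness explicitly and read the value through the
 * typed helper, which handles the source's bit size internally. */
static bool
read_offset_new(nir_intrinsic_instr *intrin, unsigned *out_offset)
{
   if (!nir_src_is_const(intrin->src[1]))
      return false;
   *out_offset = nir_src_as_uint(intrin->src[1]);
   return true;
}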
Diffstat (limited to 'src/intel/compiler')
-rw-r--r--  src/intel/compiler/brw_nir_analyze_ubo_ranges.c | 15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
index 2dfc7b8ddd6..ab7a2705c9a 100644
--- a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
+++ b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
@@ -147,12 +147,11 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)
continue; /* Not a uniform or UBO intrinsic */
}
- nir_const_value *block_const = nir_src_as_const_value(intrin->src[0]);
- nir_const_value *offset_const = nir_src_as_const_value(intrin->src[1]);
-
- if (block_const && offset_const) {
- const int block = block_const->u32[0];
- const int offset = offset_const->u32[0] / 32;
+ if (nir_src_is_const(intrin->src[0]) &&
+ nir_src_is_const(intrin->src[1])) {
+ const int block = nir_src_as_uint(intrin->src[0]);
+ const unsigned byte_offset = nir_src_as_uint(intrin->src[1]);
+ const int offset = byte_offset / 32;
/* Avoid shifting by larger than the width of our bitfield, as this
* is undefined in C. Even if we require multiple bits to represent
@@ -166,8 +165,8 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)
/* The value might span multiple 32-byte chunks. */
const int bytes = nir_intrinsic_dest_components(intrin) *
(nir_dest_bit_size(intrin->dest) / 8);
- const int start = ROUND_DOWN_TO(offset_const->u32[0], 32);
- const int end = ALIGN(offset_const->u32[0] + bytes, 32);
+ const int start = ROUND_DOWN_TO(byte_offset, 32);
+ const int end = ALIGN(byte_offset + bytes, 32);
const int chunks = (end - start) / 32;
/* TODO: should we count uses in loops as higher benefit? */
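
For reference, a worked instance of the chunk arithmetic in the second hunk, as a standalone sketch. The ROUND_DOWN_TO/ALIGN definitions below are power-of-two stand-ins for Mesa's macros, and the 4-component 64-bit load at byte offset 16 is an illustrative input, not taken from the commit:

#include <stdio.h>

/* Stand-ins for Mesa's ROUND_DOWN_TO/ALIGN macros (power-of-two alignment). */
#define ROUND_DOWN_TO(x, align) ((x) & ~((align) - 1))
#define ALIGN(x, align)         (((x) + (align) - 1) & ~((align) - 1))

int main(void)
{
   /* Example: a 4-component, 64-bit UBO load at byte_offset 16. */
   const unsigned byte_offset = 16;
   const int bytes  = 4 * (64 / 8);                   /* 32 bytes         */
   const int start  = ROUND_DOWN_TO(byte_offset, 32); /* 0                */
   const int end    = ALIGN(byte_offset + bytes, 32); /* 64               */
   const int chunks = (end - start) / 32;             /* spans 2 chunks   */
   printf("start=%d end=%d chunks=%d\n", start, end, chunks);
   return 0;
}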