Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/nir_opt_intrinsics.c | 24 ++++++++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/src/compiler/nir/nir_opt_intrinsics.c b/src/compiler/nir/nir_opt_intrinsics.c
index 26a0f9650ad..d5fdc516007 100644
--- a/src/compiler/nir/nir_opt_intrinsics.c
+++ b/src/compiler/nir/nir_opt_intrinsics.c
@@ -28,6 +28,26 @@
 * \file nir_opt_intrinsics.c
 */

+static nir_ssa_def *
+high_subgroup_mask(nir_builder *b,
+                   nir_ssa_def *count,
+                   uint64_t base_mask)
+{
+   /* group_mask could probably be calculated more efficiently, but we want
+    * to be sure not to shift by 64 if the subgroup size is 64, because the
+    * GLSL shift operator is undefined in that case. In any case, if we were
+    * worried about efficiency, this should probably be done further down,
+    * because the subgroup size is likely to be known at compile time.
+    */
+   nir_ssa_def *subgroup_size = nir_load_subgroup_size(b);
+   nir_ssa_def *all_bits = nir_imm_int64(b, ~0ull);
+   nir_ssa_def *shift = nir_isub(b, nir_imm_int(b, 64), subgroup_size);
+   nir_ssa_def *group_mask = nir_ushr(b, all_bits, shift);
+   nir_ssa_def *higher_bits = nir_ishl(b, nir_imm_int64(b, base_mask), count);
+
+   return nir_iand(b, higher_bits, group_mask);
+}
+
static bool
opt_intrinsics_impl(nir_function_impl *impl)
{
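To make the clamping concrete, here is a minimal scalar model of what the emitted NIR computes at run time. This sketch is not part of the patch: the function name high_subgroup_mask_model and the parameters count (invocation index) and size (subgroup size) are illustrative stand-ins for the values the shader loads.

#include <stdint.h>

/* Scalar model of high_subgroup_mask(): base_mask shifted into place,
 * then clamped to the bits that actually belong to the subgroup.
 * Shifting a 64-bit value by 64 is undefined, so the clamp is computed
 * as ~0ull >> (64 - size), which shifts by 0 when size == 64. */
static uint64_t
high_subgroup_mask_model(uint64_t base_mask, unsigned count, unsigned size)
{
   uint64_t group_mask = ~0ull >> (64 - size);  /* bits 0..size-1 set */
   uint64_t higher_bits = base_mask << count;   /* unclamped old result */

   return higher_bits & group_mask;             /* drop out-of-subgroup bits */
}

For a ge mask with count = 3 and size = 32, higher_bits is 0xfffffffffffffff8, but the returned value is 0xfffffff8, leaving bits 32..63 clear.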
@@ -95,10 +115,10 @@ opt_intrinsics_impl(nir_function_impl *impl)
                replacement = nir_ishl(&b, nir_imm_int64(&b, 1ull), count);
                break;
             case nir_intrinsic_load_subgroup_ge_mask:
-               replacement = nir_ishl(&b, nir_imm_int64(&b, ~0ull), count);
+               replacement = high_subgroup_mask(&b, count, ~0ull);
                break;
             case nir_intrinsic_load_subgroup_gt_mask:
-               replacement = nir_ishl(&b, nir_imm_int64(&b, ~1ull), count);
+               replacement = high_subgroup_mask(&b, count, ~1ull);
                break;
             case nir_intrinsic_load_subgroup_le_mask:
                replacement = nir_inot(&b, nir_ishl(&b, nir_imm_int64(&b, ~1ull), count));
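For a concrete sense of the bug being fixed, the standalone check below (a sketch, not part of the patch; the size and invocation values are illustrative) compares the old and new expansions of load_subgroup_ge_mask for a 32-wide subgroup. The le/lt cases are left untouched because ~(~1ull << count) only ever sets bits 0..count, which always fall inside the subgroup.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
   const unsigned size = 32, invocation = 5;

   /* Old expansion: the plain shift leaves bits 32..63 set, i.e. the
    * mask claims invocations that don't exist in a 32-wide subgroup. */
   uint64_t old_ge = ~0ull << invocation;
   assert(old_ge == 0xffffffffffffffe0ull);

   /* New expansion: the same shift, clamped to the subgroup's bits. */
   uint64_t new_ge = (~0ull << invocation) & (~0ull >> (64 - size));
   assert(new_ge == 0x00000000ffffffe0ull);

   return 0;
}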