author     Caio Marcelo de Oliveira Filho <[email protected]>    2019-06-07 23:06:27 -0700
committer  Caio Marcelo de Oliveira Filho <[email protected]>    2019-07-08 08:57:25 -0700
commit     45f5db5a84ae6fe5d4a4d1cd8b62b48d70629fc9 (patch)
tree       82e41bac2b94d02f70eb5bc622f631dbdcd60790 /src/intel
parent     a42e8f0ed1d1aa1b38282be28e4f55e246b55685 (diff)
intel/fs: Implement "demote to helper invocation"
The "demote" intrinsic works like "discard" but don't change the control flow, allowing derivative operations to work. This is the semantics of D3D discard. The "is_helper_invocation" intrinsic will return true for helper invocations -- both the ones that started as helpers and the ones that where demoted. This is needed to avoid changing the behavior of gl_HelperInvocation which is an input (so not expected to change during shader execution). v2: Emit the discard jump and comment why it is safe. (Jason) Rework the is_helper_invocation() that was stomping f0.1. (Jason) Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/compiler/brw_fs_nir.cpp  24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 864bf267f7e..00ce6af23c7 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -3502,6 +3502,23 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
bld.MOV(dest, fetch_render_target_array_index(bld));
break;
+ case nir_intrinsic_is_helper_invocation: {
+ /* Unlike the regular gl_HelperInvocation, that is defined at dispatch,
+ * the helperInvocationEXT() (aka SpvOpIsHelperInvocationEXT) takes into
+ * consideration demoted invocations. That information is stored in
+ * f0.1.
+ */
+ dest.type = BRW_REGISTER_TYPE_UD;
+
+ bld.MOV(dest, brw_imm_ud(0));
+
+ fs_inst *mov = bld.MOV(dest, brw_imm_ud(~0));
+ mov->predicate = BRW_PREDICATE_NORMAL;
+ mov->predicate_inverse = true;
+ mov->flag_subreg = 1;
+ break;
+ }
+
case nir_intrinsic_load_helper_invocation:
case nir_intrinsic_load_sample_mask_in:
case nir_intrinsic_load_sample_id: {
@@ -3549,6 +3566,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
break;
}
+ case nir_intrinsic_demote:
case nir_intrinsic_discard:
case nir_intrinsic_discard_if: {
/* We track our discarded pixels in f0.1. By predicating on it, we can
@@ -3608,10 +3626,14 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
cmp->flag_subreg = 1;
if (devinfo->gen >= 6) {
+ /* Due to the way we implement discard, the jump will only happen
+ * when the whole quad is discarded. So we can do this even for
+ * demote as it won't break its uniformity promises.
+ */
emit_discard_jump();
}
- limit_dispatch_width(16, "Fragment discard not implemented in SIMD32 mode.");
+ limit_dispatch_width(16, "Fragment discard/demote not implemented in SIMD32 mode.");
break;
}
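To make the comment about the discard jump concrete, here is a small illustrative C++ sketch (quad_may_jump is a made-up helper, not a Mesa function) of the condition that comment describes: the early exit is only taken for a 2x2 quad once every channel in it is dead, so as long as any channel in the quad is still live, its demoted neighbours keep running and derivatives keep working.

/* Illustrative sketch of the quad-level condition behind the discard jump
 * above; quad_may_jump() is a made-up name, not Mesa code.  "live" is the
 * same per-channel mask as in the earlier sketch (1 = live).
 */
#include <cstdint>
#include <cassert>

static bool quad_may_jump(uint16_t live, unsigned quad)
{
   /* Channels are grouped four per 2x2 quad here.  The jump is taken only
    * when every channel in the quad is dead (discarded, demoted, or a
    * dispatch helper), so no remaining channel can still ask for
    * derivatives from this quad.
    */
   uint16_t quad_bits = (live >> (quad * 4)) & 0xf;
   return quad_bits == 0;
}

int main()
{
   /* SIMD8, two quads: quad 0 fully dead, quad 1 has one live channel. */
   uint16_t live = 0x0020;
   assert(quad_may_jump(live, 0));    /* whole quad discarded: safe to exit */
   assert(!quad_may_jump(live, 1));   /* a live channel remains: keep going */
   return 0;
}

Because a quad with any live channel never takes the jump, emitting it for demote cannot throw away helper data that a still-live invocation might need.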