summaryrefslogtreecommitdiffstats
path: root/src/compiler
diff options
context:
space:
mode:
authorPlamena Manolova <[email protected]>2018-04-27 14:12:30 +0100
committerPlamena Manolova <[email protected]>2018-06-01 16:36:36 +0100
commit60e843c4d5a5688196d13611a357cdc5b1b1141d (patch)
treed8df5f37248c6b7f8622b0e8bf1fa643b9f147f5 /src/compiler
parent53719f818cf320add55dc7ed3612725c2f6128ce (diff)
mesa: Add GL/GLSL plumbing for ARB_fragment_shader_interlock.
This extension provides new GLSL built-in functions beginInvocationInterlockARB() and endInvocationInterlockARB() that delimit a critical section of fragment shader code. For pairs of shader invocations with "overlapping" coverage in a given pixel, the OpenGL implementation will guarantee that the critical section of the fragment shader will be executed for only one fragment at a time. Signed-off-by: Plamena Manolova <[email protected]> Reviewed-by: Francisco Jerez <[email protected]>
Diffstat (limited to 'src/compiler')
-rw-r--r--src/compiler/glsl/ast.h10
-rw-r--r--src/compiler/glsl/ast_to_hir.cpp10
-rw-r--r--src/compiler/glsl/ast_type.cpp39
-rw-r--r--src/compiler/glsl/builtin_functions.cpp54
-rw-r--r--src/compiler/glsl/glsl_parser.yy30
-rw-r--r--src/compiler/glsl/glsl_parser_extras.cpp13
-rw-r--r--src/compiler/glsl/glsl_parser_extras.h7
-rw-r--r--src/compiler/glsl/glsl_to_nir.cpp12
-rw-r--r--src/compiler/glsl/ir.h2
-rw-r--r--src/compiler/glsl/linker.cpp8
-rw-r--r--src/compiler/nir/nir_intrinsics.py2
-rw-r--r--src/compiler/shader_info.h5
12 files changed, 191 insertions, 1 deletion
diff --git a/src/compiler/glsl/ast.h b/src/compiler/glsl/ast.h
index 9b88ff51d48..4d5e045b82c 100644
--- a/src/compiler/glsl/ast.h
+++ b/src/compiler/glsl/ast.h
@@ -626,6 +626,16 @@ struct ast_type_qualifier {
* Flag set if GL_ARB_post_depth_coverage layout qualifier is used.
*/
unsigned post_depth_coverage:1;
+
+ /**
+    * Flags for the layout qualifiers added by ARB_fragment_shader_interlock
+ */
+
+ unsigned pixel_interlock_ordered:1;
+ unsigned pixel_interlock_unordered:1;
+ unsigned sample_interlock_ordered:1;
+ unsigned sample_interlock_unordered:1;
+
/**
     * Flag set if GL_INTEL_conservative_rasterization layout qualifier
* is used.
diff --git a/src/compiler/glsl/ast_to_hir.cpp b/src/compiler/glsl/ast_to_hir.cpp
index 3bf581571e2..dd60a2a87fd 100644
--- a/src/compiler/glsl/ast_to_hir.cpp
+++ b/src/compiler/glsl/ast_to_hir.cpp
@@ -3897,6 +3897,16 @@ apply_layout_qualifier_to_variable(const struct ast_type_qualifier *qual,
if (state->has_bindless())
apply_bindless_qualifier_to_variable(qual, var, state, loc);
+
+ if (qual->flags.q.pixel_interlock_ordered ||
+ qual->flags.q.pixel_interlock_unordered ||
+ qual->flags.q.sample_interlock_ordered ||
+ qual->flags.q.sample_interlock_unordered) {
+ _mesa_glsl_error(loc, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ }
}
static void
diff --git a/src/compiler/glsl/ast_type.cpp b/src/compiler/glsl/ast_type.cpp
index 14ea936f244..c2b6e6b3c2b 100644
--- a/src/compiler/glsl/ast_type.cpp
+++ b/src/compiler/glsl/ast_type.cpp
@@ -637,6 +637,10 @@ ast_type_qualifier::validate_in_qualifier(YYLTYPE *loc,
valid_in_mask.flags.q.early_fragment_tests = 1;
valid_in_mask.flags.q.inner_coverage = 1;
valid_in_mask.flags.q.post_depth_coverage = 1;
+ valid_in_mask.flags.q.pixel_interlock_ordered = 1;
+ valid_in_mask.flags.q.pixel_interlock_unordered = 1;
+ valid_in_mask.flags.q.sample_interlock_ordered = 1;
+ valid_in_mask.flags.q.sample_interlock_unordered = 1;
break;
case MESA_SHADER_COMPUTE:
valid_in_mask.flags.q.local_size = 7;
@@ -708,6 +712,35 @@ ast_type_qualifier::merge_into_in_qualifier(YYLTYPE *loc,
r = false;
}
+ if (state->in_qualifier->flags.q.pixel_interlock_ordered) {
+ state->fs_pixel_interlock_ordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.pixel_interlock_unordered) {
+ state->fs_pixel_interlock_unordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_unordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_ordered) {
+ state->fs_sample_interlock_ordered = true;
+ state->in_qualifier->flags.q.sample_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_unordered) {
+ state->fs_sample_interlock_unordered = true;
+ state->in_qualifier->flags.q.sample_interlock_unordered = false;
+ }
+
+ if (state->fs_pixel_interlock_ordered +
+ state->fs_pixel_interlock_unordered +
+ state->fs_sample_interlock_ordered +
+ state->fs_sample_interlock_unordered > 1) {
+ _mesa_glsl_error(loc, state,
+ "only one interlock mode can be used at any time.");
+ r = false;
+ }
+
/* We allow the creation of multiple cs_input_layout nodes. Coherence among
* all existing nodes is checked later, when the AST node is transformed
* into HIR.
@@ -776,7 +809,7 @@ ast_type_qualifier::validate_flags(YYLTYPE *loc,
"%s '%s':"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
message, name,
bad.flags.q.invariant ? " invariant" : "",
bad.flags.q.precise ? " precise" : "",
@@ -840,6 +873,10 @@ ast_type_qualifier::validate_flags(YYLTYPE *loc,
bad.flags.q.bound_sampler ? " bound_sampler" : "",
bad.flags.q.bound_image ? " bound_image" : "",
bad.flags.q.post_depth_coverage ? " post_depth_coverage" : "",
+ bad.flags.q.pixel_interlock_ordered ? " pixel_interlock_ordered" : "",
+ bad.flags.q.pixel_interlock_unordered ? " pixel_interlock_unordered": "",
+ bad.flags.q.sample_interlock_ordered ? " sample_interlock_ordered": "",
+ bad.flags.q.sample_interlock_unordered ? " sample_interlock_unordered": "",
bad.flags.q.non_coherent ? " noncoherent" : "");
return false;
}
diff --git a/src/compiler/glsl/builtin_functions.cpp b/src/compiler/glsl/builtin_functions.cpp
index e1ee9943172..efe90346d0e 100644
--- a/src/compiler/glsl/builtin_functions.cpp
+++ b/src/compiler/glsl/builtin_functions.cpp
@@ -513,6 +513,12 @@ shader_ballot(const _mesa_glsl_parse_state *state)
}
static bool
+supports_arb_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_fragment_shader_interlock_enable;
+}
+
+static bool
shader_clock(const _mesa_glsl_parse_state *state)
{
return state->ARB_shader_clock_enable;
@@ -982,6 +988,14 @@ private:
ir_function_signature *_read_invocation_intrinsic(const glsl_type *type);
ir_function_signature *_read_invocation(const glsl_type *type);
+
+ ir_function_signature *_invocation_interlock_intrinsic(
+ builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_invocation_interlock(
+ const char *intrinsic_name,
+ builtin_available_predicate avail);
+
ir_function_signature *_shader_clock_intrinsic(builtin_available_predicate avail,
const glsl_type *type);
ir_function_signature *_shader_clock(builtin_available_predicate avail,
@@ -1219,6 +1233,16 @@ builtin_builder::create_intrinsics()
ir_intrinsic_memory_barrier_shared),
NULL);
+ add_function("__intrinsic_begin_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_begin_invocation_interlock), NULL);
+
+ add_function("__intrinsic_end_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_end_invocation_interlock), NULL);
+
add_function("__intrinsic_shader_clock",
_shader_clock_intrinsic(shader_clock,
glsl_type::uvec2_type),
@@ -3294,6 +3318,18 @@ builtin_builder::create_builtins()
glsl_type::uint64_t_type),
NULL);
+ add_function("beginInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_begin_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
+ add_function("endInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_end_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
add_function("anyInvocationARB",
_vote("__intrinsic_vote_any", vote),
NULL);
@@ -6228,6 +6264,24 @@ builtin_builder::_read_invocation(const glsl_type *type)
}
ir_function_signature *
+builtin_builder::_invocation_interlock_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ MAKE_INTRINSIC(glsl_type::void_type, id, avail, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_invocation_interlock(const char *intrinsic_name,
+ builtin_available_predicate avail)
+{
+ MAKE_SIG(glsl_type::void_type, avail, 0);
+ body.emit(call(shader->symbols->get_function(intrinsic_name),
+ NULL, sig->parameters));
+ return sig;
+}
+
+ir_function_signature *
builtin_builder::_shader_clock_intrinsic(builtin_available_predicate avail,
const glsl_type *type)
{
diff --git a/src/compiler/glsl/glsl_parser.yy b/src/compiler/glsl/glsl_parser.yy
index b4951a258aa..91c10ce1a60 100644
--- a/src/compiler/glsl/glsl_parser.yy
+++ b/src/compiler/glsl/glsl_parser.yy
@@ -1432,6 +1432,36 @@ layout_qualifier_id:
}
}
+ const bool pixel_interlock_ordered = match_layout_qualifier($1,
+ "pixel_interlock_ordered", state) == 0;
+ const bool pixel_interlock_unordered = match_layout_qualifier($1,
+ "pixel_interlock_unordered", state) == 0;
+ const bool sample_interlock_ordered = match_layout_qualifier($1,
+ "sample_interlock_ordered", state) == 0;
+ const bool sample_interlock_unordered = match_layout_qualifier($1,
+ "sample_interlock_unordered", state) == 0;
+
+ if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ } else if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ !state->ARB_fragment_shader_interlock_enable) {
+ _mesa_glsl_error(& @1, state,
+ "interlock layout qualifier present, but the "
+ "GL_ARB_fragment_shader_interlock extension is not "
+ "enabled.");
+ } else {
+ $$.flags.q.pixel_interlock_ordered = pixel_interlock_ordered;
+ $$.flags.q.pixel_interlock_unordered = pixel_interlock_unordered;
+ $$.flags.q.sample_interlock_ordered = sample_interlock_ordered;
+ $$.flags.q.sample_interlock_unordered = sample_interlock_unordered;
+ }
+
/* Layout qualifiers for tessellation evaluation shaders. */
if (!$$.flags.i) {
static const struct {
diff --git a/src/compiler/glsl/glsl_parser_extras.cpp b/src/compiler/glsl/glsl_parser_extras.cpp
index 25003eeccce..04eba980e0e 100644
--- a/src/compiler/glsl/glsl_parser_extras.cpp
+++ b/src/compiler/glsl/glsl_parser_extras.cpp
@@ -299,6 +299,10 @@ _mesa_glsl_parse_state::_mesa_glsl_parse_state(struct gl_context *_ctx,
this->fs_early_fragment_tests = false;
this->fs_inner_coverage = false;
this->fs_post_depth_coverage = false;
+ this->fs_pixel_interlock_ordered = false;
+ this->fs_pixel_interlock_unordered = false;
+ this->fs_sample_interlock_ordered = false;
+ this->fs_sample_interlock_unordered = false;
this->fs_blend_support = 0;
memset(this->atomic_counter_offsets, 0,
sizeof(this->atomic_counter_offsets));
@@ -630,6 +634,7 @@ static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
EXT(ARB_explicit_uniform_location),
EXT(ARB_fragment_coord_conventions),
EXT(ARB_fragment_layer_viewport),
+ EXT(ARB_fragment_shader_interlock),
EXT(ARB_gpu_shader5),
EXT(ARB_gpu_shader_fp64),
EXT(ARB_gpu_shader_int64),
@@ -1721,6 +1726,10 @@ set_shader_inout_layout(struct gl_shader *shader,
assert(!state->fs_early_fragment_tests);
assert(!state->fs_inner_coverage);
assert(!state->fs_post_depth_coverage);
+ assert(!state->fs_pixel_interlock_ordered);
+ assert(!state->fs_pixel_interlock_unordered);
+ assert(!state->fs_sample_interlock_ordered);
+ assert(!state->fs_sample_interlock_unordered);
}
for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
@@ -1842,6 +1851,10 @@ set_shader_inout_layout(struct gl_shader *shader,
shader->EarlyFragmentTests = state->fs_early_fragment_tests;
shader->InnerCoverage = state->fs_inner_coverage;
shader->PostDepthCoverage = state->fs_post_depth_coverage;
+ shader->PixelInterlockOrdered = state->fs_pixel_interlock_ordered;
+ shader->PixelInterlockUnordered = state->fs_pixel_interlock_unordered;
+ shader->SampleInterlockOrdered = state->fs_sample_interlock_ordered;
+ shader->SampleInterlockUnordered = state->fs_sample_interlock_unordered;
shader->BlendSupport = state->fs_blend_support;
break;
diff --git a/src/compiler/glsl/glsl_parser_extras.h b/src/compiler/glsl/glsl_parser_extras.h
index 5b9b6cc8621..59a173418b7 100644
--- a/src/compiler/glsl/glsl_parser_extras.h
+++ b/src/compiler/glsl/glsl_parser_extras.h
@@ -639,6 +639,8 @@ struct _mesa_glsl_parse_state {
bool ARB_fragment_coord_conventions_warn;
bool ARB_fragment_layer_viewport_enable;
bool ARB_fragment_layer_viewport_warn;
+ bool ARB_fragment_shader_interlock_enable;
+ bool ARB_fragment_shader_interlock_warn;
bool ARB_gpu_shader5_enable;
bool ARB_gpu_shader5_warn;
bool ARB_gpu_shader_fp64_enable;
@@ -833,6 +835,11 @@ struct _mesa_glsl_parse_state {
bool fs_post_depth_coverage;
+ bool fs_pixel_interlock_ordered;
+ bool fs_pixel_interlock_unordered;
+ bool fs_sample_interlock_ordered;
+ bool fs_sample_interlock_unordered;
+
unsigned fs_blend_support;
/**
diff --git a/src/compiler/glsl/glsl_to_nir.cpp b/src/compiler/glsl/glsl_to_nir.cpp
index 8e5e9c34912..dc3e822308b 100644
--- a/src/compiler/glsl/glsl_to_nir.cpp
+++ b/src/compiler/glsl/glsl_to_nir.cpp
@@ -752,6 +752,12 @@ nir_visitor::visit(ir_call *ir)
case ir_intrinsic_shader_clock:
op = nir_intrinsic_shader_clock;
break;
+ case ir_intrinsic_begin_invocation_interlock:
+ op = nir_intrinsic_begin_invocation_interlock;
+ break;
+ case ir_intrinsic_end_invocation_interlock:
+ op = nir_intrinsic_end_invocation_interlock;
+ break;
case ir_intrinsic_group_memory_barrier:
op = nir_intrinsic_group_memory_barrier;
break;
@@ -970,6 +976,12 @@ nir_visitor::visit(ir_call *ir)
instr->num_components = 2;
nir_builder_instr_insert(&b, &instr->instr);
break;
+ case nir_intrinsic_begin_invocation_interlock:
+ nir_builder_instr_insert(&b, &instr->instr);
+ break;
+ case nir_intrinsic_end_invocation_interlock:
+ nir_builder_instr_insert(&b, &instr->instr);
+ break;
case nir_intrinsic_store_ssbo: {
exec_node *param = ir->actual_parameters.get_head();
ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
diff --git a/src/compiler/glsl/ir.h b/src/compiler/glsl/ir.h
index 471d9e787a7..67b38f48eff 100644
--- a/src/compiler/glsl/ir.h
+++ b/src/compiler/glsl/ir.h
@@ -1120,6 +1120,8 @@ enum ir_intrinsic_id {
ir_intrinsic_memory_barrier_buffer,
ir_intrinsic_memory_barrier_image,
ir_intrinsic_memory_barrier_shared,
+ ir_intrinsic_begin_invocation_interlock,
+ ir_intrinsic_end_invocation_interlock,
ir_intrinsic_vote_all,
ir_intrinsic_vote_any,
diff --git a/src/compiler/glsl/linker.cpp b/src/compiler/glsl/linker.cpp
index f060c5316fa..e4bf634abe8 100644
--- a/src/compiler/glsl/linker.cpp
+++ b/src/compiler/glsl/linker.cpp
@@ -1978,6 +1978,14 @@ link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
linked_shader->Program->info.fs.post_depth_coverage |=
shader->PostDepthCoverage;
+ linked_shader->Program->info.fs.pixel_interlock_ordered |=
+ shader->PixelInterlockOrdered;
+ linked_shader->Program->info.fs.pixel_interlock_unordered |=
+ shader->PixelInterlockUnordered;
+ linked_shader->Program->info.fs.sample_interlock_ordered |=
+ shader->SampleInterlockOrdered;
+ linked_shader->Program->info.fs.sample_interlock_unordered |=
+ shader->SampleInterlockUnordered;
linked_shader->Program->sh.fs.BlendSupport |= shader->BlendSupport;
}
diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index b1754a7e50e..ac8a67f44bb 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -188,6 +188,8 @@ barrier("memory_barrier_atomic_counter")
barrier("memory_barrier_buffer")
barrier("memory_barrier_image")
barrier("memory_barrier_shared")
+barrier("begin_invocation_interlock")
+barrier("end_invocation_interlock")
# A conditional discard, with a single boolean source.
intrinsic("discard_if", src_comp=[1])
diff --git a/src/compiler/shader_info.h b/src/compiler/shader_info.h
index afc53a88405..961f0930c8d 100644
--- a/src/compiler/shader_info.h
+++ b/src/compiler/shader_info.h
@@ -179,6 +179,11 @@ typedef struct shader_info {
bool pixel_center_integer;
+ bool pixel_interlock_ordered;
+ bool pixel_interlock_unordered;
+ bool sample_interlock_ordered;
+ bool sample_interlock_unordered;
+
/** gl_FragDepth layout for ARB_conservative_depth. */
enum gl_frag_depth_layout depth_layout;
} fs;