author       Jason Ekstrand <[email protected]>    2017-08-22 22:01:42 -0700
committer    Jason Ekstrand <[email protected]>    2018-03-07 12:13:47 -0800
commit       68af9f04a4a3cd5d8d6777d4b2c575d5fbabda97 (patch)
tree         0f90adb822d858eeba1a44a6fa7c6f3542fdee7d /src/compiler/spirv/spirv_to_nir.c
parent       de518f38e53bceef40e4262bf7c3eeb66837fe8f (diff)
spirv: Rework barriers
Our previous handling of barriers always used the big hammer and didn't correctly emit memory barriers when specified along with a control barrier. This commit completely reworks the way we emit barriers to make things both more precise and more correct.

Reviewed-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Iago Toral Quiroga <[email protected]>
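As a rough sketch of the intended behaviour (illustrative only, not part of the patch): with this rework, an OpControlBarrier whose execution and memory scopes are both Workgroup and whose semantics include WorkgroupMemory should lower to two NIR intrinsics instead of a single catch-all barrier, roughly:

   /* Hypothetical example of the new lowering for the case described above. */
   vtn_emit_barrier(b, nir_intrinsic_barrier);              /* Workgroup execution scope */
   vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier); /* Workgroup memory scope    */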
Diffstat (limited to 'src/compiler/spirv/spirv_to_nir.c')
-rw-r--r--   src/compiler/spirv/spirv_to_nir.c   132
1 file changed, 114 insertions(+), 18 deletions(-)
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index fdb2993db51..1c52f7ff615 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -2957,36 +2957,132 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
 }
 static void
+vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
+{
+   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+   nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
+static void
+vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
+                        SpvMemorySemanticsMask semantics)
+{
+   static const SpvMemorySemanticsMask all_memory_semantics =
+      SpvMemorySemanticsUniformMemoryMask |
+      SpvMemorySemanticsWorkgroupMemoryMask |
+      SpvMemorySemanticsAtomicCounterMemoryMask |
+      SpvMemorySemanticsImageMemoryMask;
+
+   /* If we're not actually doing a memory barrier, bail */
+   if (!(semantics & all_memory_semantics))
+      return;
+
+   /* GL and Vulkan don't have these */
+   vtn_assert(scope != SpvScopeCrossDevice);
+
+   if (scope == SpvScopeSubgroup)
+      return; /* Nothing to do here */
+
+   if (scope == SpvScopeWorkgroup) {
+      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
+      return;
+   }
+
+   /* There are only two scopes left */
+   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
+
+   if ((semantics & all_memory_semantics) == all_memory_semantics) {
+      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
+      return;
+   }
+
+   /* Issue a bunch of more specific barriers */
+   uint32_t bits = semantics;
+   while (bits) {
+      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
+      switch (semantic) {
+      case SpvMemorySemanticsUniformMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
+         break;
+      case SpvMemorySemanticsWorkgroupMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
+         break;
+      case SpvMemorySemanticsAtomicCounterMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
+         break;
+      case SpvMemorySemanticsImageMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
+         break;
+      default:
+         break;
+      }
+   }
+}
+
+static void
 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
 {
-   nir_intrinsic_op intrinsic_op;
    switch (opcode) {
    case SpvOpEmitVertex:
    case SpvOpEmitStreamVertex:
-      intrinsic_op = nir_intrinsic_emit_vertex;
-      break;
    case SpvOpEndPrimitive:
-   case SpvOpEndStreamPrimitive:
-      intrinsic_op = nir_intrinsic_end_primitive;
-      break;
-   case SpvOpMemoryBarrier:
-      intrinsic_op = nir_intrinsic_memory_barrier;
-      break;
-   case SpvOpControlBarrier:
-      intrinsic_op = nir_intrinsic_barrier;
+   case SpvOpEndStreamPrimitive: {
+      nir_intrinsic_op intrinsic_op;
+      switch (opcode) {
+      case SpvOpEmitVertex:
+      case SpvOpEmitStreamVertex:
+         intrinsic_op = nir_intrinsic_emit_vertex;
+         break;
+      case SpvOpEndPrimitive:
+      case SpvOpEndStreamPrimitive:
+         intrinsic_op = nir_intrinsic_end_primitive;
+         break;
+      default:
+         unreachable("Invalid opcode");
+      }
+
+      nir_intrinsic_instr *intrin =
+         nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+      switch (opcode) {
+      case SpvOpEmitStreamVertex:
+      case SpvOpEndStreamPrimitive:
+         nir_intrinsic_set_stream_id(intrin, w[1]);
+         break;
+      default:
+         break;
+      }
+
+      nir_builder_instr_insert(&b->nb, &intrin->instr);
       break;
-   default:
-      vtn_fail("unknown barrier instruction");
    }
-   nir_intrinsic_instr *intrin =
-      nir_intrinsic_instr_create(b->shader, intrinsic_op);
+   case SpvOpMemoryBarrier: {
+      SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
+      SpvMemorySemanticsMask semantics =
+         vtn_constant_value(b, w[2])->values[0].u32[0];
+      vtn_emit_memory_barrier(b, scope, semantics);
+      return;
+   }
+
+   case SpvOpControlBarrier: {
+      SpvScope execution_scope =
+         vtn_constant_value(b, w[1])->values[0].u32[0];
+      if (execution_scope == SpvScopeWorkgroup)
+         vtn_emit_barrier(b, nir_intrinsic_barrier);
-   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
-      nir_intrinsic_set_stream_id(intrin, w[1]);
+      SpvScope memory_scope =
+         vtn_constant_value(b, w[2])->values[0].u32[0];
+      SpvMemorySemanticsMask memory_semantics =
+         vtn_constant_value(b, w[3])->values[0].u32[0];
+      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
+      break;
+   }
-   nir_builder_instr_insert(&b->nb, &intrin->instr);
+   default:
+      unreachable("unknown barrier instruction");
+   }
 }
 static unsigned