about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
-rw-r--r--src/compiler/spirv/spirv_to_nir.c40
-rw-r--r--src/compiler/spirv/vtn_private.h3
2 files changed, 38 insertions, 5 deletions
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 14b76785561..2e7c32e4e99 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -1921,6 +1921,20 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
+SpvMemorySemanticsMask
+vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
+{
+ switch (sc) {
+ case SpvStorageClassStorageBuffer:
+ case SpvStorageClassPhysicalStorageBufferEXT:
+ return SpvMemorySemanticsUniformMemoryMask;
+ case SpvStorageClassWorkgroup:
+ return SpvMemorySemanticsWorkgroupMemoryMask;
+ default:
+ return SpvMemorySemanticsMaskNone;
+ }
+}
+
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
@@ -2417,6 +2431,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
}
struct vtn_image_pointer image;
+ SpvScope scope = SpvScopeInvocation;
+ SpvMemorySemanticsMask semantics = 0;
switch (opcode) {
case SpvOpAtomicExchange:
@@ -2435,10 +2451,14 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
case SpvOpAtomicOr:
case SpvOpAtomicXor:
image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+ scope = vtn_constant_uint(b, w[4]);
+ semantics = vtn_constant_uint(b, w[5]);
break;
case SpvOpAtomicStore:
image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
+ scope = vtn_constant_uint(b, w[2]);
+ semantics = vtn_constant_uint(b, w[3]);
break;
case SpvOpImageQuerySize:
@@ -2557,6 +2577,9 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
vtn_fail_with_opcode("Invalid image opcode", opcode);
}
+ /* Image operations implicitly have the Image storage memory semantics. */
+ semantics |= SpvMemorySemanticsImageMemoryMask;
+
if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
@@ -2676,6 +2699,9 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
struct vtn_pointer *ptr;
nir_intrinsic_instr *atomic;
+ SpvScope scope = SpvScopeInvocation;
+ SpvMemorySemanticsMask semantics = 0;
+
switch (opcode) {
case SpvOpAtomicLoad:
case SpvOpAtomicExchange:
@@ -2693,21 +2719,20 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
case SpvOpAtomicOr:
case SpvOpAtomicXor:
ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ scope = vtn_constant_uint(b, w[4]);
+ semantics = vtn_constant_uint(b, w[5]);
break;
case SpvOpAtomicStore:
ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
+ scope = vtn_constant_uint(b, w[2]);
+ semantics = vtn_constant_uint(b, w[3]);
break;
default:
vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
- /*
- SpvScope scope = w[4];
- SpvMemorySemanticsMask semantics = w[5];
- */
-
/* uniform as "atomic counter uniform" */
if (ptr->mode == vtn_variable_mode_uniform) {
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
@@ -2846,6 +2871,11 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
}
}
+ /* Atomic ordering operations will implicitly apply to the atomic operation
+ * storage class, so include that too.
+ */
+ semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);
+
if (opcode != SpvOpAtomicStore) {
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index c3ef3c535ef..523298d94c7 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -887,4 +887,7 @@ bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_o
bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *words, unsigned count);
+
+SpvMemorySemanticsMask vtn_storage_class_to_memory_semantics(SpvStorageClass sc);
+
#endif /* _VTN_PRIVATE_H_ */