author     Daniel Schürmann <[email protected]>   2018-05-09 20:41:23 +0200
committer  Connor Abbott <[email protected]>       2019-06-13 12:44:23 +0000
commit     7a858f274c68a93cd3e1259d292e95beec70ba53 (patch)
tree       c4f0b959f1b4c7f8df372d5596b46ea387c222f9 /src/compiler/spirv/vtn_subgroup.c
parent     ea51275e07b06b16a952d8108c3a543e38249350 (diff)
spirv/nir: add support for AMD_shader_ballot and Groups capability
This commit also renames existing AMD capabilities:
- gcn_shader -> amd_gcn_shader
- trinary_minmax -> amd_trinary_minmax

Reviewed-by: Connor Abbott <[email protected]>
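For reference, the capability rename is purely a naming change on the driver-facing side: a driver that previously advertised gcn_shader and trinary_minmax now sets the amd_-prefixed flags, next to the two capabilities this commit adds. A minimal sketch, assuming a flag struct along the lines of Mesa's spirv_supported_capabilities (the struct and field names below are illustrative, not copied from shader_info.h):

#include <stdbool.h>

/* Illustrative only: capability flags a driver might expose to the SPIR-V
 * front end, mirroring the renamed and newly added capabilities. */
struct example_spirv_caps {
   bool amd_gcn_shader;      /* previously: gcn_shader */
   bool amd_trinary_minmax;  /* previously: trinary_minmax */
   bool amd_shader_ballot;   /* new: SPV_AMD_shader_ballot */
   bool groups;              /* new: the SPIR-V Groups capability */
};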
Diffstat (limited to 'src/compiler/spirv/vtn_subgroup.c')
-rw-r--r--   src/compiler/spirv/vtn_subgroup.c   45
1 file changed, 41 insertions, 4 deletions
diff --git a/src/compiler/spirv/vtn_subgroup.c b/src/compiler/spirv/vtn_subgroup.c
index ce795ec2cb5..8339b1a4862 100644
--- a/src/compiler/spirv/vtn_subgroup.c
+++ b/src/compiler/spirv/vtn_subgroup.c
@@ -183,7 +183,8 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
val->ssa, vtn_ssa_value(b, w[3]), NULL, 0, 0);
break;
- case SpvOpGroupNonUniformBroadcast: ++w;
+ case SpvOpGroupNonUniformBroadcast:
+ case SpvOpGroupBroadcast: ++w;
case SpvOpSubgroupReadInvocationKHR:
vtn_build_subgroup_instr(b, nir_intrinsic_read_invocation,
val->ssa, vtn_ssa_value(b, w[3]),
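The ++w bump above is what lets OpGroupBroadcast and OpGroupNonUniformBroadcast fall through into the OpSubgroupReadInvocationKHR path: the group forms carry an Execution Scope word at w[3] that the KHR form does not, so advancing the word pointer makes the value and invocation-id operands line up at w[3] and w[4] for all three opcodes. A minimal standalone sketch of that pointer trick (toy word arrays, not a real SPIR-V module):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy sketch of the '++w' operand-offset trick; the word layouts follow the
 * SPIR-V spec, but the arrays below are hand-written examples. */
static void read_invocation_path(const uint32_t *w)
{
   /* Shared path expects: w[3] = value, w[4] = invocation id. */
   printf("value=%" PRIu32 " id=%" PRIu32 "\n", w[3], w[4]);
}

int main(void)
{
   /* OpSubgroupReadInvocationKHR: opcode word, result type, result id, value, id */
   const uint32_t khr[]   = { 0, 1, 2, 42, 7 };
   /* OpGroupBroadcast: opcode word, result type, result id, scope, value, id */
   const uint32_t group[] = { 0, 1, 2, 3 /* Subgroup scope */, 42, 7 };

   read_invocation_path(khr);

   const uint32_t *w = group;
   ++w;                      /* skip the extra Scope word, as the patch does */
   read_invocation_path(w);
   return 0;
}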
@@ -193,6 +194,8 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
case SpvOpGroupNonUniformAll:
case SpvOpGroupNonUniformAny:
case SpvOpGroupNonUniformAllEqual:
+ case SpvOpGroupAll:
+ case SpvOpGroupAny:
case SpvOpSubgroupAllKHR:
case SpvOpSubgroupAnyKHR:
case SpvOpSubgroupAllEqualKHR: {
@@ -201,10 +204,12 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
nir_intrinsic_op op;
switch (opcode) {
case SpvOpGroupNonUniformAll:
+ case SpvOpGroupAll:
case SpvOpSubgroupAllKHR:
op = nir_intrinsic_vote_all;
break;
case SpvOpGroupNonUniformAny:
+ case SpvOpGroupAny:
case SpvOpSubgroupAnyKHR:
op = nir_intrinsic_vote_any;
break;
@@ -232,8 +237,8 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
}
nir_ssa_def *src0;
- if (opcode == SpvOpGroupNonUniformAll ||
- opcode == SpvOpGroupNonUniformAny ||
+ if (opcode == SpvOpGroupNonUniformAll || opcode == SpvOpGroupAll ||
+ opcode == SpvOpGroupNonUniformAny || opcode == SpvOpGroupAny ||
opcode == SpvOpGroupNonUniformAllEqual) {
src0 = vtn_ssa_value(b, w[4])->def;
} else {
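The same scope-word layout explains the src0 selection above: the uniform Group votes join the NonUniform ones in reading their predicate from w[4], while the KHR subgroup votes keep it at w[3] because they carry no Execution Scope operand. A tiny hypothetical helper, for illustration only (not part of the patch):

#include <stdbool.h>

/* Hypothetical: group vote ops (uniform and non-uniform) have a Scope word at
 * w[3], pushing the predicate to w[4]; the KHR votes keep the predicate at w[3]. */
static unsigned vote_predicate_word(bool op_has_scope_operand)
{
   return op_has_scope_operand ? 4 : 3;
}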
@@ -319,13 +324,33 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
case SpvOpGroupNonUniformBitwiseXor:
case SpvOpGroupNonUniformLogicalAnd:
case SpvOpGroupNonUniformLogicalOr:
- case SpvOpGroupNonUniformLogicalXor: {
+ case SpvOpGroupNonUniformLogicalXor:
+ case SpvOpGroupIAdd:
+ case SpvOpGroupFAdd:
+ case SpvOpGroupFMin:
+ case SpvOpGroupUMin:
+ case SpvOpGroupSMin:
+ case SpvOpGroupFMax:
+ case SpvOpGroupUMax:
+ case SpvOpGroupSMax:
+ case SpvOpGroupIAddNonUniformAMD:
+ case SpvOpGroupFAddNonUniformAMD:
+ case SpvOpGroupFMinNonUniformAMD:
+ case SpvOpGroupUMinNonUniformAMD:
+ case SpvOpGroupSMinNonUniformAMD:
+ case SpvOpGroupFMaxNonUniformAMD:
+ case SpvOpGroupUMaxNonUniformAMD:
+ case SpvOpGroupSMaxNonUniformAMD: {
nir_op reduction_op;
switch (opcode) {
case SpvOpGroupNonUniformIAdd:
+ case SpvOpGroupIAdd:
+ case SpvOpGroupIAddNonUniformAMD:
reduction_op = nir_op_iadd;
break;
case SpvOpGroupNonUniformFAdd:
+ case SpvOpGroupFAdd:
+ case SpvOpGroupFAddNonUniformAMD:
reduction_op = nir_op_fadd;
break;
case SpvOpGroupNonUniformIMul:
@@ -335,21 +360,33 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
reduction_op = nir_op_fmul;
break;
case SpvOpGroupNonUniformSMin:
+ case SpvOpGroupSMin:
+ case SpvOpGroupSMinNonUniformAMD:
reduction_op = nir_op_imin;
break;
case SpvOpGroupNonUniformUMin:
+ case SpvOpGroupUMin:
+ case SpvOpGroupUMinNonUniformAMD:
reduction_op = nir_op_umin;
break;
case SpvOpGroupNonUniformFMin:
+ case SpvOpGroupFMin:
+ case SpvOpGroupFMinNonUniformAMD:
reduction_op = nir_op_fmin;
break;
case SpvOpGroupNonUniformSMax:
+ case SpvOpGroupSMax:
+ case SpvOpGroupSMaxNonUniformAMD:
reduction_op = nir_op_imax;
break;
case SpvOpGroupNonUniformUMax:
+ case SpvOpGroupUMax:
+ case SpvOpGroupUMaxNonUniformAMD:
reduction_op = nir_op_umax;
break;
case SpvOpGroupNonUniformFMax:
+ case SpvOpGroupFMax:
+ case SpvOpGroupFMaxNonUniformAMD:
reduction_op = nir_op_fmax;
break;
case SpvOpGroupNonUniformBitwiseAnd: