author     Samuel Pitoiset <[email protected]>  2018-10-10 10:42:19 +0200
committer  Samuel Pitoiset <[email protected]>  2018-10-12 08:42:08 +0200
commit     4b74f05f6b635a8359d1e4a59c8d0cd69d1d9e30
tree       b83fac90d9630d363e4245b80b598f564576d45d /src/compiler
parent     26a10e3844f1e055d45e56e55ee0960cf0f82244
spirv/nir: handle memory access qualifiers for SSBO loads/stores
v2: - change how the access qualifiers are accumulated
v3: - duplicate members in struct_member_decoration_cb()
- handle access qualifiers on variables
- remove access qualifiers handling in _vtn_variable_load_store()
- fix setting access qualifiers on type->array_element
Signed-off-by: Samuel Pitoiset <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
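For context on how the new ACCESS index can be consumed (this sketch is not part of the commit): once load_ssbo/store_ssbo carry an ACCESS const_index, a NIR backend can read the accumulated qualifiers back off the intrinsic. A minimal sketch, assuming the nir_intrinsic_access() getter generated for the ACCESS index and the gl_access_qualifier flags used in the patch; the helper name is made up for illustration:

#include "nir.h"

/* Hypothetical helper, not part of this patch: returns true when an SSBO load
 * was decorated NonWritable in SPIR-V (GLSL "readonly"), assuming the ACCESS
 * index added here and the generated nir_intrinsic_access() getter. */
static bool
ssbo_load_is_readonly(const nir_intrinsic_instr *intrin)
{
   if (intrin->intrinsic != nir_intrinsic_load_ssbo)
      return false;

   /* The ACCESS const_index carries the accumulated SPIR-V access qualifiers. */
   enum gl_access_qualifier access = nir_intrinsic_access(intrin);

   return (access & ACCESS_NON_WRITEABLE) != 0;
}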
Diffstat (limited to 'src/compiler')
-rw-r--r--  src/compiler/nir/nir_intrinsics.py |  4
-rw-r--r--  src/compiler/spirv/spirv_to_nir.c  | 24
-rw-r--r--  src/compiler/spirv/vtn_private.h   |  9
-rw-r--r--  src/compiler/spirv/vtn_variables.c | 54
4 files changed, 77 insertions, 14 deletions
diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index b06b38fc2ce..ec3049ca06d 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -565,7 +565,7 @@ intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
           indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
 
 # src[] = { buffer_index, offset }. No const_index
-load("ssbo", 2, flags=[CAN_ELIMINATE])
+load("ssbo", 2, flags=[CAN_ELIMINATE], indices=[ACCESS])
 # src[] = { offset }. const_index[] = { base, component }
 load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE])
 # src[] = { vertex, offset }. const_index[] = { base }
@@ -591,6 +591,6 @@ store("output", 2, [BASE, WRMASK, COMPONENT])
 # const_index[] = { base, write_mask, component }
 store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT])
 # src[] = { value, block_index, offset }. const_index[] = { write_mask }
-store("ssbo", 3, [WRMASK])
+store("ssbo", 3, [WRMASK, ACCESS])
 # src[] = { value, offset }. const_index[] = { base, write_mask }
 store("shared", 2, [BASE, WRMASK])
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 2ad83196e46..37a801037b9 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -669,6 +669,16 @@ mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
 }
 
 static void
+vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
+                            int member, enum gl_access_qualifier access)
+{
+   type->members[member] = vtn_type_copy(b, type->members[member]);
+   type = type->members[member];
+
+   type->access |= access;
+}
+
+static void
 struct_member_decoration_cb(struct vtn_builder *b,
                             struct vtn_value *val, int member,
                             const struct vtn_decoration *dec, void *void_ctx)
@@ -681,13 +691,21 @@ struct_member_decoration_cb(struct vtn_builder *b,
    assert(member < ctx->num_fields);
 
    switch (dec->decoration) {
+   case SpvDecorationRelaxedPrecision:
+   case SpvDecorationUniform:
+      break; /* FIXME: Do nothing with this for now. */
    case SpvDecorationNonWritable:
+      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
+      break;
    case SpvDecorationNonReadable:
-   case SpvDecorationRelaxedPrecision:
+      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
+      break;
    case SpvDecorationVolatile:
+      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
+      break;
    case SpvDecorationCoherent:
-   case SpvDecorationUniform:
-      break; /* FIXME: Do nothing with this for now. */
+      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
+      break;
    case SpvDecorationNoPerspective:
       ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
       break;
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index a31202d1295..da7a04ce59f 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -296,6 +296,9 @@ struct vtn_type {
    /* for arrays, matrices and pointers, the array stride */
    unsigned stride;
 
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
+
    union {
       /* Members for scalar, vector, and array-like types */
       struct {
@@ -457,6 +460,9 @@ struct vtn_pointer {
    /** A (block_index, offset) pair representing a UBO or SSBO position. */
    struct nir_ssa_def *block_index;
    struct nir_ssa_def *offset;
+
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
 };
 
 struct vtn_variable {
@@ -488,6 +494,9 @@ struct vtn_variable {
     * hack at some point in the future.
     */
    struct vtn_pointer *copy_prop_sampler;
+
+   /* Access qualifiers. */
+   enum gl_access_qualifier access;
 };
 
 struct vtn_image_pointer {
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 636fdb8689a..cc3438bff23 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -89,6 +89,7 @@ vtn_access_chain_pointer_dereference(struct vtn_builder *b,
    struct vtn_access_chain *chain =
       vtn_access_chain_extend(b, base->chain, deref_chain->length);
    struct vtn_type *type = base->type;
+   enum gl_access_qualifier access = base->access;
 
    /* OpPtrAccessChain is only allowed on things which support variable
     * pointers. For everything else, the client is expected to just pass us
@@ -106,6 +107,8 @@ vtn_access_chain_pointer_dereference(struct vtn_builder *b,
       } else {
          type = type->array_element;
       }
+
+      access |= type->access;
    }
 
    struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
@@ -114,6 +117,7 @@ vtn_access_chain_pointer_dereference(struct vtn_builder *b,
    ptr->var = base->var;
    ptr->deref = base->deref;
    ptr->chain = chain;
+   ptr->access = access;
 
    return ptr;
 }
@@ -184,6 +188,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
    nir_ssa_def *block_index = base->block_index;
    nir_ssa_def *offset = base->offset;
    struct vtn_type *type = base->type;
+   enum gl_access_qualifier access = base->access;
 
    unsigned idx = 0;
    if (base->mode == vtn_variable_mode_ubo ||
@@ -198,6 +203,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
             idx++;
             /* This consumes a level of type */
             type = type->array_element;
+            access |= type->access;
          } else {
             /* This is annoying. We've been asked for a pointer to the
              * array of UBOs/SSBOs and not a specifc buffer. Return a
@@ -319,6 +325,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
             vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
          offset = nir_iadd(&b->nb, offset, elem_offset);
          type = type->array_element;
+         access |= type->access;
          break;
       }
 
@@ -328,6 +335,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
          nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
          offset = nir_iadd(&b->nb, offset, mem_offset);
          type = type->members[member];
+         access |= type->access;
          break;
       }
 
@@ -341,6 +349,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
    ptr->type = type;
    ptr->block_index = block_index;
    ptr->offset = offset;
+   ptr->access = access;
 
    return ptr;
 }
@@ -370,6 +379,7 @@ vtn_pointer_for_variable(struct vtn_builder *b,
    vtn_assert(ptr_type->deref->type == var->type->type);
    pointer->ptr_type = ptr_type;
    pointer->var = var;
+   pointer->access = var->access | var->type->access;
 
    return pointer;
 }
@@ -608,7 +618,8 @@ static void
 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
-                     struct vtn_ssa_value **inout, const struct glsl_type *type)
+                     struct vtn_ssa_value **inout, const struct glsl_type *type,
+                     enum gl_access_qualifier access)
 {
    nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
    instr->num_components = glsl_get_vector_elements(type);
@@ -624,6 +635,11 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
       nir_intrinsic_set_range(instr, access_size);
    }
 
+   if (op == nir_intrinsic_load_ssbo ||
+       op == nir_intrinsic_store_ssbo) {
+      nir_intrinsic_set_access(instr, access);
+   }
+
    if (index)
       instr->src[src++] = nir_src_for_ssa(index);
 
@@ -654,7 +670,8 @@ static void
 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                       nir_ssa_def *index, nir_ssa_def *offset,
                       unsigned access_offset, unsigned access_size,
-                      struct vtn_type *type, struct vtn_ssa_value **inout)
+                      struct vtn_type *type, enum gl_access_qualifier access,
+                      struct vtn_ssa_value **inout)
 {
    if (load && *inout == NULL)
       *inout = vtn_create_ssa_value(b, type->type);
@@ -704,7 +721,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             _vtn_load_store_tail(b, op, load, index, elem_offset,
                                  access_offset, access_size,
                                  &(*inout)->elems[i],
-                                 glsl_vector_type(base_type, vec_width));
+                                 glsl_vector_type(base_type, vec_width),
+                                 type->access | access);
          }
 
          if (load && type->row_major)
@@ -717,7 +735,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
          vtn_assert(glsl_type_is_vector_or_scalar(type->type));
          _vtn_load_store_tail(b, op, load, index, offset,
                               access_offset, access_size,
-                              inout, type->type);
+                              inout, type->type,
+                              type->access | access);
       } else {
          /* This is a strided load. We have to load N things separately.
           * This is the single column of a row-major matrix case.
@@ -738,7 +757,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             comp = &temp_val;
             _vtn_load_store_tail(b, op, load, index, elem_offset,
                                  access_offset, access_size,
-                                 &comp, glsl_scalar_type(base_type));
+                                 &comp, glsl_scalar_type(base_type),
+                                 type->access | access);
             per_comp[i] = comp->def;
          }
 
@@ -758,7 +778,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
-                               type->array_element, &(*inout)->elems[i]);
+                               type->array_element,
+                               type->array_element->access | access,
+                               &(*inout)->elems[i]);
       }
       return;
    }
@@ -770,7 +792,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
-                               type->members[i], &(*inout)->elems[i]);
+                               type->members[i],
+                               type->members[i]->access | access,
+                               &(*inout)->elems[i]);
       }
       return;
    }
@@ -809,7 +833,7 @@ vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
    struct vtn_ssa_value *value = NULL;
    _vtn_block_load_store(b, op, true, index, offset,
                          access_offset, access_size,
-                         src->type, &value);
+                         src->type, src->access, &value);
    return value;
 }
 
@@ -833,7 +857,7 @@ vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
    offset = vtn_pointer_to_offset(b, dst, &index);
 
    _vtn_block_load_store(b, op, false, index, offset,
-                         0, 0, dst->type, &src);
+                         0, 0, dst->type, dst->access, &src);
 }
 
 static void
@@ -1389,6 +1413,18 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
    case SpvDecorationOffset:
       vtn_var->offset = dec->literals[0];
       break;
+   case SpvDecorationNonWritable:
+      vtn_var->access |= ACCESS_NON_WRITEABLE;
+      break;
+   case SpvDecorationNonReadable:
+      vtn_var->access |= ACCESS_NON_READABLE;
+      break;
+   case SpvDecorationVolatile:
+      vtn_var->access |= ACCESS_VOLATILE;
+      break;
+   case SpvDecorationCoherent:
+      vtn_var->access |= ACCESS_COHERENT;
+      break;
    default:
       break;
    }
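A note on the accumulation rule (illustration only, not part of the diff): the qualifiers are ORed together from every level that gets dereferenced, so a member access inherits both the variable's and the member's decorations. A minimal sketch of that rule using the structures this patch extends; the helper below is hypothetical:

#include "vtn_private.h"

/* Hypothetical illustration of the accumulation the patch performs: the base
 * pointer already carries var->access | var->type->access (set in
 * vtn_pointer_for_variable), and each dereference ORs in the qualifiers of the
 * member or array element being selected, matching the access |= type->access
 * lines in the diff above. */
static enum gl_access_qualifier
accumulate_member_access(const struct vtn_pointer *base,
                         const struct vtn_type *member_type)
{
   return base->access | member_type->access;
}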