author    | Ilia Mirkin <[email protected]> | 2015-12-28 14:03:50 -0500
committer | Ilia Mirkin <[email protected]> | 2016-01-29 21:05:08 -0500
commit    | 35f8488668fe055ffc8d0f9f897fa73e28e5e00d (patch)
tree      | 7a87a460a916aadce6e9ce4df5f26211cbb1c893 /src/compiler
parent    | 2b089c7ffe2c46a5d5d01ec315fb3e5a07695d44 (diff)
glsl: keep track of ssbo variable being accessed, add access params
Currently any access params (coherent/volatile/restrict) are being lost
when lowering to the ssbo load/store intrinsics. Keep track of the
variable being used, and bake its access params in as the last arg of
the load/store intrinsics.
If the variable is accessed via an instance block, then 'variable' points
to the instance block variable rather than the field inside the instance
block that is actually being accessed. In order to check the access
parameters of the field itself, we need to detect this case and keep track
of the corresponding struct field, so that the field-specific access
information can be extracted from there instead.
Signed-off-by: Ilia Mirkin <[email protected]>
Reviewed-by: Marek Olšák <[email protected]> (v1)
v1 -> v2: add tracking of struct field
v2 -> v3: minor adjustments based on Iago's feedback
Reviewed-by: Iago Toral Quiroga <[email protected]>
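
The mechanism described above reduces to a small bitmask computation: pick the qualifier bits from either the struct field (instance-block case) or the variable itself, and fold them into the uint constant appended to the load/store intrinsic. The standalone C++ sketch below illustrates that logic under stated assumptions; `pack_buffer_access`, `access_bits`, and `ssbo_access_params_sketch` are names invented for this example, while the `ACCESS_*` values mirror the `gl_buffer_access_qualifier` enum added to `shader_enums.h` by this patch.

```cpp
#include <cassert>
#include <cstdint>

/* Mirrors the gl_buffer_access_qualifier enum added to shader_enums.h. */
enum gl_buffer_access_qualifier {
   ACCESS_COHERENT = 1,
   ACCESS_RESTRICT = 2,
   ACCESS_VOLATILE = 4,
};

/* Illustrative stand-in for the qualifier bits carried on an ir_variable
 * (var->data.image_*) or on a glsl_struct_field (field->image_*). */
struct access_bits {
   bool coherent;
   bool restrict_access;
   bool volatile_access;
};

/* Packs the qualifiers into the uint constant that the lowering pass
 * appends as the last argument of the SSBO load/store intrinsic. */
static uint32_t
pack_buffer_access(const access_bits &bits)
{
   return (bits.coherent        ? ACCESS_COHERENT : 0) |
          (bits.restrict_access ? ACCESS_RESTRICT : 0) |
          (bits.volatile_access ? ACCESS_VOLATILE : 0);
}

/* When the SSBO is declared through an interface instance block, the
 * qualifiers live on the struct field being accessed rather than on the
 * block variable itself, so the field's bits are used in that case. */
static uint32_t
ssbo_access_params_sketch(bool is_interface_instance,
                          const access_bits *field_bits,
                          const access_bits &var_bits)
{
   if (is_interface_instance) {
      assert(field_bits);
      return pack_buffer_access(*field_bits);
   }
   return pack_buffer_access(var_bits);
}

int main()
{
   access_bits field = { true, false, true };   /* coherent + volatile field */
   access_bits var   = { false, true, false };  /* restrict on the variable  */

   /* Instance-block case: the field's qualifiers win. */
   assert(ssbo_access_params_sketch(true, &field, var) ==
          (ACCESS_COHERENT | ACCESS_VOLATILE));

   /* Plain SSBO variable: its own qualifiers are used. */
   assert(ssbo_access_params_sketch(false, nullptr, var) == ACCESS_RESTRICT);
   return 0;
}
```

In the actual patch this selection is performed by `lower_ubo_reference_visitor::ssbo_access_params()` using the `struct_field->image_*` and `variable->data.image_*` bits, as shown in the diff below.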
Diffstat (limited to 'src/compiler')
-rw-r--r-- | src/compiler/glsl/lower_buffer_access.cpp    |  6
-rw-r--r-- | src/compiler/glsl/lower_buffer_access.h      |  1
-rw-r--r-- | src/compiler/glsl/lower_shared_reference.cpp |  6
-rw-r--r-- | src/compiler/glsl/lower_ubo_reference.cpp    | 40
-rw-r--r-- | src/compiler/shader_enums.h                  | 10
5 files changed, 57 insertions, 6 deletions
diff --git a/src/compiler/glsl/lower_buffer_access.cpp b/src/compiler/glsl/lower_buffer_access.cpp
index f8c8d140ea8..9ad811de9f1 100644
--- a/src/compiler/glsl/lower_buffer_access.cpp
+++ b/src/compiler/glsl/lower_buffer_access.cpp
@@ -327,6 +327,7 @@ lower_buffer_access::setup_buffer_access(void *mem_ctx,
                                          unsigned *const_offset,
                                          bool *row_major,
                                          int *matrix_columns,
+                                         const glsl_struct_field **struct_field,
                                          unsigned packing)
 {
    *offset = new(mem_ctx) ir_constant(0u);
@@ -442,8 +443,11 @@ lower_buffer_access::setup_buffer_access(void *mem_ctx,
             intra_struct_offset = glsl_align(intra_struct_offset, field_align);
 
             if (strcmp(struct_type->fields.structure[i].name,
-                       deref_record->field) == 0)
+                       deref_record->field) == 0) {
+               if (struct_field)
+                  *struct_field = &struct_type->fields.structure[i];
                break;
+            }
 
             if (packing == GLSL_INTERFACE_PACKING_STD430)
                intra_struct_offset += type->std430_size(field_row_major);
diff --git a/src/compiler/glsl/lower_buffer_access.h b/src/compiler/glsl/lower_buffer_access.h
index cc4614e9792..8772bdb76ff 100644
--- a/src/compiler/glsl/lower_buffer_access.h
+++ b/src/compiler/glsl/lower_buffer_access.h
@@ -57,6 +57,7 @@ public:
    void setup_buffer_access(void *mem_ctx, ir_variable *var,
                             ir_rvalue *deref, ir_rvalue **offset,
                             unsigned *const_offset, bool *row_major,
                             int *matrix_columns,
+                            const glsl_struct_field **struct_field,
                             unsigned packing);
 };
diff --git a/src/compiler/glsl/lower_shared_reference.cpp b/src/compiler/glsl/lower_shared_reference.cpp
index 533cd9202f4..12499695882 100644
--- a/src/compiler/glsl/lower_shared_reference.cpp
+++ b/src/compiler/glsl/lower_shared_reference.cpp
@@ -142,7 +142,7 @@ lower_shared_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
 
    setup_buffer_access(mem_ctx, var, deref,
                        &offset, &const_offset,
-                       &row_major, &matrix_columns, packing);
+                       &row_major, &matrix_columns, NULL, packing);
 
    /* Now that we've calculated the offset to the start of the
    * dereference, walk over the type and emit loads into a temporary.
@@ -210,7 +210,7 @@ lower_shared_reference_visitor::handle_assignment(ir_assignment *ir)
 
    setup_buffer_access(mem_ctx, var, deref,
                        &offset, &const_offset,
-                       &row_major, &matrix_columns, packing);
+                       &row_major, &matrix_columns, NULL, packing);
 
    deref = new(mem_ctx) ir_dereference_variable(store_var);
 
@@ -370,7 +370,7 @@ lower_shared_reference_visitor::lower_shared_atomic_intrinsic(ir_call *ir)
 
    setup_buffer_access(mem_ctx, var, deref,
                        &offset, &const_offset,
-                       &row_major, &matrix_columns, packing);
+                       &row_major, &matrix_columns, NULL, packing);
 
    assert(offset);
    assert(!row_major);
diff --git a/src/compiler/glsl/lower_ubo_reference.cpp b/src/compiler/glsl/lower_ubo_reference.cpp
index a172054bac8..d6269f7cbac 100644
--- a/src/compiler/glsl/lower_ubo_reference.cpp
+++ b/src/compiler/glsl/lower_ubo_reference.cpp
@@ -45,7 +45,7 @@ class lower_ubo_reference_visitor :
       public lower_buffer_access::lower_buffer_access {
 public:
    lower_ubo_reference_visitor(struct gl_shader *shader)
-      : shader(shader)
+      : shader(shader), struct_field(NULL), variable(NULL)
    {
    }
 
@@ -60,6 +60,7 @@ public:
                                bool *row_major, int *matrix_columns,
                                unsigned packing);
 
+   uint32_t ssbo_access_params();
    ir_expression *ubo_load(void *mem_ctx, const struct glsl_type *type,
                            ir_rvalue *offset);
    ir_call *ssbo_load(void *mem_ctx, const struct glsl_type *type,
@@ -104,6 +105,8 @@ public:
 
    struct gl_shader *shader;
    struct gl_uniform_buffer_variable *ubo_var;
+   const struct glsl_struct_field *struct_field;
+   ir_variable *variable;
    ir_rvalue *uniform_block;
    bool progress;
 };
@@ -288,8 +291,9 @@ lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
 
    *const_offset = ubo_var->Offset;
 
+   this->struct_field = NULL;
    setup_buffer_access(mem_ctx, var, deref, offset, const_offset, row_major,
-                       matrix_columns, packing);
+                       matrix_columns, &this->struct_field, packing);
 }
 
 void
@@ -317,6 +321,7 @@ lower_ubo_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
 
    this->buffer_access_type = var->is_in_shader_storage_block() ?
       ssbo_load_access : ubo_load_access;
+   this->variable = var;
 
    /* Compute the offset to the start if the dereference as well as other
    * information we need to configure the write
@@ -370,6 +375,24 @@ shader_storage_buffer_object(const _mesa_glsl_parse_state *state)
    return state->ARB_shader_storage_buffer_object_enable;
 }
 
+uint32_t
+lower_ubo_reference_visitor::ssbo_access_params()
+{
+   assert(variable);
+
+   if (variable->is_interface_instance()) {
+      assert(struct_field);
+
+      return ((struct_field->image_coherent ? ACCESS_COHERENT : 0) |
+              (struct_field->image_restrict ? ACCESS_RESTRICT : 0) |
+              (struct_field->image_volatile ? ACCESS_VOLATILE : 0));
+   } else {
+      return ((variable->data.image_coherent ? ACCESS_COHERENT : 0) |
+              (variable->data.image_restrict ? ACCESS_RESTRICT : 0) |
+              (variable->data.image_volatile ? ACCESS_VOLATILE : 0));
+   }
+}
+
 ir_call *
 lower_ubo_reference_visitor::ssbo_store(void *mem_ctx,
                                         ir_rvalue *deref,
@@ -394,6 +417,10 @@ lower_ubo_reference_visitor::ssbo_store(void *mem_ctx,
       ir_variable(glsl_type::uint_type, "write_mask" , ir_var_function_in);
    sig_params.push_tail(writemask_ref);
 
+   ir_variable *access_ref = new(mem_ctx)
+      ir_variable(glsl_type::uint_type, "access" , ir_var_function_in);
+   sig_params.push_tail(access_ref);
+
    ir_function_signature *sig = new(mem_ctx)
       ir_function_signature(glsl_type::void_type, shader_storage_buffer_object);
    assert(sig);
@@ -408,6 +435,7 @@ lower_ubo_reference_visitor::ssbo_store(void *mem_ctx,
    call_params.push_tail(offset->clone(mem_ctx, NULL));
    call_params.push_tail(deref->clone(mem_ctx, NULL));
    call_params.push_tail(new(mem_ctx) ir_constant(write_mask));
+   call_params.push_tail(new(mem_ctx) ir_constant(ssbo_access_params()));
 
    return new(mem_ctx) ir_call(sig, NULL, &call_params);
 }
@@ -426,6 +454,10 @@ lower_ubo_reference_visitor::ssbo_load(void *mem_ctx,
       ir_variable(glsl_type::uint_type, "offset_ref" , ir_var_function_in);
    sig_params.push_tail(offset_ref);
 
+   ir_variable *access_ref = new(mem_ctx)
+      ir_variable(glsl_type::uint_type, "access" , ir_var_function_in);
+   sig_params.push_tail(access_ref);
+
    ir_function_signature *sig = new(mem_ctx)
       ir_function_signature(type, shader_storage_buffer_object);
    assert(sig);
@@ -444,6 +476,7 @@ lower_ubo_reference_visitor::ssbo_load(void *mem_ctx,
    exec_list call_params;
    call_params.push_tail(this->uniform_block->clone(mem_ctx, NULL));
    call_params.push_tail(offset->clone(mem_ctx, NULL));
+   call_params.push_tail(new(mem_ctx) ir_constant(ssbo_access_params()));
 
    return new(mem_ctx) ir_call(sig, deref_result, &call_params);
 }
@@ -499,6 +532,7 @@ lower_ubo_reference_visitor::write_to_memory(void *mem_ctx,
    unsigned packing = var->get_interface_type()->interface_packing;
 
    this->buffer_access_type = ssbo_store_access;
+   this->variable = var;
 
    /* Compute the offset to the start if the dereference as well as other
    * information we need to configure the write
@@ -678,6 +712,7 @@ lower_ubo_reference_visitor::process_ssbo_unsized_array_length(ir_rvalue **rvalu
    int unsized_array_stride = calculate_unsized_array_stride(deref, packing);
 
    this->buffer_access_type = ssbo_unsized_array_length_access;
+   this->variable = var;
 
    /* Compute the offset to the start if the dereference as well as other
    * information we need to calculate the length.
@@ -910,6 +945,7 @@ lower_ubo_reference_visitor::lower_ssbo_atomic_intrinsic(ir_call *ir)
    unsigned packing = var->get_interface_type()->interface_packing;
 
    this->buffer_access_type = ssbo_atomic_access;
+   this->variable = var;
 
    setup_for_load_or_store(mem_ctx, var, deref,
                            &offset, &const_offset,
diff --git a/src/compiler/shader_enums.h b/src/compiler/shader_enums.h
index efc0b0d515e..d4326c55a9d 100644
--- a/src/compiler/shader_enums.h
+++ b/src/compiler/shader_enums.h
@@ -535,6 +535,16 @@ enum gl_frag_depth_layout
    FRAG_DEPTH_LAYOUT_UNCHANGED
 };
 
+/**
+ * \brief Buffer access qualifiers
+ */
+enum gl_buffer_access_qualifier
+{
+   ACCESS_COHERENT = 1,
+   ACCESS_RESTRICT = 2,
+   ACCESS_VOLATILE = 4,
+};
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif