diff options
author | Ian Romanick <[email protected]> | 2017-11-01 16:40:32 -0700 |
---|---|---|
committer | Ian Romanick <[email protected]> | 2017-11-08 18:37:29 -0800 |
commit | c18d8c61d6ecc108c1ef2012b37e9cf56da98664 (patch) | |
tree | 041f4185faee27636b74271efde71dfe97a4efb6 /src/compiler/glsl/lower_buffer_access.cpp | |
parent | 1a2beae1b35bb0d87233993d752414899719cd08 (diff) |
glsl: Use more link_calculate_matrix_stride in lower_buffer_access
I was going to squash this with the previous commit, but there's a lot
of churn in that commit.
Signed-off-by: Ian Romanick <[email protected]>
Reviewed-by: Thomas Helland <[email protected]>
Diffstat (limited to 'src/compiler/glsl/lower_buffer_access.cpp')
-rw-r--r-- | src/compiler/glsl/lower_buffer_access.cpp | 22 |
1 file changed, 2 insertions, 20 deletions
diff --git a/src/compiler/glsl/lower_buffer_access.cpp b/src/compiler/glsl/lower_buffer_access.cpp index 219e03e550d..056fd26e0db 100644 --- a/src/compiler/glsl/lower_buffer_access.cpp +++ b/src/compiler/glsl/lower_buffer_access.cpp @@ -121,26 +121,8 @@ lower_buffer_access::emit_access(void *mem_ctx, row_major, deref->type, packing, writemask_for_size(col_deref->type->vector_elements)); } else { - int size_mul; - - /* std430 doesn't round up vec2 size to a vec4 size */ - if (packing == GLSL_INTERFACE_PACKING_STD430 && - deref->type->vector_elements == 2 && - !deref->type->is_64bit()) { - size_mul = 8; - } else { - /* std140 always rounds the stride of arrays (and matrices) to a - * vec4, so matrices are always 16 between columns/rows. With - * doubles, they will be 32 apart when there are more than 2 rows. - * - * For both std140 and std430, if the member is a - * three-'component vector with components consuming N basic - * machine units, the base alignment is 4N. For vec4, base - * alignment is 4N. - */ - size_mul = (deref->type->is_64bit() && - deref->type->vector_elements > 2) ? 32 : 16; - } + const int size_mul = + link_calculate_matrix_stride(deref->type, row_major, packing); emit_access(mem_ctx, is_write, col_deref, base_offset, deref_offset + i * size_mul, |