author | Alejandro Piñeiro <[email protected]> | 2017-07-01 08:17:09 +0200
committer | Jose Maria Casanova Crespo <[email protected]> | 2017-12-06 08:57:18 +0100
commit | 96f1926aab96a92e45f1dec226390aae46d3ea45 (patch)
tree | 1eef1a8838be99c4d57a814c1b5282b0f4b1263f /src/intel/compiler/brw_fs_nir.cpp
parent | f1a9936ee1c7d2abe641eb785786c908c5ed799c (diff)
i965/fs: Use byte_scattered_write on 16-bit store_ssbo
We need to rely on byte scattered writes because untyped writes operate on
32-bit units. We could try to keep using 32-bit messages when we have two or
four 16-bit elements, but for simplicity's sake we use the same message for
any number of components. We revisit this approach in the following patches.
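
For illustration only, here is a standalone sketch (plain C++, not Mesa code) of the writemask-splitting loop this patch modifies: ffs() finds the first enabled channel, ffs() on the inverted, down-shifted mask gives the size of the contiguous enabled block, and for 16-bit stores (type_size < 4) the block is limited to a single component per message, as described above. The masks and type_size values in main() are made-up examples.

```cpp
// Standalone sketch (not Mesa code) of the ffs-based writemask splitting
// used by the store_ssbo path touched in this patch.
#include <strings.h>   // POSIX ffs(), as used in brw_fs_nir.cpp
#include <algorithm>   // std::min
#include <cstdio>

static void split_writemask(unsigned writemask, unsigned type_size)
{
   while (writemask) {
      unsigned first_component = ffs(writemask) - 1;
      unsigned num_components = ffs(~(writemask >> first_component)) - 1;

      if (type_size > 4)              /* 64-bit: at most 2 components per write */
         num_components = std::min(2u, num_components);
      else if (type_size < 4)         /* 16-bit: one component per message */
         num_components = 1;

      printf("write %u component(s) starting at component %u\n",
             num_components, first_component);

      /* Clear the bits we just "wrote", then look for the next block. */
      writemask &= (15 << (first_component + num_components));
   }
}

int main()
{
   split_writemask(0xb /* .xyw */, 2);   /* 16-bit: three single-component writes */
   split_writemask(0xf /* .xyzw */, 8);  /* 64-bit: two two-component writes */
   return 0;
}
```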
v2: Removed use of stride = 2 on 16-bit sources (Jason Ekstrand)
v3: (Jason Ekstrand)
- Include bit_size in the scattered write message and remove the
  namespace specific to scattered messages.
- Move the comment to its proper place.
- Squashed with "i965/fs: Adjust type_size/type_slots on store_ssbo".
(Jose Maria Casanova)
- Take into account that get_nir_src now returns WORD types for 16-bit
  sources instead of DWORD.
v4: (Jason Ekstrand)
- Rename the length variable to num_components.
- Include assertions before emit_untyped_write.
- Remove type_slots in favor of num_slots and first_slot (see the
  slot-mapping sketch below).
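
As a worked example of the v4 slot handling, the sketch below (plain host C++; the values in main() are hypothetical, only the arithmetic and assertions mirror the untyped-write hunk in the diff) shows how a run of components is mapped onto 32-bit dword slots via first_slot and num_slots.

```cpp
// Sketch of the first_slot/num_slots arithmetic from the untyped-write path.
#include <cassert>
#include <cstdio>

static void components_to_slots(unsigned first_component, unsigned num_components,
                                unsigned type_size)
{
   /* The untyped write here handles at most 4 dwords (16 bytes) and only
    * whole 32-bit slots, hence the size and alignment assertions.
    */
   assert(num_components * type_size <= 16);
   assert((num_components * type_size) % 4 == 0);
   assert((first_component * type_size) % 4 == 0);

   unsigned first_slot = (first_component * type_size) / 4;
   unsigned num_slots = (num_components * type_size) / 4;
   printf("type_size=%u: components [%u, %u) map to dword slots [%u, %u)\n",
          type_size, first_component, first_component + num_components,
          first_slot, first_slot + num_slots);
}

int main()
{
   components_to_slots(0, 4, 4);   /* full vec4 of 32-bit values */
   components_to_slots(2, 2, 8);   /* the .zw half of a 64-bit vec4 */
   return 0;
}
```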
Signed-off-by: Jose Maria Casanova Crespo <[email protected]>
Signed-off-by: Alejandro Piñeiro <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
Diffstat (limited to 'src/intel/compiler/brw_fs_nir.cpp')
-rw-r--r-- | src/intel/compiler/brw_fs_nir.cpp | 65 |
1 file changed, 45 insertions, 20 deletions
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index d6ab2861478..5e3c9de141b 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -4075,30 +4075,35 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
        * Also, we have to suffle 64-bit data to be in the appropriate layout
        * expected by our 32-bit write messages.
        */
-      unsigned type_size = 4;
-      if (nir_src_bit_size(instr->src[0]) == 64) {
-         type_size = 8;
+      unsigned bit_size = nir_src_bit_size(instr->src[0]);
+      unsigned type_size = bit_size / 8;
+      if (bit_size == 64) {
          val_reg = shuffle_64bit_data_for_32bit_write(bld,
            val_reg, instr->num_components);
       }
 
-      unsigned type_slots = type_size / 4;
-
       /* Combine groups of consecutive enabled channels in one write
        * message. We use ffs to find the first enabled channel and then ffs on
-       * the bit-inverse, down-shifted writemask to determine the length of
-       * the block of enabled bits.
+       * the bit-inverse, down-shifted writemask to determine the num_components
+       * of the block of enabled bits.
        */
       while (writemask) {
          unsigned first_component = ffs(writemask) - 1;
-         unsigned length = ffs(~(writemask >> first_component)) - 1;
+         unsigned num_components = ffs(~(writemask >> first_component)) - 1;
 
-         /* We can't write more than 2 64-bit components at once. Limit the
-          * length of the write to what we can do and let the next iteration
-          * handle the rest
-          */
-         if (type_size > 4)
-            length = MIN2(2, length);
+         if (type_size > 4) {
+            /* We can't write more than 2 64-bit components at once. Limit
+             * the num_components of the write to what we can do and let the next
+             * iteration handle the rest.
+             */
+            num_components = MIN2(2, num_components);
+         } else if (type_size < 4) {
+            /* For 16-bit types we are using byte scattered writes, that can
+             * only write one component per call. So we limit the num_components,
+             * and let the write happening in several iterations.
+             */
+            num_components = 1;
+         }
 
          fs_reg offset_reg;
          nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
@@ -4112,16 +4117,36 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                     brw_imm_ud(type_size * first_component));
          }
 
-
-         emit_untyped_write(bld, surf_index, offset_reg,
-                            offset(val_reg, bld, first_component * type_slots),
-                            1 /* dims */, length * type_slots,
-                            BRW_PREDICATE_NONE);
+         if (type_size < 4) {
+            /* Untyped Surface messages have a fixed 32-bit size, so we need
+             * to rely on byte scattered in order to write 16-bit elements.
+             * The byte_scattered_write message needs that every written 16-bit
+             * type to be aligned 32-bits (stride=2).
+             */
+            fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
+            bld.MOV(subscript(tmp, BRW_REGISTER_TYPE_W, 0),
+                    offset(val_reg, bld, first_component));
+            emit_byte_scattered_write(bld, surf_index, offset_reg,
+                                      tmp,
+                                      1 /* dims */, 1,
+                                      bit_size,
+                                      BRW_PREDICATE_NONE);
+         } else {
+            assert(num_components * type_size <= 16);
+            assert((num_components * type_size) % 4 == 0);
+            assert((first_component * type_size) % 4 == 0);
+            unsigned first_slot = (first_component * type_size) / 4;
+            unsigned num_slots = (num_components * type_size) / 4;
+            emit_untyped_write(bld, surf_index, offset_reg,
+                               offset(val_reg, bld, first_slot),
+                               1 /* dims */, num_slots,
+                               BRW_PREDICATE_NONE);
+         }
 
          /* Clear the bits in the writemask that we just wrote, then try
           * again to see if more channels are left.
           */
-         writemask &= (15 << (first_component + length));
+         writemask &= (15 << (first_component + num_components));
       }
       break;
    }
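
To make the stride=2 requirement above concrete, here is a host-side illustration (plain C++, not builder IR; lane count, per-lane offsets and data are hypothetical): each 16-bit value is placed in the low word of a 32-bit lane, and the byte scattered message then stores only bit_size/8 bytes per lane.

```cpp
// Host-side sketch of the data layout prepared for byte_scattered_write.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
   const unsigned num_lanes = 8;                  /* pretend SIMD8 execution */
   uint16_t src[num_lanes] = {1, 2, 3, 4, 5, 6, 7, 8};

   /* Counterpart of MOV(subscript(tmp, W, 0), val): the 16-bit data lands in
    * the low half of each 32-bit slot, i.e. a stride=2 view of the payload.
    */
   uint32_t tmp[num_lanes];
   for (unsigned i = 0; i < num_lanes; i++)
      tmp[i] = src[i];                            /* high 16 bits are don't-care */

   /* A byte scattered write with bit_size=16 stores only the low two bytes
    * of each lane at that lane's byte offset.
    */
   const unsigned bit_size = 16;
   uint8_t ssbo[64] = {0};
   for (unsigned i = 0; i < num_lanes; i++) {
      unsigned byte_offset = i * (bit_size / 8);  /* hypothetical per-lane offsets */
      memcpy(&ssbo[byte_offset], &tmp[i], bit_size / 8);
   }

   uint16_t first;
   memcpy(&first, &ssbo[0], sizeof(first));
   printf("first stored 16-bit value: %u\n", first);
   return 0;
}
```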