summaryrefslogtreecommitdiffstats
path: root/module
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2016-06-28 13:31:21 -0700
committerBrian Behlendorf <[email protected]>2016-06-29 11:22:22 -0700
commit0dab2e84fcecff2806287efacb7c6205f346f69d (patch)
tree4813f9cb944c53140de7a12710f0ca7b12e3ec27 /module
parentd1d19c785497fafb5e239f67006f534651ed2f27 (diff)
Vectorized fletcher_4 must be 128-bit aligned
The fletcher_4_native() and fletcher_4_byteswap() functions may only safely use the vectorized implementations when the buffer is 128-bit aligned. This is because both the AVX2 and SSE implementations process four 32-bit words per iteration. Fall back to the scalar implementation, which only processes a single 32-bit word, for unaligned buffers. Signed-off-by: Brian Behlendorf <[email protected]> Signed-off-by: Gvozden Neskovic <[email protected]> Issue #4330
Diffstat (limited to 'module')
-rw-r--r--module/zcommon/zfs_fletcher.c14
1 file changed, 12 insertions, 2 deletions
diff --git a/module/zcommon/zfs_fletcher.c b/module/zcommon/zfs_fletcher.c
index 2c2d01d5c..e76c5b8a5 100644
--- a/module/zcommon/zfs_fletcher.c
+++ b/module/zcommon/zfs_fletcher.c
@@ -334,7 +334,12 @@ fletcher_4_impl_get(void)
void
fletcher_4_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
- const fletcher_4_ops_t *ops = fletcher_4_impl_get();
+ const fletcher_4_ops_t *ops;
+
+ if (IS_P2ALIGNED(size, 4 * sizeof (uint32_t)))
+ ops = fletcher_4_impl_get();
+ else
+ ops = &fletcher_4_scalar_ops;
ops->init(zcp);
ops->compute(buf, size, zcp);
@@ -345,7 +350,12 @@ fletcher_4_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
void
fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
- const fletcher_4_ops_t *ops = fletcher_4_impl_get();
+ const fletcher_4_ops_t *ops;
+
+ if (IS_P2ALIGNED(size, 4 * sizeof (uint32_t)))
+ ops = fletcher_4_impl_get();
+ else
+ ops = &fletcher_4_scalar_ops;
ops->init(zcp);
ops->compute_byteswap(buf, size, zcp);