| field | value | date |
|---|---|---|
| author | Tyler J. Stachecki <[email protected]> | 2016-07-18 17:38:55 -0400 |
| committer | Brian Behlendorf <[email protected]> | 2016-07-19 09:03:44 -0700 |
| commit | 3d11ecbddd535e37eeb751a830fb5a897881d25d | |
| tree | 019faafe30027b486c9ddc558d58b46823f6c63b /module/zcommon | |
| parent | 1b87e0f53249a17f2fbb1d5ca725e65add391ace | |
Prevent segfaults in SSE-optimized Fletcher-4
In some cases, the compiler did not respect the GNU aligned
attribute for the stack variables introduced in 35a76a0. This
resulted in a segfault on CentOS 6.7 hosts using gcc 4.4.7-17;
the underlying compiler issue was fixed in gcc 4.6.
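
As context for the failure mode, here is a minimal sketch of the pattern the affected code relied on. Names and layout are illustrative, not the exact ZFS declarations: the aligned attribute requests a 16-byte boundary, but the affected gcc releases could place the variable on the stack without honoring it, at which point the aligned `movdqa` store faults.

```c
/* Illustrative sketch only -- approximates zfs_fletcher_sse.c. */
#include <stdint.h>

struct sse_array {
	uint64_t v[2];
} __attribute__((aligned(16)));	/* gcc <= 4.5 could ignore this on the stack */

void
save_xmm0(struct sse_array *out)
{
	struct sse_array a;

	/*
	 * movdqa requires its memory operand to be 16-byte aligned;
	 * if &a.v lands on only an 8-byte boundary, the CPU raises
	 * #GP and the process dies with a segfault. (As in the real
	 * fini routine, xmm0 holds state left by an earlier loop.)
	 */
	asm volatile("movdqa %%xmm0, %0" : "=m" (a.v));
	*out = a;
}
```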
To prevent this from occurring, use unaligned loads and stores
for all stack and global memory references in the SSE-optimized
Fletcher-4 code.
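
The fix, in the same sketch form: `movdqu` performs the identical 128-bit store but accepts any operand alignment, so a mis-placed stack variable can no longer fault. On Nehalem and newer CPUs, `movdqu` on data that happens to be aligned costs roughly the same as `movdqa`, so the safety is close to free.

```c
#include <stdint.h>

struct sse_array {
	uint64_t v[2];
} __attribute__((aligned(16)));

void
save_xmm0(struct sse_array *out)
{
	struct sse_array a;

	/*
	 * movdqu tolerates any alignment, so the store succeeds even
	 * when the compiler fails to 16-byte-align 'a' on the stack.
	 */
	asm volatile("movdqu %%xmm0, %0" : "=m" (a.v));
	*out = a;
}
```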
Disable zimport testing against master where this flaw exists:
TEST_ZIMPORT_VERSIONS="installed"
Signed-off-by: Tyler J. Stachecki <[email protected]>
Signed-off-by: Gvozden Neskovic <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #4862
Diffstat (limited to 'module/zcommon')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | module/zcommon/zfs_fletcher_sse.c | 10 |

1 file changed, 5 insertions, 5 deletions
```diff
diff --git a/module/zcommon/zfs_fletcher_sse.c b/module/zcommon/zfs_fletcher_sse.c
index 734ae0853..2a4e6a3f2 100644
--- a/module/zcommon/zfs_fletcher_sse.c
+++ b/module/zcommon/zfs_fletcher_sse.c
@@ -69,12 +69,12 @@ fletcher_4_sse2_fini(zio_cksum_t *zcp)
 	struct zfs_fletcher_sse_array a, b, c, d;
 	uint64_t A, B, C, D;
 
-	asm volatile("movdqa %%xmm0, %0":"=m" (a.v));
-	asm volatile("movdqa %%xmm1, %0":"=m" (b.v));
+	asm volatile("movdqu %%xmm0, %0":"=m" (a.v));
+	asm volatile("movdqu %%xmm1, %0":"=m" (b.v));
 	asm volatile("psllq $0x2, %xmm2");
-	asm volatile("movdqa %%xmm2, %0":"=m" (c.v));
+	asm volatile("movdqu %%xmm2, %0":"=m" (c.v));
 	asm volatile("psllq $0x3, %xmm3");
-	asm volatile("movdqa %%xmm3, %0":"=m" (d.v));
+	asm volatile("movdqu %%xmm3, %0":"=m" (d.v));
 
 	kfpu_end();
 
@@ -168,7 +168,7 @@ fletcher_4_ssse3_byteswap(const void *buf, uint64_t size, zio_cksum_t *unused)
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
 
-	asm volatile("movdqa %0, %%xmm7"::"m" (mask));
+	asm volatile("movdqu %0, %%xmm7"::"m" (mask));
 	asm volatile("pxor %xmm4, %xmm4");
 
 	for (; ip < ipend; ip += 2) {
```
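
For anyone chasing a similar report, a quick hypothetical check (not part of the ZFS tree) for whether a given toolchain honors the aligned attribute for stack variables is sketched below. Note it may not reproduce on every target: the platform ABI's default stack alignment can mask the bug, and the original reports were specifically against gcc 4.4.7-17 on CentOS 6.7.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sse_array {
	uint64_t v[2];
} __attribute__((aligned(16)));

int
main(void)
{
	struct sse_array a;

	/* A failing assert here indicates the attribute was ignored. */
	assert(((uintptr_t)&a.v & 0xf) == 0);
	printf("stack alignment honored: %p\n", (void *)&a.v);
	return (0);
}
```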