author    Richard Yao <[email protected]>    2022-12-05 14:00:34 -0500
committer GitHub <[email protected]>       2022-12-05 11:00:34 -0800
commit    59493b63c18ea223857066218d6a58b67eb88159 (patch)
tree      306345902afcc5b2c40eaccc21edeb638bdeb3c3 /module/zcommon/zfs_fletcher_intel.c
parent    7b9a423076f4da36b68009ced22259cd243166f6 (diff)
Micro-optimize fletcher4 calculations
When processing abds, we execute one `kfpu_begin()`/`kfpu_end()` pair on every page in the abd. This is wasteful and slows down checksum performance versus what the benchmark claimed. We correct this by moving those calls to the init and fini functions.

Also, we always check the buffer length against 0 before calling the non-scalar checksum functions, so the loop condition never needs to be evaluated on the first iteration. That allows us to micro-optimize the checksum calculations by switching to do-while loops.

Note that we do not apply that micro-optimization to the scalar implementation, because `fletcher_4_incremental_native()`/`fletcher_4_incremental_byteswap()` do not check for zero-sized buffers being passed.

Reviewed-by: Alexander Motin <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #14247
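The first change is easiest to see as a before/after skeleton. The sketch below is a simplified illustration, not the actual OpenZFS code: `ctx_t` and the function names are hypothetical stand-ins, the SIMD body is elided, and `kfpu_begin()`/`kfpu_end()` and `zio_cksum_t` are assumed to be the real OpenZFS kernel-SIMD guards and checksum type.

/*
 * Illustration only: before, every per-chunk call paid a full FPU
 * state save/restore; after, one pair brackets the whole checksum.
 */

/* Before: one kfpu_begin()/kfpu_end() pair per abd page. */
static void
compute_chunk_old(ctx_t *ctx, const void *buf, uint64_t size)
{
	kfpu_begin();
	/* ... SIMD checksum loop over buf ... */
	kfpu_end();
}

/* After: the pair moves to the init and fini functions. */
static void
checksum_init(ctx_t *ctx)
{
	kfpu_begin();
	memset(ctx, 0, sizeof (*ctx));
}

static void
checksum_fini(ctx_t *ctx, zio_cksum_t *zcp)
{
	/* ... reduce ctx into *zcp ... */
	kfpu_end();
}

The win scales with buffer size: an N-page abd now performs one FPU save/restore instead of N.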
Diffstat (limited to 'module/zcommon/zfs_fletcher_intel.c')
-rw-r--r--    module/zcommon/zfs_fletcher_intel.c    18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/module/zcommon/zfs_fletcher_intel.c b/module/zcommon/zfs_fletcher_intel.c
index 42b6309d3..c124d4928 100644
--- a/module/zcommon/zfs_fletcher_intel.c
+++ b/module/zcommon/zfs_fletcher_intel.c
@@ -51,6 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
{
+ kfpu_begin();
memset(ctx->avx, 0, 4 * sizeof (zfs_fletcher_avx_t));
}
@@ -81,6 +82,7 @@ fletcher_4_avx2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
64 * ctx->avx[3].v[3];
ZIO_SET_CHECKSUM(zcp, A, B, C, D);
+ kfpu_end();
}
#define FLETCHER_4_AVX2_RESTORE_CTX(ctx) \
@@ -106,22 +108,18 @@ fletcher_4_avx2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
const uint64_t *ip = buf;
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
- kfpu_begin();
-
FLETCHER_4_AVX2_RESTORE_CTX(ctx);
- for (; ip < ipend; ip += 2) {
+ do {
asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
- }
+ } while ((ip += 2) < ipend);
FLETCHER_4_AVX2_SAVE_CTX(ctx);
asm volatile("vzeroupper");
-
- kfpu_end();
}
static void
@@ -134,13 +132,11 @@ fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
const uint64_t *ip = buf;
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
- kfpu_begin();
-
FLETCHER_4_AVX2_RESTORE_CTX(ctx);
asm volatile("vmovdqu %0, %%ymm5" :: "m" (mask));
- for (; ip < ipend; ip += 2) {
+ do {
asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
asm volatile("vpshufb %ymm5, %ymm4, %ymm4");
@@ -148,12 +144,10 @@ fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
- }
+ } while ((ip += 2) < ipend);
FLETCHER_4_AVX2_SAVE_CTX(ctx);
asm volatile("vzeroupper");
-
- kfpu_end();
}
static boolean_t fletcher_4_avx2_valid(void)
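For reference, here is the for-to-do-while conversion in isolation. This is a standalone sketch, not the AVX2 body: the loop does a plain scalar sum over pairs of 64-bit words (so, like the real code, it assumes size is a nonzero multiple of 16). The point is the loop shape, and why the callers' size > 0 check makes it safe.

#include <stdint.h>

/*
 * A for loop (for (; ip < ipend; ip += 2)) tests the bound before the
 * first iteration; a do-while executes the body first and skips that
 * initial compare-and-branch. This is only correct because the caller
 * guarantees size > 0 -- with size == 0 the first iteration would read
 * past the end of the buffer.
 */
static uint64_t
sum_words(const void *buf, uint64_t size)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend =
	    (const uint64_t *)((const uint8_t *)ip + size);
	uint64_t acc = 0;

	do {
		acc += ip[0] + ip[1];
	} while ((ip += 2) < ipend);

	return (acc);
}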