about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Stephen Blinick <[email protected]>	2018-02-11 16:11:59 -0700
committer	Brian Behlendorf <[email protected]>	2018-03-15 10:51:00 -0700
commit	8a2a9db8df7d421aedeababf7b1ecbb51642f16c (patch)
tree	d7e97704ddbb4f7ace29b3f9071396b72104aad3
parent	1a2342784aeb15049a3d4926615adf3b85f0bce4 (diff)
OpenZFS 9076 - Adjust perf test concurrency settings
ZFS Performance test concurrency should be lowered for better latency

Work by Stephen Blinick.

Nightly performance runs typically consist of two levels of concurrency;
and both are fairly high. Since the IO runs are to a ZFS filesystem,
within a zpool, which is based on some variable number of vdev's, the
amount of IO driven to each device is variable. Additionally, different
device types (HDD vs SSD, etc) can generally handle a different amount
of concurrent IO before saturating.

Nevertheless, in practice, it appears that most tests are well past the
concurrency saturation point and therefore both perform with the same
throughput, the maximum of the device. Because the queuedepth to the
device(s) is so high however, the latency is much higher than the best
possible at that throughput, and increases linearly with the increase
in concurrency. This means that changes in code that impact latency
during normal operation (before saturation) may not be apparent when a
large component of the measured latency is from the IO sitting in a
queue to be serviced. Therefore, changing the concurrency settings is
recommended

Authored by: Stephen Blinick <[email protected]>
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: John Wren Kennedy <[email protected]>
Ported-by: John Wren Kennedy <[email protected]>

OpenZFS-issue: https://www.illumos.org/issues/9076
OpenZFS-commit: https://github.com/openzfs/openzfs/pull/562
Upstream bug: DLPX-45477
Closes #7302
-rwxr-xr-xtests/zfs-tests/tests/perf/regression/random_reads.ksh13
-rwxr-xr-xtests/zfs-tests/tests/perf/regression/random_readwrite.ksh13
-rwxr-xr-xtests/zfs-tests/tests/perf/regression/random_writes.ksh13
-rwxr-xr-xtests/zfs-tests/tests/perf/regression/sequential_reads.ksh13
-rwxr-xr-xtests/zfs-tests/tests/perf/regression/sequential_writes.ksh13
5 files changed, 55 insertions, 10 deletions
diff --git a/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/tests/zfs-tests/tests/perf/regression/random_reads.ksh
index 8ec8cd67a..5e91fe0a7 100755
--- a/tests/zfs-tests/tests/perf/regression/random_reads.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_reads.ksh
@@ -25,6 +25,15 @@
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -54,13 +63,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
index cf12c9b09..446a440ec 100755
--- a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
@@ -25,6 +25,15 @@
# and used for all fio runs. The ARC is cleared with `zinject -a` prior to
# each run so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -54,13 +63,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES='' # bssplit used instead
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES='' # bssplit used instead
fi
diff --git a/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/tests/zfs-tests/tests/perf/regression/random_writes.ksh
index 8db5fbd32..e724aefe2 100755
--- a/tests/zfs-tests/tests/perf/regression/random_writes.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_writes.ksh
@@ -24,6 +24,15 @@
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -53,13 +62,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
index af53e5d1e..e60dbb181 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -25,6 +25,15 @@
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -54,13 +63,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
index 3319bd47c..d5fb89020 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -24,6 +24,15 @@
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -53,13 +62,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
fi