19 files changed, 532 insertions, 193 deletions
diff --git a/tests/runfiles/perf-regression.run b/tests/runfiles/perf-regression.run
index cb068e887..d10ff8836 100644
--- a/tests/runfiles/perf-regression.run
+++ b/tests/runfiles/perf-regression.run
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [DEFAULT]
@@ -27,6 +27,6 @@ tags = ['perf']
 [tests/perf/regression]
 tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
     'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
-    'random_reads', 'random_writes', 'random_readwrite']
+    'random_reads', 'random_writes', 'random_readwrite', 'random_writes_zil']
 post =
 tags = ['perf', 'regression']
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 00326dcdc..e8def35f8 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -1599,6 +1599,31 @@ function destroy_pool #pool
 	return 0
 }
 
+# Return 0 if created successfully; $? otherwise
+#
+# $1 - dataset name
+# $2-n - dataset options
+
+function create_dataset #dataset dataset_options
+{
+	typeset dataset=$1
+
+	shift
+
+	if [[ -z $dataset ]]; then
+		log_note "Missing dataset name."
+		return 1
+	fi
+
+	if datasetexists $dataset ; then
+		destroy_dataset $dataset
+	fi
+
+	log_must zfs create $@ $dataset
+
+	return 0
+}
+
 # Return 0 if destroy successfully or the dataset exists; $? otherwise
 # Note: In local zones, this function should return 0 silently.
 #
diff --git a/tests/zfs-tests/tests/perf/fio/mkfiles.fio b/tests/zfs-tests/tests/perf/fio/mkfiles.fio
index 8289d546d..c7efda86d 100644
--- a/tests/zfs-tests/tests/perf/fio/mkfiles.fio
+++ b/tests/zfs-tests/tests/perf/fio/mkfiles.fio
@@ -21,7 +21,7 @@ ioengine=psync
 bs=1024k
 rw=write
 thread=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 numjobs=${NUMJOBS}
 filesize=${FILE_SIZE}
 buffer_compress_percentage=66
diff --git a/tests/zfs-tests/tests/perf/fio/random_reads.fio b/tests/zfs-tests/tests/perf/fio/random_reads.fio
index 25dd2ff83..79610f9b2 100644
--- a/tests/zfs-tests/tests/perf/fio/random_reads.fio
+++ b/tests/zfs-tests/tests/perf/fio/random_reads.fio
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [global]
@@ -21,7 +21,7 @@ overwrite=0
 thread=1
 rw=randread
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio
index 07090d4dc..7d01c38ad 100644
--- a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio
+++ b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio
@@ -23,7 +23,7 @@ thread=1
 rw=randrw
 rwmixread=80
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bssplit=4k/50:8k/30:128k/10:1m/10
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/random_writes.fio b/tests/zfs-tests/tests/perf/fio/random_writes.fio
index 9233a8426..5e2cb3002 100644
--- a/tests/zfs-tests/tests/perf/fio/random_writes.fio
+++ b/tests/zfs-tests/tests/perf/fio/random_writes.fio
@@ -20,7 +20,7 @@ fallocate=0
 thread=1
 rw=randwrite
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio
index b7d9fea5f..33a9a1d89 100644
--- a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio
+++ b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [global]
@@ -21,7 +21,7 @@ overwrite=0
 thread=1
 rw=read
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio
index 0ee6d091d..65a65910f 100644
--- a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio
+++ b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio
@@ -20,7 +20,7 @@ fallocate=0
 thread=1
 rw=write
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib
index e1e845ba6..c851ee32d 100644
--- a/tests/zfs-tests/tests/perf/perf.shlib
+++ b/tests/zfs-tests/tests/perf/perf.shlib
@@ -37,6 +37,96 @@ function get_sync_str
 	echo $sync_str
 }
 
+function get_suffix
+{
+	typeset threads=$1
+	typeset sync=$2
+	typeset iosize=$3
+
+	typeset sync_str=$(get_sync_str $sync)
+	typeset filesystems=$(get_nfilesystems)
+
+	typeset suffix="$sync_str.$iosize-ios"
+	suffix="$suffix.$threads-threads.$filesystems-filesystems"
+	echo $suffix
+}
+
+function do_fio_run_impl
+{
+	typeset script=$1
+	typeset do_recreate=$2
+	typeset clear_cache=$3
+
+	typeset threads=$4
+	typeset threads_per_fs=$5
+	typeset sync=$6
+	typeset iosize=$7
+
+	typeset sync_str=$(get_sync_str $sync)
+	log_note "Running with $threads $sync_str threads, $iosize ios"
+
+	if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+		log_must test $do_recreate
+		verify_threads_per_fs $threads $threads_per_fs
+	fi
+
+	if $do_recreate; then
+		recreate_perf_pool
+
+		#
+		# A value of zero for "threads_per_fs" is "special", and
+		# means a single filesystem should be used, regardless
+		# of the number of threads.
+		#
+		if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+			populate_perf_filesystems $((threads / threads_per_fs))
+		else
+			populate_perf_filesystems 1
+		fi
+	fi
+
+	if $clear_cache; then
+		# Clear the ARC
+		zpool export $PERFPOOL
+		zpool import $PERFPOOL
+	fi
+
+	if [[ -n $ZINJECT_DELAYS ]]; then
+		apply_zinject_delays
+	else
+		log_note "No per-device commands to execute."
+	fi
+
+	#
+	# Allow this to be overridden by the individual test case. This
+	# can be used to run the FIO job against something other than
+	# the default filesystem (e.g. against a clone).
+	#
+	export DIRECTORY=$(get_directory)
+	log_note "DIRECTORY: " $DIRECTORY
+
+	export RUNTIME=$PERF_RUNTIME
+	export FILESIZE=$((TOTAL_SIZE / threads))
+	export NUMJOBS=$threads
+	export SYNC_TYPE=$sync
+	export BLOCKSIZE=$iosize
+	sync
+
+	# This will be part of the output filename.
+	typeset suffix=$(get_suffix $threads $sync $iosize)
+
+	# Start the data collection
+	do_collect_scripts $suffix
+
+	# Define output file
+	typeset logbase="$(get_perf_output_dir)/$(basename \
+	    $SUDO_COMMAND)"
+	typeset outfile="$logbase.fio.$suffix"
+
+	# Start the load
+	log_must fio --output $outfile $FIO_SCRIPTS/$script
+}
+
 #
 # This function will run fio in a loop, according to the .fio file passed
 # in and a number of environment variables. The following variables can be
@@ -56,47 +146,21 @@ function do_fio_run
 	typeset script=$1
 	typeset do_recreate=$2
 	typeset clear_cache=$3
-	typeset threads sync iosize
+	typeset threads threads_per_fs sync iosize
 
 	for threads in $PERF_NTHREADS; do
-		for sync in $PERF_SYNC_TYPES; do
-			for iosize in $PERF_IOSIZES; do
-				typeset sync_str=$(get_sync_str $sync)
-				log_note "Running with $threads" \
-				    "$sync_str threads, $iosize ios"
-
-				if $do_recreate; then
-					recreate_perfpool
-					log_must zfs create $PERF_FS_OPTS \
-					    $TESTFS
-				fi
-
-				if $clear_cache; then
-					# Clear the ARC
-					zpool export $PERFPOOL
-					zpool import $PERFPOOL
-				fi
-
-				export RUNTIME=$PERF_RUNTIME
-				export FILESIZE=$((TOTAL_SIZE / threads))
-				export NUMJOBS=$threads
-				export SYNC_TYPE=$sync
-				export BLOCKSIZE=$iosize
-				sync
-
-				# Start the data collection
-				do_collect_scripts $threads $sync $iosize
-
-				# This will be part of the output filename.
-				typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
-				# Define output file
-				typeset logbase="$(get_perf_output_dir)/$(basename \
-				    $SUDO_COMMAND)"
-				typeset outfile="$logbase.fio.$suffix"
-
-				# Start the load
-				log_must fio --output $outfile $FIO_SCRIPTS/$script
+		for threads_per_fs in $PERF_NTHREADS_PER_FS; do
+			for sync in $PERF_SYNC_TYPES; do
+				for iosize in $PERF_IOSIZES; do
+					do_fio_run_impl \
+					    $script \
+					    $do_recreate \
+					    $clear_cache \
+					    $threads \
+					    $threads_per_fs \
+					    $sync \
+					    $iosize
+				done
 			done
 		done
 	done
@@ -109,17 +173,11 @@ function do_collect_scripts
 {
-	typeset threads=$1
-	typeset sync=$2
-	typeset iosize=$3
+	typeset suffix=$1
 
 	[[ -n $collect_scripts ]] || log_fail "No data collection scripts."
 	[[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."
 
-	# This will be part of the output filename.
-	typeset sync_str=$(get_sync_str $sync)
-	typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
 	# Add in user supplied scripts and logfiles, if any.
 	typeset oIFS=$IFS
 	IFS=','
@@ -152,23 +210,122 @@ function get_perf_output_dir
 	echo $dir
 }
 
+function apply_zinject_delays
+{
+	typeset idx=0
+	while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do
+		[[ -n ${ZINJECT_DELAYS[$idx]} ]] || \
+		    log_fail "No zinject delay found at index: $idx"
+
+		for disk in $DISKS; do
+			log_must zinject \
+			    -d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL
+		done
+
+		((idx += 1))
+	done
+}
+
+function clear_zinject_delays
+{
+	log_must zinject -c all
+}
+
 #
-# Destroy and create the pool used for performance tests. The
-# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool
-# configuration by specifying the pool creation command in their environment.
-# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created.
+# Destroy and create the pool used for performance tests.
 #
-function recreate_perfpool
+function recreate_perf_pool
 {
 	[[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."
 
-	poolexists $PERFPOOL && destroy_pool $PERFPOOL
+	#
+	# In case there's been some "leaked" zinject delays, or if the
+	# performance test injected some delays itself, we clear all
+	# delays before attempting to destroy the pool. Each delay
+	# places a hold on the pool, so the destroy will fail if there
+	# are any outstanding delays.
+	#
+	clear_zinject_delays
+
+	#
+	# This function handles the case where the pool already exists,
+	# and will destroy the previous pool and recreate a new pool.
+	#
+	create_pool $PERFPOOL $DISKS
+}
 
-	if [[ -n $PERFPOOL_CREATE_CMD ]]; then
-		log_must $PERFPOOL_CREATE_CMD
-	else
-		log_must eval "zpool create -f $PERFPOOL $DISKS"
-	fi
+function verify_threads_per_fs
+{
+	typeset threads=$1
+	typeset threads_per_fs=$2
+
+	log_must test -n $threads
+	log_must test -n $threads_per_fs
+
+	#
+	# A value of "0" is treated as a "special value", and it is
+	# interpreted to mean all threads will run using a single
+	# filesystem.
+	#
+	[[ $threads_per_fs -eq 0 ]] && return
+
+	#
+	# The number of threads per filesystem must be greater than
+	# zero; the special value of 0 was already handled above, so
+	# here we only need to rule out negative values.
+	#
+	log_must test $threads_per_fs -ge 0
+
+	#
+	# This restriction can be lifted later if needed, but for now,
+	# we restrict the number of threads per filesystem to a value
+	# that evenly divides the thread count. This way, the threads
+	# will be evenly distributed over all the filesystems.
+	#
+	log_must test $((threads % threads_per_fs)) -eq 0
+}
+
+function populate_perf_filesystems
+{
+	typeset nfilesystems=${1:-1}
+
+	export TESTFS=""
+	for i in $(seq 1 $nfilesystems); do
+		typeset dataset="$PERFPOOL/fs$i"
+		create_dataset $dataset $PERF_FS_OPTS
+		if [[ -z "$TESTFS" ]]; then
+			TESTFS="$dataset"
+		else
+			TESTFS="$TESTFS $dataset"
+		fi
+	done
+}
+
+function get_nfilesystems
+{
+	typeset filesystems=( $TESTFS )
+	echo ${#filesystems[@]}
+}
+
+function get_directory
+{
+	typeset filesystems=( $TESTFS )
+	typeset directory=
+
+	typeset idx=0
+	while [[ $idx -lt "${#filesystems[@]}" ]]; do
+		mountpoint=$(get_prop mountpoint "${filesystems[$idx]}")
+
+		if [[ -n $directory ]]; then
+			directory=$directory:$mountpoint
+		else
+			directory=$mountpoint
+		fi
+
+		((idx += 1))
+	done
+
+	echo $directory
 }
 
 function get_max_arc_size
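The new get_directory helper relies on a fio feature worth noting: fio's `directory` option accepts a colon-separated list of paths and distributes job clones across the listed directories. A minimal sketch of the resulting mapping, assuming a pool named perfpool with two populated filesystems and default mountpoints (all names illustrative):

    # After `populate_perf_filesystems 2`:
    #   TESTFS="perfpool/fs1 perfpool/fs2"
    export DIRECTORY=$(get_directory)    # -> /perfpool/fs1:/perfpool/fs2

    # The .fio job files pick this up via their directory=${DIRECTORY}
    # line, so the numjobs clones are spread over both mountpoints.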
diff --git a/tests/zfs-tests/tests/perf/regression/Makefile.am b/tests/zfs-tests/tests/perf/regression/Makefile.am
index c0419949d..4f045880f 100644
--- a/tests/zfs-tests/tests/perf/regression/Makefile.am
+++ b/tests/zfs-tests/tests/perf/regression/Makefile.am
@@ -3,6 +3,7 @@ dist_pkgdata_SCRIPTS = \
 	random_reads.ksh \
 	random_readwrite.ksh \
 	random_writes.ksh \
+	random_writes_zil.ksh \
 	sequential_reads_arc_cached_clone.ksh \
 	sequential_reads_arc_cached.ksh \
 	sequential_reads_dbuf_cached.ksh \
diff --git a/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/tests/zfs-tests/tests/perf/regression/random_reads.ksh
index 5bf269a85..f4e333696 100755
--- a/tests/zfs-tests/tests/perf/regression/random_reads.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_reads.ksh
@@ -41,35 +41,33 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 fi
@@ -79,6 +77,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,12 +87,20 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-	    "iostat" "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Random reads with $PERF_RUNTYPE settings"
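A quick check of the TOTAL_SIZE arithmetic used above: the fio jobs write buffers that compress at roughly 3x (buffer_compress_percentage=66 in mkfiles.fio), so sizing the logical data at 1.5x the pool's available space should land near 50% physical utilization. With a hypothetical 100G of available space:

    # avail = 100G  =>  TOTAL_SIZE = 100G * 3 / 2 = 150G of logical data;
    # at ~3x compression, 150G logical is ~50G on disk, i.e. ~50% of the pool
    export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))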
diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
index e6d6e3a11..00dd07025 100755
--- a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
@@ -41,35 +41,33 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random read-write load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
 	export PERF_IOSIZES=''		# bssplit used instead
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=''		# bssplit used instead
 fi
@@ -79,6 +77,7 @@ fi
 # a subset of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,12 +87,20 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-	    "iostat" "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Random reads and writes with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/tests/zfs-tests/tests/perf/regression/random_writes.ksh
index d85a3d98a..c84f96506 100755
--- a/tests/zfs-tests/tests/perf/regression/random_writes.ksh
+++ b/tests/zfs-tests/tests/perf/regression/random_writes.ksh
@@ -40,35 +40,33 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random write load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 fi
@@ -80,12 +78,20 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-	    "iostat" "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Random writes with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
new file mode 100755
index 000000000..4f2a49681
--- /dev/null
+++ b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
@@ -0,0 +1,90 @@
+#!/usr/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/perf/perf.shlib
+
+function cleanup
+{
+	# kill fio and iostat
+	pkill fio
+	pkill iostat
+
+	#
+	# We're using many filesystems depending on the number of
+	# threads for each test, and there's no good way to get a list
+	# of all the filesystems that should be destroyed on cleanup
+	# (i.e. the list of filesystems used for the last test ran).
+	# Thus, we simply recreate the pool as a way to destroy all
+	# filesystems and leave a fresh pool behind.
+	#
+	recreate_perf_pool
+}
+
+trap "log_fail \"Measure IO stats during random write load\"" SIGTERM
+log_onexit cleanup
+
+recreate_perf_pool
+
+# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
+
+if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
+	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
+	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
+	export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+
+elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
+	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
+	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
+	export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+fi
+
+lun_list=$(pool_to_lun_list $PERFPOOL)
+log_note "Collecting backend IO stats with lun list $lun_list"
+if is_linux; then
+	typeset perf_record_cmd="perf record -F 99 -a -g -q \
+	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
+
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
+else
+	export collect_scripts=(
+	    "kstat zfs:0 1" "kstat"
+	    "vmstat -T d 1" "vmstat"
+	    "mpstat -T d 1" "mpstat"
+	    "iostat -T d -xcnz 1" "iostat"
+	    "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "dtrace -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
+	    "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+	    "dtrace -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
+	)
+fi
+log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
+do_fio_run random_writes.fio true false
+log_pass "Measure IO stats during ZIL specific random write workload"
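To make the new PERF_NTHREADS_PER_FS dimension concrete: do_fio_run iterates it as an additional loop, so the ZIL test above, run with the hypothetical settings PERF_NTHREADS='1 4', PERF_NTHREADS_PER_FS='0 1', PERF_SYNC_TYPES='1', and PERF_IOSIZES='8k', would expand into these calls (argument order as defined by do_fio_run_impl in perf.shlib):

    # script             recreate cache threads thr/fs sync iosize
    do_fio_run_impl random_writes.fio true false 1 0 1 8k   # 1 thread, one shared fs
    do_fio_run_impl random_writes.fio true false 1 1 1 8k   # 1 thread, 1 fs
    do_fio_run_impl random_writes.fio true false 4 0 1 8k   # 4 threads, one shared fs
    do_fio_run_impl random_writes.fio true false 4 1 1 8k   # 4 threads, 4 fs, 1 each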
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
index a9c62fe5a..93c109e73 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -41,35 +41,33 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -79,6 +77,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,14 +87,22 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-	    "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-	    "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-	    "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Sequential reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index 6622ac973..5ef95c0e0 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
@@ -31,20 +31,16 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -54,12 +50,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -69,6 +67,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -78,14 +77,22 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-	    "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-	    "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-	    "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Sequential cached reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index 9ed0e4792..60f5d750d 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
@@ -37,20 +37,16 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -60,12 +56,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -75,15 +73,26 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
+#
+# Only a single filesystem is used by this test. To be defensive, we
+# double check that TESTFS only contains a single filesystem. We
+# wouldn't want to assume this was the case, and have it actually
+# contain multiple filesystems (causing cascading failures later).
+#
+log_must test $(get_nfilesystems) -eq 1
+
 log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
 create_snapshot $TESTFS $TESTSNAP
 log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
 create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
 
 #
-# Reset the TESTFS to point to the clone
+# We want to run FIO against the clone we created above, and not the
+# clone's originating filesystem. Thus, we override the default behavior
+# and explicitly set TESTFS to the clone.
 #
 export TESTFS=$PERFPOOL/$TESTCLONE
@@ -94,16 +103,24 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-	    "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-	    "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-	    "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
-log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads from $DIRECTORY with $PERF_RUNTYPE settings"
 do_fio_run sequential_reads.fio false false
 log_pass "Measure IO stats during sequential cached read load"
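Note how the clone test composes with the perf.shlib changes: DIRECTORY is recomputed from TESTFS inside do_fio_run_impl on every run, so overriding TESTFS is all it takes to point fio at the clone. A sketch of the effect, with an illustrative pool and clone name:

    export TESTFS=$PERFPOOL/$TESTCLONE     # e.g. perfpool/clone
    # later, inside do_fio_run_impl:
    export DIRECTORY=$(get_directory)      # -> /perfpool/clone
    # fio now reads the snapshot's data through the clone rather than
    # through the filesystem that originally wrote it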
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index edb7a96c9..d49da6057 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -35,18 +35,16 @@
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Ensure the working set can be cached in the dbuf cache.
 export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
@@ -56,12 +54,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
 fi
@@ -71,6 +71,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -80,16 +81,24 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-	    "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-	    "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat"
-	    "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat"
+	export collect_scripts=(
+	    "kstat zfs:0 1" "kstat"
+	    "vmstat -T d 1" "vmstat"
+	    "mpstat -T d 1" "mpstat"
+	    "iostat -T d -xcnz 1" "iostat"
 	    "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
 	    "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
-	    "dtrace -s $PERF_SCRIPTS/profile.d" "profile")
+	    "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+	)
 fi
 
 log_note "Sequential cached reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
index 01ab80d4a..e6d9ec2d5 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -37,38 +37,36 @@
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 
-log_assert "Measure IO stats during sequential write load"
-log_onexit cleanup
-
 function cleanup
 {
 	# kill fio and iostat
-	pkill ${fio##*/}
-	pkill ${iostat##*/}
-	log_must_busy zfs destroy $TESTFS
-	log_must_busy zpool destroy $PERFPOOL
+	pkill fio
+	pkill iostat
+	recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
+log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
 	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
 	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
 	export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
 	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
 	export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
 fi
@@ -80,12 +78,20 @@ if is_linux; then
 	typeset perf_record_cmd="perf record -F 99 -a -g -q \
 	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-	    "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-	    "iostat" "$perf_record_cmd" "perf")
+	export collect_scripts=(
+	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+	    "vmstat 1" "vmstat"
+	    "mpstat -P ALL 1" "mpstat"
+	    "iostat -dxyz 1" "iostat"
+	    "$perf_record_cmd" "perf"
+	)
 else
-	export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-	    "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+	export collect_scripts=(
+	    "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+	    "vmstat 1" "vmstat"
+	    "mpstat 1" "mpstat"
+	    "iostat -xcnz 1" "iostat"
+	)
 fi
 
 log_note "Sequential writes with $PERF_RUNTYPE settings"
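For reference, a sketch of driving one of these scripts by hand rather than through the test runner; the values below are illustrative, and the STF_SUITE path varies by installation (the suite normally supplies all of this via its runfiles):

    export STF_SUITE=/usr/share/zfs/zfs-tests    # illustrative install path
    export PERFPOOL=perfpool                     # pool recreated by recreate_perf_pool
    export DISKS='sdb sdc sdd'                   # consumed by create_pool
    export PERF_REGRESSION_NIGHTLY=1             # select the nightly parameter set
    ksh $STF_SUITE/tests/perf/regression/random_writes_zil.ksh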