diff options
Diffstat (limited to 'tests/zfs-tests')
25 files changed, 1295 insertions, 2 deletions
diff --git a/tests/zfs-tests/include/commands.cfg.in b/tests/zfs-tests/include/commands.cfg.in index 10d8ffd8a..cf201c7e4 100644 --- a/tests/zfs-tests/include/commands.cfg.in +++ b/tests/zfs-tests/include/commands.cfg.in @@ -27,11 +27,14 @@ export DU="@DU@" export DUMPADM="@DUMPADM@" export ECHO="@ECHO@" export EGREP="@EGREP@" +export FALSE="@FALSE@" export FDISK="@FDISK@" export FGREP="@FGREP@" export FILE="@FILE@" export FIND="@FIND@" +export FIO="@FIO@" export FORMAT="@FORMAT@" +export FREE="@FREE@" export FSCK="@FSCK@" export GETENT="@GETENT@" export GETFACL="@GETFACL@" @@ -44,6 +47,7 @@ export GROUPMOD="@GROUPMOD@" export HEAD="@HEAD@" export HOSTNAME="@HOSTNAME@" export ID="@ID@" +export IOSTAT="@IOSTAT@" export ISAINFO="@ISAINFO@" export KILL="@KILL@" export KSH="@KSH@" @@ -61,6 +65,7 @@ export MNTTAB="@MNTTAB@" export MODINFO="@MODINFO@" export MODUNLOAD="@MODUNLOAD@" export MOUNT="@MOUNT@" +export MPSTAT="@MPSTAT@" export MV="@MV@" export NAWK="@AWK@" export NEWFS="@NEWFS@" @@ -98,6 +103,7 @@ export SWAPADD="@SWAPADD@" export SYNC="@SYNC@" export TAIL="@TAIL@" export TAR="@TAR@" +export TIMEOUT="@TIMEOUT@" export TOUCH="@TOUCH@" export TR="@TR@" export TRUNCATE="@TRUNCATE@" @@ -115,6 +121,7 @@ export UNSHARE="@UNSHARE@" export USERADD="@USERADD@" export USERDEL="@USERDEL@" export USERMOD="@USERMOD@" +export VMSTAT="@VMSTAT@" export WAIT="@WAIT@" export WC="@WC@" export ZONEADM="@ZONEADM@" diff --git a/tests/zfs-tests/include/default.cfg.in b/tests/zfs-tests/include/default.cfg.in index ec2eb90f0..f6d6de0e9 100644 --- a/tests/zfs-tests/include/default.cfg.in +++ b/tests/zfs-tests/include/default.cfg.in @@ -104,12 +104,15 @@ export COMPRESSION_PROP=on export CHECKSUM_PROP=on # some common variables used by test scripts : +export FIO_SCRIPTS=$STF_SUITE/tests/perf/fio +export PERF_SCRIPTS=$STF_SUITE/tests/perf/scripts # some test pool names export TESTPOOL=testpool.$$ export TESTPOOL1=testpool1.$$ export TESTPOOL2=testpool2.$$ export TESTPOOL3=testpool3.$$ 
+export PERFPOOL=perfpool # some test file system names export TESTFS=testfs.$$ diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 0c9ddd1cf..37f173e12 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -2809,6 +2809,30 @@ function vdevs_in_pool return 0; } +function get_max +{ + typeset -l i max=$1 + shift + + for i in "$@"; do + max=$(echo $((max > i ? max : i))) + done + + echo $max +} + +function get_min +{ + typeset -l i min=$1 + shift + + for i in "$@"; do + min=$(echo $((min < i ? min : i))) + done + + echo $min +} + # # Wait for newly created block devices to have their minors created. # diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am index e1a1c6b02..f74947915 100644 --- a/tests/zfs-tests/tests/Makefile.am +++ b/tests/zfs-tests/tests/Makefile.am @@ -1 +1 @@ -SUBDIRS = functional stress +SUBDIRS = functional perf stress diff --git a/tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_common.kshlib b/tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_common.kshlib index 1f234ceb9..328e940c2 100644 --- a/tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_common.kshlib +++ b/tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_common.kshlib @@ -25,7 +25,7 @@ # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/clean_mirror/default.cfg @@ -36,6 +36,32 @@ # the contents of the mirror. # This code is sourced into each of these test cases. +# +# Synchronize all the data in pool +# +# $1 pool name +# +function sync_pool #pool +{ + typeset pool=$1 + + log_must $SYNC + log_must $SLEEP 2 + # Flush all the pool data. + typeset -i ret + $ZPOOL scrub $pool >/dev/null 2>&1 + ret=$? + (( $ret != 0 )) && \ + log_fail "$ZPOOL scrub $pool failed." + + while ! 
is_pool_scrubbed $pool; do + if is_pool_resilvered $pool ; then + log_fail "$pool should not be resilver completed." + fi + log_must $SLEEP 2 + done +} + function overwrite_verify_mirror { typeset AFFECTED_DEVICE=$1 @@ -60,6 +86,12 @@ function overwrite_verify_mirror atfile=0 + # + # Flush out the cache so that we ensure we're reading from disk. + # + log_must $ZPOOL export $TESTPOOL + log_must $ZPOOL import $TESTPOOL + typeset -i failedcount=0 while (( atfile < FILE_COUNT )); do files[$atfile]=$TESTDIR/file.$atfile @@ -75,4 +107,6 @@ function overwrite_verify_mirror log_fail "of the $FILE_COUNT files $failedcount did not " \ "have the same checksum before and after." fi + + sync_pool $TESTPOOL } diff --git a/tests/zfs-tests/tests/perf/Makefile.am b/tests/zfs-tests/tests/perf/Makefile.am new file mode 100644 index 000000000..a0502037e --- /dev/null +++ b/tests/zfs-tests/tests/perf/Makefile.am @@ -0,0 +1,7 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf +dist_pkgdata_SCRIPTS = perf.shlib + +SUBDIRS = \ + fio \ + regression \ + scripts diff --git a/tests/zfs-tests/tests/perf/fio/Makefile.am b/tests/zfs-tests/tests/perf/fio/Makefile.am new file mode 100644 index 000000000..9604a9df6 --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/Makefile.am @@ -0,0 +1,8 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/fio +dist_pkgdata_SCRIPTS = \ + mkfiles.fio \ + random_reads.fio \ + random_readwrite.fio \ + random_writes.fio \ + sequential_reads.fio \ + sequential_writes.fio diff --git a/tests/zfs-tests/tests/perf/fio/mkfiles.fio b/tests/zfs-tests/tests/perf/fio/mkfiles.fio new file mode 100644 index 000000000..f876bd63d --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/mkfiles.fio @@ -0,0 +1,30 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. 
+# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +[global] +filename_format=file$jobnum +group_reporting=1 +fallocate=0 +ioengine=psync +bs=1024k +rw=write +thread=1 +directory=/${TESTFS} +numjobs=${NUMJOBS} +filesize=${FILE_SIZE} +buffer_compress_percentage=33 +buffer_compress_chunk=4096 + +[job] diff --git a/tests/zfs-tests/tests/perf/fio/random_reads.fio b/tests/zfs-tests/tests/perf/fio/random_reads.fio new file mode 100644 index 000000000..25dd2ff83 --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/random_reads.fio @@ -0,0 +1,31 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +[global] +filename_format=file$jobnum +group_reporting=1 +fallocate=0 +overwrite=0 +thread=1 +rw=randread +time_based=1 +directory=/${TESTFS} +runtime=${RUNTIME} +bs=${BLOCKSIZE} +ioengine=psync +sync=${SYNC_TYPE} +numjobs=${NUMJOBS} + +[job] diff --git a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio new file mode 100644 index 000000000..0b750260f --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio @@ -0,0 +1,35 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. 
+# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +[global] +filename_format=file$jobnum +nrfiles=16 +group_reporting=1 +fallocate=0 +overwrite=0 +thread=1 +rw=randrw +rwmixread=80 +time_based=1 +directory=/${TESTFS} +runtime=${RUNTIME} +bssplit=4k/50:8k/30:128k/10:1m/10 +ioengine=psync +sync=${SYNC_TYPE} +numjobs=${NUMJOBS} +buffer_compress_percentage=33 +buffer_compress_chunk=4096 + +[job] diff --git a/tests/zfs-tests/tests/perf/fio/random_writes.fio b/tests/zfs-tests/tests/perf/fio/random_writes.fio new file mode 100644 index 000000000..b1860a71d --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/random_writes.fio @@ -0,0 +1,33 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. 
+# + +[global] +filename_format=file$jobnum +group_reporting=1 +fallocate=0 +thread=1 +rw=randwrite +time_based=1 +directory=/${TESTFS} +runtime=${RUNTIME} +bs=${BLOCKSIZE} +ioengine=psync +sync=${SYNC_TYPE} +numjobs=${NUMJOBS} +filesize=${FILESIZE} +buffer_compress_percentage=33 +buffer_compress_chunk=4096 + +[job] diff --git a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio new file mode 100644 index 000000000..b7d9fea5f --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio @@ -0,0 +1,31 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +[global] +filename_format=file$jobnum +group_reporting=1 +fallocate=0 +overwrite=0 +thread=1 +rw=read +time_based=1 +directory=/${TESTFS} +runtime=${RUNTIME} +bs=${BLOCKSIZE} +ioengine=psync +sync=${SYNC_TYPE} +numjobs=${NUMJOBS} + +[job] diff --git a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio new file mode 100644 index 000000000..df1590cf1 --- /dev/null +++ b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio @@ -0,0 +1,33 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. 
All rights reserved. +# + +[global] +filename_format=file$jobnum +group_reporting=1 +fallocate=0 +thread=1 +rw=write +time_based=1 +directory=/${TESTFS} +runtime=${RUNTIME} +bs=${BLOCKSIZE} +ioengine=psync +sync=${SYNC_TYPE} +numjobs=${NUMJOBS} +filesize=${FILESIZE} +buffer_compress_percentage=33 +buffer_compress_chunk=4096 + +[job] diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib new file mode 100644 index 000000000..cb2b85a20 --- /dev/null +++ b/tests/zfs-tests/tests/perf/perf.shlib @@ -0,0 +1,308 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# Copyright (c) 2016, Intel Corporation. +# + +. $STF_SUITE/include/libtest.shlib + +# If neither is specified, do a nightly run. +[[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1 + +# Default runtime for each type of test run. +export PERF_RUNTIME_WEEKLY=$((30 * 60)) +export PERF_RUNTIME_NIGHTLY=$((10 * 60)) + +# Default fs creation options +export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \ + ' -o checksum=sha256 -o redundant_metadata=most'} + +function get_sync_str +{ + typeset sync=$1 + typeset sync_str='' + + [[ $sync -eq 0 ]] && sync_str='async' + [[ $sync -eq 1 ]] && sync_str='sync' + $ECHO $sync_str +} + +# +# This function will run fio in a loop, according to the .fio file passed +# in and a number of environment variables. The following variables can be +# set before launching zfstest to override the defaults. +# +# PERF_RUNTIME: The time in seconds each fio invocation should run. 
+# PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are +# nightly and weekly. +# PERF_NTHREADS: A list of how many threads each fio invocation will use. +# PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO. +# PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO. +# PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag' +# pairs that will be added to the scripts specified in each test. +# +function do_fio_run +{ + typeset script=$1 + typeset do_recreate=$2 + typeset clear_cache=$3 + typeset threads sync iosize + + for threads in $PERF_NTHREADS; do + for sync in $PERF_SYNC_TYPES; do + for iosize in $PERF_IOSIZES; do + log_note "Running with $threads" \ + "$(get_sync_str $sync) threads, $iosize ios" + + if $do_recreate; then + recreate_perfpool + log_must $ZFS create $PERF_FS_OPTS \ + $TESTFS + fi + + if $clear_cache; then + # Clear the ARC + $ZPOOL export $PERFPOOL + $ZPOOL import $PERFPOOL + fi + + export RUNTIME=$PERF_RUNTIME + export FILESIZE=$((TOTAL_SIZE / threads)) + export NUMJOBS=$threads + export SYNC_TYPE=$sync + export BLOCKSIZE=$iosize + $SYNC + + # Start the data collection + do_collect_scripts $threads $sync $iosize + + # Start the load + log_must $FIO $FIO_SCRIPTS/$script + done + done + done +} + +# +# This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS. +# The script at index N is launched in the background, with its output +# redirected to a logfile containing the tag specified at index N + 1. +# +function do_collect_scripts +{ + typeset threads=$1 + typeset sync=$2 + typeset iosize=$3 + + [[ -n $collect_scripts ]] || log_fail "No data collection scripts." + [[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified." + + # This will be part of the output filename. + typeset sync_str=$(get_sync_str $sync) + typeset suffix="$sync_str.$iosize-ios.$threads-threads" + + # Add in user supplied scripts and logfiles, if any. 
+ typeset oIFS=$IFS + IFS=',' + for item in $PERF_COLLECT_SCRIPTS; do + collect_scripts+=($($ECHO $item | $SED 's/^ *//g')) + done + IFS=$oIFS + + typeset idx=0 + while [[ $idx -lt "${#collect_scripts[@]}" ]]; do + typeset logbase="$(get_perf_output_dir)/$($BASENAME \ + $SUDO_COMMAND)" + typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix" + + $TIMEOUT $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 & + ((idx += 2)) + done + + # Need to explicitly return 0 because timeout(1) will kill + # a child process and cause us to return non-zero. + return 0 +} + +# Find a place to deposit performance data collected while under load. +function get_perf_output_dir +{ + typeset dir="$(pwd)/perf_data" + [[ -d $dir ]] || $MKDIR -p $dir + + $ECHO $dir +} + +# +# Destroy and create the pool used for performance tests. The +# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool +# configuration by specifying the pool creation command in their environment. +# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created. +# +function recreate_perfpool +{ + [[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set." + + poolexists $PERFPOOL && destroy_pool $PERFPOOL + + if [[ -n $PERFPOOL_CREATE_CMD ]]; then + log_must $PERFPOOL_CREATE_CMD + else + log_must eval "$ZPOOL create -f $PERFPOOL $DISKS" + fi +} + +function get_max_arc_size +{ + if is_linux; then + typeset -l max_arc_size=`$AWK '$1 == "c_max" { print $3 }' \ + /proc/spl/kstat/zfs/arcstats` + else + typeset -l max_arc_size=$(dtrace -qn 'BEGIN { + printf("%u\n", `arc_stats.arcstat_c_max.value.ui64); + exit(0); + }') + fi + + [[ $? -eq 0 ]] || log_fail "get_max_arc_size failed" + + $ECHO $max_arc_size +} + +# Create a file with some information about how this system is configured. 
+function get_system_config +{ + typeset config=$PERF_DATA_DIR/$1 + + $ECHO "{" >>$config + if is_linux; then + $ECHO " \"ncpus\": \"$($NPROC --all)\"," >>$config + $ECHO " \"physmem\": \"$($FREE -b | \ + $AWK '$1 == "Mem:" { print $2 }')\"," >>$config + $ECHO " \"c_max\": \"$(get_max_arc_size)\"," >>$config + $ECHO " \"hostname\": \"$($UNAME -n)\"," >>$config + $ECHO " \"kernel version\": \"$($UNAME -sr)\"," >>$config + else + $DTRACE -qn 'BEGIN{ + printf(" \"ncpus\": %d,\n", `ncpus); + printf(" \"physmem\": %u,\n", `physmem * `_pagesize); + printf(" \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64); + printf(" \"kmem_flags\": \"0x%x\",", `kmem_flags); + exit(0)}' >>$config + $ECHO " \"hostname\": \"$($UNAME -n)\"," >>$config + $ECHO " \"kernel version\": \"$($UNAME -v)\"," >>$config + fi + if is_linux; then + $LSBLK -dino NAME,SIZE | $AWK 'BEGIN { + printf(" \"disks\": {\n"); first = 1} + {disk = $1} {size = $2; + if (first != 1) {printf(",\n")} else {first = 0} + printf(" \"%s\": \"%s\"", disk, size)} + END {printf("\n },\n")}' >>$config + + zfs_tunables="/sys/module/zfs/parameters" + + printf " \"tunables\": {\n" >>$config + for tunable in \ + zfs_arc_max \ + zfs_arc_meta_limit \ + zfs_arc_sys_free \ + zfs_dirty_data_max \ + zfs_flags \ + zfs_prefetch_disable \ + zfs_txg_timeout \ + zfs_vdev_aggregation_limit \ + zfs_vdev_async_read_max_active \ + zfs_vdev_async_write_max_active \ + zfs_vdev_sync_read_max_active \ + zfs_vdev_sync_write_max_active \ + zio_delay_max + do + if [ "$tunable" != "zfs_arc_max" ] + then + printf ",\n" >>$config + fi + printf " \"$tunable\": \"$(cat $zfs_tunables/$tunable)\"" \ + >>$config + done + printf "\n }\n" >>$config + else + $IOSTAT -En | $AWK 'BEGIN { + printf(" \"disks\": {\n"); first = 1} + /^c/ {disk = $1} + /^Size: [^0]/ {size = $2; + if (first != 1) {printf(",\n")} else {first = 0} + printf(" \"%s\": \"%s\"", disk, size)} + END {printf("\n },\n")}' >>$config + + $SED -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' 
/etc/system | \ + $AWK -F= 'BEGIN {printf(" \"system\": {\n"); first = 1} + {if (first != 1) {printf(",\n")} else {first = 0}; + printf(" \"%s\": %s", $1, $2)} + END {printf("\n }\n")}' >>$config + fi + $ECHO "}" >>$config +} + +function num_jobs_by_cpu +{ + if is_linux; then + typeset ncpu=$($NPROC --all) + else + typeset ncpu=$($PSRINFO | $WC -l) + fi + typeset num_jobs=$ncpu + + [[ $ncpu -gt 8 ]] && num_jobs=$($ECHO "$ncpu * 3 / 4" | $BC) + + $ECHO $num_jobs +} + +# +# On illumos this looks like: ":sd3:sd4:sd1:sd2:" +# +function pool_to_lun_list +{ + typeset pool=$1 + typeset ctd ctds devname lun + typeset lun_list=':' + + if is_linux; then + ctds=$($ZPOOL list -HLv $pool | \ + $AWK '/sd[a-z]*|loop[0-9]*|dm-[0-9]*/ {print $1}') + + for ctd in $ctds; do + lun_list="$lun_list$ctd:" + done + else + ctds=$($ZPOOL list -v $pool | + $AWK '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ {print $1}') + + for ctd in $ctds; do + # Get the device name as it appears in /etc/path_to_inst + devname=$($READLINK -f /dev/dsk/${ctd}s0 | $SED -n \ + 's/\/devices\([^:]*\):.*/\1/p') + # Add a string composed of the driver name and instance + # number to the list for comparison with dev_statname. + lun=$($SED 's/"//g' /etc/path_to_inst | $GREP \ + $devname | $AWK '{print $3$2}') + lun_list="$lun_list$lun:" + done + fi + $ECHO $lun_list +} + +# Create a perf_data directory to hold performance statistics and +# configuration information. 
+export PERF_DATA_DIR=$(get_perf_output_dir) +[[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json diff --git a/tests/zfs-tests/tests/perf/regression/Makefile.am b/tests/zfs-tests/tests/perf/regression/Makefile.am new file mode 100644 index 000000000..c9032a26b --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/Makefile.am @@ -0,0 +1,10 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/regression +dist_pkgdata_SCRIPTS = \ + random_reads.ksh \ + random_readwrite.ksh \ + random_writes.ksh \ + sequential_reads_cached_clone.ksh \ + sequential_reads_cached.ksh \ + sequential_reads.ksh \ + sequential_writes.ksh \ + setup.ksh diff --git a/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/tests/zfs-tests/tests/perf/regression/random_reads.ksh new file mode 100755 index 000000000..0e9187f6d --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/random_reads.ksh @@ -0,0 +1,83 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the random_reads job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# The files to read from are created prior to the first fio run, and used +# for all fio runs. The ARC is cleared with `zinject -a` prior to each run +# so reads will go to disk. +# + +. $STF_SUITE/include/libtest.shlib +. 
$STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during random read load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. +export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} +fi + +# Layout the files to be used by the read tests. Create as many files as the +# largest number of threads. An fio run with fewer threads will use a subset +# of the available files. +export NUMJOBS=$(get_max $PERF_NTHREADS) +export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) +log_must $FIO $FIO_SCRIPTS/mkfiles.fio + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" + "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Random reads with $PERF_RUNTYPE settings" +do_fio_run random_reads.fio $FALSE $TRUE +log_pass "Measure IO stats during random read load" diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh new file mode 100755 index 000000000..c360cd5b7 --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh @@ -0,0 +1,83 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the random_readwrite job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# The files to read and write from are created prior to the first fio run, +# and used for all fio runs. The ARC is cleared with `zinject -a` prior to +# each run so reads will go to disk. +# + +. $STF_SUITE/include/libtest.shlib +. 
$STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during random read-write load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. +export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} + export PERF_IOSIZES='' # bssplit used instead +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES='' # bssplit used instead +fi + +# Layout the files to be used by the readwrite tests. Create as many files +# as the largest number of threads. An fio run with fewer threads will use +# a subset of the available files. +export NUMJOBS=$(get_max $PERF_NTHREADS) +export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) +log_must $FIO $FIO_SCRIPTS/mkfiles.fio + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" + "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Random reads and writes with $PERF_RUNTYPE settings" +do_fio_run random_readwrite.fio $FALSE $TRUE +log_pass "Measure IO stats during random read and write load" diff --git a/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/tests/zfs-tests/tests/perf/regression/random_writes.ksh new file mode 100755 index 000000000..3e5d0f59e --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/random_writes.ksh @@ -0,0 +1,75 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the random_writes job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# Prior to each fio run the dataset is recreated, and fio writes new files +# into an otherwise empty pool. +# + +. $STF_SUITE/include/libtest.shlib +. 
$STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during random write load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. +export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} +fi + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" + "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Random writes with $PERF_RUNTYPE settings" +do_fio_run random_writes.fio $TRUE $FALSE +log_pass "Measure IO stats during random write load" diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh new file mode 100755 index 000000000..75680e089 --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh @@ -0,0 +1,84 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the sequential_reads job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# The files to read from are created prior to the first fio run, and used +# for all fio runs. The ARC is cleared with `zinject -a` prior to each run +# so reads will go to disk. +# + +. $STF_SUITE/include/libtest.shlib +. 
$STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during sequential read load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. +export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'} +fi + +# Layout the files to be used by the read tests. Create as many files as the +# largest number of threads. An fio run with fewer threads will use a subset +# of the available files. +export NUMJOBS=$(get_max $PERF_NTHREADS) +export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) +log_must $FIO $FIO_SCRIPTS/mkfiles.fio + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Sequential reads with $PERF_RUNTYPE settings" +do_fio_run sequential_reads.fio $FALSE $TRUE +log_pass "Measure IO stats during sequential read load" diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh new file mode 100755 index 000000000..54a4d4ac8 --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh @@ -0,0 +1,83 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the sequential_reads job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# The files to read from are created prior to the first fio run, and used +# for all fio runs. The ARC is not cleared to ensure that all data is cached. +# + +. $STF_SUITE/include/libtest.shlib +. 
$STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during sequential read load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Make sure the working set can be cached in the arc. Aim for 1/2 of arc. +export TOTAL_SIZE=$(($(get_max_arc_size) / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'} +fi + +# Layout the files to be used by the read tests. Create as many files as the +# largest number of threads. An fio run with fewer threads will use a subset +# of the available files. +export NUMJOBS=$(get_max $PERF_NTHREADS) +export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) +log_must $FIO $FIO_SCRIPTS/mkfiles.fio + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Sequential cached reads with $PERF_RUNTYPE settings" +do_fio_run sequential_reads.fio $FALSE $FALSE +log_pass "Measure IO stats during sequential cached read load" diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh new file mode 100755 index 000000000..bbc053abc --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh @@ -0,0 +1,99 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +# +# Description: +# Trigger fio runs using the sequential_reads job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# The files to read from are created prior to the first fio run, and used +# for all fio runs. This test will exercise cached read performance from +# a clone filesystem. The data is initially cached in the ARC and then +# a snapshot and clone are created. 
All the performance runs are then +# initiated against the clone filesystem to exercise the performance of +# reads when the ARC has to create another buffer from a different dataset. +# It will also exercise the need to evict the duplicate buffer once the last +# reference on that buffer is released. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/perf/perf.shlib + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +log_assert "Measure IO stats during sequential read load" +log_onexit cleanup + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Make sure the working set can be cached in the arc. Aim for 1/2 of arc. +export TOTAL_SIZE=$(($(get_max_arc_size) / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'} +fi + +# Layout the files to be used by the read tests. Create as many files as the +# largest number of threads. An fio run with fewer threads will use a subset +# of the available files. 
+export NUMJOBS=$(get_max $PERF_NTHREADS) +export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) +log_must $FIO $FIO_SCRIPTS/mkfiles.fio + +log_note "Creating snapshot, $TESTSNAP, of $TESTFS" +create_snapshot $TESTFS $TESTSNAP +log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP" +create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE + +# +# Reset the TESTFS to point to the clone +# +export TESTFS=$PERFPOOL/$TESTCLONE + +# Set up the scripts and output files that will log performance data. +lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1" + "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings" +do_fio_run sequential_reads.fio $FALSE $FALSE +log_pass "Measure IO stats during sequential cached read load" diff --git a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh new file mode 100755 index 000000000..7865afa20 --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh @@ -0,0 +1,75 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. 
All rights reserved. +# + +# +# Description: +# Trigger fio runs using the sequential_writes job file. The number of runs and +# data collected is determined by the PERF_* variables. See do_fio_run for +# details about these variables. +# +# Prior to each fio run the dataset is recreated, and fio writes new files +# into an otherwise empty pool. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/perf/perf.shlib + +log_assert "Measure IO stats during sequential write load" +log_onexit cleanup + +function cleanup +{ + log_must $ZFS destroy $TESTFS +} + +export TESTFS=$PERFPOOL/testfs +recreate_perfpool +log_must $ZFS create $PERF_FS_OPTS $TESTFS + +# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. +export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2)) + +# Variables for use by fio. +if [[ -n $PERF_REGRESSION_WEEKLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'} +elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then + export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} + export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} + export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} + export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} + export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'} +fi + +# Set up the scripts and output files that will log performance data. 
+lun_list=$(pool_to_lun_list $PERFPOOL) +log_note "Collecting backend IO stats with lun list $lun_list" +if is_linux; then + export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat" + "$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" + "iostat") +else + export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" + "$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat") +fi + +log_note "Sequential writes with $PERF_RUNTYPE settings" +do_fio_run sequential_writes.fio $TRUE $FALSE +log_pass "Measure IO stats during sequential write load" diff --git a/tests/zfs-tests/tests/perf/regression/setup.ksh b/tests/zfs-tests/tests/perf/regression/setup.ksh new file mode 100755 index 000000000..5e6fe37cb --- /dev/null +++ b/tests/zfs-tests/tests/perf/regression/setup.ksh @@ -0,0 +1,30 @@ +#!/bin/ksh + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2015 by Delphix. All rights reserved. +# + +. 
$STF_SUITE/include/libtest.shlib + +[[ -z $FIO ]] && log_fail "Missing fio" +[[ -z $FREE ]] && log_fail "Missing free" +[[ -z $IOSTAT ]] && log_fail "Missing iostat" +[[ -z $LSBLK ]] && log_fail "Missing lsblk" +[[ -z $MPSTAT ]] && log_fail "Missing mpstat" +[[ -z $VMSTAT ]] && log_fail "Missing vmstat" + +verify_runnable "global" +verify_disk_count "$DISKS" 3 + +log_pass diff --git a/tests/zfs-tests/tests/perf/scripts/Makefile.am b/tests/zfs-tests/tests/perf/scripts/Makefile.am new file mode 100644 index 000000000..f0d45e1fb --- /dev/null +++ b/tests/zfs-tests/tests/perf/scripts/Makefile.am @@ -0,0 +1,2 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/scripts +dist_pkgdata_SCRIPTS = prefetch_io.sh diff --git a/tests/zfs-tests/tests/perf/scripts/prefetch_io.sh b/tests/zfs-tests/tests/perf/scripts/prefetch_io.sh new file mode 100755 index 000000000..3dd9e6c36 --- /dev/null +++ b/tests/zfs-tests/tests/perf/scripts/prefetch_io.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2016 by Intel, Corp. 
#

#
# Linux platform placeholder for collecting prefetch I/O stats.
# TBD if we can add additional kstats to achieve the desired results.
#
# Emits one sample block per interval on stdout:
#   <epoch seconds>
#   prefetch_ios             <delta since last sample>
#   prefetched_demand_reads  <delta since last sample>
#   sync_wait_for_async      <delta since last sample>
#

zfs_kstats="/proc/spl/kstat/zfs"

AWK=${AWK:-awk}
DATE=${DATE:-date}

# Total prefetch misses (data + metadata) from the ARC kstats.
function get_prefetch_ios
{
	# NOTE: this is a bash script, so use 'typeset -i' for integers;
	# bash's 'typeset -l' lowercases the value (ksh-only "long int").
	typeset -i data_misses=$($AWK '$1 == "prefetch_data_misses" \
	    { print $3 }' $zfs_kstats/arcstats)
	typeset -i metadata_misses=$($AWK '$1 == "prefetch_metadata_misses" \
	    { print $3 }' $zfs_kstats/arcstats)
	typeset -i total_misses=$(( data_misses + metadata_misses ))

	echo $total_misses
}

# Demand reads that found their data already in flight via a
# predictive prefetch.
function get_prefetched_demand_reads
{
	typeset -i demand_reads=$($AWK '$1 == "demand_hit_predictive_prefetch" \
	    { print $3 }' $zfs_kstats/arcstats)

	echo $demand_reads
}

# Synchronous reads that had to wait for an in-flight async prefetch.
function get_sync_wait_for_async
{
	typeset -i sync_wait=$($AWK '$1 == "sync_wait_for_async" \
	    { print $3 }' $zfs_kstats/arcstats)

	echo $sync_wait
}

if [ $# -ne 2 ]
then
	echo "Usage: $(basename $0) <poolname> <interval>" >&2
	exit 1
fi

# $poolname is accepted for interface compatibility with the illumos
# prefetch_io.d script but is unused here: the Linux arcstats kstats
# are system-wide, not per-pool.
poolname=$1
interval=$2
prefetch_ios=$(get_prefetch_ios)
prefetched_demand_reads=$(get_prefetched_demand_reads)
sync_wait_for_async=$(get_sync_wait_for_async)

# Sample forever (the caller kills us when the fio run ends), printing
# per-interval deltas rather than the raw monotonically-increasing
# kstat counters.
while true
do
	new_prefetch_ios=$(get_prefetch_ios)
	printf "%u\n%-24s\t%u\n" $($DATE +%s) "prefetch_ios" \
	    $(( new_prefetch_ios - prefetch_ios ))
	prefetch_ios=$new_prefetch_ios

	new_prefetched_demand_reads=$(get_prefetched_demand_reads)
	printf "%-24s\t%u\n" "prefetched_demand_reads" \
	    $(( new_prefetched_demand_reads - prefetched_demand_reads ))
	prefetched_demand_reads=$new_prefetched_demand_reads

	new_sync_wait_for_async=$(get_sync_wait_for_async)
	printf "%-24s\t%u\n" "sync_wait_for_async" \
	    $(( new_sync_wait_for_async - sync_wait_for_async ))
	sync_wait_for_async=$new_sync_wait_for_async

	sleep $interval
done