-rw-r--r--  tests/runfiles/perf-regression.run                                      |  6
-rw-r--r--  tests/zfs-tests/include/commands.cfg                                    |  1
-rw-r--r--  tests/zfs-tests/tests/perf/perf.shlib                                   | 18
-rw-r--r--  tests/zfs-tests/tests/perf/regression/Makefile.am                       |  5
-rwxr-xr-x  tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh (renamed from tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh)             |  0
-rwxr-xr-x  tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh (renamed from tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh) |  0
-rwxr-xr-x  tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh  | 93
7 files changed, 118 insertions, 5 deletions
diff --git a/tests/runfiles/perf-regression.run b/tests/runfiles/perf-regression.run
index 3b571dddd..cb068e887 100644
--- a/tests/runfiles/perf-regression.run
+++ b/tests/runfiles/perf-regression.run
@@ -25,8 +25,8 @@ outputdir = /var/tmp/test_results
 tags = ['perf']
 
 [tests/perf/regression]
-tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_cached',
-    'sequential_reads_cached_clone', 'random_reads', 'random_writes',
-    'random_readwrite']
+tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
+    'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
+    'random_reads', 'random_writes', 'random_readwrite']
 post =
 tags = ['perf', 'regression']
diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg
index 0600fe71c..ca734d39f 100644
--- a/tests/zfs-tests/include/commands.cfg
+++ b/tests/zfs-tests/include/commands.cfg
@@ -44,6 +44,7 @@ export SYSTEM_FILES='arp
     file
     find
     fio
+    free
     getconf
     getent
     getfacl
diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib
index 76b6651c3..e1e845ba6 100644
--- a/tests/zfs-tests/tests/perf/perf.shlib
+++ b/tests/zfs-tests/tests/perf/perf.shlib
@@ -188,6 +188,24 @@ function get_max_arc_size
     echo $max_arc_size
 }
 
+function get_max_dbuf_cache_size
+{
+    typeset -l max_dbuf_cache_size
+
+    if is_linux; then
+        max_dbuf_cache_size=$(get_tunable dbuf_cache_max_bytes)
+    else
+        max_dbuf_cache_size=$(dtrace -qn 'BEGIN {
+            printf("%u\n", `dbuf_cache_max_bytes);
+            exit(0);
+        }')
+
+        [[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed"
+    fi
+
+    echo $max_dbuf_cache_size
+}
+
 # Create a file with some information about how this system is configured.
 function get_system_config
 {
diff --git a/tests/zfs-tests/tests/perf/regression/Makefile.am b/tests/zfs-tests/tests/perf/regression/Makefile.am
index c9032a26b..c0419949d 100644
--- a/tests/zfs-tests/tests/perf/regression/Makefile.am
+++ b/tests/zfs-tests/tests/perf/regression/Makefile.am
@@ -3,8 +3,9 @@ dist_pkgdata_SCRIPTS = \
     random_reads.ksh \
     random_readwrite.ksh \
     random_writes.ksh \
-    sequential_reads_cached_clone.ksh \
-    sequential_reads_cached.ksh \
+    sequential_reads_arc_cached_clone.ksh \
+    sequential_reads_arc_cached.ksh \
+    sequential_reads_dbuf_cached.ksh \
     sequential_reads.ksh \
     sequential_writes.ksh \
     setup.ksh
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index 5445f814e..5445f814e 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index 5d7175b1a..5d7175b1a 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
new file mode 100755
index 000000000..5d029280e
--- /dev/null
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -0,0 +1,93 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2016 by Delphix. All rights reserved.
+#
+
+#
+# Description:
+# Trigger fio runs using the sequential_reads job file. The number of runs and
+# data collected is determined by the PERF_* variables. See do_fio_run for
+# details about these variables.
+#
+# The files to read from are created prior to the first fio run, and used
+# for all fio runs. The ARC is not cleared to ensure that all data is cached.
+#
+# This is basically a copy of the sequential_reads_cached test case, but with
+# a smaller dataset so that we can fit everything into the decompressed, linear
+# space in the dbuf cache.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/perf/perf.shlib
+
+function cleanup
+{
+    # kill fio and iostat
+    pkill ${fio##*/}
+    pkill ${iostat##*/}
+    log_must_busy zfs destroy $TESTFS
+    log_must_busy zpool destroy $PERFPOOL
+}
+
+trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
+log_onexit cleanup
+
+export TESTFS=$PERFPOOL/testfs
+recreate_perfpool
+log_must zfs create $PERF_FS_OPTS $TESTFS
+
+# Ensure the working set can be cached in the dbuf cache.
+export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
+
+# Variables for use by fio.
+if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
+    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
+    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
+    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
+elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
+    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
+    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
+    export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+    export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
+fi
+
+# Layout the files to be used by the read tests. Create as many files as the
+# largest number of threads. An fio run with fewer threads will use a subset
+# of the available files.
+export NUMJOBS=$(get_max $PERF_NTHREADS)
+export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+log_must fio $FIO_SCRIPTS/mkfiles.fio
+
+# Set up the scripts and output files that will log performance data.
+lun_list=$(pool_to_lun_list $PERFPOOL)
+log_note "Collecting backend IO stats with lun list $lun_list"
+if is_linux; then
+    export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+        "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
+        "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat")
+else
+    export collect_scripts=("kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat"
+        "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat"
+        "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+        "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+        "dtrace -s $PERF_SCRIPTS/profile.d" "profile")
+fi
+
+log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+do_fio_run sequential_reads.fio false false
+log_pass "Measure IO stats during sequential cached read load"
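The new test sizes its working set off the dbuf cache rather than the ARC: get_max_dbuf_cache_size() reads dbuf_cache_max_bytes (via get_tunable on Linux, via dtrace on illumos), and the test caps TOTAL_SIZE at 3/4 of that value so the data fits in the decompressed, linear space of the dbuf cache. As a rough illustration only, not part of the commit, the sketch below reproduces that sizing calculation; it assumes the test suite's shared libraries are installed, $STF_SUITE points at them, and the ZFS module is loaded so the tunable can be read.

    #!/bin/ksh
    # Hypothetical standalone check (illustration only): report the dbuf cache
    # ceiling and the working-set size sequential_reads_dbuf_cached.ksh would
    # derive from it.
    . $STF_SUITE/include/libtest.shlib
    . $STF_SUITE/tests/perf/perf.shlib

    typeset max_dbuf_cache=$(get_max_dbuf_cache_size)
    typeset working_set=$((max_dbuf_cache * 3 / 4))
    echo "dbuf cache ceiling:       $max_dbuf_cache bytes"
    echo "working set (TOTAL_SIZE): $working_set bytes"

Keeping the working set under that ceiling is what separates this test from sequential_reads_arc_cached, which only requires the data set to fit in the ARC.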