Diffstat (limited to 'scripts')
-rw-r--r--  scripts/Makefile.am                             29
-rw-r--r--  scripts/common.sh.in                           373
-rwxr-xr-x  scripts/zconfig.sh                             572
-rwxr-xr-x  scripts/zfs.sh                                  74
-rw-r--r--  scripts/zpool-config/dm0-raid0.sh               60
-rw-r--r--  scripts/zpool-config/dragon-raid0-1x70.sh       21
-rw-r--r--  scripts/zpool-config/dragon-raid10-35x2.sh      21
-rw-r--r--  scripts/zpool-config/dragon-raidz-7x10.sh       21
-rw-r--r--  scripts/zpool-config/dragon-raidz2-7x10.sh      21
-rw-r--r--  scripts/zpool-config/file-raid0.sh              31
-rw-r--r--  scripts/zpool-config/file-raid10.sh             34
-rw-r--r--  scripts/zpool-config/file-raidz.sh              31
-rw-r--r--  scripts/zpool-config/file-raidz2.sh             31
-rw-r--r--  scripts/zpool-config/hda-raid0.sh               16
-rw-r--r--  scripts/zpool-config/lo-raid0.sh                39
-rw-r--r--  scripts/zpool-config/lo-raid10.sh               54
-rw-r--r--  scripts/zpool-config/lo-raidz.sh                39
-rw-r--r--  scripts/zpool-config/lo-raidz2.sh               39
-rw-r--r--  scripts/zpool-config/md0-raid10.sh              38
-rw-r--r--  scripts/zpool-config/md0-raid5.sh               38
-rw-r--r--  scripts/zpool-config/ram0-raid0.sh              16
-rw-r--r--  scripts/zpool-config/sda-raid0.sh               16
-rw-r--r--  scripts/zpool-config/supermicro-raid0-1x16.sh   21
-rw-r--r--  scripts/zpool-config/supermicro-raid10-8x2.sh   21
-rw-r--r--  scripts/zpool-config/supermicro-raidz-4x4.sh    21
-rw-r--r--  scripts/zpool-config/supermicro-raidz2-4x4.sh   21
-rw-r--r--  scripts/zpool-config/x4550-raid0-1x48.sh        21
-rw-r--r--  scripts/zpool-config/x4550-raid10-24x2.sh       21
-rw-r--r--  scripts/zpool-config/x4550-raidz-8x6.sh         21
-rw-r--r--  scripts/zpool-config/x4550-raidz2-8x6.sh        21
-rwxr-xr-x  scripts/zpool-create.sh                        133
31 files changed, 1915 insertions(+), 0 deletions(-)
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
new file mode 100644
index 000000000..a1dfc3871
--- /dev/null
+++ b/scripts/Makefile.am
@@ -0,0 +1,29 @@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+nobase_pkglibexec_SCRIPTS = common.sh
+nobase_pkglibexec_SCRIPTS += zconfig.sh
+nobase_pkglibexec_SCRIPTS += zfs.sh
+nobase_pkglibexec_SCRIPTS += zpool-create.sh
+nobase_pkglibexec_SCRIPTS += zpool-config/*
+EXTRA_DIST = zfs-update.sh $(nobase_pkglibexec_SCRIPTS)
+
+ZFS=${top_srcdir}/scripts/zfs.sh
+ZCONFIG=${top_srcdir}/scripts/zconfig.sh
+ZTEST=${top_builddir}/cmd/ztest/ztest
+
+check:
+ @echo
+ @echo -n "===================================="
+ @echo -n " ZTEST "
+ @echo "===================================="
+ @echo
+ @$(ZFS)
+ @$(ZTEST) -V
+ @$(ZFS) -u
+ @echo
+ @echo
+ @echo -n "==================================="
+ @echo -n " ZCONFIG "
+ @echo "==================================="
+ @echo
+ @$(ZCONFIG)
+ @echo
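
The check target wires the in-tree test tools together; run from the top of
a built tree, the equivalent manual sequence would be (paths assumed):

    ./scripts/zfs.sh          # load the ZFS module stack
    ./cmd/ztest/ztest -V      # run ztest against the loaded stack
    ./scripts/zfs.sh -u       # unload the module stack
    ./scripts/zconfig.sh      # run the configuration tests
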
diff --git a/scripts/common.sh.in b/scripts/common.sh.in
new file mode 100644
index 000000000..00418696c
--- /dev/null
+++ b/scripts/common.sh.in
@@ -0,0 +1,373 @@
+#!/bin/bash
+#
+# Common support functions for testing scripts. If a .script-config
+# file is available it will be sourced so in-tree kernel modules and
+# utilities will be used. If no .script-config can be found then the
+# installed kernel modules and utilities will be used.
+
+basedir="$(dirname $0)"
+
+SCRIPT_CONFIG=.script-config
+if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
+	. "${basedir}/../${SCRIPT_CONFIG}"
+else
+	MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
+fi
+
+PROG="<define PROG>"
+CLEANUP=
+VERBOSE=
+VERBOSE_FLAG=
+FORCE=
+FORCE_FLAG=
+DUMP_LOG=
+ERROR=
+RAID0S=()
+RAID10S=()
+RAIDZS=()
+RAIDZ2S=()
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libexecdir=@libexecdir@
+pkglibexecdir=${libexecdir}/@PACKAGE@
+bindir=@bindir@
+sbindir=@sbindir@
+
+ETCDIR=${ETCDIR:-/etc}
+DEVDIR=${DEVDIR:-/dev/disk/zpool}
+ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
+
+ZDB=${ZDB:-${sbindir}/zdb}
+ZFS=${ZFS:-${sbindir}/zfs}
+ZINJECT=${ZINJECT:-${sbindir}/zinject}
+ZPOOL=${ZPOOL:-${sbindir}/zpool}
+ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
+ZTEST=${ZTEST:-${sbindir}/ztest}
+
+COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
+ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
+ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
+
+LDMOD=${LDMOD:-/sbin/modprobe}
+LSMOD=${LSMOD:-/sbin/lsmod}
+RMMOD=${RMMOD:-/sbin/rmmod}
+INFOMOD=${INFOMOD:-/sbin/modinfo}
+LOSETUP=${LOSETUP:-/sbin/losetup}
+SYSCTL=${SYSCTL:-/sbin/sysctl}
+UDEVADM=${UDEVADM:-/sbin/udevadm}
+AWK=${AWK:-/usr/bin/awk}
+
+die() {
+ echo -e "${PROG}: $1" >&2
+ exit 1
+}
+
+msg() {
+ if [ ${VERBOSE} ]; then
+ echo "$@"
+ fi
+}
+
+pass() {
+ echo "PASS"
+}
+
+fail() {
+ echo "FAIL ($1)"
+ exit $1
+}
+
+spl_dump_log() {
+ ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
+ local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
+ ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
+ echo
+ echo "Dumped debug log: ${NAME}.log"
+ tail -n1 ${NAME}.log
+ echo
+ return 0
+}
+
+check_modules() {
+ local LOADED_MODULES=()
+ local MISSING_MODULES=()
+
+ for MOD in ${MODULES[*]}; do
+ local NAME=`basename $MOD .ko`
+
+ if ${LSMOD} | egrep -q "^${NAME}"; then
+ LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
+ fi
+
+		if ! ${INFOMOD} ${MOD} &>/dev/null; then
+ MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
+ fi
+ done
+
+ if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
+ ERROR="Unload these modules with '${PROG} -u':\n"
+ ERROR="${ERROR}${LOADED_MODULES[*]}"
+ return 1
+ fi
+
+ if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
+ ERROR="The following modules can not be found,"
+ ERROR="${ERROR} ensure your source trees are built:\n"
+ ERROR="${ERROR}${MISSING_MODULES[*]}"
+ return 1
+ fi
+
+ return 0
+}
+
+load_module() {
+ local NAME=`basename $1 .ko`
+
+ if [ ${VERBOSE} ]; then
+ echo "Loading ${NAME} ($@)"
+ fi
+
+	${LDMOD} $* || { ERROR="Failed to load $1"; return 1; }
+
+ return 0
+}
+
+load_modules() {
+ mkdir -p /etc/zfs
+
+ for MOD in ${MODULES[*]}; do
+ local NAME=`basename ${MOD} .ko`
+ local VALUE=
+
+ for OPT in "$@"; do
+ OPT_NAME=`echo ${OPT} | cut -f1 -d'='`
+
+ if [ ${NAME} = "${OPT_NAME}" ]; then
+ VALUE=`echo ${OPT} | cut -f2- -d'='`
+ fi
+ done
+
+ load_module ${MOD} ${VALUE} || return 1
+ done
+
+ if [ ${VERBOSE} ]; then
+ echo "Successfully loaded ZFS module stack"
+ fi
+
+ return 0
+}
+
+unload_module() {
+ local NAME=`basename $1 .ko`
+
+ if [ ${VERBOSE} ]; then
+ echo "Unloading ${NAME} ($@)"
+ fi
+
+	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }
+
+ return 0
+}
+
+unload_modules() {
+ local MODULES_REVERSE=( $(echo ${MODULES[@]} |
+ ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )
+
+ for MOD in ${MODULES_REVERSE[*]}; do
+ local NAME=`basename ${MOD} .ko`
+ local USE_COUNT=`${LSMOD} |
+ egrep "^${NAME} "| ${AWK} '{print $3}'`
+
+ if [ "${USE_COUNT}" = 0 ] ; then
+
+ if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
+ spl_dump_log
+ fi
+
+ unload_module ${MOD} || return 1
+ fi
+ done
+
+ if [ ${VERBOSE} ]; then
+ echo "Successfully unloaded ZFS module stack"
+ fi
+
+ return 0
+}
+
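+# Find the first unused loopback device; `losetup <device>` queries the
+# device and exits non-zero when it is not currently configured.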
+unused_loop_device() {
+ for DEVICE in `ls -1 /dev/loop*`; do
+ ${LOSETUP} ${DEVICE} &>/dev/null
+ if [ $? -ne 0 ]; then
+ echo ${DEVICE}
+ return
+ fi
+ done
+
+ die "Error: Unable to find unused loopback device"
+}
+
+#
+# This can be slightly dangerous because the loop devices we are
+# cleaning up may not be ours. However, if the devices are currently
+# in use we will not be able to remove them, and we only remove
+# devices which include 'zpool' in the name. So any damage we might
+# do should be limited to other zfs related testing.
+#
+cleanup_loop_devices() {
+ local TMP_FILE=`mktemp`
+
+ ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
+ ${AWK} -F":" -v losetup="$LOSETUP" \
+ '/zpool/ { system("losetup -d "$1) }' ${TMP_FILE}
+ ${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
+
+ rm -f ${TMP_FILE}
+}
+
+#
+# The following udev helper functions assume that the provided
+# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
+# disk mapping. In this mapping each CHANNEL is represented by
+# the letters a-z, and the RANK is represented by the numbers
+# 1-n. A CHANNEL should identify a group of RANKS which are all
+# attached to a single controller, each RANK represents a disk.
+# This provides a simple mechanism to locate a specific drive
+# given a known hardware configuration.
+#
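+# For example, assuming a hypothetical 2-channel, 2-rank enclosure
+# the mapping would be:
+#
+#   channel 1, rank 1 -> /dev/disk/zpool/a1
+#   channel 2, rank 1 -> /dev/disk/zpool/b1
+#   channel 1, rank 2 -> /dev/disk/zpool/a2
+#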
+udev_setup() {
+ local SRC_PATH=$1
+
+	# When running in-tree, manually construct symlinks in the tree
+	# to the proper devices. Symlinks are installed for all entries
+	# in the config file regardless of whether that device actually
+ # exists. When installed as a package udev can be relied on for
+ # this and it will only create links for devices which exist.
+ if [ ${INTREE} ]; then
+		local SAVED_PWD=`pwd`
+ mkdir -p ${DEVDIR}/
+ cd ${DEVDIR}/
+ ${AWK} '!/^#/ && /./ { system( \
+ "ln -f -s /dev/disk/by-path/"$2" "$1";" \
+ "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
+ "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
+ ) }' $SRC_PATH
+		cd ${SAVED_PWD}
+ else
+ DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
+ DST_PATH=/etc/zfs/${DST_FILE}
+
+ if [ -e ${DST_PATH} ]; then
+ die "Error: Config ${DST_PATH} already exists"
+ fi
+
+ cp ${SRC_PATH} ${DST_PATH}
+
+ if [ -f ${UDEVADM} ]; then
+ ${UDEVADM} trigger
+ ${UDEVADM} settle
+ else
+ /sbin/udevtrigger
+ /sbin/udevsettle
+ fi
+ fi
+
+ return 0
+}
+
+udev_cleanup() {
+ local SRC_PATH=$1
+
+ if [ ${INTREE} ]; then
+		local SAVED_PWD=`pwd`
+ cd ${DEVDIR}/
+ ${AWK} '!/^#/ && /./ { system( \
+ "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
+		cd ${SAVED_PWD}
+ fi
+
+ return 0
+}
+
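+# Convert a (channel, rank) pair into a device name: 96+CHANNEL is the
+# ASCII code of the channel letter ('a' is 97), emitted in hex by bc and
+# expanded by printf's \x escape; e.g. `udev_cr2d 3 2` prints "c2".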
+udev_cr2d() {
+ local CHANNEL=`echo "obase=16; $1+96" | bc`
+ local RANK=$2
+
+ printf "\x${CHANNEL}${RANK}"
+}
+
+udev_raid0_setup() {
+ local RANKS=$1
+ local CHANNELS=$2
+ local IDX=0
+
+ RAID0S=()
+ for RANK in `seq 1 ${RANKS}`; do
+ for CHANNEL in `seq 1 ${CHANNELS}`; do
+ DISK=`udev_cr2d ${CHANNEL} ${RANK}`
+ RAID0S[${IDX}]="${DEVDIR}/${DISK}"
+ let IDX=IDX+1
+ done
+ done
+
+ return 0
+}
+
+udev_raid10_setup() {
+ local RANKS=$1
+ local CHANNELS=$2
+ local IDX=0
+
+ RAID10S=()
+ for RANK in `seq 1 ${RANKS}`; do
+ for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
+ let CHANNEL2=CHANNEL1+1
+ DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
+ DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
+ GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
+ RAID10S[${IDX}]="mirror ${GROUP}"
+ let IDX=IDX+1
+ done
+ done
+
+ return 0
+}
+
+udev_raidz_setup() {
+ local RANKS=$1
+ local CHANNELS=$2
+
+ RAIDZS=()
+ for RANK in `seq 1 ${RANKS}`; do
+ RAIDZ=("raidz")
+
+ for CHANNEL in `seq 1 ${CHANNELS}`; do
+ DISK=`udev_cr2d ${CHANNEL} ${RANK}`
+ RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
+ done
+
+ RAIDZS[${RANK}]="${RAIDZ[*]}"
+ done
+
+ return 0
+}
+
+udev_raidz2_setup() {
+ local RANKS=$1
+ local CHANNELS=$2
+
+ RAIDZ2S=()
+ for RANK in `seq 1 ${RANKS}`; do
+ RAIDZ2=("raidz2")
+
+ for CHANNEL in `seq 1 ${CHANNELS}`; do
+ DISK=`udev_cr2d ${CHANNEL} ${RANK}`
+ RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
+ done
+
+ RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
+ done
+
+ return 0
+}
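
As a sketch of what the rank/channel helpers produce: for a hypothetical
2-rank, 3-channel layout with the default DEVDIR, udev_raidz_setup would
leave one raidz vdev specification per rank in RAIDZS:

    RAIDZS[1]="raidz /dev/disk/zpool/a1 /dev/disk/zpool/b1 /dev/disk/zpool/c1"
    RAIDZS[2]="raidz /dev/disk/zpool/a2 /dev/disk/zpool/b2 /dev/disk/zpool/c2"
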
diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh
new file mode 100755
index 000000000..98f00fa0e
--- /dev/null
+++ b/scripts/zconfig.sh
@@ -0,0 +1,572 @@
+#!/bin/bash
+#
+# ZFS/ZPOOL configuration test script.
+
+basedir="$(dirname $0)"
+
+SCRIPT_COMMON=common.sh
+if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
+	. "${basedir}/${SCRIPT_COMMON}"
+else
+	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
+fi
+
+PROG=zconfig.sh
+
+usage() {
+cat << EOF
+USAGE:
+$0 [-hvc]
+
+DESCRIPTION:
+ ZFS/ZPOOL configuration tests
+
+OPTIONS:
+ -h Show this message
+ -v Verbose
+ -c Cleanup lo+file devices at start
+
+EOF
+}
+
+while getopts 'hvc?' OPTION; do
+ case $OPTION in
+ h)
+ usage
+ exit 1
+ ;;
+ v)
+ VERBOSE=1
+ ;;
+ c)
+ CLEANUP=1
+ ;;
+ ?)
+ usage
+ exit
+ ;;
+ esac
+done
+
+if [ $(id -u) != 0 ]; then
+ die "Must run as root"
+fi
+
+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+ cleanup_loop_devices
+ rm -f /tmp/zpool.cache.*
+fi
+
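+# Create partition 1 on DEVICE with sfdisk; the two arguments are the
+# start and size in cylinders, and the three bare ';' input lines leave
+# partitions 2-4 empty.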
+zconfig_partition() {
+ local DEVICE=$1
+ local START=$2
+ local END=$3
+ local TMP_FILE=`mktemp`
+
+ /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
+${START},${END}
+;
+;
+;
+EOF
+
+ rm ${TMP_FILE}
+}
+
+# Validate persistent zpool.cache configuration.
+zconfig_test1() {
+ local POOL_NAME=test1
+ local TMP_FILE1=`mktemp`
+ local TMP_FILE2=`mktemp`
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 1 - persistent zpool.cache: "
+
+	# Create a pool and save its status for comparison.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
+
+ # Unload/load the module stack and verify the pool persists.
+ ${ZFS_SH} -u || fail 4
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
+ cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
+
+ # Cleanup the test pool and temporary files
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
+ rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
+ ${ZFS_SH} -u || fail 10
+
+ pass
+}
+zconfig_test1
+
+# Validate ZFS disk scanning and import w/out zpool.cache configuration.
+zconfig_test2() {
+ local POOL_NAME=test2
+ local TMP_FILE1=`mktemp`
+ local TMP_FILE2=`mktemp`
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 2 - scan disks for pools to import: "
+
+	# Create a pool and save its status for comparison.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
+
+ # Unload the module stack, remove the cache file, load the module
+ # stack and attempt to probe the disks to import the pool. As
+ # a cross check verify the old pool state against the imported.
+ ${ZFS_SH} -u || fail 4
+ rm -f ${TMP_CACHE} || fail 5
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
+ ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
+ ${ZPOOL} import ${POOL_NAME} || fail 8
+ ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
+ cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10
+
+ # Cleanup the test pool and temporary files
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
+ rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
+ ${ZFS_SH} -u || fail 13
+
+ pass
+}
+zconfig_test2
+
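+# Count how many of the given pool, volume, snapshot, and clone device
+# nodes exist (the volume, snapshot, and clone each contribute a whole
+# device plus two partitions) and compare the total against EXPECT,
+# which is 10 when everything is present and 0 after teardown.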
+zconfig_zvol_device_stat() {
+ local EXPECT=$1
+ local POOL_NAME=/dev/$2
+ local ZVOL_NAME=/dev/$3
+ local SNAP_NAME=/dev/$4
+ local CLONE_NAME=/dev/$5
+ local COUNT=0
+
+ # Briefly delay for udev
+ sleep 1
+
+ # Pool exists
+ stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+
+ # Volume and partitions
+ stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+ stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+ stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+ # Snapshot with partitions
+ stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
+ stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+ stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+ # Clone with partitions
+ stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
+ stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+ stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+ if [ $EXPECT -ne $COUNT ]; then
+ return 1
+ fi
+
+ return 0
+}
+
+# zpool import/export device check
+# (1 volume, 2 partitions, 1 snapshot, 1 clone)
+zconfig_test3() {
+ local POOL_NAME=tank
+ local ZVOL_NAME=volume
+ local SNAP_NAME=snap
+ local CLONE_NAME=clone
+ local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+ local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 3 - zpool import/export device: "
+
+ # Create a pool, volume, partition, snapshot, and clone.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
+ zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
+ ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+ ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
+
+ # Verify the devices were created
+ zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
+
+ # Export the pool
+ ${ZPOOL} export ${POOL_NAME} || fail 8
+
+ # verify the devices were removed
+ zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
+
+ # Import the pool, wait 1 second for udev
+ ${ZPOOL} import ${POOL_NAME} || fail 10
+
+ # Verify the devices were created
+ zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
+
+ # Destroy the pool and consequently the devices
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
+
+ # verify the devices were removed
+ zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
+
+ ${ZFS_SH} -u || fail 14
+ rm -f ${TMP_CACHE} || fail 15
+
+ pass
+}
+zconfig_test3
+
+# zpool insmod/rmmod device check (1 volume, 2 partitions, 1 snapshot, 1 clone)
+zconfig_test4() {
+ POOL_NAME=tank
+ ZVOL_NAME=volume
+ SNAP_NAME=snap
+ CLONE_NAME=clone
+ FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+ FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+ TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 4 - zpool insmod/rmmod device: "
+
+ # Create a pool, volume, snapshot, and clone
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
+ zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
+ ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+ ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
+
+ # Verify the devices were created
+ zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
+
+ # Unload the modules
+ ${ZFS_SH} -u || fail 8
+
+ # Verify the devices were removed
+ zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
+
+ # Load the modules, wait 1 second for udev
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
+
+ # Verify the devices were created
+ zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
+
+ # Destroy the pool and consequently the devices
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
+
+ # Verify the devices were removed
+ zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
+
+ ${ZFS_SH} -u || fail 14
+ rm -f ${TMP_CACHE} || fail 15
+
+ pass
+}
+zconfig_test4
+
+# ZVOL volume sanity check
+zconfig_test5() {
+ local POOL_NAME=tank
+ local ZVOL_NAME=fish
+ local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ local SRC_DIR=/bin/
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 5 - zvol+ext3 volume: "
+
+ # Create a pool and volume.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 400M ${FULL_NAME} || fail 3
+
+	# Partition the volume; for a 400M volume there will be
+ # 812 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_NAME} 0 812
+
+ # Format the partition with ext3.
+ /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5
+
+ # Mount the ext3 filesystem and copy some data to it.
+ mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
+ mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
+ cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
+ sync
+
+ # Verify the copied files match the original files.
+ diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9
+
+ # Remove the files, umount, destroy the volume and pool.
+ rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
+ umount /tmp/${ZVOL_NAME}1 || fail 11
+ rmdir /tmp/${ZVOL_NAME}1 || fail 12
+
+ ${ZFS} destroy ${FULL_NAME} || fail 13
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
+ ${ZFS_SH} -u || fail 15
+ rm -f ${TMP_CACHE} || fail 16
+
+ pass
+}
+zconfig_test5
+
+# ZVOL snapshot sanity check
+zconfig_test6() {
+ local POOL_NAME=tank
+ local ZVOL_NAME=fish
+ local SNAP_NAME=pristine
+ local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+ local SRC_DIR=/bin/
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 6 - zvol+ext2 snapshot: "
+
+ # Create a pool and volume.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
+
+	# Partition the volume; for a 400M volume there will be
+ # 812 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812
+
+ # Format the partition with ext2 (no journal).
+ /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
+
+	# Mount the ext2 filesystem.
+ mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
+ mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
+
+ # Snapshot the pristine ext2 filesystem and mount it read-only.
+ ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
+ mkdir -p /tmp/${SNAP_NAME}1 || fail 9
+ mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
+
+ # Copy to original volume
+ cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
+ sync
+
+ # Verify the copied files match the original files,
+ # and the copied files do NOT appear in the snapshot.
+ diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
+ diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
+
+ # umount, destroy the snapshot, volume, and pool.
+ umount /tmp/${SNAP_NAME}1 || fail 14
+ rmdir /tmp/${SNAP_NAME}1 || fail 15
+ ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16
+
+ umount /tmp/${ZVOL_NAME}1 || fail 17
+ rmdir /tmp/${ZVOL_NAME}1 || fail 18
+ ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19
+
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+ ${ZFS_SH} -u || fail 21
+ rm -f ${TMP_CACHE} || fail 22
+
+ pass
+}
+zconfig_test6
+
+# ZVOL clone sanity check
+zconfig_test7() {
+ local POOL_NAME=tank
+ local ZVOL_NAME=fish
+ local SNAP_NAME=pristine
+ local CLONE_NAME=clone
+ local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+ local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+ local SRC_DIR=/bin/
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+ echo -n "test 7 - zvol+ext2 clone: "
+
+ # Create a pool and volume.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
+
+	# Partition the volume; for a 400M volume there will be
+ # 812 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812
+
+ # Format the partition with ext2 (no journal).
+ /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
+
+	# Mount the ext2 filesystem.
+ mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
+ mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
+
+ # Snapshot the pristine ext2 filesystem and mount it read-only.
+ ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
+ mkdir -p /tmp/${SNAP_NAME}1 || fail 9
+ mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
+
+ # Copy to original volume.
+ cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
+ sync
+
+ # Verify the copied files match the original files,
+ # and the copied files do NOT appear in the snapshot.
+ diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
+ diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
+
+ # Clone from the original pristine snapshot
+ ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} && sleep 1 || fail 14
+ mkdir -p /tmp/${CLONE_NAME}1 || fail 15
+ mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16
+
+ # Verify the clone matches the pristine snapshot,
+ # and the files copied to the original volume are NOT there.
+ diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
+ diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18
+
+ # Copy to cloned volume.
+ cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
+ sync
+
+ # Verify the clone matches the modified original volume.
+ diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20
+
+ # umount, destroy the snapshot, volume, and pool.
+ umount /tmp/${CLONE_NAME}1 || fail 21
+ rmdir /tmp/${CLONE_NAME}1 || fail 22
+ ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23
+
+ umount /tmp/${SNAP_NAME}1 || fail 24
+ rmdir /tmp/${SNAP_NAME}1 || fail 25
+ ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26
+
+ umount /tmp/${ZVOL_NAME}1 || fail 27
+ rmdir /tmp/${ZVOL_NAME}1 || fail 28
+ ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29
+
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
+ ${ZFS_SH} -u || fail 31
+ rm -f ${TMP_CACHE} || fail 32
+
+ pass
+}
+zconfig_test7
+
+# Send/Receive sanity check
+zconfig_test8() {
+ local POOL_NAME1=tank1
+ local POOL_NAME2=tank2
+ local ZVOL_NAME=fish
+ local SNAP_NAME=snap
+ local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
+ local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
+ local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
+ local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
+ local SRC_DIR=/bin/
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+
+	echo -n "test 8 - zfs send/receive: "
+
+	# Create two pools and a volume
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
+ ${ZFS} create -V 400M ${FULL_ZVOL_NAME1} || fail 4
+
+	# Partition the volume; for a 400M volume there will be
+ # 812 cylinders, 16 heads, and 63 sectors per track.
+ zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 812
+
+ # Format the partition with ext2.
+ /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5
+
+	# Mount the ext2 filesystem and copy some data to it.
+ mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
+ mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
+ cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
+ sync || fail 9
+
+	# Snapshot the ext2 filesystem so it may be sent.
+ ${ZFS} snapshot ${FULL_SNAP_NAME1} && sleep 1 || fail 11
+
+ # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
+ (${ZFS} send ${FULL_SNAP_NAME1} | \
+ ${ZFS} receive ${FULL_ZVOL_NAME2}) && sleep 1 || fail 12
+
+	# Mount the sent ext2 filesystem.
+ mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
+ mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14
+
+ # Verify the contents of the volumes match
+ diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
+ &>/dev/null || fail 15
+
+ # Umount, destroy the volume and pool.
+ umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
+ umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
+ rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
+ rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
+ rmdir /tmp/${POOL_NAME1} || fail 20
+ rmdir /tmp/${POOL_NAME2} || fail 21
+
+ ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
+ ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
+ ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
+ ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
+ ${ZFS_SH} -u || fail 28
+ rm -f ${TMP_CACHE} || fail 29
+
+ pass
+}
+zconfig_test8
+
+# zpool event sanity check
+zconfig_test9() {
+ local POOL_NAME=tank
+ local ZVOL_NAME=fish
+ local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+ local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+ local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`
+
+	echo -n "test 9 - zpool events: "
+
+	# Create a pool and volume.
+ ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
+ ${ZFS} create -V 400M ${FULL_NAME} || fail 3
+
+	# Dump the events; there should be at least 5 lines.
+ ${ZPOOL} events >${TMP_EVENTS} || fail 4
+ EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
+ [ $EVENTS -lt 5 ] && fail 5
+
+ # Clear the events and ensure there are none.
+ ${ZPOOL} events -c >/dev/null || fail 6
+ ${ZPOOL} events >${TMP_EVENTS} || fail 7
+ EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
+ [ $EVENTS -gt 1 ] && fail 8
+
+ ${ZFS} destroy ${FULL_NAME} || fail 9
+ ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
+ ${ZFS_SH} -u || fail 11
+ rm -f ${TMP_CACHE} || fail 12
+ rm -f ${TMP_EVENTS} || fail 13
+
+ pass
+}
+zconfig_test9
+
+exit 0
+
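Assuming an in-tree build, the whole suite would be run as root, e.g.:

    sudo ./scripts/zconfig.sh -v -c

where -c first clears any stale loopback and file vdevs from earlier runs.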
diff --git a/scripts/zfs.sh b/scripts/zfs.sh
new file mode 100755
index 000000000..523fbfcc0
--- /dev/null
+++ b/scripts/zfs.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+#
+# A simple script to simplify loading and unloading the ZFS module stack.
+
+basedir="$(dirname $0)"
+
+SCRIPT_COMMON=common.sh
+if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
+	. "${basedir}/${SCRIPT_COMMON}"
+else
+	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
+fi
+
+PROG=zfs.sh
+UNLOAD=
+
+usage() {
+cat << EOF
+USAGE:
+$0 [-hvud] [module-options]
+
+DESCRIPTION:
+ Load/unload the ZFS module stack.
+
+OPTIONS:
+ -h Show this message
+ -v Verbose
+ -u Unload modules
+ -d Save debug log on unload
+
+MODULE-OPTIONS:
+	Must be of the form module="options", for example:
+
+$0 zfs="zfs_prefetch_disable=1"
+$0 zfs="zfs_prefetch_disable=1 zfs_mdcomp_disable=1"
+$0 spl="spl_debug_mask=0"
+
+EOF
+}
+
+while getopts 'hvud' OPTION; do
+ case $OPTION in
+ h)
+ usage
+ exit 1
+ ;;
+ v)
+ VERBOSE=1
+ ;;
+ u)
+ UNLOAD=1
+ ;;
+ d)
+ DUMP_LOG=1
+ ;;
+ ?)
+ usage
+ exit
+ ;;
+ esac
+done
+
+if [ $(id -u) != 0 ]; then
+ die "Must run as root"
+fi
+
+if [ ${UNLOAD} ]; then
+ unload_modules
+else
+ check_modules || die "${ERROR}"
+ load_modules "$@"
+fi
+
+exit 0
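A typical load/unload cycle, with a module option passed through (option
value assumed valid for the build):

    sudo ./scripts/zfs.sh zfs="zfs_prefetch_disable=1"
    sudo ./scripts/zfs.sh -u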
diff --git a/scripts/zpool-config/dm0-raid0.sh b/scripts/zpool-config/dm0-raid0.sh
new file mode 100644
index 000000000..89f66e73a
--- /dev/null
+++ b/scripts/zpool-config/dm0-raid0.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Four disk LVM (DM) Raid-0 stripe presented to ZFS as a single vdev
+#
+
+PVCREATE=${PVCREATE:-/sbin/pvcreate}
+PVREMOVE=${PVREMOVE:-/sbin/pvremove}
+PVDEVICES=${PVDEVICES:-"/dev/sd[abcd]"}
+
+VGCREATE=${VGCREATE:-/sbin/vgcreate}
+VGREMOVE=${VGREMOVE:-/sbin/vgremove}
+VGNAME=${VGNAME:-"vg_tank"}
+
+LVCREATE=${LVCREATE:-/sbin/lvcreate}
+LVREMOVE=${LVREMOVE:-/sbin/lvremove}
+LVNAME=${LVNAME:-"lv_tank"}
+LVSTRIPES=${LVSTRIPES:-4}
+LVSIZE=${LVSIZE:-32G}
+
+DEVICES="/dev/${VGNAME}/${LVNAME}"
+
+zpool_dm_destroy() {
+ msg ${LVREMOVE} -f ${VGNAME}/${LVNAME}
+ ${LVREMOVE} -f ${VGNAME}/${LVNAME} >/dev/null
+
+ msg ${VGREMOVE} -f ${VGNAME}
+ ${VGREMOVE} -f ${VGNAME} >/dev/null
+
+ msg ${PVREMOVE} ${PVDEVICES}
+ ${PVREMOVE} ${PVDEVICES} >/dev/null
+}
+
+zpool_create() {
+ # Remove EFI labels which cause pvcreate failure
+ for DEVICE in ${PVDEVICES}; do
+ dd if=/dev/urandom of=${DEVICE} bs=1k count=32 &>/dev/null
+ done
+
+ msg ${PVCREATE} -f ${PVDEVICES}
+ ${PVCREATE} -f ${PVDEVICES} >/dev/null || exit 1
+
+ msg ${VGCREATE} ${VGNAME} ${PVDEVICES}
+ ${VGCREATE} ${VGNAME} ${PVDEVICES} >/dev/null || exit 2
+
+ msg ${LVCREATE} --size=${LVSIZE} --stripes=${LVSTRIPES} \
+ --name=${LVNAME} ${VGNAME}
+ ${LVCREATE} --size=${LVSIZE} --stripes=${LVSTRIPES} \
+ --name=${LVNAME} ${VGNAME} >/dev/null || exit 3
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+		${DEVICES} || { zpool_dm_destroy; exit 4; }
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ zpool_dm_destroy
+}
diff --git a/scripts/zpool-config/dragon-raid0-1x70.sh b/scripts/zpool-config/dragon-raid0-1x70.sh
new file mode 100644
index 000000000..dda995700
--- /dev/null
+++ b/scripts/zpool-config/dragon-raid0-1x70.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Dragon (White Box) Raid-0 Configuration (1x70)
+#
+
+RANKS=7
+CHANNELS=10
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
+ udev_raid0_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
+}
diff --git a/scripts/zpool-config/dragon-raid10-35x2.sh b/scripts/zpool-config/dragon-raid10-35x2.sh
new file mode 100644
index 000000000..37f2a539a
--- /dev/null
+++ b/scripts/zpool-config/dragon-raid10-35x2.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Dragon (White Box) Raid-10 Configuration (35x2(1+1))
+#
+
+RANKS=7
+CHANNELS=10
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
+ udev_raid10_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
+}
diff --git a/scripts/zpool-config/dragon-raidz-7x10.sh b/scripts/zpool-config/dragon-raidz-7x10.sh
new file mode 100644
index 000000000..9857cf1c0
--- /dev/null
+++ b/scripts/zpool-config/dragon-raidz-7x10.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Dragon (White Box) Raid-Z Configuration (7x10(9+1))
+#
+
+RANKS=7
+CHANNELS=10
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
+ udev_raidz_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
+}
diff --git a/scripts/zpool-config/dragon-raidz2-7x10.sh b/scripts/zpool-config/dragon-raidz2-7x10.sh
new file mode 100644
index 000000000..0dd07a19b
--- /dev/null
+++ b/scripts/zpool-config/dragon-raidz2-7x10.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Dragon (White Box) Raid-Z2 Configuration (7x10(8+2))
+#
+
+RANKS=7
+CHANNELS=10
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
+ udev_raidz2_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
+}
diff --git a/scripts/zpool-config/file-raid0.sh b/scripts/zpool-config/file-raid0.sh
new file mode 100644
index 000000000..5ec80b05c
--- /dev/null
+++ b/scripts/zpool-config/file-raid0.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# 4 File Raid-0 Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ msg "Creating ${FILE}"
+ rm -f ${FILE} || exit 1
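+		# Allocate a 256M sparse file: seek past 256 1M blocks
+		# without writing any data (count=0)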
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${FILES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${FILES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ msg "Removing ${FILE}"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/file-raid10.sh b/scripts/zpool-config/file-raid10.sh
new file mode 100644
index 000000000..ae7f0ae07
--- /dev/null
+++ b/scripts/zpool-config/file-raid10.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# 4 File Raid-10 Configuration
+#
+
+FILES_M1="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1"
+FILES_M2="/tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+FILES="${FILES_M1} ${FILES_M2}"
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ msg "Creating ${FILE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+ mirror ${FILES_M1} mirror ${FILES_M2}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+ mirror ${FILES_M1} mirror ${FILES_M2} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ msg "Removing ${FILE}"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/file-raidz.sh b/scripts/zpool-config/file-raidz.sh
new file mode 100644
index 000000000..5b6c3ea2c
--- /dev/null
+++ b/scripts/zpool-config/file-raidz.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# 4 File Raid-Z Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ msg "Creating ${FILE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz ${FILES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz ${FILES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ msg "Removing ${FILE}"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/file-raidz2.sh b/scripts/zpool-config/file-raidz2.sh
new file mode 100644
index 000000000..bc0e5ec8a
--- /dev/null
+++ b/scripts/zpool-config/file-raidz2.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# 4 File Raid-Z2 Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ msg "Creating ${FILE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz2 ${FILES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz2 ${FILES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ msg "Removing ${FILE}"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/hda-raid0.sh b/scripts/zpool-config/hda-raid0.sh
new file mode 100644
index 000000000..fb743fae5
--- /dev/null
+++ b/scripts/zpool-config/hda-raid0.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Single disk /dev/hda Raid-0 Configuration
+#
+
+DEVICES="/dev/hda"
+
+zpool_create() {
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
+}
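
Each zpool-config file only has to define zpool_create() and zpool_destroy();
zpool-create.sh sources the selected file and invokes one or the other. A
minimal hypothetical config (device path assumed) would look like:

    DEVICES="/dev/sdz"

    zpool_create() {
        msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
        ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES} || exit 1
    }

    zpool_destroy() {
        msg ${ZPOOL} destroy ${ZPOOL_NAME}
        ${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
    }
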
diff --git a/scripts/zpool-config/lo-raid0.sh b/scripts/zpool-config/lo-raid0.sh
new file mode 100644
index 000000000..321d9b1f7
--- /dev/null
+++ b/scripts/zpool-config/lo-raid0.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# 4 Device Loopback Raid-0 Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+DEVICES=""
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ DEVICE=`unused_loop_device`
+ msg "Creating ${FILE} using loopback device ${DEVICE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ ${LOSETUP} ${DEVICE} ${FILE} ||
+ die "Error $? creating ${FILE} -> ${DEVICE} loopback"
+ DEVICES="${DEVICES} ${DEVICE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
+ msg "Removing ${FILE} using loopback device ${DEVICE}"
+ ${LOSETUP} -d ${DEVICE} ||
+ die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/lo-raid10.sh b/scripts/zpool-config/lo-raid10.sh
new file mode 100644
index 000000000..f9c47cd1e
--- /dev/null
+++ b/scripts/zpool-config/lo-raid10.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# 4 Device Loopback Raid-10 Configuration
+#
+
+FILES_M1="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1"
+FILES_M2="/tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+FILES="${FILES_M1} ${FILES_M2}"
+DEVICES_M1=""
+DEVICES_M2=""
+
+zpool_create() {
+ for FILE in ${FILES_M1}; do
+ DEVICE=`unused_loop_device`
+ msg "Creating ${FILE} using loopback device ${DEVICE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ ${LOSETUP} ${DEVICE} ${FILE} ||
+ die "Error $? creating ${FILE} -> ${DEVICE} loopback"
+ DEVICES_M1="${DEVICES_M1} ${DEVICE}"
+ done
+
+ for FILE in ${FILES_M2}; do
+ DEVICE=`unused_loop_device`
+ msg "Creating ${FILE} using loopback device ${DEVICE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ ${LOSETUP} ${DEVICE} ${FILE} ||
+ die "Error $? creating ${FILE} -> ${DEVICE} loopback"
+ DEVICES_M2="${DEVICES_M2} ${DEVICE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+ mirror ${DEVICES_M1} mirror ${DEVICES_M2}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+		mirror ${DEVICES_M1} mirror ${DEVICES_M2} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
+ msg "Removing ${FILE} using loopback device ${DEVICE}"
+ ${LOSETUP} -d ${DEVICE} ||
+ die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/lo-raidz.sh b/scripts/zpool-config/lo-raidz.sh
new file mode 100644
index 000000000..509f6ee1d
--- /dev/null
+++ b/scripts/zpool-config/lo-raidz.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# 4 Device Loopback Raid-Z Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+DEVICES=""
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ DEVICE=`unused_loop_device`
+ msg "Creating ${FILE} using loopback device ${DEVICE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ ${LOSETUP} ${DEVICE} ${FILE} ||
+ die "Error $? creating ${FILE} -> ${DEVICE} loopback"
+ DEVICES="${DEVICES} ${DEVICE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
+ msg "Removing ${FILE} using loopback device ${DEVICE}"
+ ${LOSETUP} -d ${DEVICE} ||
+ die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/lo-raidz2.sh b/scripts/zpool-config/lo-raidz2.sh
new file mode 100644
index 000000000..6e61293c0
--- /dev/null
+++ b/scripts/zpool-config/lo-raidz2.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# 4 Device Loopback Raid-Z2 Configuration
+#
+
+FILES="/tmp/zpool-vdev0 \
+ /tmp/zpool-vdev1 \
+ /tmp/zpool-vdev2 \
+ /tmp/zpool-vdev3"
+DEVICES=""
+
+zpool_create() {
+ for FILE in ${FILES}; do
+ DEVICE=`unused_loop_device`
+ msg "Creating ${FILE} using loopback device ${DEVICE}"
+ rm -f ${FILE} || exit 1
+ dd if=/dev/zero of=${FILE} bs=1024k count=0 seek=256 \
+ &>/dev/null || die "Error $? creating ${FILE}"
+ ${LOSETUP} ${DEVICE} ${FILE} ||
+ die "Error $? creating ${FILE} -> ${DEVICE} loopback"
+ DEVICES="${DEVICES} ${DEVICE}"
+ done
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz2 ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} raidz2 ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ for FILE in ${FILES}; do
+ DEVICE=`${LOSETUP} -a | grep ${FILE} | head -n1|cut -f1 -d:`
+ msg "Removing ${FILE} using loopback device ${DEVICE}"
+ ${LOSETUP} -d ${DEVICE} ||
+ die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
+ rm -f ${FILE} || exit 1
+ done
+}
diff --git a/scripts/zpool-config/md0-raid10.sh b/scripts/zpool-config/md0-raid10.sh
new file mode 100644
index 000000000..ccc717015
--- /dev/null
+++ b/scripts/zpool-config/md0-raid10.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Four disk MD Raid-10 array presented to ZFS as a single vdev
+#
+
+MDADM=${MDADM:-/sbin/mdadm}
+MDDEVICES=${MDDEVICES:-"/dev/sd[abcd]"}
+MDCOUNT=${MDCOUNT:-4}
+MDRAID=${MDRAID:-10}
+
+DEVICES="/dev/md0"
+
+zpool_md_destroy() {
+ msg ${MDADM} --manage --stop ${DEVICES}
+ ${MDADM} --manage --stop ${DEVICES} &>/dev/null
+
+ msg ${MDADM} --zero-superblock ${MDDEVICES}
+ ${MDADM} --zero-superblock ${MDDEVICES} >/dev/null
+}
+
+zpool_create() {
+ msg ${MDADM} --create ${DEVICES} --level=${MDRAID} \
+ --raid-devices=${MDCOUNT} ${MDDEVICES}
+ ${MDADM} --create ${DEVICES} --level=${MDRAID} \
+ --raid-devices=${MDCOUNT} ${MDDEVICES} \
+		&>/dev/null || { zpool_md_destroy; exit 1; }
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+		${DEVICES} || { zpool_md_destroy; exit 2; }
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ zpool_md_destroy
+}
diff --git a/scripts/zpool-config/md0-raid5.sh b/scripts/zpool-config/md0-raid5.sh
new file mode 100644
index 000000000..b5b22fe7a
--- /dev/null
+++ b/scripts/zpool-config/md0-raid5.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Four disk MD Raid-5 array presented to ZFS as a single vdev
+#
+
+MDADM=${MDADM:-/sbin/mdadm}
+MDDEVICES=${MDDEVICES:-"/dev/sd[abcd]"}
+MDCOUNT=${MDCOUNT:-4}
+MDRAID=${MDRAID:-5}
+
+DEVICES="/dev/md0"
+
+zpool_md_destroy() {
+ msg ${MDADM} --manage --stop ${DEVICES}
+ ${MDADM} --manage --stop ${DEVICES} &>/dev/null
+
+ msg ${MDADM} --zero-superblock ${MDDEVICES}
+ ${MDADM} --zero-superblock ${MDDEVICES} >/dev/null
+}
+
+zpool_create() {
+ msg ${MDADM} --create ${DEVICES} --level=${MDRAID} \
+ --raid-devices=${MDCOUNT} ${MDDEVICES}
+ ${MDADM} --create ${DEVICES} --level=${MDRAID} \
+ --raid-devices=${MDCOUNT} ${MDDEVICES} \
+		&>/dev/null || { zpool_md_destroy; exit 1; }
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} \
+		${DEVICES} || { zpool_md_destroy; exit 2; }
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+
+ zpool_md_destroy
+}
diff --git a/scripts/zpool-config/ram0-raid0.sh b/scripts/zpool-config/ram0-raid0.sh
new file mode 100644
index 000000000..b1939bfaa
--- /dev/null
+++ b/scripts/zpool-config/ram0-raid0.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Single ram disk /dev/ram0 Raid-0 Configuration
+#
+
+DEVICES="/dev/ram0"
+
+zpool_create() {
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
+}
diff --git a/scripts/zpool-config/sda-raid0.sh b/scripts/zpool-config/sda-raid0.sh
new file mode 100644
index 000000000..b11092466
--- /dev/null
+++ b/scripts/zpool-config/sda-raid0.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Single disk /dev/sda Raid-0 Configuration
+#
+
+DEVICES="/dev/sda"
+
+zpool_create() {
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${DEVICES} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME} || exit 1
+}
diff --git a/scripts/zpool-config/supermicro-raid0-1x16.sh b/scripts/zpool-config/supermicro-raid0-1x16.sh
new file mode 100644
index 000000000..efe48459d
--- /dev/null
+++ b/scripts/zpool-config/supermicro-raid0-1x16.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Supermicro (White Box) Raid-0 Configuration (1x16)
+#
+
+RANKS=4
+CHANNELS=4
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+ udev_raid0_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+}
diff --git a/scripts/zpool-config/supermicro-raid10-8x2.sh b/scripts/zpool-config/supermicro-raid10-8x2.sh
new file mode 100644
index 000000000..a6e6be6c0
--- /dev/null
+++ b/scripts/zpool-config/supermicro-raid10-8x2.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Supermicro (White Box) Raid-10 Configuration (8x2(1+1))
+#
+
+RANKS=4
+CHANNELS=4
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+ udev_raid10_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+}
diff --git a/scripts/zpool-config/supermicro-raidz-4x4.sh b/scripts/zpool-config/supermicro-raidz-4x4.sh
new file mode 100644
index 000000000..9ed2780e9
--- /dev/null
+++ b/scripts/zpool-config/supermicro-raidz-4x4.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Supermicro (White Box) Raid-Z Configuration (4x4(3+1))
+#
+
+RANKS=4
+CHANNELS=4
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+ udev_raidz_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+}
diff --git a/scripts/zpool-config/supermicro-raidz2-4x4.sh b/scripts/zpool-config/supermicro-raidz2-4x4.sh
new file mode 100644
index 000000000..ed3eedfdf
--- /dev/null
+++ b/scripts/zpool-config/supermicro-raidz2-4x4.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Supermicro (White Box) Raid-Z2 Configuration (4x4(2+2))
+#
+
+RANKS=4
+CHANNELS=4
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+ udev_raidz2_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
+}
diff --git a/scripts/zpool-config/x4550-raid0-1x48.sh b/scripts/zpool-config/x4550-raid0-1x48.sh
new file mode 100644
index 000000000..16156aa09
--- /dev/null
+++ b/scripts/zpool-config/x4550-raid0-1x48.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Sun Fire x4550 (Thumper/Thor) Raid-0 Configuration (1x48)
+#
+
+RANKS=8
+CHANNELS=6
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
+ udev_raid0_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
+}
diff --git a/scripts/zpool-config/x4550-raid10-24x2.sh b/scripts/zpool-config/x4550-raid10-24x2.sh
new file mode 100644
index 000000000..ec91f43e6
--- /dev/null
+++ b/scripts/zpool-config/x4550-raid10-24x2.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Sun Fire x4550 (Thumper/Thor) Raid-10 Configuration (24x2(1+1))
+#
+
+RANKS=8
+CHANNELS=6
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
+ udev_raid10_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
+}
diff --git a/scripts/zpool-config/x4550-raidz-8x6.sh b/scripts/zpool-config/x4550-raidz-8x6.sh
new file mode 100644
index 000000000..ed31a80e6
--- /dev/null
+++ b/scripts/zpool-config/x4550-raidz-8x6.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Sun Fire x4550 (Thumper/Thor) Raid-Z Configuration (8x6(5+1))
+#
+
+RANKS=8
+CHANNELS=6
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
+ udev_raidz_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
+}
diff --git a/scripts/zpool-config/x4550-raidz2-8x6.sh b/scripts/zpool-config/x4550-raidz2-8x6.sh
new file mode 100644
index 000000000..45ccd7474
--- /dev/null
+++ b/scripts/zpool-config/x4550-raidz2-8x6.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Sun Fire x4550 (Thumper/Thor) Raid-Z2 Configuration (8x6(4+2))
+#
+
+RANKS=8
+CHANNELS=6
+
+zpool_create() {
+ udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
+ udev_raidz2_setup ${RANKS} ${CHANNELS}
+
+ msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
+ ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
+}
+
+zpool_destroy() {
+ msg ${ZPOOL} destroy ${ZPOOL_NAME}
+ ${ZPOOL} destroy ${ZPOOL_NAME}
+ udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
+}
diff --git a/scripts/zpool-create.sh b/scripts/zpool-create.sh
new file mode 100755
index 000000000..d6b301e32
--- /dev/null
+++ b/scripts/zpool-create.sh
@@ -0,0 +1,133 @@
+#!/bin/bash
+
+basedir="$(dirname $0)"
+
+SCRIPT_COMMON=common.sh
+if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
+	. "${basedir}/${SCRIPT_COMMON}"
+else
+	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
+fi
+
+PROG=zpool-create.sh
+
+usage() {
+cat << EOF
+USAGE:
+$0 [-hvfd] [-c config] [-p pool-name] [-l zpool-options] [-s zfs-options]
+
+DESCRIPTION:
+ Create one of several predefined zpool configurations.
+
+OPTIONS:
+ -h Show this message
+ -v Verbose
+ -f Force everything
+ -c Configuration for zpool
+ -p Name for zpool
+ -d Destroy zpool (default create)
+ -l Additional zpool options
+ -s Additional zfs options
+
+EOF
+}
+
+check_config() {
+
+ if [ ! -f ${ZPOOL_CONFIG} ]; then
+ local NAME=`basename ${ZPOOL_CONFIG} .sh`
+ ERROR="Unknown config '${NAME}', available configs are:\n"
+
+ for CFG in `ls ${ZPOOLDIR}/ | grep ".sh"`; do
+ local NAME=`basename ${CFG} .sh`
+ ERROR="${ERROR}${NAME}\n"
+ done
+
+ return 1
+ fi
+
+ return 0
+}
+
+ZPOOL_CONFIG=unknown
+ZPOOL_NAME=tank
+ZPOOL_DESTROY=
+ZPOOL_OPTIONS=""
+ZFS_OPTIONS=""
+
+while getopts 'hvfc:p:dl:s:' OPTION; do
+ case $OPTION in
+ h)
+ usage
+ exit 1
+ ;;
+ v)
+ VERBOSE=1
+ VERBOSE_FLAG="-v"
+ ;;
+ f)
+ FORCE=1
+ FORCE_FLAG="-f"
+ ;;
+ c)
+ ZPOOL_CONFIG=${ZPOOLDIR}/${OPTARG}.sh
+ ;;
+ p)
+ ZPOOL_NAME=${OPTARG}
+ ;;
+ d)
+ ZPOOL_DESTROY=1
+ ;;
+ l)
+ ZPOOL_OPTIONS=${OPTARG}
+ ;;
+ s)
+ ZFS_OPTIONS=${OPTARG}
+ ;;
+ ?)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ $(id -u) != 0 ]; then
+ die "Must run as root"
+fi
+
+check_config || die "${ERROR}"
+. ${ZPOOL_CONFIG}
+
+if [ ${ZPOOL_DESTROY} ]; then
+ zpool_destroy
+else
+ zpool_create
+
+ if [ "${ZPOOL_OPTIONS}" ]; then
+ if [ ${VERBOSE} ]; then
+ echo
+ echo "${ZPOOL} ${ZPOOL_OPTIONS} ${ZPOOL_NAME}"
+ fi
+ ${ZPOOL} ${ZPOOL_OPTIONS} ${ZPOOL_NAME} || exit 1
+ fi
+
+ if [ "${ZFS_OPTIONS}" ]; then
+ if [ ${VERBOSE} ]; then
+ echo
+ echo "${ZFS} ${ZFS_OPTIONS} ${ZPOOL_NAME}"
+ fi
+ ${ZFS} ${ZFS_OPTIONS} ${ZPOOL_NAME} || exit 1
+ fi
+
+ if [ ${VERBOSE} ]; then
+ echo
+ echo "zpool list"
+ ${ZPOOL} list || exit 1
+
+ echo
+ echo "zpool status ${ZPOOL_NAME}"
+ ${ZPOOL} status ${ZPOOL_NAME} || exit 1
+ fi
+fi
+
+exit 0
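
For example, creating and then destroying a loopback raidz2 pool named tank
(the same configuration the zconfig.sh tests use) would be:

    sudo ./scripts/zpool-create.sh -v -c lo-raidz2 -p tank
    sudo ./scripts/zpool-create.sh -c lo-raidz2 -p tank -d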