author    LOLi <[email protected]>  2017-12-28 19:15:32 +0100
committer Tony Hutter <[email protected]>  2018-01-30 10:27:31 -0600
commit    a8fa31b50b958306cd39c21e8518f776ee59f1b6 (patch)
tree      847c4082960a2bb9e77f3c83cf51a1f933700b5c /tests
parent    8d82a19def540bba43c8c7597142ff53f7a0b7e5 (diff)
Fix 'zpool add' handling of nested interior VDEVs
When replacing a faulted device which was previously handled by a spare,
multiple levels of nested interior VDEVs will be present in the pool
configuration; the following example illustrates one of the possible
situations:

   NAME                          STATE     READ WRITE CKSUM
   testpool                      DEGRADED     0     0     0
     raidz1-0                    DEGRADED     0     0     0
       spare-0                   DEGRADED     0     0     0
         replacing-0             DEGRADED     0     0     0
           /var/tmp/fault-dev    UNAVAIL      0     0     0  cannot open
           /var/tmp/replace-dev  ONLINE       0     0     0
         /var/tmp/spare-dev1     ONLINE       0     0     0
       /var/tmp/safe-dev         ONLINE       0     0     0
   spares
     /var/tmp/spare-dev1         INUSE     currently in use

This is safe and allowed, but get_replication() needs to handle this
situation gracefully to let zpool add new devices to the pool.

Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #6678
Closes #6996
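To make the failure mode concrete, here is a minimal sketch (the device
path is hypothetical; the pool is assumed to already be in the nested
spare/replacing state shown above). 'zpool add -n' dry-runs the add and
prints the would-be layout without modifying the pool, so it exercises
the same replication-level check that get_replication() backs:

    # Sketch only: pool is in the nested spare/replacing state above.
    # Before this fix, the replication check could trip over the nested
    # interior vdevs and reject the request; with the fix, the dry run
    # prints the resulting configuration.
    zpool add -n testpool spare /var/tmp/spare-dev2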
Diffstat (limited to 'tests')
-rw-r--r--  tests/runfiles/linux.run                                                            2
-rw-r--r--  tests/zfs-tests/include/libtest.shlib                                              66
-rw-r--r--  tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am                     3
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh 111
4 files changed, 178 insertions(+), 4 deletions(-)
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index f872c0cbf..303c27529 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -228,7 +228,7 @@ tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_005_pos', 'zpool_add_006_pos',
'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg',
'zpool_add_010_pos',
- 'add-o_ashift', 'add_prop_ashift']
+ 'add-o_ashift', 'add_prop_ashift', 'add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 345d1903d..86f172a6d 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -1989,6 +1989,31 @@ function check_hotspare_state # pool disk state{inuse,avail}
}
#
+# Wait until a hotspare transitions to a given state or times out.
+#
+# Return 0 when pool/disk matches expected state, 1 on timeout.
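+#
+# Example usage (disk name hypothetical):
+#   wait_hotspare_state $TESTPOOL sdb "INUSE" 60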
+#
+function wait_hotspare_state # pool disk state timeout
+{
+ typeset pool=$1
+ typeset disk=${2#$DEV_DSKDIR/}
+ typeset state=$3
+ typeset timeout=${4:-60}
+ typeset -i i=0
+
+ while [[ $i -lt $timeout ]]; do
+ if check_hotspare_state $pool $disk $state; then
+ return 0
+ fi
+
+ i=$((i+1))
+ sleep 1
+ done
+
+ return 1
+}
+
+#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
@@ -2027,6 +2052,31 @@ function check_vdev_state # pool disk state{online,offline,unavail}
}
#
+# Wait until a vdev transitions to a given state or times out.
+#
+# Return 0 when pool/disk matches expected state, 1 on timeout.
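+#
+# Example usage (disk name hypothetical):
+#   wait_vdev_state $TESTPOOL sdb "ONLINE" 60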
+#
+function wait_vdev_state # pool disk state timeout
+{
+ typeset pool=$1
+ typeset disk=${2#$DEV_DSKDIR/}
+ typeset state=$3
+ typeset timeout=${4:-60}
+ typeset -i i=0
+
+ while [[ $i -lt $timeout ]]; do
+ if check_vdev_state $pool $disk $state; then
+ return 0
+ fi
+
+ i=$((i+1))
+ sleep 1
+ done
+
+ return 1
+}
+
+#
# Check the output of 'zpool status -v <pool>',
# and see if the content of <token> contains the <keyword> specified.
#
@@ -3394,14 +3444,26 @@ function zed_stop
if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
kill $zedpid
- wait $zedpid
+ while ps -p $zedpid > /dev/null; do
+ sleep 1
+ done
rm -f ${ZEDLET_DIR}/zed.pid
fi
-
return 0
}
#
+# Drain all zevents, looping because new events may arrive while we clear
+#
+function zed_events_drain
+{
+ while [ $(zpool events -H | wc -l) -ne 0 ]; do
+ sleep 1
+ zpool events -c >/dev/null
+ done
+}
+
+#
# Check if the provided device is being actively used as a swap device.
#
function is_swap_inuse
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
index 4b6b533fe..062028299 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
@@ -15,4 +15,5 @@ dist_pkgdata_SCRIPTS = \
zpool_add_009_neg.ksh \
zpool_add_010_pos.ksh \
add-o_ashift.ksh \
- add_prop_ashift.ksh
+ add_prop_ashift.ksh \
+ add_nested_replacing_spare.ksh
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
new file mode 100755
index 000000000..b38079852
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
@@ -0,0 +1,111 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <[email protected]>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
+
+#
+# DESCRIPTION:
+# 'zpool add' works with nested replacing/spare vdevs
+#
+# STRATEGY:
+# 1. Create a redundant pool with a spare device
+# 2. Manually fault a device, wait for the hot-spare and then replace it:
+# this creates a situation where replacing and spare vdevs are nested.
+# 3. Verify 'zpool add' is able to add new devices to the pool.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ zed_stop
+ zed_cleanup
+ log_must zinject -c all
+ destroy_pool $TESTPOOL
+ log_must rm -f $DATA_DEVS $SPARE_DEVS
+}
+
+log_assert "'zpool add' works with nested replacing/spare vdevs"
+log_onexit cleanup
+
+FAULT_DEV="$TEST_BASE_DIR/fault-dev"
+SAFE_DEV1="$TEST_BASE_DIR/safe-dev1"
+SAFE_DEV2="$TEST_BASE_DIR/safe-dev2"
+SAFE_DEV3="$TEST_BASE_DIR/safe-dev3"
+SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
+REPLACE_DEV="$TEST_BASE_DIR/replace-dev"
+ADD_DEV="$TEST_BASE_DIR/add-dev"
+DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
+SPARE_DEV1="$TEST_BASE_DIR/spare-dev1"
+SPARE_DEV2="$TEST_BASE_DIR/spare-dev2"
+SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"
+
+# We need ZED running to work with spares
+zed_setup
+zed_start
+# Clear events from previous runs
+zed_events_drain
+
+for type in "mirror" "raidz1" "raidz2" "raidz3"
+do
+ # 1. Create a redundant pool with a spare device
+ truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
+ log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
+ log_must zpool add $TESTPOOL spare $SPARE_DEV1
+
+ # 2.1 Fault a device, verify the spare is kicked in
+ log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
+ log_must zpool scrub $TESTPOOL
+ log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
+ log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
+ log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
+ log_must check_state $TESTPOOL "" "DEGRADED"
+
+ # 2.2 Replace the faulted device: this creates a replacing vdev inside a
+ # spare vdev
+ log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
+ log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60
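+ # Check the vdev nesting: the four 'zpool status' lines starting at the
+ # pool name must read <pool>, <type>-0, spare-0 and replacing-0, in order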
+ zpool status | awk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
+ $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
+ END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
+ if [[ $? -ne 0 ]]; then
+ log_fail "Pool does not contain nested replacing/spare vdevs"
+ fi
+
+ # 3. Verify 'zpool add' is able to add new devices
+ log_must zpool add $TESTPOOL spare $SPARE_DEV2
+ log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
+ log_must zpool add -f $TESTPOOL $ADD_DEV
+ log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60
+
+ # Cleanup
+ log_must zinject -c all
+ destroy_pool $TESTPOOL
+ log_must rm -f $DATA_DEVS $SPARE_DEVS
+done
+
+log_pass "'zpool add' works with nested replacing/spare vdevs"
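
For reference, a hedged way to run only this new case through the in-tree
test runner (assuming scripts/zfs-tests.sh and its -t option; the exact
path form expected by -t may vary between versions):

    scripts/zfs-tests.sh -t tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh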