author     LOLi <[email protected]>              2017-12-28 19:15:32 +0100
committer  Brian Behlendorf <[email protected]>  2017-12-28 10:15:32 -0800
commit     390d679acdfa6a2498280a4dcd33b7600ace27ce
tree       d0b071bb8a24fa237b36ac8c8d7cfae578d5366f /tests/zfs-tests
parent     2fe61a7ecc507d031451c21b3077fae549b58ec3
Fix 'zpool add' handling of nested interior VDEVs
When replacing a faulted device that was previously handled by a spare,
multiple levels of nested interior VDEVs will be present in the pool
configuration; the following example illustrates one of the possible
situations:
   NAME                          STATE     READ WRITE CKSUM
   testpool                      DEGRADED     0     0     0
     raidz1-0                    DEGRADED     0     0     0
       spare-0                   DEGRADED     0     0     0
         replacing-0             DEGRADED     0     0     0
           /var/tmp/fault-dev    UNAVAIL      0     0     0  cannot open
           /var/tmp/replace-dev  ONLINE       0     0     0
         /var/tmp/spare-dev1     ONLINE       0     0     0
       /var/tmp/safe-dev         ONLINE       0     0     0
   spares
     /var/tmp/spare-dev1         INUSE     currently in use
This is safe and allowed, but get_replication() needs to handle this
situation gracefully so that 'zpool add' can attach new devices to the pool.
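
For readers who want to reproduce the nested state by hand, the sequence
below condenses the steps automated by the test added in this commit. It is
a minimal sketch: the device paths are illustrative, zinject requires the
ZFS fault-injection facility, and ZED must be running for the hot spare to
be attached automatically.

    # Two-disk raidz1 pool with one hot spare (illustrative paths)
    truncate -s 64m /var/tmp/fault-dev /var/tmp/safe-dev \
        /var/tmp/replace-dev /var/tmp/spare-dev1 /var/tmp/spare-dev2
    zpool create testpool raidz1 /var/tmp/fault-dev /var/tmp/safe-dev
    zpool add testpool spare /var/tmp/spare-dev1

    # Inject I/O errors on one disk; with ZED running, a scrub notices
    # the failure and the hot spare is attached in its place
    zinject -d /var/tmp/fault-dev -e nxio -T all -f 100 testpool
    zpool scrub testpool

    # Manually replace the faulted disk: 'replacing-0' is now nested
    # inside 'spare-0', as in the status output above, and adding a
    # device exercises the fixed get_replication() code path
    zpool replace testpool /var/tmp/fault-dev /var/tmp/replace-dev
    zpool add testpool spare /var/tmp/spare-dev2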
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #6678
Closes #6996
Diffstat (limited to 'tests/zfs-tests')
-rw-r--r--   tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am                     3
-rwxr-xr-x   tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh  111
2 files changed, 113 insertions(+), 1 deletion(-)
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
index 4b6b533fe..062028299 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
@@ -15,4 +15,5 @@ dist_pkgdata_SCRIPTS = \
 	zpool_add_009_neg.ksh \
 	zpool_add_010_pos.ksh \
 	add-o_ashift.ksh \
-	add_prop_ashift.ksh
+	add_prop_ashift.ksh \
+	add_nested_replacing_spare.ksh
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
new file mode 100755
index 000000000..b38079852
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
@@ -0,0 +1,111 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <[email protected]>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
+
+#
+# DESCRIPTION:
+#	'zpool add' works with nested replacing/spare vdevs
+#
+# STRATEGY:
+#	1. Create a redundant pool with a spare device
+#	2. Manually fault a device, wait for the hot-spare and then replace it:
+#	   this creates a situation where replacing and spare vdevs are nested.
+#	3. Verify 'zpool add' is able to add new devices to the pool.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+	zed_stop
+	zed_cleanup
+	log_must zinject -c all
+	destroy_pool $TESTPOOL
+	log_must rm -f $DATA_DEVS $SPARE_DEVS
+}
+
+log_assert "'zpool add' works with nested replacing/spare vdevs"
+log_onexit cleanup
+
+FAULT_DEV="$TEST_BASE_DIR/fault-dev"
+SAFE_DEV1="$TEST_BASE_DIR/safe-dev1"
+SAFE_DEV2="$TEST_BASE_DIR/safe-dev2"
+SAFE_DEV3="$TEST_BASE_DIR/safe-dev3"
+SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
+REPLACE_DEV="$TEST_BASE_DIR/replace-dev"
+ADD_DEV="$TEST_BASE_DIR/add-dev"
+DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
+SPARE_DEV1="$TEST_BASE_DIR/spare-dev1"
+SPARE_DEV2="$TEST_BASE_DIR/spare-dev2"
+SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"
+
+# We need ZED running to work with spares
+zed_setup
+zed_start
+# Clear events from previous runs
+zed_events_drain
+
+for type in "mirror" "raidz1" "raidz2" "raidz3"
+do
+	# 1. Create a redundant pool with a spare device
+	truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
+	log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
+	log_must zpool add $TESTPOOL spare $SPARE_DEV1
+
+	# 2.1 Fault a device, verify the spare is kicked in
+	log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
+	log_must zpool scrub $TESTPOOL
+	log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
+	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
+	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
+	log_must check_state $TESTPOOL "" "DEGRADED"
+
+	# 2.2 Replace the faulted device: this creates a replacing vdev inside a
+	#     spare vdev
+	log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
+	log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60
+	zpool status | awk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
+	    $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
+	    END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
+	if [[ $? -ne 0 ]]; then
+		log_fail "Pool does not contain nested replacing/spare vdevs"
+	fi
+
+	# 3. Verify 'zpool add' is able to add new devices
+	log_must zpool add $TESTPOOL spare $SPARE_DEV2
+	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
+	log_must zpool add -f $TESTPOOL $ADD_DEV
+	log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60
+
+	# Cleanup
+	log_must zinject -c all
+	destroy_pool $TESTPOOL
+	log_must rm -f $DATA_DEVS $SPARE_DEVS
done
+
+log_pass "'zpool add' works with nested replacing/spare vdevs"
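
As an aside, the awk pipeline in step 2.2 can be exercised on its own. The
sketch below feeds it hypothetical 'zpool status' lines: the script collects
the pool name plus the first field of the next three lines, then compares the
result against the expected poolname:type-0:spare-0:replacing-0: chain, so it
succeeds only when the nesting is exactly spare-0 -> replacing-0.

    # Hypothetical status lines; the check exits 0 only for the
    # expected nested replacing/spare vdev chain
    printf '%s\n' \
        'testpool        DEGRADED 0 0 0' \
        '  raidz1-0      DEGRADED 0 0 0' \
        '    spare-0     DEGRADED 0 0 0' \
        '      replacing-0 DEGRADED 0 0 0' |
    awk -v poolname="testpool" -v type="raidz1" 'BEGIN {s=""}
        $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
        END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }' &&
    echo "nested replacing/spare vdevs found"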