diff options
author | Brian Behlendorf <[email protected]> | 2017-12-04 11:50:35 -0800 |
---|---|---|
committer | GitHub <[email protected]> | 2017-12-04 11:50:35 -0800 |
commit | ea39f75f64ff72e30900a36e00632f180f5f6676 (patch) | |
tree | fe5d708335eeebaca59daba333510d142021d6be | |
parent | 72841b9fd957a392bb621393685b06dc042d4523 (diff) |
Fix 'zpool create|add' replication level check
When the pool configuration contains a hole due to a previous device
removal, ignore this top-level vdev. Failure to do so will result in
the current configuration being assessed to have a non-uniform
replication level and the expected warning being disabled.
The zpool_add_010_pos test case was extended to cover this scenario.
Reviewed-by: George Melikov <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #6907
Closes #6911
-rw-r--r-- | cmd/zpool/zpool_vdev.c | 7 | ||||
-rwxr-xr-x | tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh | 66 |
2 files changed, 61 insertions, 12 deletions
diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c index 04a2611f9..63a7101b8 100644 --- a/cmd/zpool/zpool_vdev.c +++ b/cmd/zpool/zpool_vdev.c @@ -801,8 +801,11 @@ get_replication(nvlist_t *nvroot, boolean_t fatal) if (is_log) continue; - verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, - &type) == 0); + /* Ignore holes introduced by removing aux devices */ + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); + if (strcmp(type, VDEV_TYPE_HOLE) == 0) + continue; + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { /* diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh index f949e8ae9..8b8eade48 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh @@ -37,10 +37,10 @@ # Verify zpool add succeed when adding vdevs with matching redundancy. # # STRATEGY: -# 1. Create base filesystem to hold virtual disk files. -# 2. Create several files == $MINVDEVSIZE. -# 3. Create pool with given redundancy. -# 3. Verify 'zpool add' succeed with with matching redundancy. +# 1. Create several files == $MINVDEVSIZE. +# 2. Verify 'zpool add' succeeds with matching redundancy. +# 3. Verify 'zpool add' warns with differing redundancy. +# 4. Verify 'zpool add' warns with differing redundancy after removal. # verify_runnable "global" @@ -48,21 +48,24 @@ verify_runnable "global" function cleanup { datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1 - datasetexists $TESTPOOL && destroy_pool $TESTPOOL + + typeset -i i=0 + while ((i < 10)); do + log_must rm -f $TEST_BASE_DIR/vdev$i + ((i += 1)) + done } log_assert "Verify 'zpool add' succeed with keywords combination." log_onexit cleanup -create_pool $TESTPOOL $DISKS -mntpnt=$(get_prop mountpoint $TESTPOOL) - +# 1. Create several files == $MINVDEVSIZE. 
typeset -i i=0 while ((i < 10)); do - log_must truncate -s $MINVDEVSIZE $mntpnt/vdev$i + log_must truncate -s $MINVDEVSIZE $TEST_BASE_DIR/vdev$i - eval vdev$i=$mntpnt/vdev$i + eval vdev$i=$TEST_BASE_DIR/vdev$i ((i += 1)) done @@ -99,6 +102,10 @@ set -A redundancy3_add_args \ "mirror $vdev5 $vdev6 $vdev7 $vdev8" \ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" +set -A log_args "log" "$vdev4" +set -A cache_args "cache" "$vdev4" +set -A spare_args "spare" "$vdev4" + typeset -i j=0 function zpool_create_add @@ -140,11 +147,37 @@ function zpool_create_forced_add done } +function zpool_create_rm_add +{ + typeset -n create_args=$1 + typeset -n add_args=$2 + typeset -n rm_args=$3 + + i=0 + while ((i < ${#create_args[@]})); do + j=0 + while ((j < ${#add_args[@]})); do + log_must zpool create $TESTPOOL1 ${create_args[$i]} + log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]} + log_must zpool add $TESTPOOL1 ${add_args[$j]} + log_must zpool remove $TESTPOOL1 ${rm_args[1]} + log_mustnot zpool add $TESTPOOL1 ${rm_args[1]} + log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]} + log_must zpool destroy -f $TESTPOOL1 + + ((j += 1)) + done + ((i += 1)) + done +} + +# 2. Verify 'zpool add' succeeds with matching redundancy. zpool_create_add redundancy0_create_args redundancy0_add_args zpool_create_add redundancy1_create_args redundancy1_add_args zpool_create_add redundancy2_create_args redundancy2_add_args zpool_create_add redundancy3_create_args redundancy3_add_args +# 3. Verify 'zpool add' warns with differing redundancy. zpool_create_forced_add redundancy0_create_args redundancy1_add_args zpool_create_forced_add redundancy0_create_args redundancy2_add_args zpool_create_forced_add redundancy0_create_args redundancy3_add_args @@ -161,4 +194,17 @@ zpool_create_forced_add redundancy3_create_args redundancy0_add_args zpool_create_forced_add redundancy3_create_args redundancy1_add_args zpool_create_forced_add redundancy3_create_args redundancy2_add_args +# 4. 
Verify 'zpool add' warns with differing redundancy after removal. +zpool_create_rm_add redundancy1_create_args redundancy1_add_args log_args +zpool_create_rm_add redundancy2_create_args redundancy2_add_args log_args +zpool_create_rm_add redundancy3_create_args redundancy3_add_args log_args + +zpool_create_rm_add redundancy1_create_args redundancy1_add_args cache_args +zpool_create_rm_add redundancy2_create_args redundancy2_add_args cache_args +zpool_create_rm_add redundancy3_create_args redundancy3_add_args cache_args + +zpool_create_rm_add redundancy1_create_args redundancy1_add_args spare_args +zpool_create_rm_add redundancy2_create_args redundancy2_add_args spare_args +zpool_create_rm_add redundancy3_create_args redundancy3_add_args spare_args + log_pass "'zpool add' succeed with keywords combination." |