Diffstat (limited to 'tests')
-rw-r--r--  tests/runfiles/common.run                                                   3
-rwxr-xr-x  tests/test-runner/bin/zts-report.py.in                                      3
-rw-r--r--  tests/zfs-tests/tests/Makefile.am                                           3
-rwxr-xr-x  tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh  21
            (renamed from tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged.ksh)
-rwxr-xr-x  tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh 157
5 files changed, 164 insertions(+), 23 deletions(-)
diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run
index 89ee0d3cb..a4ec27a36 100644
--- a/tests/runfiles/common.run
+++ b/tests/runfiles/common.run
@@ -763,7 +763,8 @@ tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
- 'redundancy_draid3', 'redundancy_draid_damaged', 'redundancy_draid_spare1',
+ 'redundancy_draid3', 'redundancy_draid_damaged1',
+ 'redundancy_draid_damaged2', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in
index ddb9bb7ee..bf7cf22b6 100755
--- a/tests/test-runner/bin/zts-report.py.in
+++ b/tests/test-runner/bin/zts-report.py.in
@@ -226,9 +226,6 @@ maybe = {
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', 11946],
'projectquota/setup': ['SKIP', exec_reason],
- 'redundancy/redundancy_004_neg': ['FAIL', 7290],
- 'redundancy/redundancy_draid_spare1': ['FAIL', known_reason],
- 'redundancy/redundancy_draid_spare3': ['FAIL', known_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'reservation/reservation_008_pos': ['FAIL', 7741],
'reservation/reservation_018_pos': ['FAIL', 5642],
diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am
index e65a8bba2..4c5b11212 100644
--- a/tests/zfs-tests/tests/Makefile.am
+++ b/tests/zfs-tests/tests/Makefile.am
@@ -1632,7 +1632,8 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
- functional/redundancy/redundancy_draid_damaged.ksh \
+ functional/redundancy/redundancy_draid_damaged1.ksh \
+ functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh
index 9b3be9f4e..da2d58eef 100755
--- a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged.ksh
+++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh
@@ -85,28 +85,13 @@ function test_sequential_resilver # <pool> <parity> <dir>
for (( i=0; i<$nparity; i=i+1 )); do
spare=draid${nparity}-0-$i
- zpool status $pool
- zpool replace -fsw $pool $dir/dev-$i $spare
- zpool status $pool
+ log_must zpool replace -fsw $pool $dir/dev-$i $spare
done
log_must zpool scrub -w $pool
+ log_must zpool status $pool
- # When only a single child was overwritten the sequential resilver
- # can fully repair the damage from parity and the scrub will have
- # nothing to repair. When multiple children are silently damaged
- # the sequential resilver will calculate the wrong data since only
- # the parity information is used and it cannot be verified with
- # the checksum. However, since only the resilvering devices are
- # written to with the bad data a subsequent scrub will be able to
- # fully repair the pool.
- #
- if [[ $nparity == 1 ]]; then
- log_must check_pool_status $pool "scan" "repaired 0B"
- else
- log_mustnot check_pool_status $pool "scan" "repaired 0B"
- fi
-
+ log_mustnot check_pool_status $pool "scan" "repaired 0B"
log_must check_pool_status $pool "errors" "No known data errors"
log_must check_pool_status $pool "scan" "with 0 errors"
}
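For readers outside the ZTS harness, the pair of assertions above reduces to matching the "scan:" line of zpool status output. A minimal standalone sketch in ksh (the pool name tank and the helper verify_scrub_repaired are illustrative, not part of this change):

#!/bin/ksh -p
# Hypothetical sketch: succeed only if the last scrub repaired data and
# completed without errors, mirroring the log_must/log_mustnot pair above.
function verify_scrub_repaired    # <pool>
{
	typeset pool=$1
	typeset scan=$(zpool status $pool | grep "scan:")

	# The sequential resilver reconstructs from parity alone, so the
	# follow-up scrub is expected to find and repair bad blocks.
	[[ "$scan" == *"repaired 0B"* ]] && return 1
	[[ "$scan" == *"with 0 errors"* ]] || return 1
	return 0
}

verify_scrub_repaired tank || echo "scrub did not repair as expected"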
diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh
new file mode 100755
index 000000000..8e06db9ba
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh
@@ -0,0 +1,157 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/redundancy/redundancy.kshlib
+
+#
+# DESCRIPTION:
+# When sequentially resilvering a dRAID pool to a distributed spare,
+# silent damage to an online vdev in a replacing or spare mirror vdev
+# is not expected to be repaired. Not only does the rebuild have no
+# reason to suspect the silent damage, but even if it did there is no
+# checksum available to determine the correct copy and make the repair.
+# However, the subsequent scrub should detect and repair any damage.
+#
+# STRATEGY:
+# 1. Create block device files for the test draid pool
+# 2. For each parity value [1..3]
+# a. Create a draid pool
+# b. Fill it with some directories/files
+# c. Systematically damage and replace three devices by:
+# - Overwrite the device
+# - Replace the damaged vdev with a distributed spare
+# - Scrub the pool and verify repair I/O is issued
+# d. Detach the distributed spares
+# e. Scrub the pool and verify there was nothing to repair
+# f. Destroy the draid pool
+#
+
+typeset -r devs=7
+typeset -r dev_size_mb=512
+typeset -a disks
+
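+# Save the current tunable values so cleanup() can restore them.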
+prefetch_disable=$(get_tunable PREFETCH_DISABLE)
+rebuild_scrub_enabled=$(get_tunable REBUILD_SCRUB_ENABLED)
+
+function cleanup
+{
+ poolexists "$TESTPOOL" && destroy_pool "$TESTPOOL"
+
+ for i in {0..$devs}; do
+ rm -f "$TEST_BASE_DIR/dev-$i"
+ done
+
+ set_tunable32 PREFETCH_DISABLE $prefetch_disable
+ set_tunable32 REBUILD_SCRUB_ENABLED $rebuild_scrub_enabled
+}
+
+log_onexit cleanup
+
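+# Disable speculative prefetch and the scrub which normally follows a
+# sequential rebuild; the test issues (and checks) its own scrubs below.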
+log_must set_tunable32 PREFETCH_DISABLE 1
+log_must set_tunable32 REBUILD_SCRUB_ENABLED 0
+
+# Disk files which will be used by the pool
+for i in {0..$(($devs - 1))}; do
+ device=$TEST_BASE_DIR/dev-$i
+ log_must truncate -s ${dev_size_mb}M $device
+ disks[${#disks[*]}+1]=$device
+done
+
+# Disk file which will be attached
+log_must truncate -s 512M $TEST_BASE_DIR/dev-$devs
+
+dir=$TEST_BASE_DIR
+
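+# Exercise each parity level; the :3s suffix reserves three distributed
+# spares, one for each device this test will damage and replace.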
+for nparity in 1 2 3; do
+ raid=draid${nparity}:3s
+
+ log_must zpool create -f -O compression=off -o cachefile=none \
+ $TESTPOOL $raid ${disks[@]}
+ # log_must zfs set primarycache=metadata $TESTPOOL
+
+ log_must zfs create $TESTPOOL/fs
+ log_must fill_fs /$TESTPOOL/fs 1 256 10 1024 R
+
+ log_must zfs create -o compress=on $TESTPOOL/fs2
+ log_must fill_fs /$TESTPOOL/fs2 1 256 10 1024 R
+
+ log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
+ log_must fill_fs /$TESTPOOL/fs3 1 256 10 1024 R
+
+ log_must zpool export $TESTPOOL
+ log_must zpool import -o cachefile=none -d $dir $TESTPOOL
+
+ log_must check_pool_status $TESTPOOL "errors" "No known data errors"
+
+ for nspare in 0 1 2; do
+ damaged=$dir/dev-${nspare}
+ spare=draid${nparity}-0-${nspare}
+
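+ # Export the pool, silently overwrite the child vdev with zeros
+ # (seek=4 preserves the 4M of labels/boot block at the front of the
+ # device), then import it and rebuild to a distributed spare.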
+ log_must zpool export $TESTPOOL
+ log_must dd conv=notrunc if=/dev/zero of=$damaged \
+ bs=1M seek=4 count=$(($dev_size_mb-4))
+ log_must zpool import -o cachefile=none -d $dir $TESTPOOL
+
+ log_must zpool replace -fsw $TESTPOOL $damaged $spare
+
+ # Scrub the pool after the sequential resilver and verify
+ # that the silent damage was repaired by the scrub.
+ log_must zpool scrub -w $TESTPOOL
+ log_must zpool status $TESTPOOL
+ log_must check_pool_status $TESTPOOL "errors" \
+ "No known data errors"
+ log_must check_pool_status $TESTPOOL "scan" "with 0 errors"
+ log_mustnot check_pool_status $TESTPOOL "scan" "repaired 0B"
+ done
+
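+ # The damaged device, its distributed spare, and the spare mirror
+ # vdev that contains them should all still be online.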
+ for nspare in 0 1 2; do
+ log_must check_vdev_state $TESTPOOL \
+ spare-${nspare} "ONLINE"
+ log_must check_vdev_state $TESTPOOL \
+ ${dir}/dev-${nspare} "ONLINE"
+ log_must check_vdev_state $TESTPOOL \
+ draid${nparity}-0-${nspare} "ONLINE"
+ done
+
+ # Detach the distributed spares and scrub the pool again to
+ # verify no damage remained on the originally corrupted vdevs.
+ for nspare in 0 1 2; do
+ log_must zpool detach $TESTPOOL draid${nparity}-0-${nspare}
+ done
+
+ log_must zpool clear $TESTPOOL
+ log_must zpool scrub -w $TESTPOOL
+ log_must zpool status $TESTPOOL
+
+ log_must check_pool_status $TESTPOOL "errors" "No known data errors"
+ log_must check_pool_status $TESTPOOL "scan" "with 0 errors"
+ log_must check_pool_status $TESTPOOL "scan" "repaired 0B"
+
+ log_must zpool destroy "$TESTPOOL"
+done
+
+log_pass "draid damaged device scrub test succeeded."
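To exercise just these scripts through the installed suite, an invocation along these lines should work (the -t option of zfs-tests.sh selects a single test; the installed path and option spelling are assumptions, verify against your distribution):

# Assumed paths; check your zfs-tests install.
/usr/share/zfs/zfs-tests.sh -t functional/redundancy/redundancy_draid_damaged1
/usr/share/zfs/zfs-tests.sh -t functional/redundancy/redundancy_draid_damaged2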