author | Andrea Gelmini <[email protected]> | 2021-04-02 18:38:53 -0700
---|---|---
committer | Brian Behlendorf <[email protected]> | 2021-04-02 18:52:15 -0700
commit | bf169e9f15efbc343b931f20cfad5d2b59c8a821 (patch) |
tree | a15662d4490569937a30c715704ff51a698a2473 /tests |
parent | 943df59ed942182427ed50b94a346dc22c4407e6 (diff) |
Fix various typos
Correct an assortment of typos throughout the code base.
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Matthew Ahrens <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Andrea Gelmini <[email protected]>
Closes #11774
Diffstat (limited to 'tests')
15 files changed, 22 insertions, 22 deletions
diff --git a/tests/runfiles/sanity.run b/tests/runfiles/sanity.run
index e32cf5f62..b1d2c73de 100644
--- a/tests/runfiles/sanity.run
+++ b/tests/runfiles/sanity.run
@@ -12,7 +12,7 @@
 # as much functionality as possible while still executing relatively
 # quickly. The included tests should take no more than a few seconds
 # each to run at most. This provides a convenient way to sanity test a
-# change before commiting to a full test run which takes several hours.
+# change before committing to a full test run which takes several hours.
 #
 # Approximate run time: 15 minutes
 #
diff --git a/tests/zfs-tests/cmd/draid/draid.c b/tests/zfs-tests/cmd/draid/draid.c
index 861c6ba1a..57261348b 100644
--- a/tests/zfs-tests/cmd/draid/draid.c
+++ b/tests/zfs-tests/cmd/draid/draid.c
@@ -626,7 +626,7 @@ eval_decluster(draid_map_t *map, double *worst_ratiop, double *avg_ratiop)
 	uint64_t faults = nspares;
 
 	/*
-	 * Score groupwidths up to 19. This value was choosen as the
+	 * Score groupwidths up to 19. This value was chosen as the
 	 * largest reasonable width (16d+3p). dRAID pools may be still
 	 * be created with wider stripes but they are not considered in
 	 * this analysis in order to optimize for the most common cases.
@@ -727,7 +727,7 @@ eval_maps(uint64_t children, int passes, uint64_t *map_seed,
 	 * Consider maps with a lower worst_ratio to be of higher
 	 * quality. Some maps may have a lower avg_ratio but they
 	 * are discarded since they might include some particularly
-	 * imbalanced permuations. The average is tracked to in
+	 * imbalanced permutations. The average is tracked to in
 	 * order to get a sense of the average permutation quality.
 	 */
 	eval_decluster(map, &worst_ratio, &avg_ratio);
@@ -1194,8 +1194,8 @@ draid_dump(int argc, char *argv[])
 }
 
 /*
- * Print all of the mappings as a C formated draid_map_t array. This table
- * is found in the module/zcommon/zfs_draid.c file and is the definative
+ * Print all of the mappings as a C formatted draid_map_t array. This table
+ * is found in the module/zcommon/zfs_draid.c file and is the definitive
 * source for all mapping used by dRAID. It cannot be updated without
 * changing the dRAID on disk format.
 */
diff --git a/tests/zfs-tests/cmd/file_write/file_write.c b/tests/zfs-tests/cmd/file_write/file_write.c
index 45d296db4..60893c34f 100644
--- a/tests/zfs-tests/cmd/file_write/file_write.c
+++ b/tests/zfs-tests/cmd/file_write/file_write.c
@@ -44,7 +44,7 @@ static unsigned char bigbuffer[BIGBUFFERSIZE];
 static void usage(char *);
 
 /*
- * psudo-randomize the buffer
+ * pseudo-randomize the buffer
 */
 static void randomize_buffer(int block_size) {
 	int i;
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh
index f399ad270..fb29e4acd 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh
@@ -19,7 +19,7 @@
 #	snapshots from the same datasets
 #
 # STRATEGY
-#	1. Create multiple snapshots for the same datset
+#	1. Create multiple snapshots for the same dataset
 #	2. Run zfs destroy for these snapshots for a mix of valid and
 #	   invalid snapshot names
 #	3. Run zfs destroy for snapshots from different datasets and
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh
index dbf81262e..73dec9240 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh
@@ -36,7 +36,7 @@ typeset VDEV_PREFIX="$TEST_BASE_DIR/filedev"
 # STRATEGY:
 # 1. Create different storage pools, use -n to add devices to the pool and
 #    verify the output is as expected.
-# 2. Create a pool whith a hole vdev and verify it's not listed with add -n.
+# 2. Create a pool with a hole vdev and verify it's not listed with add -n.
 #
 
 typeset -a dev=(
@@ -163,7 +163,7 @@ for (( i=0; i < ${#tests[@]}; i+=1 )); do
 	log_must destroy_pool "$TESTPOOL"
 done
 
-# Make sure hole vdevs are skiped in output.
+# Make sure hole vdevs are skipped in output.
 log_must eval "zpool create '$TESTPOOL' '${dev[0]}' log '${dev[1]}' \
 	cache '${dev[2]}'"
diff --git a/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh
index 382b2cb7f..4951097ac 100755
--- a/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh
@@ -74,7 +74,7 @@ else
 fi
 
 #
-# datsets ordered by checksum options (note, Orange, Carrot & Banana have the
+# datasets ordered by checksum options (note, Orange, Carrot & Banana have the
 # same checksum options, so ZFS should revert to sorting them alphabetically by
 # name)
 #
diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh
index 5cb50fde6..22450d89d 100755
--- a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh
@@ -30,7 +30,7 @@
 #
 # STRATEGY:
 # 1. Change HOME to /var/tmp
-# 2. Make a simple script that echos a key value pair
+# 2. Make a simple script that echoes a key value pair
 #    in /var/tmp/.zpool.d
 # 3. Make sure it can be run with -c
 # 4. Remove the script we created
diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh
index 1197ea2d1..11f51350a 100755
--- a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh
@@ -30,7 +30,7 @@
 #
 # STRATEGY:
 # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs
-# 2. Make a simple script that echos a key value pair in each dir
+# 2. Make a simple script that echoes a key value pair in each dir
 # 3. Make sure scripts can be run with -c
 # 4. Remove the scripts we created
diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh
index 4cc3deb6d..5363043a8 100755
--- a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh
@@ -30,7 +30,7 @@
 #
 # STRATEGY:
 # 1. Change HOME to /var/tmp
-# 2. Make a simple script that echos a key value pair
+# 2. Make a simple script that echoes a key value pair
 #    in /var/tmp/.zpool.d
 # 3. Make sure it can be run with -c
 # 4. Remove the script we created
diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh
index a075b9a0c..3f64fdf1a 100755
--- a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh
@@ -30,7 +30,7 @@
 #
 # STRATEGY:
 # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs
-# 2. Make a simple script that echos a key value pair in each dir
+# 2. Make a simple script that echoes a key value pair in each dir
 # 3. Make sure scripts can be run with -c
 # 4. Remove the scripts we created
diff --git a/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh b/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh
index 0abe1e2ce..86916bf90 100755
--- a/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh
@@ -30,7 +30,7 @@
 # STRATEGY:
 # 1. Create a pool
 # 2. Simulate physical removal of one device
-# 3. Verify the device is unvailable
+# 3. Verify the device is unavailable
 # 4. Reattach the device
 # 5. Verify the device is onlined
 # 6. Repeat the same tests with a spare device:
@@ -104,7 +104,7 @@ do
 	log_must mkfile 1m $mntpnt/file
 	log_must zpool sync $TESTPOOL
 
-	# 3. Verify the device is unvailable.
+	# 3. Verify the device is unavailable.
 	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
 
 	# 4. Reattach the device
diff --git a/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh b/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh
index b3192b2bf..5c3835349 100755
--- a/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh
+++ b/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh
@@ -44,7 +44,7 @@ function cleanup
 	rm -f ${VDEV_FILES[@]}
 }
 
-log_assert "Verify attach/detech with multiple vdevs"
+log_assert "Verify attach/detach with multiple vdevs"
 
 ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable SCAN_SUSPEND_PROGRESS)
 
@@ -79,7 +79,7 @@ for replace_mode in "healing" "sequential"; do
 		${VDEV_FILES[1]} ${VDEV_FILES[2]}
 	log_must is_pool_resilvering $TESTPOOL1
 
-	# Original vdev cannot be detached until there is sufficent redundancy.
+	# Original vdev cannot be detached until there is sufficient redundancy.
 	log_mustnot zpool detach $TESTPOOL1 ${VDEV_FILES[0]}
 
 	# Detach first vdev (resilver keeps running)
@@ -108,4 +108,4 @@ for replace_mode in "healing" "sequential"; do
 	log_must zpool wait $TESTPOOL1
 done
 
-log_pass "Verify attach/detech with multiple vdevs"
+log_pass "Verify attach/detach with multiple vdevs"
diff --git a/tests/zfs-tests/tests/functional/replacement/replace_import.ksh b/tests/zfs-tests/tests/functional/replacement/replace_import.ksh
index 35d51d939..37d3c6645 100755
--- a/tests/zfs-tests/tests/functional/replacement/replace_import.ksh
+++ b/tests/zfs-tests/tests/functional/replacement/replace_import.ksh
@@ -26,7 +26,7 @@
 # Strategy:
 # 1. For both healing and sequential resilvering replace:
 #    a. Create a pool
-#    b. Repalce a vdev with 'zpool replace' to resilver (-s) it.
+#    b. Replace a vdev with 'zpool replace' to resilver (-s) it.
 #    c. Export the pool
 #    d. Import the pool
 #    e. Verify the 'zpool replace' resumed resilvering.
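For reference, the replace_import strategy just above maps onto a short command sequence. This is a minimal standalone sketch only, using sparse file vdevs under /var/tmp and a hypothetical pool name "demo" rather than the test's actual $TESTPOOL and vdev variables:

    # Create a mirror pool on file-backed vdevs (hypothetical paths).
    truncate -s 256m /var/tmp/vdev1 /var/tmp/vdev2 /var/tmp/spare
    zpool create demo mirror /var/tmp/vdev1 /var/tmp/vdev2

    # Step (b): replace a vdev; -s requests a sequential resilver.
    zpool replace -s demo /var/tmp/vdev2 /var/tmp/spare

    # Steps (c)-(e): export, re-import, and confirm the resilver
    # resumed rather than restarted from the beginning.
    zpool export demo
    zpool import -d /var/tmp demo
    zpool status demo

Per step 1 of the strategy, the test runs this sequence twice, once as a healing replace (no -s) and once as a sequential one.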
diff --git a/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh b/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh
index 7896b2dbe..7e96ab518 100755
--- a/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh
+++ b/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh
@@ -36,7 +36,7 @@
 #    a. Replace a vdev with a spare & suspend resilver immediately
 #    b. Verify resilver starts properly
 #    c. Offline / online another vdev to introduce a new DTL range
-#    d. Verify resilver restart restart or defer
+#    d. Verify resilver restart or defer
 #    e. Inject read errors on vdev that was offlined / onlned
 #    f. Verify that resilver did not restart
 #    g. Unsuspend resilver and wait for it to finish
diff --git a/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh b/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh
index ec1986c45..da0d36a35 100755
--- a/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh
+++ b/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh
@@ -39,7 +39,7 @@
 # for a dataset. Unlike quotas however there should be no restrictions
 # on accessing space outside of the limits of the reservation (if the
 # space is available in the pool). Verify that in a filesystem with a
-# reservation set that its possible to create files both within the
+# reservation set that it's possible to create files both within the
 # reserved space and also outside.
 #
 # STRATEGY:
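Several of the corrected strategies above ("make a simple script that echoes a key value pair") concern user scripts for zpool status -c and zpool iostat -c. A minimal sketch of such a script, with a hypothetical script name "kv" and placeholder key and value (the tests create their own under /var/tmp/.zpool.d or the ZPOOL_SCRIPTS_PATH directories):

    # Install a trivial -c script in the default per-user directory.
    # Each name=value line it prints becomes a per-vdev column.
    mkdir -p ~/.zpool.d
    printf '#!/bin/sh\necho "testkey=testvalue"\n' > ~/.zpool.d/kv
    chmod +x ~/.zpool.d/kv

    # Run it by name; a "testkey" column appears next to each vdev.
    zpool status -c kv
    zpool iostat -c kv

When set, ZPOOL_SCRIPTS_PATH replaces the default script search directories, which is what the two searchpath tests exercise.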