57 files changed, 75 insertions, 75 deletions
diff --git a/cmd/zgenhostid/zgenhostid.c b/cmd/zgenhostid/zgenhostid.c index 50fcf05e4..5a9bdad7c 100644 --- a/cmd/zgenhostid/zgenhostid.c +++ b/cmd/zgenhostid/zgenhostid.c @@ -137,7 +137,7 @@ main(int argc, char **argv) } /* - * we need just 4 bytes in native endianess + * we need just 4 bytes in native endianness * not using sethostid() because it may be missing or just a stub */ uint32_t hostid = input_i; diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index e23604b3d..7a15c78d1 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -1065,7 +1065,7 @@ zpool_do_add(int argc, char **argv) free(vname); } } - /* And finaly the spares */ + /* And finally the spares */ if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES, &sparechild, &sparechildren) == 0 && sparechildren > 0) { hadspare = B_TRUE; diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c index c86081a81..3d83da641 100644 --- a/cmd/zpool/zpool_vdev.c +++ b/cmd/zpool/zpool_vdev.c @@ -445,7 +445,7 @@ typedef struct replication_level { /* * N.B. For the purposes of comparing replication levels dRAID can be - * considered functionally equivilant to raidz. + * considered functionally equivalent to raidz. */ static boolean_t is_raidz_mirror(replication_level_t *a, replication_level_t *b, diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index 7193eafe3..24197d27b 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -5979,7 +5979,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) vd0->vdev_resilver_txg != 0)) { /* * Make vd0 explicitly claim to be unreadable, - * or unwriteable, or reach behind its back + * or unwritable, or reach behind its back * and close the underlying fd. We can do this if * maxfaults == 0 because we'll fail and reexecute, * and we can do it if maxfaults >= 2 because we'll diff --git a/cmd/zvol_wait/zvol_wait b/cmd/zvol_wait/zvol_wait index 9a3948da5..eb1b3e81f 100755 --- a/cmd/zvol_wait/zvol_wait +++ b/cmd/zvol_wait/zvol_wait @@ -39,7 +39,7 @@ list_zvols() { [ "$volmode" = "none" ] && continue [ "$redacted" = "-" ] || continue # - # We also also ignore partially received zvols if it is + # We also ignore partially received zvols if it is # not an incremental receive, as those won't even have a block # device minor node created yet. # diff --git a/config/CppCheck.am b/config/CppCheck.am index 13c633c60..e53013bd0 100644 --- a/config/CppCheck.am +++ b/config/CppCheck.am @@ -1,5 +1,5 @@ # -# Default rules for running cppcheck against the the user space components. +# Default rules for running cppcheck against the user space components. 
# PHONY += cppcheck diff --git a/config/kernel-generic_fillattr.m4 b/config/kernel-generic_fillattr.m4 index 50c803130..0acd5d531 100644 --- a/config/kernel-generic_fillattr.m4 +++ b/config/kernel-generic_fillattr.m4 @@ -16,7 +16,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS], [ ]) AC_DEFUN([ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS], [ - AC_MSG_CHECKING([whether generic_fillattr requres struct user_namespace*]) + AC_MSG_CHECKING([whether generic_fillattr requires struct user_namespace*]) ZFS_LINUX_TEST_RESULT([generic_fillattr_userns], [ AC_MSG_RESULT([yes]) AC_DEFINE(HAVE_GENERIC_FILLATTR_USERNS, 1, diff --git a/etc/zfs/zfs-functions.in b/etc/zfs/zfs-functions.in index c2ce6157c..54f2ebc0e 100644 --- a/etc/zfs/zfs-functions.in +++ b/etc/zfs/zfs-functions.in @@ -14,13 +14,13 @@ PATH=/sbin:/bin:/usr/bin:/usr/sbin # Source function library if [ -f /etc/rc.d/init.d/functions ]; then - # RedHat and derivates + # RedHat and derivatives . /etc/rc.d/init.d/functions elif [ -L /etc/init.d/functions.sh ]; then # Gentoo . /etc/init.d/functions.sh elif [ -f /lib/lsb/init-functions ]; then - # LSB, Debian GNU/Linux and derivates + # LSB, Debian GNU/Linux and derivatives . /lib/lsb/init-functions fi diff --git a/include/os/freebsd/spl/sys/vnode_impl.h b/include/os/freebsd/spl/sys/vnode_impl.h index 94ec1ad4e..c82b1fc9a 100644 --- a/include/os/freebsd/spl/sys/vnode_impl.h +++ b/include/os/freebsd/spl/sys/vnode_impl.h @@ -210,7 +210,7 @@ enum create { CRCREAT, CRMKNOD, CRMKDIR }; /* reason for create */ * * The cc_caller_id is used to identify one or more callers who invoke * operations, possibly on behalf of others. For example, the NFS - * server could have it's own cc_caller_id which can be detected by + * server could have its own cc_caller_id which can be detected by * vnode/vfs operations or (FEM) monitors on those operations. New * caller IDs are generated by fs_new_caller_id(). */ diff --git a/include/os/linux/kernel/linux/blkdev_compat.h b/include/os/linux/kernel/linux/blkdev_compat.h index ee066537b..b57d0a896 100644 --- a/include/os/linux/kernel/linux/blkdev_compat.h +++ b/include/os/linux/kernel/linux/blkdev_compat.h @@ -261,7 +261,7 @@ bio_set_bi_error(struct bio *bio, int error) * For older kernels trigger a re-reading of the partition table by calling * check_disk_change() which calls flush_disk() to invalidate the device. * - * For newer kernels (as of 5.10), bdev_check_media_chage is used, in favor of + * For newer kernels (as of 5.10), bdev_check_media_change is used, in favor of * check_disk_change(), with the modification that invalidation is no longer * forced. */ diff --git a/include/os/linux/kernel/linux/mod_compat.h b/include/os/linux/kernel/linux/mod_compat.h index e96e95313..cc42c3f7c 100644 --- a/include/os/linux/kernel/linux/mod_compat.h +++ b/include/os/linux/kernel/linux/mod_compat.h @@ -83,7 +83,7 @@ enum scope_prefix_types { /* * Declare a module parameter / sysctl node * - * "scope_prefix" the part of the the sysctl / sysfs tree the node resides under + * "scope_prefix" the part of the sysctl / sysfs tree the node resides under * (currently a no-op on Linux) * "name_prefix" the part of the variable name that will be excluded from the * exported names on platforms with a hierarchical namespace diff --git a/include/sys/vdev_draid.h b/include/sys/vdev_draid.h index 65417a93c..52ce4ba16 100644 --- a/include/sys/vdev_draid.h +++ b/include/sys/vdev_draid.h @@ -51,7 +51,7 @@ extern "C" { * dRAID permutation map. 
*/ typedef struct draid_map { - uint64_t dm_children; /* # of permuation columns */ + uint64_t dm_children; /* # of permutation columns */ uint64_t dm_nperms; /* # of permutation rows */ uint64_t dm_seed; /* dRAID map seed */ uint64_t dm_checksum; /* Checksum of generated map */ diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h index db4fe1447..6c3295353 100644 --- a/include/sys/vdev_impl.h +++ b/include/sys/vdev_impl.h @@ -501,7 +501,7 @@ typedef enum vbe_vers { * and is protected by an embedded checksum. By default, GRUB will * check if the boot filesystem supports storing the environment data * in a special location, and if so, will invoke filesystem specific - * logic to retrieve it. This can be overriden by a variable, should + * logic to retrieve it. This can be overridden by a variable, should * the user so desire. */ VB_RAW = 0, diff --git a/include/sys/vdev_rebuild.h b/include/sys/vdev_rebuild.h index 61ae15c5d..b59fbe153 100644 --- a/include/sys/vdev_rebuild.h +++ b/include/sys/vdev_rebuild.h @@ -60,7 +60,7 @@ typedef struct vdev_rebuild_phys { /* * The vdev_rebuild_t describes the current state and how a top-level vdev * should be rebuilt. The core elements are the top-vdev, the metaslab being - * rebuilt, range tree containing the allocted extents and the on-disk state. + * rebuilt, range tree containing the allocated extents and the on-disk state. */ typedef struct vdev_rebuild { vdev_t *vr_top_vdev; /* top-level vdev to rebuild */ diff --git a/lib/libzfs/libzfs_dataset.c b/lib/libzfs/libzfs_dataset.c index 4598e87f2..b654e41a5 100644 --- a/lib/libzfs/libzfs_dataset.c +++ b/lib/libzfs/libzfs_dataset.c @@ -5334,7 +5334,7 @@ zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl) * 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in * the 128k block example above. * - * The situtation is slightly different for dRAID since the minimum allocation + * The situation is slightly different for dRAID since the minimum allocation * size is the full group width. The same 8K block above would be written as * follows in a dRAID group: * diff --git a/lib/libzfsbootenv/lzbe_device.c b/lib/libzfsbootenv/lzbe_device.c index 670efd8b0..b366bd9c7 100644 --- a/lib/libzfsbootenv/lzbe_device.c +++ b/lib/libzfsbootenv/lzbe_device.c @@ -83,7 +83,7 @@ lzbe_set_boot_device(const char *pool, lzbe_flags_t flag, const char *device) } else { /* * Use device name directly if it does start with - * prefix "zfs:". Otherwise, add prefix and sufix. + * prefix "zfs:". Otherwise, add prefix and suffix. */ if (strncmp(device, "zfs:", 4) == 0) { fnvlist_add_string(nv, OS_BOOTONCE, device); diff --git a/lib/libzutil/zutil_import.c b/lib/libzutil/zutil_import.c index 93d05354f..c06065250 100644 --- a/lib/libzutil/zutil_import.c +++ b/lib/libzutil/zutil_import.c @@ -1408,7 +1408,7 @@ discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv, /* * Once we have the path, we need to add the directory to - * our directoy cache. + * our directory cache. */ if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { return (zpool_find_import_scan_dir(hdl, lock, cache, diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5 index e3e19481a..cda4661fe 100644 --- a/man/man5/zfs-module-parameters.5 +++ b/man/man5/zfs-module-parameters.5 @@ -234,7 +234,7 @@ Use \fB0\fR for no (default) and \fB1\fR for yes. .RS 12n Percent of ARC size allowed for L2ARC-only headers. 
Since L2ARC buffers are not evicted on memory pressure, too large amount of -headers on system with irrationaly large L2ARC can render it slow or unusable. +headers on system with irrationally large L2ARC can render it slow or unusable. This parameter limits L2ARC writes and rebuild to achieve it. .sp Default value: \fB33\fR%. @@ -387,7 +387,7 @@ Default value: \fB16,777,217\fR. .RS 12n When attempting to log the output nvlist of an ioctl in the on-disk history, the output will not be stored if it is larger than size (in bytes). This must be -less then DMU_MAX_ACCESS (64MB). This applies primarily to +less than DMU_MAX_ACCESS (64MB). This applies primarily to zfs_ioc_channel_program(). .sp Default value: \fB1MB\fR. @@ -911,7 +911,7 @@ Default value: \fB8,388,608\fR (8MB). .RS 12n Max bytes to prefetch indirects for per stream. .sp -Default vaule: \fB67,108,864\fR (64MB). +Default value: \fB67,108,864\fR (64MB). .RE .sp diff --git a/man/man5/zpool-features.5 b/man/man5/zpool-features.5 index c56b31e2d..b506acc7f 100644 --- a/man/man5/zpool-features.5 +++ b/man/man5/zpool-features.5 @@ -168,7 +168,7 @@ of the requested feature set. .LP By convention, compatibility files in \fB/usr/share/zfs/compatibility.d\fR are provided by the distribution package, and include feature sets -supported by important versions of popular distribtions, and feature +supported by important versions of popular distributions, and feature sets commonly supported at the start of each year. Compatibility files in \fB/etc/zfs/compatibility.d\fR, if present, will take precedence over files with the same name in \fB/usr/share/zfs/compatibility.d\fR. @@ -1046,7 +1046,7 @@ DEPENDENCIES extensible_dataset \fBzstd\fR is a high-performance compression algorithm that features a combination of high compression ratios and high speed. Compared to \fBgzip\fR, -\fBzstd\fR offers slighty better compression at much higher speeds. Compared +\fBzstd\fR offers slightly better compression at much higher speeds. Compared to \fBlz4\fR, \fBzstd\fR offers much better compression while being only modestly slower. Typically, \fBzstd\fR compression speed ranges from 250 to 500 MB/s per thread and decompression speed is over 1 GB/s per thread. diff --git a/man/man8/zfs.8 b/man/man8/zfs.8 index 4cff97792..52000f29b 100644 --- a/man/man8/zfs.8 +++ b/man/man8/zfs.8 @@ -678,7 +678,7 @@ This bookmark can then be used instead of snapshot in send streams. .Ed .It Sy Example 24 No Setting sharesmb Property Options on a ZFS File System The following example show how to share SMB filesystem through ZFS. -Note that that a user and his/her password must be given. +Note that a user and his/her password must be given. .Bd -literal # smbmount //127.0.0.1/share_tmp /mnt/tmp \\ -o user=workgroup/turbo,password=obrut,uid=1000 diff --git a/module/avl/avl.c b/module/avl/avl.c index d0473d883..1a95092bc 100644 --- a/module/avl/avl.c +++ b/module/avl/avl.c @@ -1008,7 +1008,7 @@ avl_destroy_nodes(avl_tree_t *tree, void **cookie) --tree->avl_numnodes; /* - * If we just did a right child or there isn't one, go up to parent. + * If we just removed a right child or there isn't one, go up to parent. 
*/ if (child == 1 || parent->avl_child[1] == NULL) { node = parent; diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c index 23686c59e..7332834cb 100644 --- a/module/icp/algs/modes/gcm.c +++ b/module/icp/algs/modes/gcm.c @@ -1399,7 +1399,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) } datap += done; } - /* Decrypt remainder, which is less then chunk size, in one go. */ + /* Decrypt remainder, which is less than chunk size, in one go. */ kfpu_begin(); if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) { done = aesni_gcm_decrypt(datap, datap, bleft, @@ -1415,7 +1415,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) ASSERT(bleft < GCM_AVX_MIN_DECRYPT_BYTES); /* - * Now less then GCM_AVX_MIN_DECRYPT_BYTES bytes remain, + * Now less than GCM_AVX_MIN_DECRYPT_BYTES bytes remain, * decrypt them block by block. */ while (bleft > 0) { diff --git a/module/os/freebsd/zfs/abd_os.c b/module/os/freebsd/zfs/abd_os.c index ff4d80ef1..cb37fb362 100644 --- a/module/os/freebsd/zfs/abd_os.c +++ b/module/os/freebsd/zfs/abd_os.c @@ -306,7 +306,7 @@ void abd_free_linear_page(abd_t *abd) { /* - * FreeBSD does not have have scatter linear pages + * FreeBSD does not have scatter linear pages * so there is an error. */ VERIFY(0); diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c index 906294e78..5588d787a 100644 --- a/module/os/freebsd/zfs/zfs_vnops_os.c +++ b/module/os/freebsd/zfs/zfs_vnops_os.c @@ -3577,7 +3577,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap, /* * Create a new object for the symlink. - * for version 4 ZPL datsets the symlink will be an SA attribute + * for version 4 ZPL datasets the symlink will be an SA attribute */ zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); diff --git a/module/os/freebsd/zfs/zio_crypt.c b/module/os/freebsd/zfs/zio_crypt.c index 9fe678d25..0451864db 100644 --- a/module/os/freebsd/zfs/zio_crypt.c +++ b/module/os/freebsd/zfs/zio_crypt.c @@ -114,7 +114,7 @@ * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left * in plaintext for scrubbing and claiming, but the bonus buffers might contain * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing - * which which pieces of the block need to be encrypted. For more details about + * which pieces of the block need to be encrypted. For more details about * dnode authentication and encryption, see zio_crypt_init_uios_dnode(). * * OBJECT SET AUTHENTICATION: diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c index a84ac35a3..c80d4acef 100644 --- a/module/os/freebsd/zfs/zvol_os.c +++ b/module/os/freebsd/zfs/zvol_os.c @@ -761,7 +761,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag) volsize = zv->zv_volsize; /* * uio_loffset == volsize isn't an error as - * its required for EOF processing. + * it's required for EOF processing. */ if (zfs_uio_resid(&uio) > 0 && (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize)) diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c index 8aeed6f56..e95d051ed 100644 --- a/module/os/linux/zfs/zfs_vnops_os.c +++ b/module/os/linux/zfs/zfs_vnops_os.c @@ -3140,7 +3140,7 @@ top: /* * Create a new object for the symlink. 
- * for version 4 ZPL datsets the symlink will be an SA attribute + * for version 4 ZPL datasets the symlink will be an SA attribute */ zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c index d59c1bb07..4bfb26302 100644 --- a/module/os/linux/zfs/zfs_znode.c +++ b/module/os/linux/zfs/zfs_znode.c @@ -217,7 +217,7 @@ zfs_znode_fini(void) * created or destroyed. This kind of locking would normally reside in the * znode itself but in this case that's impossible because the znode and SA * buffer may not yet exist. Therefore the locking is handled externally - * with an array of mutexs and AVLs trees which contain per-object locks. + * with an array of mutexes and AVLs trees which contain per-object locks. * * In zfs_znode_hold_enter() a per-object lock is created as needed, inserted * in to the correct AVL tree and finally the per-object lock is held. In diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c index 2c58fecb2..3ccd5914d 100644 --- a/module/os/linux/zfs/zio_crypt.c +++ b/module/os/linux/zfs/zio_crypt.c @@ -115,7 +115,7 @@ * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left * in plaintext for scrubbing and claiming, but the bonus buffers might contain * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing - * which which pieces of the block need to be encrypted. For more details about + * which pieces of the block need to be encrypted. For more details about * dnode authentication and encryption, see zio_crypt_init_uios_dnode(). * * OBJECT SET AUTHENTICATION: diff --git a/module/unicode/u8_textprep.c b/module/unicode/u8_textprep.c index be816d728..c1d9a325f 100644 --- a/module/unicode/u8_textprep.c +++ b/module/unicode/u8_textprep.c @@ -884,7 +884,7 @@ do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz, * | B0| B1| ... | Bm| * +---+---+-...-+---+ * - * The first byte, B0, is always less then 0xF5 (U8_DECOMP_BOTH). + * The first byte, B0, is always less than 0xF5 (U8_DECOMP_BOTH). * * (2) Canonical decomposition mappings: * diff --git a/module/zcommon/zfs_comutil.c b/module/zcommon/zfs_comutil.c index 1cec60ac1..886167759 100644 --- a/module/zcommon/zfs_comutil.c +++ b/module/zcommon/zfs_comutil.c @@ -26,7 +26,7 @@ /* * This file is intended for functions that ought to be common between user * land (libzfs) and the kernel. When many common routines need to be shared - * then a separate file should to be created. + * then a separate file should be created. */ #if !defined(_KERNEL) diff --git a/module/zfs/abd.c b/module/zfs/abd.c index 1e6645c90..2d1be9752 100644 --- a/module/zfs/abd.c +++ b/module/zfs/abd.c @@ -381,7 +381,7 @@ abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free) child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) { /* * We always pass B_FALSE for free_on_free as it is the - * original child gang ABDs responsibilty to determine + * original child gang ABDs responsibility to determine * if any of its child ABDs should be free'd on the call * to abd_free(). */ diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 55c71a382..f0ae3938a 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -5036,7 +5036,7 @@ arc_reap_cb(void *arg, zthr_t *zthr) * memory in the system at a fraction of the arc_size (1/128th by * default). If oversubscribed (free_memory < 0) then reduce the * target arc_size by the deficit amount plus the fractional - * amount. 
If free memory is positive but less then the fractional + * amount. If free memory is positive but less than the fractional * amount, reduce by what is needed to hit the fractional amount. */ free_memory = arc_available_memory(); diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c index 2faf1af52..bead7da22 100644 --- a/module/zfs/dsl_bookmark.c +++ b/module/zfs/dsl_bookmark.c @@ -236,7 +236,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp, error = SET_ERROR(EEXIST); goto eholdnewbmds; default: - /* dsl_bookmark_lookup_impl already did SET_ERRROR */ + /* dsl_bookmark_lookup_impl already did SET_ERROR */ goto eholdnewbmds; } @@ -271,7 +271,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp, error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR); break; default: - /* dsl_bookmark_lookup already did SET_ERRROR */ + /* dsl_bookmark_lookup already did SET_ERROR */ break; } } else { @@ -536,7 +536,7 @@ dsl_bookmark_create_sync_impl_book( * Reasoning: * - The zbm_redaction_obj would be referred to by both source and new * bookmark, but would be destroyed once either source or new is - * destroyed, resulting in use-after-free of the referrred object. + * destroyed, resulting in use-after-free of the referred object. * - User expectation when issuing the `zfs bookmark` command is that * a normal bookmark of the source is created * diff --git a/module/zfs/spa.c b/module/zfs/spa.c index 5170c9ca2..73fa1809b 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -6496,7 +6496,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot) /* * The virtual dRAID spares must be added after vdev tree is created - * and the vdev guids are generated. The guid of their assoicated + * and the vdev guids are generated. The guid of their associated * dRAID is stored in the config and used when opening the spare. */ if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index c536a1c6c..69e44b48e 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -1372,7 +1372,7 @@ vdev_metaslab_group_create(vdev_t *vd) /* * The spa ashift min/max only apply for the normal metaslab - * class. Class destination is late binding so ashift boundry + * class. Class destination is late binding so ashift boundary * setting had to wait until now. */ if (vd->vdev_top == vd && vd->vdev_ashift != 0 && @@ -2046,7 +2046,7 @@ vdev_open(vdev_t *vd) vd->vdev_max_asize = max_asize; /* - * If the vdev_ashift was not overriden at creation time, + * If the vdev_ashift was not overridden at creation time, * then set it the logical ashift and optimize the ashift. */ if (vd->vdev_ashift == 0) { @@ -2116,7 +2116,7 @@ vdev_open(vdev_t *vd) } /* - * Track the the minimum allocation size. + * Track the minimum allocation size. */ if (vd->vdev_top == vd && vd->vdev_ashift != 0 && vd->vdev_islog == 0 && vd->vdev_aux == NULL) { @@ -4570,7 +4570,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize) /* * Solely for the purposes of 'zpool iostat -lqrw' - * reporting use the priority to catagorize the IO. + * reporting use the priority to categorize the IO. 
* Only the following are reported to user space: * * ZIO_PRIORITY_SYNC_READ, diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c index db753ec16..020b3bc95 100644 --- a/module/zfs/vdev_raidz.c +++ b/module/zfs/vdev_raidz.c @@ -1984,7 +1984,7 @@ raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity) * 2 4 5 first: increment to 3 * 3 4 5 done * - * This strategy works for dRAID but is less effecient when there are a large + * This strategy works for dRAID but is less efficient when there are a large * number of child vdevs and therefore permutations to check. Furthermore, * since the raidz_map_t rows likely do not overlap reconstruction would be * possible as long as there are no more than nparity data errors per row. diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c index a77ff99fa..aa79642aa 100644 --- a/module/zfs/vdev_rebuild.c +++ b/module/zfs/vdev_rebuild.c @@ -81,7 +81,7 @@ * Advantages: * * - Sequential reconstruction is performed in LBA order which may be faster - * than healing reconstruction particularly when using using HDDs (or + * than healing reconstruction particularly when using HDDs (or * especially with SMR devices). Only allocated capacity is resilvered. * * - Sequential reconstruction is not constrained by ZFS block boundaries. diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 5f291d067..7f929df16 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -233,7 +233,7 @@ unsigned long zfs_max_nvlist_src_size = 0; /* * When logging the output nvlist of an ioctl in the on-disk history, limit - * the logged size to this many bytes. This must be less then DMU_MAX_ACCESS. + * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS. * This applies primarily to zfs_ioc_channel_program(). */ unsigned long zfs_history_output_max = 1024 * 1024; diff --git a/module/zstd/README.md b/module/zstd/README.md index f8e127736..eed229e2f 100644 --- a/module/zstd/README.md +++ b/module/zstd/README.md @@ -10,7 +10,7 @@ library, besides upgrading to a newer ZSTD release. Tree structure: * `zfs_zstd.c` is the actual `zzstd` kernel module. -* `lib/` contains the the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md) +* `lib/` contains the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md) version of the `Zstandard` library, generated from our template file * `zstd-in.c` is our template file for generating the library * `include/`: This directory contains supplemental includes for platform diff --git a/module/zstd/include/zstd_compat_wrapper.h b/module/zstd/include/zstd_compat_wrapper.h index 5cca517b5..71adc7804 100644 --- a/module/zstd/include/zstd_compat_wrapper.h +++ b/module/zstd/include/zstd_compat_wrapper.h @@ -34,7 +34,7 @@ /* * This wrapper fixes a problem, in case the ZFS filesystem driver, is compiled - * staticly into the kernel. + * statically into the kernel. * This will cause a symbol collision with the older in-kernel zstd library. * The following macros will simply rename all local zstd symbols and references * diff --git a/module/zstd/zfs_zstd.c b/module/zstd/zfs_zstd.c index dfcd938ae..fc1b0359a 100644 --- a/module/zstd/zfs_zstd.c +++ b/module/zstd/zfs_zstd.c @@ -258,7 +258,7 @@ zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size) for (int i = 0; i < ZSTD_POOL_MAX; i++) { pool = &zstd_mempool[i]; /* - * This lock is simply a marker for a pool object beeing in use. 
+ * This lock is simply a marker for a pool object being in use. * If it's already hold, it will be skipped. * * We need to create it before checking it to avoid race @@ -488,7 +488,7 @@ zfs_zstd_decompress_level(void *s_start, void *d_start, size_t s_len, /* * NOTE: We ignore the ZSTD version for now. As soon as any - * incompatibility occurrs, it has to be handled accordingly. + * incompatibility occurs, it has to be handled accordingly. * The version can be accessed via `hdr_copy.version`. */ diff --git a/tests/runfiles/sanity.run b/tests/runfiles/sanity.run index e32cf5f62..b1d2c73de 100644 --- a/tests/runfiles/sanity.run +++ b/tests/runfiles/sanity.run @@ -12,7 +12,7 @@ # as much functionality as possible while still executing relatively # quickly. The included tests should take no more than a few seconds # each to run at most. This provides a convenient way to sanity test a -# change before commiting to a full test run which takes several hours. +# change before committing to a full test run which takes several hours. # # Approximate run time: 15 minutes # diff --git a/tests/zfs-tests/cmd/draid/draid.c b/tests/zfs-tests/cmd/draid/draid.c index 861c6ba1a..57261348b 100644 --- a/tests/zfs-tests/cmd/draid/draid.c +++ b/tests/zfs-tests/cmd/draid/draid.c @@ -626,7 +626,7 @@ eval_decluster(draid_map_t *map, double *worst_ratiop, double *avg_ratiop) uint64_t faults = nspares; /* - * Score groupwidths up to 19. This value was choosen as the + * Score groupwidths up to 19. This value was chosen as the * largest reasonable width (16d+3p). dRAID pools may be still * be created with wider stripes but they are not considered in * this analysis in order to optimize for the most common cases. @@ -727,7 +727,7 @@ eval_maps(uint64_t children, int passes, uint64_t *map_seed, * Consider maps with a lower worst_ratio to be of higher * quality. Some maps may have a lower avg_ratio but they * are discarded since they might include some particularly - * imbalanced permuations. The average is tracked to in + * imbalanced permutations. The average is tracked to in * order to get a sense of the average permutation quality. */ eval_decluster(map, &worst_ratio, &avg_ratio); @@ -1194,8 +1194,8 @@ draid_dump(int argc, char *argv[]) } /* - * Print all of the mappings as a C formated draid_map_t array. This table - * is found in the module/zcommon/zfs_draid.c file and is the definative + * Print all of the mappings as a C formatted draid_map_t array. This table + * is found in the module/zcommon/zfs_draid.c file and is the definitive * source for all mapping used by dRAID. It cannot be updated without * changing the dRAID on disk format. 
*/ diff --git a/tests/zfs-tests/cmd/file_write/file_write.c b/tests/zfs-tests/cmd/file_write/file_write.c index 45d296db4..60893c34f 100644 --- a/tests/zfs-tests/cmd/file_write/file_write.c +++ b/tests/zfs-tests/cmd/file_write/file_write.c @@ -44,7 +44,7 @@ static unsigned char bigbuffer[BIGBUFFERSIZE]; static void usage(char *); /* - * psudo-randomize the buffer + * pseudo-randomize the buffer */ static void randomize_buffer(int block_size) { int i; diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh index f399ad270..fb29e4acd 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh @@ -19,7 +19,7 @@ # snapshots from the same datasets # # STRATEGY -# 1. Create multiple snapshots for the same datset +# 1. Create multiple snapshots for the same dataset # 2. Run zfs destroy for these snapshots for a mix of valid and # invalid snapshot names # 3. Run zfs destroy for snapshots from different datasets and diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh index dbf81262e..73dec9240 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh @@ -36,7 +36,7 @@ typeset VDEV_PREFIX="$TEST_BASE_DIR/filedev" # STRATEGY: # 1. Create different storage pools, use -n to add devices to the pool and # verify the output is as expected. -# 2. Create a pool whith a hole vdev and verify it's not listed with add -n. +# 2. Create a pool with a hole vdev and verify it's not listed with add -n. # typeset -a dev=( @@ -163,7 +163,7 @@ for (( i=0; i < ${#tests[@]}; i+=1 )); do log_must destroy_pool "$TESTPOOL" done -# Make sure hole vdevs are skiped in output. +# Make sure hole vdevs are skipped in output. log_must eval "zpool create '$TESTPOOL' '${dev[0]}' log '${dev[1]}' \ cache '${dev[2]}'" diff --git a/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh index 382b2cb7f..4951097ac 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh @@ -74,7 +74,7 @@ else fi # -# datsets ordered by checksum options (note, Orange, Carrot & Banana have the +# datasets ordered by checksum options (note, Orange, Carrot & Banana have the # same checksum options, so ZFS should revert to sorting them alphabetically by # name) # diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh index 5cb50fde6..22450d89d 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh @@ -30,7 +30,7 @@ # # STRATEGY: # 1. Change HOME to /var/tmp -# 2. Make a simple script that echos a key value pair +# 2. Make a simple script that echoes a key value pair # in /var/tmp/.zpool.d # 3. Make sure it can be run with -c # 4. 
Remove the script we created diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh index 1197ea2d1..11f51350a 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh @@ -30,7 +30,7 @@ # # STRATEGY: # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs -# 2. Make a simple script that echos a key value pair in each dir +# 2. Make a simple script that echoes a key value pair in each dir # 3. Make sure scripts can be run with -c # 4. Remove the scripts we created diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh index 4cc3deb6d..5363043a8 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh @@ -30,7 +30,7 @@ # # STRATEGY: # 1. Change HOME to /var/tmp -# 2. Make a simple script that echos a key value pair +# 2. Make a simple script that echoes a key value pair # in /var/tmp/.zpool.d # 3. Make sure it can be run with -c # 4. Remove the script we created diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh index a075b9a0c..3f64fdf1a 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh @@ -30,7 +30,7 @@ # # STRATEGY: # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs -# 2. Make a simple script that echos a key value pair in each dir +# 2. Make a simple script that echoes a key value pair in each dir # 3. Make sure scripts can be run with -c # 4. Remove the scripts we created diff --git a/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh b/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh index 0abe1e2ce..86916bf90 100755 --- a/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh @@ -30,7 +30,7 @@ # STRATEGY: # 1. Create a pool # 2. Simulate physical removal of one device -# 3. Verify the device is unvailable +# 3. Verify the device is unavailable # 4. Reattach the device # 5. Verify the device is onlined # 6. Repeat the same tests with a spare device: @@ -104,7 +104,7 @@ do log_must mkfile 1m $mntpnt/file log_must zpool sync $TESTPOOL - # 3. Verify the device is unvailable. + # 3. Verify the device is unavailable. log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL" # 4. 
Reattach the device diff --git a/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh b/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh index b3192b2bf..5c3835349 100755 --- a/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh +++ b/tests/zfs-tests/tests/functional/replacement/attach_multiple.ksh @@ -44,7 +44,7 @@ function cleanup rm -f ${VDEV_FILES[@]} } -log_assert "Verify attach/detech with multiple vdevs" +log_assert "Verify attach/detach with multiple vdevs" ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable SCAN_SUSPEND_PROGRESS) @@ -79,7 +79,7 @@ for replace_mode in "healing" "sequential"; do ${VDEV_FILES[1]} ${VDEV_FILES[2]} log_must is_pool_resilvering $TESTPOOL1 - # Original vdev cannot be detached until there is sufficent redundancy. + # Original vdev cannot be detached until there is sufficient redundancy. log_mustnot zpool detach $TESTPOOL1 ${VDEV_FILES[0]} # Detach first vdev (resilver keeps running) @@ -108,4 +108,4 @@ for replace_mode in "healing" "sequential"; do log_must zpool wait $TESTPOOL1 done -log_pass "Verify attach/detech with multiple vdevs" +log_pass "Verify attach/detach with multiple vdevs" diff --git a/tests/zfs-tests/tests/functional/replacement/replace_import.ksh b/tests/zfs-tests/tests/functional/replacement/replace_import.ksh index 35d51d939..37d3c6645 100755 --- a/tests/zfs-tests/tests/functional/replacement/replace_import.ksh +++ b/tests/zfs-tests/tests/functional/replacement/replace_import.ksh @@ -26,7 +26,7 @@ # Strategy: # 1. For both healing and sequential resilvering replace: # a. Create a pool -# b. Repalce a vdev with 'zpool replace' to resilver (-s) it. +# b. Replace a vdev with 'zpool replace' to resilver (-s) it. # c. Export the pool # d. Import the pool # e. Verify the 'zpool replace' resumed resilvering. diff --git a/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh b/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh index 7896b2dbe..7e96ab518 100755 --- a/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh +++ b/tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh @@ -36,7 +36,7 @@ # a. Replace a vdev with a spare & suspend resilver immediately # b. Verify resilver starts properly # c. Offline / online another vdev to introduce a new DTL range -# d. Verify resilver restart restart or defer +# d. Verify resilver restart or defer # e. Inject read errors on vdev that was offlined / onlned # f. Verify that resilver did not restart # g. Unsuspend resilver and wait for it to finish diff --git a/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh b/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh index ec1986c45..da0d36a35 100755 --- a/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/reservation/reservation_006_pos.ksh @@ -39,7 +39,7 @@ # for a dataset. Unlike quotas however there should be no restrictions # on accessing space outside of the limits of the reservation (if the # space is available in the pool). Verify that in a filesystem with a -# reservation set that its possible to create files both within the +# reservation set that it's possible to create files both within the # reserved space and also outside. # # STRATEGY: |