Diffstat (limited to 'module')
-rw-r--r--  module/avl/avl.c                           | 2
-rw-r--r--  module/icp/algs/modes/gcm.c                | 4
-rw-r--r--  module/os/freebsd/zfs/abd_os.c             | 2
-rw-r--r--  module/os/freebsd/zfs/zfs_vnops_os.c       | 2
-rw-r--r--  module/os/freebsd/zfs/zio_crypt.c          | 2
-rw-r--r--  module/os/freebsd/zfs/zvol_os.c            | 2
-rw-r--r--  module/os/linux/zfs/zfs_vnops_os.c         | 2
-rw-r--r--  module/os/linux/zfs/zfs_znode.c            | 2
-rw-r--r--  module/os/linux/zfs/zio_crypt.c            | 2
-rw-r--r--  module/unicode/u8_textprep.c               | 2
-rw-r--r--  module/zcommon/zfs_comutil.c               | 2
-rw-r--r--  module/zfs/abd.c                           | 2
-rw-r--r--  module/zfs/arc.c                           | 2
-rw-r--r--  module/zfs/dsl_bookmark.c                  | 6
-rw-r--r--  module/zfs/spa.c                           | 2
-rw-r--r--  module/zfs/vdev.c                          | 8
-rw-r--r--  module/zfs/vdev_raidz.c                    | 2
-rw-r--r--  module/zfs/vdev_rebuild.c                  | 2
-rw-r--r--  module/zfs/zfs_ioctl.c                     | 2
-rw-r--r--  module/zstd/README.md                      | 2
-rw-r--r--  module/zstd/include/zstd_compat_wrapper.h  | 2
-rw-r--r--  module/zstd/zfs_zstd.c                     | 4
22 files changed, 29 insertions, 29 deletions
diff --git a/module/avl/avl.c b/module/avl/avl.c
index d0473d883..1a95092bc 100644
--- a/module/avl/avl.c
+++ b/module/avl/avl.c
@@ -1008,7 +1008,7 @@ avl_destroy_nodes(avl_tree_t *tree, void **cookie)
--tree->avl_numnodes;
/*
- * If we just did a right child or there isn't one, go up to parent.
+ * If we just removed a right child or there isn't one, go up to parent.
*/
if (child == 1 || parent->avl_child[1] == NULL) {
node = parent;
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index 23686c59e..7332834cb 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -1399,7 +1399,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
}
datap += done;
}
- /* Decrypt remainder, which is less then chunk size, in one go. */
+ /* Decrypt remainder, which is less than chunk size, in one go. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) {
done = aesni_gcm_decrypt(datap, datap, bleft,
@@ -1415,7 +1415,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
ASSERT(bleft < GCM_AVX_MIN_DECRYPT_BYTES);
/*
- * Now less then GCM_AVX_MIN_DECRYPT_BYTES bytes remain,
+ * Now less than GCM_AVX_MIN_DECRYPT_BYTES bytes remain,
* decrypt them block by block.
*/
while (bleft > 0) {
diff --git a/module/os/freebsd/zfs/abd_os.c b/module/os/freebsd/zfs/abd_os.c
index ff4d80ef1..cb37fb362 100644
--- a/module/os/freebsd/zfs/abd_os.c
+++ b/module/os/freebsd/zfs/abd_os.c
@@ -306,7 +306,7 @@ void
abd_free_linear_page(abd_t *abd)
{
/*
- * FreeBSD does not have have scatter linear pages
+ * FreeBSD does not have scatter linear pages
* so there is an error.
*/
VERIFY(0);
diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c
index 906294e78..5588d787a 100644
--- a/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -3577,7 +3577,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
/*
* Create a new object for the symlink.
- * for version 4 ZPL datsets the symlink will be an SA attribute
+ * for version 4 ZPL datasets the symlink will be an SA attribute
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
diff --git a/module/os/freebsd/zfs/zio_crypt.c b/module/os/freebsd/zfs/zio_crypt.c
index 9fe678d25..0451864db 100644
--- a/module/os/freebsd/zfs/zio_crypt.c
+++ b/module/os/freebsd/zfs/zio_crypt.c
@@ -114,7 +114,7 @@
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
- * which which pieces of the block need to be encrypted. For more details about
+ * which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
diff --git a/module/os/freebsd/zfs/zvol_os.c b/module/os/freebsd/zfs/zvol_os.c
index a84ac35a3..c80d4acef 100644
--- a/module/os/freebsd/zfs/zvol_os.c
+++ b/module/os/freebsd/zfs/zvol_os.c
@@ -761,7 +761,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
volsize = zv->zv_volsize;
/*
* uio_loffset == volsize isn't an error as
- * its required for EOF processing.
+ * it's required for EOF processing.
*/
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c
index 8aeed6f56..e95d051ed 100644
--- a/module/os/linux/zfs/zfs_vnops_os.c
+++ b/module/os/linux/zfs/zfs_vnops_os.c
@@ -3140,7 +3140,7 @@ top:
/*
* Create a new object for the symlink.
- * for version 4 ZPL datsets the symlink will be an SA attribute
+ * for version 4 ZPL datasets the symlink will be an SA attribute
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c
index d59c1bb07..4bfb26302 100644
--- a/module/os/linux/zfs/zfs_znode.c
+++ b/module/os/linux/zfs/zfs_znode.c
@@ -217,7 +217,7 @@ zfs_znode_fini(void)
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
- * with an array of mutexs and AVLs trees which contain per-object locks.
+ * with an array of mutexes and AVLs trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* in to the correct AVL tree and finally the per-object lock is held. In
diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c
index 2c58fecb2..3ccd5914d 100644
--- a/module/os/linux/zfs/zio_crypt.c
+++ b/module/os/linux/zfs/zio_crypt.c
@@ -115,7 +115,7 @@
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
- * which which pieces of the block need to be encrypted. For more details about
+ * which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
diff --git a/module/unicode/u8_textprep.c b/module/unicode/u8_textprep.c
index be816d728..c1d9a325f 100644
--- a/module/unicode/u8_textprep.c
+++ b/module/unicode/u8_textprep.c
@@ -884,7 +884,7 @@ do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz,
* | B0| B1| ... | Bm|
* +---+---+-...-+---+
*
- * The first byte, B0, is always less then 0xF5 (U8_DECOMP_BOTH).
+ * The first byte, B0, is always less than 0xF5 (U8_DECOMP_BOTH).
*
* (2) Canonical decomposition mappings:
*
diff --git a/module/zcommon/zfs_comutil.c b/module/zcommon/zfs_comutil.c
index 1cec60ac1..886167759 100644
--- a/module/zcommon/zfs_comutil.c
+++ b/module/zcommon/zfs_comutil.c
@@ -26,7 +26,7 @@
/*
* This file is intended for functions that ought to be common between user
* land (libzfs) and the kernel. When many common routines need to be shared
- * then a separate file should to be created.
+ * then a separate file should be created.
*/
#if !defined(_KERNEL)
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 1e6645c90..2d1be9752 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -381,7 +381,7 @@ abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
/*
* We always pass B_FALSE for free_on_free as it is the
- * original child gang ABDs responsibilty to determine
+ * original child gang ABDs responsibility to determine
* if any of its child ABDs should be free'd on the call
* to abd_free().
*/
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 55c71a382..f0ae3938a 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -5036,7 +5036,7 @@ arc_reap_cb(void *arg, zthr_t *zthr)
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
- * amount. If free memory is positive but less then the fractional
+ * amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c
index 2faf1af52..bead7da22 100644
--- a/module/zfs/dsl_bookmark.c
+++ b/module/zfs/dsl_bookmark.c
@@ -236,7 +236,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
error = SET_ERROR(EEXIST);
goto eholdnewbmds;
default:
- /* dsl_bookmark_lookup_impl already did SET_ERRROR */
+ /* dsl_bookmark_lookup_impl already did SET_ERROR */
goto eholdnewbmds;
}
@@ -271,7 +271,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
break;
default:
- /* dsl_bookmark_lookup already did SET_ERRROR */
+ /* dsl_bookmark_lookup already did SET_ERROR */
break;
}
} else {
@@ -536,7 +536,7 @@ dsl_bookmark_create_sync_impl_book(
* Reasoning:
* - The zbm_redaction_obj would be referred to by both source and new
* bookmark, but would be destroyed once either source or new is
- * destroyed, resulting in use-after-free of the referrred object.
+ * destroyed, resulting in use-after-free of the referred object.
* - User expectation when issuing the `zfs bookmark` command is that
* a normal bookmark of the source is created
*
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 5170c9ca2..73fa1809b 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -6496,7 +6496,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
/*
* The virtual dRAID spares must be added after vdev tree is created
- * and the vdev guids are generated. The guid of their assoicated
+ * and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index c536a1c6c..69e44b48e 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -1372,7 +1372,7 @@ vdev_metaslab_group_create(vdev_t *vd)
/*
* The spa ashift min/max only apply for the normal metaslab
- * class. Class destination is late binding so ashift boundry
+ * class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
@@ -2046,7 +2046,7 @@ vdev_open(vdev_t *vd)
vd->vdev_max_asize = max_asize;
/*
- * If the vdev_ashift was not overriden at creation time,
+ * If the vdev_ashift was not overridden at creation time,
* then set it the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
@@ -2116,7 +2116,7 @@ vdev_open(vdev_t *vd)
}
/*
- * Track the the minimum allocation size.
+ * Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
@@ -4570,7 +4570,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
/*
* Solely for the purposes of 'zpool iostat -lqrw'
- * reporting use the priority to catagorize the IO.
+ * reporting use the priority to categorize the IO.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index db753ec16..020b3bc95 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -1984,7 +1984,7 @@ raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
* 2 4 5 first: increment to 3
* 3 4 5 done
*
- * This strategy works for dRAID but is less effecient when there are a large
+ * This strategy works for dRAID but is less efficient when there are a large
* number of child vdevs and therefore permutations to check. Furthermore,
* since the raidz_map_t rows likely do not overlap reconstruction would be
* possible as long as there are no more than nparity data errors per row.
diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c
index a77ff99fa..aa79642aa 100644
--- a/module/zfs/vdev_rebuild.c
+++ b/module/zfs/vdev_rebuild.c
@@ -81,7 +81,7 @@
* Advantages:
*
* - Sequential reconstruction is performed in LBA order which may be faster
- * than healing reconstruction particularly when using using HDDs (or
+ * than healing reconstruction particularly when using HDDs (or
* especially with SMR devices). Only allocated capacity is resilvered.
*
* - Sequential reconstruction is not constrained by ZFS block boundaries.
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 5f291d067..7f929df16 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -233,7 +233,7 @@ unsigned long zfs_max_nvlist_src_size = 0;
/*
* When logging the output nvlist of an ioctl in the on-disk history, limit
- * the logged size to this many bytes. This must be less then DMU_MAX_ACCESS.
+ * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS.
* This applies primarily to zfs_ioc_channel_program().
*/
unsigned long zfs_history_output_max = 1024 * 1024;
diff --git a/module/zstd/README.md b/module/zstd/README.md
index f8e127736..eed229e2f 100644
--- a/module/zstd/README.md
+++ b/module/zstd/README.md
@@ -10,7 +10,7 @@ library, besides upgrading to a newer ZSTD release.
Tree structure:
* `zfs_zstd.c` is the actual `zzstd` kernel module.
-* `lib/` contains the the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md)
+* `lib/` contains the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md)
version of the `Zstandard` library, generated from our template file
* `zstd-in.c` is our template file for generating the library
* `include/`: This directory contains supplemental includes for platform
diff --git a/module/zstd/include/zstd_compat_wrapper.h b/module/zstd/include/zstd_compat_wrapper.h
index 5cca517b5..71adc7804 100644
--- a/module/zstd/include/zstd_compat_wrapper.h
+++ b/module/zstd/include/zstd_compat_wrapper.h
@@ -34,7 +34,7 @@
/*
* This wrapper fixes a problem, in case the ZFS filesystem driver, is compiled
- * staticly into the kernel.
+ * statically into the kernel.
* This will cause a symbol collision with the older in-kernel zstd library.
* The following macros will simply rename all local zstd symbols and references
*
diff --git a/module/zstd/zfs_zstd.c b/module/zstd/zfs_zstd.c
index dfcd938ae..fc1b0359a 100644
--- a/module/zstd/zfs_zstd.c
+++ b/module/zstd/zfs_zstd.c
@@ -258,7 +258,7 @@ zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
for (int i = 0; i < ZSTD_POOL_MAX; i++) {
pool = &zstd_mempool[i];
/*
- * This lock is simply a marker for a pool object beeing in use.
+ * This lock is simply a marker for a pool object being in use.
* If it's already hold, it will be skipped.
*
* We need to create it before checking it to avoid race
@@ -488,7 +488,7 @@ zfs_zstd_decompress_level(void *s_start, void *d_start, size_t s_len,
/*
* NOTE: We ignore the ZSTD version for now. As soon as any
- * incompatibility occurrs, it has to be handled accordingly.
+ * incompatibility occurs, it has to be handled accordingly.
* The version can be accessed via `hdr_copy.version`.
*/