Diffstat (limited to 'module/zfs')
 module/zfs/arc.c         | 2 +-
 module/zfs/dbuf.c        | 4 ++--
 module/zfs/dmu_objset.c  | 2 +-
 module/zfs/dnode_sync.c  | 2 +-
 module/zfs/dsl_dataset.c | 2 +-
 module/zfs/dsl_scan.c    | 2 +-
 module/zfs/lz4.c         | 4 ++--
 module/zfs/metaslab.c    | 2 +-
 module/zfs/policy.c      | 2 +-
 module/zfs/rrwlock.c     | 4 ++--
 module/zfs/sa.c          | 2 +-
 module/zfs/spa.c         | 6 +++---
 module/zfs/spa_config.c  | 2 +-
 module/zfs/spa_stats.c   | 4 ++--
 module/zfs/vdev.c        | 2 +-
 module/zfs/vdev_mirror.c | 2 +-
 module/zfs/zap_micro.c   | 2 +-
 module/zfs/zfs_ioctl.c   | 2 +-
 module/zfs/zfs_replay.c  | 2 +-
 module/zfs/zfs_rlock.c   | 4 ++--
 module/zfs/zfs_vnops.c   | 4 ++--
 module/zfs/zil.c         | 2 +-
 module/zfs/zpl_xattr.c   | 2 +-
 module/zfs/zvol.c        | 2 +-
 24 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 170e2f128..2ce8cf628 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -4438,7 +4438,7 @@ SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
/*
* Adapt arc info given the number of bytes we are trying to add and
- * the state that we are comming from. This function is only called
+ * the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index c5a894ede..b7dfb8587 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -150,7 +150,7 @@ int dbuf_cache_max_shift = 5;
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
- * water mark, then callers adding elments to the cache will begin to evict
+ * water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
@@ -320,7 +320,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
idx = hv & h->hash_table_mask;
/*
- * We musn't hold db_mtx to maintain lock ordering:
+ * We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
*/
ASSERT(refcount_is_zero(&db->db_holds));
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 65aff9550..add4417ac 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -63,7 +63,7 @@
krwlock_t os_lock;
/*
- * Tunable to overwrite the maximum number of threads for the parallization
+ * Tunable to override the maximum number of threads for the parallelization
* of dmu_objset_find_dp, needed to speed up the import of pools with many
* datasets.
* Default is 4 times the number of leaf vdevs.
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 527707104..d8527528a 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -539,7 +539,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
/*
* Now that we've released our hold, the dnode may
- * be evicted, so we musn't access it.
+ * be evicted, so we mustn't access it.
*/
}
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index f2c40a3c4..8abb510e8 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -83,7 +83,7 @@ extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
extern int spa_asize_inflation;
/*
- * Figure out how much of this delta should be propogated to the dsl_dir
+ * Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index 68c11b3a8..c097a35ac 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -834,7 +834,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
goto out;
/*
- * If dsl_scan_ddt() has aready visited this block, it will have
+ * If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
diff --git a/module/zfs/lz4.c b/module/zfs/lz4.c
index 5caa6a854..f8294e85e 100644
--- a/module/zfs/lz4.c
+++ b/module/zfs/lz4.c
@@ -63,7 +63,7 @@ lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
return (s_len);
/*
- * Encode the compresed buffer size at the start. We'll need this in
+ * Encode the compressed buffer size at the start. We'll need this in
* decompression to counter the effects of padding which might be
* added to the compressed buffer and which, if unhandled, would
* confuse the hell out of our decompression function.
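
The trick being described — prefixing the compressed stream with its own length so later padding can be ignored — looks roughly like this sketch (hypothetical helper names; the real code stores the size in a fixed byte order):

    #include <stdint.h>
    #include <string.h>

    /* Write a fixed-width size header, then the compressed payload. */
    static size_t
    put_with_header(char *d_start, const char *payload, uint32_t c_len)
    {
        memcpy(d_start, &c_len, sizeof (c_len));
        memcpy(d_start + sizeof (c_len), payload, c_len);
        return (sizeof (c_len) + c_len);
    }

    /* Decompression trusts the header, not the possibly padded s_len. */
    static uint32_t
    payload_len(const char *s_start)
    {
        uint32_t c_len;
        memcpy(&c_len, s_start, sizeof (c_len));
        return (c_len);
    }
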
@@ -205,7 +205,7 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
/*
* Little Endian or Big Endian?
- * Note: overwrite the below #define if you know your architecture endianess.
+ * Note: overwrite the below #define if you know your architecture endianness.
*/
#if defined(_BIG_ENDIAN)
#define LZ4_BIG_ENDIAN 1
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 5dd425768..a8a5f45db 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1989,7 +1989,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
} else {
/*
* Since the space map is not loaded we simply update the
- * exisiting histogram with what was freed in this txg. This
+ * existing histogram with what was freed in this txg. This
* means that the on-disk histogram may not have an accurate
* view of the free space but it's close enough to allow
* us to make allocation decisions.
diff --git a/module/zfs/policy.c b/module/zfs/policy.c
index fda13a9b5..5b1de29e4 100644
--- a/module/zfs/policy.c
+++ b/module/zfs/policy.c
@@ -36,7 +36,7 @@
/*
* The passed credentials cannot be directly verified because Linux only
- * provides and interface to check the *current* proces credentials. In
+ * provides an interface to check the *current* process credentials. In
* order to handle this the capable() test is only run when the passed
* credentials match the current process credentials or the kcred. In
* all other cases this function must fail and return the passed err.
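
In other words: bail out with the caller's error unless the credentials are ones capable() can actually speak for. A compilable sketch of that pattern with stubbed-out types (all names hypothetical):

    typedef struct { int pid; } cred_t;

    static cred_t current, kcred_s;
    static const cred_t *kcred = &kcred_s;

    static const cred_t *current_cred(void) { return (&current); }
    static int capable(int cap) { (void) cap; return (1); }   /* stub */

    static int
    priv_policy(const cred_t *cr, int cap, int err)
    {
        /* capable() can only examine the current process. */
        if (cr != current_cred() && cr != kcred)
            return (err);   /* unverifiable: fail with the passed err */
        return (capable(cap) ? 0 : err);
    }
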
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index 51394c01c..704f76067 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -313,8 +313,8 @@ rrw_tsd_destroy(void *arg)
* The idea is to split single busy lock into array of locks, so that
* each reader can lock only one of them for read, depending on result
* of simple hash function. That proportionally reduces lock congestion.
- * Writer same time has to sequentially aquire write on all the locks.
- * That makes write aquisition proportionally slower, but in places where
+ * A writer at the same time has to sequentially acquire write on all the locks.
+ * That makes write acquisition proportionally slower, but in places where
* it is used (filesystem unmount) performance is not critical.
*
* All the functions below are direct wrappers around functions above.
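
A rough userspace model of that layout, using POSIX rwlocks (the array size mirrors the RRM_NUM_LOCKS define; everything else here is illustrative):

    #include <pthread.h>
    #include <stdint.h>

    #define RRM_NUM_LOCKS 17

    typedef struct { pthread_rwlock_t locks[RRM_NUM_LOCKS]; } rrm_t;

    /* A reader hashes its thread id to a single slot... */
    static pthread_rwlock_t *
    reader_slot(rrm_t *rrm)
    {
        uintptr_t t = (uintptr_t) pthread_self();  /* not portable; sketch only */
        return (&rrm->locks[(t >> 4) % RRM_NUM_LOCKS]);
    }

    static void
    rrm_enter_read(rrm_t *rrm)
    {
        pthread_rwlock_rdlock(reader_slot(rrm));
    }

    /* ...while a writer must take every slot, sequentially, for write. */
    static void
    rrm_enter_write(rrm_t *rrm)
    {
        for (int i = 0; i < RRM_NUM_LOCKS; i++)
            pthread_rwlock_wrlock(&rrm->locks[i]);
    }

A reader releases the same slot it locked, which it can reach again simply by rehashing the current thread on exit.
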
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index 46aacfb1b..5346764a7 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1245,7 +1245,7 @@ sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
/*
- * Determine number of variable lenghts in header
+ * Determine number of variable lengths in header
* The standard 8 byte header has one for free and a
* 16 byte header would have 4 + 1;
*/
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index a463859a2..f3d821f79 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -3677,7 +3677,7 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
nvlist_t **newdevs;
/*
- * Generate new dev list by concatentating with the
+ * Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
@@ -6274,7 +6274,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
- * The version is synced seperatly before other
+ * The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
@@ -6304,7 +6304,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. It's unnecessary
* to do this for pool creation since the vdev's
- * configuratoin has already been dirtied.
+ * configuration has already been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 4f5d16543..0240da2b9 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -222,7 +222,7 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
* the configuration has been synced to the MOS. This exposes a window where
* the MOS config will have been updated but the cache file has not. If
* the system were to crash at that instant then the cached config may not
- * contain the correct information to open the pool and an explicity import
+ * contain the correct information to open the pool and an explicit import
* would be required.
*/
void
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index 20d7dc9d1..ac1fb8c6f 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -106,7 +106,7 @@ spa_read_history_addr(kstat_t *ksp, loff_t n)
}
/*
- * When the kstat is written discard all spa_read_history_t entires. The
+ * When the kstat is written discard all spa_read_history_t entries. The
* ssh->lock will be held until ksp->ks_ndata entries are processed.
*/
static int
@@ -327,7 +327,7 @@ spa_txg_history_addr(kstat_t *ksp, loff_t n)
}
/*
- * When the kstat is written discard all spa_txg_history_t entires. The
+ * When the kstat is written discard all spa_txg_history_t entries. The
* ssh->lock will be held until ksp->ks_ndata entries are processed.
*/
static int
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 8fc1a8d28..77bfef3a0 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -2706,7 +2706,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
- * When reopening in reponse to a clear event, it may be due to
+ * When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c
index 2b9081168..dc2a34642 100644
--- a/module/zfs/vdev_mirror.c
+++ b/module/zfs/vdev_mirror.c
@@ -153,7 +153,7 @@ vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
/*
* Apply half the seek increment to I/O's within seek offset
- * of the last I/O queued to this vdev as they should incure less
+ * of the last I/O queued to this vdev as they should incur less
* of a seek increment.
*/
if (ABS(lastoffset - zio_offset) <
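
The weighting the comment describes can be sketched in isolation: an I/O landing within the seek-offset threshold of the previous one is charged only half the seek penalty. The constants below are hypothetical stand-ins for the mirror tunables:

    #include <stdint.h>

    static int64_t seek_inc    = 5;        /* stand-in seek penalty */
    static int64_t seek_offset = 1 << 20;  /* "nearby" threshold: 1 MiB */

    static int64_t
    load_for(int64_t lastoffset, int64_t zio_offset, int64_t base_load)
    {
        int64_t d = lastoffset - zio_offset;

        if (d < 0)
            d = -d;
        if (d < seek_offset)
            return (base_load + seek_inc / 2);  /* nearby: half penalty */
        return (base_load + seek_inc);
    }
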
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 131c33e29..1ee990582 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -124,7 +124,7 @@ zap_hash(zap_name_t *zn)
* Don't use all 64 bits, since we need some in the cookie for
* the collision differentiator. We MUST use the high bits,
* since those are the ones that we first pay attention to when
- * chosing the bucket.
+ * choosing the bucket.
*/
h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
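
The masking line above keeps only the high zap_hashbits(zap) bits of the hash; everything below them is reserved for the cursor cookie's collision differentiator. The split, shown standalone with an illustrative constant in place of zap_hashbits():

    #include <stdint.h>

    #define HASHBITS 28   /* illustrative; the real value is per-zap */

    /* Bucket selection uses the HIGH bits of the 64-bit hash... */
    static uint64_t
    bucket(uint64_t h)
    {
        return (h >> (64 - HASHBITS));
    }

    /* ...so the LOW bits stay free for the cookie's differentiator. */
    static uint64_t
    cookie_bits(uint64_t h)
    {
        return (h & ((1ULL << (64 - HASHBITS)) - 1));
    }
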
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 18af93cae..98ab745f8 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -4307,7 +4307,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin,
/*
* dsl_props_set() will not convert RECEIVED to LOCAL on or
* after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL
- * explictly if we're restoring local properties cleared in the
+ * explicitly if we're restoring local properties cleared in the
* first new-style receive.
*/
if (origprops != NULL &&
diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c
index eee13c065..19387ce05 100644
--- a/module/zfs/zfs_replay.c
+++ b/module/zfs/zfs_replay.c
@@ -870,7 +870,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
* The FUID table index may no longer be valid and
* during zfs_create() a new index may be assigned.
* Because of this the log will contain the original
- * doman+rid in order to create a new FUID.
+ * domain+rid in order to create a new FUID.
*
* The individual ACEs may contain an ephemeral uid/gid which is no
* longer valid and will need to be replaced with an actual FUID.
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index d3f68537f..d7fc01496 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -65,7 +65,7 @@
* Otherwise, the proxy lock is split into smaller lock ranges and
* new proxy locks created for non overlapping ranges.
* The reference counts are adjusted accordingly.
- * Meanwhile, the orginal lock is kept around (this is the callers handle)
+ * Meanwhile, the original lock is kept around (this is the caller's handle)
* and its offset and length are used when releasing the lock.
*
* Thread coordination
@@ -87,7 +87,7 @@
*
* Grow block handling
* -------------------
- * ZFS supports multiple block sizes currently upto 128K. The smallest
+ * ZFS supports multiple block sizes currently up to 128K. The smallest
* block size is used for the file which is grown as needed. During this
* growth all other writers and readers must be excluded.
* So if the block size needs to be grown then the whole file is
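
Concretely, a write that would push the file past its current (still growable) block size must take a whole-file lock instead of a byte-range lock. A sketch of that decision with a stubbed range-lock entry point (hypothetical names):

    #include <stdint.h>

    typedef enum { RL_READER, RL_WRITER } rl_type_t;

    /* Stub: would insert the range into the per-file lock tree. */
    static void
    rl_lock(uint64_t off, uint64_t len, rl_type_t type)
    {
        (void) off; (void) len; (void) type;
    }

    static void
    lock_for_write(uint64_t off, uint64_t len, uint64_t blksz, uint64_t maxblksz)
    {
        if (off + len > blksz && blksz < maxblksz)
            rl_lock(0, UINT64_MAX, RL_WRITER);  /* growing: exclude everyone */
        else
            rl_lock(off, len, RL_WRITER);       /* ordinary byte-range lock */
    }
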
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 9fe4c7870..3509238d1 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -857,7 +857,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the excute bits is set.
+ * privileged and at least one of the execute bits is set.
*
* It would be nice to to this after all writes have
* been done, but that would still expose the ISUID/ISGID
@@ -2127,7 +2127,7 @@ top:
}
/*
- * Grab a lock on the directory to make sure that noone is
+ * Grab a lock on the directory to make sure that no one is
* trying to add (or lookup) entries while we are removing it.
*/
rw_enter(&zp->z_name_lock, RW_WRITER);
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index b3b069900..10ea12cd7 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -544,7 +544,7 @@ zil_create(zilog_t *zilog)
/*
* Allocate an initial log block if:
* - there isn't one already
- * - the existing block is the wrong endianess
+ * - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
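
BP_SHOULD_BYTESWAP() amounts to comparing the block's recorded byte order against the host's. The general technique — recognizing a foreign-endian block because its magic number reads byte-swapped — can be sketched as follows (the magic value here is illustrative):

    #include <stdint.h>

    #define MAGIC 0x00bab10cULL   /* illustrative on-disk magic */

    static int
    should_byteswap(uint64_t stored)
    {
        return (stored == __builtin_bswap64(MAGIC));  /* GCC/Clang builtin */
    }
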
diff --git a/module/zfs/zpl_xattr.c b/module/zfs/zpl_xattr.c
index 9ab27f1c2..7186e477a 100644
--- a/module/zfs/zpl_xattr.c
+++ b/module/zfs/zpl_xattr.c
@@ -50,7 +50,7 @@
* are the security.selinux xattrs which are less than 100 bytes and
* exist for every file when xattr labeling is enabled.
*
- * The Linux xattr implemenation has been written to take advantage of
+ * The Linux xattr implementation has been written to take advantage of
* this typical usage. When the dataset property 'xattr=sa' is set,
* then xattrs will be preferentially stored as System Attributes (SA).
* This allows tiny xattrs (~100 bytes) to be stored with the dnode and
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 7674b3148..10c963ca5 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1627,7 +1627,7 @@ zvol_create_minors_cb(const char *dsname, void *arg)
* - for each zvol, create a minor node, then check if the zvol's snapshots
* are 'visible', and only then iterate over the snapshots if needed
*
- * If the name represents a snapshot, a check is perfromed if the snapshot is
+ * If the name represents a snapshot, it is checked whether the snapshot is
* 'visible' (which also verifies that the parent is a zvol), and if so,
* a minor node for that snapshot is created.
*/