author     ka7 <[email protected]>                        2017-01-03 18:31:18 +0100
committer  Brian Behlendorf <[email protected]>    2017-01-03 11:31:18 -0600
commit     4e33ba4c389f59b74138bf7130e924a4230d64e9 (patch)
tree       6707105987e45e846d0b0a5f0ad6e3e30191e77f /module
parent     a5e046eaacad20487188c9eef231554e1401d8c9 (diff)
Fix spelling
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Haakan T Johansson <[email protected]>
Closes #5547
Closes #5543
Diffstat (limited to 'module')
33 files changed, 41 insertions, 41 deletions
diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S
index d55c5eb48..367795668 100644
--- a/module/icp/asm-x86_64/sha2/sha256_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha256_impl.S
@@ -33,7 +33,7 @@
  * level parallelism, on a given CPU implementation in this case.
  *
  * Special note on Intel EM64T. While Opteron CPU exhibits perfect
- * perfromance ratio of 1.5 between 64- and 32-bit flavors [see above],
+ * performance ratio of 1.5 between 64- and 32-bit flavors [see above],
  * [currently available] EM64T CPUs apparently are far from it. On the
  * contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
  * sha256_block:-( This is presumably because 64-bit shifts/rotates
diff --git a/module/icp/asm-x86_64/sha2/sha512_impl.S b/module/icp/asm-x86_64/sha2/sha512_impl.S
index 248d8b2cc..5a49cff4b 100644
--- a/module/icp/asm-x86_64/sha2/sha512_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha512_impl.S
@@ -33,7 +33,7 @@
  * level parallelism, on a given CPU implementation in this case.
  *
  * Special note on Intel EM64T. While Opteron CPU exhibits perfect
- * perfromance ratio of 1.5 between 64- and 32-bit flavors [see above],
+ * performance ratio of 1.5 between 64- and 32-bit flavors [see above],
  * [currently available] EM64T CPUs apparently are far from it. On the
  * contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
  * sha256_block:-( This is presumably because 64-bit shifts/rotates
diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c
index 38927dcc0..fd2f7e1aa 100644
--- a/module/icp/core/kcf_callprov.c
+++ b/module/icp/core/kcf_callprov.c
@@ -282,7 +282,7 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
     prov_chain = me->me_hw_prov_chain;
 
     /*
-     * We check for the threshhold for using a hardware provider for
+     * We check for the threshold for using a hardware provider for
     * this amount of data. If there is no software provider available
     * for the mechanism, then the threshold is ignored.
     */
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 3ed154758..cac34a44a 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -100,7 +100,7 @@ kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
 };
 
 /*
- * Per-algorithm internal threasholds for the minimum input size of before
+ * Per-algorithm internal thresholds for the minimum input size of before
  * offloading to hardware provider.
  * Dispatching a crypto operation to a hardware provider entails paying the
  * cost of an additional context switch. Measurments with Sun Accelerator 4000
diff --git a/module/icp/include/sha1/sha1.h b/module/icp/include/sha1/sha1.h
index b6ae6b8d2..251b64fca 100644
--- a/module/icp/include/sha1/sha1.h
+++ b/module/icp/include/sha1/sha1.h
@@ -35,7 +35,7 @@ extern "C" {
 /*
  * NOTE: n2rng (Niagara2 RNG driver) accesses the state field of
  * SHA1_CTX directly. NEVER change this structure without verifying
- * compatiblity with n2rng. The important thing is that the state
+ * compatibility with n2rng. The important thing is that the state
  * must be in a field declared as uint32_t state[5].
  */
 /* SHA-1 context. */
diff --git a/module/nvpair/nvpair_alloc_fixed.c b/module/nvpair/nvpair_alloc_fixed.c
index 20081ba56..e3ac12938 100644
--- a/module/nvpair/nvpair_alloc_fixed.c
+++ b/module/nvpair/nvpair_alloc_fixed.c
@@ -42,7 +42,7 @@
  * - it uses a pre-allocated buffer for memory allocations.
  * - it does _not_ free memory in the pre-allocated buffer.
  *
- * The reason for the selected implemention is simplicity.
+ * The reason for the selected implementation is simplicity.
  * This allocator is designed for the usage in interrupt context when
  * the caller may not wait for free memory.
  */
diff --git a/module/unicode/u8_textprep.c b/module/unicode/u8_textprep.c
index 992f6b6b7..74253c50d 100644
--- a/module/unicode/u8_textprep.c
+++ b/module/unicode/u8_textprep.c
@@ -842,7 +842,7 @@ do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz,
     }
 
     /*
-     * At this point, this rountine does not know what it would get.
+     * At this point, this routine does not know what it would get.
     * The caller should sort it out if the state isn't a Hangul one.
     */
     *state = U8_STATE_START;
diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c
index 1802750f9..06e4854e4 100644
--- a/module/zcommon/zfs_prop.c
+++ b/module/zcommon/zfs_prop.c
@@ -143,7 +143,7 @@ zfs_prop_init(void)
     { "noallow", ZFS_ACL_NOALLOW },
     { "restricted", ZFS_ACL_RESTRICTED },
     { "passthrough", ZFS_ACL_PASSTHROUGH },
-    { "secure", ZFS_ACL_RESTRICTED }, /* bkwrd compatability */
+    { "secure", ZFS_ACL_RESTRICTED }, /* bkwrd compatibility */
     { "passthrough-x", ZFS_ACL_PASSTHROUGH_X },
     { NULL }
 };
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 170e2f128..2ce8cf628 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -4438,7 +4438,7 @@ SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
 
 /*
  * Adapt arc info given the number of bytes we are trying to add and
- * the state that we are comming from. This function is only called
+ * the state that we are coming from. This function is only called
  * when we are adding new content to the cache.
  */
 static void
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index c5a894ede..b7dfb8587 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -150,7 +150,7 @@ int dbuf_cache_max_shift = 5;
  * cache size). Once the eviction thread is woken up and eviction is required,
  * it will continue evicting buffers until it's able to reduce the cache size
  * to the low water mark. If the cache size continues to grow and hits the high
- * water mark, then callers adding elments to the cache will begin to evict
+ * water mark, then callers adding elements to the cache will begin to evict
  * directly from the cache until the cache is no longer above the high water
  * mark.
  */
@@ -320,7 +320,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
     idx = hv & h->hash_table_mask;
 
     /*
-     * We musn't hold db_mtx to maintain lock ordering:
+     * We mustn't hold db_mtx to maintain lock ordering:
     * DBUF_HASH_MUTEX > db_mtx.
     */
     ASSERT(refcount_is_zero(&db->db_holds));
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 65aff9550..add4417ac 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -63,7 +63,7 @@
 krwlock_t os_lock;
 
 /*
- * Tunable to overwrite the maximum number of threads for the parallization
+ * Tunable to overwrite the maximum number of threads for the parallelization
  * of dmu_objset_find_dp, needed to speed up the import of pools with many
  * datasets.
  * Default is 4 times the number of leaf vdevs.
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 527707104..d8527528a 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -539,7 +539,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
     dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
     /*
     * Now that we've released our hold, the dnode may
-     * be evicted, so we musn't access it.
+     * be evicted, so we mustn't access it.
     */
 }
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index f2c40a3c4..8abb510e8 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -83,7 +83,7 @@ extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
 extern int spa_asize_inflation;
 
 /*
- * Figure out how much of this delta should be propogated to the dsl_dir
+ * Figure out how much of this delta should be propagated to the dsl_dir
  * layer. If there's a refreservation, that space has already been
  * partially accounted for in our ancestors.
  */
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index 68c11b3a8..c097a35ac 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -834,7 +834,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
         goto out;
 
     /*
-     * If dsl_scan_ddt() has aready visited this block, it will have
+     * If dsl_scan_ddt() has already visited this block, it will have
     * already done any translations or scrubbing, so don't call the
     * callback again.
     */
diff --git a/module/zfs/lz4.c b/module/zfs/lz4.c
index 5caa6a854..f8294e85e 100644
--- a/module/zfs/lz4.c
+++ b/module/zfs/lz4.c
@@ -63,7 +63,7 @@ lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
         return (s_len);
 
     /*
-     * Encode the compresed buffer size at the start. We'll need this in
+     * Encode the compressed buffer size at the start. We'll need this in
     * decompression to counter the effects of padding which might be
     * added to the compressed buffer and which, if unhandled, would
     * confuse the hell out of our decompression function.
@@ -205,7 +205,7 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
 
 /*
  * Little Endian or Big Endian?
- * Note: overwrite the below #define if you know your architecture endianess.
+ * Note: overwrite the below #define if you know your architecture endianness.
  */
 #if defined(_BIG_ENDIAN)
 #define LZ4_BIG_ENDIAN 1
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 5dd425768..a8a5f45db 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1989,7 +1989,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
     } else {
         /*
         * Since the space map is not loaded we simply update the
-         * exisiting histogram with what was freed in this txg. This
+         * existing histogram with what was freed in this txg. This
         * means that the on-disk histogram may not have an accurate
         * view of the free space but it's close enough to allow
         * us to make allocation decisions.
diff --git a/module/zfs/policy.c b/module/zfs/policy.c
index fda13a9b5..5b1de29e4 100644
--- a/module/zfs/policy.c
+++ b/module/zfs/policy.c
@@ -36,7 +36,7 @@
 
 /*
  * The passed credentials cannot be directly verified because Linux only
- * provides and interface to check the *current* proces credentials. In
+ * provides and interface to check the *current* process credentials. In
  * order to handle this the capable() test is only run when the passed
  * credentials match the current process credentials or the kcred. In
  * all other cases this function must fail and return the passed err.
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index 51394c01c..704f76067 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -313,8 +313,8 @@ rrw_tsd_destroy(void *arg)
  * The idea is to split single busy lock into array of locks, so that
  * each reader can lock only one of them for read, depending on result
  * of simple hash function. That proportionally reduces lock congestion.
- * Writer same time has to sequentially aquire write on all the locks.
- * That makes write aquisition proportionally slower, but in places where
+ * Writer at the same time has to sequentially acquire write on all the locks.
+ * That makes write acquisition proportionally slower, but in places where
  * it is used (filesystem unmount) performance is not critical.
  *
  * All the functions below are direct wrappers around functions above.
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index 46aacfb1b..5346764a7 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1245,7 +1245,7 @@ sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
     sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
 
     /*
-     * Determine number of variable lenghts in header
+     * Determine number of variable lengths in header
     * The standard 8 byte header has one for free and a
     * 16 byte header would have 4 + 1;
     */
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index a463859a2..f3d821f79 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -3677,7 +3677,7 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
         nvlist_t **newdevs;
 
         /*
-         * Generate new dev list by concatentating with the
+         * Generate new dev list by concatenating with the
         * current dev list.
         */
         VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
@@ -6274,7 +6274,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
         case ZPOOL_PROP_VERSION:
             intval = fnvpair_value_uint64(elem);
             /*
-             * The version is synced seperatly before other
+             * The version is synced separately before other
             * properties and should be correct by now.
             */
             ASSERT3U(spa_version(spa), >=, intval);
@@ -6304,7 +6304,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
             * We need to dirty the configuration on all the vdevs
             * so that their labels get updated. It's unnecessary
             * to do this for pool creation since the vdev's
-             * configuratoin has already been dirtied.
+             * configuration has already been dirtied.
             */
             if (tx->tx_txg != TXG_INITIAL)
                 vdev_config_dirty(spa->spa_root_vdev);
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 4f5d16543..0240da2b9 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -222,7 +222,7 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
  * the configuration has been synced to the MOS. This exposes a window where
  * the MOS config will have been updated but the cache file has not. If
  * the system were to crash at that instant then the cached config may not
- * contain the correct information to open the pool and an explicity import
+ * contain the correct information to open the pool and an explicit import
  * would be required.
  */
 void
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index 20d7dc9d1..ac1fb8c6f 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -106,7 +106,7 @@ spa_read_history_addr(kstat_t *ksp, loff_t n)
 }
 
 /*
- * When the kstat is written discard all spa_read_history_t entires. The
+ * When the kstat is written discard all spa_read_history_t entries. The
  * ssh->lock will be held until ksp->ks_ndata entries are processed.
  */
 static int
@@ -327,7 +327,7 @@ spa_txg_history_addr(kstat_t *ksp, loff_t n)
 }
 
 /*
- * When the kstat is written discard all spa_txg_history_t entires. The
+ * When the kstat is written discard all spa_txg_history_t entries. The
  * ssh->lock will be held until ksp->ks_ndata entries are processed.
  */
 static int
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 8fc1a8d28..77bfef3a0 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -2706,7 +2706,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
         !vdev_readable(vd) || !vdev_writeable(vd)) {
 
         /*
-         * When reopening in reponse to a clear event, it may be due to
+         * When reopening in response to a clear event, it may be due to
         * a fmadm repair request. In this case, if the device is
         * still broken, we want to still post the ereport again.
         */
diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c
index 2b9081168..dc2a34642 100644
--- a/module/zfs/vdev_mirror.c
+++ b/module/zfs/vdev_mirror.c
@@ -153,7 +153,7 @@ vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
 
         /*
         * Apply half the seek increment to I/O's within seek offset
-         * of the last I/O queued to this vdev as they should incure less
+         * of the last I/O queued to this vdev as they should incur less
         * of a seek increment.
         */
         if (ABS(lastoffset - zio_offset) <
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 131c33e29..1ee990582 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -124,7 +124,7 @@ zap_hash(zap_name_t *zn)
         * Don't use all 64 bits, since we need some in the cookie for
         * the collision differentiator. We MUST use the high bits,
         * since those are the ones that we first pay attention to when
-         * chosing the bucket.
+         * choosing the bucket.
         */
 
         h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 18af93cae..98ab745f8 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -4307,7 +4307,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin,
     /*
     * dsl_props_set() will not convert RECEIVED to LOCAL on or
     * after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL
-     * explictly if we're restoring local properties cleared in the
+     * explicitly if we're restoring local properties cleared in the
     * first new-style receive.
     */
     if (origprops != NULL &&
diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c
index eee13c065..19387ce05 100644
--- a/module/zfs/zfs_replay.c
+++ b/module/zfs/zfs_replay.c
@@ -870,7 +870,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
  * The FUID table index may no longer be valid and
  * during zfs_create() a new index may be assigned.
  * Because of this the log will contain the original
- * doman+rid in order to create a new FUID.
+ * domain+rid in order to create a new FUID.
  *
  * The individual ACEs may contain an ephemeral uid/gid which is no
  * longer valid and will need to be replaced with an actual FUID.
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index d3f68537f..d7fc01496 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -65,7 +65,7 @@
  * Otherwise, the proxy lock is split into smaller lock ranges and
  * new proxy locks created for non overlapping ranges.
  * The reference counts are adjusted accordingly.
- * Meanwhile, the orginal lock is kept around (this is the callers handle)
+ * Meanwhile, the original lock is kept around (this is the callers handle)
  * and its offset and length are used when releasing the lock.
 *
 * Thread coordination
@@ -87,7 +87,7 @@
 *
 * Grow block handling
 * -------------------
- * ZFS supports multiple block sizes currently upto 128K. The smallest
+ * ZFS supports multiple block sizes currently up to 128K. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 9fe4c7870..3509238d1 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -857,7 +857,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 
     /*
     * Clear Set-UID/Set-GID bits on successful write if not
-     * privileged and at least one of the excute bits is set.
+     * privileged and at least one of the execute bits is set.
     *
     * It would be nice to to this after all writes have
     * been done, but that would still expose the ISUID/ISGID
@@ -2127,7 +2127,7 @@ top:
     }
 
     /*
-     * Grab a lock on the directory to make sure that noone is
+     * Grab a lock on the directory to make sure that no one is
     * trying to add (or lookup) entries while we are removing it.
     */
     rw_enter(&zp->z_name_lock, RW_WRITER);
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index b3b069900..10ea12cd7 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -544,7 +544,7 @@ zil_create(zilog_t *zilog)
     /*
     * Allocate an initial log block if:
     *    - there isn't one already
-     *    - the existing block is the wrong endianess
+     *    - the existing block is the wrong endianness
     */
     if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
         tx = dmu_tx_create(zilog->zl_os);
diff --git a/module/zfs/zpl_xattr.c b/module/zfs/zpl_xattr.c
index 9ab27f1c2..7186e477a 100644
--- a/module/zfs/zpl_xattr.c
+++ b/module/zfs/zpl_xattr.c
@@ -50,7 +50,7 @@
  * are the security.selinux xattrs which are less than 100 bytes and
  * exist for every file when xattr labeling is enabled.
  *
- * The Linux xattr implemenation has been written to take advantage of
+ * The Linux xattr implementation has been written to take advantage of
  * this typical usage. When the dataset property 'xattr=sa' is set,
  * then xattrs will be preferentially stored as System Attributes (SA).
  * This allows tiny xattrs (~100 bytes) to be stored with the dnode and
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 7674b3148..10c963ca5 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1627,7 +1627,7 @@ zvol_create_minors_cb(const char *dsname, void *arg)
  * - for each zvol, create a minor node, then check if the zvol's snapshots
  * are 'visible', and only then iterate over the snapshots if needed
  *
- * If the name represents a snapshot, a check is perfromed if the snapshot is
+ * If the name represents a snapshot, a check is performed if the snapshot is
  * 'visible' (which also verifies that the parent is a zvol), and if so,
  * a minor node for that snapshot is created.
  */
diff --git a/module/zpios/pios.c b/module/zpios/pios.c
index c1791eb7d..297d35bba 100644
--- a/module/zpios/pios.c
+++ b/module/zpios/pios.c
@@ -1,7 +1,7 @@
 /*
  * ZPIOS is a heavily modified version of the original PIOS test code.
  * It is designed to have the test code running in the Linux kernel
- * against ZFS while still being flexibly controled from user space.
+ * against ZFS while still being flexibly controlled from user space.
  *
  * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).