author     Will Andrews <[email protected]>        2013-06-11 09:12:34 -0800
committer  Brian Behlendorf <[email protected]>    2013-11-04 10:55:25 -0800
commit     d3cc8b152edc608fa4b73d4cb5354356da6b451c (patch)
tree       e6ac6881379658bfb63cb9787f6781705b6d2004 /module
parent     e49f1e20a09181d03382d64afdc4b7a12a5dfdf1 (diff)
Illumos #3742
3742 zfs comments need cleaner, more consistent style
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: George Wilson <[email protected]>
Reviewed by: Eric Schrock <[email protected]>
Approved by: Christopher Siden <[email protected]>

References:
  https://www.illumos.org/issues/3742
  illumos/illumos-gate@f7170741490edba9d1d9c697c177c887172bc741

Ported-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #1775

Porting notes:

1. The change to zfs_vfsops.c was dropped because it involves
   zfs_mount_label_policy, which does not exist in the Linux port.
Diffstat (limited to 'module')
-rw-r--r--  module/zfs/arc.c           14
-rw-r--r--  module/zfs/bptree.c         2
-rw-r--r--  module/zfs/dnode.c         18
-rw-r--r--  module/zfs/dnode_sync.c     4
-rw-r--r--  module/zfs/dsl_prop.c       2
-rw-r--r--  module/zfs/sa.c             3
-rw-r--r--  module/zfs/spa.c           11
-rw-r--r--  module/zfs/spa_config.c     1
-rw-r--r--  module/zfs/spa_misc.c       2
-rw-r--r--  module/zfs/txg.c            2
-rw-r--r--  module/zfs/vdev.c          18
-rw-r--r--  module/zfs/vdev_queue.c    11
-rw-r--r--  module/zfs/vdev_raidz.c     9
-rw-r--r--  module/zfs/zfs_acl.c        9
-rw-r--r--  module/zfs/zfs_ioctl.c     12
-rw-r--r--  module/zfs/zfs_log.c       21
-rw-r--r--  module/zfs/zfs_rlock.c      2
-rw-r--r--  module/zfs/zfs_sa.c         2
-rw-r--r--  module/zfs/zfs_vfsops.c    11
-rw-r--r--  module/zfs/zfs_vnops.c     28
-rw-r--r--  module/zfs/zfs_znode.c     17
-rw-r--r--  module/zfs/zil.c            5
22 files changed, 95 insertions, 109 deletions
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 2ae4c37a3..c66ff009d 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -58,11 +58,11 @@
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
- * elements of the cache are therefor exactly the same size. So
+ * elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, its simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (rangeing from 512 bytes to
- * 128K bytes). We therefor choose a set of blocks to evict to make
+ * 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
@@ -77,7 +77,7 @@
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal arc algorithms for
- * adjusting the cache use method 2. We therefor provide two
+ * adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* arc list locks.
*
@@ -431,7 +431,7 @@ static arc_stats_t arc_stats = {
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
#define ARCSTAT_INCR(stat, val) \
- atomic_add_64(&arc_stats.stat.value.ui64, (val));
+ atomic_add_64(&arc_stats.stat.value.ui64, (val))
#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
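
The hunk above drops the stray trailing semicolon from the ARCSTAT_INCR() definition. A minimal, self-contained sketch of why statement-like macros leave the semicolon to the caller; the counter names and macros below are invented for illustration, not the ZFS ones:

/*
 * With a ';' baked into the macro body, "COUNTER_BUMP(x);" expands to
 * two statements, so a braceless if/else fails to compile with
 * "else without a previous if". Omitting it keeps the macro usable
 * anywhere a single statement is expected.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hits, misses;

#define	COUNTER_INCR(ctr, val)	((ctr) += (val))	/* no trailing ';' */
#define	COUNTER_BUMP(ctr)	COUNTER_INCR(ctr, 1)

int
main(void)
{
	int cached = 1;

	if (cached)
		COUNTER_BUMP(hits);	/* caller supplies the ';' */
	else
		COUNTER_BUMP(misses);
	(void) printf("hits=%llu misses=%llu\n",
	    (unsigned long long)hits, (unsigned long long)misses);
	return (0);
}
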
@@ -665,9 +665,7 @@ uint64_t zfs_crc64_table[256];
#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
-/*
- * L2ARC Performance Tunables
- */
+/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
@@ -3829,7 +3827,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
/*
* Writes will, almost always, require additional memory allocations
- * in order to compress/encrypt/etc the data. We therefor need to
+ * in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
if ((error = arc_memory_throttle(reserve, anon_size, txg)))
diff --git a/module/zfs/bptree.c b/module/zfs/bptree.c
index 73922db88..a0c90cc4d 100644
--- a/module/zfs/bptree.c
+++ b/module/zfs/bptree.c
@@ -43,7 +43,7 @@
* dsl_scan_sync. This allows the delete operation to finish without traversing
* all the dataset's blocks.
*
- * Note that while bt_begin and bt_end are only ever incremented in this code
+ * Note that while bt_begin and bt_end are only ever incremented in this code,
* they are effectively reset to 0 every time the entire bptree is freed because
* the bptree's object is destroyed and re-created.
*/
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 13af3c31d..c01d724a9 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1804,14 +1804,16 @@ dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
}
/*
- * This function scans a block at the indicated "level" looking for
- * a hole or data (depending on 'flags'). If level > 0, then we are
- * scanning an indirect block looking at its pointers. If level == 0,
- * then we are looking at a block of dnodes. If we don't find what we
- * are looking for in the block, we return ESRCH. Otherwise, return
- * with *offset pointing to the beginning (if searching forwards) or
- * end (if searching backwards) of the range covered by the block
- * pointer we matched on (or dnode).
+ * Scans a block at the indicated "level" looking for a hole or data,
+ * depending on 'flags'.
+ *
+ * If level > 0, then we are scanning an indirect block looking at its
+ * pointers. If level == 0, then we are looking at a block of dnodes.
+ *
+ * If we don't find what we are looking for in the block, we return ESRCH.
+ * Otherwise, return with *offset pointing to the beginning (if searching
+ * forwards) or end (if searching backwards) of the range covered by the
+ * block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index a1c71d487..0ff25d2af 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -302,7 +302,7 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
}
/*
- * free_range: Traverse the indicated range of the provided file
+ * Traverse the indicated range of the provided file
* and "free" all the blocks contained there.
*/
static void
@@ -370,7 +370,7 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
}
/*
- * Try to kick all the dnodes dbufs out of the cache...
+ * Try to kick all the dnode's dbufs out of the cache...
*/
void
dnode_evict_dbufs(dnode_t *dn)
diff --git a/module/zfs/dsl_prop.c b/module/zfs/dsl_prop.c
index cfd8dd433..079ef9742 100644
--- a/module/zfs/dsl_prop.c
+++ b/module/zfs/dsl_prop.c
@@ -380,7 +380,7 @@ dsl_prop_predict(dsl_dir_t *dd, const char *propname,
/*
* Unregister this callback. Return 0 on success, ENOENT if ddname is
- * invalid, ENOMSG if no matching callback registered.
+ * invalid, or ENOMSG if no matching callback registered.
*/
int
dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index 9efd48391..117d3868a 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -110,6 +110,7 @@
* location.
*
* Byteswap implications:
+ *
* Since the SA attributes are not entirely self describing we can't do
* the normal byteswap processing. The special ZAP layout attribute and
* attribute registration attributes define the byteswap function and the
@@ -188,7 +189,6 @@ sa_attr_reg_t sa_legacy_attrs[] = {
};
/*
- * ZPL legacy layout
* This is only used for objects of type DMU_OT_ZNODE
*/
sa_attr_type_t sa_legacy_zpl_layout[] = {
@@ -198,7 +198,6 @@ sa_attr_type_t sa_legacy_zpl_layout[] = {
/*
* Special dummy layout used for buffers with no attributes.
*/
-
sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
static int sa_legacy_attr_count = 16;
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index c30107771..6bd640b40 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -4585,6 +4585,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
/*
* Detach a device from a mirror or replacing vdev.
+ *
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
@@ -5242,11 +5243,9 @@ spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
* the spa_vdev_config_[enter/exit] functions which allow us to
* grab and release the spa_config_lock while still holding the namespace
* lock. During each step the configuration is synced out.
- */
-
-/*
- * Remove a device from the pool. Currently, this supports removing only hot
- * spares, slogs, and level 2 ARC devices.
+ *
+ * Currently, this supports removing only hot spares, slogs, and level 2 ARC
+ * devices.
*/
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
@@ -5356,7 +5355,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
- * current spared, so we can detach it.
+ * currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 5e5b40526..858d9d16e 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -320,6 +320,7 @@ spa_config_set(spa_t *spa, nvlist_t *config)
/*
* Generate the pool's configuration based on the current in-core state.
+ *
* We infer whether to generate a complete config or just one top-level config
* based on whether vd is the root vdev.
*/
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index ffba5028f..3b7922b6c 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1288,7 +1288,7 @@ spa_freeze(spa_t *spa)
/*
* This is a stripped-down version of strtoull, suitable only for converting
- * lowercase hexidecimal numbers that don't overflow.
+ * lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
strtonum(const char *str, char **nptr)
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 697aa0905..8d410f7a5 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -636,7 +636,7 @@ txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
tx_state_t *tx = &dp->dp_tx;
hrtime_t start = gethrtime();
- /* don't delay if this txg could transition to quiesing immediately */
+ /* don't delay if this txg could transition to quiescing immediately */
if (tx->tx_open_txg > txg ||
tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
return;
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 95effb626..ddfca71df 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -966,9 +966,11 @@ vdev_probe_done(zio_t *zio)
}
/*
- * Determine whether this device is accessible by reading and writing
- * to several known locations: the pad regions of each vdev label
- * but the first (which we leave alone in case it contains a VTOC).
+ * Determine whether this device is accessible.
+ *
+ * Read and write to several known locations: the pad regions of each
+ * vdev label but the first, which we leave alone in case it contains
+ * a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
@@ -2202,10 +2204,12 @@ vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
}
/*
- * Online the given vdev. If 'unspare' is set, it implies two things. First,
- * any attached spare device should be detached when the device finishes
- * resilvering. Second, the online should be treated like a 'test' online case,
- * so no FMA events are generated if the device fails to open.
+ * Online the given vdev.
+ *
+ * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
+ * spare device should be detached when the device finishes resilvering.
+ * Second, the online should be treated like a 'test' online case, so no FMA
+ * events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index c01990bf7..06a641087 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -37,13 +37,14 @@
/*
* These tunables are for performance analysis.
*/
+
+/* The maximum number of I/Os concurrently pending to each device. */
+int zfs_vdev_max_pending = 10;
+
/*
- * zfs_vdev_max_pending is the maximum number of i/os concurrently
- * pending to each device. zfs_vdev_min_pending is the initial number
- * of i/os pending to each device (before it starts ramping up to
- * max_pending).
+ * The initial number of I/Os pending to each device, before it starts ramping
+ * up to zfs_vdev_max_pending.
*/
-int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;
/*
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index d2dfd5b43..6e298975d 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -60,6 +60,7 @@
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
+ *
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
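
The bitwise rule quoted above is ordinary multiplication by 2 in GF(2^8): shift left, and if the high bit was set, reduce by XORing in 0x1d (the low byte of the generator polynomial). A small stand-alone sketch, with an invented helper name, that reproduces the start of the vdev_raidz_pow2 table shown further down:

#include <stdint.h>
#include <stdio.h>

/* Multiply a GF(2^8) element by 2, reducing modulo x^8+x^4+x^3+x^2+1. */
static uint8_t
gf_mul2(uint8_t a)
{
	return ((uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0)));
}

int
main(void)
{
	uint8_t p = 0x01;
	int i;

	/* Prints 0x01, 0x02, ..., 0x80, 0x1d, 0x3a, ... (the pow2 table). */
	for (i = 0; i < 16; i++) {
		(void) printf("0x%02x%s", p, (i == 15) ? "\n" : ", ");
		p = gf_mul2(p);
	}
	return (0);
}
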
@@ -118,7 +119,7 @@ typedef struct raidz_map {
uint64_t rm_missingparity; /* Count of missing parity devices */
uint64_t rm_firstdatacol; /* First data column/parity count */
uint64_t rm_nskip; /* Skipped sectors for padding */
- uint64_t rm_skipstart; /* Column index of padding start */
+ uint64_t rm_skipstart; /* Column index of padding start */
void *rm_datacopy; /* rm_asize-buffer of copied data */
uintptr_t rm_reports; /* # of referencing checksum reports */
uint8_t rm_freed; /* map no longer has referencing ZIO */
@@ -158,10 +159,7 @@ typedef struct raidz_map {
*/
int vdev_raidz_default_to_general;
-/*
- * These two tables represent powers and logs of 2 in the Galois field defined
- * above. These values were computed by repeatedly multiplying by 2 as above.
- */
+/* Powers of 2 in the Galois field defined above. */
static const uint8_t vdev_raidz_pow2[256] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26,
@@ -196,6 +194,7 @@ static const uint8_t vdev_raidz_pow2[256] = {
0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83,
0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01
};
+/* Logs of 2 in the Galois field defined above. */
static const uint8_t vdev_raidz_log2[256] = {
0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6,
0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b,
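
Splitting the comment makes it clearer how the two tables work together: since 2 generates every non-zero element of this field, any product can be computed with one lookup in each table. A hedged sketch (the helper name is invented here, not the driver's own routine):

#include <stdint.h>

/*
 * Multiply two GF(2^8) elements using power/log tables like the ones
 * above. Exponents add modulo 255 because 2^255 == 1 in this field;
 * zero has no logarithm and is handled separately.
 */
uint8_t
gf_mul(uint8_t a, uint8_t b, const uint8_t powt[256], const uint8_t logt[256])
{
	if (a == 0 || b == 0)
		return (0);
	return (powt[(logt[a] + logt[b]) % 255]);
}
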
diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c
index 311b19942..ce66dc01b 100644
--- a/module/zfs/zfs_acl.c
+++ b/module/zfs/zfs_acl.c
@@ -1476,7 +1476,8 @@ zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp)
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
- } if (deny1) {
+ }
+ if (deny1) {
zfs_set_ace(aclp, zacep, deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
@@ -1873,7 +1874,7 @@ zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids)
}
/*
- * Retrieve a files ACL
+ * Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
@@ -2028,7 +2029,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
}
/*
- * Set a files ACL
+ * Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
@@ -2449,6 +2450,7 @@ slow:
/*
* Determine whether Access should be granted/denied.
+ *
* The least priv subsytem is always consulted as a basic privilege
* can define any form of access.
*/
@@ -2656,7 +2658,6 @@ zfs_delete_final_check(znode_t *zp, znode_t *dzp,
* Determine whether Access should be granted/deny, without
* consulting least priv subsystem.
*
- *
* The following chart is the recommended NFSv4 enforcement for
* ability to delete an object.
*
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 269680597..a0da3b996 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -290,9 +290,7 @@ zfs_is_bootfs(const char *name)
}
/*
- * zfs_earlier_version
- *
- * Return non-zero if the spa version is less than requested version.
+ * Return non-zero if the spa version is less than requested version.
*/
static int
zfs_earlier_version(const char *name, int version)
@@ -310,8 +308,6 @@ zfs_earlier_version(const char *name, int version)
}
/*
- * zpl_earlier_version
- *
* Return TRUE if the ZPL version is less than requested version.
*/
static boolean_t
@@ -2942,10 +2938,10 @@ zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
/*
* inputs:
- * createprops list of properties requested by creator
- * default_zplver zpl version to use if unspecified in createprops
- * fuids_ok fuids allowed in this version of the spa?
* os parent objset pointer (NULL if root fs)
+ * fuids_ok fuids allowed in this version of the spa?
+ * sa_ok SAs allowed in this version of the spa?
+ * createprops list of properties requested by creator
*
* outputs:
* zplprops values for the zplprops we attach to the master node object
diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c
index 67b120436..0bb44234d 100644
--- a/module/zfs/zfs_log.c
+++ b/module/zfs/zfs_log.c
@@ -212,9 +212,8 @@ zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
}
/*
- * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR,
- * TX_MKDIR_ATTR and TX_MKXATTR
- * transactions.
+ * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
+ * TX_MKXATTR transactions.
*
* TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
* domain information appended prior to the name. In this case the
@@ -341,7 +340,7 @@ zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
}
/*
- * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions.
+ * Handles both TX_REMOVE and TX_RMDIR transactions.
*/
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
@@ -365,7 +364,7 @@ zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
}
/*
- * zfs_log_link() handles TX_LINK transactions.
+ * Handles TX_LINK transactions.
*/
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
@@ -388,7 +387,7 @@ zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
}
/*
- * zfs_log_symlink() handles TX_SYMLINK transactions.
+ * Handles TX_SYMLINK transactions.
*/
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
@@ -420,7 +419,7 @@ zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
}
/*
- * zfs_log_rename() handles TX_RENAME transactions.
+ * Handles TX_RENAME transactions.
*/
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
@@ -446,7 +445,7 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
}
/*
- * zfs_log_write() handles TX_WRITE transactions.
+ * Handles TX_WRITE transactions.
*/
long zfs_immediate_write_sz = 32768;
@@ -525,7 +524,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
}
/*
- * zfs_log_truncate() handles TX_TRUNCATE transactions.
+ * Handles TX_TRUNCATE transactions.
*/
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
@@ -548,7 +547,7 @@ zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
}
/*
- * zfs_log_setattr() handles TX_SETATTR transactions.
+ * Handles TX_SETATTR transactions.
*/
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
@@ -610,7 +609,7 @@ zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
}
/*
- * zfs_log_acl() handles TX_ACL transactions.
+ * Handles TX_ACL transactions.
*/
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index 136972b32..898d8049c 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -28,7 +28,7 @@
/*
* This file contains the code to implement file range locking in
- * ZFS, although there isn't much specific to ZFS (all that comes to mind
+ * ZFS, although there isn't much specific to ZFS (all that comes to mind is
* support for growing the blocksize).
*
* Interface
diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c
index 621c5f904..df4ef3dc1 100644
--- a/module/zfs/zfs_sa.c
+++ b/module/zfs/zfs_sa.c
@@ -264,7 +264,7 @@ out:
/*
* I'm not convinced we should do any of this upgrade.
* since the SA code can read both old/new znode formats
- * with probably little to know performance difference.
+ * with probably little to no performance difference.
*
* All new files will be created with the new format.
*/
diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index 64e01e753..06a5affa4 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -947,13 +947,12 @@ EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
- * zfs_check_global_label:
- * Check that the hex label string is appropriate for the dataset
- * being mounted into the global_zone proper.
+ * Check that the hex label string is appropriate for the dataset being
+ * mounted into the global_zone proper.
*
- * Return an error if the hex label string is not default or
- * admin_low/admin_high. For admin_low labels, the corresponding
- * dataset must be readonly.
+ * Return an error if the hex label string is not default or
+ * admin_low/admin_high. For admin_low labels, the corresponding
+ * dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 9500af62d..e6c1711ac 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -123,7 +123,7 @@
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
- * During ZIL replay the zfs_log_* functions will update the sequence
+ * During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
@@ -357,7 +357,7 @@ update_pages(struct inode *ip, int64_t start, int len,
* else we default from the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
- * the file is memory mapped.
+ * the file is memory mapped.
*/
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
@@ -418,8 +418,7 @@ unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
*
* OUT: uio - updated offset and range, buffer filled.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
@@ -1116,8 +1115,7 @@ EXPORT_SYMBOL(zfs_access);
*
* OUT: ipp - inode of located entry, NULL if not found.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
@@ -1249,8 +1247,7 @@ EXPORT_SYMBOL(zfs_lookup);
*
* OUT: ipp - inode of created or trunc'd entry.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated if new entry created
@@ -1858,8 +1855,7 @@ EXPORT_SYMBOL(zfs_mkdir);
* cr - credentials of caller.
* flags - case flags
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
@@ -3148,8 +3144,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
* cr - credentials of caller.
* flags - case flags
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdip,tdip - ctime|mtime updated
@@ -3483,8 +3478,7 @@ EXPORT_SYMBOL(zfs_rename);
* cr - credentials of caller.
* flags - case flags
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
@@ -4141,8 +4135,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
* pl - list of pages to read
* nr_pages - number of pages to read
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
@@ -4277,8 +4270,7 @@ convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
* offset - current file offset.
* cr - credentials of caller [UNUSED].
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - ctime|mtime updated
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index cf81b92f1..c141c9367 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -754,9 +754,8 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
}
/*
- * zfs_xvattr_set only updates the in-core attributes
- * it is assumed the caller will be doing an sa_bulk_update
- * to push the changes out
+ * Update in-core attributes. It is assumed the caller will be doing an
+ * sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
@@ -1183,8 +1182,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
* IN: zp - znode of file to free data in.
* end - new end-of-file
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
@@ -1261,8 +1259,7 @@ top:
* off - start of section to free.
* len - length of section to free.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
@@ -1300,8 +1297,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
@@ -1374,8 +1370,7 @@ top:
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
- * RETURN: 0 if success
- * error code if failure
+ * RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index c9ff1f650..3688c8a16 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -88,9 +88,9 @@ zil_stats_t zil_stats = {
static kstat_t *zil_ksp;
/*
- * This global ZIL switch affects all pools
+ * Disable intent logging replay. This global ZIL switch affects all pools.
*/
-int zil_replay_disable = 0; /* disable intent logging replay */
+int zil_replay_disable = 0;
/*
* Tunable parameter for debugging or performance analysis. Setting
@@ -922,6 +922,7 @@ zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
/*
* Define a limited set of intent log block sizes.
+ *
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.