author	Andrea Gelmini <[email protected]>	2019-09-03 02:56:41 +0200
committer	Brian Behlendorf <[email protected]>	2019-09-02 17:56:41 -0700
commit	e1cfd73f7f91f1ccf4b19ec26adcbcd575f546c9 (patch)
tree	b5297a8ebb1c0c804e121b8f0522f83aa5b5480b /module/zfs/dsl_scan.c
parent	7859537768e030d0151a6d72a6b031751228bc85 (diff)
Fix typos in module/zfs/
Reviewed-by: Matt Ahrens <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Reviewed-by: Richard Laager <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Andrea Gelmini <[email protected]>
Closes #9240
Diffstat (limited to 'module/zfs/dsl_scan.c')
 module/zfs/dsl_scan.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index d6956f560..1becd4d55 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -1912,7 +1912,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
/*
* This debugging is commented out to conserve stack space. This
- * function is called recursively and the debugging addes several
+ * function is called recursively and the debugging adds several
* bytes to the stack for each call. It can be commented back in
* if required to debug an issue in dsl_scan_visitbp().
*
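
To see why per-call debug state matters in this hunk, consider a minimal
standalone C sketch (illustrative only, not the ZFS code; visit() and its
buffer are hypothetical): every level of recursion keeps its own copy of
the buffer live on the stack at once.

#include <stdio.h>

/*
 * Hypothetical recursive visitor: the 64-byte dbuf exists once per
 * active call, so at depth 100 the debug buffers alone hold
 * 100 * 64 = 6400 bytes of stack. Removing them, as the comment in
 * dsl_scan_visitbp() describes, reclaims that space on every frame.
 */
static void
visit(int depth)
{
	char dbuf[64];		/* per-call debug buffer */

	if (depth == 0)
		return;
	(void) snprintf(dbuf, sizeof (dbuf), "visiting depth %d", depth);
	(void) puts(dbuf);	/* stand-in for dprintf-style logging */
	visit(depth - 1);
}

int
main(void)
{
	visit(100);
	return (0);
}
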
@@ -3373,7 +3373,7 @@ dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
- * cna guarantee that blocks we are currently scanning will not change out
+ * can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
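
The invariant described in that comment can be sketched in a few lines of C
(hypothetical names: txg_sync_loop(), scan_step(), sync_dirty_data(); this
illustrates the ordering, not the ZFS implementation): because scan work
runs only inside the sync thread's own loop, the blocks being scanned
cannot be rewritten out from under it.

/*
 * Illustrative sketch: all scan work is funneled through the one
 * thread that also writes out changes, so a block examined by
 * scan_step() cannot change until sync_dirty_data() runs later in
 * the same loop iteration.
 */
typedef struct pool {
	int	scan_active;	/* nonzero while a scrub/resilver runs */
} pool_t;

static void
scan_step(pool_t *p)
{
	(void) p;	/* placeholder: would scan a bounded batch of blocks */
}

static void
sync_dirty_data(pool_t *p)
{
	(void) p;	/* placeholder: would persist this txg's changes */
}

static void
txg_sync_loop(pool_t *p, int txgs)
{
	while (txgs-- > 0) {
		if (p->scan_active)
			scan_step(p);	/* runs first, sees stable blocks */
		sync_dirty_data(p);	/* the only writer, runs after */
	}
}

int
main(void)
{
	pool_t p = { .scan_active = 1 };

	txg_sync_loop(&p, 3);
	return (0);
}
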
@@ -3977,7 +3977,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
* As can be seen, at fill_ratio=3, the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
- * 100 with bitshifting by 7 (which effecitvely multiplies and divides by 128).
+ * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*/
static int
ext_size_compare(const void *x, const void *y)
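
The shift trick in that last comment is easy to verify standalone. Here is
a small C example (fill_score() is a hypothetical helper, not the real
comparator's code): scaling by 128 via << 7 instead of by 100 changes the
absolute scores but not their relative order, which is all a sort
comparator needs.

#include <stdio.h>
#include <stdint.h>

/*
 * (fill << 7) / size equals (fill * 128) / size: one shift replaces a
 * multiply-by-100 and the matching divide, at the cost of the scale
 * constant becoming 128. Both scalings are monotone, so comparing two
 * scores yields the same ordering either way.
 */
static uint64_t
fill_score(uint64_t fill, uint64_t size)
{
	return ((fill << 7) / size);	/* fill ratio scaled to 0..128 */
}

int
main(void)
{
	/* two hypothetical extents: a is 75% full, b is 50% full */
	uint64_t a = fill_score(300, 400);	/* 96 */
	uint64_t b = fill_score(500, 1000);	/* 64 */

	(void) printf("a=%llu/128 b=%llu/128 -> %s sorts first\n",
	    (unsigned long long)a, (unsigned long long)b,
	    a > b ? "a" : "b");
	return (0);
}
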