diff options
 man/man5/zfs-module-parameters.5 | 14 ++++++++++++++
 module/zfs/metaslab.c            |  5 ++++-
 2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index 250adc9ef..14c7fb81a 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -137,6 +137,20 @@ Default value: \fB8,388,608\fR.
 .sp
 .ne 2
 .na
+\fBmetaslab_aliquot\fR (ulong)
+.ad
+.RS 12n
+Metaslab granularity, in bytes. This is roughly similar to what would be
+referred to as the "stripe size" in traditional RAID arrays. In normal
+operation, ZFS will try to write this amount of data to a top-level vdev
+before moving on to the next one.
+.sp
+Default value: \fB524,288\fR.
+.RE
+
+.sp
+.ne 2
+.na
 \fBmetaslab_bias_enabled\fR (int)
 .ad
 .RS 12n
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 15859908f..3b556b6db 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -59,7 +59,7 @@
  * operation, we will try to write this amount of data to a top-level vdev
  * before moving on to the next one.
  */
-uint64_t metaslab_aliquot = 512ULL << 10;
+unsigned long metaslab_aliquot = 512 << 10;
 
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
 
@@ -2707,6 +2707,7 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
 }
 
 #if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(metaslab_aliquot, ulong, 0644);
 module_param(metaslab_debug_load, int, 0644);
 module_param(metaslab_debug_unload, int, 0644);
 module_param(metaslab_preload_enabled, int, 0644);
@@ -2717,6 +2718,8 @@ module_param(metaslab_fragmentation_factor_enabled, int, 0644);
 module_param(metaslab_lba_weighting_enabled, int, 0644);
 module_param(metaslab_bias_enabled, int, 0644);
 
+MODULE_PARM_DESC(metaslab_aliquot,
+	"allocation granularity (a.k.a. stripe size)");
 MODULE_PARM_DESC(metaslab_debug_load,
 	"load all metaslabs when pool is first opened");
 MODULE_PARM_DESC(metaslab_debug_unload,