author     Alexander Motin <[email protected]>    2019-03-13 15:00:10 -0400
committer  Brian Behlendorf <[email protected]>  2019-03-13 12:00:10 -0700
commit     1af240f3b51c080376bb6ae1efc13d62b087b65d (patch)
tree       0f602dc3baa2d9d9a9f1543d102204a3d6327990
parent     12a935ee9c2e2aa92309e7afb9d73e0757f61cd7 (diff)
Add separate aggregation limit for non-rotating media
Before the sequential scrub patches, ZFS never aggregated I/Os above 128KB. Sequential scrub bumped that to 1MB, supposedly to reduce the number of head seeks on spinning disks. For SSDs this makes little to no sense, especially on FreeBSD, where due to the MAXPHYS limitation the device will likely still see a bunch of 128KB I/Os instead of one large one. A stricter aggregation limit for SSDs avoids allocating a large memory buffer and copying data to and from it, which becomes a serious problem when throughput reaches gigabytes per second.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Richard Elling <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Closes #8494
-rw-r--r--  man/man5/zfs-module-parameters.5  |  11
-rw-r--r--  module/zfs/vdev_queue.c           |  11
2 files changed, 21 insertions(+), 1 deletion(-)
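The MAXPHYS argument in the commit message is easy to quantify. Below is a minimal C illustration; the numbers are the two defaults from this patch plus FreeBSD's historical 128KB MAXPHYS, and nothing here is measured:

#include <stdio.h>

int
main(void)
{
        /* Defaults from this patch; MAXPHYS was historically 128KB on FreeBSD. */
        const int maxphys = 128 << 10;
        const int agg_rotating = 1 << 20;       /* zfs_vdev_aggregation_limit */
        const int agg_non_rotating = 128 << 10; /* ..._non_rotating */

        /*
         * A 1MB aggregate still reaches the device as eight 128KB
         * transfers, so the large bounce buffer and the copy through
         * it buy nothing on an SSD.
         */
        printf("rotating: %d device I/Os per aggregate\n",
            agg_rotating / maxphys);      /* 8 */
        printf("non-rotating: %d device I/O per aggregate\n",
            agg_non_rotating / maxphys);  /* 1 */
        return (0);
}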
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index 2a0f5f81d..f75f09917 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -2364,6 +2364,17 @@ Default value: \fB5\fR.
 .RS 12n
 Max vdev I/O aggregation size
 .sp
+Default value: \fB1,048,576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_aggregation_limit_non_rotating\fR (int)
+.ad
+.RS 12n
+Max vdev I/O aggregation size for non-rotating media
+.sp
 Default value: \fB131,072\fR.
 .RE
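On Linux, the module_param(..., 0644) declaration in the vdev_queue.c hunk below exposes this tunable under /sys/module/zfs/parameters. A small sketch of reading it at runtime, assuming the zfs module is loaded (the path follows from the parameter name):

#include <stdio.h>

int
main(void)
{
        const char *path =
            "/sys/module/zfs/parameters/zfs_vdev_aggregation_limit_non_rotating";
        FILE *f = fopen(path, "r");
        int val;

        if (f == NULL) {
                perror("fopen");
                return (1);
        }
        /* The parameter is a plain int, printed in decimal by sysfs. */
        if (fscanf(f, "%d", &val) == 1)
                printf("%s = %d\n", path, val);
        fclose(f);
        return (0);
}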
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 939699cb8..a1861d5f0 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -174,6 +174,7 @@ int zfs_vdev_async_write_active_max_dirty_percent = 60;
  * they aren't able to help us aggregate at this level.
  */
 int zfs_vdev_aggregation_limit = 1 << 20;
+int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
 int zfs_vdev_read_gap_limit = 32 << 10;
 int zfs_vdev_write_gap_limit = 4 << 10;
@@ -549,7 +550,11 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 	abd_t *abd;
 
 	maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
-	limit = MAX(MIN(zfs_vdev_aggregation_limit, maxblocksize), 0);
+	if (vq->vq_vdev->vdev_nonrot)
+		limit = zfs_vdev_aggregation_limit_non_rotating;
+	else
+		limit = zfs_vdev_aggregation_limit;
+	limit = MAX(MIN(limit, maxblocksize), 0);
 
 	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
 		return (NULL);
@@ -913,6 +918,10 @@ vdev_queue_last_offset(vdev_t *vd)
 module_param(zfs_vdev_aggregation_limit, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");
 
+module_param(zfs_vdev_aggregation_limit_non_rotating, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_aggregation_limit_non_rotating,
+	"Max vdev I/O aggregation size for non-rotating media");
+
 module_param(zfs_vdev_read_gap_limit, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");
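Pulling the two vdev_queue.c hunks together, here is a self-contained sketch of the selection-plus-clamp logic. The standalone helper and the demo values are illustrative only; in the patch this code lives inline in vdev_queue_aggregate():

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Defaults from the patch; SPA_OLD_MAXBLOCKSIZE is 128KB. */
static int zfs_vdev_aggregation_limit = 1 << 20;
static int zfs_vdev_aggregation_limit_non_rotating = 128 << 10;

/*
 * Mirrors the new vdev_queue_aggregate() logic: pick the limit by media
 * type, then clamp it to [0, maxblocksize].
 */
static int
aggregation_limit(int vdev_nonrot, int maxblocksize)
{
        int limit;

        if (vdev_nonrot)
                limit = zfs_vdev_aggregation_limit_non_rotating;
        else
                limit = zfs_vdev_aggregation_limit;
        return (MAX(MIN(limit, maxblocksize), 0));
}

int
main(void)
{
        int maxblocksize = 1 << 24;     /* e.g. a 16MB pool max block size */

        printf("HDD limit: %d\n", aggregation_limit(0, maxblocksize)); /* 1048576 */
        printf("SSD limit: %d\n", aggregation_limit(1, maxblocksize)); /* 131072 */
        return (0);
}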