author     George Wilson <[email protected]>        2014-10-20 22:07:45 +0000
committer  Brian Behlendorf <[email protected]>  2015-04-30 15:07:47 -0700
commit     98b254188a730553361adfabca9f658421be2b82
tree       af1e47047131dec542f7f0637557cba5c22a1d46  /module/zfs/zio.c
parent     8dd86a10cf836d64cddd9c8693f449686e35788c
Illumos #5244 - zio pipeline callers should explicitly invoke next stage
5244 zio pipeline callers should explicitly invoke next stage
Reviewed by: Adam Leventhal <[email protected]>
Reviewed by: Alex Reece <[email protected]>
Reviewed by: Christopher Siden <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: Richard Elling <[email protected]>
Reviewed by: Dan McDonald <[email protected]>
Reviewed by: Steven Hartland <[email protected]>
Approved by: Gordon Ross <[email protected]>
References:
https://www.illumos.org/issues/5244
https://github.com/illumos/illumos-gate/commit/738f37b
Porting Notes:
1. The unported "2932 support crash dumps to raidz, etc. pools"
caused a merge conflict due to a copyright difference in
module/zfs/vdev_raidz.c.
2. The unported "4128 disks in zpools never go away when pulled"
and additional Linux-specific changes caused merge conflicts in
module/zfs/vdev_disk.c.
Ported-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #2828
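
The heart of the change is a calling convention: a pipeline stage that hands its I/O to an asynchronous layer now always returns ZIO_PIPELINE_STOP, and that lower layer is responsible for restarting the pipeline. Below is a minimal C sketch of the convention, under simplifying assumptions: the real zio_execute() in module/zfs/zio.c walks a stage bitmask and handles reexecution and suspension, not the plain index and NULL-terminated stage table used here.

#include <stddef.h>

#define ZIO_PIPELINE_CONTINUE   0x100
#define ZIO_PIPELINE_STOP       0x101

typedef struct zio zio_t;
typedef int (*zio_pipe_stage_t)(zio_t *zio);

struct zio {
        size_t io_stage;                        /* index of the next stage to run */
        const zio_pipe_stage_t *io_pipeline;    /* NULL-terminated stage table */
};

/*
 * Simplified pipeline executor.  A stage that queues its I/O with an
 * asynchronous layer returns ZIO_PIPELINE_STOP; the pipeline resumes
 * only when that layer calls zio_execute()/zio_interrupt() again.
 */
static void
zio_execute(zio_t *zio)
{
        while (zio->io_pipeline[zio->io_stage] != NULL) {
                zio_pipe_stage_t stage = zio->io_pipeline[zio->io_stage];

                /*
                 * Advance past the stage before running it, so that a
                 * later re-entry resumes at the next stage instead of
                 * re-running this one.
                 */
                zio->io_stage++;

                if (stage(zio) == ZIO_PIPELINE_STOP)
                        return; /* the lower layer restarts us later */

                /* ZIO_PIPELINE_CONTINUE: loop to the next stage. */
        }
}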
Diffstat (limited to 'module/zfs/zio.c')
 -rw-r--r--  module/zfs/zio.c  23
 1 file changed, 20 insertions, 3 deletions
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 066f04f18..032341378 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -59,6 +59,9 @@ kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 int zio_delay_max = ZIO_DELAY_MAX;
 
+#define	ZIO_PIPELINE_CONTINUE		0x100
+#define	ZIO_PIPELINE_STOP		0x101
+
 /*
  * The following actions directly effect the spa's sync-to-convergence logic.
  * The values below define the sync pass when we start performing the action.
@@ -2526,6 +2529,18 @@ zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
  * Read and write to physical devices
  * ==========================================================================
  */
+
+
+/*
+ * Issue an I/O to the underlying vdev. Typically the issue pipeline
+ * stops after this stage and will resume upon I/O completion.
+ * However, there are instances where the vdev layer may need to
+ * continue the pipeline when an I/O was not issued. Since the I/O
+ * that was sent to the vdev layer might be different than the one
+ * currently active in the pipeline (see vdev_queue_io()), we explicitly
+ * force the underlying vdev layers to call either zio_execute() or
+ * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
+ */
 static int
 zio_vdev_io_start(zio_t *zio)
 {
@@ -2543,7 +2558,8 @@ zio_vdev_io_start(zio_t *zio)
 		/*
 		 * The mirror_ops handle multiple DVAs in a single BP.
 		 */
-		return (vdev_mirror_ops.vdev_op_io_start(zio));
+		vdev_mirror_ops.vdev_op_io_start(zio);
+		return (ZIO_PIPELINE_STOP);
 	}
 
 	/*
@@ -2551,7 +2567,7 @@ zio_vdev_io_start(zio_t *zio)
 	 * can quickly react to certain workloads.  In particular, we care
 	 * about non-scrubbing, top-level reads and writes with the following
 	 * characteristics:
-	 * 	- synchronous writes of user data to non-slog devices
+	 *	- synchronous writes of user data to non-slog devices
 	 *	- any reads of user data
 	 * When these conditions are met, adjust the timestamp of spa_last_io
 	 * which allows the scan thread to adjust its workload accordingly.
@@ -2637,7 +2653,8 @@ zio_vdev_io_start(zio_t *zio)
 		}
 	}
 
-	return (vd->vdev_ops->vdev_op_io_start(zio));
+	vd->vdev_ops->vdev_op_io_start(zio);
+	return (ZIO_PIPELINE_STOP);
 }
 
 static int
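
Under this contract a vdev_op_io_start callback no longer returns a pipeline code at all; the vdev layer must instead call zio_execute() (same-thread context) or zio_interrupt() (interrupt/completion context) on whichever zio it actually holds. The following is a hedged sketch of a leaf vdev honoring that contract; my_vdev_io_start(), my_device_usable(), my_submit_async(), and my_io_done() are illustrative names, not real ZFS symbols.

typedef struct zio zio_t;               /* opaque here; see sys/zio.h */

extern void zio_execute(zio_t *zio);    /* same-thread pipeline re-entry */
extern void zio_interrupt(zio_t *zio);  /* interrupt-context re-entry */

/* Hypothetical device-layer helpers, for illustration only. */
extern int my_device_usable(zio_t *zio);
extern void my_submit_async(zio_t *zio, void (*done)(zio_t *));

static void
my_io_done(zio_t *zio)
{
        /* Completion (interrupt) context: resume the pipeline. */
        zio_interrupt(zio);
}

static void
my_vdev_io_start(zio_t *zio)
{
        if (!my_device_usable(zio)) {
                /*
                 * Nothing was issued, so no completion handler will
                 * ever fire.  Re-enter the pipeline ourselves on this
                 * zio, which (after vdev_queue_io()) may not be the
                 * zio the caller had in hand.  The real code records
                 * an error on the zio before doing this.
                 */
                zio_interrupt(zio);
                return;
        }

        /* Hand the I/O to the device; my_io_done() fires on completion. */
        my_submit_async(zio, my_io_done);
}

The key point is the failure path: when no I/O is issued, nothing will ever complete it, so the vdev layer itself must restart the pipeline, which is exactly what the comment added to zio.c above spells out.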