author	Brian Behlendorf <[email protected]>	2012-12-17 16:23:27 -0800
committer	Brian Behlendorf <[email protected]>	2013-01-08 10:35:43 -0800
commit	91579709fccd3e55a21970742b66c388fb1403db (patch)
tree	c581f69220bbd43aa104a5e59bb47aa2fe523f4c /module/zfs
parent	ea0b2538cd5967fcdf26b7b7c01859a060fef3e3 (diff)
Fix __zio_execute() asynchronous dispatch
To save valuable stack, all zios were made asynchronous when in the txg_sync_thread context or during pool initialization. See commit 2fac4c2 for the original patch and motivation.

Unfortunately, the changes to dsl_pool_sync_context() made by the feature flags broke this logic, causing __zio_execute() to dispatch itself infinitely when called during pool initialization. This commit refines the existing logic to specifically target only the two cases we care about.

Signed-off-by: Brian Behlendorf <[email protected]>
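For reference, the helper whose semantics changed is dsl_pool_sync_context(). The sketch below is a reconstruction based on the checks added in this diff, not code from this commit; it illustrates why the old combined test looped: during pool initialization the predicate stays true on every thread, including the issue taskq threads, so each re-dispatched zio immediately re-dispatched itself.

boolean_t
dsl_pool_sync_context(dsl_pool_t *dp)
{
	/*
	 * Reconstructed sketch: true on the txg sync thread and, after
	 * the feature-flags changes, also while the pool is initializing.
	 * The second condition holds regardless of which thread runs the
	 * zio, which is what let __zio_execute() dispatch itself forever
	 * during pool init.
	 */
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}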
Diffstat (limited to 'module/zfs')
-rw-r--r--	module/zfs/zio.c	26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 66f228bc7..638105a09 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1248,7 +1248,7 @@ __zio_execute(zio_t *zio)
 	while (zio->io_stage < ZIO_STAGE_DONE) {
 		enum zio_stage pipeline = zio->io_pipeline;
 		enum zio_stage stage = zio->io_stage;
-		dsl_pool_t *dsl;
+		dsl_pool_t *dp;
 		boolean_t cut;
 		int rv;
@@ -1262,7 +1262,7 @@ __zio_execute(zio_t *zio)
 		ASSERT(stage <= ZIO_STAGE_DONE);

-		dsl = spa_get_dsl(zio->io_spa);
+		dp = spa_get_dsl(zio->io_spa);
 		cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
 		    zio_requeue_io_start_cut_in_line : B_FALSE;
@@ -1272,16 +1272,24 @@ __zio_execute(zio_t *zio)
 		 * or may wait for an I/O that needs an interrupt thread
 		 * to complete, issue async to avoid deadlock.
 		 *
-		 * If we are in the txg_sync_thread or being called
-		 * during pool init issue async to minimize stack depth.
-		 * Both of these call paths may be recursively called.
-		 *
 		 * For VDEV_IO_START, we cut in line so that the io will
 		 * be sent to disk promptly.
 		 */
-		if (((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
-		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) ||
-		    (dsl != NULL && dsl_pool_sync_context(dsl))) {
+		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
+		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
+			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
+			return;
+		}
+
+		/*
+		 * If we are executing in the context of the tx_sync_thread,
+		 * or we are performing pool initialization outside of a
+		 * zio_taskq[ZIO_TASKQ_ISSUE] context, issue the zio async
+		 * to minimize stack usage for these deep call paths.
+		 */
+		if ((dp && curthread == dp->dp_tx.tx_sync_thread) ||
+		    (dp && spa_is_initializing(dp->dp_spa) &&
+		    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE))) {
 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
 			return;
 		}
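The fix terminates because of the !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) guard: the first time the pool-initialization branch fires, the zio is handed to the ZIO_TASKQ_ISSUE taskq, and when __zio_execute() runs again on that taskq thread the guard is false, so the pipeline advances instead of re-dispatching. A rough sketch of zio_taskq_member(), assuming the era's spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES] layout (illustrative only, not part of this diff):

static int
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
	kthread_t *executor = zio->io_executor;	/* thread executing this zio */
	spa_t *spa = zio->io_spa;
	zio_type_t t;

	/* Check whether that thread belongs to any taskq of type q. */
	for (t = 0; t < ZIO_TYPES; t++)
		if (taskq_member(spa->spa_zio_taskq[t][q], executor))
			return (B_TRUE);

	return (B_FALSE);
}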