author    Brian Behlendorf <[email protected]>    2015-12-02 11:53:37 -0800
committer Brian Behlendorf <[email protected]>    2015-12-07 12:20:43 -0800
commit    b58986eebf3c47c946393da4b968ee33edaea99e (patch)
tree      6104d36d9f2fd55a4725090f1b5098b4d1272103 /module
parent    f40926795c1a1d3750e2a6f6904061cfa68715df (diff)
Use large stacks when available
While stack size will vary by architecture, it has historically defaulted to 8K on x86_64 systems. However, as of Linux 3.15 the default thread stack size was increased to 16K. These kernels are now the default in most non-enterprise distributions, which means we no longer need to assume 8K stacks.

This patch takes advantage of that fact by appropriately reverting the stack conservation changes that were made to ensure stability, changes which may have had a negative impact on performance for certain workloads. It also has the side effect of bringing the code slightly more in line with upstream.

Signed-off-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #4059
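The behavioral switch rests on a single compile-time flag, HAVE_LARGE_STACKS, which the build system defines when the kernel provides 16K stacks. As a rough, hypothetical sketch of the idea only (the real detection happens in the SPL/ZFS configure checks, not in-tree, and may not look like this):

    /*
     * Hypothetical illustration only: derive a large-stack guard from
     * the kernel's THREAD_SIZE, the per-thread stack size in bytes.
     */
    #include <linux/thread_info.h>

    #if defined(THREAD_SIZE) && (THREAD_SIZE >= 16384)
    #define HAVE_LARGE_STACKS 1
    #endif

With the flag defined, the code below calls straight into stack-heavy paths; without it, work is re-dispatched to taskq threads, which start with a fresh stack.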
Diffstat (limited to 'module')
-rw-r--r--    module/zfs/dmu_send.c    8
-rw-r--r--    module/zfs/zio.c         47
2 files changed, 38 insertions(+), 17 deletions(-)
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 8e79dc70d..b028e5ba4 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -67,7 +67,7 @@ typedef struct dump_bytes_io {
} dump_bytes_io_t;
static void
-dump_bytes_strategy(void *arg)
+dump_bytes_cb(void *arg)
{
dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
dmu_sendarg_t *dsp = dbi->dbi_dsp;
@@ -94,6 +94,9 @@ dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
dbi.dbi_buf = buf;
dbi.dbi_len = len;
+#if defined(HAVE_LARGE_STACKS)
+ dump_bytes_cb(&dbi);
+#else
/*
* The vn_rdwr() call is performed in a taskq to ensure that there is
* always enough stack space to write safely to the target filesystem.
@@ -101,7 +104,8 @@ dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
* them and they are used in vdev_file.c for a similar purpose.
*/
spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
- ZIO_TASKQ_ISSUE, dump_bytes_strategy, &dbi, TQ_SLEEP);
+ ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
+#endif /* HAVE_LARGE_STACKS */
return (dsp->dsa_err);
}
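The dmu_send.c hunk above illustrates the general pattern: under small stacks, the real work is bounced synchronously to a taskq thread (which begins with a nearly empty stack) and waited on; under large stacks it is simply called inline. Below is a minimal sketch of that pattern using the generic SPL taskq API rather than the spa_taskq_dispatch_sync() wrapper seen in the diff; the helper names are hypothetical:

    /* Run a stack-hungry callback either inline or on a taskq thread. */
    static void
    heavy_work_cb(void *arg)
    {
            /* Deep call chain, e.g. vn_rdwr() into the target filesystem. */
    }

    static void
    do_heavy_work(taskq_t *tq, void *arg)
    {
    #if defined(HAVE_LARGE_STACKS)
            heavy_work_cb(arg);             /* 16K stacks: call directly. */
    #else
            taskqid_t id;

            /* Dispatch to a taskq thread and block until it completes. */
            id = taskq_dispatch(tq, heavy_work_cb, arg, TQ_SLEEP);
            if (id != 0)
                    taskq_wait_id(tq, id);
    #endif
    }

The synchronous wait is what makes this safe as a drop-in replacement for the direct call: the caller's semantics are unchanged, only the stack the work runs on differs.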
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c378742ed..a16266a40 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1401,6 +1401,31 @@ zio_execute(zio_t *zio)
spl_fstrans_unmark(cookie);
}
+/*
+ * Used to determine if in the current context the stack is sized large
+ * enough to allow zio_execute() to be called recursively. A minimum
+ * stack size of 16K is required to avoid needing to re-dispatch the zio.
+ */
+boolean_t
+zio_execute_stack_check(zio_t *zio)
+{
+#if !defined(HAVE_LARGE_STACKS)
+ dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
+
+ /* Executing in txg_sync_thread() context. */
+ if (dp && curthread == dp->dp_tx.tx_sync_thread)
+ return (B_TRUE);
+
+ /* Pool initialization outside of zio_taskq context. */
+ if (dp && spa_is_initializing(dp->dp_spa) &&
+ !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
+ !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
+ return (B_TRUE);
+#endif /* HAVE_LARGE_STACKS */
+
+ return (B_FALSE);
+}
+
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
@@ -1410,8 +1435,6 @@ __zio_execute(zio_t *zio)
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
- dsl_pool_t *dp;
- boolean_t cut;
int rv;
ASSERT(!MUTEX_HELD(&zio->io_lock));
@@ -1424,10 +1447,6 @@ __zio_execute(zio_t *zio)
ASSERT(stage <= ZIO_STAGE_DONE);
- dp = spa_get_dsl(zio->io_spa);
- cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
- zio_requeue_io_start_cut_in_line : B_FALSE;
-
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
@@ -1439,21 +1458,19 @@ __zio_execute(zio_t *zio)
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
+ boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
+ zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
- * If we executing in the context of the tx_sync_thread,
- * or we are performing pool initialization outside of a
- * zio_taskq[ZIO_TASKQ_ISSUE|ZIO_TASKQ_ISSUE_HIGH] context.
- * Then issue the zio asynchronously to minimize stack usage
- * for these deep call paths.
+ * If the current context doesn't have large enough stacks
+ * the zio must be issued asynchronously to prevent overflow.
*/
- if ((dp && curthread == dp->dp_tx.tx_sync_thread) ||
- (dp && spa_is_initializing(dp->dp_spa) &&
- !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
- !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))) {
+ if (zio_execute_stack_check(zio)) {
+ boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
+ zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
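Taken together, the zio.c changes reduce __zio_execute() to two requeue conditions per pipeline stage. Condensed for illustration (names follow the diff, but the body below is a sketch, not the actual function, and elides how the stage itself is run):

    static inline void
    __zio_execute_sketch(zio_t *zio)
    {
        while (zio->io_stage < ZIO_STAGE_DONE) {
            enum zio_stage stage = zio->io_stage;
            boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
                zio_requeue_io_start_cut_in_line : B_FALSE;

            /* Requeue if this stage could block in interrupt context. */
            if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
                zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
                zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
                return;
            }

            /*
             * Requeue if the current thread's stack is too small to
             * recurse safely. With HAVE_LARGE_STACKS this check always
             * returns B_FALSE, so the pipeline continues inline.
             */
            if (zio_execute_stack_check(zio)) {
                zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
                return;
            }

            /* ... otherwise run the stage and advance zio->io_stage ... */
        }
    }

Factoring the small-stack heuristics into zio_execute_stack_check() is what lets the HAVE_LARGE_STACKS build compile the second requeue away entirely while keeping the legacy behavior intact for 8K-stack kernels.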