path: root/module/spl/spl-taskq.c
author		tuxoko <[email protected]>	2015-11-06 15:00:55 -0800
committer	Brian Behlendorf <[email protected]>	2015-11-13 15:02:55 -0800
commit		f5f2b87df0362242b13b8183a2a8d88be63b0e73 (patch)
tree		4618a8a290f8961fe6998845fd10d0a63dc4efc0 /module/spl/spl-taskq.c
parent		3e7e6f34d0b39b210de68fd69a0c08c6d21227a5 (diff)
Fix taskq dynamic spawning
Currently taskq_dispatch() will only spawn a new thread on the condition that the caller is itself a member of the taskq. However, even with this condition in place, a deadlock can still occur: a task on tq1 waits on another thread, which in turn is trying to dispatch a task on tq1. So this patch removes the check. For example, running:

    zfs send pp/fs0@001 | zfs recv pp/fs0_copy

easily deadlocks before this patch.

Also, move the seq_tasks check from taskq_thread_spawn() to taskq_thread(), because it is not needed when the caller is taskq_dispatch().

Signed-off-by: Chunwei Chen <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #496
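For illustration only (not part of the commit): a minimal sketch of the deadlock pattern the patch removes, assuming the SPL taskq API from <sys/taskq.h>. The names blocked_task(), unblock_task(), and helper_thread() are hypothetical.

#include <sys/taskq.h>
#include <linux/completion.h>

static taskq_t *tq1;
static DECLARE_COMPLETION(helper_done);

/* Hypothetical task: occupies a tq1 thread until the helper signals. */
static void
blocked_task(void *arg)
{
	wait_for_completion(&helper_done);
}

/* Hypothetical task the helper dispatches to wake blocked_task(). */
static void
unblock_task(void *arg)
{
	complete(&helper_done);
}

/*
 * Hypothetical helper kthread that is NOT a member of tq1.  If every
 * tq1 thread is parked in blocked_task(), then tq_nactive equals
 * tq_nthreads, but the pre-patch taskq_member_impl(tq1, current) test
 * is false here, so taskq_thread_spawn() is skipped: unblock_task()
 * never gets a thread to run on, helper_done never completes, and
 * both sides wait forever.
 */
static int
helper_thread(void *arg)
{
	(void) taskq_dispatch(tq1, unblock_task, NULL, TQ_SLEEP);
	return (0);
}

With the patch applied, the saturated dispatch above spawns a new tq1 thread regardless of which thread is dispatching, so unblock_task() runs and the cycle breaks.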
Diffstat (limited to 'module/spl/spl-taskq.c')
-rw-r--r--	module/spl/spl-taskq.c	25
1 file changed, 11 insertions, 14 deletions
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index f6ef56251..2c2e3ad46 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -538,7 +538,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
}
EXPORT_SYMBOL(taskq_cancel_id);
-static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);
+static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
@@ -587,9 +587,8 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
- if (tq->tq_nactive == tq->tq_nthreads &&
- taskq_member_impl(tq, current))
- (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
return (rc);
@@ -635,9 +634,8 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
spin_unlock(&t->tqent_lock);
out:
/* Spawn additional taskq threads if required. */
- if (tq->tq_nactive == tq->tq_nthreads &&
- taskq_member_impl(tq, current))
- (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
return (rc);
}
@@ -683,9 +681,8 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
- if (tq->tq_nactive == tq->tq_nthreads &&
- taskq_member_impl(tq, current))
- (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
@@ -756,15 +753,14 @@ taskq_thread_spawn_task(void *arg)
* which is also a dynamic taskq cannot be safely used for this.
*/
static int
-taskq_thread_spawn(taskq_t *tq, int seq_tasks)
+taskq_thread_spawn(taskq_t *tq)
{
int spawning = 0;
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);
- if ((seq_tasks > spl_taskq_thread_sequential) &&
- (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
+ if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
(tq->tq_flags & TASKQ_ACTIVE)) {
spawning = (++tq->tq_nspawn);
taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
@@ -898,7 +894,8 @@ taskq_thread(void *args)
}
/* Spawn additional taskq threads if required. */
- if (taskq_thread_spawn(tq, ++seq_tasks))
+ if ((++seq_tasks) > spl_taskq_thread_sequential &&
+ taskq_thread_spawn(tq))
seq_tasks = 0;
tqt->tqt_id = 0;
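Taken together, the post-patch spawn policy reads roughly as follows (a paraphrase of the hunks above, not additional code from the commit):

	/* taskq_dispatch() and friends: spawn whenever the queue is
	 * saturated, regardless of which thread is dispatching. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	/* taskq_thread(): only after a worker has run more than
	 * spl_taskq_thread_sequential tasks back-to-back does it ask
	 * for another thread, keeping the seq_tasks throttle out of
	 * the dispatch path. */
	if ((++seq_tasks) > spl_taskq_thread_sequential &&
	    taskq_thread_spawn(tq))
		seq_tasks = 0;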