path: root/module/spl/spl-taskq.c
author    Ricardo M. Correia <[email protected]>    2010-08-02 09:24:01 +0000
committer Brian Behlendorf <[email protected]>    2010-08-02 11:20:31 -0700
commit    26f7245c7cfa77f25aedf1a500db689343644ead (patch)
tree      5359f350cd21d7869bc4ee4b22413a46108d4089 /module/spl/spl-taskq.c
parent    41f84a8d56c00f3c95a4bf0b6027bedd7abb5b15 (diff)
Fix taskq code to not drop tasks when TQ_SLEEP is used.
When TQ_SLEEP is used, taskq_dispatch() should always succeed even if the number of pending tasks is above tq->tq_maxalloc. These semantics are similar to those of KM_SLEEP kmem allocations, which also always succeed.

However, we cannot block forever, otherwise there is a risk of deadlock. Therefore, we still allow the number of pending tasks to go above tq->tq_maxalloc with TQ_SLEEP, but we may sleep for up to 1 second per task dispatch, thereby throttling the task dispatch rate.

One of the existing splat tests was also augmented to test for this scenario. The test would fail with the previous implementation but now it succeeds.

Signed-off-by: Brian Behlendorf <[email protected]>
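For context, the reworked allocation path behaves roughly like the following simplified user-space sketch. This is not the actual SPL code; names such as poll_free_list(), MAX_ALLOC, and task_alloc_sketch() are illustrative stand-ins for the logic in task_alloc() in spl-taskq.c. With TQ_SLEEP the caller polls the free list in 10 ms steps, and after roughly one second it stops waiting and allocates a new task even though the tq_maxalloc limit has been exceeded:

/*
 * Simplified, illustrative sketch of the throttling behaviour introduced
 * by this patch.  All names here are placeholders; the real code lives in
 * task_alloc() in module/spl/spl-taskq.c.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TQ_SLEEP    0x01
#define TQ_NOSLEEP  0x02
#define MAX_ALLOC   8      /* stands in for tq->tq_maxalloc */
#define POLL_MS     10     /* stands in for schedule_timeout(HZ / 100) */
#define MAX_POLLS   100    /* ~1 second of polling before allocating anyway */

static int nalloc;         /* stands in for tq->tq_nalloc */

/* Pretend to look for a cached task; returns NULL when none are free. */
static void *poll_free_list(void) { return NULL; }

static void *task_alloc_sketch(int flags)
{
    void *t;
    int count;

    for (count = 0; count < MAX_POLLS; count++) {
        if ((t = poll_free_list()) != NULL)
            return t;                   /* reuse a free task */
        if (nalloc < MAX_ALLOC)
            break;                      /* room to allocate a new task */
        if (flags & TQ_NOSLEEP)
            return NULL;                /* old and new behaviour: fail fast */
        usleep(POLL_MS * 1000);         /* TQ_SLEEP: throttle, then retry */
    }

    /*
     * Key change of the patch: with TQ_SLEEP we fall through and allocate
     * even when nalloc >= MAX_ALLOC, instead of returning NULL after ~1 s.
     */
    nalloc++;
    return malloc(64);
}

int main(void)
{
    nalloc = MAX_ALLOC;                 /* queue already "full" */
    printf("TQ_NOSLEEP -> %p\n", task_alloc_sketch(TQ_NOSLEEP));
    printf("TQ_SLEEP   -> %p\n", task_alloc_sketch(TQ_SLEEP));
    return 0;
}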
Diffstat (limited to 'module/spl/spl-taskq.c')
-rw-r--r--  module/spl/spl-taskq.c  |  47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 201cb5949..5a17f1ccf 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -78,35 +78,36 @@ retry:
if (flags & TQ_NOSLEEP)
SRETURN(NULL);
- /* Sleep periodically polling the free list for an available
- * spl_task_t. If a full second passes and we have not found
- * one gives up and return a NULL to the caller. */
- if (flags & TQ_SLEEP) {
- spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- schedule_timeout(HZ / 100);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (count < 100)
- SGOTO(retry, count++);
-
- SRETURN(NULL);
- }
-
- /* Unreachable, Neither TQ_SLEEP or TQ_NOSLEEP set */
- PANIC("Neither TQ_SLEEP or TQ_NOSLEEP set");
+ /*
+ * Sleep periodically polling the free list for an available
+ * spl_task_t. Dispatching with TQ_SLEEP should always succeed
+ * but we cannot block forever waiting for an spl_taskq_t to
+ * show up in the free list, otherwise a deadlock can happen.
+ *
+ * Therefore, we need to allocate a new task even if the number
+ * of allocated tasks is above tq->tq_maxalloc, but we still
+ * end up delaying the task allocation by one second, thereby
+ * throttling the task dispatch rate.
+ */
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+ schedule_timeout(HZ / 100);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ if (count < 100)
+ SGOTO(retry, count++);
}
- spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (t) {
- spin_lock_init(&t->t_lock);
+ if (t) {
+ spin_lock_init(&t->t_lock);
INIT_LIST_HEAD(&t->t_list);
- t->t_id = 0;
- t->t_func = NULL;
- t->t_arg = NULL;
- tq->tq_nalloc++;
- }
+ t->t_id = 0;
+ t->t_func = NULL;
+ t->t_arg = NULL;
+ tq->tq_nalloc++;
+ }
SRETURN(t);
}
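From the caller's side, the new semantics described in the comment added by this hunk can be illustrated with the hedged sketch below. The names my_tq, my_worker, and queue_work are hypothetical and not part of this patch; the sketch assumes a kernel-module context built against the SPL headers. With TQ_SLEEP the dispatch may be delayed (up to ~1 s per call when the queue is over tq_maxalloc) but is expected to succeed, so callers no longer need their own retry loop, while TQ_NOSLEEP callers must still handle a zero return.

/* Illustrative caller of taskq_dispatch() under the new semantics. */
#include <sys/taskq.h>

static void my_worker(void *arg)
{
    /* ... do the deferred work ... */
}

static int queue_work(taskq_t *my_tq, void *arg)
{
    taskqid_t id;

    /*
     * With TQ_SLEEP the dispatch may be throttled but should succeed,
     * mirroring KM_SLEEP kmem allocations.
     */
    id = taskq_dispatch(my_tq, my_worker, arg, TQ_SLEEP);
    if (id == 0)
        return (-1);    /* defensive: not expected with TQ_SLEEP */

    return (0);
}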