aboutsummaryrefslogtreecommitdiffstats
path: root/modules
diff options
context:
space:
mode:
authorbehlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>2008-11-03 20:21:08 +0000
committerbehlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>2008-11-03 20:21:08 +0000
commit749045bbfa3302f8917ef681373775245b241698 (patch)
tree1240613ad2cca83df75b11e44d49f276debd4b5e /modules
parentf6c81c5ea7ba171bdb9e6d3a29c9e2a7fa6896ad (diff)
Apply a nice fix caught by Ricardo:
* spl-04-fix-taskq-spinlock-lockup.patch Fixes a deadlock in the BIO completion handler, due to the taskq code prematurely re-enabling interrupts when another spinlock had disabled them in the IDE IRQ handler. git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@161 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
Diffstat (limited to 'modules')
-rw-r--r--modules/spl/spl-taskq.c41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/modules/spl/spl-taskq.c b/modules/spl/spl-taskq.c
index 21d63c875..3c7f43cda 100644
--- a/modules/spl/spl-taskq.c
+++ b/modules/spl/spl-taskq.c
@@ -76,9 +76,9 @@ retry:
* spl_task_t. If a full second passes and we have not found
* one gives up and return a NULL to the caller. */
if (flags & TQ_SLEEP) {
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
if (count < 100)
GOTO(retry, count++);
@@ -89,9 +89,9 @@ retry:
SBUG();
}
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
if (t) {
spin_lock_init(&t->t_lock);
@@ -185,9 +185,9 @@ __taskq_wait(taskq_t *tq)
ENTRY;
ASSERT(tq);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
id = tq->tq_next_id;
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
__taskq_wait_id(tq, id);
@@ -228,7 +228,7 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
SBUG();
}
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
@@ -242,7 +242,6 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
if ((t = task_alloc(tq, flags)) == NULL)
GOTO(out, rc = 0);
-
spin_lock(&t->t_lock);
list_add_tail(&t->t_list, &tq->tq_pend_list);
t->t_id = rc = tq->tq_next_id;
@@ -253,7 +252,7 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
wake_up(&tq->tq_work_waitq);
out:
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
RETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
@@ -297,7 +296,7 @@ taskq_thread(void *args)
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nthreads++;
wake_up(&tq->tq_wait_waitq);
set_current_state(TASK_INTERRUPTIBLE);
@@ -306,9 +305,9 @@ taskq_thread(void *args)
add_wait_queue(&tq->tq_work_waitq, &wait);
if (list_empty(&tq->tq_pend_list)) {
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule();
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
} else {
__set_current_state(TASK_RUNNING);
}
@@ -319,12 +318,12 @@ taskq_thread(void *args)
list_del_init(&t->t_list);
list_add_tail(&t->t_list, &tq->tq_work_list);
tq->tq_nactive++;
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
/* Perform the requested task */
t->t_func(t->t_arg);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nactive--;
id = t->t_id;
task_done(tq, t);
@@ -344,7 +343,7 @@ taskq_thread(void *args)
__set_current_state(TASK_RUNNING);
tq->tq_nthreads--;
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
RETURN(0);
}
@@ -375,7 +374,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
}
spin_lock_init(&tq->tq_lock);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_name = name;
tq->tq_nactive = 0;
tq->tq_nthreads = 0;
@@ -396,7 +395,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
for (i = 0; i < nthreads; i++) {
t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
@@ -432,9 +431,9 @@ __taskq_destroy(taskq_t *tq)
ENTRY;
ASSERT(tq);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_flags &= ~TQ_ACTIVE;
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
/* TQ_ACTIVE cleared prevents new tasks being added to pending */
__taskq_wait(tq);
@@ -444,7 +443,7 @@ __taskq_destroy(taskq_t *tq)
if (tq->tq_threads[i])
kthread_stop(tq->tq_threads[i]);
- spin_lock_irq(&tq->tq_lock);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
while (!list_empty(&tq->tq_free_list)) {
t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
@@ -458,7 +457,7 @@ __taskq_destroy(taskq_t *tq)
ASSERT(list_empty(&tq->tq_work_list));
ASSERT(list_empty(&tq->tq_pend_list));
- spin_unlock_irq(&tq->tq_lock);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
kmem_free(tq->tq_threads, nthreads * sizeof(spl_task_t *));
kmem_free(tq, sizeof(taskq_t));