summaryrefslogtreecommitdiffstats
path: root/module/spl/spl-taskq.c
diff options
context:
space:
mode:
author: James Cowgill <[email protected]>	2017-10-30 18:16:56 +0000
committer: Brian Behlendorf <[email protected]>	2017-10-30 11:16:56 -0700
commit35a44fcb8d6e346f51be82dfe57562c2ea0c6a9c (patch)
tree65c30f6eac8879af662ead0deeab697c04282e48 /module/spl/spl-taskq.c
parent8be368899918e2786f2fed84dc746de1894b06c1 (diff)
Remove all spin_is_locked calls
On systems with CONFIG_SMP turned off, spin_is_locked always returns false, causing these assertions to fail. Remove them as suggested in zfsonlinux/zfs#6558.

Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: James Cowgill <[email protected]>
Closes #665
Diffstat (limited to 'module/spl/spl-taskq.c')
-rw-r--r--module/spl/spl-taskq.c13
1 file changed, 0 insertions(+), 13 deletions(-)
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 7cad9f76b..50f6f520f 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -103,7 +103,6 @@ task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
int count = 0;
ASSERT(tq);
- ASSERT(spin_is_locked(&tq->tq_lock));
retry:
/* Acquire taskq_ent_t's from free list if available */
if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
@@ -168,7 +167,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
- ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(list_empty(&t->tqent_list));
ASSERT(!timer_pending(&t->tqent_timer));
@@ -185,7 +183,6 @@ task_done(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
- ASSERT(spin_is_locked(&tq->tq_lock));
/* Wake tasks blocked in taskq_wait_id() */
wake_up_all(&t->tqent_waitq);
@@ -259,7 +256,6 @@ taskq_lowest_id(taskq_t *tq)
taskq_thread_t *tqt;
ASSERT(tq);
- ASSERT(spin_is_locked(&tq->tq_lock));
if (!list_empty(&tq->tq_pend_list)) {
t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
@@ -297,7 +293,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
ASSERT(tq);
ASSERT(tqt);
- ASSERT(spin_is_locked(&tq->tq_lock));
list_for_each_prev(l, &tq->tq_active_list) {
w = list_entry(l, taskq_thread_t, tqt_active_list);
@@ -320,8 +315,6 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
struct list_head *l;
taskq_ent_t *t;
- ASSERT(spin_is_locked(&tq->tq_lock));
-
list_for_each(l, lh) {
t = list_entry(l, taskq_ent_t, tqent_list);
@@ -348,8 +341,6 @@ taskq_find(taskq_t *tq, taskqid_t id)
struct list_head *l;
taskq_ent_t *t;
- ASSERT(spin_is_locked(&tq->tq_lock));
-
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
return (t);
@@ -751,8 +742,6 @@ taskq_next_ent(taskq_t *tq)
{
struct list_head *list;
- ASSERT(spin_is_locked(&tq->tq_lock));
-
if (!list_empty(&tq->tq_prio_list))
list = &tq->tq_prio_list;
else if (!list_empty(&tq->tq_pend_list))
@@ -817,8 +806,6 @@ taskq_thread_spawn(taskq_t *tq)
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
- ASSERT(spin_is_locked(&tq->tq_lock));
-
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);