path: root/module/spl/spl-taskq.c
author    Brian Behlendorf <[email protected]>  2012-09-12 11:31:39 -0700
committer Brian Behlendorf <[email protected]>  2012-09-12 11:41:42 -0700
commit 9b51f218410ccdeb69a85d99bd5f09051ec2def1 (patch)
tree   70dbe2457b1fd3c8717b7b76605831a250a0e7ec /module/spl/spl-taskq.c
parent 330fe010e42fcdbc70832583f95248b9227ac2c4 (diff)
Remove TQ_SLEEP -> KM_SLEEP mapping
When the taskq code was originally written it seemed like a good idea to simply map TQ_SLEEP to KM_SLEEP. Unfortunately, this assumed that the TQ_* flags would never conflict with any of the Linux GFP_* flags. When adding the TQ_PUSHPAGE support in commit cd5ca4b this invariant was accidentally broken.

Therefore, to support TQ_PUSHPAGE, which is needed for Linux, and to prevent any further confusion, I have removed this direct mapping. The TQ_SLEEP, TQ_NOSLEEP, and TQ_PUSHPAGE flags are no longer defined in terms of their KM_* counterparts. Instead a simple mapping function is introduced to convert TQ_* -> KM_* where needed.

Signed-off-by: Brian Behlendorf <[email protected]>
Issue #171
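For orientation, a minimal sketch of the header change the message describes; the bit values below are illustrative assumptions, not the actual definitions in the spl taskq header:

    /* Before this commit (sketch): TQ_* dispatch flags were simple
     * aliases for the KM_* allocation flags, so any new TQ_* value
     * risked colliding with a Linux GFP_* bit. */
    #define TQ_SLEEP        KM_SLEEP
    #define TQ_NOSLEEP      KM_NOSLEEP

    /* After this commit (sketch, hypothetical bit values): TQ_* flags
     * stand alone and are converted to KM_* flags only where an
     * allocation is actually performed, via task_km_flags(). */
    #define TQ_SLEEP        0x00000000
    #define TQ_NOSLEEP      0x00000001
    #define TQ_PUSHPAGE     0x00000002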
Diffstat (limited to 'module/spl/spl-taskq.c')
-rw-r--r--  module/spl/spl-taskq.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index e4092b842..7ea20461b 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -38,6 +38,18 @@
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
+static int
+task_km_flags(uint_t flags)
+{
+ if (flags & TQ_NOSLEEP)
+ return KM_NOSLEEP;
+
+ if (flags & TQ_PUSHPAGE)
+ return KM_PUSHPAGE;
+
+ return KM_SLEEP;
+}
+
/*
* NOTE: Must be called with tq->tq_lock held, returns a list_t which
* is not attached to the free, work, or pending taskq lists.
@@ -50,8 +62,6 @@ task_alloc(taskq_t *tq, uint_t flags)
SENTRY;
ASSERT(tq);
- ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP)); /* One set */
- ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
ASSERT(spin_is_locked(&tq->tq_lock));
retry:
/* Acquire taskq_ent_t's from free list if available */
@@ -92,7 +102,7 @@ retry:
}
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
+ t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
if (t) {
@@ -251,14 +261,6 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
ASSERT(tq);
ASSERT(func);
- /* Solaris assumes TQ_SLEEP if not passed explicitly */
- if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
- flags |= TQ_SLEEP;
-
- if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
- PANIC("May schedule while atomic: %s/0x%08x/%d\n",
- current->comm, preempt_count(), current->pid);
-
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
/* Taskq being destroyed and all tasks drained */
@@ -554,7 +556,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
}
- tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
+ tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
if (tq == NULL)
SRETURN(NULL);
@@ -580,12 +582,12 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
if (flags & TASKQ_PREPOPULATE)
for (i = 0; i < minalloc; i++)
- task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));
+ task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
for (i = 0; i < nthreads; i++) {
- tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
+ tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
INIT_LIST_HEAD(&tqt->tqt_thread_list);
INIT_LIST_HEAD(&tqt->tqt_active_list);
tqt->tqt_tq = tq;
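
As a quick standalone check of the new mapping, here is a user-space sketch that reuses the task_km_flags() logic from the first hunk above; the KM_*/TQ_* stub values are assumptions standing in for the real spl headers:

    /* task_km_flags_demo.c -- sketch only, not part of the module.
     * Stub flag values; the real definitions live in the spl headers. */
    #include <stdio.h>

    #define KM_SLEEP        0x0000
    #define KM_NOSLEEP      0x0001
    #define KM_PUSHPAGE     0x0002
    #define TQ_SLEEP        0x0000
    #define TQ_NOSLEEP      0x0001
    #define TQ_PUSHPAGE     0x0002

    typedef unsigned int uint_t;

    /* Same precedence as the function added by this commit:
     * TQ_NOSLEEP wins, then TQ_PUSHPAGE, with TQ_SLEEP the default. */
    static int
    task_km_flags(uint_t flags)
    {
            if (flags & TQ_NOSLEEP)
                    return KM_NOSLEEP;

            if (flags & TQ_PUSHPAGE)
                    return KM_PUSHPAGE;

            return KM_SLEEP;
    }

    int
    main(void)
    {
            printf("TQ_NOSLEEP  -> KM 0x%x\n", task_km_flags(TQ_NOSLEEP));
            printf("TQ_PUSHPAGE -> KM 0x%x\n", task_km_flags(TQ_PUSHPAGE));
            printf("TQ_SLEEP    -> KM 0x%x\n", task_km_flags(TQ_SLEEP));
            return (0);
    }

This default-to-KM_SLEEP behavior also suggests why the TQ_SLEEP/TQ_NOSLEEP exclusivity assertions could be dropped from task_alloc(): the mapping function now tolerates any flag combination and falls back to a sleeping allocation.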