-rw-r--r--  include/sys/taskq.h    |  9
-rw-r--r--  module/spl/spl-taskq.c | 92
2 files changed, 95 insertions(+), 6 deletions(-)
diff --git a/include/sys/taskq.h b/include/sys/taskq.h
index 4ea29cb3b..54d869afe 100644
--- a/include/sys/taskq.h
+++ b/include/sys/taskq.h
@@ -51,8 +51,11 @@ typedef struct taskq_ent {
 	taskqid_t	tqent_id;
 	task_func_t	*tqent_func;
 	void		*tqent_arg;
+	uintptr_t	tqent_flags;
 } taskq_ent_t;
 
+#define TQENT_FLAG_PREALLOC	0x1
+
 /*
  * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
  * KM_SLEEP/KM_NOSLEEP.  TQ_NOQUEUE/TQ_NOALLOC are set particularly
@@ -100,6 +103,9 @@ typedef struct taskq_thread {
 extern taskq_t *system_taskq;
 
 extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
+extern void __taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t, taskq_ent_t *);
+extern int __taskq_empty_ent(taskq_ent_t *);
+extern void __taskq_init_ent(taskq_ent_t *);
 extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
 extern void __taskq_destroy(taskq_t *);
 extern void __taskq_wait_id(taskq_t *, taskqid_t);
@@ -113,6 +119,9 @@ void spl_taskq_fini(void);
 #define taskq_wait_id(tq, id)		__taskq_wait_id(tq, id)
 #define taskq_wait(tq)			__taskq_wait(tq)
 #define taskq_dispatch(tq, f, p, fl)	__taskq_dispatch(tq, f, p, fl)
+#define taskq_dispatch_ent(tq, f, p, fl, t) __taskq_dispatch_ent(tq, f, p, fl, t)
+#define taskq_empty_ent(t)		__taskq_empty_ent(t)
+#define taskq_init_ent(t)		__taskq_init_ent(t)
 #define taskq_create(n, th, p, mi, ma, fl) __taskq_create(n, th, p, mi, ma, fl)
 #define taskq_create_proc(n, th, p, mi, ma, pr, fl) \
 	__taskq_create(n, th, p, mi, ma, fl)
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 5c22544b8..b2b0e6ca8 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -57,6 +57,9 @@ retry:
 	/* Acquire taskq_ent_t's from free list if available */
 	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
 		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
+
+		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
 		list_del_init(&t->tqent_list);
 		SRETURN(t);
 	}
@@ -93,11 +96,7 @@ retry:
 	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
 	if (t) {
-		spin_lock_init(&t->tqent_lock);
-		INIT_LIST_HEAD(&t->tqent_list);
-		t->tqent_id = 0;
-		t->tqent_func = NULL;
-		t->tqent_arg = NULL;
+		taskq_init_ent(t);
 		tq->tq_nalloc++;
 	}
 
@@ -136,12 +135,18 @@ task_done(taskq_t *tq, taskq_ent_t *t)
 	ASSERT(t);
 	ASSERT(spin_is_locked(&tq->tq_lock));
 
+	/* For prealloc'd tasks, we don't free anything. */
+	if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
+	    (t->tqent_flags & TQENT_FLAG_PREALLOC))
+		return;
+
 	list_del_init(&t->tqent_list);
 
 	if (tq->tq_nalloc <= tq->tq_minalloc) {
 		t->tqent_id = 0;
 		t->tqent_func = NULL;
 		t->tqent_arg = NULL;
+		t->tqent_flags = 0;
 		list_add_tail(&t->tqent_list, &tq->tq_free_list);
 	} else {
 		task_free(tq, t);
@@ -281,6 +286,9 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 	tq->tq_next_id++;
 	t->tqent_func = func;
 	t->tqent_arg = arg;
+
+	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
 	spin_unlock(&t->tqent_lock);
 
 	wake_up(&tq->tq_work_waitq);
@@ -289,6 +297,72 @@ out:
 	SRETURN(rc);
 }
 EXPORT_SYMBOL(__taskq_dispatch);
+
+void
+__taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
+    taskq_ent_t *t)
+{
+	SENTRY;
+
+	ASSERT(tq);
+	ASSERT(func);
+	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
+
+	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+
+	/* Taskq being destroyed and all tasks drained */
+	if (!(tq->tq_flags & TQ_ACTIVE)) {
+		t->tqent_id = 0;
+		goto out;
+	}
+
+	spin_lock(&t->tqent_lock);
+
+	/*
+	 * Mark it as a prealloc'd task.  This is important
+	 * to ensure that we don't free it later.
+	 */
+	t->tqent_flags |= TQENT_FLAG_PREALLOC;
+
+	/* Queue to the priority list instead of the pending list */
+	if (flags & TQ_FRONT)
+		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
+	else
+		list_add_tail(&t->tqent_list, &tq->tq_pend_list);
+
+	t->tqent_id = tq->tq_next_id;
+	tq->tq_next_id++;
+	t->tqent_func = func;
+	t->tqent_arg = arg;
+
+	spin_unlock(&t->tqent_lock);
+
+	wake_up(&tq->tq_work_waitq);
+out:
+	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+	SEXIT;
+}
+EXPORT_SYMBOL(__taskq_dispatch_ent);
+
+int
+__taskq_empty_ent(taskq_ent_t *t)
+{
+	return list_empty(&t->tqent_list);
+}
+EXPORT_SYMBOL(__taskq_empty_ent);
+
+void
+__taskq_init_ent(taskq_ent_t *t)
+{
+	spin_lock_init(&t->tqent_lock);
+	INIT_LIST_HEAD(&t->tqent_list);
+	t->tqent_id = 0;
+	t->tqent_func = NULL;
+	t->tqent_arg = NULL;
+	t->tqent_flags = 0;
+}
+EXPORT_SYMBOL(__taskq_init_ent);
+
 /*
  * Returns the lowest incomplete taskqid_t.  The taskqid_t may
  * be queued on the pending list, on the priority list, or on
@@ -407,6 +481,10 @@ taskq_thread(void *args)
 		if (pend_list) {
 			t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
 			list_del_init(&t->tqent_list);
+			/* In order to support recursively dispatching a
+			 * preallocated taskq_ent_t, tqent_id must be
+			 * stored prior to executing tqent_func. */
+			id = t->tqent_id;
 			tqt->tqt_ent = t;
 			taskq_insert_in_order(tq, tqt);
 			tq->tq_nactive++;
@@ -419,7 +497,6 @@ taskq_thread(void *args)
 			tq->tq_nactive--;
 			list_del_init(&tqt->tqt_active_list);
 			tqt->tqt_ent = NULL;
-			id = t->tqent_id;
 			task_done(tq, t);
 
 			/* When the current lowest outstanding taskqid is
@@ -570,6 +647,9 @@ __taskq_destroy(taskq_t *tq)
 	while (!list_empty(&tq->tq_free_list)) {
 		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
+
+		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
 		list_del_init(&t->tqent_list);
 		task_free(tq, t);
 	}
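To make the shape of the new interface concrete, here is a minimal consumer sketch. It assumes only what the patch declares (taskq_init_ent(), taskq_dispatch_ent(), taskq_empty_ent(), the TQ_SLEEP flag); the my_work_t structure, my_work_func(), and my_work_submit() are hypothetical names used for illustration, not part of this change:

	#include <sys/taskq.h>

	/* Hypothetical consumer object embedding its own dispatch entry. */
	typedef struct my_work {
		int		mw_data;
		taskq_ent_t	mw_tqent;	/* caller-owned storage */
	} my_work_t;

	static void
	my_work_func(void *arg)
	{
		my_work_t *mw = arg;

		/* ... perform the work; the embedded entry is back under
		 * the caller's control once task_done() has run ... */
	}

	static void
	my_work_submit(taskq_t *tq, my_work_t *mw)
	{
		/* Initialize the embedded entry before its first dispatch. */
		taskq_init_ent(&mw->mw_tqent);

		/*
		 * Because the caller supplies the taskq_ent_t, this dispatch
		 * never takes an entry from the free list or kmem, so it
		 * cannot fail for lack of memory.  Note the taskq must not
		 * be TASKQ_DYNAMIC; __taskq_dispatch_ent() asserts this.
		 */
		taskq_dispatch_ent(tq, my_work_func, mw, TQ_SLEEP,
		    &mw->mw_tqent);
	}

This is also why the rest of the patch is shaped the way it is: task_done() must leave TQENT_FLAG_PREALLOC entries alone rather than recycle them, and the new ASSERTs guarantee that a preallocated entry never appears on tq_free_list. taskq_empty_ent() simply wraps list_empty() on the entry's list head, giving callers a way to check that an entry is not still queued before reusing it.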