author | Chunwei Chen <[email protected]> | 2016-05-20 18:04:03 -0700
committer | Brian Behlendorf <[email protected]> | 2016-05-24 13:00:17 -0700
commit | 5ce028b0d4b650b42cb81b3fdf71b517adce4552 (patch)
tree | 869646035542a74377515ff6b253382af72e85bc /module
parent | 872e0cc9c7334f7aedca05f41eca5ddecf6ff72b (diff)
Fix race between taskq_destroy and dynamic spawning thread
Although taskq_destroy waits for dynamic_taskq to finish its tasks, that does not
imply the thread being spawned is up and running, so the taskq can be freed
before the new thread has a chance to exit.

We fix this by using tq_nspawn to count the threads that are being spawned but
have not yet been inserted into the thread list, and by having taskq_destroy
wait for that count to drop to zero.
Signed-off-by: Chunwei Chen <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
Issue #553
Closes #550
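To make the handshake easier to follow, here is a minimal user-space sketch of the same counting scheme, not the SPL code itself: the names fake_taskq, spawn(), worker(), and destroy() are invented stand-ins for taskq_t, the dynamic spawn path, taskq_thread(), and taskq_destroy(), and a pthread mutex plus usleep() stand in for the taskq spinlock and schedule_timeout_interruptible(1).

/*
 * Sketch of the tq_nspawn handshake: the spawner bumps a counter before
 * the thread exists, the thread drops it once it has registered itself,
 * and teardown refuses to proceed while the counter is non-zero.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_taskq {
	pthread_mutex_t lock;
	int nspawn;          /* threads requested but not yet registered */
	int nthreads;        /* threads that have registered themselves */
};

static void *worker(void *arg)
{
	struct fake_taskq *tq = arg;

	usleep(1000);        /* simulate scheduling delay before registration */

	pthread_mutex_lock(&tq->lock);
	tq->nthreads++;
	tq->nspawn--;        /* registration done: drop the in-flight count */
	pthread_mutex_unlock(&tq->lock);
	return (NULL);
}

/* Request one dynamically spawned thread. */
static int spawn(struct fake_taskq *tq, pthread_t *t)
{
	pthread_mutex_lock(&tq->lock);
	tq->nspawn++;        /* count the spawn before the thread exists */
	pthread_mutex_unlock(&tq->lock);

	if (pthread_create(t, NULL, worker, tq) != 0) {
		/* restore the count on failure, as in the first hunk */
		pthread_mutex_lock(&tq->lock);
		tq->nspawn--;
		pthread_mutex_unlock(&tq->lock);
		return (-1);
	}
	return (0);
}

/* Teardown: do not free until every in-flight spawn has registered. */
static void destroy(struct fake_taskq *tq)
{
	pthread_mutex_lock(&tq->lock);
	while (tq->nspawn > 0) {
		pthread_mutex_unlock(&tq->lock);
		usleep(100);  /* stand-in for schedule_timeout_interruptible(1) */
		pthread_mutex_lock(&tq->lock);
	}
	pthread_mutex_unlock(&tq->lock);
	printf("safe to free: %d threads registered, 0 in flight\n",
	    tq->nthreads);
}

int main(void)
{
	struct fake_taskq tq = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	pthread_t t;

	if (spawn(&tq, &t) == 0) {
		destroy(&tq); /* blocks until the worker has registered */
		pthread_join(t, NULL);
	}
	return (0);
}

As in the patch, the counter is incremented before the thread is created, decremented either by the new thread once it is visible to teardown or by the spawner on failure, and teardown drops the lock while polling so the new thread can make progress.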
Diffstat (limited to 'module')
-rw-r--r-- | module/spl/spl-taskq.c | 30
1 file changed, 25 insertions, 5 deletions
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index bfcf651af..9784473bd 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -763,11 +763,12 @@ taskq_thread_spawn_task(void *arg)
 	taskq_t *tq = (taskq_t *)arg;
 	unsigned long flags;
 
-	(void) taskq_thread_create(tq);
-
-	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
-	tq->tq_nspawn--;
-	spin_unlock_irqrestore(&tq->tq_lock, flags);
+	if (taskq_thread_create(tq) == NULL) {
+		/* restore spawning count if failed */
+		spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+		tq->tq_nspawn--;
+		spin_unlock_irqrestore(&tq->tq_lock, flags);
+	}
 }
 
 /*
@@ -848,6 +849,14 @@ taskq_thread(void *args)
 	tsd_set(taskq_tsd, tq);
 
 	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+	/*
+	 * If we are dynamically spawned, decrease spawning count. Note that
+	 * we could be created during taskq_create, in which case we shouldn't
+	 * do the decrement. But it's fine because taskq_create will reset
+	 * tq_nspawn later.
+	 */
+	if (tq->tq_flags & TASKQ_DYNAMIC)
+		tq->tq_nspawn--;
 
 	/* Immediately exit if more threads than allowed were created. */
 	if (tq->tq_nthreads >= tq->tq_maxthreads)
@@ -1063,6 +1072,11 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 
 	/* Wait for all threads to be started before potential destroy */
 	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
+	/*
+	 * taskq_thread might have touched nspawn, but we don't want them to
+	 * because they're not dynamically spawned. So we reset it to 0
+	 */
+	tq->tq_nspawn = 0;
 
 	if (rc) {
 		taskq_destroy(tq);
@@ -1106,6 +1120,12 @@ taskq_destroy(taskq_t *tq)
 	up_write(&tq_list_sem);
 
 	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+	/* wait for spawning threads to insert themselves to the list */
+	while (tq->tq_nspawn) {
+		spin_unlock_irqrestore(&tq->tq_lock, flags);
+		schedule_timeout_interruptible(1);
+		spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
+	}
 
 	/*
 	 * Signal each thread to exit and block until it does.  Each thread