summaryrefslogtreecommitdiffstats
path: root/module
diff options
context:
space:
mode:
author    Matthew Macy <[email protected]>  2020-07-11 17:13:45 -0700
committer GitHub <[email protected]>  2020-07-11 17:13:45 -0700
commit    3933305eaca95ad6c3003765326136d4d710ba0e (patch)
tree      a1c8f094f75c9771bd62c3310076b34dc60ed7b2 /module
parent    6f1db5f37ede685ebe55d1549c635e0aef4661b5 (diff)
FreeBSD: Use a hash table for taskqid lookups
Previously a tqent could be recycled prematurely; update the code to use a hash table for lookups to resolve this.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Matt Macy <[email protected]>
Closes #10529
Diffstat (limited to 'module')
-rw-r--r--  module/os/freebsd/spl/spl_taskq.c | 161
1 file changed, 118 insertions(+), 43 deletions(-)
diff --git a/module/os/freebsd/spl/spl_taskq.c b/module/os/freebsd/spl/spl_taskq.c
index 28cebc5dc..f32dad2a2 100644
--- a/module/os/freebsd/spl/spl_taskq.c
+++ b/module/os/freebsd/spl/spl_taskq.c
@@ -38,6 +38,8 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <sys/taskq.h>
#include <sys/zfs_context.h>
+#include <sys/ck.h>
+#include <sys/epoch.h>
#include <vm/uma.h>
@@ -50,41 +52,40 @@ taskq_t *dynamic_taskq = NULL;
extern int uma_align_cache;
-#define TQ_MASK uma_align_cache
-#define TQ_PTR_MASK ~uma_align_cache
+static MALLOC_DEFINE(M_TASKQ, "taskq", "taskq structures");
-#define TIMEOUT_TASK 1
-#define NORMAL_TASK 2
+static CK_LIST_HEAD(tqenthashhead, taskq_ent) *tqenthashtbl;
+static unsigned long tqenthash;
+static unsigned long tqenthashlock;
+static struct sx *tqenthashtbl_lock;
-static int
-taskqent_init(void *mem, int size, int flags)
-{
- bzero(mem, sizeof (taskq_ent_t));
- return (0);
-}
+static uint32_t tqidnext = 1;
-static int
-taskqent_ctor(void *mem, int size, void *arg, int flags)
-{
- return (0);
-}
-
-static void
-taskqent_dtor(void *mem, int size, void *arg)
-{
- taskq_ent_t *ent = mem;
+#define TQIDHASH(tqid) (&tqenthashtbl[(tqid) & tqenthash])
+#define TQIDHASHLOCK(tqid) (&tqenthashtbl_lock[((tqid) & tqenthashlock)])
- ent->tqent_gen = (ent->tqent_gen + 1) & TQ_MASK;
-}
+#define TIMEOUT_TASK 1
+#define NORMAL_TASK 2
static void
system_taskq_init(void *arg)
{
+ int i;
tsd_create(&taskq_tsd, NULL);
+ tqenthashtbl = hashinit(mp_ncpus * 8, M_TASKQ, &tqenthash);
+ tqenthashlock = (tqenthash + 1) / 8;
+ if (tqenthashlock > 0)
+ tqenthashlock--;
+ tqenthashtbl_lock =
+ malloc(sizeof (*tqenthashtbl_lock) * (tqenthashlock + 1),
+ M_TASKQ, M_WAITOK | M_ZERO);
+ for (i = 0; i < tqenthashlock + 1; i++)
+ sx_init_flags(&tqenthashtbl_lock[i], "tqenthash", SX_DUPOK);
+ tqidnext = 1;
taskq_zone = uma_zcreate("taskq_zone", sizeof (taskq_ent_t),
- taskqent_ctor, taskqent_dtor, taskqent_init, NULL,
- UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_CACHE, 0);
system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
0, 0, 0);
system_delay_taskq = taskq_create("system_delay_taskq", mp_ncpus,
@@ -96,15 +97,65 @@ SYSINIT(system_taskq_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_init,
static void
system_taskq_fini(void *arg)
{
+ int i;
taskq_destroy(system_delay_taskq);
taskq_destroy(system_taskq);
uma_zdestroy(taskq_zone);
tsd_destroy(&taskq_tsd);
+ for (i = 0; i < tqenthashlock + 1; i++)
+ sx_destroy(&tqenthashtbl_lock[i]);
+ for (i = 0; i < tqenthash + 1; i++)
+ VERIFY(CK_LIST_EMPTY(&tqenthashtbl[i]));
+ free(tqenthashtbl_lock, M_TASKQ);
+ free(tqenthashtbl, M_TASKQ);
}
SYSUNINIT(system_taskq_fini, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_fini,
NULL);
+static taskq_ent_t *
+taskq_lookup(taskqid_t tqid)
+{
+ taskq_ent_t *ent = NULL;
+
+ sx_xlock(TQIDHASHLOCK(tqid));
+ CK_LIST_FOREACH(ent, TQIDHASH(tqid), tqent_hash) {
+ if (ent->tqent_id == tqid)
+ break;
+ }
+ if (ent != NULL)
+ refcount_acquire(&ent->tqent_rc);
+ sx_xunlock(TQIDHASHLOCK(tqid));
+ return (ent);
+}
+
+static taskqid_t
+taskq_insert(taskq_ent_t *ent)
+{
+ taskqid_t tqid = atomic_fetchadd_int(&tqidnext, 1);
+
+ ent->tqent_id = tqid;
+ ent->tqent_registered = B_TRUE;
+ sx_xlock(TQIDHASHLOCK(tqid));
+ CK_LIST_INSERT_HEAD(TQIDHASH(tqid), ent, tqent_hash);
+ sx_xunlock(TQIDHASHLOCK(tqid));
+ return (tqid);
+}
+
+static void
+taskq_remove(taskq_ent_t *ent)
+{
+ taskqid_t tqid = ent->tqent_id;
+
+ if (!ent->tqent_registered)
+ return;
+
+ sx_xlock(TQIDHASHLOCK(tqid));
+ CK_LIST_REMOVE(ent, tqent_hash);
+ sx_xunlock(TQIDHASHLOCK(tqid));
+ ent->tqent_registered = B_FALSE;
+}
+
static void
taskq_tsd_set(void *context)
{
@@ -174,26 +225,44 @@ taskq_of_curthread(void)
return (tsd_get(taskq_tsd));
}
+static void
+taskq_free(taskq_ent_t *task)
+{
+ taskq_remove(task);
+ if (refcount_release(&task->tqent_rc))
+ uma_zfree(taskq_zone, task);
+}
+
int
taskq_cancel_id(taskq_t *tq, taskqid_t tid)
{
uint32_t pend;
int rc;
- taskq_ent_t *ent = (void*)(tid & TQ_PTR_MASK);
+ taskq_ent_t *ent;
- if (ent == NULL)
+ if (tid == 0)
return (0);
- if ((tid & TQ_MASK) != ent->tqent_gen)
+
+ if ((ent = taskq_lookup(tid)) == NULL)
return (0);
+
+ ent->tqent_cancelled = B_TRUE;
if (ent->tqent_type == TIMEOUT_TASK) {
rc = taskqueue_cancel_timeout(tq->tq_queue,
&ent->tqent_timeout_task, &pend);
} else
rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
- if (rc == EBUSY)
- taskq_wait_id(tq, tid);
- else
- uma_zfree(taskq_zone, ent);
+ if (rc == EBUSY) {
+ taskqueue_drain(tq->tq_queue, &ent->tqent_task);
+ } else if (pend) {
+ /*
+ * Tasks normally free themselves when run, but here the task
+ * was cancelled so it did not free itself.
+ */
+ taskq_free(ent);
+ }
+ /* Free the extra reference we added with taskq_lookup. */
+ taskq_free(ent);
return (rc);
}
@@ -202,8 +271,9 @@ taskq_run(void *arg, int pending __unused)
{
taskq_ent_t *task = arg;
- task->tqent_func(task->tqent_arg);
- uma_zfree(taskq_zone, task);
+ if (!task->tqent_cancelled)
+ task->tqent_func(task->tqent_arg);
+ taskq_free(task);
}
taskqid_t
@@ -227,12 +297,12 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
task = uma_zalloc(taskq_zone, mflag);
if (task == NULL)
return (0);
- tid = (uintptr_t)task;
- MPASS((tid & TQ_MASK) == 0);
task->tqent_func = func;
task->tqent_arg = arg;
task->tqent_type = TIMEOUT_TASK;
- tid |= task->tqent_gen;
+ task->tqent_cancelled = B_FALSE;
+ refcount_init(&task->tqent_rc, 1);
+ tid = taskq_insert(task);
TIMEOUT_TASK_INIT(tq->tq_queue, &task->tqent_timeout_task, 0,
taskq_run, task);
@@ -261,15 +331,15 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
task = uma_zalloc(taskq_zone, mflag);
if (task == NULL)
return (0);
-
- tid = (uintptr_t)task;
- MPASS((tid & TQ_MASK) == 0);
+ refcount_init(&task->tqent_rc, 1);
task->tqent_func = func;
task->tqent_arg = arg;
+ task->tqent_cancelled = B_FALSE;
task->tqent_type = NORMAL_TASK;
+ tid = taskq_insert(task);
TASK_INIT(&task->tqent_task, prio, taskq_run, task);
- tid |= task->tqent_gen;
taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
+ VERIFY(tid);
return (tid);
}
@@ -292,7 +362,9 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint32_t flags,
* can go at the front of the queue.
*/
prio = !!(flags & TQ_FRONT);
-
+ task->tqent_cancelled = B_FALSE;
+ task->tqent_registered = B_FALSE;
+ task->tqent_id = 0;
task->tqent_func = func;
task->tqent_arg = arg;
@@ -309,12 +381,15 @@ taskq_wait(taskq_t *tq)
void
taskq_wait_id(taskq_t *tq, taskqid_t tid)
{
- taskq_ent_t *ent = (void*)(tid & TQ_PTR_MASK);
+ taskq_ent_t *ent;
- if ((tid & TQ_MASK) != ent->tqent_gen)
+ if (tid == 0)
+ return;
+ if ((ent = taskq_lookup(tid)) == NULL)
return;
taskqueue_drain(tq->tq_queue, &ent->tqent_task);
+ taskq_free(ent);
}
void