author    | Brian Behlendorf <[email protected]> | 2014-11-05 17:30:35 -0500
committer | Brian Behlendorf <[email protected]> | 2014-11-19 10:35:07 -0800
commit    | 8d9a23e82cea5d897e9357d569ef364106703d5a (patch)
tree      | e49678dab2f5b419d630d388aca924a3c6e4fc72 /module/spl/spl-taskq.c
parent    | 917fef273295616c563bbb0a5f6986cfce543d2f (diff)
Retire legacy debugging infrastructure
When the SPL was originally written Linux tracepoints were still
in their infancy. Therefore, an entire debugging subsystem was
added to facilitate tracing, which served us well for many years.
Now that Linux tracepoints have matured they provide all the
functionality of the previous tracing subsystem. Rather than
maintain parallel functionality it makes sense to fully adopt
tracepoints. Therefore, this patch retires the legacy debugging
infrastructure.
See zfsonlinux/zfs@bc9f413 for the tracepoint changes.
Signed-off-by: Ned Bass <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #408
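
For context, the retired macros wrapped ordinary control flow so that function entry and exit could be logged through the SPL debug subsystem. The sketch below is a rough reconstruction inferred from how this patch rewrites the call sites (SRETURN(t) becomes return (t); SGOTO(retry, count++) becomes count++; goto retry;); it is not the actual spl-debug.h definitions, which also emitted a debug message:

/*
 * Hedged sketch only: semantics inferred from the call-site rewrites
 * in this patch; the real spl-debug.h versions also logged a message
 * through the SPL debug subsystem before transferring control.
 */
#define SENTRY          ((void)0)       /* logged function entry */
#define SEXIT           ((void)0)       /* logged function exit */

#define SRETURN(rc)                                             \
do {                                                            \
        /* logged the exit, then returned the value */          \
        return (rc);                                            \
} while (0)

#define SGOTO(label, rc)                                        \
do {                                                            \
        /* logged the jump; (rc) evaluated for side effects */  \
        (void)(rc);                                             \
        goto label;                                             \
} while (0)

Because every return and goto had to route through these wrappers, dropping them lets the code in the diff below use plain C control flow.
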
Diffstat (limited to 'module/spl/spl-taskq.c')
-rw-r--r-- | module/spl/spl-taskq.c | 106
1 file changed, 34 insertions, 72 deletions
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 0cb2ceeaf..951298d9f 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -26,13 +26,6 @@
 #include <sys/taskq.h>
 #include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_TASKQ
 
 int spl_taskq_thread_bind = 0;
 module_param(spl_taskq_thread_bind, int, 0644);
@@ -63,7 +56,6 @@ task_alloc(taskq_t *tq, uint_t flags)
 {
        taskq_ent_t *t;
        int count = 0;
-       SENTRY;
 
        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));
@@ -77,17 +69,17 @@ retry:
                ASSERT(!timer_pending(&t->tqent_timer));
 
                list_del_init(&t->tqent_list);
-               SRETURN(t);
+               return (t);
        }
 
        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
-               SRETURN(NULL);
+               return (NULL);
 
        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
-                       SRETURN(NULL);
+                       return (NULL);
 
                /*
                 * Sleep periodically polling the free list for an available
@@ -103,8 +95,10 @@ retry:
                spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
-               if (count < 100)
-                       SGOTO(retry, count++);
+               if (count < 100) {
+                       count++;
+                       goto retry;
+               }
        }
 
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
@@ -116,7 +110,7 @@ retry:
                tq->tq_nalloc++;
        }
 
-       SRETURN(t);
+       return (t);
 }
 
 /*
@@ -126,8 +120,6 @@ retry:
 static void
 task_free(taskq_t *tq, taskq_ent_t *t)
 {
-       SENTRY;
-
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
@@ -136,8 +128,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
        kmem_free(t, sizeof(taskq_ent_t));
 
        tq->tq_nalloc--;
-
-       SEXIT;
 }
 
 /*
@@ -147,7 +137,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
 static void
 task_done(taskq_t *tq, taskq_ent_t *t)
 {
-       SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
@@ -167,8 +156,6 @@ task_done(taskq_t *tq, taskq_ent_t *t)
        } else {
                task_free(tq, t);
        }
-
-       SEXIT;
 }
 
 /*
@@ -222,7 +209,6 @@ taskq_lowest_id(taskq_t *tq)
        taskqid_t lowest_id = tq->tq_next_id;
        taskq_ent_t *t;
        taskq_thread_t *tqt;
-       SENTRY;
 
        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));
@@ -249,7 +235,7 @@ taskq_lowest_id(taskq_t *tq)
                lowest_id = MIN(lowest_id, tqt->tqt_id);
        }
 
-       SRETURN(lowest_id);
+       return (lowest_id);
 }
 
 /*
@@ -261,7 +247,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
        taskq_thread_t *w;
        struct list_head *l;
-       SENTRY;
 
        ASSERT(tq);
        ASSERT(tqt);
        ASSERT(spin_is_locked(&tq->tq_lock));
@@ -275,8 +260,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
        }
        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);
-
-       SEXIT;
 }
 
 /*
@@ -288,7 +271,6 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
 {
        struct list_head *l;
        taskq_ent_t *t;
-       SENTRY;
 
        ASSERT(spin_is_locked(&tq->tq_lock));
 
@@ -296,13 +278,13 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
                t = list_entry(l, taskq_ent_t, tqent_list);
 
                if (t->tqent_id == id)
-                       SRETURN(t);
+                       return (t);
 
                if (t->tqent_id > id)
                        break;
        }
 
-       SRETURN(NULL);
+       return (NULL);
 }
 
 /*
@@ -317,33 +299,32 @@ taskq_find(taskq_t *tq, taskqid_t id, int *active)
        taskq_thread_t *tqt;
        struct list_head *l;
        taskq_ent_t *t;
-       SENTRY;
 
        ASSERT(spin_is_locked(&tq->tq_lock));
        *active = 0;
 
        t = taskq_find_list(tq, &tq->tq_delay_list, id);
        if (t)
-               SRETURN(t);
+               return (t);
 
        t = taskq_find_list(tq, &tq->tq_prio_list, id);
        if (t)
-               SRETURN(t);
+               return (t);
 
        t = taskq_find_list(tq, &tq->tq_pend_list, id);
        if (t)
-               SRETURN(t);
+               return (t);
 
        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        t = tqt->tqt_task;
                        *active = 1;
-                       SRETURN(t);
+                       return (t);
                }
        }
 
-       SRETURN(NULL);
+       return (NULL);
 }
 
 static int
@@ -405,7 +386,7 @@ taskq_wait_check(taskq_t *tq, taskqid_t id)
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
-       SRETURN(rc);
+       return (rc);
 }
 
 void
@@ -419,7 +400,7 @@ void
 taskq_wait(taskq_t *tq)
 {
        taskqid_t id;
-       SENTRY;
+
        ASSERT(tq);
 
        /* Wait for the largest outstanding taskqid */
@@ -428,9 +409,6 @@ taskq_wait(taskq_t *tq)
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        taskq_wait_all(tq, id);
-
-       SEXIT;
-
 }
 EXPORT_SYMBOL(taskq_wait);
 
@@ -439,7 +417,6 @@ taskq_member(taskq_t *tq, void *t)
 {
        struct list_head *l;
        taskq_thread_t *tqt;
-       SENTRY;
 
        ASSERT(tq);
        ASSERT(t);
@@ -447,10 +424,10 @@ taskq_member(taskq_t *tq, void *t)
        list_for_each(l, &tq->tq_thread_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
                if (tqt->tqt_thread == (struct task_struct *)t)
-                       SRETURN(1);
+                       return (1);
        }
 
-       SRETURN(0);
+       return (0);
 }
 EXPORT_SYMBOL(taskq_member);
 
@@ -466,7 +443,6 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
        taskq_ent_t *t;
        int active = 0;
        int rc = ENOENT;
-       SENTRY;
 
        ASSERT(tq);
 
@@ -507,7 +483,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
                rc = EBUSY;
        }
 
-       SRETURN(rc);
+       return (rc);
 }
 EXPORT_SYMBOL(taskq_cancel_id);
 
@@ -516,7 +492,6 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
        taskq_ent_t *t;
        taskqid_t rc = 0;
-       SENTRY;
 
        ASSERT(tq);
        ASSERT(func);
@@ -525,15 +500,15 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
-               SGOTO(out, rc = 0);
+               goto out;
 
        /* Do not queue the task unless there is idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
-               SGOTO(out, rc = 0);
+               goto out;
 
        if ((t = task_alloc(tq, flags)) == NULL)
-               SGOTO(out, rc = 0);
+               goto out;
 
        spin_lock(&t->tqent_lock);
 
@@ -559,7 +534,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        wake_up(&tq->tq_work_waitq);
 out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-       SRETURN(rc);
+       return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch);
 
@@ -567,9 +542,8 @@ taskqid_t
 taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
     uint_t flags, clock_t expire_time)
 {
-       taskq_ent_t *t;
        taskqid_t rc = 0;
-       SENTRY;
+       taskq_ent_t *t;
 
        ASSERT(tq);
        ASSERT(func);
@@ -578,10 +552,10 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 
        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
-               SGOTO(out, rc = 0);
+               goto out;
 
        if ((t = task_alloc(tq, flags)) == NULL)
-               SGOTO(out, rc = 0);
+               goto out;
 
        spin_lock(&t->tqent_lock);
 
@@ -603,7 +577,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
        spin_unlock(&t->tqent_lock);
 out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-       SRETURN(rc);
+       return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch_delay);
 
@@ -611,8 +585,6 @@ void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
     taskq_ent_t *t)
 {
-       SENTRY;
-
        ASSERT(tq);
        ASSERT(func);
        ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
@@ -650,7 +622,6 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
        wake_up(&tq->tq_work_waitq);
 out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-       SEXIT;
 }
 EXPORT_SYMBOL(taskq_dispatch_ent);
 
@@ -685,7 +656,6 @@ taskq_thread(void *args)
        taskq_t *tq;
        taskq_ent_t *t;
        struct list_head *pend_list;
-       SENTRY;
 
        ASSERT(tqt);
        tq = tqt->tqt_tq;
@@ -778,7 +748,7 @@ taskq_thread(void *args)
 
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
-       SRETURN(0);
+       return (0);
 }
 
 taskq_t *
@@ -789,7 +759,6 @@ taskq_create(const char *name, int nthreads, pri_t pri,
        taskq_t *tq;
        taskq_thread_t *tqt;
        int rc = 0, i, j = 0;
-       SENTRY;
 
        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
@@ -808,7 +777,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 
        tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
        if (tq == NULL)
-               SRETURN(NULL);
+               return (NULL);
 
        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
@@ -869,7 +838,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
                tq = NULL;
        }
 
-       SRETURN(tq);
+       return (tq);
 }
 EXPORT_SYMBOL(taskq_create);
 
@@ -879,7 +848,6 @@ taskq_destroy(taskq_t *tq)
        struct task_struct *thread;
        taskq_thread_t *tqt;
        taskq_ent_t *t;
-       SENTRY;
 
        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
@@ -929,30 +897,24 @@ taskq_destroy(taskq_t *tq)
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
        kmem_free(tq, sizeof(taskq_t));
-
-       SEXIT;
 }
 EXPORT_SYMBOL(taskq_destroy);
 
 int
 spl_taskq_init(void)
 {
-       SENTRY;
-
        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment 1 thread per-core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
            minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
-               SRETURN(1);
+               return (1);
 
-       SRETURN(0);
+       return (0);
 }
 
 void
 spl_taskq_fini(void)
 {
-       SENTRY;
        taskq_destroy(system_taskq);
-       SEXIT;
 }
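
The replacement visibility comes from Linux tracepoints on the ZFS side (zfsonlinux/zfs@bc9f413, referenced above). As a rough illustration of the mechanism being adopted, not of that commit's actual event definitions, a tracepoint header takes the general shape below; the spl_taskq system name and spl_taskq_dispatch event are hypothetical:

/*
 * Hypothetical tracepoint header (e.g. spl_taskq.h); the system and
 * event names are illustrative, not taken from zfsonlinux/zfs@bc9f413.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spl_taskq

#if !defined(_TRACE_SPL_TASKQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SPL_TASKQ_H

#include <linux/tracepoint.h>

TRACE_EVENT(spl_taskq_dispatch,
        TP_PROTO(const char *name, unsigned long tqid),
        TP_ARGS(name, tqid),
        /* Fields recorded into the per-CPU trace buffer */
        TP_STRUCT__entry(
                __string(name, name)
                __field(unsigned long, tqid)
        ),
        TP_fast_assign(
                __assign_str(name, name);
                __entry->tqid = tqid;
        ),
        /* Format used when the event is read back from the buffer */
        TP_printk("taskq=%s tqid=%lu", __get_str(name), __entry->tqid)
);

#endif /* _TRACE_SPL_TASKQ_H */

/* This include must sit outside the multi-read guard */
#include <trace/define_trace.h>

A dispatch site would then call trace_spl_taskq_dispatch(tq->tq_name, t->tqent_id), and the event can be toggled at runtime through tracefs. Unlike the retired SENTRY/SEXIT logging, a disabled tracepoint costs only a patched-out branch.
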