author     Brian Behlendorf <[email protected]>    2015-12-11 16:15:50 -0800
committer  Brian Behlendorf <[email protected]>    2015-12-11 16:20:22 -0800
commit     2c4332cf793e7c9ca5b2b9b0e6f31c3e41bbc1b1
tree       046d7fddccdfc3b105aec8ffe38a61e83f5ef8da
parent     066b89e68545e1f774124969d0dd7b36ccb04112
Fix cstyle issues in spl-taskq.c and taskq.h
This patch only addresses the issues identified by the style checker.
It contains no functional changes.
Signed-off-by: Brian Behlendorf <[email protected]>
-rw-r--r--   include/sys/taskq.h      91
-rw-r--r--   module/spl/spl-taskq.c   75
2 files changed, 87 insertions, 79 deletions
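
The substance of the change is easier to see with a small example. The snippet below is a hypothetical, self-contained illustration (struct example and example_alloc() are invented names, not SPL code) of the conventions the style checker enforces and that the diff below applies: block comments that open with a bare /* and close with */ on its own line, a space between sizeof and its parenthesized operand, parenthesized return values, preprocessor directives at column zero, and consistently aligned macro and structure-member columns.

/*
 * Hypothetical example (not SPL code) written to the conventions the
 * style checker expects: bare comment openers and closers, aligned
 * columns, a space after sizeof, and parenthesized return values.
 */
#include <errno.h>
#include <stdlib.h>

#define EXAMPLE_FLAG_SYNC   0x00000001  /* values kept in one aligned column */
#define EXAMPLE_FLAG_ASYNC  0x00000002

typedef struct example {
        int             ex_refs;        /* reference count */
        unsigned int    ex_flags;       /* EXAMPLE_FLAG_* flags */
        void            *ex_data;       /* caller-owned payload */
} example_t;

static int
example_alloc(example_t **epp, unsigned int flags)
{
        example_t *ep;

        /* A space separates sizeof from its parenthesized operand. */
        ep = malloc(sizeof (example_t));
        if (ep == NULL)
                return (ENOMEM);        /* return values are parenthesized */

        ep->ex_refs = 1;
        ep->ex_flags = flags;
        ep->ex_data = NULL;
        *epp = ep;

        return (0);
}

Code in roughly this shape is what the checker wants to see; the diff that follows applies exactly these kinds of fixes to taskq.h and spl-taskq.c without touching any logic.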
diff --git a/include/sys/taskq.h b/include/sys/taskq.h
index 07b4209e6..ed6aff8f8 100644
--- a/include/sys/taskq.h
+++ b/include/sys/taskq.h
@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,10 +20,10 @@
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
+ */

 #ifndef _SPL_TASKQ_H
-#define _SPL_TASKQ_H
+#define _SPL_TASKQ_H

 #include <linux/module.h>
 #include <linux/gfp.h>
@@ -33,29 +33,30 @@
 #include <sys/types.h>
 #include <sys/thread.h>

-#define TASKQ_NAMELEN 31
+#define TASKQ_NAMELEN 31

-#define TASKQ_PREPOPULATE 0x00000001
-#define TASKQ_CPR_SAFE 0x00000002
-#define TASKQ_DYNAMIC 0x00000004
-#define TASKQ_THREADS_CPU_PCT 0x00000008
-#define TASKQ_DC_BATCH 0x00000010
-#define TASKQ_ACTIVE 0x80000000
+#define TASKQ_PREPOPULATE 0x00000001
+#define TASKQ_CPR_SAFE 0x00000002
+#define TASKQ_DYNAMIC 0x00000004
+#define TASKQ_THREADS_CPU_PCT 0x00000008
+#define TASKQ_DC_BATCH 0x00000010
+#define TASKQ_ACTIVE 0x80000000

 /*
 * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
 * KM_SLEEP/KM_NOSLEEP. TQ_NOQUEUE/TQ_NOALLOC are set particularly
 * large so as not to conflict with already used GFP_* defines.
 */
-#define TQ_SLEEP 0x00000000
-#define TQ_NOSLEEP 0x00000001
-#define TQ_PUSHPAGE 0x00000002
-#define TQ_NOQUEUE 0x01000000
-#define TQ_NOALLOC 0x02000000
-#define TQ_NEW 0x04000000
-#define TQ_FRONT 0x08000000
-
-/* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
+#define TQ_SLEEP 0x00000000
+#define TQ_NOSLEEP 0x00000001
+#define TQ_PUSHPAGE 0x00000002
+#define TQ_NOQUEUE 0x01000000
+#define TQ_NOALLOC 0x02000000
+#define TQ_NEW 0x04000000
+#define TQ_FRONT 0x08000000
+
+/*
+ * spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
 * so TQ_LOCK_DYNAMIC must not evaluate to 0
 */
 typedef enum tq_lock_role {
@@ -67,28 +68,28 @@ typedef unsigned long taskqid_t;
 typedef void (task_func_t)(void *);

 typedef struct taskq {
- spinlock_t tq_lock; /* protects taskq_t */
- char *tq_name; /* taskq name */
- struct list_head tq_thread_list;/* list of all threads */
- struct list_head tq_active_list;/* list of active threads */
- int tq_nactive; /* # of active threads */
- int tq_nthreads; /* # of existing threads */
- int tq_nspawn; /* # of threads being spawned */
- int tq_maxthreads; /* # of threads maximum */
- int tq_pri; /* priority */
- int tq_minalloc; /* min task_t pool size */
- int tq_maxalloc; /* max task_t pool size */
- int tq_nalloc; /* cur task_t pool size */
- uint_t tq_flags; /* flags */
- taskqid_t tq_next_id; /* next pend/work id */
- taskqid_t tq_lowest_id; /* lowest pend/work id */
- struct list_head tq_free_list; /* free task_t's */
- struct list_head tq_pend_list; /* pending task_t's */
- struct list_head tq_prio_list; /* priority pending task_t's */
- struct list_head tq_delay_list; /* delayed task_t's */
- wait_queue_head_t tq_work_waitq; /* new work waitq */
- wait_queue_head_t tq_wait_waitq; /* wait waitq */
- tq_lock_role_t tq_lock_class; /* class used when taking tq_lock */
+ spinlock_t tq_lock; /* protects taskq_t */
+ char *tq_name; /* taskq name */
+ struct list_head tq_thread_list; /* list of all threads */
+ struct list_head tq_active_list; /* list of active threads */
+ int tq_nactive; /* # of active threads */
+ int tq_nthreads; /* # of existing threads */
+ int tq_nspawn; /* # of threads being spawned */
+ int tq_maxthreads; /* # of threads maximum */
+ int tq_pri; /* priority */
+ int tq_minalloc; /* min task_t pool size */
+ int tq_maxalloc; /* max task_t pool size */
+ int tq_nalloc; /* cur task_t pool size */
+ uint_t tq_flags; /* flags */
+ taskqid_t tq_next_id; /* next pend/work id */
+ taskqid_t tq_lowest_id; /* lowest pend/work id */
+ struct list_head tq_free_list; /* free task_t's */
+ struct list_head tq_pend_list; /* pending task_t's */
+ struct list_head tq_prio_list; /* priority pending task_t's */
+ struct list_head tq_delay_list; /* delayed task_t's */
+ wait_queue_head_t tq_work_waitq; /* new work waitq */
+ wait_queue_head_t tq_wait_waitq; /* wait waitq */
+ tq_lock_role_t tq_lock_class; /* class when taking tq_lock */
 } taskq_t;

 typedef struct taskq_ent {
@@ -103,8 +104,8 @@ typedef struct taskq_ent {
 uintptr_t tqent_flags;
 } taskq_ent_t;

-#define TQENT_FLAG_PREALLOC 0x1
-#define TQENT_FLAG_CANCEL 0x2
+#define TQENT_FLAG_PREALLOC 0x1
+#define TQENT_FLAG_CANCEL 0x2

 typedef struct taskq_thread {
 struct list_head tqt_thread_list;
@@ -134,9 +135,9 @@ extern void taskq_wait(taskq_t *);
 extern int taskq_cancel_id(taskq_t *, taskqid_t);
 extern int taskq_member(taskq_t *, void *);

-#define taskq_create_proc(name, nthreads, pri, min, max, proc, flags) \
+#define taskq_create_proc(name, nthreads, pri, min, max, proc, flags) \
 taskq_create(name, nthreads, pri, min, max, flags)
-#define taskq_create_sysdc(name, nthreads, min, max, proc, dc, flags) \
+#define taskq_create_sysdc(name, nthreads, min, max, proc, dc, flags) \
 taskq_create(name, nthreads, maxclsyspri, min, max, flags)

 int spl_taskq_init(void);
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index ded6d3b80..89d68f33c 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,9 +20,9 @@
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
+ *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
-\*****************************************************************************/
+ */

 #include <sys/taskq.h>
 #include <sys/kmem.h>
@@ -39,12 +39,12 @@ MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
 int spl_taskq_thread_priority = 1;
 module_param(spl_taskq_thread_priority, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_priority,
- "Allow non-default priority for taskq threads");
+ "Allow non-default priority for taskq threads");

 int spl_taskq_thread_sequential = 4;
 module_param(spl_taskq_thread_sequential, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_sequential,
- "Create new taskq threads after N sequential tasks");
+ "Create new taskq threads after N sequential tasks");

 /* Global system-wide dynamic task queue available for all consumers */
 taskq_t *system_taskq;
@@ -58,12 +58,12 @@ static int
 task_km_flags(uint_t flags)
 {
 if (flags & TQ_NOSLEEP)
- return KM_NOSLEEP;
+ return (KM_NOSLEEP);

 if (flags & TQ_PUSHPAGE)
- return KM_PUSHPAGE;
+ return (KM_PUSHPAGE);

- return KM_SLEEP;
+ return (KM_SLEEP);
 }

 /*
@@ -122,7 +122,7 @@ retry:
 }

 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
- t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
+ t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

 if (t) {
@@ -146,7 +146,7 @@ task_free(taskq_t *tq, taskq_ent_t *t)
 ASSERT(list_empty(&t->tqent_list));
 ASSERT(!timer_pending(&t->tqent_timer));

- kmem_free(t, sizeof(taskq_ent_t));
+ kmem_free(t, sizeof (taskq_ent_t));
 tq->tq_nalloc--;
 }
@@ -653,7 +653,7 @@ EXPORT_SYMBOL(taskq_dispatch_delay);

 void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
- taskq_ent_t *t)
+ taskq_ent_t *t)
 {
 unsigned long irqflags;
 ASSERT(tq);
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(taskq_dispatch_ent);
 int
 taskq_empty_ent(taskq_ent_t *t)
 {
- return list_empty(&t->tqent_list);
+ return (list_empty(&t->tqent_list));
 }
 EXPORT_SYMBOL(taskq_empty_ent);
@@ -809,7 +809,7 @@ taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
 (tq->tq_nactive == 0) && /* No threads are handling tasks */
 (tq->tq_nthreads > 1) && /* More than 1 thread is running */
 (!taskq_next_ent(tq)) && /* There are no pending tasks */
- (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
+ (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
 }

 static int
@@ -828,9 +828,9 @@ taskq_thread(void *args)
 tq = tqt->tqt_tq;
 current->flags |= PF_NOFREEZE;

- #if defined(PF_MEMALLOC_NOIO)
+#if defined(PF_MEMALLOC_NOIO)
 (void) memalloc_noio_save();
- #endif
+#endif

 sigfillset(&blocked);
 sigprocmask(SIG_BLOCK, &blocked, NULL);
@@ -873,17 +873,21 @@ taskq_thread(void *args)
 if ((t = taskq_next_ent(tq)) != NULL) {
 list_del_init(&t->tqent_list);

- /* In order to support recursively dispatching a
+ /*
+ * In order to support recursively dispatching a
 * preallocated taskq_ent_t, tqent_id must be
- * stored prior to executing tqent_func. */
+ * stored prior to executing tqent_func.
+ */
 tqt->tqt_id = t->tqent_id;
 tqt->tqt_task = t;

- /* We must store a copy of the flags prior to
+ /*
+ * We must store a copy of the flags prior to
 * servicing the task (servicing a prealloc'd task
 * returns the ownership of the tqent back to
 * the caller of taskq_dispatch). Thus,
- * tqent_flags _may_ change within the call. */
+ * tqent_flags _may_ change within the call.
+ */
 tqt->tqt_flags = t->tqent_flags;

 taskq_insert_in_order(tq, tqt);
@@ -903,8 +907,10 @@ taskq_thread(void *args)
 if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
 task_done(tq, t);

- /* When the current lowest outstanding taskqid is
- * done calculate the new lowest outstanding id */
+ /*
+ * When the current lowest outstanding taskqid is
+ * done calculate the new lowest outstanding id
+ */
 if (tq->tq_lowest_id == tqt->tqt_id) {
 tq->tq_lowest_id = taskq_lowest_id(tq);
 ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
@@ -999,18 +1005,18 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 spin_lock_init(&tq->tq_lock);
 INIT_LIST_HEAD(&tq->tq_thread_list);
 INIT_LIST_HEAD(&tq->tq_active_list);
- tq->tq_name = strdup(name);
- tq->tq_nactive = 0;
- tq->tq_nthreads = 0;
- tq->tq_nspawn = 0;
+ tq->tq_name = strdup(name);
+ tq->tq_nactive = 0;
+ tq->tq_nthreads = 0;
+ tq->tq_nspawn = 0;
 tq->tq_maxthreads = nthreads;
- tq->tq_pri = pri;
- tq->tq_minalloc = minalloc;
- tq->tq_maxalloc = maxalloc;
- tq->tq_nalloc = 0;
- tq->tq_flags = (flags | TASKQ_ACTIVE);
- tq->tq_next_id = 1;
- tq->tq_lowest_id = 1;
+ tq->tq_pri = pri;
+ tq->tq_minalloc = minalloc;
+ tq->tq_maxalloc = maxalloc;
+ tq->tq_nalloc = 0;
+ tq->tq_flags = (flags | TASKQ_ACTIVE);
+ tq->tq_next_id = 1;
+ tq->tq_lowest_id = 1;
 INIT_LIST_HEAD(&tq->tq_free_list);
 INIT_LIST_HEAD(&tq->tq_pend_list);
 INIT_LIST_HEAD(&tq->tq_prio_list);
@@ -1136,8 +1142,9 @@ spl_taskq_init(void)
 return (1);
 }

- /* This is used to annotate tq_lock, so
- * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
+ /*
+ * This is used to annotate tq_lock, so
+ * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
 * does not trigger a lockdep warning re: possible recursive locking
 */
 dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;