aboutsummaryrefslogtreecommitdiffstats
path: root/modules/spl/spl-taskq.c
blob: d26b40db8ba4455914ed9672402279bc4d118bf2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
#include <sys/taskq.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_TASKQ

/*
 * Task queue interface
 *
 * The taskq_work_wrapper functions are used to manage the work_structs
 * which must be submitted to linux.  The shim layer allocates a wrapper
 * structure for all items which contains a pointer to itself as well as
 * the real work to be performed.  When the work item runs, the generic
 * handler is called which calls the real work function and then, using
 * the self pointer, frees the work_struct.
 *
 * Ownership: the wrapper is kmalloc'd in __taskq_dispatch() and
 * kfree'd either by taskq_work_handler() after the work runs, or by
 * __taskq_dispatch() itself if queueing fails.
 */
typedef struct taskq_work_wrapper {
        struct work_struct tww_work;    /* Linux work item (must be first so */
                                        /* the handler's priv maps back)     */
        task_func_t        tww_func;    /* Real task function to invoke */
        void *             tww_priv;    /* Opaque argument for tww_func */
} taskq_work_wrapper_t;

static void
taskq_work_handler(void *priv)
{
        taskq_work_wrapper_t *tww = priv;

        ASSERT(tww);
        ASSERT(tww->tww_func);

        /* Call the real function and free the wrapper */
        tww->tww_func(tww->tww_priv);
        kfree(tww);
}

/* XXX - All flags currently ignored */
taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
{
	struct workqueue_struct *wq = tq;
	taskq_work_wrapper_t *tww;
	ENTRY;

	ASSERT(tq);
	ASSERT(func);

	/* GFP_ATOMIC because dispatch may occur in interrupt context */
	tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_ATOMIC);
	if (tww == NULL)
		RETURN((taskqid_t)0);

	INIT_WORK(&tww->tww_work, taskq_work_handler, tww);
	tww->tww_func = func;
	tww->tww_priv = priv;

	/* queue_work() reports 0 if the item could not be queued; the
	 * wrapper was never handed off, so free it here. */
	if (queue_work(wq, &tww->tww_work) == 0) {
		kfree(tww);
		RETURN((taskqid_t)0);
	}

	/* NOTE: the queue pointer doubles as the returned task id, so all
	 * dispatches to the same queue share an id — a shim limitation. */
	RETURN((taskqid_t)wq);
}
EXPORT_SYMBOL(__taskq_dispatch);

/* XXX - We must fully implement dynamic workqueues since they make a
 *       significant impact in terms of performance.  For now I've made
 *       a trivial compromise.  If you ask for one thread you get one
 *       thread, if you ask for more than that you get one per core.
 *       It's unclear if you ever really need/want more than one per-core
 *       anyway.  More analysis is required.
 *
 * name  - Workqueue names are limited to 10 chars
 * pri   - Ignore priority
 * min   - Ignored until this is a dynamic thread pool
 * max   - Ignored until this is a dynamic thread pool
 * flags - Ignored until this is a dynamic thread_pool
 *
 * Returns the new taskq, or NULL if workqueue creation failed
 * (create_*_workqueue's failure value is passed straight through).
 */
taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	ENTRY;

	if (nthreads == 1)
		tq = create_singlethread_workqueue(name);
	else
		tq = create_workqueue(name);

	/* Use RETURN() rather than a bare return so the ENTRY/RETURN
	 * debug tracing stays balanced, matching every other function
	 * in this file. */
	RETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);

/*
 * Destroy the task queue.  Maps directly onto destroy_workqueue(),
 * which (per the Linux workqueue API) drains pending work before
 * tearing the queue down.  tq must not be used afterwards.
 */
void
__taskq_destroy(taskq_t *tq)
{
	ENTRY;
	destroy_workqueue(tq);
	EXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

/*
 * Wait for all currently queued work items to complete.  Maps onto
 * flush_workqueue(); work dispatched after this call begins is not
 * waited for.
 */
void
__taskq_wait(taskq_t *tq)
{
	ENTRY;
	flush_workqueue(tq);
	EXIT;
}
EXPORT_SYMBOL(__taskq_wait);