#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <asm/current.h>
#include "spl-types.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
	RW_DRIVER  = 2,		/* driver (DDI) rwlock */
	RW_DEFAULT = 4		/* kernel default rwlock */
} krw_type_t;

typedef enum {
	RW_WRITER,
	RW_READER
} krw_t;

#define RW_READ_HELD(x)		(rw_read_held((x)))
#define RW_WRITE_HELD(x)	(rw_write_held((x)))
#define RW_LOCK_HELD(x)		(rw_lock_held((x)))
#define RW_ISWRITER(x)		(rw_iswriter(x))

#define RW_MAGIC		0x3423645a
#define RW_POISON		0xa6
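
/* Illustrative sketch only: the *_HELD() macros above are typically used in
 * assertions around critical sections.  This assumes rw_write_held() and
 * friends are provided elsewhere in the SPL; the caller and field names
 * below are hypothetical.
 *
 *	static void update_table(struct my_table *t)
 *	{
 *		BUG_ON(!RW_WRITE_HELD(&t->t_lock));
 *		...modify t, knowing the write lock is held...
 *	}
 */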

typedef struct {
	int rw_magic;			/* detects uninitialized/destroyed locks */
	char *rw_name;
	struct rw_semaphore rw_sem;
	struct task_struct *rw_owner;	/* holder of the write lock */
} krwlock_t;

static __inline__ void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
	BUG_ON(type != RW_DEFAULT);	/* XXX no irq handler use */
	BUG_ON(arg != NULL);		/* XXX no irq handler use */

	rwlp->rw_magic = RW_MAGIC;
	rwlp->rw_owner = NULL;		/* no one holds the write lock yet */
	init_rwsem(&rwlp->rw_sem);

	/* Keep a private copy of the name for debugging. */
	rwlp->rw_name = NULL;
	if (name) {
		rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
		if (rwlp->rw_name)
			strcpy(rwlp->rw_name, name);
	}
}

static __inline__ void
rw_destroy(krwlock_t *rwlp)
{
	BUG_ON(rwlp == NULL);
	BUG_ON(rwlp->rw_magic != RW_MAGIC);
	BUG_ON(rwlp->rw_owner != NULL);

	/* Ensure no one is still queued on the semaphore. */
	spin_lock(&rwlp->rw_sem.wait_lock);
	BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
	spin_unlock(&rwlp->rw_sem.wait_lock);

	if (rwlp->rw_name)
		kfree(rwlp->rw_name);

	memset(rwlp, RW_POISON, sizeof(krwlock_t));
}
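
/* Usage sketch (illustrative only, not part of the interface): a krwlock_t
 * is typically embedded in a larger structure, set up once with rw_init(),
 * and torn down with rw_destroy() only when no thread can still hold it.
 * The structure and field names are hypothetical.
 *
 *	struct my_cache {
 *		krwlock_t	c_lock;
 *		...
 *	};
 *
 *	rw_init(&cache->c_lock, "my_cache_lock", RW_DEFAULT, NULL);
 *	...protect cache accesses with rw_enter()/rw_exit()...
 *	rw_destroy(&cache->c_lock);
 */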

/* Return 0 if the lock could not be obtained without blocking,
 * non-zero if it was acquired.
 */
static __inline__ int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	int result = 0;

	BUG_ON(rwlp->rw_magic != RW_MAGIC);
	switch (rw) {
	/* these functions return 1 on success, 0 on contention */
	case RW_READER:
		/* Here the Solaris code would return 0 if there were any
		 * write waiters, so a new reader could not jump ahead of a
		 * queued writer.  Allowing the read lock to be taken anyway
		 * does not look like a correctness issue, so simply call
		 * down_read_trylock().  Revisit this if it ever becomes a
		 * fairness problem. */
		result = down_read_trylock(&rwlp->rw_sem);
		break;
	case RW_WRITER:
		result = down_write_trylock(&rwlp->rw_sem);
		if (result) {
			/* nobody else may hold the write lock here */
			BUG_ON(rwlp->rw_owner != NULL);
			rwlp->rw_owner = current;
		}
		break;
	}
	return result;
}
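
/* Usage sketch (illustrative only): rw_tryenter() is the non-blocking
 * variant, so the caller must be prepared for failure.  The fallback below
 * simply defers the work; the names are hypothetical.
 *
 *	if (rw_tryenter(&cache->c_lock, RW_WRITER)) {
 *		...update the cache...
 *		rw_exit(&cache->c_lock);
 *	} else {
 *		...could not take the lock without blocking, retry later...
 *	}
 */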

static __inline__ void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	BUG_ON(rwlp->rw_magic != RW_MAGIC);
	switch (rw) {
	case RW_READER:
		/* Here the Solaris code would block if there were any write
		 * waiters, so a new reader could not jump ahead of a queued
		 * writer.  Letting the reader in does not look like a
		 * correctness issue, so simply call down_read().  Revisit
		 * this if it ever becomes a fairness problem. */
		down_read(&rwlp->rw_sem);
		break;
	case RW_WRITER:
		down_write(&rwlp->rw_sem);
		/* nobody else may hold the write lock here */
		BUG_ON(rwlp->rw_owner != NULL);
		rwlp->rw_owner = current;
		break;
	}
}

static __inline__ void
rw_exit(krwlock_t *rwlp)
{
	BUG_ON(rwlp->rw_magic != RW_MAGIC);

	/* rw_owner is set to the current thread iff it holds the write
	 * lock, so this tells us which side of the semaphore to release */
	if (rwlp->rw_owner == current) {
		rwlp->rw_owner = NULL;
		up_write(&rwlp->rw_sem);
	} else {
		up_read(&rwlp->rw_sem);
	}
}
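
/* Usage sketch (illustrative only): rw_exit() releases whichever side was
 * taken, so reader and writer critical sections look the same at the
 * release point.  Names are hypothetical.
 *
 *	rw_enter(&cache->c_lock, RW_READER);
 *	...read-only access to the cache...
 *	rw_exit(&cache->c_lock);
 *
 *	rw_enter(&cache->c_lock, RW_WRITER);
 *	...exclusive update of the cache...
 *	rw_exit(&cache->c_lock);
 */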

static __inline__ void
rw_downgrade(krwlock_t *rwlp)
{
	BUG_ON(rwlp->rw_magic != RW_MAGIC);
	BUG_ON(rwlp->rw_owner != current);

	/* Clear the owner before downgrading so rw_exit() releases
	 * the lock as a reader. */
	rwlp->rw_owner = NULL;
	downgrade_write(&rwlp->rw_sem);
}
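
/* Usage sketch (illustrative only): a writer that has finished modifying
 * the data but still needs a consistent view can downgrade instead of
 * dropping and retaking the lock, which would open a window for another
 * writer.  Names are hypothetical.
 *
 *	rw_enter(&cache->c_lock, RW_WRITER);
 *	...install the new entry...
 *	rw_downgrade(&cache->c_lock);
 *	...keep reading the entry without racing other writers...
 *	rw_exit(&cache->c_lock);
 */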

/* Return 0 if unable to perform the upgrade.  On failure the caller is
 * expected to drop the read lock and take the write lock directly.
 */
static __inline__ int
rw_tryupgrade(krwlock_t *rwlp)
{
	int result;

	BUG_ON(rwlp->rw_magic != RW_MAGIC);

	spin_lock(&rwlp->rw_sem.wait_lock);

	/* Check if there is anyone waiting for the lock.  If there is,
	 * then we know we should not try to upgrade it */
	if (!list_empty(&rwlp->rw_sem.wait_list)) {
		printk(KERN_WARNING "rw_tryupgrade() failed, lock has waiters\n");
		spin_unlock(&rwlp->rw_sem.wait_lock);
		return 0;
	}

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
	/* Note that activity is protected by the wait_lock.  Don't try
	 * to upgrade if multiple readers currently hold the lock */
	if (rwlp->rw_sem.activity > 1) {
#else
	/* Don't try to upgrade if multiple readers currently hold
	 * the lock */
	if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
		spin_unlock(&rwlp->rw_sem.wait_lock);
		return 0;
	}

	/* Here it should be safe to drop the read lock and reacquire it
	 * for writing since we know there are no waiters.
	 * XXX: some rwsem implementations (e.g. the
	 * CONFIG_RWSEM_GENERIC_SPINLOCK one) take wait_lock inside
	 * up_read()/down_write_trylock(), so calling them while holding
	 * wait_lock may not be safe there. */
	up_read(&rwlp->rw_sem);

	/* returns 1 on success, 0 on contention */
	result = down_write_trylock(&rwlp->rw_sem);

	/* The upgrade should never fail if we got this far: there are no
	 * waiters and we were the only active reader */
	BUG_ON(!result);
	BUG_ON(rwlp->rw_owner != NULL);
	rwlp->rw_owner = current;
	spin_unlock(&rwlp->rw_sem.wait_lock);

	return 1;
}
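
/* Usage sketch (illustrative only): because rw_tryupgrade() can fail, a
 * common pattern is to fall back to dropping the read lock, taking the
 * write lock, and revalidating whatever was observed under the read lock.
 * Names are hypothetical.
 *
 *	rw_enter(&cache->c_lock, RW_READER);
 *	if (entry_needs_update(entry)) {
 *		if (!rw_tryupgrade(&cache->c_lock)) {
 *			rw_exit(&cache->c_lock);
 *			rw_enter(&cache->c_lock, RW_WRITER);
 *			...revalidate entry, it may have changed...
 *		}
 *		...update entry under the write lock...
 *	}
 *	rw_exit(&cache->c_lock);
 */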

static __inline__ kthread_t *
rw_owner(krwlock_t *rwlp)
{
	BUG_ON(rwlp->rw_magic != RW_MAGIC);

	/* NULL unless the write lock is currently held */
	return rwlp->rw_owner;
}

#ifdef __cplusplus
}
#endif

#endif /* _SPL_RWLOCK_H */