/*****************************************************************************\
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
* For details, see <http://zfsonlinux.org/>.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/
#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H
#include <sys/types.h>
#include <linux/rwsem.h>
#include <linux/rwsem_compat.h>
typedef enum {
RW_DRIVER = 2,
RW_DEFAULT = 4,
RW_NOLOCKDEP = 5
} krw_type_t;
typedef enum {
RW_NONE = 0,
RW_WRITER = 1,
RW_READER = 2
} krw_t;
/*
* If CONFIG_RWSEM_SPIN_ON_OWNER is defined, rw_semaphore will have an owner
* field, so we don't need our own.
*/
typedef struct {
struct rw_semaphore rw_rwlock;
#ifndef CONFIG_RWSEM_SPIN_ON_OWNER
kthread_t *rw_owner;
#endif
#ifdef CONFIG_LOCKDEP
krw_type_t rw_type;
#endif /* CONFIG_LOCKDEP */
} krwlock_t;
#define SEM(rwp) (&(rwp)->rw_rwlock)
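
/*
 * Illustrative usage sketch (not part of this header): a typical caller
 * embeds a krwlock_t in its own structure and brackets accesses with the
 * rw_enter()/rw_exit() macros defined below.  The my_node_t type, its
 * fields, and the helper functions are hypothetical.
 *
 *	typedef struct my_node {
 *		krwlock_t	mn_lock;
 *		int		mn_value;
 *	} my_node_t;
 *
 *	void
 *	my_node_setup(my_node_t *mn)
 *	{
 *		rw_init(&mn->mn_lock, NULL, RW_DEFAULT, NULL);
 *	}
 *
 *	int
 *	my_node_read(my_node_t *mn)
 *	{
 *		int val;
 *
 *		rw_enter(&mn->mn_lock, RW_READER);
 *		val = mn->mn_value;
 *		rw_exit(&mn->mn_lock);
 *
 *		return (val);
 *	}
 *
 *	void
 *	my_node_teardown(my_node_t *mn)
 *	{
 *		rw_destroy(&mn->mn_lock);
 *	}
 */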
static inline void
spl_rw_set_owner(krwlock_t *rwp)
{
/*
* If CONFIG_RWSEM_SPIN_ON_OWNER is defined, down_write, up_write,
* downgrade_write and __init_rwsem will set/clear owner for us.
*/
#ifndef CONFIG_RWSEM_SPIN_ON_OWNER
rwp->rw_owner = current;
#endif
}
static inline void
spl_rw_clear_owner(krwlock_t *rwp)
{
#ifndef CONFIG_RWSEM_SPIN_ON_OWNER
rwp->rw_owner = NULL;
#endif
}
static inline kthread_t *
rw_owner(krwlock_t *rwp)
{
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
return SEM(rwp)->owner;
#else
return rwp->rw_owner;
#endif
}
#ifdef CONFIG_LOCKDEP
static inline void
spl_rw_set_type(krwlock_t *rwp, krw_type_t type)
{
rwp->rw_type = type;
}
static inline void
spl_rw_lockdep_off_maybe(krwlock_t *rwp)
{
	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
		lockdep_off();
}

static inline void
spl_rw_lockdep_on_maybe(krwlock_t *rwp)
{
	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
		lockdep_on();
}
#else /* CONFIG_LOCKDEP */
#define spl_rw_set_type(rwp, type)
#define spl_rw_lockdep_off_maybe(rwp)
#define spl_rw_lockdep_on_maybe(rwp)
#endif /* CONFIG_LOCKDEP */
static inline int
RW_READ_HELD(krwlock_t *rwp)
{
return (spl_rwsem_is_locked(SEM(rwp)) && rw_owner(rwp) == NULL);
}
static inline int
RW_WRITE_HELD(krwlock_t *rwp)
{
return (rw_owner(rwp) == current);
}
static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
return spl_rwsem_is_locked(SEM(rwp));
}
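
/*
 * Illustrative sketch: the *_HELD() predicates above are primarily useful
 * in assertions that document a function's locking requirements.
 * Continuing the hypothetical my_node_t example:
 *
 *	static void
 *	my_node_update_locked(my_node_t *mn, int value)
 *	{
 *		ASSERT(RW_WRITE_HELD(&mn->mn_lock));
 *		mn->mn_value = value;
 *	}
 */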
/*
 * The following functions must be #defines and not static inline
 * functions.  This ensures that the native Linux semaphore operations
 * (down/up) are attributed to the caller's code, which is important
 * for the built-in kernel lock analysis tools.
 */
#define rw_init(rwp, name, type, arg) \
({ \
static struct lock_class_key __key; \
ASSERT(type == RW_DEFAULT || type == RW_NOLOCKDEP); \
\
__init_rwsem(SEM(rwp), #rwp, &__key); \
spl_rw_clear_owner(rwp); \
spl_rw_set_type(rwp, type); \
})
#define rw_destroy(rwp) \
({ \
VERIFY(!RW_LOCK_HELD(rwp)); \
})
#define rw_tryenter(rwp, rw) \
({ \
int _rc_ = 0; \
\
spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \
case RW_READER: \
_rc_ = down_read_trylock(SEM(rwp)); \
break; \
case RW_WRITER: \
if ((_rc_ = down_write_trylock(SEM(rwp)))) \
spl_rw_set_owner(rwp); \
break; \
default: \
VERIFY(0); \
} \
spl_rw_lockdep_on_maybe(rwp); \
_rc_; \
})
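
/*
 * Illustrative sketch: rw_tryenter() returns non-zero when the lock was
 * acquired, so a caller that must not block can attempt the lock and
 * defer its work on failure.  Names continue the hypothetical example:
 *
 *	if (rw_tryenter(&mn->mn_lock, RW_WRITER)) {
 *		mn->mn_value++;
 *		rw_exit(&mn->mn_lock);
 *	} else {
 *		my_node_defer_update(mn);
 *	}
 */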
#define rw_enter(rwp, rw) \
({ \
spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \
case RW_READER: \
down_read(SEM(rwp)); \
break; \
case RW_WRITER: \
down_write(SEM(rwp)); \
spl_rw_set_owner(rwp); \
break; \
default: \
VERIFY(0); \
} \
spl_rw_lockdep_on_maybe(rwp); \
})
#define rw_exit(rwp) \
({ \
spl_rw_lockdep_off_maybe(rwp); \
if (RW_WRITE_HELD(rwp)) { \
spl_rw_clear_owner(rwp); \
up_write(SEM(rwp)); \
} else { \
ASSERT(RW_READ_HELD(rwp)); \
up_read(SEM(rwp)); \
} \
spl_rw_lockdep_on_maybe(rwp); \
})
#define rw_downgrade(rwp) \
({ \
spl_rw_lockdep_off_maybe(rwp); \
spl_rw_clear_owner(rwp); \
downgrade_write(SEM(rwp)); \
spl_rw_lockdep_on_maybe(rwp); \
})
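
/*
 * Illustrative sketch: rw_downgrade() converts a held write lock into a
 * read lock without ever dropping it, so a writer can publish an update
 * and keep reading the result without racing other writers.  Helper
 * names are hypothetical:
 *
 *	rw_enter(&mn->mn_lock, RW_WRITER);
 *	mn->mn_value = my_compute_value();
 *	rw_downgrade(&mn->mn_lock);
 *	my_use_value(mn->mn_value);
 *	rw_exit(&mn->mn_lock);
 */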
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#error spinlock rwsem should not have spin on owner
#endif
/*
 * For the generic implementation of rw-semaphores the following holds.
 * If your semaphore implementation internally represents the semaphore
 * state differently, special-case handling is required:
 * - if activity/count is 0, there are no active readers or writers
 * - if activity/count is positive, it is the number of active readers
 * - if activity/count is -1, there is one active writer
 */
extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);
#define rw_tryupgrade(rwp) \
({ \
unsigned long _flags_; \
int _rc_ = 0; \
\
spl_rw_lockdep_off_maybe(rwp); \
spl_rwsem_lock_irqsave(&SEM(rwp)->wait_lock, _flags_); \
if ((list_empty(&SEM(rwp)->wait_list)) && \
(SEM(rwp)->activity == 1)) { \
__up_read_locked(SEM(rwp)); \
VERIFY(_rc_ = __down_write_trylock_locked(SEM(rwp))); \
(rwp)->rw_owner = current; \
} \
spl_rwsem_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_); \
spl_rw_lockdep_on_maybe(rwp); \
_rc_; \
})
#else
/*
 * rw_tryupgrade() can be implemented correctly, but each supported
 * architecture would need a custom implementation.  On x86 a custom
 * cmpxchg() to atomically check and promote the rwsem looks safe.
 * For now that is not worth the trouble, so in this case
 * rw_tryupgrade() is simply disabled.
 */
#define rw_tryupgrade(rwp) ({ 0; })
#endif
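
/*
 * Illustrative sketch: rw_tryupgrade() may fail (and always fails when the
 * generic spinlock rwsem implementation is not used), so callers typically
 * fall back to dropping the read lock and reacquiring it as a writer,
 * keeping in mind that the protected state may have changed in between.
 * Names continue the hypothetical example:
 *
 *	rw_enter(&mn->mn_lock, RW_READER);
 *	if (!rw_tryupgrade(&mn->mn_lock)) {
 *		rw_exit(&mn->mn_lock);
 *		rw_enter(&mn->mn_lock, RW_WRITER);
 *	}
 *	mn->mn_value++;
 *	rw_exit(&mn->mn_lock);
 */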
int spl_rw_init(void);
void spl_rw_fini(void);
#endif /* _SPL_RWLOCK_H */