/*
* This file is part of the SPL: Solaris Porting Layer.
*
* Copyright (c) 2008 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory
* Written by:
* Brian Behlendorf <behlendorf1@llnl.gov>,
* Herb Wartens <wartens2@llnl.gov>,
* Jim Garlick <garlick@llnl.gov>
* UCRL-CODE-235197
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/spinlock.h>
#include <sys/isa_defs.h>

/* XXX: Serialize everything through global locks.  This is
 * going to be bad for performance, but for now it's the easiest
 * way to ensure correct behavior.  I don't like it at all.
 * It would be nicer to map these functions onto the native Linux
 * atomic operations, but the plain uint64_t type complicates this.
 */
extern spinlock_t atomic64_lock;
extern spinlock_t atomic32_lock;
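
/*
 * A minimal sketch of the lock definitions this header expects.  The
 * file name spl-atomic.c is an assumption for illustration, not part
 * of this header:
 *
 *   // spl-atomic.c (hypothetical)
 *   #include <linux/spinlock.h>
 *
 *   DEFINE_SPINLOCK(atomic32_lock);
 *   DEFINE_SPINLOCK(atomic64_lock);
 */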

/*
 * Atomically add delta to *target and return the value *target held
 * before the addition.
 */
static __inline__ uint32_t
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        *target += delta;
        spin_unlock(&atomic32_lock);

        return rc;
}

/* Atomically increment *target. */
static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)++;
        spin_unlock(&atomic64_lock);
}

/* Atomically decrement *target. */
static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
        spin_lock(&atomic64_lock);
        (*target)--;
        spin_unlock(&atomic64_lock);
}

/*
 * Atomically add delta to *target and return the old value.
 */
static __inline__ uint64_t
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        *target += delta;
        spin_unlock(&atomic64_lock);

        return rc;
}

/*
 * Atomically subtract delta from *target and return the old value.
 */
static __inline__ uint64_t
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        *target -= delta;
        spin_unlock(&atomic64_lock);

        return rc;
}

/*
 * Atomically add delta to *target and return the new value.  The
 * result must be captured while the lock is still held; re-reading
 * *target after spin_unlock() would race with concurrent updaters.
 */
static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        *target += delta;
        rc = *target;
        spin_unlock(&atomic64_lock);

        return rc;
}

/*
 * Atomically subtract delta from *target and return the new value.
 */
static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        *target -= delta;
        rc = *target;
        spin_unlock(&atomic64_lock);

        return rc;
}
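
/*
 * Note the convention above: atomic_add_64() and atomic_sub_64()
 * return the old value, while the _nv ("new value") variants return
 * the result of the update.  For example, with *t == 5,
 * atomic_add_64(t, 1) returns 5 and atomic_add_64_nv(t, 1) returns 6;
 * both leave *t == 6.
 */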

/*
 * Atomically compare *target with cmp; if they are equal, store
 * newval in *target.  Either way, return the value *target held
 * before the exchange, so callers can test for success with
 * (rc == cmp).
 */
static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
        uint64_t rc;

        spin_lock(&atomic64_lock);
        rc = *target;
        if (rc == cmp)
                *target = newval;
        spin_unlock(&atomic64_lock);

        return rc;
}

/* 32-bit variant of atomic_cas_64(). */
static __inline__ uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
        uint32_t rc;

        spin_lock(&atomic32_lock);
        rc = *target;
        if (rc == cmp)
                *target = newval;
        spin_unlock(&atomic32_lock);

        return rc;
}
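
/*
 * Usage sketch: the usual compare-and-swap retry loop, here building
 * an increment out of atomic_cas_64().  Illustrative only; a caller
 * wanting an increment would simply use atomic_inc_64():
 *
 *   uint64_t old;
 *   do {
 *           old = *counter;
 *   } while (atomic_cas_64(counter, old, old + 1) != old);
 */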

#ifdef _LP64
/* XXX: Implement atomic_cas_ptr() in terms of the 64-bit
 * atomic_cas_64().  This is of course only safe and correct on
 * 64-bit arches; the #else branch below falls back to the 32-bit
 * CAS everywhere else.
 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return (void *)atomic_cas_64((volatile uint64_t *)target,
            (uint64_t)cmp, (uint64_t)newval);
}
#else
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
        return (void *)atomic_cas_32((volatile uint32_t *)target,
            (uint32_t)cmp, (uint32_t)newval);
}
#endif /* _LP64 */
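
/*
 * Usage sketch for atomic_cas_ptr() (hypothetical names): publish a
 * new list head only if no other thread has changed it since it was
 * read:
 *
 *   struct node *old = head;
 *   if (atomic_cas_ptr(&head, old, new_node) == old) {
 *           // new_node is now the published head
 *   }
 */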

#ifdef __cplusplus
}
#endif

#endif /* _SPL_ATOMIC_H */