author     Etienne Dechamps <[email protected]>    2012-06-27 10:26:49 +0200
committer  Brian Behlendorf <[email protected]>    2012-10-17 08:56:37 -0700
commit     142e6dd100eb70ef06f39015a2e54cbd74172f8b (patch)
tree       24a836ff1197a704824819738bcaeeb127f0e7a5 /lib/libspl/asm-generic/atomic.c
parent     82f46731fd5a9eef4f87530e94922664b58a6138 (diff)
Add atomic_sub_* functions to libspl.
Both the SPL and the ZFS libspl export most of the atomic_* functions, except the atomic_sub_* functions, which are exported only by the SPL and not by libspl. This patch remedies that by implementing the atomic_sub_* functions in libspl.

Signed-off-by: Brian Behlendorf <[email protected]>
Issue #1013
Diffstat (limited to 'lib/libspl/asm-generic/atomic.c')
-rw-r--r--  lib/libspl/asm-generic/atomic.c  56
1 file changed, 56 insertions, 0 deletions
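
For context, a minimal usage sketch of the new interfaces (not part of the patch; it assumes the libspl <atomic.h> header is on the include path and the program links against libspl). atomic_sub_64() performs the subtraction, while atomic_sub_64_nv() additionally returns the new value:

/* Usage sketch only -- not part of this commit. */
#include <stdio.h>
#include <stdint.h>
#include <atomic.h>

int
main(void)
{
	volatile uint64_t refcount = 10;

	/* Subtract 3; the resulting value is not needed here. */
	atomic_sub_64(&refcount, 3);

	/* Subtract 2 and fetch the new value in one atomic step. */
	uint64_t nv = atomic_sub_64_nv(&refcount, 2);

	printf("refcount = %llu, nv = %llu\n",
	    (unsigned long long)refcount, (unsigned long long)nv);
	return (0);
}

In this asm-generic fallback, both calls serialize on the same internal atomic_lock mutex used by the existing atomic_add_* and atomic_or_* helpers, so the new functions can safely be mixed with them.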
diff --git a/lib/libspl/asm-generic/atomic.c b/lib/libspl/asm-generic/atomic.c
index de4430f9f..a3223eadc 100644
--- a/lib/libspl/asm-generic/atomic.c
+++ b/lib/libspl/asm-generic/atomic.c
@@ -103,6 +103,31 @@ void atomic_add_ptr(volatile void *target, ssize_t bits)
}
+#define ATOMIC_SUB(name, type1, type2) \
+ void atomic_sub_##name(volatile type1 *target, type2 bits) \
+ { \
+ VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
+ *target -= bits; \
+ VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+ }
+
+ATOMIC_SUB(8, uint8_t, int8_t)
+ATOMIC_SUB(char, uchar_t, signed char)
+ATOMIC_SUB(16, uint16_t, int16_t)
+ATOMIC_SUB(short, ushort_t, short)
+ATOMIC_SUB(32, uint32_t, int32_t)
+ATOMIC_SUB(int, uint_t, int)
+ATOMIC_SUB(long, ulong_t, long)
+ATOMIC_SUB(64, uint64_t, int64_t)
+
+void atomic_sub_ptr(volatile void *target, ssize_t bits)
+{
+ VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
+ *(caddr_t *)target -= bits;
+ VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+}
+
+
#define ATOMIC_OR(name, type) \
void atomic_or_##name(volatile type *target, type bits) \
{ \
@@ -216,6 +241,37 @@ void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
}
+#define ATOMIC_SUB_NV(name, type1, type2) \
+ type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\
+ { \
+ type1 rc; \
+ VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
+ rc = (*target -= bits); \
+ VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
+ return rc; \
+ }
+
+ATOMIC_SUB_NV(8, uint8_t, int8_t)
+ATOMIC_SUB_NV(char, uchar_t, signed char)
+ATOMIC_SUB_NV(16, uint16_t, int16_t)
+ATOMIC_SUB_NV(short, ushort_t, short)
+ATOMIC_SUB_NV(32, uint32_t, int32_t)
+ATOMIC_SUB_NV(int, uint_t, int)
+ATOMIC_SUB_NV(long, ulong_t, long)
+ATOMIC_SUB_NV(64, uint64_t, int64_t)
+
+void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
+{
+ void *ptr;
+
+ VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
+ ptr = (*(caddr_t *)target -= bits);
+ VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
+
+ return ptr;
+}
+
+
#define ATOMIC_OR_NV(name, type) \
type atomic_or_##name##_nv(volatile type *target, type bits) \
{ \