author    behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>  2008-03-28 18:21:09 +0000
committer behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>  2008-03-28 18:21:09 +0000
commit    9f4c835a0efd55139f878c8ed4746cd7da815658 (patch)
tree      2cba49e5c08a44483c164441d7f0879a819992d2 /include
parent    4a4295b26736a651a16a2d291868028dbd7cf91b (diff)
Correctly functioning 64-bit atomic shim layer. It's not
what I would call efficient, but it does have the advantage of being correct, which is all I need right now. I added a regression test as well. git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@57 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
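
The regression test mentioned above is not part of this diff (the diffstat below is limited to 'include'), so the following is only a minimal sketch of the kind of check it implies, written against the shim interfaces added in atomic.h; the function name is hypothetical:

#include <sys/atomic.h>

/* Hypothetical sanity check: atomic_add_64() returns the old value,
 * atomic_add_64_nv() returns the new value, and atomic_cas_64() only
 * stores newval when the compare value matches. */
static int
atomic_shim_sanity_test(void)
{
	volatile uint64_t v = 0;

	if (atomic_add_64(&v, 5) != 0)          /* returns old value (0) */
		return -1;
	if (atomic_add_64_nv(&v, 5) != 10)      /* returns new value (10) */
		return -1;
	if (atomic_cas_64(&v, 10, 42) != 10)    /* match: returns old, stores 42 */
		return -1;
	if (atomic_cas_64(&v, 10, 0) != 42)     /* mismatch: returns 42, no store */
		return -1;
	if (atomic_sub_64_nv(&v, 2) != 40)      /* returns new value (40) */
		return -1;

	return 0;
}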
Diffstat (limited to 'include')
-rw-r--r--  include/sys/atomic.h | 75
1 file changed, 64 insertions(+), 11 deletions(-)
diff --git a/include/sys/atomic.h b/include/sys/atomic.h
index 1f2a4780b..647d0db9a 100644
--- a/include/sys/atomic.h
+++ b/include/sys/atomic.h
@@ -6,43 +6,90 @@ extern "C" {
#endif
#include <linux/module.h>
-/* FIXME - NONE OF THIS IS ATOMIC, IT SHOULD BE. I think we can
- * get by for now since I'm only working on real 64bit systems but
- * this will need to be addressed properly.
+#include <linux/spinlock.h>
+
+/* XXX: Serialize everything through global locks. This is
+ * going to be bad for performance, but for now it's the easiest
+ * way to ensure correct behavior. I don't like it at all.
+ * It would be nicer to map these onto the native Linux atomic
+ * functions, but operating on plain uint64_t values complicates this.
*/
+extern spinlock_t atomic64_lock;
+extern spinlock_t atomic32_lock;
+extern spinlock_t atomic_lock;
+
+static __inline__ uint32_t
+atomic_add_32(volatile uint32_t *target, int32_t delta)
+{
+ uint32_t rc;
+
+ spin_lock(&atomic32_lock);
+ rc = *target;
+ *target += delta;
+ spin_unlock(&atomic32_lock);
+
+ return rc;
+}
static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
+ spin_lock(&atomic64_lock);
(*target)++;
+ spin_unlock(&atomic64_lock);
}
static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
+ spin_lock(&atomic64_lock);
(*target)--;
+ spin_unlock(&atomic64_lock);
}
-static __inline__ uint32_t
-atomic_add_32(volatile uint32_t *target, int32_t delta)
+static __inline__ uint64_t
+atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
- uint32_t rc = *target;
+ uint64_t rc;
+
+ spin_lock(&atomic64_lock);
+ rc = *target;
*target += delta;
+ spin_unlock(&atomic64_lock);
+
return rc;
}
static __inline__ uint64_t
-atomic_add_64(volatile uint64_t *target, uint64_t delta)
+atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
- uint64_t rc = *target;
- *target += delta;
+ uint64_t rc;
+
+ spin_lock(&atomic64_lock);
+ rc = *target;
+ *target -= delta;
+ spin_unlock(&atomic64_lock);
+
return rc;
}
static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
+ spin_lock(&atomic64_lock);
*target += delta;
+ spin_unlock(&atomic64_lock);
+
+ return *target;
+}
+
+static __inline__ uint64_t
+atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
+{
+ spin_lock(&atomic64_lock);
+ *target -= delta;
+ spin_unlock(&atomic64_lock);
+
return *target;
}
@@ -50,10 +97,13 @@ static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
uint64_t newval)
{
- uint64_t rc = *target;
+ uint64_t rc;
+ spin_lock(&atomic64_lock);
+ rc = *target;
if (*target == cmp)
*target = newval;
+ spin_unlock(&atomic64_lock);
return rc;
}
@@ -61,10 +111,13 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
- void *rc = (void *)target;
+ void *rc;
+ spin_lock(&atomic_lock);
+ rc = (void *)target;
if (target == cmp)
target = newval;
+ spin_unlock(&atomic_lock);
return rc;
}
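
The extern spinlock declarations added to the header have to be backed by definitions somewhere in the module itself, which this header-only diff does not show. A minimal sketch of what that side looks like, assuming a source file along the lines of spl-atomic.c (the file name and the SPIN_LOCK_UNLOCKED initializer are assumptions, not part of this diff):

#include <linux/module.h>
#include <linux/spinlock.h>

/* One global lock per access width; every atomic_*() shim call of that
 * width serializes on the corresponding lock. */
spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;
spinlock_t atomic_lock   = SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL(atomic64_lock);
EXPORT_SYMBOL(atomic32_lock);
EXPORT_SYMBOL(atomic_lock);

Exporting the locks is needed because the header's static inline functions reference them from any module that includes <sys/atomic.h>. A single lock per width keeps the shim correct and simple, at the cost of serializing every 64-bit atomic operation behind one spinlock, which is exactly the trade-off the XXX comment in the header acknowledges.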