Diffstat (limited to 'include')
-rw-r--r--  include/sys/atomic.h  26
-rw-r--r--  include/sys/div64.h   44
2 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/include/sys/atomic.h b/include/sys/atomic.h
index 7bb915611..cd0eb3b0a 100644
--- a/include/sys/atomic.h
+++ b/include/sys/atomic.h
@@ -33,6 +33,7 @@ extern "C" {
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <sys/isa_defs.h>
/* XXX: Serialize everything through global locks. This is
* going to be bad for performance, but for now it's the easiest
@@ -133,7 +134,23 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
return rc;
}
-#if defined(__x86_64__)
+static __inline__ uint32_t
+atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
+ uint32_t newval)
+{
+ uint32_t rc;
+
+ spin_lock(&atomic32_lock);
+ rc = *target;
+ if (*target == cmp)
+ *target = newval;
+
+ spin_unlock(&atomic32_lock);
+
+ return rc;
+}
+
+#ifdef _LP64
/* XXX: Implement atomic_cas_ptr() in terms of uint64_t's. This
 * is of course only safe and correct for 64-bit arches... but
 * for now I'm OK with that.
@@ -144,6 +161,13 @@ atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
return (void *)atomic_cas_64((volatile uint64_t *)target,
(uint64_t)cmp, (uint64_t)newval);
}
+#else
+static __inline__ void *
+atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
+{
+ return (void *)atomic_cas_32((volatile uint32_t *)target,
+ (uint32_t)cmp, (uint32_t)newval);
+}
#endif
#ifdef __cplusplus
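
A minimal usage sketch for the new atomic_cas_32() (counter_inc() below is a
hypothetical caller, not part of this patch): like the existing
atomic_cas_64(), it returns the value observed before the swap, so the swap
took effect iff the return value equals the expected value. The same pattern
works through atomic_cas_ptr() on both 32-bit and 64-bit builds now that the
_LP64 branch dispatches to the matching width.

#include <sys/atomic.h>

/* Hypothetical helper: atomically increment a 32-bit counter by
 * retrying the compare-and-swap until no concurrent update slips in
 * between the read and the swap. */
static void
counter_inc(volatile uint32_t *counter)
{
	uint32_t old;

	do {
		old = *counter;
	} while (atomic_cas_32(counter, old, old + 1) != old);
}
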
diff --git a/include/sys/div64.h b/include/sys/div64.h
new file mode 100644
index 000000000..cb62cd3c5
--- /dev/null
+++ b/include/sys/div64.h
@@ -0,0 +1,44 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Sun Microsystems, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _SPL_DIV64_H
+#define _SPL_DIV64_H
+
+#include <asm/div64.h>
+
+#ifndef HAVE_DIV64_64
+#if BITS_PER_LONG == 32
+
+extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
+#define div64_64(a,b) spl_div64_64(a,b)
+
+#else /* BITS_PER_LONG == 32 */
+
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+ return dividend / divisor;
+}
+
+#endif /* BITS_PER_LONG == 32 */
+#endif /* HAVE_DIV64_64 */
+
+#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
+
+#endif /* _SPL_DIV64_H */
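
A brief usage sketch for div64_64() and roundup64() (blocks_needed() is a
hypothetical helper, not part of this patch): on 32-bit kernels a plain
64-by-64 division such as 'bytes / blksz' would emit a call to libgcc's
__udivdi3, which the kernel does not provide, so 64-bit divisions must be
routed through div64_64() instead.

#include <sys/div64.h>

/* Hypothetical helper: number of blksz-sized blocks needed to hold
 * 'bytes' bytes. E.g. bytes = 1000, blksz = 512: roundup64() yields
 * 1024, so the result is 2. */
static uint64_t
blocks_needed(uint64_t bytes, uint64_t blksz)
{
	return div64_64(roundup64(bytes, blksz), blksz);
}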