author     behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>  2008-11-03 20:34:17 +0000
committer  behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>  2008-11-03 20:34:17 +0000
commit     550f1705259c9b97ad158354a921003bbbfe819f (patch)
tree       5d9a990331d3a61b716775e094e121b1c25e787e
parent     749045bbfa3302f8917ef681373775245b241698 (diff)
Apply two nice improvements caught by Ricardo,

* spl-05-div64.patch
  This is a much less intrusive fix for undefined 64-bit division
  symbols when compiling the DMU in 32-bit kernels.

* spl-06-atomic64.patch
  This is a workaround for 32-bit kernels that don't have atomic64_t.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@162 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
-rw-r--r--  autoconf/spl-build.m4        |  1
-rw-r--r--  include/asm/atomic_compat.h  | 58
-rw-r--r--  include/sys/div64.h          | 44
-rw-r--r--  include/sys/kmem.h           |  2
-rw-r--r--  modules/spl/spl-generic.c    | 24
-rw-r--r--  modules/spl/spl-kmem.c       |  8
-rw-r--r--  modules/spl/spl-proc.c       |  2
7 files changed, 85 insertions(+), 54 deletions(-)
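
Background on the two fixes, with a minimal sketch (not part of the commit; the
example_* names are illustrative and assume a Linux kernel build tree of this
era): on a 32-bit kernel gcc turns a plain 64-bit division into a call to the
libgcc helper __udivdi3, which the kernel does not provide, while the kernel's
own do_div() macro needs no helper but only accepts a 32-bit divisor.

/*
 * Illustrative only -- not part of the patch.  The first function
 * compiles to a __udivdi3 call on a 32-bit target; the second uses
 * do_div(), which divides in place and returns the remainder, but
 * is limited to a 32-bit divisor.
 */
#include <linux/types.h>
#include <asm/div64.h>

static inline uint64_t example_div64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;	/* emits __udivdi3 on 32-bit */
}

static inline uint64_t example_div64_32(uint64_t dividend, uint32_t divisor)
{
	do_div(dividend, divisor);	/* quotient left in dividend */
	return dividend;
}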
diff --git a/autoconf/spl-build.m4 b/autoconf/spl-build.m4
index 61d11e8ff..585e3482e 100644
--- a/autoconf/spl-build.m4
+++ b/autoconf/spl-build.m4
@@ -653,6 +653,7 @@ AC_DEFUN([SPL_AC_DIV64_64], [
AC_MSG_CHECKING([whether div64_64() is available])
SPL_LINUX_TRY_COMPILE([
#include <asm/div64.h>
+ #include <linux/types.h>
],[
uint64_t i = div64_64(1ULL, 1ULL);
],[
diff --git a/include/asm/atomic_compat.h b/include/asm/atomic_compat.h
new file mode 100644
index 000000000..0ab1d6cbd
--- /dev/null
+++ b/include/asm/atomic_compat.h
@@ -0,0 +1,58 @@
+#ifndef _SPL_ATOMIC_COMPAT_H
+#define _SPL_ATOMIC_COMPAT_H
+
+#include <asm/atomic.h>
+
+#ifndef HAVE_ATOMIC64_T
+#include <linux/spinlock.h>
+
+typedef struct {
+ spinlock_t lock;
+ __s64 val;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { .lock = SPIN_LOCK_UNLOCKED, .val = (i) }
+
+static inline void atomic64_add(__s64 i, atomic64_t *v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val += i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+static inline void atomic64_sub(__s64 i, atomic64_t *v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val -= i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+static inline __s64 atomic64_read(atomic64_t *v)
+{
+ unsigned long flags;
+ __s64 r;
+
+ spin_lock_irqsave(&v->lock, flags);
+ r = v->val;
+ spin_unlock_irqrestore(&v->lock, flags);
+
+ return r;
+}
+
+static inline void atomic64_set(atomic64_t *v, __s64 i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val = i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+#endif /* HAVE_ATOMIC64_T */
+
+#endif /* _SPL_ATOMIC_COMPAT_H */
+
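
The new atomic_compat.h above falls back to a spinlock-protected 64-bit counter
whenever the configure check does not define HAVE_ATOMIC64_T.  A minimal usage
sketch (the counter and helper names below are hypothetical, not taken from the
commit):

#include <linux/types.h>
#include <asm/atomic_compat.h>

/* Static initialization also sets up the embedded spinlock. */
static atomic64_t example_bytes_used = ATOMIC64_INIT(0);

static void example_account_alloc(size_t size)
{
	atomic64_add((__s64)size, &example_bytes_used);
}

static void example_account_free(size_t size)
{
	atomic64_sub((__s64)size, &example_bytes_used);
}

static __s64 example_bytes_outstanding(void)
{
	return atomic64_read(&example_bytes_used);
}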
diff --git a/include/sys/div64.h b/include/sys/div64.h
deleted file mode 100644
index cb62cd3c5..000000000
--- a/include/sys/div64.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * This file is part of the SPL: Solaris Porting Layer.
- *
- * Copyright (c) 2008 Sun Microsystems, Inc.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _SPL_DIV64_H
-#define _SPL_DIV64_H
-
-#include <asm/div64.h>
-
-#ifndef HAVE_DIV64_64
-#if BITS_PER_LONG == 32
-
-extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
-#define div64_64(a,b) spl_div64_64(a,b)
-
-#else /* BITS_PER_LONG == 32 */
-
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
- return dividend / divisor;
-}
-
-#endif /* BITS_PER_LONG == 32 */
-#endif /* HAVE_DIV64_64 */
-
-#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
-
-#endif /* _SPL_DIV64_H */
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
index 1ca2c261a..336eeda3e 100644
--- a/include/sys/kmem.h
+++ b/include/sys/kmem.h
@@ -42,8 +42,10 @@ extern "C" {
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
+#include <asm/atomic_compat.h>
#include <sys/types.h>
#include <sys/debug.h>
+
/*
* Memory allocation interfaces
*/
diff --git a/modules/spl/spl-generic.c b/modules/spl/spl-generic.c
index afaefb6b4..944d70e72 100644
--- a/modules/spl/spl-generic.c
+++ b/modules/spl/spl-generic.c
@@ -88,12 +88,15 @@ highbit(unsigned long i)
EXPORT_SYMBOL(highbit);
/*
- * Implementation of div64_64(), for kernels that don't have it.
- *
- * Taken from a 2.6.24 kernel.
+ * Implementation of 64 bit division for 32-bit machines.
*/
-uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
+#if BITS_PER_LONG == 32
+uint64_t __udivdi3(uint64_t dividend, uint64_t divisor)
{
+#ifdef HAVE_DIV64_64
+ return div64_64(dividend, divisor);
+#else
+ /* Taken from a 2.6.24 kernel. */
uint32_t high, d;
high = divisor >> 32;
@@ -108,8 +111,19 @@ uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
do_div(dividend, d);
return dividend;
+#endif
}
-EXPORT_SYMBOL(spl_div64_64);
+EXPORT_SYMBOL(__udivdi3);
+
+/*
+ * Implementation of 64 bit modulo for 32-bit machines.
+ */
+uint64_t __umoddi3(uint64_t dividend, uint64_t divisor)
+{
+ return dividend - divisor * (dividend / divisor);
+}
+EXPORT_SYMBOL(__umoddi3);
+#endif
int
ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result)
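
One note on the spl-generic.c change above: __umoddi3() is expressed through a
64-bit division, so on a 32-bit kernel it presumably resolves to the exported
__udivdi3() defined just before it.  The remainder identity it uses can be
checked with a trivial userspace program (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t n = 0x0123456789abcdefULL;
	uint64_t d = 1000003ULL;

	/* r = n - d * (n / d), the identity used by __umoddi3(). */
	uint64_t r = n - d * (n / d);

	assert(r == n % d);
	return 0;
}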
diff --git a/modules/spl/spl-kmem.c b/modules/spl/spl-kmem.c
index 208f11b8e..796caa20c 100644
--- a/modules/spl/spl-kmem.c
+++ b/modules/spl/spl-kmem.c
@@ -43,9 +43,9 @@
*/
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
-atomic64_t kmem_alloc_used;
+atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long kmem_alloc_max = 0;
-atomic64_t vmem_alloc_used;
+atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long vmem_alloc_max = 0;
int kmem_warning_flag = 1;
@@ -1031,12 +1031,12 @@ spl_kmem_fini(void)
* a serious concern here since it is module unload time. */
if (atomic64_read(&kmem_alloc_used) != 0)
CWARN("kmem leaked %ld/%ld bytes\n",
- atomic_read(&kmem_alloc_used), kmem_alloc_max);
+ atomic64_read(&kmem_alloc_used), kmem_alloc_max);
if (atomic64_read(&vmem_alloc_used) != 0)
CWARN("vmem leaked %ld/%ld bytes\n",
- atomic_read(&vmem_alloc_used), vmem_alloc_max);
+ atomic64_read(&vmem_alloc_used), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
diff --git a/modules/spl/spl-proc.c b/modules/spl/spl-proc.c
index 01983433d..017c69d47 100644
--- a/modules/spl/spl-proc.c
+++ b/modules/spl/spl-proc.c
@@ -415,7 +415,7 @@ proc_doatomic64(struct ctl_table *table, int write, struct file *filp,
if (write) {
*ppos += *lenp;
} else {
- val = atomic_read((atomic64_t *)table->data);
+ val = atomic64_read((atomic64_t *)table->data);
rc = proc_doulongvec_minmax(&dummy, write, filp,
buffer, lenp, ppos);
}