path: root/module/spl/spl-kmem.c
author     Brian Behlendorf <[email protected]>  2014-12-08 13:35:51 -0500
committer  Brian Behlendorf <[email protected]>  2015-01-16 13:55:09 -0800
commit     b34b95635a99223b6bff5437fb389e9340dc7dcd (patch)
tree       71a140683067236d1277c0f2e5ca653656e8ef6f /module/spl/spl-kmem.c
parent     e5b9b344c728bb0d9304f1a143db9255901dc5fe (diff)
Fix kmem cstyle issues
Address all cstyle issues in the kmem, vmem, and kmem_cache source and headers. This was done to make it easier to review subsequent changes which will rework the kmem/vmem implementation.

Signed-off-by: Brian Behlendorf <[email protected]>
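For context, the conventions enforced throughout the diff below are the project's cstyle rules (derived from the OpenSolaris/illumos cstyle conventions): parenthesized return values, a space after sizeof, plain block comments without banner lines, and unindented preprocessor directives. A minimal illustrative sketch of code that follows those rules (a hypothetical function, not taken from this commit):

/*
 * Hypothetical example only -- not part of the commit. It shows the
 * style the diff below converges on: return (value), sizeof (type),
 * and plain block comments.
 */
#include <linux/string.h>

static size_t
example_size(const char *str)
{
	if (str == NULL)
		return (0);

	/* Account for the terminating NUL byte. */
	return (strlen(str) + sizeof (char));
}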
Diffstat (limited to 'module/spl/spl-kmem.c')
-rw-r--r--  module/spl/spl-kmem.c  104
1 file changed, 55 insertions(+), 49 deletions(-)
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 075bf2580..96ad2b043 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,9 +20,7 @@
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
- * Solaris Porting Layer (SPL) Kmem Implementation.
-\*****************************************************************************/
+ */
#include <sys/debug.h>
#include <sys/kmem.h>
@@ -31,7 +29,7 @@
int
kmem_debugging(void)
{
- return 0;
+ return (0);
}
EXPORT_SYMBOL(kmem_debugging);
@@ -47,7 +45,7 @@ kmem_vasprintf(const char *fmt, va_list ap)
va_end(aq);
} while (ptr == NULL);
- return ptr;
+ return (ptr);
}
EXPORT_SYMBOL(kmem_vasprintf);
@@ -63,7 +61,7 @@ kmem_asprintf(const char *fmt, ...)
va_end(ap);
} while (ptr == NULL);
- return ptr;
+ return (ptr);
}
EXPORT_SYMBOL(kmem_asprintf);
@@ -78,13 +76,13 @@ __strdup(const char *str, int flags)
if (ptr)
memcpy(ptr, str, n + 1);
- return ptr;
+ return (ptr);
}
char *
strdup(const char *str)
{
- return __strdup(str, KM_SLEEP);
+ return (__strdup(str, KM_SLEEP));
}
EXPORT_SYMBOL(strdup);
@@ -104,18 +102,19 @@ EXPORT_SYMBOL(strfree);
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
-# ifdef HAVE_ATOMIC64_T
+#ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
-# else /* HAVE_ATOMIC64_T */
+#else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
-# endif /* HAVE_ATOMIC64_T */
+#endif /* HAVE_ATOMIC64_T */
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
-/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
+/*
+ * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
* but also the location of every alloc and free. When the SPL module is
* unloaded a list of all leaked addresses and where they were allocated
* will be dumped to the console. Enabling this feature has a significant
@@ -126,18 +125,18 @@ EXPORT_SYMBOL(kmem_alloc_max);
* debugging enabled for anything other than debugging we need to minimize
* the contention by moving to a lock per xmem_table entry model.
*/
-# ifdef DEBUG_KMEM_TRACKING
+#ifdef DEBUG_KMEM_TRACKING
-# define KMEM_HASH_BITS 10
-# define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
+#define KMEM_HASH_BITS 10
+#define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
typedef struct kmem_debug {
- struct hlist_node kd_hlist; /* Hash node linkage */
- struct list_head kd_list; /* List of all allocations */
- void *kd_addr; /* Allocation pointer */
- size_t kd_size; /* Allocation size */
- const char *kd_func; /* Allocation function */
- int kd_line; /* Allocation line */
+ struct hlist_node kd_hlist; /* Hash node linkage */
+ struct list_head kd_list; /* List of all allocations */
+ void *kd_addr; /* Allocation pointer */
+ size_t kd_size; /* Allocation size */
+ const char *kd_func; /* Allocation function */
+ int kd_line; /* Allocation line */
} kmem_debug_t;
spinlock_t kmem_lock;
@@ -149,7 +148,8 @@ EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);
static kmem_debug_t *
-kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
+kmem_del_init(spinlock_t *lock, struct hlist_head *table,
+ int bits, const void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -165,7 +165,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *
hlist_del_init(&p->kd_hlist);
list_del_init(&p->kd_list);
spin_unlock_irqrestore(lock, flags);
- return p;
+ return (p);
}
}
@@ -183,12 +183,12 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
unsigned long irq_flags;
/* Function may be called with KM_NOSLEEP so failure is possible */
- dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
+ dptr = (kmem_debug_t *) kmalloc_nofail(sizeof (kmem_debug_t),
flags & ~__GFP_ZERO);
if (unlikely(dptr == NULL)) {
printk(KERN_WARNING "debug kmem_alloc(%ld, 0x%x) at %s:%d "
- "failed (%lld/%llu)\n", sizeof(kmem_debug_t), flags,
+ "failed (%lld/%llu)\n", sizeof (kmem_debug_t), flags,
func, line, kmem_alloc_used_read(), kmem_alloc_max);
} else {
/*
@@ -280,7 +280,7 @@ kmem_free_track(const void *ptr, size_t size)
kmem_alloc_used_sub(size);
kfree(dptr->kd_func);
- memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
+ memset((void *)dptr, 0x5a, sizeof (kmem_debug_t));
kfree(dptr);
memset((void *)ptr, 0x5a, size);
@@ -288,7 +288,7 @@ kmem_free_track(const void *ptr, size_t size)
}
EXPORT_SYMBOL(kmem_free_track);
-# else /* DEBUG_KMEM_TRACKING */
+#else /* DEBUG_KMEM_TRACKING */
void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
@@ -342,7 +342,7 @@ kmem_free_debug(const void *ptr, size_t size)
}
EXPORT_SYMBOL(kmem_free_debug);
-# endif /* DEBUG_KMEM_TRACKING */
+#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
@@ -355,15 +355,19 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
ASSERT(str != NULL && len >= 17);
memset(str, 0, len);
- /* Check for a fully printable string, and while we are at
- * it place the printable characters in the passed buffer. */
+ /*
+ * Check for a fully printable string, and while we are at
+ * it place the printable characters in the passed buffer.
+ */
for (i = 0; i < size; i++) {
str[i] = ((char *)(kd->kd_addr))[i];
if (isprint(str[i])) {
continue;
} else {
- /* Minimum number of printable characters found
- * to make it worthwhile to print this as ascii. */
+ /*
+ * Minimum number of printable characters found
+ * to make it worthwhile to print this as ascii.
+ */
if (i > min)
break;
@@ -374,17 +378,17 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
if (!flag) {
sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
- *((uint8_t *)kd->kd_addr),
- *((uint8_t *)kd->kd_addr + 2),
- *((uint8_t *)kd->kd_addr + 4),
- *((uint8_t *)kd->kd_addr + 6),
- *((uint8_t *)kd->kd_addr + 8),
- *((uint8_t *)kd->kd_addr + 10),
- *((uint8_t *)kd->kd_addr + 12),
- *((uint8_t *)kd->kd_addr + 14));
+ *((uint8_t *)kd->kd_addr),
+ *((uint8_t *)kd->kd_addr + 2),
+ *((uint8_t *)kd->kd_addr + 4),
+ *((uint8_t *)kd->kd_addr + 6),
+ *((uint8_t *)kd->kd_addr + 8),
+ *((uint8_t *)kd->kd_addr + 10),
+ *((uint8_t *)kd->kd_addr + 12),
+ *((uint8_t *)kd->kd_addr + 14));
}
- return str;
+ return (str);
}
static int
@@ -411,18 +415,18 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
- "size", "data", "func", "line");
+ "size", "data", "func", "line");
list_for_each_entry(kd, list, kd_list)
printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
- (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
- kd->kd_func, kd->kd_line);
+ (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
+ kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
-#define spl_kmem_init_tracking(list, lock, size)
-#define spl_kmem_fini_tracking(list, lock)
+#define spl_kmem_init_tracking(list, lock, size)
+#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
int
@@ -442,10 +446,12 @@ void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
- /* Display all unreclaimed memory addresses, including the
+ /*
+ * Display all unreclaimed memory addresses, including the
* allocation size and the first few bytes of what's located
* at that address to aid in debugging. Performance is not
- * a serious concern here since it is module unload time. */
+ * a serious concern here since it is module unload time.
+ */
if (kmem_alloc_used_read() != 0)
printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
kmem_alloc_used_read(), kmem_alloc_max);