author     Brian Behlendorf <[email protected]>    2014-11-05 17:30:35 -0500
committer  Brian Behlendorf <[email protected]>    2014-11-19 10:35:07 -0800
commit     8d9a23e82cea5d897e9357d569ef364106703d5a (patch)
tree       e49678dab2f5b419d630d388aca924a3c6e4fc72 /module
parent     917fef273295616c563bbb0a5f6986cfce543d2f (diff)
Retire legacy debugging infrastructure
When the SPL was originally written Linux tracepoints were still in their
infancy. Therefore, an entire debugging subsystem was added to facilitate
tracing, which served us well for many years. Now that Linux tracepoints
have matured they provide all the functionality of the previous tracing
subsystem. Rather than maintain parallel functionality it makes sense to
fully adopt tracepoints. Therefore, this patch retires the legacy
debugging infrastructure.

See zfsonlinux/zfs@bc9f413 for the tracepoint changes.

Signed-off-by: Ned Bass <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #408
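For context, below is a minimal sketch of the Linux tracepoint pattern that replaces the retired SDEBUG-style logging. It is illustrative only and not part of this patch; the real event definitions live in the ZFS tree (see zfsonlinux/zfs@bc9f413), and the subsystem name, event name, and fields shown here are hypothetical placeholders.

/* trace_example.h -- hypothetical tracepoint header, for illustration only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spl

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(spl_example_event,
	TP_PROTO(int cpu, unsigned long nbytes),
	TP_ARGS(cpu, nbytes),
	TP_STRUCT__entry(
		__field(int,           cpu)
		__field(unsigned long, nbytes)
	),
	TP_fast_assign(
		__entry->cpu    = cpu;
		__entry->nbytes = nbytes;
	),
	TP_printk("cpu %d nbytes %lu", __entry->cpu, __entry->nbytes)
);

#endif /* _TRACE_EXAMPLE_H */

/* These must follow the include guard so define_trace.h can re-read the header */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_example
#include <trace/define_trace.h>

A caller then emits the event with trace_spl_example_event(cpu, nbytes), and the output is consumed through the kernel's standard tracing interfaces (e.g. /sys/kernel/debug/tracing) rather than the private /tmp/spl-log dump path removed below.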
Diffstat (limited to 'module')
-rw-r--r--  module/spl/Makefile.in             1
-rw-r--r--  module/spl/spl-condvar.c          30
-rw-r--r--  module/spl/spl-debug.c          1265
-rw-r--r--  module/spl/spl-err.c              96
-rw-r--r--  module/spl/spl-generic.c          36
-rw-r--r--  module/spl/spl-kmem.c            302
-rw-r--r--  module/spl/spl-kobj.c             24
-rw-r--r--  module/spl/spl-kstat.c            26
-rw-r--r--  module/spl/spl-proc.c            373
-rw-r--r--  module/spl/spl-taskq.c           106
-rw-r--r--  module/spl/spl-thread.c           22
-rw-r--r--  module/spl/spl-tsd.c              87
-rw-r--r--  module/spl/spl-vnode.c           133
-rw-r--r--  module/spl/spl-xdr.c              13
-rw-r--r--  module/spl/spl-zlib.c             14
-rw-r--r--  module/splat/splat-internal.h      1
-rw-r--r--  module/splat/splat-kmem.c          2
17 files changed, 328 insertions, 2203 deletions
diff --git a/module/spl/Makefile.in b/module/spl/Makefile.in
index 30620349f..9f67ed646 100644
--- a/module/spl/Makefile.in
+++ b/module/spl/Makefile.in
@@ -6,7 +6,6 @@ EXTRA_CFLAGS = $(SPL_MODULE_CFLAGS) @KERNELCPPFLAGS@
# Solaris porting layer module
obj-$(CONFIG_SPL) := $(MODULE).o
-$(MODULE)-objs += @top_srcdir@/module/spl/spl-debug.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-proc.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-kmem.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-thread.o
diff --git a/module/spl/spl-condvar.c b/module/spl/spl-condvar.c
index 8236412dd..2a0052f56 100644
--- a/module/spl/spl-condvar.c
+++ b/module/spl/spl-condvar.c
@@ -25,18 +25,10 @@
\*****************************************************************************/
#include <sys/condvar.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_CONDVAR
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
- SENTRY;
ASSERT(cvp);
ASSERT(name == NULL);
ASSERT(type == CV_DEFAULT);
@@ -48,8 +40,6 @@ __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
atomic_set(&cvp->cv_waiters, 0);
atomic_set(&cvp->cv_refs, 1);
cvp->cv_mutex = NULL;
-
- SEXIT;
}
EXPORT_SYMBOL(__cv_init);
@@ -68,7 +58,6 @@ cv_destroy_wakeup(kcondvar_t *cvp)
void
__cv_destroy(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
@@ -83,8 +72,6 @@ __cv_destroy(kcondvar_t *cvp)
ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
-
- SEXIT;
}
EXPORT_SYMBOL(__cv_destroy);
@@ -92,7 +79,6 @@ static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
DEFINE_WAIT(wait);
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
@@ -127,8 +113,6 @@ cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
-
- SEXIT;
}
void
@@ -161,7 +145,6 @@ __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
{
DEFINE_WAIT(wait);
clock_t time_left;
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
@@ -179,7 +162,7 @@ __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
time_left = expire_time - jiffies;
if (time_left <= 0) {
atomic_dec(&cvp->cv_refs);
- SRETURN(-1);
+ return (-1);
}
prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
@@ -201,7 +184,7 @@ __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
- SRETURN(time_left > 0 ? time_left : -1);
+ return (time_left > 0 ? time_left : -1);
}
clock_t
@@ -229,7 +212,6 @@ __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
DEFINE_WAIT(wait);
hrtime_t time_left, now;
unsigned long time_left_us;
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
@@ -247,7 +229,7 @@ __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
time_left = expire_time - now;
if (time_left <= 0) {
atomic_dec(&cvp->cv_refs);
- SRETURN(-1);
+ return (-1);
}
time_left_us = time_left / NSEC_PER_USEC;
@@ -273,7 +255,7 @@ __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
atomic_dec(&cvp->cv_refs);
time_left = expire_time - gethrtime();
- SRETURN(time_left > 0 ? time_left : -1);
+ return (time_left > 0 ? time_left : -1);
}
/*
@@ -302,7 +284,6 @@ EXPORT_SYMBOL(cv_timedwait_hires);
void
__cv_signal(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
@@ -315,14 +296,12 @@ __cv_signal(kcondvar_t *cvp)
wake_up(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
- SEXIT;
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
@@ -333,6 +312,5 @@ __cv_broadcast(kcondvar_t *cvp)
wake_up_all(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
- SEXIT;
}
EXPORT_SYMBOL(__cv_broadcast);
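The same mechanical transformation seen above is applied to every remaining file in the diff. As a condensed before/after sketch (example_op() is a hypothetical function, not code from the SPL), the legacy entry/exit/return macros are simply dropped in favor of plain returns:

/* Before: legacy SPL debug macros (hypothetical function) */
int
example_op(int arg)
{
	SENTRY;				/* recorded function entry in the debug log */
	if (arg < 0)
		SRETURN(-EINVAL);	/* recorded the return value, then returned */
	SRETURN(0);
}

/* After this patch: plain returns; tracing is left to Linux tracepoints */
int
example_op(int arg)
{
	if (arg < 0)
		return (-EINVAL);
	return (0);
}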
diff --git a/module/spl/spl-debug.c b/module/spl/spl-debug.c
deleted file mode 100644
index 6c4e043f0..000000000
--- a/module/spl/spl-debug.c
+++ /dev/null
@@ -1,1265 +0,0 @@
-/*****************************************************************************\
- * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Brian Behlendorf <[email protected]>.
- * UCRL-CODE-235197
- *
- * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://zfsonlinux.org/>.
- *
- * The SPL is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The SPL is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
- * Solaris Porting Layer (SPL) Debug Implementation.
-\*****************************************************************************/
-
-#include <linux/kmod.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kthread.h>
-#include <linux/hardirq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/proc_compat.h>
-#include <linux/file_compat.h>
-#include <linux/swap.h>
-#include <linux/ratelimit.h>
-#include <sys/sysmacros.h>
-#include <sys/thread.h>
-#include <spl-debug.h>
-#include <spl-trace.h>
-#include <spl-ctl.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_DEBUG
-
-/* Debug log support enabled */
-#ifdef DEBUG_LOG
-
-unsigned long spl_debug_subsys = ~0;
-EXPORT_SYMBOL(spl_debug_subsys);
-module_param(spl_debug_subsys, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
-
-unsigned long spl_debug_mask = SD_CANTMASK;
-EXPORT_SYMBOL(spl_debug_mask);
-module_param(spl_debug_mask, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
-
-unsigned long spl_debug_printk = SD_CANTMASK;
-EXPORT_SYMBOL(spl_debug_printk);
-module_param(spl_debug_printk, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
-
-int spl_debug_mb = -1;
-EXPORT_SYMBOL(spl_debug_mb);
-module_param(spl_debug_mb, int, 0644);
-MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
-
-unsigned int spl_debug_binary = 1;
-EXPORT_SYMBOL(spl_debug_binary);
-
-unsigned int spl_debug_catastrophe;
-EXPORT_SYMBOL(spl_debug_catastrophe);
-
-unsigned int spl_debug_panic_on_bug = 0;
-EXPORT_SYMBOL(spl_debug_panic_on_bug);
-module_param(spl_debug_panic_on_bug, uint, 0644);
-MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
-
-static char spl_debug_file_name[PATH_MAX];
-char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";
-
-unsigned int spl_console_ratelimit = 1;
-EXPORT_SYMBOL(spl_console_ratelimit);
-
-long spl_console_max_delay;
-EXPORT_SYMBOL(spl_console_max_delay);
-
-long spl_console_min_delay;
-EXPORT_SYMBOL(spl_console_min_delay);
-
-unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
-EXPORT_SYMBOL(spl_console_backoff);
-
-unsigned int spl_debug_stack;
-EXPORT_SYMBOL(spl_debug_stack);
-
-static int spl_panic_in_progress;
-
-union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
-char *trace_console_buffers[NR_CPUS][3];
-struct rw_semaphore trace_sem;
-atomic_t trace_tage_allocated = ATOMIC_INIT(0);
-
-static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
-static void trace_fini(void);
-
-
-/* Memory percentage breakdown by type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
- 80, /* 80% pages for TCD_TYPE_PROC */
- 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
- 10 /* 10% pages for TCD_TYPE_IRQ */
-};
-
-const char *
-spl_debug_subsys2str(int subsys)
-{
- switch (subsys) {
- default:
- return NULL;
- case SS_UNDEFINED:
- return "undefined";
- case SS_ATOMIC:
- return "atomic";
- case SS_KOBJ:
- return "kobj";
- case SS_VNODE:
- return "vnode";
- case SS_TIME:
- return "time";
- case SS_RWLOCK:
- return "rwlock";
- case SS_THREAD:
- return "thread";
- case SS_CONDVAR:
- return "condvar";
- case SS_MUTEX:
- return "mutex";
- case SS_RNG:
- return "rng";
- case SS_TASKQ:
- return "taskq";
- case SS_KMEM:
- return "kmem";
- case SS_DEBUG:
- return "debug";
- case SS_GENERIC:
- return "generic";
- case SS_PROC:
- return "proc";
- case SS_MODULE:
- return "module";
- case SS_CRED:
- return "cred";
- case SS_KSTAT:
- return "kstat";
- case SS_XDR:
- return "xdr";
- case SS_TSD:
- return "tsd";
- case SS_ZLIB:
- return "zlib";
- case SS_USER1:
- return "user1";
- case SS_USER2:
- return "user2";
- case SS_USER3:
- return "user3";
- case SS_USER4:
- return "user4";
- case SS_USER5:
- return "user5";
- case SS_USER6:
- return "user6";
- case SS_USER7:
- return "user7";
- case SS_USER8:
- return "user8";
- }
-}
-
-const char *
-spl_debug_dbg2str(int debug)
-{
- switch (debug) {
- default:
- return NULL;
- case SD_TRACE:
- return "trace";
- case SD_INFO:
- return "info";
- case SD_WARNING:
- return "warning";
- case SD_ERROR:
- return "error";
- case SD_EMERG:
- return "emerg";
- case SD_CONSOLE:
- return "console";
- case SD_IOCTL:
- return "ioctl";
- case SD_DPRINTF:
- return "dprintf";
- case SD_OTHER:
- return "other";
- }
-}
-
-int
-spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
- spl_debug_dbg2str;
- const char *token;
- int i, bit, len = 0;
-
- if (mask == 0) { /* "0" */
- if (size > 0)
- str[0] = '0';
- len = 1;
- } else { /* space-separated tokens */
- for (i = 0; i < 32; i++) {
- bit = 1 << i;
-
- if ((mask & bit) == 0)
- continue;
-
- token = fn(bit);
- if (token == NULL) /* unused bit */
- continue;
-
- if (len > 0) { /* separator? */
- if (len < size)
- str[len] = ' ';
- len++;
- }
-
- while (*token != 0) {
- if (len < size)
- str[len] = *token;
- token++;
- len++;
- }
- }
- }
-
- /* terminate 'str' */
- if (len < size)
- str[len] = 0;
- else
- str[size - 1] = 0;
-
- return len;
-}
-
-static int
-spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
- spl_debug_dbg2str;
- const char *token;
- int i, j, bit;
-
- /* match against known tokens */
- for (i = 0; i < 32; i++) {
- bit = 1 << i;
-
- token = fn(bit);
- if (token == NULL) /* unused? */
- continue;
-
- /* strcasecmp */
- for (j = 0; ; j++) {
- if (j == len) { /* end of token */
- if (token[j] == 0) {
- *mask = bit;
- return 0;
- }
- break;
- }
-
- if (token[j] == 0)
- break;
-
- if (str[j] == token[j])
- continue;
-
- if (str[j] < 'A' || 'Z' < str[j])
- break;
-
- if (str[j] - 'A' + 'a' != token[j])
- break;
- }
- }
-
- return -EINVAL; /* no match */
-}
-
-int
-spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
-{
- char op = 0;
- int m = 0, matched, n, t;
-
- /* Allow a number for backwards compatibility */
- for (n = strlen(str); n > 0; n--)
- if (!isspace(str[n-1]))
- break;
- matched = n;
-
- if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
- *mask = m;
- return 0;
- }
-
- /* <str> must be a list of debug tokens or numbers separated by
- * whitespace and optionally an operator ('+' or '-'). If an operator
- * appears first in <str>, '*mask' is used as the starting point
- * (relative), otherwise 0 is used (absolute). An operator applies to
- * all following tokens up to the next operator. */
- matched = 0;
- while (*str != 0) {
- while (isspace(*str)) /* skip whitespace */
- str++;
-
- if (*str == 0)
- break;
-
- if (*str == '+' || *str == '-') {
- op = *str++;
-
- /* op on first token == relative */
- if (!matched)
- m = *mask;
-
- while (isspace(*str)) /* skip whitespace */
- str++;
-
- if (*str == 0) /* trailing op */
- return -EINVAL;
- }
-
- /* find token length */
- for (n = 0; str[n] != 0 && !isspace(str[n]); n++);
-
- /* match token */
- if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
- return -EINVAL;
-
- matched = 1;
- if (op == '-')
- m &= ~t;
- else
- m |= t;
-
- str += n;
- }
-
- if (!matched)
- return -EINVAL;
-
- *mask = m;
- return 0;
-}
-
-static void
-spl_debug_dumplog_internal(dumplog_priv_t *dp)
-{
- void *journal_info;
-
- journal_info = current->journal_info;
- current->journal_info = NULL;
-
- snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
- "%s.%ld.%ld", spl_debug_file_path,
- get_seconds(), (long)dp->dp_pid);
- printk("SPL: Dumping log to %s\n", spl_debug_file_name);
- spl_debug_dump_all_pages(dp, spl_debug_file_name);
-
- current->journal_info = journal_info;
-}
-
-static int
-spl_debug_dumplog_thread(void *arg)
-{
- dumplog_priv_t *dp = (dumplog_priv_t *)arg;
-
- spl_debug_dumplog_internal(dp);
- atomic_set(&dp->dp_done, 1);
- wake_up(&dp->dp_waitq);
- complete_and_exit(NULL, 0);
-
- return 0; /* Unreachable */
-}
-
-/* When flag is set do not use a new thread for the debug dump */
-int
-spl_debug_dumplog(int flags)
-{
- struct task_struct *tsk;
- dumplog_priv_t dp;
-
- init_waitqueue_head(&dp.dp_waitq);
- dp.dp_pid = current->pid;
- dp.dp_flags = flags;
- atomic_set(&dp.dp_done, 0);
-
- if (dp.dp_flags & DL_NOTHREAD) {
- spl_debug_dumplog_internal(&dp);
- } else {
-
- tsk = spl_kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
- if (tsk == NULL)
- return -ENOMEM;
-
- wake_up_process(tsk);
- wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
- }
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_dumplog);
-
-static char *
-trace_get_console_buffer(void)
-{
- int cpu = get_cpu();
- int idx;
-
- if (in_irq()) {
- idx = 0;
- } else if (in_softirq()) {
- idx = 1;
- } else {
- idx = 2;
- }
-
- return trace_console_buffers[cpu][idx];
-}
-
-static void
-trace_put_console_buffer(char *buffer)
-{
- put_cpu();
-}
-
-static int
-trace_lock_tcd(struct trace_cpu_data *tcd)
-{
- __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-
- spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
-
- return 1;
-}
-
-static void
-trace_unlock_tcd(struct trace_cpu_data *tcd)
-{
- __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-
- spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
-}
-
-static struct trace_cpu_data *
-trace_get_tcd(void)
-{
- int cpu;
- struct trace_cpu_data *tcd;
-
- cpu = get_cpu();
- if (in_irq())
- tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
- else if (in_softirq())
- tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
- else
- tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
-
- trace_lock_tcd(tcd);
-
- return tcd;
-}
-
-static void
-trace_put_tcd (struct trace_cpu_data *tcd)
-{
- trace_unlock_tcd(tcd);
-
- put_cpu();
-}
-
-static void
-trace_set_debug_header(struct spl_debug_header *header, int subsys,
- int mask, const int line, unsigned long stack)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
-
- header->ph_subsys = subsys;
- header->ph_mask = mask;
- header->ph_cpu_id = smp_processor_id();
- header->ph_sec = (__u32)tv.tv_sec;
- header->ph_usec = tv.tv_usec;
- header->ph_stack = stack;
- header->ph_pid = current->pid;
- header->ph_line_num = line;
-
- return;
-}
-
-static void
-trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
- int len, const char *file, const char *fn)
-{
- char *prefix = "SPL", *ptype = NULL;
-
- if ((mask & SD_EMERG) != 0) {
- prefix = "SPLError";
- ptype = KERN_EMERG;
- } else if ((mask & SD_ERROR) != 0) {
- prefix = "SPLError";
- ptype = KERN_ERR;
- } else if ((mask & SD_WARNING) != 0) {
- prefix = "SPL";
- ptype = KERN_WARNING;
- } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
- prefix = "SPL";
- ptype = KERN_INFO;
- }
-
- if ((mask & SD_CONSOLE) != 0) {
- printk("%s%s: %.*s", ptype, prefix, len, buf);
- } else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_stack, file,
- hdr->ph_line_num, fn, len, buf);
- }
-
- return;
-}
-
-static int
-trace_max_debug_mb(void)
-{
- return MAX(512, ((totalram_pages >> (20 - PAGE_SHIFT)) * 80) / 100);
-}
-
-static struct trace_page *
-tage_alloc(int gfp)
-{
- struct page *page;
- struct trace_page *tage;
-
- page = alloc_pages(gfp | __GFP_NOWARN, 0);
- if (page == NULL)
- return NULL;
-
- tage = kmalloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- __free_pages(page, 0);
- return NULL;
- }
-
- tage->page = page;
- atomic_inc(&trace_tage_allocated);
-
- return tage;
-}
-
-static void
-tage_free(struct trace_page *tage)
-{
- __ASSERT(tage != NULL);
- __ASSERT(tage->page != NULL);
-
- __free_pages(tage->page, 0);
- kfree(tage);
- atomic_dec(&trace_tage_allocated);
-}
-
-static struct trace_page *
-tage_from_list(struct list_head *list)
-{
- return list_entry(list, struct trace_page, linkage);
-}
-
-static void
-tage_to_tail(struct trace_page *tage, struct list_head *queue)
-{
- __ASSERT(tage != NULL);
- __ASSERT(queue != NULL);
-
- list_move_tail(&tage->linkage, queue);
-}
-
-/* try to return a page that has 'len' bytes left at the end */
-static struct trace_page *
-trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
-{
- struct trace_page *tage;
-
- if (tcd->tcd_cur_pages > 0) {
- __ASSERT(!list_empty(&tcd->tcd_pages));
- tage = tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_SIZE)
- return tage;
- }
-
- if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
- if (tcd->tcd_cur_stock_pages > 0) {
- tage = tage_from_list(tcd->tcd_stock_pages.prev);
- tcd->tcd_cur_stock_pages--;
- list_del_init(&tage->linkage);
- } else {
- tage = tage_alloc(GFP_ATOMIC);
- if (tage == NULL) {
- printk(KERN_WARNING
- "failure to allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
- return NULL;
- }
- }
-
- tage->used = 0;
- tage->cpu = smp_processor_id();
- tage->type = tcd->tcd_type;
- list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
-
- return tage;
- }
-
- return NULL;
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct trace_page *
-trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
-{
- struct trace_page *tage;
-
- __ASSERT(len <= PAGE_SIZE);
-
- tage = trace_get_tage_try(tcd, len);
- if (tage)
- return tage;
-
- if (tcd->tcd_cur_pages > 0) {
- tage = tage_from_list(tcd->tcd_pages.next);
- tage->used = 0;
- tage_to_tail(tage, &tcd->tcd_pages);
- }
-
- return tage;
-}
-
-int
-spl_debug_msg(void *arg, int subsys, int mask, const char *file,
- const char *fn, const int line, const char *format, ...)
-{
- spl_debug_limit_state_t *cdls = arg;
- struct trace_cpu_data *tcd = NULL;
- struct spl_debug_header header = { 0, };
- struct trace_page *tage;
- /* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int i;
-
- if (subsys == 0)
- subsys = SS_DEBUG_SUBSYS;
-
- if (mask == 0)
- mask = SD_EMERG;
-
- if (strchr(file, '/'))
- file = strrchr(file, '/') + 1;
-
- tcd = trace_get_tcd();
- trace_set_debug_header(&header, subsys, mask, line, 0);
- if (tcd == NULL)
- goto console;
-
- if (tcd->tcd_shutting_down) {
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- known_size = strlen(file) + 1;
- if (fn)
- known_size += strlen(fn) + 1;
-
- if (spl_debug_binary)
- known_size += sizeof(header);
-
- /* '2' used because vsnprintf returns real size required for output
- * _without_ terminating NULL. */
- for (i = 0; i < 2; i++) {
- tage = trace_get_tage(tcd, needed + known_size + 1);
- if (tage == NULL) {
- if (needed + known_size > PAGE_SIZE)
- mask |= SD_ERROR;
-
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- string_buf = (char *)page_address(tage->page) +
- tage->used + known_size;
-
- max_nob = PAGE_SIZE - tage->used - known_size;
- if (max_nob <= 0) {
- printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
- mask |= SD_ERROR;
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- needed = 0;
- if (format) {
- va_start(ap, format);
- needed += vsnprintf(string_buf, max_nob, format, ap);
- va_end(ap);
- }
-
- if (needed < max_nob)
- break;
- }
-
- header.ph_len = known_size + needed;
- debug_buf = (char *)page_address(tage->page) + tage->used;
-
- if (spl_debug_binary) {
- memcpy(debug_buf, &header, sizeof(header));
- tage->used += sizeof(header);
- debug_buf += sizeof(header);
- }
-
- strcpy(debug_buf, file);
- tage->used += strlen(file) + 1;
- debug_buf += strlen(file) + 1;
-
- if (fn) {
- strcpy(debug_buf, fn);
- tage->used += strlen(fn) + 1;
- debug_buf += strlen(fn) + 1;
- }
-
- __ASSERT(debug_buf == string_buf);
-
- tage->used += needed;
- __ASSERT (tage->used <= PAGE_SIZE);
-
-console:
- if ((mask & spl_debug_printk) == 0) {
- /* no console output requested */
- if (tcd != NULL)
- trace_put_tcd(tcd);
- return 1;
- }
-
- if (cdls != NULL) {
- if (spl_console_ratelimit && cdls->cdls_next != 0 &&
- !time_before(cdls->cdls_next, jiffies)) {
- /* skipping a console message */
- cdls->cdls_count++;
- if (tcd != NULL)
- trace_put_tcd(tcd);
- return 1;
- }
-
- if (time_before(cdls->cdls_next + spl_console_max_delay +
- (10 * HZ), jiffies)) {
- /* last timeout was a long time ago */
- cdls->cdls_delay /= spl_console_backoff * 4;
- } else {
- cdls->cdls_delay *= spl_console_backoff;
-
- if (cdls->cdls_delay < spl_console_min_delay)
- cdls->cdls_delay = spl_console_min_delay;
- else if (cdls->cdls_delay > spl_console_max_delay)
- cdls->cdls_delay = spl_console_max_delay;
- }
-
- /* ensure cdls_next is never zero after it's been seen */
- cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
- }
-
- if (tcd != NULL) {
- trace_print_to_console(&header, mask, string_buf, needed, file, fn);
- trace_put_tcd(tcd);
- } else {
- string_buf = trace_get_console_buffer();
-
- needed = 0;
- if (format != NULL) {
- va_start(ap, format);
- needed += vsnprintf(string_buf,
- TRACE_CONSOLE_BUFFER_SIZE, format, ap);
- va_end(ap);
- }
- trace_print_to_console(&header, mask,
- string_buf, needed, file, fn);
-
- trace_put_console_buffer(string_buf);
- }
-
- if (cdls != NULL && cdls->cdls_count != 0) {
- string_buf = trace_get_console_buffer();
-
- needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
- "Skipped %d previous similar message%s\n",
- cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
-
- trace_print_to_console(&header, mask,
- string_buf, needed, file, fn);
-
- trace_put_console_buffer(string_buf);
- cdls->cdls_count = 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_msg);
-
-/* Do the collect_pages job on a single CPU: assumes that all other
- * CPUs have been stopped during a panic. If this isn't true for
- * some arch, this will have to be implemented separately in each arch.
- */
-static void
-collect_pages_from_single_cpu(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- int i, j;
-
- tcd_for_each(tcd, i, j) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- }
-}
-
-static void
-collect_pages_on_all_cpus(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- }
- }
- spin_unlock(&pc->pc_lock);
-}
-
-static void
-collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
-{
- INIT_LIST_HEAD(&pc->pc_pages);
-
- if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
- collect_pages_from_single_cpu(pc);
- else
- collect_pages_on_all_cpus(pc);
-}
-
-static void
-put_pages_back_on_all_cpus(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- struct list_head *cur_head;
- struct trace_page *tage;
- struct trace_page *tmp;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
-
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- cur_head = tcd->tcd_pages.next;
-
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
- linkage) {
- if (tage->cpu != cpu || tage->type != i)
- continue;
-
- tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
- }
- }
- }
-
- spin_unlock(&pc->pc_lock);
-}
-
-static void
-put_pages_back(struct page_collection *pc)
-{
- if (!spl_panic_in_progress)
- put_pages_back_on_all_cpus(pc);
-}
-
-static int
-spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
-{
- struct page_collection pc;
- struct file *filp;
- struct trace_page *tage;
- struct trace_page *tmp;
- mm_segment_t oldfs;
- int rc = 0;
-
- down_write(&trace_sem);
-
- filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
- 0600, &rc);
- if (filp == NULL) {
- if (rc != -EEXIST)
- printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
- filename, rc);
- goto out;
- }
-
- spin_lock_init(&pc.pc_lock);
- collect_pages(dp, &pc);
- if (list_empty(&pc.pc_pages)) {
- rc = 0;
- goto close;
- }
-
- oldfs = get_fs();
- set_fs(get_ds());
-
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- rc = spl_filp_write(filp, page_address(tage->page),
- tage->used, spl_filp_poff(filp));
- if (rc != (int)tage->used) {
- printk(KERN_WARNING "SPL: Wanted to write %u "
- "but wrote %d\n", tage->used, rc);
- put_pages_back(&pc);
- __ASSERT(list_empty(&pc.pc_pages));
- break;
- }
- list_del(&tage->linkage);
- tage_free(tage);
- }
-
- set_fs(oldfs);
-
- rc = spl_filp_fsync(filp, 1);
- if (rc)
- printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
- close:
- spl_filp_close(filp);
- out:
- up_write(&trace_sem);
-
- return rc;
-}
-
-static void
-spl_debug_flush_pages(void)
-{
- dumplog_priv_t dp;
- struct page_collection pc;
- struct trace_page *tage;
- struct trace_page *tmp;
-
- spin_lock_init(&pc.pc_lock);
- init_waitqueue_head(&dp.dp_waitq);
- dp.dp_pid = current->pid;
- dp.dp_flags = 0;
- atomic_set(&dp.dp_done, 0);
-
- collect_pages(&dp, &pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- list_del(&tage->linkage);
- tage_free(tage);
- }
-}
-
-unsigned long
-spl_debug_set_mask(unsigned long mask) {
- spl_debug_mask = mask;
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_mask);
-
-unsigned long
-spl_debug_get_mask(void) {
- return spl_debug_mask;
-}
-EXPORT_SYMBOL(spl_debug_get_mask);
-
-unsigned long
-spl_debug_set_subsys(unsigned long subsys) {
- spl_debug_subsys = subsys;
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_subsys);
-
-unsigned long
-spl_debug_get_subsys(void) {
- return spl_debug_subsys;
-}
-EXPORT_SYMBOL(spl_debug_get_subsys);
-
-int
-spl_debug_set_mb(int mb)
-{
- int i, j, pages;
- int limit = trace_max_debug_mb();
- struct trace_cpu_data *tcd;
-
- if (mb < num_possible_cpus()) {
- printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
- "%dMB - lower limit is %d\n", mb, num_possible_cpus());
- return -EINVAL;
- }
-
- if (mb > limit) {
- printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
- "%dMB - upper limit is %d\n", mb, limit);
- return -EINVAL;
- }
-
- mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_SHIFT);
-
- down_write(&trace_sem);
-
- tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
-
- up_write(&trace_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_mb);
-
-int
-spl_debug_get_mb(void)
-{
- int i, j;
- struct trace_cpu_data *tcd;
- int total_pages = 0;
-
- down_read(&trace_sem);
-
- tcd_for_each(tcd, i, j)
- total_pages += tcd->tcd_max_pages;
-
- up_read(&trace_sem);
-
- return (total_pages >> (20 - PAGE_SHIFT)) + 1;
-}
-EXPORT_SYMBOL(spl_debug_get_mb);
-
-/*
- * Limit the number of stack traces dumped to not more than 5 every
- * 60 seconds to prevent denial-of-service attacks from debug code.
- */
-DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state, 60 * HZ, 5);
-
-void
-spl_debug_dumpstack(struct task_struct *tsk)
-{
- if (__ratelimit(&dumpstack_ratelimit_state)) {
- if (tsk == NULL)
- tsk = current;
-
- printk("SPL: Showing stack for process %d\n", tsk->pid);
- dump_stack();
- }
-}
-EXPORT_SYMBOL(spl_debug_dumpstack);
-
-void spl_debug_bug(char *file, const char *func, const int line, int flags)
-{
- spl_debug_catastrophe = 1;
- spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");
-
- if (in_interrupt())
- panic("SPL PANIC in interrupt.\n");
-
- if (in_atomic() || irqs_disabled())
- flags |= DL_NOTHREAD;
-
- /* Ensure all debug pages and dumped by current cpu */
- if (spl_debug_panic_on_bug)
- spl_panic_in_progress = 1;
-
- spl_debug_dumpstack(NULL);
-
- if (spl_debug_panic_on_bug) {
- spl_debug_dumplog(flags);
- panic("SPL PANIC");
- }
-
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- while (1)
- schedule();
-}
-EXPORT_SYMBOL(spl_debug_bug);
-
-int
-spl_debug_clear_buffer(void)
-{
- spl_debug_flush_pages();
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_clear_buffer);
-
-int
-spl_debug_mark_buffer(char *text)
-{
- SDEBUG(SD_WARNING, "*************************************\n");
- SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
- SDEBUG(SD_WARNING, "*************************************\n");
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_mark_buffer);
-
-static int
-trace_init(int max_pages)
-{
- struct trace_cpu_data *tcd;
- int i, j;
-
- init_rwsem(&trace_sem);
-
- /* initialize trace_data */
- memset(trace_data, 0, sizeof(trace_data));
- for (i = 0; i < TCD_TYPE_MAX; i++) {
- trace_data[i] = kmalloc(sizeof(union trace_data_union) *
- NR_CPUS, GFP_KERNEL);
- if (trace_data[i] == NULL)
- goto out;
- }
-
- tcd_for_each(tcd, i, j) {
- spin_lock_init(&tcd->tcd_lock);
- tcd->tcd_pages_factor = pages_factor[i];
- tcd->tcd_type = i;
- tcd->tcd_cpu = j;
- INIT_LIST_HEAD(&tcd->tcd_pages);
- INIT_LIST_HEAD(&tcd->tcd_stock_pages);
- tcd->tcd_cur_pages = 0;
- tcd->tcd_cur_stock_pages = 0;
- tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
- tcd->tcd_shutting_down = 0;
- }
-
- for (i = 0; i < num_possible_cpus(); i++) {
- for (j = 0; j < 3; j++) {
- trace_console_buffers[i][j] =
- kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
-
- if (trace_console_buffers[i][j] == NULL)
- goto out;
- }
- }
-
- return 0;
-out:
- trace_fini();
- printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
- return -ENOMEM;
-}
-
-int
-spl_debug_init(void)
-{
- int rc, max = spl_debug_mb;
-
- spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
- spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
-
- /* If spl_debug_mb is set to an invalid value or uninitialized
- * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
- if (max > (totalram_pages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
- max >= 512 || max < 0) {
- max = TCD_MAX_PAGES;
- } else {
- max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
- }
-
- rc = trace_init(max);
- if (rc)
- return rc;
-
- return rc;
-}
-
-static void
-trace_cleanup_on_all_cpus(void)
-{
- struct trace_cpu_data *tcd;
- struct trace_page *tage;
- struct trace_page *tmp;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- tcd->tcd_shutting_down = 1;
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
- linkage) {
- list_del(&tage->linkage);
- tage_free(tage);
- }
- tcd->tcd_cur_pages = 0;
- }
- }
-}
-
-static void
-trace_fini(void)
-{
- int i, j;
-
- trace_cleanup_on_all_cpus();
-
- for (i = 0; i < num_possible_cpus(); i++) {
- for (j = 0; j < 3; j++) {
- if (trace_console_buffers[i][j] != NULL) {
- kfree(trace_console_buffers[i][j]);
- trace_console_buffers[i][j] = NULL;
- }
- }
- }
-
- for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
- kfree(trace_data[i]);
- trace_data[i] = NULL;
- }
-}
-
-void
-spl_debug_fini(void)
-{
- trace_fini();
-}
-
-#endif /* DEBUG_LOG */
diff --git a/module/spl/spl-err.c b/module/spl/spl-err.c
index 2706f9bd1..14ff8a337 100644
--- a/module/spl/spl-err.c
+++ b/module/spl/spl-err.c
@@ -26,66 +26,81 @@
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
-#include <spl-debug.h>
+#include <linux/ratelimit.h>
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
+/*
+ * Limit the number of stack traces dumped to not more than 5 every
+ * 60 seconds to prevent denial-of-service attacks from debug code.
+ */
+DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state, 60 * HZ, 5);
-#define SS_DEBUG_SUBSYS SS_GENERIC
-
-#ifdef DEBUG_LOG
-static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
-static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
-#endif
+void
+spl_dumpstack(void)
+{
+ if (__ratelimit(&dumpstack_ratelimit_state)) {
+ printk("Showing stack for process %d\n", current->pid);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(spl_dumpstack);
int
-spl_PANIC(char *filename, const char *functionname,
- int lineno, const char *fmt, ...) {
+spl_panic(const char *file, const char *func, int line, const char *fmt, ...) {
+ const char *newfile;
char msg[MAXMSGLEN];
va_list ap;
+ newfile = strrchr(file, '/');
+ if (newfile != NULL)
+ newfile = newfile + 1;
+ else
+ newfile = file;
+
va_start(ap, fmt);
- if (vsnprintf(msg, sizeof (msg), fmt, ap) == sizeof (msg))
- msg[sizeof (msg) - 1] = '\0';
+ (void) vsnprintf(msg, sizeof (msg), fmt, ap);
va_end(ap);
-#ifdef NDEBUG
+
printk(KERN_EMERG "%s", msg);
-#else
- spl_debug_msg(NULL, 0, 0,
- filename, functionname, lineno, "%s", msg);
-#endif
- spl_debug_bug(filename, functionname, lineno, 0);
- return 1;
-}
-EXPORT_SYMBOL(spl_PANIC);
+ printk(KERN_EMERG "PANIC at %s:%d:%s()\n", newfile, line, func);
+ spl_dumpstack();
-void
-vpanic(const char *fmt, va_list ap)
-{
- char msg[MAXMSGLEN];
+ /* Halt the thread to facilitate further debugging */
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
- vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
- PANIC("%s", msg);
-} /* vpanic() */
-EXPORT_SYMBOL(vpanic);
+ /* Unreachable */
+ return (1);
+}
+EXPORT_SYMBOL(spl_panic);
void
vcmn_err(int ce, const char *fmt, va_list ap)
{
char msg[MAXMSGLEN];
- if (ce == CE_PANIC)
- vpanic(fmt, ap);
+ vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
- if (ce != CE_NOTE) {
- vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
+ switch (ce) {
+ case CE_IGNORE:
+ break;
+ case CE_CONT:
+ printk("%s", msg);
+ break;
+ case CE_NOTE:
+ printk(KERN_NOTICE "NOTICE: %s\n", msg);
+ break;
+ case CE_WARN:
+ printk(KERN_WARNING "WARNING: %s\n", msg);
+ break;
+ case CE_PANIC:
+ printk(KERN_EMERG "PANIC: %s\n", msg);
+ spl_dumpstack();
- if (fmt[0] == '!')
- SDEBUG(SD_INFO, "%s%s%s",
- ce_prefix[ce], msg, ce_suffix[ce]);
- else
- SERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
+ /* Halt the thread to facilitate further debugging */
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
}
} /* vcmn_err() */
EXPORT_SYMBOL(vcmn_err);
@@ -100,4 +115,3 @@ cmn_err(int ce, const char *fmt, ...)
va_end(ap);
} /* cmn_err() */
EXPORT_SYMBOL(cmn_err);
-
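Illustrative usage of the rewritten error reporting above (not part of this patch; the call sites are hypothetical): cmn_err() now maps each severity directly onto printk() levels, and CE_PANIC additionally dumps the stack and halts the thread rather than going through the removed spl_debug_bug() path.

#include <sys/cmn_err.h>

static void
example_report(int err)
{
	/* Printed via KERN_NOTICE with a "NOTICE: " prefix */
	cmn_err(CE_NOTE, "example: retrying operation (error %d)", err);

	/* Printed via KERN_WARNING with a "WARNING: " prefix */
	cmn_err(CE_WARN, "example: device is degraded");

	/* Would print via KERN_EMERG, call spl_dumpstack(), and never return */
	/* cmn_err(CE_PANIC, "example: unrecoverable state"); */
}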
diff --git a/module/spl/spl-generic.c b/module/spl/spl-generic.c
index fd68789bc..ecfb663de 100644
--- a/module/spl/spl-generic.c
+++ b/module/spl/spl-generic.c
@@ -40,13 +40,6 @@
#include <sys/file.h>
#include <linux/kmod.h>
#include <linux/proc_compat.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_GENERIC
char spl_version[32] = "SPL v" SPL_META_VERSION "-" SPL_META_RELEASE;
EXPORT_SYMBOL(spl_version);
@@ -490,39 +483,36 @@ __init spl_init(void)
{
int rc = 0;
- if ((rc = spl_debug_init()))
- return rc;
-
if ((rc = spl_kmem_init()))
- SGOTO(out1, rc);
+ goto out1;
if ((rc = spl_mutex_init()))
- SGOTO(out2, rc);
+ goto out2;
if ((rc = spl_rw_init()))
- SGOTO(out3, rc);
+ goto out3;
if ((rc = spl_taskq_init()))
- SGOTO(out4, rc);
+ goto out4;
if ((rc = spl_vn_init()))
- SGOTO(out5, rc);
+ goto out5;
if ((rc = spl_proc_init()))
- SGOTO(out6, rc);
+ goto out6;
if ((rc = spl_kstat_init()))
- SGOTO(out7, rc);
+ goto out7;
if ((rc = spl_tsd_init()))
- SGOTO(out8, rc);
+ goto out8;
if ((rc = spl_zlib_init()))
- SGOTO(out9, rc);
+ goto out9;
printk(KERN_NOTICE "SPL: Loaded module v%s-%s%s\n", SPL_META_VERSION,
SPL_META_RELEASE, SPL_DEBUG_STR);
- SRETURN(rc);
+ return (rc);
out9:
spl_tsd_fini();
@@ -541,19 +531,16 @@ out3:
out2:
spl_kmem_fini();
out1:
- spl_debug_fini();
-
printk(KERN_NOTICE "SPL: Failed to Load Solaris Porting Layer "
"v%s-%s%s, rc = %d\n", SPL_META_VERSION, SPL_META_RELEASE,
SPL_DEBUG_STR, rc);
+
return rc;
}
static void
spl_fini(void)
{
- SENTRY;
-
printk(KERN_NOTICE "SPL: Unloaded module v%s-%s%s\n",
SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
spl_zlib_fini();
@@ -565,7 +552,6 @@ spl_fini(void)
spl_rw_fini();
spl_mutex_fini();
spl_kmem_fini();
- spl_debug_fini();
}
/* Called when a dependent module is loaded */
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 65aa27739..37849f504 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -25,13 +25,6 @@
\*****************************************************************************/
#include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KMEM
/*
* Within the scope of spl-kmem.c file the kmem_cache_* definitions
@@ -265,7 +258,6 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
- SENTRY;
spin_lock_irqsave(lock, flags);
@@ -282,7 +274,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *
spin_unlock_irqrestore(lock, flags);
- SRETURN(NULL);
+ return (NULL);
}
void *
@@ -292,28 +284,26 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- SENTRY;
/* Function may be called with KM_NOSLEEP so failure is possible */
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (unlikely(dptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
- "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
- sizeof(kmem_debug_t), flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
+ printk(KERN_WARNING "debug kmem_alloc(%ld, 0x%x) at %s:%d "
+ "failed (%lld/%llu)\n", sizeof(kmem_debug_t), flags,
+ func, line, kmem_alloc_used_read(), kmem_alloc_max);
} else {
/*
* Marked unlikely because we should never be doing this,
* we tolerate to up 2 pages but a single page is best.
*/
if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
- "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
+ printk(KERN_WARNING "large kmem_alloc(%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long)size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ spl_dumpstack();
}
/*
@@ -325,9 +315,9 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "debug __strdup() at %s:%d failed (%lld/%llu)\n",
- func, line, kmem_alloc_used_read(), kmem_alloc_max);
+ printk(KERN_WARNING "debug __strdup() at %s:%d "
+ "failed (%lld/%llu)\n", func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
@@ -344,8 +334,8 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
- "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "kmem_alloc(%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
goto out;
@@ -367,14 +357,9 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
&kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
list_add_tail(&dptr->kd_list, &kmem_list);
spin_unlock_irqrestore(&kmem_lock, irq_flags);
-
- SDEBUG_LIMIT(SD_INFO,
- "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line, ptr,
- kmem_alloc_used_read(), kmem_alloc_max);
}
out:
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
@@ -382,14 +367,12 @@ void
kmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
-
/* Must exist in hash due to kmem_alloc() */
+ dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
ASSERT(dptr);
/* Size must match */
@@ -398,10 +381,6 @@ kmem_free_track(const void *ptr, size_t size)
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
kmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, kmem_alloc_used_read(),
- kmem_alloc_max);
-
kfree(dptr->kd_func);
memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
@@ -409,8 +388,6 @@ kmem_free_track(const void *ptr, size_t size)
memset((void *)ptr, 0x5a, size);
kfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
@@ -420,7 +397,6 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- SENTRY;
ASSERT(flags & KM_SLEEP);
@@ -428,8 +404,8 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (unlikely(dptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
- "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
sizeof(kmem_debug_t), flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
} else {
@@ -443,9 +419,9 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "debug __strdup() at %s:%d failed (%lld/%llu)\n",
- func, line, vmem_alloc_used_read(), vmem_alloc_max);
+ printk(KERN_WARNING "debug __strdup() at %s:%d "
+ "failed (%lld/%llu)\n", func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
@@ -459,8 +435,8 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
- "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "vmem_alloc (%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
goto out;
@@ -482,14 +458,9 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
&vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
list_add_tail(&dptr->kd_list, &vmem_list);
spin_unlock_irqrestore(&vmem_lock, irq_flags);
-
- SDEBUG_LIMIT(SD_INFO,
- "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- ptr, vmem_alloc_used_read(), vmem_alloc_max);
}
out:
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
@@ -497,14 +468,12 @@ void
vmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
-
/* Must exist in hash due to vmem_alloc() */
+ dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
ASSERT(dptr);
/* Size must match */
@@ -513,10 +482,6 @@ vmem_free_track(const void *ptr, size_t size)
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
vmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, vmem_alloc_used_read(),
- vmem_alloc_max);
-
kfree(dptr->kd_func);
memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
@@ -524,8 +489,6 @@ vmem_free_track(const void *ptr, size_t size)
memset((void *)ptr, 0x5a, size);
vfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
@@ -536,18 +499,17 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
int node_alloc, int node)
{
void *ptr;
- SENTRY;
/*
* Marked unlikely because we should never be doing this,
* we tolerate to up 2 pages but a single page is best.
*/
if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
- SDEBUG(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
+ spl_dumpstack();
}
/* Use the correct allocator */
@@ -561,40 +523,26 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
}
if (unlikely(ptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
} else {
kmem_alloc_used_add(size);
if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
kmem_alloc_max = kmem_alloc_used_read();
-
- SDEBUG_LIMIT(SD_INFO,
- "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line, ptr,
- kmem_alloc_used_read(), kmem_alloc_max);
}
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(const void *ptr, size_t size)
{
- SENTRY;
-
- ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
- (unsigned long long) size);
-
+ ASSERT(ptr || size > 0);
kmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, kmem_alloc_used_read(),
- kmem_alloc_max);
kfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
@@ -602,7 +550,6 @@ void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
void *ptr;
- SENTRY;
ASSERT(flags & KM_SLEEP);
@@ -614,39 +561,26 @@ vmem_alloc_debug(size_t size, int flags, const char *func, int line)
}
if (unlikely(ptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- vmem_alloc_used_read(), vmem_alloc_max);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max);
} else {
vmem_alloc_used_add(size);
if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
vmem_alloc_max = vmem_alloc_used_read();
-
- SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
- vmem_alloc_used_read(), vmem_alloc_max);
}
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(const void *ptr, size_t size)
{
- SENTRY;
-
- ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
- (unsigned long long) size);
-
+ ASSERT(ptr || size > 0);
vmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, vmem_alloc_used_read(),
- vmem_alloc_max);
vfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);
@@ -833,7 +767,7 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
- SRETURN(NULL);
+ return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
@@ -851,8 +785,10 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
obj = kv_alloc(skc, offslab_size, flags);
- if (!obj)
- SGOTO(out, rc = -ENOMEM);
+ if (!obj) {
+ rc = -ENOMEM;
+ goto out;
+ }
} else {
obj = base + spl_sks_size(skc) + (i * obj_size);
}
@@ -877,7 +813,7 @@ out:
sks = NULL;
}
- SRETURN(sks);
+ return (sks);
}
/*
@@ -890,7 +826,6 @@ spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
- SENTRY;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
@@ -910,8 +845,6 @@ spl_slab_free(spl_kmem_slab_t *sks,
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
-
- SEXIT;
}
/*
@@ -931,7 +864,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
LIST_HEAD(sko_list);
uint32_t size = 0;
int i = 0;
- SENTRY;
/*
* Move empty slabs and objects which have not been touched in
@@ -979,8 +911,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
-
- SEXIT;
}
static spl_kmem_emergency_t *
@@ -1037,23 +967,22 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
spl_kmem_emergency_t *ske;
int empty;
- SENTRY;
/* Last chance use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
- SRETURN(-EEXIST);
+ return (-EEXIST);
ske = kmalloc(sizeof(*ske), flags);
if (ske == NULL)
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
if (ske->ske_obj == NULL) {
kfree(ske);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
@@ -1069,12 +998,12 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
if (unlikely(!empty)) {
kfree(ske->ske_obj);
kfree(ske);
- SRETURN(-EINVAL);
+ return (-EINVAL);
}
*obj = ske->ske_obj;
- SRETURN(0);
+ return (0);
}
/*
@@ -1084,7 +1013,6 @@ static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
- SENTRY;
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
@@ -1096,12 +1024,12 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
spin_unlock(&skc->skc_lock);
if (unlikely(ske == NULL))
- SRETURN(-ENOENT);
+ return (-ENOENT);
kfree(ske->ske_obj);
kfree(ske);
- SRETURN(0);
+ return (0);
}
/*
@@ -1112,7 +1040,6 @@ static void
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
int i, count = MIN(flush, skm->skm_avail);
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
@@ -1124,8 +1051,6 @@ __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof(void *) * skm->skm_avail);
-
- SEXIT;
}
static void
@@ -1227,7 +1152,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
if (skc->skc_flags & KMC_OFFSLAB) {
*objs = spl_kmem_cache_obj_per_slab;
*size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
- SRETURN(0);
+ return (0);
} else {
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
@@ -1241,7 +1166,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= spl_kmem_cache_obj_per_slab)
- SRETURN(0);
+ return (0);
}
/*
@@ -1252,10 +1177,10 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
*size = max_size;
*objs = (*size - sks_size) / obj_size;
if (*objs >= (spl_kmem_cache_obj_per_slab_min))
- SRETURN(0);
+ return (0);
}
- SRETURN(-ENOSPC);
+ return (-ENOSPC);
}
/*
@@ -1268,7 +1193,6 @@ spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
- SENTRY;
/* Per-magazine sizes below assume a 4Kib page size */
if (obj_size > (PAGE_SIZE * 256))
@@ -1282,7 +1206,7 @@ spl_magazine_size(spl_kmem_cache_t *skc)
else
size = 256;
- SRETURN(size);
+ return (size);
}
/*
@@ -1294,7 +1218,6 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
- SENTRY;
skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
if (skm) {
@@ -1307,7 +1230,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
skm->skm_cpu = cpu;
}
- SRETURN(skm);
+ return (skm);
}
/*
@@ -1319,12 +1242,10 @@ spl_magazine_free(spl_kmem_magazine_t *skm)
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skm->skm_size;
- SENTRY;
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kmem_free(skm, size);
- SEXIT;
}
/*
@@ -1334,10 +1255,9 @@ static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i;
- SENTRY;
if (skc->skc_flags & KMC_NOMAGAZINE)
- SRETURN(0);
+ return (0);
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
@@ -1348,11 +1268,11 @@ spl_magazine_create(spl_kmem_cache_t *skc)
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
}
- SRETURN(0);
+ return (0);
}
/*
@@ -1363,20 +1283,15 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i;
- SENTRY;
- if (skc->skc_flags & KMC_NOMAGAZINE) {
- SEXIT;
+ if (skc->skc_flags & KMC_NOMAGAZINE)
return;
- }
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
-
- SEXIT;
}
/*
@@ -1409,11 +1324,13 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
{
spl_kmem_cache_t *skc;
int rc;
- SENTRY;
- ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
- ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
- ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
+ /*
+ * Unsupported flags
+ */
+ ASSERT0(flags & KMC_NOMAGAZINE);
+ ASSERT0(flags & KMC_NOHASH);
+ ASSERT0(flags & KMC_QCACHE);
ASSERT(vmp == NULL);
might_sleep();
@@ -1427,14 +1344,14 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
*/
skc = kmem_zalloc(sizeof(*skc), KM_SLEEP| KM_NODEBUG);
if (skc == NULL)
- SRETURN(NULL);
+ return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
- SRETURN(NULL);
+ return (NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
@@ -1519,16 +1436,18 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
- SGOTO(out, rc);
+ goto out;
rc = spl_magazine_create(skc);
if (rc)
- SGOTO(out, rc);
+ goto out;
} else {
skc->skc_linux_cache = kmem_cache_create(
skc->skc_name, size, align, 0, NULL);
- if (skc->skc_linux_cache == NULL)
- SGOTO(out, rc = ENOMEM);
+ if (skc->skc_linux_cache == NULL) {
+ rc = ENOMEM;
+ goto out;
+ }
kmem_cache_set_allocflags(skc, __GFP_COMP);
skc->skc_flags |= KMC_NOMAGAZINE;
@@ -1543,11 +1462,11 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
- SRETURN(skc);
+ return (skc);
out:
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
- SRETURN(NULL);
+ return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
@@ -1571,7 +1490,6 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
@@ -1617,8 +1535,6 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
spin_unlock(&skc->skc_lock);
kmem_free(skc, sizeof(*skc));
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
@@ -1708,7 +1624,6 @@ static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
@@ -1722,7 +1637,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
- SRETURN(rc ? rc : -EAGAIN);
+ return (rc ? rc : -EAGAIN);
}
/*
@@ -1738,7 +1653,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
if (ska == NULL) {
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
wake_up_all(&skc->skc_waitq);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
@@ -1776,7 +1691,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
rc = -ENOMEM;
}
- SRETURN(rc);
+ return (rc);
}
/*
@@ -1792,7 +1707,6 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
@@ -1811,14 +1725,14 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
- SRETURN(obj);
+ return (obj);
if (rc)
- SGOTO(out, rc);
+ goto out;
/* Rescheduled to different CPU skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
- SGOTO(out, rc);
+ goto out;
/* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
@@ -1853,7 +1767,7 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
spin_unlock(&skc->skc_lock);
out:
- SRETURN(NULL);
+ return (NULL);
}
/*
@@ -1864,7 +1778,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
@@ -1895,8 +1808,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
-
- SEXIT;
}
/*
@@ -1908,7 +1819,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@@ -1939,9 +1849,7 @@ restart:
* the local magazine since this may have changed
* when we need to grow the cache. */
skm = skc->skc_mag[smp_processor_id()];
- ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
- skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
- skm->skm_size, skm->skm_refill, skm->skm_avail);
+ ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
@@ -1950,7 +1858,7 @@ restart:
} else {
obj = spl_cache_refill(skc, skm, flags);
if (obj == NULL)
- SGOTO(restart, obj = NULL);
+ goto restart;
}
local_irq_enable();
@@ -1968,7 +1876,7 @@ ret:
atomic_dec(&skc->skc_ref);
- SRETURN(obj);
+ return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
@@ -1984,7 +1892,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@@ -2009,8 +1916,10 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
* are guaranteed to have physical addresses. They must be removed
* from the tree of emergency objects and the freed.
*/
- if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
- SGOTO(out, spl_emergency_free(skc, obj));
+ if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj)) {
+ spl_emergency_free(skc, obj);
+ goto out;
+ }
local_irq_save(flags);
@@ -2031,8 +1940,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
local_irq_restore(flags);
out:
atomic_dec(&skc->skc_ref);
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
@@ -2113,8 +2020,6 @@ SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
- SENTRY;
-
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@@ -2131,14 +2036,14 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
kmem_cache_shrink(skc->skc_linux_cache);
- SGOTO(out, 0);
+ goto out;
}
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
- SGOTO(out, 0);
+ goto out;
/*
* When a reclaim function is available it may be invoked repeatedly
@@ -2190,8 +2095,6 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
@@ -2256,7 +2159,6 @@ static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
int i;
- SENTRY;
spin_lock_init(lock);
INIT_LIST_HEAD(list);
@@ -2264,7 +2166,7 @@ spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
for (i = 0; i < size; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
- SRETURN(0);
+ return (0);
}
static void
@@ -2273,7 +2175,6 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
unsigned long flags;
kmem_debug_t *kd;
char str[17];
- SENTRY;
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
@@ -2286,7 +2187,6 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
- SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
@@ -2297,7 +2197,6 @@ int
spl_kmem_init(void)
{
int rc = 0;
- SENTRY;
#ifdef DEBUG_KMEM
kmem_alloc_used_set(0);
@@ -2314,14 +2213,12 @@ spl_kmem_init(void)
spl_register_shrinker(&spl_kmem_cache_shrinker);
- SRETURN(rc);
+ return (rc);
}
void
spl_kmem_fini(void)
{
- SENTRY;
-
spl_unregister_shrinker(&spl_kmem_cache_shrinker);
taskq_destroy(spl_kmem_cache_taskq);
@@ -2331,19 +2228,14 @@ spl_kmem_fini(void)
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
if (kmem_alloc_used_read() != 0)
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "kmem leaked %ld/%ld bytes\n",
+ printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
kmem_alloc_used_read(), kmem_alloc_max);
-
if (vmem_alloc_used_read() != 0)
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "vmem leaked %ld/%ld bytes\n",
+ printk(KERN_WARNING "vmem leaked %ld/%llu bytes\n",
vmem_alloc_used_read(), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
-
- SEXIT;
}
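The spl-kmem.c hunks above are representative of the whole conversion: SENTRY/SEXIT pairs vanish, SRETURN(x) collapses to return (x), and SGOTO(label, expr) becomes the expression followed by a bare goto. A minimal sketch of the pattern for out-of-tree code that still uses the legacy macros; the function, structure and EXAMPLE_SIZE below are hypothetical, and the old-macro semantics are inferred from these hunks rather than from spl-debug.h itself.

#include <sys/kmem.h>

#define EXAMPLE_SIZE 128                /* illustrative only */

struct example {
    void *ex_buf;
};

/*
 * Legacy form (inferred from the hunks above):
 *
 *     SENTRY;                          -- logged function entry
 *     ex->ex_buf = kmem_zalloc(EXAMPLE_SIZE, KM_SLEEP);
 *     if (ex->ex_buf == NULL)
 *         SGOTO(out, rc = -ENOMEM);    -- evaluated rc, logged, jumped
 *     SRETURN(0);                      -- logged exit, then returned
 *
 * Equivalent plain C, with tracing left to Linux tracepoints:
 */
static int
example_init(struct example *ex)
{
    int rc;

    ex->ex_buf = kmem_zalloc(EXAMPLE_SIZE, KM_SLEEP);
    if (ex->ex_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    return (0);
out:
    return (rc);
}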
diff --git a/module/spl/spl-kobj.c b/module/spl/spl-kobj.c
index f14f47f5d..5b29fdb58 100644
--- a/module/spl/spl-kobj.c
+++ b/module/spl/spl-kobj.c
@@ -25,13 +25,6 @@
\*****************************************************************************/
#include <sys/kobj.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KOBJ
struct _buf *
kobj_open_file(const char *name)
@@ -39,38 +32,34 @@ kobj_open_file(const char *name)
struct _buf *file;
vnode_t *vp;
int rc;
- SENTRY;
file = kmalloc(sizeof(_buf_t), GFP_KERNEL);
if (file == NULL)
- SRETURN((_buf_t *)-1UL);
+ return ((_buf_t *)-1UL);
if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0))) {
kfree(file);
- SRETURN((_buf_t *)-1UL);
+ return ((_buf_t *)-1UL);
}
file->vp = vp;
- SRETURN(file);
+ return (file);
} /* kobj_open_file() */
EXPORT_SYMBOL(kobj_open_file);
void
kobj_close_file(struct _buf *file)
{
- SENTRY;
VOP_CLOSE(file->vp, 0, 0, 0, 0, 0);
kfree(file);
- SEXIT;
} /* kobj_close_file() */
EXPORT_SYMBOL(kobj_close_file);
int
kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off)
{
- SENTRY;
- SRETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
+ return (vn_rdwr(UIO_READ, file->vp, buf, size, off,
UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL));
} /* kobj_read_file() */
EXPORT_SYMBOL(kobj_read_file);
@@ -80,14 +69,13 @@ kobj_get_filesize(struct _buf *file, uint64_t *size)
{
vattr_t vap;
int rc;
- SENTRY;
rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL);
if (rc)
- SRETURN(rc);
+ return (rc);
*size = vap.va_size;
- SRETURN(rc);
+ return (rc);
} /* kobj_get_filesize() */
EXPORT_SYMBOL(kobj_get_filesize);
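Beyond dropping the macros, the spl-kobj.c hunks leave the Solaris-style error convention untouched: kobj_open_file() still returns (_buf_t *)-1UL on failure rather than NULL. A hedged usage sketch of the interface as it appears above; the caller, path handling and 32-byte buffer are illustrative, and kobj_read_file()'s return value is deliberately ignored here since it simply forwards vn_rdwr()'s convention.

#include <sys/kobj.h>

/* Hypothetical consumer: peek at the first bytes of a small file. */
static int
example_read_small_file(const char *path)
{
    struct _buf *file;
    uint64_t size = 0;
    char buf[32];

    file = kobj_open_file(path);
    if (file == (struct _buf *)-1UL)    /* failure sentinel, not NULL */
        return (-ENOENT);

    if (kobj_get_filesize(file, &size) == 0 && size > 0)
        (void) kobj_read_file(file, buf,
            size < sizeof (buf) ? size : sizeof (buf), 0);

    kobj_close_file(file);
    return (0);
}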
diff --git a/module/spl/spl-kstat.c b/module/spl/spl-kstat.c
index c604a32f2..cb27ed3d3 100644
--- a/module/spl/spl-kstat.c
+++ b/module/spl/spl-kstat.c
@@ -26,13 +26,7 @@
#include <linux/seq_file.h>
#include <sys/kstat.h>
-#include <spl-debug.h>
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KSTAT
#ifndef HAVE_PDE_DATA
#define PDE_DATA(x) (PDE(x)->data)
#endif
@@ -344,7 +338,6 @@ static void *
kstat_seq_data_addr(kstat_t *ksp, loff_t n)
{
void *rc = NULL;
- SENTRY;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
@@ -369,7 +362,7 @@ kstat_seq_data_addr(kstat_t *ksp, loff_t n)
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
- SRETURN(rc);
+ return (rc);
}
static void *
@@ -378,7 +371,6 @@ kstat_seq_start(struct seq_file *f, loff_t *pos)
loff_t n = *pos;
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
- SENTRY;
mutex_enter(ksp->ks_lock);
@@ -393,12 +385,12 @@ kstat_seq_start(struct seq_file *f, loff_t *pos)
ksp->ks_snaptime = gethrtime();
if (!n && kstat_seq_show_headers(f))
- SRETURN(NULL);
+ return (NULL);
if (n >= ksp->ks_ndata)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(kstat_seq_data_addr(ksp, n));
+ return (kstat_seq_data_addr(ksp, n));
}
static void *
@@ -406,13 +398,12 @@ kstat_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
- SENTRY;
++*pos;
if (*pos >= ksp->ks_ndata)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(kstat_seq_data_addr(ksp, *pos));
+ return (kstat_seq_data_addr(ksp, *pos));
}
static void
@@ -689,19 +680,16 @@ EXPORT_SYMBOL(__kstat_delete);
int
spl_kstat_init(void)
{
- SENTRY;
mutex_init(&kstat_module_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&kstat_module_list);
kstat_id = 0;
- SRETURN(0);
+ return (0);
}
void
spl_kstat_fini(void)
{
- SENTRY;
ASSERT(list_empty(&kstat_module_list));
mutex_destroy(&kstat_module_lock);
- SEXIT;
}
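The spl-kstat.c changes land almost entirely in the standard seq_file iterator callbacks, where SRETURN(NULL) simply becomes return (NULL) to end the walk. For readers less familiar with that interface, a minimal generic sketch of the same start/next/stop/show shape follows; the data array and names are hypothetical and unrelated to kstat.

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int example_data[] = { 1, 2, 3 };        /* stand-in data set */

static void *
example_seq_start(struct seq_file *f, loff_t *pos)
{
    /* Returning NULL terminates the walk, as in kstat_seq_start() above. */
    return (*pos < (loff_t)ARRAY_SIZE(example_data) ?
        &example_data[*pos] : NULL);
}

static void *
example_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
    ++*pos;
    return (example_seq_start(f, pos));
}

static void
example_seq_stop(struct seq_file *f, void *p)
{
}

static int
example_seq_show(struct seq_file *f, void *p)
{
    seq_printf(f, "%d\n", *(int *)p);
    return (0);
}

static const struct seq_operations example_seq_ops = {
    .start  = example_seq_start,
    .next   = example_seq_next,
    .stop   = example_seq_stop,
    .show   = example_seq_show,
};

A real user would wire example_seq_ops up through seq_open() in a file_operations .open callback, much as proc_slab_operations backs /proc/spl/kmem/slab in spl-proc.c below.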
diff --git a/module/spl/spl-proc.c b/module/spl/spl-proc.c
index 6ecc0c31c..137af7188 100644
--- a/module/spl/spl-proc.c
+++ b/module/spl/spl-proc.c
@@ -30,13 +30,6 @@
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/version.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_PROC
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
@@ -110,209 +103,6 @@ proc_copyout_string(char *ubuffer, int ubuffer_size,
return size;
}
-#ifdef DEBUG_LOG
-static int
-proc_dobitmasks(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- unsigned long *mask = table->data;
- int is_subsys = (mask == &spl_debug_subsys) ? 1 : 0;
- int is_printk = (mask == &spl_debug_printk) ? 1 : 0;
- int size = 512, rc;
- char *str;
- SENTRY;
-
- str = kmem_alloc(size, KM_SLEEP);
- if (str == NULL)
- SRETURN(-ENOMEM);
-
- if (write) {
- rc = proc_copyin_string(str, size, buffer, *lenp);
- if (rc < 0)
- SRETURN(rc);
-
- rc = spl_debug_str2mask(mask, str, is_subsys);
- /* Always print BUG/ASSERT to console, so keep this mask */
- if (is_printk)
- *mask |= SD_EMERG;
-
- *ppos += *lenp;
- } else {
- rc = spl_debug_mask2str(str, size, *mask, is_subsys);
- if (*ppos >= rc)
- rc = 0;
- else
- rc = proc_copyout_string(buffer, *lenp,
- str + *ppos, "\n");
- if (rc >= 0) {
- *lenp = rc;
- *ppos += rc;
- }
- }
-
- kmem_free(str, size);
- SRETURN(rc);
-}
-
-static int
-proc_debug_mb(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- char str[32];
- int rc, len;
- SENTRY;
-
- if (write) {
- rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
- if (rc < 0)
- SRETURN(rc);
-
- rc = spl_debug_set_mb(simple_strtoul(str, NULL, 0));
- *ppos += *lenp;
- } else {
- len = snprintf(str, sizeof(str), "%d", spl_debug_get_mb());
- if (*ppos >= len)
- rc = 0;
- else
- rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
-
- if (rc >= 0) {
- *lenp = rc;
- *ppos += rc;
- }
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_dump_kernel(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- SENTRY;
-
- if (write) {
- spl_debug_dumplog(0);
- *ppos += *lenp;
- } else {
- *lenp = 0;
- }
-
- SRETURN(0);
-}
-
-static int
-proc_force_bug(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- SENTRY;
-
- if (write)
- PANIC("Crashing due to forced panic\n");
- else
- *lenp = 0;
-
- SRETURN(0);
-}
-
-static int
-proc_console_max_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, max_delay_cs;
- spl_ctl_table dummy = *table;
- long d;
- SENTRY;
-
- dummy.data = &max_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- max_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (max_delay_cs <= 0)
- SRETURN(-EINVAL);
-
- d = (max_delay_cs * HZ) / 100;
- if (d == 0 || d < spl_console_min_delay)
- SRETURN(-EINVAL);
-
- spl_console_max_delay = d;
- } else {
- max_delay_cs = (spl_console_max_delay * 100) / HZ;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_console_min_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, min_delay_cs;
- spl_ctl_table dummy = *table;
- long d;
- SENTRY;
-
- dummy.data = &min_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- min_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (min_delay_cs <= 0)
- SRETURN(-EINVAL);
-
- d = (min_delay_cs * HZ) / 100;
- if (d == 0 || d > spl_console_max_delay)
- SRETURN(-EINVAL);
-
- spl_console_min_delay = d;
- } else {
- min_delay_cs = (spl_console_min_delay * 100) / HZ;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_console_backoff(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, backoff;
- spl_ctl_table dummy = *table;
- SENTRY;
-
- dummy.data = &backoff;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- backoff = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (backoff <= 0)
- SRETURN(-EINVAL);
-
- spl_console_backoff = backoff;
- } else {
- backoff = spl_console_backoff;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-#endif /* DEBUG_LOG */
-
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
@@ -321,7 +111,6 @@ proc_domemused(struct ctl_table *table, int write,
int rc = 0;
unsigned long min = 0, max = ~0, val;
spl_ctl_table dummy = *table;
- SENTRY;
dummy.data = &val;
dummy.proc_handler = &proc_dointvec;
@@ -339,7 +128,7 @@ proc_domemused(struct ctl_table *table, int write,
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
}
- SRETURN(rc);
+ return (rc);
}
static int
@@ -350,7 +139,6 @@ proc_doslab(struct ctl_table *table, int write,
unsigned long min = 0, max = ~0, val = 0, mask;
spl_ctl_table dummy = *table;
spl_kmem_cache_t *skc;
- SENTRY;
dummy.data = &val;
dummy.proc_handler = &proc_dointvec;
@@ -387,7 +175,7 @@ proc_doslab(struct ctl_table *table, int write,
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
}
- SRETURN(rc);
+ return (rc);
}
#endif /* DEBUG_KMEM */
@@ -397,7 +185,6 @@ proc_dohostid(struct ctl_table *table, int write,
{
int len, rc = 0;
char *end, str[32];
- SENTRY;
if (write) {
/* We can't use proc_doulongvec_minmax() in the write
@@ -405,11 +192,11 @@ proc_dohostid(struct ctl_table *table, int write,
* leading 0x which confuses the helper function. */
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
if (rc < 0)
- SRETURN(rc);
+ return (rc);
spl_hostid = simple_strtoul(str, &end, 16);
if (str == end)
- SRETURN(-EINVAL);
+ return (-EINVAL);
} else {
len = snprintf(str, sizeof(str), "%lx", spl_hostid);
@@ -424,7 +211,7 @@ proc_dohostid(struct ctl_table *table, int write,
}
}
- SRETURN(rc);
+ return (rc);
}
#ifdef DEBUG_KMEM
@@ -487,7 +274,6 @@ slab_seq_start(struct seq_file *f, loff_t *pos)
{
struct list_head *p;
loff_t n = *pos;
- SENTRY;
down_read(&spl_kmem_cache_sem);
if (!n)
@@ -497,20 +283,19 @@ slab_seq_start(struct seq_file *f, loff_t *pos)
while (n--) {
p = p->next;
if (p == &spl_kmem_cache_list)
- SRETURN(NULL);
+ return (NULL);
}
- SRETURN(list_entry(p, spl_kmem_cache_t, skc_list));
+ return (list_entry(p, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
spl_kmem_cache_t *skc = p;
- SENTRY;
++*pos;
- SRETURN((skc->skc_list.next == &spl_kmem_cache_list) ?
+ return ((skc->skc_list.next == &spl_kmem_cache_list) ?
NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
}
@@ -541,108 +326,6 @@ static struct file_operations proc_slab_operations = {
};
#endif /* DEBUG_KMEM */
-#ifdef DEBUG_LOG
-static struct ctl_table spl_debug_table[] = {
- {
- .procname = "subsystem",
- .data = &spl_debug_subsys,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "mask",
- .data = &spl_debug_mask,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "printk",
- .data = &spl_debug_printk,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "mb",
- .mode = 0644,
- .proc_handler = &proc_debug_mb,
- },
- {
- .procname = "binary",
- .data = &spl_debug_binary,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "catastrophe",
- .data = &spl_debug_catastrophe,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "panic_on_bug",
- .data = &spl_debug_panic_on_bug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "path",
- .data = spl_debug_file_path,
- .maxlen = sizeof(spl_debug_file_path),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
- {
- .procname = "dump",
- .mode = 0200,
- .proc_handler = &proc_dump_kernel,
- },
- {
- .procname = "force_bug",
- .mode = 0200,
- .proc_handler = &proc_force_bug,
- },
- {
- .procname = "console_ratelimit",
- .data = &spl_console_ratelimit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "console_max_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_max_delay_cs,
- },
- {
- .procname = "console_min_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_min_delay_cs,
- },
- {
- .procname = "console_backoff",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_backoff,
- },
- {
- .procname = "stack_max",
- .data = &spl_debug_stack,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {0},
-};
-#endif /* DEBUG_LOG */
-
#ifdef DEBUG_KMEM
static struct ctl_table spl_kmem_table[] = {
{
@@ -765,13 +448,6 @@ static struct ctl_table spl_table[] = {
.mode = 0644,
.proc_handler = &proc_dohostid,
},
-#ifdef DEBUG_LOG
- {
- .procname = "debug",
- .mode = 0555,
- .child = spl_debug_table,
- },
-#endif
#ifdef DEBUG_KMEM
{
.procname = "kmem",
@@ -812,31 +488,38 @@ int
spl_proc_init(void)
{
int rc = 0;
- SENTRY;
spl_header = register_sysctl_table(spl_root);
if (spl_header == NULL)
- SRETURN(-EUNATCH);
+ return (-EUNATCH);
proc_spl = proc_mkdir("spl", NULL);
- if (proc_spl == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
#ifdef DEBUG_KMEM
proc_spl_kmem = proc_mkdir("kmem", proc_spl);
- if (proc_spl_kmem == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kmem == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
proc_spl_kmem_slab = proc_create_data("slab", 0444,
proc_spl_kmem, &proc_slab_operations, NULL);
- if (proc_spl_kmem_slab == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kmem_slab == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
#endif /* DEBUG_KMEM */
proc_spl_kstat = proc_mkdir("kstat", proc_spl);
- if (proc_spl_kstat == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kstat == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
out:
if (rc) {
remove_proc_entry("kstat", proc_spl);
@@ -848,14 +531,12 @@ out:
unregister_sysctl_table(spl_header);
}
- SRETURN(rc);
+ return (rc);
}
void
spl_proc_fini(void)
{
- SENTRY;
-
remove_proc_entry("kstat", proc_spl);
#ifdef DEBUG_KMEM
remove_proc_entry("slab", proc_spl_kmem);
@@ -865,6 +546,4 @@ spl_proc_fini(void)
ASSERT(spl_header != NULL);
unregister_sysctl_table(spl_header);
-
- SEXIT;
}
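In spl_proc_init() each one-line SGOTO(out, rc = -EUNATCH) becomes an explicit two-statement error path, but the register-then-unwind-on-failure shape is unchanged. A condensed sketch of that idiom with hypothetical /proc entries; the names, and the NULL checks in the unwind, are illustrative rather than copied from the code above.

#include <linux/errno.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *proc_example;
static struct proc_dir_entry *proc_example_stats;

static int
example_proc_init(void)
{
    int rc = 0;

    proc_example = proc_mkdir("example", NULL);
    if (proc_example == NULL) {
        rc = -EUNATCH;
        goto out;
    }

    proc_example_stats = proc_mkdir("stats", proc_example);
    if (proc_example_stats == NULL) {
        rc = -EUNATCH;
        goto out;
    }
out:
    if (rc) {
        /* Unwind only what was actually created, in reverse order. */
        if (proc_example_stats != NULL)
            remove_proc_entry("stats", proc_example);
        if (proc_example != NULL)
            remove_proc_entry("example", NULL);
    }

    return (rc);
}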
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 0cb2ceeaf..951298d9f 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -26,13 +26,6 @@
#include <sys/taskq.h>
#include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_TASKQ
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
@@ -63,7 +56,6 @@ task_alloc(taskq_t *tq, uint_t flags)
{
taskq_ent_t *t;
int count = 0;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -77,17 +69,17 @@ retry:
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
- SRETURN(t);
+ return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
- SRETURN(NULL);
+ return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
- SRETURN(NULL);
+ return (NULL);
/*
* Sleep periodically polling the free list for an available
@@ -103,8 +95,10 @@ retry:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (count < 100)
- SGOTO(retry, count++);
+ if (count < 100) {
+ count++;
+ goto retry;
+ }
}
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
@@ -116,7 +110,7 @@ retry:
tq->tq_nalloc++;
}
- SRETURN(t);
+ return (t);
}
/*
@@ -126,8 +120,6 @@ retry:
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -136,8 +128,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
kmem_free(t, sizeof(taskq_ent_t));
tq->tq_nalloc--;
-
- SEXIT;
}
/*
@@ -147,7 +137,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -167,8 +156,6 @@ task_done(taskq_t *tq, taskq_ent_t *t)
} else {
task_free(tq, t);
}
-
- SEXIT;
}
/*
@@ -222,7 +209,6 @@ taskq_lowest_id(taskq_t *tq)
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -249,7 +235,7 @@ taskq_lowest_id(taskq_t *tq)
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
- SRETURN(lowest_id);
+ return (lowest_id);
}
/*
@@ -261,7 +247,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
taskq_thread_t *w;
struct list_head *l;
- SENTRY;
ASSERT(tq);
ASSERT(tqt);
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -275,8 +260,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
-
- SEXIT;
}
/*
@@ -288,7 +271,6 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
@@ -296,13 +278,13 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
- SRETURN(t);
+ return (t);
if (t->tqent_id > id)
break;
}
- SRETURN(NULL);
+ return (NULL);
}
/*
@@ -317,33 +299,32 @@ taskq_find(taskq_t *tq, taskqid_t id, int *active)
taskq_thread_t *tqt;
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
*active = 0;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
- SRETURN(t);
+ return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
t = tqt->tqt_task;
*active = 1;
- SRETURN(t);
+ return (t);
}
}
- SRETURN(NULL);
+ return (NULL);
}
static int
@@ -405,7 +386,7 @@ taskq_wait_check(taskq_t *tq, taskqid_t id)
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
void
@@ -419,7 +400,7 @@ void
taskq_wait(taskq_t *tq)
{
taskqid_t id;
- SENTRY;
+
ASSERT(tq);
/* Wait for the largest outstanding taskqid */
@@ -428,9 +409,6 @@ taskq_wait(taskq_t *tq)
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
taskq_wait_all(tq, id);
-
- SEXIT;
-
}
EXPORT_SYMBOL(taskq_wait);
@@ -439,7 +417,6 @@ taskq_member(taskq_t *tq, void *t)
{
struct list_head *l;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(t);
@@ -447,10 +424,10 @@ taskq_member(taskq_t *tq, void *t)
list_for_each(l, &tq->tq_thread_list) {
tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
if (tqt->tqt_thread == (struct task_struct *)t)
- SRETURN(1);
+ return (1);
}
- SRETURN(0);
+ return (0);
}
EXPORT_SYMBOL(taskq_member);
@@ -466,7 +443,6 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
taskq_ent_t *t;
int active = 0;
int rc = ENOENT;
- SENTRY;
ASSERT(tq);
@@ -507,7 +483,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
rc = EBUSY;
}
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
@@ -516,7 +492,6 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
ASSERT(tq);
ASSERT(func);
@@ -525,15 +500,15 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
/* Do not queue the task unless there is idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
@@ -559,7 +534,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
@@ -567,9 +542,8 @@ taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
- taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
+ taskq_ent_t *t;
ASSERT(tq);
ASSERT(func);
@@ -578,10 +552,10 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
@@ -603,7 +577,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
spin_unlock(&t->tqent_lock);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
@@ -611,8 +585,6 @@ void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(func);
ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
@@ -650,7 +622,6 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SEXIT;
}
EXPORT_SYMBOL(taskq_dispatch_ent);
@@ -685,7 +656,6 @@ taskq_thread(void *args)
taskq_t *tq;
taskq_ent_t *t;
struct list_head *pend_list;
- SENTRY;
ASSERT(tqt);
tq = tqt->tqt_tq;
@@ -778,7 +748,7 @@ taskq_thread(void *args)
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(0);
+ return (0);
}
taskq_t *
@@ -789,7 +759,6 @@ taskq_create(const char *name, int nthreads, pri_t pri,
taskq_t *tq;
taskq_thread_t *tqt;
int rc = 0, i, j = 0;
- SENTRY;
ASSERT(name != NULL);
ASSERT(pri <= maxclsyspri);
@@ -808,7 +777,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
if (tq == NULL)
- SRETURN(NULL);
+ return (NULL);
spin_lock_init(&tq->tq_lock);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
@@ -869,7 +838,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq = NULL;
}
- SRETURN(tq);
+ return (tq);
}
EXPORT_SYMBOL(taskq_create);
@@ -879,7 +848,6 @@ taskq_destroy(taskq_t *tq)
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
- SENTRY;
ASSERT(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
@@ -929,30 +897,24 @@ taskq_destroy(taskq_t *tq)
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
kmem_free(tq, sizeof(taskq_t));
-
- SEXIT;
}
EXPORT_SYMBOL(taskq_destroy);
int
spl_taskq_init(void)
{
- SENTRY;
-
/* Solaris creates a dynamic taskq of up to 64 threads, however in
* a Linux environment 1 thread per-core is usually about right */
system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
minclsyspri, 4, 512, TASKQ_PREPOPULATE);
if (system_taskq == NULL)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_taskq_fini(void)
{
- SENTRY;
taskq_destroy(system_taskq);
- SEXIT;
}
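The taskq hunks show most of the public API in passing: taskq_create(), taskq_dispatch(), taskq_wait() and taskq_destroy(). A hedged usage sketch assembled only from the signatures visible above; the queue name, worker function and counter are hypothetical, and TQ_SLEEP is assumed to be the usual sleeping dispatch flag.

#include <sys/taskq.h>

static int example_counter;             /* illustrative shared state */

static void
example_worker(void *arg)
{
    int *counter = arg;
    (*counter)++;
}

static int
example_taskq_usage(void)
{
    taskq_t *tq;
    taskqid_t id;

    /* Mirrors spl_taskq_init() above: minalloc=4, maxalloc=512. */
    tq = taskq_create("example_taskq", num_online_cpus(), minclsyspri,
        4, 512, TASKQ_PREPOPULATE);
    if (tq == NULL)
        return (-ENOMEM);

    id = taskq_dispatch(tq, example_worker, &example_counter, TQ_SLEEP);
    if (id == 0) {                      /* 0 means nothing was queued */
        taskq_destroy(tq);
        return (-ENOMEM);
    }

    taskq_wait(tq);                     /* drain everything dispatched */
    taskq_destroy(tq);
    return (0);
}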
diff --git a/module/spl/spl-thread.c b/module/spl/spl-thread.c
index 5c8514051..b0f4d5715 100644
--- a/module/spl/spl-thread.c
+++ b/module/spl/spl-thread.c
@@ -27,13 +27,6 @@
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_THREAD
/*
* Thread interfaces
@@ -73,8 +66,6 @@ thread_generic_wrapper(void *arg)
void
__thread_exit(void)
{
- SENTRY;
- SEXIT;
tsd_exit();
complete_and_exit(NULL, 0);
/* Unreachable */
@@ -92,7 +83,6 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
thread_priv_t *tp;
struct task_struct *tsk;
char *p;
- SENTRY;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
@@ -100,7 +90,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tp = kmem_alloc(sizeof(thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
- SRETURN(NULL);
+ return (NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_name_size = strlen(name) + 1;
@@ -108,7 +98,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tp->tp_name = kmem_alloc(tp->tp_name_size, KM_PUSHPAGE);
if (tp->tp_name == NULL) {
kmem_free(tp, sizeof(thread_priv_t));
- SRETURN(NULL);
+ return (NULL);
}
strncpy(tp->tp_name, name, tp->tp_name_size);
@@ -128,13 +118,11 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tsk = spl_kthread_create(thread_generic_wrapper, (void *)tp,
"%s", tp->tp_name);
- if (IS_ERR(tsk)) {
- SERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
- SRETURN(NULL);
- }
+ if (IS_ERR(tsk))
+ return (NULL);
wake_up_process(tsk);
- SRETURN((kthread_t *)tsk);
+ return ((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
diff --git a/module/spl/spl-tsd.c b/module/spl/spl-tsd.c
index 6e5605b9d..c9d532f4e 100644
--- a/module/spl/spl-tsd.c
+++ b/module/spl/spl-tsd.c
@@ -61,14 +61,6 @@
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/tsd.h>
-#include <spl-debug.h>
-
-#ifdef DEBUG_SUBSYSTEM
-#undef DEBUG_SUBSYSTEM
-#endif
-
-#define DEBUG_SUBSYSTEM SS_TSD
-#define DEBUG_SUBSYSTEM SS_TSD
typedef struct tsd_hash_bin {
spinlock_t hb_lock;
@@ -108,7 +100,6 @@ tsd_hash_search(tsd_hash_table_t *table, uint_t key, pid_t pid)
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
- SENTRY;
hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
bin = &table->ht_bins[hash];
@@ -117,12 +108,12 @@ tsd_hash_search(tsd_hash_table_t *table, uint_t key, pid_t pid)
entry = list_entry(node, tsd_hash_entry_t, he_list);
if ((entry->he_key == key) && (entry->he_pid == pid)) {
spin_unlock(&bin->hb_lock);
- SRETURN(entry);
+ return (entry);
}
}
spin_unlock(&bin->hb_lock);
- SRETURN(NULL);
+ return (NULL);
}
/*
@@ -136,7 +127,6 @@ static void
tsd_hash_dtor(struct hlist_head *work)
{
tsd_hash_entry_t *entry;
- SENTRY;
while (!hlist_empty(work)) {
entry = hlist_entry(work->first, tsd_hash_entry_t, he_list);
@@ -147,8 +137,6 @@ tsd_hash_dtor(struct hlist_head *work)
kmem_free(entry, sizeof(tsd_hash_entry_t));
}
-
- SEXIT;
}
/*
@@ -170,14 +158,13 @@ tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
tsd_hash_bin_t *bin;
ulong_t hash;
int rc = 0;
- SENTRY;
ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);
/* New entry allocate structure, set value, and add to hash */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
entry->he_key = key;
entry->he_pid = pid;
@@ -209,7 +196,7 @@ tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(rc);
+ return (rc);
}
/*
@@ -230,14 +217,13 @@ tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor)
tsd_hash_bin_t *bin;
ulong_t hash;
int keys_checked = 0;
- SENTRY;
ASSERT3P(table, !=, NULL);
/* Allocate entry to be used as a destructor for this key */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
/* Determine next available key value */
spin_lock(&table->ht_lock);
@@ -249,7 +235,7 @@ tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor)
/* Ensure failure when all TSD_KEYS_MAX keys are in use */
if (keys_checked++ >= TSD_KEYS_MAX) {
spin_unlock(&table->ht_lock);
- SRETURN(ENOENT);
+ return (ENOENT);
}
tmp_entry = tsd_hash_search(table, table->ht_key, DTOR_PID);
@@ -273,7 +259,7 @@ tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor)
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(0);
+ return (0);
}
/*
@@ -291,12 +277,11 @@ tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
- SENTRY;
/* Allocate entry to be used as the process reference */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
spin_lock(&table->ht_lock);
entry->he_key = PID_KEY;
@@ -316,7 +301,7 @@ tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(0);
+ return (0);
}
/*
@@ -328,14 +313,10 @@ tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
- SENTRY;
-
ASSERT(spin_is_locked(&table->ht_lock));
hlist_del(&entry->he_list);
list_del_init(&entry->he_key_list);
list_del_init(&entry->he_pid_list);
-
- SEXIT;
}
/*
@@ -350,17 +331,16 @@ tsd_hash_table_init(uint_t bits)
{
tsd_hash_table_t *table;
int hash, size = (1 << bits);
- SENTRY;
table = kmem_zalloc(sizeof(tsd_hash_table_t), KM_SLEEP);
if (table == NULL)
- SRETURN(NULL);
+ return (NULL);
table->ht_bins = kmem_zalloc(sizeof(tsd_hash_bin_t) * size,
KM_SLEEP | KM_NODEBUG);
if (table->ht_bins == NULL) {
kmem_free(table, sizeof(tsd_hash_table_t));
- SRETURN(NULL);
+ return (NULL);
}
for (hash = 0; hash < size; hash++) {
@@ -372,7 +352,7 @@ tsd_hash_table_init(uint_t bits)
table->ht_bits = bits;
table->ht_key = 1;
- SRETURN(table);
+ return (table);
}
/*
@@ -390,7 +370,6 @@ tsd_hash_table_fini(tsd_hash_table_t *table)
tsd_hash_bin_t *bin;
tsd_hash_entry_t *entry;
int size, i;
- SENTRY;
ASSERT3P(table, !=, NULL);
spin_lock(&table->ht_lock);
@@ -410,8 +389,6 @@ tsd_hash_table_fini(tsd_hash_table_t *table)
tsd_hash_dtor(&work);
kmem_free(table->ht_bins, sizeof(tsd_hash_bin_t)*(1<<table->ht_bits));
kmem_free(table, sizeof(tsd_hash_table_t));
-
- SEXIT;
}
/*
@@ -432,20 +409,19 @@ tsd_set(uint_t key, void *value)
tsd_hash_entry_t *entry;
pid_t pid;
int rc;
- SENTRY;
table = tsd_hash_table;
pid = curthread->pid;
ASSERT3P(table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
- SRETURN(EINVAL);
+ return (EINVAL);
/* Entry already exists in hash table update value */
entry = tsd_hash_search(table, key, pid);
if (entry) {
entry->he_value = value;
- SRETURN(0);
+ return (0);
}
/* Add a process entry to the hash if not yet exists */
@@ -453,11 +429,11 @@ tsd_set(uint_t key, void *value)
if (entry == NULL) {
rc = tsd_hash_add_pid(table, pid);
if (rc)
- SRETURN(rc);
+ return (rc);
}
rc = tsd_hash_add(table, key, pid, value);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(tsd_set);
@@ -473,18 +449,17 @@ void *
tsd_get(uint_t key)
{
tsd_hash_entry_t *entry;
- SENTRY;
ASSERT3P(tsd_hash_table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
- SRETURN(NULL);
+ return (NULL);
entry = tsd_hash_search(tsd_hash_table, key, curthread->pid);
if (entry == NULL)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(entry->he_value);
+ return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get);
@@ -503,17 +478,11 @@ EXPORT_SYMBOL(tsd_get);
void
tsd_create(uint_t *keyp, dtor_func_t dtor)
{
- SENTRY;
-
ASSERT3P(keyp, !=, NULL);
- if (*keyp) {
- SEXIT;
+ if (*keyp)
return;
- }
(void)tsd_hash_add_key(tsd_hash_table, keyp, dtor);
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_create);
@@ -534,7 +503,6 @@ tsd_destroy(uint_t *keyp)
tsd_hash_entry_t *dtor_entry, *entry;
tsd_hash_bin_t *dtor_entry_bin, *entry_bin;
ulong_t hash;
- SENTRY;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
@@ -543,7 +511,6 @@ tsd_destroy(uint_t *keyp)
dtor_entry = tsd_hash_search(table, *keyp, DTOR_PID);
if (dtor_entry == NULL) {
spin_unlock(&table->ht_lock);
- SEXIT;
return;
}
@@ -580,8 +547,6 @@ tsd_destroy(uint_t *keyp)
tsd_hash_dtor(&work);
*keyp = 0;
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_destroy);
@@ -601,7 +566,6 @@ tsd_exit(void)
tsd_hash_entry_t *pid_entry, *entry;
tsd_hash_bin_t *pid_entry_bin, *entry_bin;
ulong_t hash;
- SENTRY;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
@@ -610,7 +574,6 @@ tsd_exit(void)
pid_entry = tsd_hash_search(table, PID_KEY, curthread->pid);
if (pid_entry == NULL) {
spin_unlock(&table->ht_lock);
- SEXIT;
return;
}
@@ -646,28 +609,22 @@ tsd_exit(void)
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_exit);
int
spl_tsd_init(void)
{
- SENTRY;
-
tsd_hash_table = tsd_hash_table_init(TSD_HASH_TABLE_BITS_DEFAULT);
if (tsd_hash_table == NULL)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_tsd_fini(void)
{
- SENTRY;
tsd_hash_table_fini(tsd_hash_table);
tsd_hash_table = NULL;
- SEXIT;
}
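The TSD hunks expose the whole thread-specific-data surface: tsd_create(), tsd_set(), tsd_get(), tsd_destroy() and tsd_exit(). A hedged sketch of a typical consumer; the key variable, destructor and integer payload are hypothetical, and the void (*)(void *) destructor shape is assumed from the dtor_func_t usage above.

#include <sys/tsd.h>
#include <sys/kmem.h>

static uint_t example_tsd_key;          /* 0 until tsd_create() assigns it */

static void
example_tsd_dtor(void *value)
{
    kmem_free(value, sizeof (int));
}

static int
example_tsd_usage(void)
{
    int *state;

    /* One-time key allocation; tsd_create() is a no-op once *keyp != 0. */
    tsd_create(&example_tsd_key, example_tsd_dtor);

    state = kmem_alloc(sizeof (int), KM_SLEEP);
    *state = 42;

    if (tsd_set(example_tsd_key, state) != 0) {   /* returns an errno */
        kmem_free(state, sizeof (int));
        return (-ENOMEM);
    }

    /* Later, on the same thread: */
    state = tsd_get(example_tsd_key);
    if (state == NULL || *state != 42)
        return (-EINVAL);

    /* Teardown runs example_tsd_dtor() on every stored value. */
    tsd_destroy(&example_tsd_key);
    return (0);
}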
diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index b9f9d7b1f..cac0aaf29 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -27,13 +27,6 @@
#include <sys/cred.h>
#include <sys/vnode.h>
#include <linux/falloc.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_VNODE
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);
@@ -107,7 +100,6 @@ vnode_t *
vn_alloc(int flag)
{
vnode_t *vp;
- SENTRY;
vp = kmem_cache_alloc(vn_cache, flag);
if (vp != NULL) {
@@ -115,16 +107,14 @@ vn_alloc(int flag)
vp->v_type = 0;
}
- SRETURN(vp);
+ return (vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
void
vn_free(vnode_t *vp)
{
- SENTRY;
kmem_cache_free(vn_cache, vp);
- SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
@@ -137,7 +127,6 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
int rc, saved_umask = 0;
gfp_t saved_gfp;
vnode_t *vp;
- SENTRY;
ASSERT(flags & (FWRITE | FREAD));
ASSERT(seg == UIO_SYSSPACE);
@@ -163,7 +152,7 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
(void)xchg(&current->fs->umask, saved_umask);
if (IS_ERR(fp))
- SRETURN(-PTR_ERR(fp));
+ return (-PTR_ERR(fp));
#ifdef HAVE_2ARGS_VFS_GETATTR
rc = vfs_getattr(&fp->f_path, &stat);
@@ -172,13 +161,13 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
#endif
if (rc) {
filp_close(fp, 0);
- SRETURN(-rc);
+ return (-rc);
}
vp = vn_alloc(KM_SLEEP);
if (!vp) {
filp_close(fp, 0);
- SRETURN(ENOMEM);
+ return (ENOMEM);
}
saved_gfp = mapping_gfp_mask(fp->f_mapping);
@@ -191,7 +180,7 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
*vpp = vp;
mutex_exit(&vp->v_lock);
- SRETURN(0);
+ return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
@@ -201,20 +190,19 @@ vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
{
char *realpath;
int len, rc;
- SENTRY;
ASSERT(vp == rootdir);
len = strlen(path) + 2;
realpath = kmalloc(len, GFP_KERNEL);
if (!realpath)
- SRETURN(ENOMEM);
+ return (ENOMEM);
(void)snprintf(realpath, len, "/%s", path);
rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
kfree(realpath);
- SRETURN(rc);
+ return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
@@ -226,7 +214,6 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
mm_segment_t saved_fs;
struct file *fp;
int rc;
- SENTRY;
ASSERT(uio == UIO_WRITE || uio == UIO_READ);
ASSERT(vp);
@@ -256,16 +243,16 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
fp->f_pos = offset;
if (rc < 0)
- SRETURN(-rc);
+ return (-rc);
if (residp) {
*residp = len - rc;
} else {
if (rc != len)
- SRETURN(EIO);
+ return (EIO);
}
- SRETURN(0);
+ return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
@@ -273,7 +260,6 @@ int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
int rc;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@@ -282,7 +268,7 @@ vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
rc = filp_close(vp->v_file, 0);
vn_free(vp);
- SRETURN(-rc);
+ return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
@@ -386,7 +372,6 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
struct path parent;
struct inode *inode = NULL;
int rc = 0;
- SENTRY;
ASSERT(seg == UIO_SYSSPACE);
ASSERT(flags == RMFILE);
@@ -394,14 +379,18 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
dentry = spl_kern_path_locked(path, &parent);
rc = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
- if (parent.dentry->d_name.name[parent.dentry->d_name.len])
- SGOTO(slashes, rc = 0);
+ if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
+ rc = 0;
+ goto slashes;
+ }
inode = dentry->d_inode;
- if (inode)
+ if (inode) {
atomic_inc(&inode->i_count);
- else
- SGOTO(slashes, rc = 0);
+ } else {
+ rc = 0;
+ goto slashes;
+ }
#ifdef HAVE_2ARGS_VFS_UNLINK
rc = vfs_unlink(parent.dentry->d_inode, dentry);
@@ -419,12 +408,12 @@ exit1:
iput(inode); /* truncate the inode here */
path_put(&parent);
- SRETURN(-rc);
+ return (-rc);
slashes:
rc = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
- SGOTO(exit1, rc);
+ goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
@@ -437,23 +426,26 @@ vn_rename(const char *oldname, const char *newname, int x1)
struct dentry *trap;
struct path old_parent, new_parent;
int rc = 0;
- SENTRY;
old_dentry = spl_kern_path_locked(oldname, &old_parent);
- if (IS_ERR(old_dentry))
- SGOTO(exit, rc = PTR_ERR(old_dentry));
+ if (IS_ERR(old_dentry)) {
+ rc = PTR_ERR(old_dentry);
+ goto exit;
+ }
spl_inode_unlock(old_parent.dentry->d_inode);
new_dentry = spl_kern_path_locked(newname, &new_parent);
- if (IS_ERR(new_dentry))
- SGOTO(exit2, rc = PTR_ERR(new_dentry));
+ if (IS_ERR(new_dentry)) {
+ rc = PTR_ERR(new_dentry);
+ goto exit2;
+ }
spl_inode_unlock(new_parent.dentry->d_inode);
rc = -EXDEV;
if (old_parent.mnt != new_parent.mnt)
- SGOTO(exit3, rc);
+ goto exit3;
old_dir = old_parent.dentry;
new_dir = new_parent.dentry;
@@ -462,25 +454,25 @@ vn_rename(const char *oldname, const char *newname, int x1)
/* source should not be ancestor of target */
rc = -EINVAL;
if (old_dentry == trap)
- SGOTO(exit4, rc);
+ goto exit4;
/* target should not be an ancestor of source */
rc = -ENOTEMPTY;
if (new_dentry == trap)
- SGOTO(exit4, rc);
+ goto exit4;
/* source must exist */
rc = -ENOENT;
if (!old_dentry->d_inode)
- SGOTO(exit4, rc);
+ goto exit4;
/* unless the source is a directory trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
rc = -ENOTDIR;
if (old_dentry->d_name.name[old_dentry->d_name.len])
- SGOTO(exit4, rc);
+ goto exit4;
if (new_dentry->d_name.name[new_dentry->d_name.len])
- SGOTO(exit4, rc);
+ goto exit4;
}
#if defined(HAVE_4ARGS_VFS_RENAME)
@@ -502,7 +494,7 @@ exit2:
dput(old_dentry);
path_put(&old_parent);
exit:
- SRETURN(-rc);
+ return (-rc);
}
EXPORT_SYMBOL(vn_rename);
@@ -512,7 +504,6 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
struct file *fp;
struct kstat stat;
int rc;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@@ -526,7 +517,7 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
if (rc)
- SRETURN(-rc);
+ return (-rc);
vap->va_type = vn_mode_to_vtype(stat.mode);
vap->va_mode = stat.mode;
@@ -543,14 +534,13 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
vap->va_rdev = stat.rdev;
vap->va_nblocks = stat.blocks;
- SRETURN(0);
+ return (0);
}
EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
int datasync = 0;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@@ -558,7 +548,7 @@ int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
if (flags & FDSYNC)
datasync = 1;
- SRETURN(-spl_filp_fsync(vp->v_file, datasync));
+ return (-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
@@ -566,10 +556,9 @@ int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
offset_t offset, void *x6, void *x7)
{
int error = EOPNOTSUPP;
- SENTRY;
if (cmd != F_FREESP || bfp->l_whence != 0)
- SRETURN(EOPNOTSUPP);
+ return (EOPNOTSUPP);
ASSERT(vp);
ASSERT(vp->v_file);
@@ -584,7 +573,7 @@ int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
bfp->l_start, bfp->l_len);
if (error == 0)
- SRETURN(0);
+ return (0);
#endif
#ifdef HAVE_INODE_TRUNCATE_RANGE
@@ -600,7 +589,7 @@ int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
if (end % PAGE_SIZE != 0) {
end &= ~(off_t)(PAGE_SIZE - 1);
if (end <= bfp->l_start)
- SRETURN(0);
+ return (0);
}
--end;
@@ -608,11 +597,11 @@ int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
vp->v_file->f_dentry->d_inode,
bfp->l_start, end
);
- SRETURN(0);
+ return (0);
}
#endif
- SRETURN(error);
+ return (error);
}
EXPORT_SYMBOL(vn_space);
@@ -642,7 +631,6 @@ vn_getf(int fd)
file_t *fp;
vnode_t *vp;
int rc = 0;
- SENTRY;
/* Already open just take an extra reference */
spin_lock(&vn_file_lock);
@@ -651,7 +639,7 @@ vn_getf(int fd)
if (fp) {
atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock);
- SRETURN(fp);
+ return (fp);
}
spin_unlock(&vn_file_lock);
@@ -659,7 +647,7 @@ vn_getf(int fd)
/* File was not yet opened create the object and setup */
fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
if (fp == NULL)
- SGOTO(out, rc);
+ goto out;
mutex_enter(&fp->f_lock);
@@ -670,11 +658,11 @@ vn_getf(int fd)
lfp = fget(fd);
if (lfp == NULL)
- SGOTO(out_mutex, rc);
+ goto out_mutex;
vp = vn_alloc(KM_SLEEP);
if (vp == NULL)
- SGOTO(out_fget, rc);
+ goto out_fget;
#ifdef HAVE_2ARGS_VFS_GETATTR
rc = vfs_getattr(&lfp->f_path, &stat);
@@ -682,7 +670,7 @@ vn_getf(int fd)
rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
if (rc)
- SGOTO(out_vnode, rc);
+ goto out_vnode;
mutex_enter(&vp->v_lock);
vp->v_type = vn_mode_to_vtype(stat.mode);
@@ -698,7 +686,7 @@ vn_getf(int fd)
spin_unlock(&vn_file_lock);
mutex_exit(&fp->f_lock);
- SRETURN(fp);
+ return (fp);
out_vnode:
vn_free(vp);
@@ -708,7 +696,7 @@ out_mutex:
mutex_exit(&fp->f_lock);
kmem_cache_free(vn_file_cache, fp);
out:
- SRETURN(NULL);
+ return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
@@ -728,7 +716,6 @@ void
vn_releasef(int fd)
{
file_t *fp;
- SENTRY;
spin_lock(&vn_file_lock);
fp = file_find(fd);
@@ -736,7 +723,6 @@ vn_releasef(int fd)
atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) {
spin_unlock(&vn_file_lock);
- SEXIT;
return;
}
@@ -745,7 +731,6 @@ vn_releasef(int fd)
}
spin_unlock(&vn_file_lock);
- SEXIT;
return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
@@ -783,7 +768,6 @@ vn_set_pwd(const char *filename)
struct path path;
mm_segment_t saved_fs;
int rc;
- SENTRY;
/*
* user_path_dir() and __user_walk() both expect 'filename' to be
@@ -795,11 +779,11 @@ vn_set_pwd(const char *filename)
rc = user_path_dir(filename, &path);
if (rc)
- SGOTO(out, rc);
+ goto out;
rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
if (rc)
- SGOTO(dput_and_out, rc);
+ goto dput_and_out;
vn_set_fs_pwd(current->fs, &path);
@@ -808,7 +792,7 @@ dput_and_out:
out:
set_fs(saved_fs);
- SRETURN(-rc);
+ return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
@@ -853,7 +837,6 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
int
spl_vn_init(void)
{
- SENTRY;
vn_cache = kmem_cache_create("spl_vn_cache",
sizeof(struct vnode), 64,
vn_cache_constructor,
@@ -865,7 +848,7 @@ spl_vn_init(void)
vn_file_cache_constructor,
vn_file_cache_destructor,
NULL, NULL, NULL, KMC_KMEM);
- SRETURN(0);
+ return (0);
} /* vn_init() */
void
@@ -873,7 +856,6 @@ spl_vn_fini(void)
{
file_t *fp, *next_fp;
int leaked = 0;
- SENTRY;
spin_lock(&vn_file_lock);
@@ -886,11 +868,10 @@ spl_vn_fini(void)
spin_unlock(&vn_file_lock);
if (leaked > 0)
- SWARN("Warning %d files leaked\n", leaked);
+ printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);
kmem_cache_destroy(vn_file_cache);
kmem_cache_destroy(vn_cache);
- SEXIT;
return;
} /* vn_fini() */
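The vnode hunks follow the same mechanical conversion but also document the emulated file API: vn_open(), vn_getattr(), vn_rdwr() and vn_close(). A hedged usage sketch assembled from the call sites visible above; the helper, path and buffer handling are illustrative, and errors come back as positive errno values, matching the implementation.

#include <sys/vnode.h>

/* Hypothetical helper: read the first bytes of a file into buf. */
static int
example_read_head(const char *path, char *buf, ssize_t len)
{
    vnode_t *vp;
    vattr_t vap;
    int rc;

    rc = vn_open(path, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0);
    if (rc)
        return (rc);    /* positive errno, per vn_open() above */

    rc = vn_getattr(vp, &vap, 0, NULL, NULL);
    if (rc == 0 && vap.va_size > 0)
        rc = vn_rdwr(UIO_READ, vp, buf, len, 0,
            UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);

    (void) vn_close(vp, FREAD, 0, 0, NULL, NULL);
    return (rc);
}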
diff --git a/module/spl/spl-xdr.c b/module/spl/spl-xdr.c
index 62efa31a5..9405dc88d 100644
--- a/module/spl/spl-xdr.c
+++ b/module/spl/spl-xdr.c
@@ -27,13 +27,6 @@
#include <sys/types.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_XDR
/*
* SPL's XDR mem implementation.
@@ -150,7 +143,6 @@ xdrmem_create(XDR *xdrs, const caddr_t addr, const uint_t size,
xdrs->x_ops = &xdrmem_decode_ops;
break;
default:
- SWARN("Invalid op value: %d\n", op);
xdrs->x_ops = NULL; /* Let the caller know we failed */
return;
}
@@ -160,7 +152,6 @@ xdrmem_create(XDR *xdrs, const caddr_t addr, const uint_t size,
xdrs->x_addr_end = addr + size;
if (xdrs->x_addr_end < xdrs->x_addr) {
- SWARN("Overflow while creating xdrmem: %p, %u\n", addr, size);
xdrs->x_ops = NULL;
}
}
@@ -171,10 +162,8 @@ xdrmem_control(XDR *xdrs, int req, void *info)
{
struct xdr_bytesrec *rec = (struct xdr_bytesrec *) info;
- if (req != XDR_GET_BYTES_AVAIL) {
- SWARN("Called with unknown request: %d\n", req);
+ if (req != XDR_GET_BYTES_AVAIL)
return FALSE;
- }
rec->xc_is_last_record = TRUE; /* always TRUE in xdrmem streams */
rec->xc_num_avail = xdrs->x_addr_end - xdrs->x_addr;
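In spl-xdr.c the change is purely subtractive: the SWARN() diagnostics for a bad create op, an address overflow, and an unknown control request are dropped, leaving x_ops = NULL (or FALSE) as the only failure signal. A sketch of how a caller would now detect that; the helper below is hypothetical, and xdr_u_int() is assumed to be the usual Sun RPC encode primitive exported by this header.

#include <rpc/xdr.h>
#include <linux/errno.h>

/* Hypothetical: encode one 32-bit value into a caller-supplied buffer. */
static int
example_xdr_encode(char *buf, uint_t buflen, unsigned int value)
{
    XDR xdrs;

    xdrmem_create(&xdrs, buf, buflen, XDR_ENCODE);
    if (xdrs.x_ops == NULL)             /* create failures are now silent */
        return (-EINVAL);

    if (!xdr_u_int(&xdrs, &value))      /* assumed standard helper */
        return (-ENOSPC);

    return (0);
}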
diff --git a/module/spl/spl-zlib.c b/module/spl/spl-zlib.c
index 807e743d5..2b8aab865 100644
--- a/module/spl/spl-zlib.c
+++ b/module/spl/spl-zlib.c
@@ -55,13 +55,6 @@
#include <sys/kmem.h>
#include <sys/zmod.h>
-#include <spl-debug.h>
-
-#ifdef DEBUG_SUBSYSTEM
-#undef DEBUG_SUBSYSTEM
-#endif
-
-#define DEBUG_SUBSYSTEM SS_ZLIB
static spl_kmem_cache_t *zlib_workspace_cache;
@@ -200,7 +193,6 @@ int
spl_zlib_init(void)
{
int size;
- SENTRY;
size = MAX(spl_zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
@@ -210,16 +202,14 @@ spl_zlib_init(void)
size, 0, NULL, NULL, NULL, NULL, NULL,
KMC_VMEM | KMC_NOEMERGENCY);
if (!zlib_workspace_cache)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_zlib_fini(void)
{
- SENTRY;
kmem_cache_destroy(zlib_workspace_cache);
zlib_workspace_cache = NULL;
- SEXIT;
}
diff --git a/module/splat/splat-internal.h b/module/splat/splat-internal.h
index ea0a88f0c..eff8a9e74 100644
--- a/module/splat/splat-internal.h
+++ b/module/splat/splat-internal.h
@@ -25,7 +25,6 @@
#ifndef _SPLAT_INTERNAL_H
#define _SPLAT_INTERNAL_H
-#include "spl-debug.h"
#include "splat-ctl.h"
#include <sys/mutex.h>
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index db787ae92..cf47ce65a 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -313,7 +313,7 @@ splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
kmem_cache_thread_t *kct;
- ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
+ ASSERT3S(id, <, SPLAT_KMEM_THREADS);
ASSERT(kcp->kcp_kct[id] == NULL);
kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);