aboutsummaryrefslogtreecommitdiffstats
path: root/module/splat
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2009-01-15 10:44:54 -0800
committerBrian Behlendorf <[email protected]>2009-01-15 10:44:54 -0800
commit617d5a673cd16aa91fa9668b94cc385094fae852 (patch)
tree37c7e043f3599d458a3aa0e763363853c298fba3 /module/splat
parentf6a19c0d37992755ed6b1b50344047537a1efe5c (diff)
Rename modules to module and update references
Diffstat (limited to 'module/splat')
-rw-r--r--module/splat/Makefile.in47
-rw-r--r--module/splat/splat-atomic.c226
-rw-r--r--module/splat/splat-condvar.c479
-rw-r--r--module/splat/splat-ctl.c682
-rw-r--r--module/splat/splat-generic.c233
-rw-r--r--module/splat/splat-internal.h239
-rw-r--r--module/splat/splat-kmem.c733
-rw-r--r--module/splat/splat-kobj.c164
-rw-r--r--module/splat/splat-list.c473
-rw-r--r--module/splat/splat-mutex.c355
-rw-r--r--module/splat/splat-random.c129
-rw-r--r--module/splat/splat-rwlock.c786
-rw-r--r--module/splat/splat-taskq.c310
-rw-r--r--module/splat/splat-thread.c203
-rw-r--r--module/splat/splat-time.c117
-rw-r--r--module/splat/splat-vnode.c532
16 files changed, 5708 insertions, 0 deletions
diff --git a/module/splat/Makefile.in b/module/splat/Makefile.in
new file mode 100644
index 000000000..33b2865c1
--- /dev/null
+++ b/module/splat/Makefile.in
@@ -0,0 +1,47 @@
+# Makefile.in for splat kernel module
+
+MODULES := splat
+DISTFILES = Makefile.in *.c *.h
+EXTRA_CFLAGS = @KERNELCPPFLAGS@
+
+# Solaris Porting LAyer Tests
+obj-m := splat.o
+
+splat-objs += splat-ctl.o
+splat-objs += splat-kmem.o
+splat-objs += splat-taskq.o
+splat-objs += splat-random.o
+splat-objs += splat-mutex.o
+splat-objs += splat-condvar.o
+splat-objs += splat-thread.o
+splat-objs += splat-rwlock.o
+splat-objs += splat-time.o
+splat-objs += splat-vnode.o
+splat-objs += splat-kobj.o
+splat-objs += splat-atomic.o
+splat-objs += splat-list.o
+splat-objs += splat-generic.o
+
+splatmodule := splat.ko
+splatmoduledir := @kmoduledir@/kernel/lib/
+
+install:
+ mkdir -p $(DESTDIR)$(splatmoduledir)
+ $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+uninstall:
+ rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+clean:
+	-rm -f $(splatmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+ rm -f Makefile
+ rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+ cp -p $(DISTFILES) $(distdir)
diff --git a/module/splat/splat-atomic.c b/module/splat/splat-atomic.c
new file mode 100644
index 000000000..cc947d095
--- /dev/null
+++ b/module/splat/splat-atomic.c
@@ -0,0 +1,226 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_ATOMIC 0x0b00
+#define SPLAT_ATOMIC_NAME "atomic"
+#define SPLAT_ATOMIC_DESC "Kernel Atomic Tests"
+
+#define SPLAT_ATOMIC_TEST1_ID 0x0b01
+#define SPLAT_ATOMIC_TEST1_NAME "64-bit"
+#define SPLAT_ATOMIC_TEST1_DESC "Validate 64-bit atomic ops"
+
+#define SPLAT_ATOMIC_TEST_MAGIC 0x43435454UL
+#define SPLAT_ATOMIC_INIT_VALUE 10000000UL
+
+typedef enum {
+ SPLAT_ATOMIC_INC_64 = 0,
+ SPLAT_ATOMIC_DEC_64 = 1,
+ SPLAT_ATOMIC_ADD_64 = 2,
+ SPLAT_ATOMIC_SUB_64 = 3,
+ SPLAT_ATOMIC_ADD_64_NV = 4,
+ SPLAT_ATOMIC_SUB_64_NV = 5,
+ SPLAT_ATOMIC_COUNT_64 = 6
+} atomic_op_t;
+
+typedef struct atomic_priv {
+ unsigned long ap_magic;
+ struct file *ap_file;
+ spinlock_t ap_lock;
+ wait_queue_head_t ap_waitq;
+ volatile uint64_t ap_atomic;
+ volatile uint64_t ap_atomic_exited;
+ atomic_op_t ap_op;
+
+} atomic_priv_t;
+
+static void
+splat_atomic_work(void *priv)
+{
+ atomic_priv_t *ap;
+ atomic_op_t op;
+ int i;
+
+ ap = (atomic_priv_t *)priv;
+ ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);
+
+ spin_lock(&ap->ap_lock);
+ op = ap->ap_op;
+ wake_up(&ap->ap_waitq);
+ spin_unlock(&ap->ap_lock);
+
+ splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+ "Thread %d successfully started: %lu/%lu\n", op,
+ (long unsigned)ap->ap_atomic,
+ (long unsigned)ap->ap_atomic_exited);
+
+ for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {
+
+ /* Periodically sleep to mix up the ordering */
+ if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
+ splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+ "Thread %d sleeping: %lu/%lu\n", op,
+ (long unsigned)ap->ap_atomic,
+ (long unsigned)ap->ap_atomic_exited);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 100);
+ }
+
+ switch (op) {
+ case SPLAT_ATOMIC_INC_64:
+ atomic_inc_64(&ap->ap_atomic);
+ break;
+ case SPLAT_ATOMIC_DEC_64:
+ atomic_dec_64(&ap->ap_atomic);
+ break;
+ case SPLAT_ATOMIC_ADD_64:
+ atomic_add_64(&ap->ap_atomic, 3);
+ break;
+ case SPLAT_ATOMIC_SUB_64:
+ atomic_sub_64(&ap->ap_atomic, 3);
+ break;
+ case SPLAT_ATOMIC_ADD_64_NV:
+ atomic_add_64_nv(&ap->ap_atomic, 5);
+ break;
+ case SPLAT_ATOMIC_SUB_64_NV:
+ atomic_sub_64_nv(&ap->ap_atomic, 5);
+ break;
+ default:
+ SBUG();
+ }
+ }
+
+ atomic_inc_64(&ap->ap_atomic_exited);
+
+ splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+ "Thread %d successfully exited: %lu/%lu\n", op,
+ (long unsigned)ap->ap_atomic,
+ (long unsigned)ap->ap_atomic_exited);
+
+ wake_up(&ap->ap_waitq);
+ thread_exit();
+}
+
+static int
+splat_atomic_test1_cond(atomic_priv_t *ap, int started)
+{
+ return (ap->ap_atomic_exited == started);
+}
+
+static int
+splat_atomic_test1(struct file *file, void *arg)
+{
+ atomic_priv_t ap;
+ DEFINE_WAIT(wait);
+ kthread_t *thr;
+ int i, rc = 0;
+
+ ap.ap_magic = SPLAT_ATOMIC_TEST_MAGIC;
+ ap.ap_file = file;
+ spin_lock_init(&ap.ap_lock);
+ init_waitqueue_head(&ap.ap_waitq);
+ ap.ap_atomic = SPLAT_ATOMIC_INIT_VALUE;
+ ap.ap_atomic_exited = 0;
+
+ for (i = 0; i < SPLAT_ATOMIC_COUNT_64; i++) {
+ spin_lock(&ap.ap_lock);
+ ap.ap_op = i;
+
+ thr = (kthread_t *)thread_create(NULL, 0, splat_atomic_work,
+ &ap, 0, &p0, TS_RUN,
+ minclsyspri);
+ if (thr == NULL) {
+ rc = -ESRCH;
+ spin_unlock(&ap.ap_lock);
+ break;
+ }
+
+ /* Prepare to wait, the new thread will wake us once it
+ * has made a copy of the unique private passed data */
+ prepare_to_wait(&ap.ap_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&ap.ap_lock);
+ schedule();
+ }
+
+ wait_event_interruptible(ap.ap_waitq, splat_atomic_test1_cond(&ap, i));
+
+ if (rc) {
+ splat_vprint(file, SPLAT_ATOMIC_TEST1_NAME, "Only started "
+ "%d/%d test threads\n", i, SPLAT_ATOMIC_COUNT_64);
+ return rc;
+ }
+
+ if (ap.ap_atomic != SPLAT_ATOMIC_INIT_VALUE) {
+ splat_vprint(file, SPLAT_ATOMIC_TEST1_NAME,
+ "Final value %lu does not match initial value %lu\n",
+ (long unsigned)ap.ap_atomic, SPLAT_ATOMIC_INIT_VALUE);
+ return -EINVAL;
+ }
+
+ splat_vprint(file, SPLAT_ATOMIC_TEST1_NAME,
+ "Success initial and final values match, %lu == %lu\n",
+ (long unsigned)ap.ap_atomic, SPLAT_ATOMIC_INIT_VALUE);
+
+ return 0;
+}
+
+splat_subsystem_t *
+splat_atomic_init(void)
+{
+ splat_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, SPLAT_ATOMIC_NAME, SPLAT_NAME_SIZE);
+ strncpy(sub->desc.desc, SPLAT_ATOMIC_DESC, SPLAT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = SPLAT_SUBSYSTEM_ATOMIC;
+
+ SPLAT_TEST_INIT(sub, SPLAT_ATOMIC_TEST1_NAME, SPLAT_ATOMIC_TEST1_DESC,
+ SPLAT_ATOMIC_TEST1_ID, splat_atomic_test1);
+
+ return sub;
+}
+
+void
+splat_atomic_fini(splat_subsystem_t *sub)
+{
+ ASSERT(sub);
+ SPLAT_TEST_FINI(sub, SPLAT_ATOMIC_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+splat_atomic_id(void) {
+ return SPLAT_SUBSYSTEM_ATOMIC;
+}
diff --git a/module/splat/splat-condvar.c b/module/splat/splat-condvar.c
new file mode 100644
index 000000000..276798818
--- /dev/null
+++ b/module/splat/splat-condvar.c
@@ -0,0 +1,479 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_CONDVAR 0x0500
+#define SPLAT_CONDVAR_NAME "condvar"
+#define SPLAT_CONDVAR_DESC "Kernel Condition Variable Tests"
+
+#define SPLAT_CONDVAR_TEST1_ID 0x0501
+#define SPLAT_CONDVAR_TEST1_NAME "signal1"
+#define SPLAT_CONDVAR_TEST1_DESC "Wake a single thread, cv_wait()/cv_signal()"
+
+#define SPLAT_CONDVAR_TEST2_ID 0x0502
+#define SPLAT_CONDVAR_TEST2_NAME "broadcast1"
+#define SPLAT_CONDVAR_TEST2_DESC "Wake all threads, cv_wait()/cv_broadcast()"
+
+#define SPLAT_CONDVAR_TEST3_ID 0x0503
+#define SPLAT_CONDVAR_TEST3_NAME "signal2"
+#define SPLAT_CONDVAR_TEST3_DESC "Wake a single thread, cv_wait_timeout()/cv_signal()"
+
+#define SPLAT_CONDVAR_TEST4_ID 0x0504
+#define SPLAT_CONDVAR_TEST4_NAME "broadcast2"
+#define SPLAT_CONDVAR_TEST4_DESC "Wake all threads, cv_wait_timeout()/cv_broadcast()"
+
+#define SPLAT_CONDVAR_TEST5_ID 0x0505
+#define SPLAT_CONDVAR_TEST5_NAME "timeout"
+#define SPLAT_CONDVAR_TEST5_DESC "Timeout thread, cv_wait_timeout()"
+
+#define SPLAT_CONDVAR_TEST_MAGIC 0x115599DDUL
+#define SPLAT_CONDVAR_TEST_NAME "condvar_test"
+#define SPLAT_CONDVAR_TEST_COUNT 8
+
+typedef struct condvar_priv {
+ unsigned long cv_magic;
+ struct file *cv_file;
+ kcondvar_t cv_condvar;
+ kmutex_t cv_mtx;
+} condvar_priv_t;
+
+typedef struct condvar_thr {
+ int ct_id;
+ const char *ct_name;
+ condvar_priv_t *ct_cvp;
+ int ct_rc;
+} condvar_thr_t;
+
+int
+splat_condvar_test12_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+
+ ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name),"%s%d",SPLAT_CONDVAR_TEST_NAME,ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ splat_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ cv_wait(&cv->cv_condvar, &cv->cv_mtx);
+ splat_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+splat_condvar_test1(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+ splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Attempted to "
+ "wake %d thread but work %d threads woke\n",
+ 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ if (!rc)
+ splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until that last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+splat_condvar_test2(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = SPLAT_CONDVAR_TEST2_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake all threads waiting on the condition variable */
+ cv_broadcast(&cv.cv_condvar);
+
+ /* Wait until all threads have exited */
+ while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ splat_vprint(file, SPLAT_CONDVAR_TEST2_NAME, "Correctly woke all "
+ "%d sleeping threads at once\n", count);
+
+ /* Wake everything for the failure case */
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+int
+splat_condvar_test34_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+ clock_t rc;
+
+ ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d", SPLAT_CONDVAR_TEST_NAME, ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ splat_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+
+ /* Sleep no longer than 3 seconds, for this test we should
+ * actually never sleep that long without being woken up. */
+ rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
+ if (rc == -1) {
+ ct->ct_rc = -ETIMEDOUT;
+ splat_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
+ "should have been woken\n", name);
+ } else {
+ splat_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ }
+
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+splat_condvar_test3(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+ splat_vprint(file, SPLAT_CONDVAR_TEST3_NAME, "Attempted to "
+ "wake %d thread but work %d threads woke\n",
+ 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+ splat_vprint(file, SPLAT_CONDVAR_TEST3_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until that last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+splat_condvar_test4(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+		ct[i].ct_name = SPLAT_CONDVAR_TEST4_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+		splat_vprint(file, SPLAT_CONDVAR_TEST4_NAME, "Attempted to "
+ "wake %d thread but work %d threads woke\n",
+ 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+		splat_vprint(file, SPLAT_CONDVAR_TEST4_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until that last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+splat_condvar_test5(struct file *file, void *arg)
+{
+ kcondvar_t condvar;
+ kmutex_t mtx;
+ clock_t time_left, time_before, time_after, time_delta;
+ int64_t whole_delta;
+ int32_t remain_delta;
+ int rc = 0;
+
+ mutex_init(&mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&condvar, SPLAT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
+ "%d second and expecting to be woken by timeout\n", 1);
+
+ /* Allow a 1 second timeout, plenty long to validate correctness. */
+ time_before = lbolt;
+ mutex_enter(&mtx);
+ time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
+ mutex_exit(&mtx);
+ time_after = lbolt;
+ time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
+ whole_delta = time_delta;
+ remain_delta = do_div(whole_delta, HZ);
+
+ if (time_left == -1) {
+ if (time_delta >= HZ) {
+ splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out and was asleep "
+ "for %d.%d seconds (%d second min)\n",
+ (int)whole_delta, remain_delta, 1);
+ } else {
+ splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out but was only "
+ "asleep for %d.%d seconds (%d second "
+ "min)\n", (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+ } else {
+ splat_vprint(file, SPLAT_CONDVAR_TEST5_NAME,
+ "Thread exited after only %d.%d seconds, it "
+ "did not hit the %d second timeout\n",
+ (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+
+ cv_destroy(&condvar);
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+splat_subsystem_t *
+splat_condvar_init(void)
+{
+ splat_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, SPLAT_CONDVAR_NAME, SPLAT_NAME_SIZE);
+ strncpy(sub->desc.desc, SPLAT_CONDVAR_DESC, SPLAT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = SPLAT_SUBSYSTEM_CONDVAR;
+
+ SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST1_NAME, SPLAT_CONDVAR_TEST1_DESC,
+ SPLAT_CONDVAR_TEST1_ID, splat_condvar_test1);
+ SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST2_NAME, SPLAT_CONDVAR_TEST2_DESC,
+ SPLAT_CONDVAR_TEST2_ID, splat_condvar_test2);
+ SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST3_NAME, SPLAT_CONDVAR_TEST3_DESC,
+ SPLAT_CONDVAR_TEST3_ID, splat_condvar_test3);
+ SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST4_NAME, SPLAT_CONDVAR_TEST4_DESC,
+ SPLAT_CONDVAR_TEST4_ID, splat_condvar_test4);
+ SPLAT_TEST_INIT(sub, SPLAT_CONDVAR_TEST5_NAME, SPLAT_CONDVAR_TEST5_DESC,
+ SPLAT_CONDVAR_TEST5_ID, splat_condvar_test5);
+
+ return sub;
+}
+
+void
+splat_condvar_fini(splat_subsystem_t *sub)
+{
+ ASSERT(sub);
+ SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST5_ID);
+ SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST4_ID);
+ SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST3_ID);
+ SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST2_ID);
+ SPLAT_TEST_FINI(sub, SPLAT_CONDVAR_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+splat_condvar_id(void) {
+ return SPLAT_SUBSYSTEM_CONDVAR;
+}
diff --git a/module/splat/splat-ctl.c b/module/splat/splat-ctl.c
new file mode 100644
index 000000000..d7aed023c
--- /dev/null
+++ b/module/splat/splat-ctl.c
@@ -0,0 +1,682 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * My intent is to create a loadable 'splat' (Solaris Porting LAyer
+ * Tests) module which can be used as an access point to run
+ * in kernel Solaris ABI regression tests. This provides a
+ * nice mechanism to validate the shim primitives are working properly.
+ *
+ * The basic design is the splat module is that it is constructed of
+ * various splat_* source files each of which contains regression tests.
+ * For example the splat_linux_kmem.c file contains tests for validating
+ * kmem correctness. When the splat module is loaded splat_*_init()
+ * will be called for each subsystems tests, similarly splat_*_fini() is
+ * called when the splat module is removed. Each test can then be
+ * run by making an ioctl() call from a userspace control application
+ * to pick the subsystem and test which should be run.
+ */
+
+#include "splat-internal.h"
+
+static spl_class *splat_class;
+static spl_device *splat_device;
+static struct list_head splat_module_list;
+static spinlock_t splat_module_lock;
+
+static int
+splat_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ splat_info_t *info;
+
+ if (minor >= SPLAT_MINORS)
+ return -ENXIO;
+
+ info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&info->info_lock);
+ info->info_size = SPLAT_INFO_BUFFER_SIZE;
+ info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
+ if (info->info_buffer == NULL) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->info_head = info->info_buffer;
+ file->private_data = (void *)info;
+
+ return 0;
+}
+
+static int
+splat_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ splat_info_t *info = (splat_info_t *)file->private_data;
+
+ if (minor >= SPLAT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ vfree(info->info_buffer);
+ kfree(info);
+
+ return 0;
+}
+
+static int
+splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_info_t *info = (splat_info_t *)file->private_data;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ memset(info->info_buffer, 0, info->info_size);
+ info->info_head = info->info_buffer;
+ spin_unlock(&info->info_lock);
+
+ return 0;
+}
+
+static int
+splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_info_t *info = (splat_info_t *)file->private_data;
+ char *buf;
+ int min, size, rc = 0;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ if (kcfg->cfg_arg1 > 0) {
+
+ size = kcfg->cfg_arg1;
+ buf = (char *)vmalloc(size);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+		/* Zero fill and truncate contents when copying buffer */
+ min = ((size < info->info_size) ? size : info->info_size);
+ memset(buf, 0, size);
+ memcpy(buf, info->info_buffer, min);
+ vfree(info->info_buffer);
+ info->info_size = size;
+ info->info_buffer = buf;
+ info->info_head = info->info_buffer;
+ }
+
+ kcfg->cfg_rc1 = info->info_size;
+
+ if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ rc = -EFAULT;
+out:
+ spin_unlock(&info->info_lock);
+
+ return rc;
+}
+
+
+static splat_subsystem_t *
+splat_subsystem_find(int id) {
+ splat_subsystem_t *sub;
+
+ spin_lock(&splat_module_lock);
+ list_for_each_entry(sub, &splat_module_list, subsystem_list) {
+ if (id == sub->desc.id) {
+ spin_unlock(&splat_module_lock);
+ return sub;
+ }
+ }
+ spin_unlock(&splat_module_lock);
+
+ return NULL;
+}
+
+static int
+splat_subsystem_count(splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_subsystem_t *sub;
+ int i = 0;
+
+ spin_lock(&splat_module_lock);
+ list_for_each_entry(sub, &splat_module_list, subsystem_list)
+ i++;
+
+ spin_unlock(&splat_module_lock);
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+splat_subsystem_list(splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_subsystem_t *sub;
+ splat_cfg_t *tmp;
+ int size, i = 0;
+
+ /* Structure will be sized large enough for N subsystem entries
+ * which is passed in by the caller. On exit the number of
+ * entries filled in with valid subsystems will be stored in
+ * cfg_rc1. If the caller does not provide enough entries
+ * for all subsystems we will truncate the list to avoid overrun.
+ */
+ size = sizeof(*tmp) + kcfg->cfg_data.splat_subsystems.size *
+ sizeof(splat_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&splat_module_lock);
+ list_for_each_entry(sub, &splat_module_list, subsystem_list) {
+ strncpy(tmp->cfg_data.splat_subsystems.descs[i].name,
+ sub->desc.name, SPLAT_NAME_SIZE);
+ strncpy(tmp->cfg_data.splat_subsystems.descs[i].desc,
+ sub->desc.desc, SPLAT_DESC_SIZE);
+ tmp->cfg_data.splat_subsystems.descs[i].id = sub->desc.id;
+
+ /* Truncate list if we are about to overrun alloc'ed memory */
+ if ((i++) == kcfg->cfg_data.splat_subsystems.size)
+ break;
+ }
+ spin_unlock(&splat_module_lock);
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
+
+static int
+splat_test_count(splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_subsystem_t *sub;
+ splat_test_t *test;
+ int i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = splat_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list)
+ i++;
+
+ spin_unlock(&(sub->test_lock));
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+splat_test_list(splat_cfg_t *kcfg, unsigned long arg)
+{
+ splat_subsystem_t *sub;
+ splat_test_t *test;
+ splat_cfg_t *tmp;
+ int size, i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = splat_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+ /* Structure will be sized large enough for N test entries
+ * which is passed in by the caller. On exit the number of
+ * entries filled in with valid tests will be stored in
+ * cfg_rc1. If the caller does not provide enough entries
+ * for all tests we will truncate the list to avoid overrun.
+ */
+ size = sizeof(*tmp)+kcfg->cfg_data.splat_tests.size*sizeof(splat_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list) {
+ strncpy(tmp->cfg_data.splat_tests.descs[i].name,
+ test->desc.name, SPLAT_NAME_SIZE);
+ strncpy(tmp->cfg_data.splat_tests.descs[i].desc,
+ test->desc.desc, SPLAT_DESC_SIZE);
+ tmp->cfg_data.splat_tests.descs[i].id = test->desc.id;
+
+ /* Truncate list if we are about to overrun alloc'ed memory */
+ if ((i++) == kcfg->cfg_data.splat_tests.size)
+ break;
+ }
+ spin_unlock(&(sub->test_lock));
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((struct splat_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
+
+/* Look up test id 'cmd' in the subsystem's test list and, if found,
+ * run its callback with the (already copied-in) argument buffer.
+ * Returns the test's result, or -EINVAL when no such test id exists.
+ */
+static int
+splat_validate(struct file *file, splat_subsystem_t *sub, int cmd, void *arg)
+{
+	splat_test_t *test;
+
+	spin_lock(&(sub->test_lock));
+	list_for_each_entry(test, &(sub->test_list), test_list) {
+		if (test->desc.id == cmd) {
+			/* Drop the spinlock before invoking the callback,
+			 * which may sleep.  NOTE(review): 'test' is used
+			 * after the lock is released -- safe only if tests
+			 * are never removed while a test runs; confirm
+			 * against SPLAT_TEST_FINI usage. */
+			spin_unlock(&(sub->test_lock));
+			return test->test(file, arg);
+		}
+	}
+	spin_unlock(&(sub->test_lock));
+
+	return -EINVAL;
+}
+
+/* SPLAT_CFG ioctl handler: copy in the config header, verify its
+ * magic, then dispatch on cfg_cmd to the matching helper.  Each
+ * helper copies its own results back to the user buffer at 'arg'.
+ */
+static int
+splat_ioctl_cfg(struct file *file, unsigned long arg)
+{
+	splat_cfg_t kcfg;
+	int rc = 0;
+
+	if (copy_from_user(&kcfg, (splat_cfg_t *)arg, sizeof(kcfg)))
+		return -EFAULT;
+
+	/* Reject requests built against a mismatched user-space header */
+	if (kcfg.cfg_magic != SPLAT_CFG_MAGIC) {
+		splat_print(file, "Bad config magic 0x%x != 0x%x\n",
+			   kcfg.cfg_magic, SPLAT_CFG_MAGIC);
+		return -EINVAL;
+	}
+
+	switch (kcfg.cfg_cmd) {
+		case SPLAT_CFG_BUFFER_CLEAR:
+			/* cfg_arg1 - Unused
+			 * cfg_rc1 - Unused
+			 */
+			rc = splat_buffer_clear(file, &kcfg, arg);
+			break;
+		case SPLAT_CFG_BUFFER_SIZE:
+			/* cfg_arg1 - 0 - query size; >0 resize
+			 * cfg_rc1 - Set to current buffer size
+			 */
+			rc = splat_buffer_size(file, &kcfg, arg);
+			break;
+		case SPLAT_CFG_SUBSYSTEM_COUNT:
+			/* cfg_arg1 - Unused
+			 * cfg_rc1 - Set to number of subsystems
+			 */
+			rc = splat_subsystem_count(&kcfg, arg);
+			break;
+		case SPLAT_CFG_SUBSYSTEM_LIST:
+			/* cfg_arg1 - Unused
+			 * cfg_rc1 - Set to number of subsystems
+			 * cfg_data.splat_subsystems - Populated with subsystems
+			 */
+			rc = splat_subsystem_list(&kcfg, arg);
+			break;
+		case SPLAT_CFG_TEST_COUNT:
+			/* cfg_arg1 - Set to a target subsystem
+			 * cfg_rc1 - Set to number of tests
+			 */
+			rc = splat_test_count(&kcfg, arg);
+			break;
+		case SPLAT_CFG_TEST_LIST:
+			/* cfg_arg1 - Set to a target subsystem
+			 * cfg_rc1 - Set to number of tests
+			 * cfg_data.splat_subsystems - Populated with tests
+			 */
+			rc = splat_test_list(&kcfg, arg);
+			break;
+		default:
+			splat_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
+			rc = -EINVAL;
+			break;
+	}
+
+	return rc;
+}
+
+/* SPLAT_CMD ioctl handler: copy in the command header, stage any
+ * opaque per-test data the caller passed along, then dispatch to the
+ * requested subsystem/test via splat_validate().
+ */
+static int
+splat_ioctl_cmd(struct file *file, unsigned long arg)
+{
+	splat_subsystem_t *sub;
+	splat_cmd_t kcmd;
+	int rc = -EINVAL;
+	void *data = NULL;
+
+	/* Fix: cast to the command type actually being copied, not
+	 * splat_cfg_t (size was already correct via sizeof(kcmd)). */
+	if (copy_from_user(&kcmd, (splat_cmd_t *)arg, sizeof(kcmd)))
+		return -EFAULT;
+
+	if (kcmd.cmd_magic != SPLAT_CMD_MAGIC) {
+		/* Fix: report the expected COMMAND magic; the original
+		 * printed SPLAT_CFG_MAGIC in a command-magic message. */
+		splat_print(file, "Bad command magic 0x%x != 0x%x\n",
+			   kcmd.cmd_magic, SPLAT_CMD_MAGIC);
+		return -EINVAL;
+	}
+
+	/* Allocate memory for any opaque data the caller needed to pass on */
+	if (kcmd.cmd_data_size > 0) {
+		data = kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
+		if (data == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(data, (void *)(arg + offsetof(splat_cmd_t,
+		    cmd_data_str)), kcmd.cmd_data_size)) {
+			kfree(data);
+			return -EFAULT;
+		}
+	}
+
+	sub = splat_subsystem_find(kcmd.cmd_subsystem);
+	if (sub != NULL)
+		rc = splat_validate(file, sub, kcmd.cmd_test, data);
+
+	if (data != NULL)
+		kfree(data);
+
+	return rc;
+}
+
+/* Top-level ioctl entry point: filters out tty ioctls and bogus
+ * minors, then routes SPLAT_CFG / SPLAT_CMD to their handlers.
+ */
+static int
+splat_ioctl(struct inode *inode, struct file *file,
+	    unsigned int cmd, unsigned long arg)
+{
+	unsigned int minor = iminor(file->f_dentry->d_inode);
+	int rc = 0;
+
+	/* Ignore tty ioctls */
+	if ((cmd & 0xffffff00) == ((int)'T') << 8)
+		return -ENOTTY;
+
+	if (minor >= SPLAT_MINORS)
+		return -ENXIO;
+
+	switch (cmd) {
+		case SPLAT_CFG:
+			rc = splat_ioctl_cfg(file, arg);
+			break;
+		case SPLAT_CMD:
+			rc = splat_ioctl_cmd(file, arg);
+			break;
+		default:
+			splat_print(file, "Bad ioctl command %d\n", cmd);
+			rc = -EINVAL;
+			break;
+	}
+
+	return rc;
+}
+
+/* I'm not sure why you would want to write into this buffer from
+ * user space since its principal use is to pass test status info
+ * back to the user space, but I don't see any reason to prevent it.
+ */
+/* Write user data into the per-open status buffer at the current
+ * file position, bounded by the buffer size.  Returns the number of
+ * bytes written, -EFBIG past EOF, or -EFAULT on a bad user pointer.
+ */
+static ssize_t splat_write(struct file *file, const char __user *buf,
+                     size_t count, loff_t *ppos)
+{
+	unsigned int minor = iminor(file->f_dentry->d_inode);
+	splat_info_t *info = (splat_info_t *)file->private_data;
+	int rc = 0;
+
+	if (minor >= SPLAT_MINORS)
+		return -ENXIO;
+
+	ASSERT(info);
+	ASSERT(info->info_buffer);
+
+	/* NOTE(review): copy_from_user() may fault/sleep while this
+	 * spinlock is held; same pattern as splat_read() -- confirm. */
+	spin_lock(&info->info_lock);
+
+	/* Write beyond EOF */
+	if (*ppos >= info->info_size) {
+		rc = -EFBIG;
+		goto out;
+	}
+
+	/* Resize count if beyond EOF */
+	if (*ppos + count > info->info_size)
+		count = info->info_size - *ppos;
+
+	/* Fix: honor the file position.  The original copied to the
+	 * start of info_buffer regardless of *ppos, even though *ppos
+	 * was range-checked above and advanced below. */
+	if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	*ppos += count;
+	rc = count;
+out:
+	spin_unlock(&info->info_lock);
+	return rc;
+}
+
+/* Read from the per-open status buffer at the current file position.
+ * Returns bytes read, 0 at/past EOF, or -EFAULT on a bad user pointer.
+ */
+static ssize_t splat_read(struct file *file, char __user *buf,
+		    size_t count, loff_t *ppos)
+{
+	unsigned int minor = iminor(file->f_dentry->d_inode);
+	splat_info_t *info = (splat_info_t *)file->private_data;
+	int rc = 0;
+
+	if (minor >= SPLAT_MINORS)
+		return -ENXIO;
+
+	ASSERT(info);
+	ASSERT(info->info_buffer);
+
+	spin_lock(&info->info_lock);
+
+	/* Read beyond EOF: rc stays 0, i.e. standard EOF semantics */
+	if (*ppos >= info->info_size)
+		goto out;
+
+	/* Resize count if beyond EOF */
+	if (*ppos + count > info->info_size)
+		count = info->info_size - *ppos;
+
+	if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	*ppos += count;
+	rc = count;
+out:
+	spin_unlock(&info->info_lock);
+	return rc;
+}
+
+/* llseek over the status buffer: supports SEEK_SET/CUR/END; negative
+ * resulting offsets are rejected with -EINVAL.
+ * NOTE(review): 'rc' is an int holding a loff_t offset -- truncation
+ * for offsets > INT_MAX; harmless for the 64K buffer but confirm.
+ */
+static loff_t splat_seek(struct file *file, loff_t offset, int origin)
+{
+	unsigned int minor = iminor(file->f_dentry->d_inode);
+	splat_info_t *info = (splat_info_t *)file->private_data;
+	int rc = -EINVAL;
+
+	if (minor >= SPLAT_MINORS)
+		return -ENXIO;
+
+	ASSERT(info);
+	ASSERT(info->info_buffer);
+
+	spin_lock(&info->info_lock);
+
+	switch (origin) {
+	case 0: /* SEEK_SET - No-op just do it */
+		break;
+	case 1: /* SEEK_CUR - Seek from current */
+		offset = file->f_pos + offset;
+		break;
+	case 2: /* SEEK_END - Seek from end */
+		offset = info->info_size + offset;
+		break;
+	}
+
+	if (offset >= 0) {
+		file->f_pos = offset;
+		file->f_version = 0;
+		rc = offset;
+	}
+
+	spin_unlock(&info->info_lock);
+
+	return rc;
+}
+
+/* Character-device operations for /dev/splat */
+static struct file_operations splat_fops = {
+	.owner		= THIS_MODULE,
+	.open		= splat_open,
+	.release	= splat_release,
+	.ioctl		= splat_ioctl,
+	.read		= splat_read,
+	.write		= splat_write,
+	.llseek		= splat_seek,
+};
+
+/* Statically allocated cdev; initialized via cdev_init() in splat_init() */
+static struct cdev splat_cdev = {
+	.owner  =	THIS_MODULE,
+	.kobj   =	{ .name = SPLAT_NAME, },
+};
+
+/* Module load: register every test subsystem, then set up the splat
+ * character device (chrdev region, cdev, sysfs class, udev node).
+ * Device setup failures unwind in reverse order before returning.
+ */
+static int __init
+splat_init(void)
+{
+	dev_t dev;
+	int rc;
+
+	spin_lock_init(&splat_module_lock);
+	INIT_LIST_HEAD(&splat_module_list);
+
+	/* Subsystem registration failures are only logged (see
+	 * SPLAT_SUBSYSTEM_INIT); the module loads regardless. */
+	SPLAT_SUBSYSTEM_INIT(kmem);
+	SPLAT_SUBSYSTEM_INIT(taskq);
+	SPLAT_SUBSYSTEM_INIT(krng);
+	SPLAT_SUBSYSTEM_INIT(mutex);
+	SPLAT_SUBSYSTEM_INIT(condvar);
+	SPLAT_SUBSYSTEM_INIT(thread);
+	SPLAT_SUBSYSTEM_INIT(rwlock);
+	SPLAT_SUBSYSTEM_INIT(time);
+	SPLAT_SUBSYSTEM_INIT(vnode);
+	SPLAT_SUBSYSTEM_INIT(kobj);
+	SPLAT_SUBSYSTEM_INIT(atomic);
+	SPLAT_SUBSYSTEM_INIT(list);
+	SPLAT_SUBSYSTEM_INIT(generic);
+
+	dev = MKDEV(SPLAT_MAJOR, 0);
+	if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
+		goto error;
+
+	/* Support for registering a character driver */
+	cdev_init(&splat_cdev, &splat_fops);
+	if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
+		printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
+		kobject_put(&splat_cdev.kobj);
+		unregister_chrdev_region(dev, SPLAT_MINORS);
+		goto error;
+	}
+
+	/* Support for udev make driver info available in sysfs */
+	splat_class = spl_class_create(THIS_MODULE, "splat");
+	if (IS_ERR(splat_class)) {
+		rc = PTR_ERR(splat_class);
+		printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
+		cdev_del(&splat_cdev);
+		unregister_chrdev_region(dev, SPLAT_MINORS);
+		goto error;
+	}
+
+	/* NOTE(review): spl_device_create() result is not error-checked
+	 * here -- confirm a failed udev node is acceptable. */
+	splat_device = spl_device_create(splat_class, NULL,
+					 MKDEV(SPLAT_MAJOR, 0),
+					 NULL, SPLAT_NAME);
+
+	printk(KERN_INFO "SPLAT: Loaded Solaris Porting LAyer "
+	       "Tests v%s\n", VERSION);
+	return 0;
+error:
+	printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
+	return rc;
+}
+
+/* Module unload: tear down the device in reverse order of creation,
+ * then unregister subsystems in reverse of splat_init() registration.
+ */
+static void
+splat_fini(void)
+{
+	dev_t dev = MKDEV(SPLAT_MAJOR, 0);
+
+	spl_device_destroy(splat_class, splat_device, dev);
+	spl_class_destroy(splat_class);
+	cdev_del(&splat_cdev);
+	unregister_chrdev_region(dev, SPLAT_MINORS);
+
+	SPLAT_SUBSYSTEM_FINI(generic);
+	SPLAT_SUBSYSTEM_FINI(list);
+	SPLAT_SUBSYSTEM_FINI(atomic);
+	SPLAT_SUBSYSTEM_FINI(kobj);
+	SPLAT_SUBSYSTEM_FINI(vnode);
+	SPLAT_SUBSYSTEM_FINI(time);
+	SPLAT_SUBSYSTEM_FINI(rwlock);
+	SPLAT_SUBSYSTEM_FINI(thread);
+	SPLAT_SUBSYSTEM_FINI(condvar);
+	SPLAT_SUBSYSTEM_FINI(mutex);
+	SPLAT_SUBSYSTEM_FINI(krng);
+	SPLAT_SUBSYSTEM_FINI(taskq);
+	SPLAT_SUBSYSTEM_FINI(kmem);
+
+	/* All subsystems must have removed themselves from the list */
+	ASSERT(list_empty(&splat_module_list));
+	printk(KERN_INFO "SPLAT: Unloaded Solaris Porting LAyer "
+	       "Tests v%s\n", VERSION);
+}
+
+module_init(splat_init);
+module_exit(splat_fini);
+
+MODULE_AUTHOR("Lawrence Livermore National Labs");
+MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
+MODULE_LICENSE("GPL");
diff --git a/module/splat/splat-generic.c b/module/splat/splat-generic.c
new file mode 100644
index 000000000..6da7473e0
--- /dev/null
+++ b/module/splat/splat-generic.c
@@ -0,0 +1,233 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_GENERIC 0x0d00
+#define SPLAT_GENERIC_NAME "generic"
+#define SPLAT_GENERIC_DESC "Kernel Generic Tests"
+
+#define SPLAT_GENERIC_TEST1_ID 0x0d01
+#define SPLAT_GENERIC_TEST1_NAME "ddi_strtoul"
+#define SPLAT_GENERIC_TEST1_DESC "ddi_strtoul Test"
+
+#define SPLAT_GENERIC_TEST2_ID 0x0d02
+#define SPLAT_GENERIC_TEST2_NAME "ddi_strtol"
+#define SPLAT_GENERIC_TEST2_DESC "ddi_strtol Test"
+
+#define SPLAT_GENERIC_TEST3_ID 0x0d03
+#define SPLAT_GENERIC_TEST3_NAME "ddi_strtoull"
+#define SPLAT_GENERIC_TEST3_DESC "ddi_strtoull Test"
+
+#define SPLAT_GENERIC_TEST4_ID 0x0d04
+#define SPLAT_GENERIC_TEST4_NAME "ddi_strtoll"
+#define SPLAT_GENERIC_TEST4_DESC "ddi_strtoll Test"
+
+#define STR_POS "123456789"
+#define STR_NEG "-123456789"
+#define STR_BASE "0xabcdef"
+#define STR_RANGE_MAX "10000000000000000"
+#define STR_RANGE_MIN "-10000000000000000"
+#define STR_INVAL1 "12345U"
+#define STR_INVAL2 "invald"
+
+#define VAL_POS 123456789
+#define VAL_NEG -123456789
+#define VAL_BASE 0xabcdef
+#define VAL_INVAL1 12345U
+
+/* Generate a per-type result logger generic_msg_strto<type>() that
+ * prints pass/fail for one ddi_strto* sub-test and stores the raw
+ * return code 'rc' into *err for the caller's final tally.
+ */
+#define define_generic_msg_strtox(type, valtype)			\
+static void								\
+generic_msg_strto##type(struct file *file, char *msg, int rc, int *err,	\
+			const char *s, valtype d, char *endptr)		\
+{									\
+	splat_vprint(file, SPLAT_GENERIC_TEST1_NAME,			\
+		     "%s (%d) %s: %s == %lld, 0x%p\n",			\
+		     rc ? "Fail" : "Pass", *err, msg, s,		\
+		     (unsigned long long)d, endptr);			\
+	*err = rc;							\
+}
+
+/* Instantiate one logger per ddi_strto* result type */
+define_generic_msg_strtox(ul, unsigned long);
+define_generic_msg_strtox(l, long);
+define_generic_msg_strtox(ull, unsigned long long);
+define_generic_msg_strtox(ll, long long);
+
+/* Generate splat_generic_test_strto<type>(): exercises ddi_strto<type>
+ * against seven fixed inputs (positive, negative, non-decimal base,
+ * out-of-range max/min, and two invalid strings).  The '#type[0]'
+ * checks branch at compile time on the unsigned variants, which are
+ * expected to stop at a leading '-' rather than parse it.  Returns
+ * -EINVAL if any sub-test failed, 0 otherwise.
+ */
+#define define_splat_generic_test_strtox(type, valtype)			\
+static int								\
+splat_generic_test_strto##type(struct file *file, void *arg)		\
+{									\
+	int rc, rc1, rc2, rc3, rc4, rc5, rc6, rc7;			\
+	char str[20], *endptr;						\
+	valtype r;							\
+									\
+	/* Positive value: expect success */				\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	rc1 = ddi_strto##type(STR_POS, &endptr, 10, &r);		\
+	if (rc1 == 0 && r == VAL_POS && endptr && *endptr == '\0')	\
+		rc = 0;							\
+									\
+	generic_msg_strto##type(file, "positive", rc , &rc1,		\
+				STR_POS, r, endptr);			\
+									\
+	/* Negative value: expect success */				\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	strcpy(str, STR_NEG);						\
+	rc2 = ddi_strto##type(str, &endptr, 10, &r);			\
+	if (#type[0] == 'u') {						\
+		if (rc2 == 0 && r == 0 && endptr == str)		\
+			rc = 0;						\
+	} else {							\
+		if (rc2 == 0 && r == VAL_NEG &&				\
+		    endptr && *endptr == '\0')				\
+			rc = 0;						\
+	}								\
+									\
+	generic_msg_strto##type(file, "negative", rc, &rc2,		\
+				STR_NEG, r, endptr);			\
+									\
+	/* Non decimal base: expect success */				\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	rc3 = ddi_strto##type(STR_BASE, &endptr, 0, &r);		\
+	if (rc3 == 0 && r == VAL_BASE && endptr && *endptr == '\0')	\
+		rc = 0;							\
+									\
+	generic_msg_strto##type(file, "base", rc, &rc3,			\
+				STR_BASE, r, endptr);			\
+									\
+	/* Max out of range: failure expected, r unchanged */		\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	rc4 = ddi_strto##type(STR_RANGE_MAX, &endptr, 16, &r);		\
+	if (rc4 == ERANGE && r == 0 && endptr == NULL)			\
+		rc = 0;							\
+									\
+	generic_msg_strto##type(file, "max", rc, &rc4,			\
+				STR_RANGE_MAX, r, endptr);		\
+									\
+	/* Min out of range: failure expected, r unchanged */		\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	strcpy(str, STR_RANGE_MIN);					\
+	rc5 = ddi_strto##type(str, &endptr, 16, &r);			\
+	if (#type[0] == 'u') {						\
+		if (rc5 == 0 && r == 0 && endptr == str)		\
+			rc = 0;						\
+	} else {							\
+		if (rc5 == ERANGE && r == 0 && endptr == NULL)		\
+			rc = 0;						\
+	}								\
+									\
+	generic_msg_strto##type(file, "min", rc, &rc5,			\
+				STR_RANGE_MIN, r, endptr);		\
+									\
+	/* Invalid string: success expected, endptr == 'U' */		\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	rc6 = ddi_strto##type(STR_INVAL1, &endptr, 10, &r);		\
+	if (rc6 == 0 && r == VAL_INVAL1 && endptr && *endptr == 'U')	\
+		rc = 0;							\
+									\
+	generic_msg_strto##type(file, "invalid", rc, &rc6,		\
+				STR_INVAL1, r, endptr);			\
+									\
+	/* Invalid string: failure expected, endptr == str */		\
+	r = 0;								\
+	rc = 1;								\
+	endptr = NULL;							\
+	strcpy(str, STR_INVAL2);					\
+	rc7 = ddi_strto##type(str, &endptr, 10, &r);			\
+	if (rc7 == 0 && r == 0 && endptr == str)			\
+		rc = 0;							\
+									\
+	generic_msg_strto##type(file, "invalid", rc, &rc7,		\
+				STR_INVAL2, r, endptr);			\
+									\
+	return (rc1 || rc2 || rc3 || rc4 || rc5 || rc6 || rc7) ?	\
+	       -EINVAL : 0;						\
+}
+
+/* Instantiate one test per ddi_strto* variant */
+define_splat_generic_test_strtox(ul, unsigned long);
+define_splat_generic_test_strtox(l, long);
+define_splat_generic_test_strtox(ull, unsigned long long);
+define_splat_generic_test_strtox(ll, long long);
+
+/* Allocate and register the 'generic' subsystem descriptor and its
+ * four ddi_strto* tests.  Returns NULL on allocation failure.
+ */
+splat_subsystem_t *
+splat_generic_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	memset(sub, 0, sizeof(*sub));
+	/* Fix: copy at most SIZE-1 bytes so the zeroed final byte always
+	 * NUL-terminates, matching the SPLAT_TEST_INIT convention; the
+	 * original used the full SIZE which cannot guarantee a NUL. */
+	strncpy(sub->desc.name, SPLAT_GENERIC_NAME, SPLAT_NAME_SIZE - 1);
+	strncpy(sub->desc.desc, SPLAT_GENERIC_DESC, SPLAT_DESC_SIZE - 1);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_GENERIC;
+
+	SPLAT_TEST_INIT(sub, SPLAT_GENERIC_TEST1_NAME, SPLAT_GENERIC_TEST1_DESC,
+			SPLAT_GENERIC_TEST1_ID, splat_generic_test_strtoul);
+	SPLAT_TEST_INIT(sub, SPLAT_GENERIC_TEST2_NAME, SPLAT_GENERIC_TEST2_DESC,
+			SPLAT_GENERIC_TEST2_ID, splat_generic_test_strtol);
+	SPLAT_TEST_INIT(sub, SPLAT_GENERIC_TEST3_NAME, SPLAT_GENERIC_TEST3_DESC,
+			SPLAT_GENERIC_TEST3_ID, splat_generic_test_strtoull);
+	SPLAT_TEST_INIT(sub, SPLAT_GENERIC_TEST4_NAME, SPLAT_GENERIC_TEST4_DESC,
+			SPLAT_GENERIC_TEST4_ID, splat_generic_test_strtoll);
+
+	return sub;
+}
+
+/* Unregister the generic tests (reverse of init order) and free the
+ * subsystem descriptor allocated by splat_generic_init().
+ */
+void
+splat_generic_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+
+	SPLAT_TEST_FINI(sub, SPLAT_GENERIC_TEST4_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_GENERIC_TEST3_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_GENERIC_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_GENERIC_TEST1_ID);
+
+	kfree(sub);
+}
+
+/* Return this subsystem's numeric id (used by SPLAT_SUBSYSTEM_FINI) */
+int
+splat_generic_id(void)
+{
+	return SPLAT_SUBSYSTEM_GENERIC;
+}
diff --git a/module/splat/splat-internal.h b/module/splat/splat-internal.h
new file mode 100644
index 000000000..87c47b173
--- /dev/null
+++ b/module/splat/splat-internal.h
@@ -0,0 +1,239 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _SPLAT_INTERNAL_H
+#define _SPLAT_INTERNAL_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/elf.h>
+#include <linux/limits.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+
+#include <asm/ioctls.h>
+#include <asm/uaccess.h>
+#include <stdarg.h>
+
+#include <sys/callb.h>
+#include <sys/condvar.h>
+#include <sys/cred.h>
+#include <sys/sysmacros.h>
+#include <sys/kmem.h>
+#include <sys/kstat.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/rwlock.h>
+#include <sys/taskq.h>
+#include <sys/thread.h>
+#include <sys/time.h>
+#include <sys/timer.h>
+#include <sys/types.h>
+#include <sys/kobj.h>
+#include <sys/atomic.h>
+#include <sys/list.h>
+#include <sys/sunddi.h>
+#include <linux/cdev.h>
+
+#include "spl-device.h"
+#include "splat-ctl.h"
+
+/* Construct subsystem 'type' via splat_<type>_init() and append it to
+ * the global splat_module_list under splat_module_lock.  An init
+ * failure is only logged; the caller proceeds without that subsystem.
+ */
+#define SPLAT_SUBSYSTEM_INIT(type)                                      \
+({      splat_subsystem_t *_sub_;                                       \
+                                                                        \
+        _sub_ = (splat_subsystem_t *)splat_##type##_init();             \
+        if (_sub_ == NULL) {                                            \
+                printk(KERN_ERR "splat: Error initializing: " #type "\n"); \
+        } else {                                                        \
+                spin_lock(&splat_module_lock);                          \
+                list_add_tail(&(_sub_->subsystem_list),                 \
+                              &splat_module_list);                      \
+                spin_unlock(&splat_module_lock);                        \
+        }                                                               \
+})
+
+/* Find subsystem 'type' by id, unlink it from splat_module_list, and
+ * call splat_<type>_fini() on it.  The lock is dropped around the
+ * fini call (which may sleep/free) and retaken to continue the scan.
+ * Logs an error if no matching subsystem was registered.
+ */
+#define SPLAT_SUBSYSTEM_FINI(type)                                      \
+({      splat_subsystem_t *_sub_, *_tmp_;                               \
+        int _id_, _flag_ = 0;                                           \
+                                                                        \
+	_id_ = splat_##type##_id();                                     \
+        spin_lock(&splat_module_lock);                                  \
+        list_for_each_entry_safe(_sub_, _tmp_, &splat_module_list,      \
+                                 subsystem_list) {                      \
+                if (_sub_->desc.id == _id_) {                           \
+                        list_del_init(&(_sub_->subsystem_list));        \
+                        spin_unlock(&splat_module_lock);                \
+                        splat_##type##_fini(_sub_);                     \
+			spin_lock(&splat_module_lock);                  \
+                        _flag_ = 1;                                     \
+                }                                                       \
+        }                                                               \
+        spin_unlock(&splat_module_lock);                                \
+                                                                        \
+	if (!_flag_)                                                    \
+                printk(KERN_ERR "splat: Error finalizing: " #type "\n"); \
+})
+
+/* Allocate a splat_test_t for test 'tid', fill in its descriptor
+ * (name/desc truncated to SIZE-1 so the zeroed last byte terminates),
+ * and append it to the subsystem's test list under test_lock.
+ * Allocation failure is only logged.
+ */
+#define SPLAT_TEST_INIT(sub, n, d, tid, func)                           \
+({      splat_test_t *_test_;                                           \
+                                                                        \
+        _test_ = (splat_test_t *)kmalloc(sizeof(*_test_), GFP_KERNEL);  \
+        if (_test_ == NULL) {                                           \
+                printk(KERN_ERR "splat: Error initializing: " n "/" #tid" \n");\
+        } else {                                                        \
+                memset(_test_, 0, sizeof(*_test_));                     \
+                strncpy(_test_->desc.name, n, SPLAT_NAME_SIZE-1);       \
+                strncpy(_test_->desc.desc, d, SPLAT_DESC_SIZE-1);       \
+                _test_->desc.id = tid;                                  \
+                _test_->test = func;                                    \
+                INIT_LIST_HEAD(&(_test_->test_list));                   \
+                spin_lock(&((sub)->test_lock));                         \
+                list_add_tail(&(_test_->test_list),&((sub)->test_list));\
+                spin_unlock(&((sub)->test_lock));                       \
+        }                                                               \
+})
+
+/* Unlink test 'tid' from the subsystem's test list under test_lock.
+ * Logs an error if no matching test id was registered.
+ * NOTE(review): the delinked splat_test_t is never kfree'd here and
+ * the pointer is dropped -- looks like a leak of the SPLAT_TEST_INIT
+ * allocation; confirm no other code frees it.
+ */
+#define SPLAT_TEST_FINI(sub, tid)                                       \
+({      splat_test_t *_test_, *_tmp_;                                   \
+        int _flag_ = 0;                                                 \
+                                                                        \
+        spin_lock(&((sub)->test_lock));                                 \
+        list_for_each_entry_safe(_test_, _tmp_,                         \
+                                 &((sub)->test_list), test_list) {      \
+                if (_test_->desc.id == tid) {                           \
+                        list_del_init(&(_test_->test_list));            \
+                        _flag_ = 1;                                     \
+                }                                                       \
+        }                                                               \
+        spin_unlock(&((sub)->test_lock));                               \
+                                                                        \
+	if (!_flag_)                                                    \
+                printk(KERN_ERR "splat: Error finalizing: " #tid "\n"); \
+})
+
+/* Signature of every test entry point: (open file, opaque arg) */
+typedef int (*splat_test_func_t)(struct file *, void *);
+
+/* One registered test within a subsystem */
+typedef struct splat_test {
+	struct list_head test_list;	/* Linkage on subsystem's test_list */
+	splat_user_t desc;		/* id/name/desc shared with user space */
+	splat_test_func_t test;		/* Test entry point */
+} splat_test_t;
+
+typedef struct splat_subsystem {
+	struct list_head subsystem_list;/* List head to chain entries */
+	splat_user_t desc;		/* id/name/desc shared with user space */
+	spinlock_t test_lock;		/* Protects test_list */
+	struct list_head test_list;	/* Registered splat_test_t entries */
+} splat_subsystem_t;
+
+#define SPLAT_INFO_BUFFER_SIZE		65536
+#define SPLAT_INFO_BUFFER_REDZONE	256
+
+/* Per-open state: a status buffer tests print into via splat_print() */
+typedef struct splat_info {
+	spinlock_t info_lock;		/* Protects buffer and head */
+	int info_size;			/* Bytes usable in info_buffer */
+	char *info_buffer;		/* Status text buffer */
+	char *info_head;	  /* Internal kernel use only */
+} splat_info_t;
+
+/* Stringify a symbol name at the call site */
+#define sym2str(sym)			(char *)(#sym)
+
+/* printf into the per-open status buffer under info_lock.  The red
+ * zone check refuses to START a write within the last REDZONE bytes,
+ * so each unbounded sprintf() must stay under REDZONE bytes of output.
+ * Evaluates to bytes written or -EOVERFLOW when the buffer is full.
+ */
+#define splat_print(file, format, args...)                              \
+({      splat_info_t *_info_ = (splat_info_t *)file->private_data;      \
+        int _rc_;                                                       \
+                                                                        \
+        ASSERT(_info_);                                                 \
+        ASSERT(_info_->info_buffer);                                    \
+                                                                        \
+        spin_lock(&_info_->info_lock);                                  \
+                                                                        \
+        /* Don't allow the kernel to start a write in the red zone */   \
+        if ((int)(_info_->info_head - _info_->info_buffer) >            \
+            (SPLAT_INFO_BUFFER_SIZE - SPLAT_INFO_BUFFER_REDZONE)) {     \
+                _rc_ = -EOVERFLOW;                                      \
+        } else {                                                        \
+                _rc_ = sprintf(_info_->info_head, format, args);        \
+                if (_rc_ >= 0)                                          \
+                        _info_->info_head += _rc_;                      \
+        }                                                               \
+                                                                        \
+        spin_unlock(&_info_->info_lock);                                \
+        _rc_;                                                           \
+})
+
+/* splat_print() prefixed with a right-aligned test name column */
+#define splat_vprint(file, test, format, args...)                       \
+        splat_print(file, "%*s: " format, SPLAT_NAME_SIZE, test, args)
+
+splat_subsystem_t *splat_condvar_init(void);
+splat_subsystem_t *splat_kmem_init(void);
+splat_subsystem_t *splat_mutex_init(void);
+splat_subsystem_t *splat_krng_init(void);
+splat_subsystem_t *splat_rwlock_init(void);
+splat_subsystem_t *splat_taskq_init(void);
+splat_subsystem_t *splat_thread_init(void);
+splat_subsystem_t *splat_time_init(void);
+splat_subsystem_t *splat_vnode_init(void);
+splat_subsystem_t *splat_kobj_init(void);
+splat_subsystem_t *splat_atomic_init(void);
+splat_subsystem_t *splat_list_init(void);
+splat_subsystem_t *splat_generic_init(void);
+
+void splat_condvar_fini(splat_subsystem_t *);
+void splat_kmem_fini(splat_subsystem_t *);
+void splat_mutex_fini(splat_subsystem_t *);
+void splat_krng_fini(splat_subsystem_t *);
+void splat_rwlock_fini(splat_subsystem_t *);
+void splat_taskq_fini(splat_subsystem_t *);
+void splat_thread_fini(splat_subsystem_t *);
+void splat_time_fini(splat_subsystem_t *);
+void splat_vnode_fini(splat_subsystem_t *);
+void splat_kobj_fini(splat_subsystem_t *);
+void splat_atomic_fini(splat_subsystem_t *);
+void splat_list_fini(splat_subsystem_t *);
+void splat_generic_fini(splat_subsystem_t *);
+
+int splat_condvar_id(void);
+int splat_kmem_id(void);
+int splat_mutex_id(void);
+int splat_krng_id(void);
+int splat_rwlock_id(void);
+int splat_taskq_id(void);
+int splat_thread_id(void);
+int splat_time_id(void);
+int splat_vnode_id(void);
+int splat_kobj_id(void);
+int splat_atomic_id(void);
+int splat_list_id(void);
+int splat_generic_id(void);
+
+#endif /* _SPLAT_INTERNAL_H */
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
new file mode 100644
index 000000000..a9792b1a5
--- /dev/null
+++ b/module/splat/splat-kmem.c
@@ -0,0 +1,733 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_KMEM 0x0100
+#define SPLAT_KMEM_NAME "kmem"
+#define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"
+
+#define SPLAT_KMEM_TEST1_ID 0x0101
+#define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
+#define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
+
+#define SPLAT_KMEM_TEST2_ID 0x0102
+#define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
+#define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
+
+#define SPLAT_KMEM_TEST3_ID 0x0103
+#define SPLAT_KMEM_TEST3_NAME "vmem_alloc"
+#define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)"
+
+#define SPLAT_KMEM_TEST4_ID 0x0104
+#define SPLAT_KMEM_TEST4_NAME "vmem_zalloc"
+#define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)"
+
+#define SPLAT_KMEM_TEST5_ID 0x0105
+#define SPLAT_KMEM_TEST5_NAME "kmem_cache1"
+#define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)"
+
+#define SPLAT_KMEM_TEST6_ID 0x0106
+#define SPLAT_KMEM_TEST6_NAME "kmem_cache2"
+#define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)"
+
+#define SPLAT_KMEM_TEST7_ID 0x0107
+#define SPLAT_KMEM_TEST7_NAME "kmem_reap"
+#define SPLAT_KMEM_TEST7_DESC "Slab reaping test"
+
+#define SPLAT_KMEM_TEST8_ID 0x0108
+#define SPLAT_KMEM_TEST8_NAME "kmem_lock"
+#define SPLAT_KMEM_TEST8_DESC "Slab locking test"
+
+#define SPLAT_KMEM_ALLOC_COUNT 10
+#define SPLAT_VMEM_ALLOC_COUNT 10
+
+
+/* XXX - This test may fail under tight memory conditions */
+static int
+splat_kmem_test1(struct file *file, void *arg)
+{
+ void *ptr[SPLAT_KMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, count, rc = 0;
+
+ /* We are intentionally going to push kmem_alloc to its max
+ * allocation size, so suppress the console warnings for now */
+ kmem_set_warning(0);
+
+ while ((!rc) && (size <= (PAGE_SIZE * 32))) {
+ count = 0;
+
+ for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
+ ptr[i] = kmem_alloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ kmem_free(ptr[i], size);
+
+ splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, SPLAT_KMEM_ALLOC_COUNT);
+ if (count != SPLAT_KMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ kmem_set_warning(1);
+
+ return rc;
+}
+
+/* kmem_zalloc test: like test1 but additionally verifies every
+ * successful allocation was zero filled.  Returns -EFAULT on a dirty
+ * buffer, -ENOMEM if any batch was not fully satisfied.
+ */
+static int
+splat_kmem_test2(struct file *file, void *arg)
+{
+	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
+	int size = PAGE_SIZE;
+	int i, j, count, rc = 0;
+
+	/* We are intentionally going to push kmem_alloc to its max
+	 * allocation size, so suppress the console warnings for now */
+	kmem_set_warning(0);
+
+	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
+		count = 0;
+
+		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
+			ptr[i] = kmem_zalloc(size, KM_SLEEP);
+			if (ptr[i])
+				count++;
+		}
+
+		/* Ensure buffer has been zero filled.  Fix: skip failed
+		 * allocations; the original dereferenced NULL ptr[i]. */
+		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
+			if (ptr[i] == NULL)
+				continue;
+
+			for (j = 0; j < size; j++) {
+				if (((char *)ptr[i])[j] != '\0') {
+					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
+						  "%d-byte allocation was "
+						  "not zeroed\n", size);
+					rc = -EFAULT;
+				}
+			}
+		}
+
+		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
+			if (ptr[i])
+				kmem_free(ptr[i], size);
+
+		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
+	                   "%d byte allocations, %d/%d successful\n",
+		           size, count, SPLAT_KMEM_ALLOC_COUNT);
+		if (count != SPLAT_KMEM_ALLOC_COUNT)
+			rc = -ENOMEM;
+
+		size *= 2;
+	}
+
+	kmem_set_warning(1);
+
+	return rc;
+}
+
+/* vmem_alloc test: allocate/free batches of SPLAT_VMEM_ALLOC_COUNT
+ * buffers at doubling sizes from PAGE_SIZE up to 1024 pages.
+ */
+static int
+splat_kmem_test3(struct file *file, void *arg)
+{
+	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
+	int size = PAGE_SIZE;
+	int i, count, rc = 0;
+
+	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
+		count = 0;
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+			ptr[i] = vmem_alloc(size, KM_SLEEP);
+			if (ptr[i])
+				count++;
+		}
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
+			if (ptr[i])
+				vmem_free(ptr[i], size);
+
+		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
+	                   "%d byte allocations, %d/%d successful\n",
+		           size, count, SPLAT_VMEM_ALLOC_COUNT);
+		if (count != SPLAT_VMEM_ALLOC_COUNT)
+			rc = -ENOMEM;
+
+		size *= 2;
+	}
+
+	return rc;
+}
+
+/* vmem_zalloc test: like test3 but additionally verifies every
+ * successful allocation was zero filled.
+ */
+static int
+splat_kmem_test4(struct file *file, void *arg)
+{
+	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
+	int size = PAGE_SIZE;
+	int i, j, count, rc = 0;
+
+	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
+		count = 0;
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+			ptr[i] = vmem_zalloc(size, KM_SLEEP);
+			if (ptr[i])
+				count++;
+		}
+
+		/* Ensure buffer has been zero filled.  Fix: skip failed
+		 * allocations; the original dereferenced NULL ptr[i]. */
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+			if (ptr[i] == NULL)
+				continue;
+
+			for (j = 0; j < size; j++) {
+				if (((char *)ptr[i])[j] != '\0') {
+					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
+						  "%d-byte allocation was "
+						  "not zeroed\n", size);
+					rc = -EFAULT;
+				}
+			}
+		}
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
+			if (ptr[i])
+				vmem_free(ptr[i], size);
+
+		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
+	                   "%d byte allocations, %d/%d successful\n",
+		           size, count, SPLAT_VMEM_ALLOC_COUNT);
+		if (count != SPLAT_VMEM_ALLOC_COUNT)
+			rc = -ENOMEM;
+
+		size *= 2;
+	}
+
+	return rc;
+}
+
+#define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL
+#define SPLAT_KMEM_CACHE_NAME "kmem_test"
+#define SPLAT_KMEM_OBJ_COUNT 128
+#define SPLAT_KMEM_OBJ_RECLAIM 16
+
+typedef struct kmem_cache_data {
+ unsigned long kcd_magic;
+ int kcd_flag;
+ char kcd_buf[0];
+} kmem_cache_data_t;
+
+typedef struct kmem_cache_priv {
+ unsigned long kcp_magic;
+ struct file *kcp_file;
+ kmem_cache_t *kcp_cache;
+ kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];
+ spinlock_t kcp_lock;
+ wait_queue_head_t kcp_waitq;
+ int kcp_size;
+ int kcp_count;
+ int kcp_threads;
+ int kcp_alloc;
+ int kcp_rc;
+} kmem_cache_priv_t;
+
+/* Slab object constructor: stamps the object with the private magic,
+ * sets kcd_flag, fills the payload with 0xaa, and counts the live
+ * object in kcp_count.  Always returns 0 (success).
+ */
+static int
+splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+
+	if (kcd && kcp) {
+		kcd->kcd_magic = kcp->kcp_magic;
+		kcd->kcd_flag = 1;
+		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
+		kcp->kcp_count++;
+	}
+
+	return 0;
+}
+
+/* Slab object destructor: clears the magic/flag, poisons the payload
+ * with 0xbb, and decrements kcp_count -- so kcp_count == 0 after
+ * cache destruction proves every constructed object was destroyed.
+ */
+static void
+splat_kmem_cache_test_destructor(void *ptr, void *priv)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+
+	if (kcd && kcp) {
+		kcd->kcd_magic = 0;
+		kcd->kcd_flag = 0;
+		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
+		kcp->kcp_count--;
+	}
+
+	return;
+}
+
+/* Shared helper for tests 5/6: create a cache of 'size'-byte objects
+ * with the given flags, allocate one object, verify the constructor
+ * ran and saw the private data, then destroy the cache and verify the
+ * destructor ran for every constructed object (kcp_count back to 0).
+ */
+static int
+splat_kmem_cache_size_test(struct file *file, void *arg,
+			   char *name, int size, int flags)
+{
+	kmem_cache_t *cache = NULL;
+	kmem_cache_data_t *kcd = NULL;
+	kmem_cache_priv_t kcp;
+	int rc = 0, max;
+
+	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
+	kcp.kcp_file = file;
+	kcp.kcp_size = size;
+	kcp.kcp_count = 0;
+	kcp.kcp_rc = 0;
+
+	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
+	                          splat_kmem_cache_test_constructor,
+	                          splat_kmem_cache_test_destructor,
+	                          NULL, &kcp, NULL, flags);
+	if (!cache) {
+		splat_vprint(file, name,
+	                   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		return -ENOMEM;
+	}
+
+	kcd = kmem_cache_alloc(cache, KM_SLEEP);
+	if (!kcd) {
+		splat_vprint(file, name,
+	                   "Unable to allocate from '%s'\n",
+		           SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (!kcd->kcd_flag) {
+		/* Fix: corrected "contructor" typo in the message */
+		splat_vprint(file, name,
+	                   "Failed to run constructor for '%s'\n",
+		           SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (kcd->kcd_magic != kcp.kcp_magic) {
+		splat_vprint(file, name,
+	                   "Failed to pass private data to constructor "
+		           "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	max = kcp.kcp_count;
+	kmem_cache_free(cache, kcd);
+
+	/* Destroy the entire cache which will force destructors to
+	 * run and we can verify one was called for every object */
+	kmem_cache_destroy(cache);
+	if (kcp.kcp_count) {
+		splat_vprint(file, name,
+	                   "Failed to run destructor on all slab objects "
+		           "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+	}
+
+	splat_vprint(file, name,
+	           "Successfully ran ctors/dtors for %d elements in '%s'\n",
+	           max, SPLAT_KMEM_CACHE_NAME);
+
+	return rc;
+
+out_free:
+	if (kcd)
+		kmem_cache_free(cache, kcd);
+
+	kmem_cache_destroy(cache);
+	return rc;
+}
+
+/* Validate small object cache behavior for dynamic/kmem/vmem caches */
+static int
+splat_kmem_test5(struct file *file, void *arg)
+{
+	char *name = SPLAT_KMEM_TEST5_NAME;
+	int flag_list[] = { 0, KMC_KMEM, KMC_VMEM };
+	int i, rc = 0;
+
+	/* Exercise the same 128 byte object cache once per backing type */
+	for (i = 0; i < 3; i++) {
+		rc = splat_kmem_cache_size_test(file, arg, name, 128,
+						flag_list[i]);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+/* Validate large object cache behavior for dynamic/kmem/vmem caches.
+ * The object size is 128K for every backing type. */
+static int
+splat_kmem_test6(struct file *file, void *arg)
+{
+	char *name = SPLAT_KMEM_TEST6_NAME;
+	int rc;
+
+	rc = splat_kmem_cache_size_test(file, arg, name, 128 * 1024, 0);
+	if (rc)
+		return rc;
+
+	rc = splat_kmem_cache_size_test(file, arg, name, 128 * 1024, KMC_KMEM);
+	if (rc)
+		return rc;
+
+	/* Fixed: was 128 * 1028, keep the size consistent across flags */
+	return splat_kmem_cache_size_test(file, arg, name, 128 * 1024, KMC_VMEM);
+}
+
+/* Reclaim callback for test7: releases up to SPLAT_KMEM_OBJ_RECLAIM
+ * of the objects cached in kcp_kcd[] back to the slab so the cache
+ * can shrink when kmem_cache_reap_now() is called. */
+static void
+splat_kmem_cache_test_reclaim(void *priv)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	int i, count;
+
+	count = min(SPLAT_KMEM_OBJ_RECLAIM, kcp->kcp_count);
+	splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST7_NAME,
+	             "Reaping %d objects from '%s'\n", count,
+	             SPLAT_KMEM_CACHE_NAME);
+
+	/* Fix: without this guard a zero count would underflow below
+	 * (--count never equals 0) and every cached object would be
+	 * freed in a single reclaim pass */
+	if (count == 0)
+		return;
+
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
+		if (kcp->kcp_kcd[i]) {
+			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
+			kcp->kcp_kcd[i] = NULL;
+
+			if (--count == 0)
+				break;
+		}
+	}
+
+	return;
+}
+
+/* Allocate SPLAT_KMEM_OBJ_COUNT objects from a cache registered with
+ * a reclaim callback, then call kmem_cache_reap_now() once a second
+ * (for at most 60 seconds) until the cache reports zero total objects.
+ * Each reap invokes splat_kmem_cache_test_reclaim() which hands a
+ * batch of our cached objects back so the slabs can be released. */
+static int
+splat_kmem_test7(struct file *file, void *arg)
+{
+	kmem_cache_t *cache;
+	kmem_cache_priv_t kcp;
+	int i, rc = 0;
+
+	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
+	kcp.kcp_file = file;
+	kcp.kcp_size = 256;
+	kcp.kcp_count = 0;
+	kcp.kcp_rc = 0;
+
+	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
+	                          splat_kmem_cache_test_constructor,
+	                          splat_kmem_cache_test_destructor,
+	                          splat_kmem_cache_test_reclaim,
+	                          &kcp, NULL, 0);
+	if (!cache) {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		return -ENOMEM;
+	}
+
+	/* The reclaim callback frees through kcp_cache */
+	kcp.kcp_cache = cache;
+
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
+		/* All allocations need not succeed */
+		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, KM_SLEEP);
+		if (!kcp.kcp_kcd[i]) {
+			splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+			             "Unable to allocate from '%s'\n",
+			             SPLAT_KMEM_CACHE_NAME);
+		}
+	}
+
+	/* The constructor bumps kcp_count for every object built */
+	ASSERT(kcp.kcp_count > 0);
+
+	/* Request the slab cache free any objects it can. For a few reasons
+	 * this may not immediately result in more free memory even if objects
+	 * are freed. First off, due to fragmentation we may not be able to
+	 * reclaim any slabs. Secondly, even if we do we fully clear some
+	 * slabs we will not want to immedately reclaim all of them because
+	 * we may contend with cache allocs and thrash. What we want to see
+	 * is slab size decrease more gradually as it becomes clear they
+	 * will not be needed. This should be acheivable in less than minute
+	 * if it takes longer than this something has gone wrong.
+	 */
+	for (i = 0; i < 60; i++) {
+		kmem_cache_reap_now(cache);
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "%s cache objects %d, slabs %u/%u objs %u/%u\n",
+		             SPLAT_KMEM_CACHE_NAME, kcp.kcp_count,
+		             (unsigned)cache->skc_slab_alloc,
+		             (unsigned)cache->skc_slab_total,
+		             (unsigned)cache->skc_obj_alloc,
+		             (unsigned)cache->skc_obj_total);
+
+		if (cache->skc_obj_total == 0)
+			break;
+
+		/* Sleep roughly one second between reap attempts */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+
+	if (cache->skc_obj_total == 0) {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Successfully created %d objects "
+		             "in cache %s and reclaimed them\n",
+		             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
+	} else {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Failed to reclaim %u/%d objects from cache %s\n",
+		             (unsigned)cache->skc_obj_total, SPLAT_KMEM_OBJ_COUNT,
+		             SPLAT_KMEM_CACHE_NAME);
+		rc = -ENOMEM;
+	}
+
+	/* Cleanup our mess (for failure case of time expiring) */
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
+		if (kcp.kcp_kcd[i])
+			kmem_cache_free(cache, kcp.kcp_kcd[i]);
+
+	kmem_cache_destroy(cache);
+
+	return rc;
+}
+
+/* Worker thread for test8: allocate kcp_alloc objects from the shared
+ * cache so they are all outstanding simultaneously, free them, then
+ * record the first error in kcp_rc and signal completion.  The last
+ * thread to finish wakes the waiter in splat_kmem_test8_sc(). */
+static void
+splat_kmem_test8_thread(void *arg)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
+	int count = kcp->kcp_alloc, rc = 0, i;
+	void **objs;
+
+	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
+
+	/* Scratch array to hold every allocated object pointer */
+	objs = vmem_zalloc(count * sizeof(void *), KM_SLEEP);
+	if (!objs) {
+		splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
+		             "Unable to alloc objp array for cache '%s'\n",
+		             kcp->kcp_cache->skc_name);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < count; i++) {
+		objs[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
+		if (!objs[i]) {
+			splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
+			             "Unable to allocate from cache '%s'\n",
+			             kcp->kcp_cache->skc_name);
+			rc = -ENOMEM;
+			break;
+		}
+	}
+
+	/* Free whatever was successfully allocated before any failure */
+	for (i = 0; i < count; i++)
+		if (objs[i])
+			kmem_cache_free(kcp->kcp_cache, objs[i]);
+
+	vmem_free(objs, count * sizeof(void *));
+out:
+	/* Publish the first error and signal completion; kcp_lock also
+	 * orders the kcp_threads update against the waiter's check */
+	spin_lock(&kcp->kcp_lock);
+	if (!kcp->kcp_rc)
+		kcp->kcp_rc = rc;
+
+	if (--kcp->kcp_threads == 0)
+		wake_up(&kcp->kcp_waitq);
+
+	spin_unlock(&kcp->kcp_lock);
+
+	thread_exit();
+}
+
+/* Return non-zero once the number of outstanding worker threads
+ * equals 'threads'; used as the wait_event() condition below. */
+static int
+splat_kmem_test8_count(kmem_cache_priv_t *kcp, int threads)
+{
+	int match;
+
+	spin_lock(&kcp->kcp_lock);
+	match = (kcp->kcp_threads == threads);
+	spin_unlock(&kcp->kcp_lock);
+
+	return match;
+}
+
+/* This test will always pass and is simply here so I can easily
+ * eyeball the slab cache locking overhead to ensure it is reasonable.
+ * For allocation counts 1,2,4,...,count it creates a fresh cache of
+ * 'size' byte objects, launches 32 threads that each allocate and
+ * free that many objects, and prints the elapsed time plus slab and
+ * object totals (actual/max/expected).
+ */
+static int
+splat_kmem_test8_sc(struct file *file, void *arg, int size, int count)
+{
+	kmem_cache_priv_t kcp;
+	kthread_t *thr;
+	struct timespec start, stop, delta;
+	char cache_name[32];
+	int i, j, rc = 0, threads = 32;
+
+	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
+	kcp.kcp_file = file;
+
+	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "name",
+	             "time (sec)\tslabs \tobjs \thash\n");
+	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "",
+	             " \ttot/max/calc\ttot/max/calc\n");
+
+	/* Double the per-thread allocation count each iteration */
+	for (i = 1; i <= count; i *= 2) {
+		kcp.kcp_size = size;
+		kcp.kcp_count = 0;
+		kcp.kcp_threads = 0;
+		kcp.kcp_alloc = i;
+		kcp.kcp_rc = 0;
+		spin_lock_init(&kcp.kcp_lock);
+		init_waitqueue_head(&kcp.kcp_waitq);
+
+		(void)snprintf(cache_name, 32, "%s-%d-%d",
+		               SPLAT_KMEM_CACHE_NAME, size, i);
+		kcp.kcp_cache = kmem_cache_create(cache_name, kcp.kcp_size, 0,
+		                          splat_kmem_cache_test_constructor,
+		                          splat_kmem_cache_test_destructor,
+		                          NULL, &kcp, NULL, 0);
+		if (!kcp.kcp_cache) {
+			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
+			             "Unable to create '%s' cache\n",
+			             SPLAT_KMEM_CACHE_NAME);
+			rc = -ENOMEM;
+			break;
+		}
+
+		start = current_kernel_time();
+
+		/* NOTE(review): kcp_threads is incremented after
+		 * thread_create(); a worker that finishes first briefly
+		 * drives the count negative — confirm the wait_event()
+		 * condition below tolerates that ordering. */
+		for (j = 0; j < threads; j++) {
+			thr = thread_create(NULL, 0, splat_kmem_test8_thread,
+			                    &kcp, 0, &p0, TS_RUN, minclsyspri);
+			if (thr == NULL) {
+				rc = -ESRCH;
+				break;
+			}
+			spin_lock(&kcp.kcp_lock);
+			kcp.kcp_threads++;
+			spin_unlock(&kcp.kcp_lock);
+		}
+
+		/* Sleep until the thread sets kcp.kcp_threads == 0 */
+		wait_event(kcp.kcp_waitq, splat_kmem_test8_count(&kcp, 0));
+		stop = current_kernel_time();
+		delta = timespec_sub(stop, start);
+
+		/* calc columns: expected slab/object counts for i allocs
+		 * across all threads */
+		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %2ld.%09ld\t"
+		             "%lu/%lu/%lu\t%lu/%lu/%lu\n",
+		             kcp.kcp_cache->skc_name,
+		             delta.tv_sec, delta.tv_nsec,
+		             (unsigned long)kcp.kcp_cache->skc_slab_total,
+		             (unsigned long)kcp.kcp_cache->skc_slab_max,
+		             (unsigned long)(kcp.kcp_alloc * threads /
+		                            SPL_KMEM_CACHE_OBJ_PER_SLAB),
+		             (unsigned long)kcp.kcp_cache->skc_obj_total,
+		             (unsigned long)kcp.kcp_cache->skc_obj_max,
+		             (unsigned long)(kcp.kcp_alloc * threads));
+
+		kmem_cache_destroy(kcp.kcp_cache);
+
+		/* Surface the first per-thread failure */
+		if (!rc && kcp.kcp_rc)
+			rc = kcp.kcp_rc;
+
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+/* Driver for test8: sweep object sizes from 16 bytes to 1M in 4x
+ * multiples, running the locking-overhead scenario with per-thread
+ * allocation counts doubling from 1 up to 256 at each size. */
+static int
+splat_kmem_test8(struct file *file, void *arg)
+{
+	int i, rc = 0;
+
+	/* Run through slab cache with objects size from
+	 * 16-1Mb in 4x multiples with up to 256 objects per thread */
+	for (i = 16; i <= 1024*1024; i *= 4) {
+		rc = splat_kmem_test8_sc(file, arg, i, 256);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+/* Allocate and register the kmem test subsystem, wiring up tests 1-8.
+ * Returns the new descriptor (owned by the caller, released via
+ * splat_kmem_fini()) or NULL if the allocation fails. */
+splat_subsystem_t *
+splat_kmem_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	/* NOTE(review): strncpy() does not terminate a name that exactly
+	 * fills the buffer; the memset() above zeroes the struct so the
+	 * short constant names used here remain terminated. */
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;
+
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
+	              SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
+	              SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
+	              SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
+	              SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
+	              SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
+	              SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
+	              SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
+	              SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
+
+	return sub;
+}
+
+/* Unregister the kmem tests (in reverse registration order) and free
+ * the subsystem descriptor allocated by splat_kmem_init(). */
+void
+splat_kmem_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);
+
+	kfree(sub);
+}
+
+/* Return the unique subsystem id for the kmem tests */
+int
+splat_kmem_id(void)
+{
+	return SPLAT_SUBSYSTEM_KMEM;
+}
diff --git a/module/splat/splat-kobj.c b/module/splat/splat-kobj.c
new file mode 100644
index 000000000..2137ab02a
--- /dev/null
+++ b/module/splat/splat-kobj.c
@@ -0,0 +1,164 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_KOBJ 0x0a00
+#define SPLAT_KOBJ_NAME "kobj"
+#define SPLAT_KOBJ_DESC "Kernel Kobj Tests"
+
+#define SPLAT_KOBJ_TEST1_ID 0x0a01
+#define SPLAT_KOBJ_TEST1_NAME "open"
+#define SPLAT_KOBJ_TEST1_DESC "Kobj Open/Close Test"
+
+#define SPLAT_KOBJ_TEST2_ID 0x0a02
+#define SPLAT_KOBJ_TEST2_NAME "size/read"
+#define SPLAT_KOBJ_TEST2_DESC "Kobj Size/Read Test"
+
+#define SPLAT_KOBJ_TEST_FILE "/etc/fstab"
+
+/* Verify SPLAT_KOBJ_TEST_FILE can be kobj opened and closed */
+static int
+splat_kobj_test1(struct file *file, void *arg)
+{
+	struct _buf *f;
+	int rc = 0;
+
+	f = kobj_open_file(SPLAT_KOBJ_TEST_FILE);
+	if (f == (struct _buf *)-1) {
+		splat_vprint(file, SPLAT_KOBJ_TEST1_NAME, "Failed to open "
+			     "test file: %s\n", SPLAT_KOBJ_TEST_FILE);
+		rc = -ENOENT;
+	} else {
+		kobj_close_file(f);
+		splat_vprint(file, SPLAT_KOBJ_TEST1_NAME, "Successfully opened and "
+			     "closed test file: %s\n", SPLAT_KOBJ_TEST_FILE);
+	}
+
+	return rc;
+} /* splat_kobj_test1() */
+
+/* Stat SPLAT_KOBJ_TEST_FILE, read the whole file into a buffer, and
+ * verify the number of bytes read matches the stat'ed size.  The
+ * strlen() comparison assumes the file contains no embedded NULs,
+ * which is reasonable for /etc/fstab.  Returns 0 on success or a
+ * negative errno.  Fixed: %lld was used with uint64_t/size_t values,
+ * a printf format/type mismatch; cast to unsigned long long and use
+ * %llu instead. */
+static int
+splat_kobj_test2(struct file *file, void *arg)
+{
+	struct _buf *f;
+	char *buf;
+	uint64_t size;
+	int rc;
+
+	f = kobj_open_file(SPLAT_KOBJ_TEST_FILE);
+	if (f == (struct _buf *)-1) {
+		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to open "
+			     "test file: %s\n", SPLAT_KOBJ_TEST_FILE);
+		return -ENOENT;
+	}
+
+	rc = kobj_get_filesize(f, &size);
+	if (rc) {
+		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed stat of "
+			     "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc);
+		goto out;
+	}
+
+	/* +1 byte guarantees NUL termination for the strlen() below */
+	buf = kmalloc(size + 1, GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
+		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed to alloc "
+			     "%llu bytes for tmp buffer (%d)\n",
+			     (unsigned long long)size, rc);
+		goto out;
+	}
+
+	memset(buf, 0, size + 1);
+	rc = kobj_read_file(f, buf, size, 0);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Failed read of "
+			     "test file: %s (%d)\n", SPLAT_KOBJ_TEST_FILE, rc);
+		goto out2;
+	}
+
+	/* Validate we read as many bytes as expected based on the stat. This
+	 * isn't a perfect test since we didn't create the file however it is
+	 * pretty unlikely there are garbage characters in your /etc/fstab */
+	if (size != (uint64_t)strlen(buf)) {
+		rc = -EFBIG;
+		splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Stat'ed size "
+			     "(%llu) does not match number of bytes read "
+			     "(%llu)\n", (unsigned long long)size,
+			     (unsigned long long)strlen(buf));
+		goto out2;
+	}
+
+	rc = 0;
+	splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "\n%s\n", buf);
+	splat_vprint(file, SPLAT_KOBJ_TEST2_NAME, "Successfully stat'ed "
+		     "and read expected number of bytes (%llu) from test "
+		     "file: %s\n", (unsigned long long)size,
+		     SPLAT_KOBJ_TEST_FILE);
+out2:
+	kfree(buf);
+out:
+	kobj_close_file(f);
+
+	return rc;
+} /* splat_kobj_test2() */
+
+/* Allocate and register the kobj test subsystem, wiring up tests 1-2.
+ * Returns the new descriptor (owned by the caller, released via
+ * splat_kobj_fini()) or NULL if the allocation fails. */
+splat_subsystem_t *
+splat_kobj_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	/* memset() zeroes the struct so the short constant names copied
+	 * by strncpy() below remain NUL terminated */
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_KOBJ_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_KOBJ_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_KOBJ;
+
+	SPLAT_TEST_INIT(sub, SPLAT_KOBJ_TEST1_NAME, SPLAT_KOBJ_TEST1_DESC,
+	              SPLAT_KOBJ_TEST1_ID, splat_kobj_test1);
+	SPLAT_TEST_INIT(sub, SPLAT_KOBJ_TEST2_NAME, SPLAT_KOBJ_TEST2_DESC,
+	              SPLAT_KOBJ_TEST2_ID, splat_kobj_test2);
+
+	return sub;
+} /* splat_kobj_init() */
+
+/* Unregister the kobj tests (in reverse registration order) and free
+ * the subsystem descriptor allocated by splat_kobj_init(). */
+void
+splat_kobj_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+
+	SPLAT_TEST_FINI(sub, SPLAT_KOBJ_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KOBJ_TEST1_ID);
+
+	kfree(sub);
+} /* splat_kobj_fini() */
+
+/* Return the unique subsystem id for the kobj tests */
+int
+splat_kobj_id(void)
+{
+	return SPLAT_SUBSYSTEM_KOBJ;
+} /* splat_kobj_id() */
diff --git a/module/splat/splat-list.c b/module/splat/splat-list.c
new file mode 100644
index 000000000..3d435cad0
--- /dev/null
+++ b/module/splat/splat-list.c
@@ -0,0 +1,473 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_LIST 0x0c00
+#define SPLAT_LIST_NAME "list"
+#define SPLAT_LIST_DESC "Kernel List Tests"
+
+#define SPLAT_LIST_TEST1_ID 0x0c01
+#define SPLAT_LIST_TEST1_NAME "create/destroy"
+#define SPLAT_LIST_TEST1_DESC "Create/destroy Test"
+
+#define SPLAT_LIST_TEST2_ID 0x0c02
+#define SPLAT_LIST_TEST2_NAME "ins/rm head"
+#define SPLAT_LIST_TEST2_DESC "Insert/remove head Test"
+
+#define SPLAT_LIST_TEST3_ID 0x0c03
+#define SPLAT_LIST_TEST3_NAME "ins/rm tail"
+#define SPLAT_LIST_TEST3_DESC "Insert/remove tail Test"
+
+#define SPLAT_LIST_TEST4_ID 0x0c04
+#define SPLAT_LIST_TEST4_NAME "insert_after"
+#define SPLAT_LIST_TEST4_DESC "Insert_after Test"
+
+#define SPLAT_LIST_TEST5_ID 0x0c05
+#define SPLAT_LIST_TEST5_NAME "insert_before"
+#define SPLAT_LIST_TEST5_DESC "Insert_before Test"
+
+#define SPLAT_LIST_TEST6_ID 0x0c06
+#define SPLAT_LIST_TEST6_NAME "remove"
+#define SPLAT_LIST_TEST6_DESC "Remove Test"
+
+/* Fixed: was 0x0c7, which breaks the 0x0cNN pattern of tests 1-6 */
+#define SPLAT_LIST_TEST7_ID 0x0c07
+#define SPLAT_LIST_TEST7_NAME "active"
+#define SPLAT_LIST_TEST7_DESC "Active Test"
+
+/* It is important that li_node is not the first element, this
+ * ensures the list_d2l/list_object macros are working correctly. */
+typedef struct list_item {
+ int li_data;
+ list_node_t li_node;
+} list_item_t;
+
+#define LIST_ORDER_STACK 0
+#define LIST_ORDER_QUEUE 1
+
+/* Create an empty list, verify it reports empty, destroy it, and
+ * verify the destroyed list head is no longer linked.  Returns 0 on
+ * success, -EEXIST if the new list is not empty, or -EIO if the
+ * destroyed list head still appears active. */
+static int
+splat_list_test1(struct file *file, void *arg)
+{
+	list_t list;
+
+	splat_vprint(file, SPLAT_LIST_TEST1_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	if (!list_is_empty(&list)) {
+		splat_vprint(file, SPLAT_LIST_TEST1_NAME,
+			     "New list NOT empty%s\n", "");
+		/* list_destroy() intentionally skipped to avoid assert */
+		return -EEXIST;
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST1_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	/* Validate the list has been destroyed */
+	if (list_link_active(&list.list_head)) {
+		splat_vprint(file, SPLAT_LIST_TEST1_NAME,
+			     "Destroyed list still active%s", "");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Walk 'list' and verify it holds exactly 'size' items whose li_data
+ * values equal index * mult in order.  A queue (order != 0) is walked
+ * from the head via list_next(); a stack from the tail via
+ * list_prev() — deliberately exercising both traversal directions.
+ * Returns 0 on success, -EIDRM on a data mismatch, or -E2BIG on a
+ * length mismatch. */
+static int
+splat_list_validate(list_t *list, int size, int order, int mult)
+{
+	list_item_t *item;
+	int idx = 0;
+
+	item = order ? list_head(list) : list_tail(list);
+
+	/* Bound the walk at size + 1 so corruption cannot loop forever */
+	while (idx < size + 1 && item != NULL) {
+		if (item->li_data != idx * mult)
+			return -EIDRM;
+
+		idx++;
+		item = order ? list_next(list, item) : list_prev(list, item);
+	}
+
+	if (idx != size)
+		return -E2BIG;
+
+	return 0;
+}
+
+/* Push list_size items onto the head of a list and verify the
+ * resulting ordering is LIFO (a stack). */
+static int
+splat_list_test2(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *item;
+	int idx, list_size = 8, rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	/* Insert all items at the list head to form a stack */
+	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
+		     "Adding %d items to list head\n", list_size);
+	for (idx = 0; idx < list_size; idx++) {
+		item = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+		if (item == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		list_link_init(&item->li_node);
+		item->li_data = idx;
+		list_insert_head(&list, item);
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
+		     "Validating %d item list is a stack\n", list_size);
+	rc = splat_list_validate(&list, list_size, LIST_ORDER_STACK, 1);
+	if (rc)
+		splat_vprint(file, SPLAT_LIST_TEST2_NAME,
+			     "List validation failed, %d\n", rc);
+out:
+	/* Drain the list from the head, freeing every item */
+	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
+		     "Removing %d items from list head\n", list_size);
+	while ((item = list_remove_head(&list)) != NULL)
+		kmem_free(item, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Append list_size items to the tail of a list and verify the
+ * resulting ordering is FIFO (a queue). */
+static int
+splat_list_test3(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *item;
+	int idx, list_size = 8, rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST3_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	/* Insert all items at the list tail to form a queue */
+	splat_vprint(file, SPLAT_LIST_TEST3_NAME,
+		     "Adding %d items to list tail\n", list_size);
+	for (idx = 0; idx < list_size; idx++) {
+		item = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+		if (item == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		list_link_init(&item->li_node);
+		item->li_data = idx;
+		list_insert_tail(&list, item);
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST3_NAME,
+		     "Validating %d item list is a queue\n", list_size);
+	rc = splat_list_validate(&list, list_size, LIST_ORDER_QUEUE, 1);
+	if (rc)
+		splat_vprint(file, SPLAT_LIST_TEST3_NAME,
+			     "List validation failed, %d\n", rc);
+out:
+	/* Drain the list from the tail, freeing every item */
+	splat_vprint(file, SPLAT_LIST_TEST3_NAME,
+		     "Removing %d items from list tail\n", list_size);
+	while ((item = list_remove_tail(&list)) != NULL)
+		kmem_free(item, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST3_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Build a queue by inserting each new item after the previously
+ * inserted one (first insert_after with a NULL reference lands the
+ * item at the head), then verify FIFO ordering. */
+static int
+splat_list_test4(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *item, *prev_item = NULL;
+	int idx, list_size = 8, rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST4_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	/* Insert all items after the last item to form a queue */
+	splat_vprint(file, SPLAT_LIST_TEST4_NAME,
+		     "Adding %d items each after the last item\n", list_size);
+	for (idx = 0; idx < list_size; idx++) {
+		item = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+		if (item == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		list_link_init(&item->li_node);
+		item->li_data = idx;
+		list_insert_after(&list, prev_item, item);
+		prev_item = item;
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST4_NAME,
+		     "Validating %d item list is a queue\n", list_size);
+	rc = splat_list_validate(&list, list_size, LIST_ORDER_QUEUE, 1);
+	if (rc)
+		splat_vprint(file, SPLAT_LIST_TEST4_NAME,
+			     "List validation failed, %d\n", rc);
+out:
+	/* Drain the list, freeing every item */
+	splat_vprint(file, SPLAT_LIST_TEST4_NAME,
+		     "Removing %d items from list tail\n", list_size);
+	while ((item = list_remove_head(&list)) != NULL)
+		kmem_free(item, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST4_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Build a stack by inserting each new item before the previously
+ * inserted one (first insert_before with a NULL reference lands the
+ * item at the tail), then verify LIFO ordering.  Fixed: the log
+ * message said "queue" but LIST_ORDER_STACK is what is validated. */
+static int
+splat_list_test5(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *li_new, *li_last = NULL;
+	int i, list_size = 8, rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST5_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	/* Insert all items before the last item to form a stack */
+	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
+		     "Adding %d items each before the last item\n", list_size);
+	for (i = 0; i < list_size; i++) {
+		li_new = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+		if (li_new == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		list_link_init(&li_new->li_node);
+		li_new->li_data = i;
+		list_insert_before(&list, li_last, li_new);
+		li_last = li_new;
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
+		     "Validating %d item list is a stack\n", list_size);
+	rc = splat_list_validate(&list, list_size, LIST_ORDER_STACK, 1);
+	if (rc)
+		splat_vprint(file, SPLAT_LIST_TEST5_NAME,
+			     "List validation failed, %d\n", rc);
+out:
+	/* Remove all items */
+	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
+		     "Removing %d items from list tail\n", list_size);
+	while ((li_new = list_remove_tail(&list)))
+		kmem_free(li_new, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST5_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Build an 8 item queue, remove every odd-valued item in place, and
+ * verify the remaining items form a queue of even values (stride 2).
+ * Fixed: the removed odd items were leaked — list_remove() only
+ * unlinks, and the final drain frees only items still on the list,
+ * so each removed item must be kmem_free()'d here. */
+static int
+splat_list_test6(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *li, *li_prev;
+	int i, list_size = 8, rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	/* Insert all items at the list tail to form a queue */
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
+		     "Adding %d items to list tail\n", list_size);
+	for (i = 0; i < list_size; i++) {
+		li = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+		if (li == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		list_link_init(&li->li_node);
+		li->li_data = i;
+		list_insert_tail(&list, li);
+	}
+
+	/* Remove all odd items from the queue.  li_prev is captured
+	 * before removal so iteration can resume from it; odd items are
+	 * never at the head so li_prev is never NULL here. */
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
+		     "Removing %d odd items from the list\n", list_size / 2);
+	for (li = list_head(&list); li != NULL; li = list_next(&list, li)) {
+		if (li->li_data % 2 == 1) {
+			li_prev = list_prev(&list, li);
+			list_remove(&list, li);
+			kmem_free(li, sizeof(list_item_t));
+			li = li_prev;
+		}
+	}
+
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Validating %d item "
+		     "list is a queue of only even elements\n", list_size / 2);
+	rc = splat_list_validate(&list, list_size / 2, LIST_ORDER_QUEUE, 2);
+	if (rc)
+		splat_vprint(file, SPLAT_LIST_TEST6_NAME,
+			     "List validation failed, %d\n", rc);
+out:
+	/* Remove all items */
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
+		     "Removing %d items from list tail\n", list_size / 2);
+	while ((li = list_remove_tail(&list)))
+		kmem_free(li, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Verify list_link_active() reports the correct state through a node's
+ * whole lifecycle: inactive after init, active once inserted, and
+ * inactive again after removal. */
+static int
+splat_list_test7(struct file *file, void *arg)
+{
+	list_t list;
+	list_item_t *node;
+	int rc = 0;
+
+	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Creating list\n%s", "");
+	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));
+
+	node = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
+	if (node == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Validate newly initialized node is inactive */
+	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Init list node\n%s", "");
+	list_link_init(&node->li_node);
+	if (list_link_active(&node->li_node)) {
+		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Newly initialized "
+			     "list node should inactive %p/%p\n",
+			     node->li_node.prev, node->li_node.next);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Validate node is active when linked in to a list */
+	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Insert list node\n%s", "");
+	list_insert_head(&list, node);
+	if (!list_link_active(&node->li_node)) {
+		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
+			     "inserted in list should be active %p/%p\n",
+			     node->li_node.prev, node->li_node.next);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Validate node is inactive when removed from list */
+	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Remove list node\n%s", "");
+	list_remove(&list, node);
+	if (list_link_active(&node->li_node)) {
+		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
+			     "removed from list should be inactive %p/%p\n",
+			     node->li_node.prev, node->li_node.next);
+		rc = -EINVAL;
+	}
+
+	kmem_free(node, sizeof(list_item_t));
+out:
+	/* Drain anything still linked (node is on the list only if an
+	 * earlier check bailed before its removal) */
+	while ((node = list_remove_head(&list)) != NULL)
+		kmem_free(node, sizeof(list_item_t));
+
+	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Destroying list\n%s", "");
+	list_destroy(&list);
+
+	return rc;
+}
+
+/* Allocate and register the list test subsystem, wiring up tests 1-7.
+ * Returns the new descriptor (owned by the caller, released via
+ * splat_list_fini()) or NULL if the allocation fails. */
+splat_subsystem_t *
+splat_list_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	/* memset() zeroes the struct so the short constant names copied
+	 * by strncpy() below remain NUL terminated */
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_LIST_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_LIST_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_LIST;
+
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST1_NAME, SPLAT_LIST_TEST1_DESC,
+	                SPLAT_LIST_TEST1_ID, splat_list_test1);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST2_NAME, SPLAT_LIST_TEST2_DESC,
+	                SPLAT_LIST_TEST2_ID, splat_list_test2);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST3_NAME, SPLAT_LIST_TEST3_DESC,
+	                SPLAT_LIST_TEST3_ID, splat_list_test3);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST4_NAME, SPLAT_LIST_TEST4_DESC,
+	                SPLAT_LIST_TEST4_ID, splat_list_test4);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST5_NAME, SPLAT_LIST_TEST5_DESC,
+	                SPLAT_LIST_TEST5_ID, splat_list_test5);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST6_NAME, SPLAT_LIST_TEST6_DESC,
+	                SPLAT_LIST_TEST6_ID, splat_list_test6);
+	SPLAT_TEST_INIT(sub, SPLAT_LIST_TEST7_NAME, SPLAT_LIST_TEST7_DESC,
+	                SPLAT_LIST_TEST7_ID, splat_list_test7);
+
+	return sub;
+}
+
+/* Unregister the list tests (in reverse registration order) and free
+ * the subsystem descriptor allocated by splat_list_init(). */
+void
+splat_list_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST7_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST6_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST5_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST4_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST3_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_LIST_TEST1_ID);
+
+	kfree(sub);
+}
+
+/* Return the unique subsystem id for the list tests */
+int
+splat_list_id(void)
+{
+	return SPLAT_SUBSYSTEM_LIST;
+}
diff --git a/module/splat/splat-mutex.c b/module/splat/splat-mutex.c
new file mode 100644
index 000000000..640f8f407
--- /dev/null
+++ b/module/splat/splat-mutex.c
@@ -0,0 +1,355 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_MUTEX 0x0400
+#define SPLAT_MUTEX_NAME "mutex"
+#define SPLAT_MUTEX_DESC "Kernel Mutex Tests"
+
+#define SPLAT_MUTEX_TEST1_ID 0x0401
+#define SPLAT_MUTEX_TEST1_NAME "tryenter"
+#define SPLAT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
+
+#define SPLAT_MUTEX_TEST2_ID 0x0402
+#define SPLAT_MUTEX_TEST2_NAME "race"
+#define SPLAT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
+
+#define SPLAT_MUTEX_TEST3_ID 0x0403
+#define SPLAT_MUTEX_TEST3_NAME "owned"
+#define SPLAT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
+
+#define SPLAT_MUTEX_TEST4_ID 0x0404
+#define SPLAT_MUTEX_TEST4_NAME "owner"
+#define SPLAT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
+
+#define SPLAT_MUTEX_TEST_MAGIC 0x115599DDUL
+#define SPLAT_MUTEX_TEST_NAME "mutex_test"
+#define SPLAT_MUTEX_TEST_WORKQ "mutex_wq"
+#define SPLAT_MUTEX_TEST_COUNT 128
+
+/* Private state shared between the mutex test entry points and the
+ * work items they schedule; mp_magic guards against stale data. */
+typedef struct mutex_priv {
+	unsigned long mp_magic;		/* SPLAT_MUTEX_TEST_MAGIC sanity tag */
+	struct file *mp_file;		/* splat control file used for logging */
+	struct work_struct mp_work[SPLAT_MUTEX_TEST_COUNT];	/* per-item work */
+	kmutex_t mp_mtx;		/* the mutex under test */
+	int mp_rc;			/* result reported back by work items */
+} mutex_priv_t;
+
+#ifdef HAVE_3ARGS_INIT_WORK
+/* Work item for test1: attempt a non-blocking mutex_tryenter() and
+ * record -EBUSY in mp_rc if the mutex was already held, 0 otherwise.
+ * Note the mutex is intentionally NOT dropped here on success; the
+ * test only checks the tryenter result. */
+static void
+splat_mutex_test1_work(void *priv)
+{
+	mutex_priv_t *mp = (mutex_priv_t *)priv;
+
+	ASSERT(mp->mp_magic == SPLAT_MUTEX_TEST_MAGIC);
+	mp->mp_rc = 0;
+
+	if (!mutex_tryenter(&mp->mp_mtx))
+		mp->mp_rc = -EBUSY;
+}
+#endif
+
+/* Validate mutex_tryenter() semantics: it must fail without blocking
+ * while the mutex is held and succeed once it is released.  The
+ * tryenter is performed from a single-threaded workqueue so the
+ * attempt comes from a context other than the lock holder.  Returns
+ * 0 on success or a negative errno; compiled to a no-op when the
+ * 3-argument INIT_WORK() API is unavailable. */
+static int
+splat_mutex_test1(struct file *file, void *arg)
+{
+	int rc = 0;
+#ifdef HAVE_3ARGS_INIT_WORK
+	struct workqueue_struct *wq;
+	struct work_struct work;
+	mutex_priv_t *mp;
+
+	mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+	if (mp == NULL)
+		return -ENOMEM;
+
+	wq = create_singlethread_workqueue(SPLAT_MUTEX_TEST_WORKQ);
+	if (wq == NULL) {
+		rc = -ENOMEM;
+		goto out2;
+	}
+
+	mutex_init(&(mp->mp_mtx), SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+	mutex_enter(&(mp->mp_mtx));
+
+	mp->mp_magic = SPLAT_MUTEX_TEST_MAGIC;
+	mp->mp_file = file;
+	INIT_WORK(&work, splat_mutex_test1_work, mp);
+
+	/* Schedule a work item which will try and acquire the mutex via
+	 * mutex_tryenter() while it is held.  This should fail and the work
+	 * item will indicate this status in the passed private data. */
+	if (!queue_work(wq, &work)) {
+		mutex_exit(&(mp->mp_mtx));
+		rc = -EINVAL;
+		goto out;
+	}
+
+	flush_workqueue(wq);
+	mutex_exit(&(mp->mp_mtx));
+
+	/* Work item successfully acquired mutex, very bad! */
+	if (mp->mp_rc != -EBUSY) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
+	           "mutex_trylock() correctly failed when mutex held\n");
+
+	/* Schedule a work item which will try and acquire the mutex via
+	 * mutex_tryenter() while it is not held.  This should work and
+	 * the item will indicate this status in the passed private data. */
+	if (!queue_work(wq, &work)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	flush_workqueue(wq);
+
+	/* Work item failed to acquire mutex, very bad! */
+	if (mp->mp_rc != 0) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
+	           "mutex_trylock() correctly succeeded when mutex unheld\n");
+out:
+	mutex_destroy(&(mp->mp_mtx));
+	destroy_workqueue(wq);
+out2:
+	kfree(mp);
+#endif
+	return rc;
+}
+
+#ifdef HAVE_3ARGS_INIT_WORK
+/* Work item for test2: enter the shared mutex, read the counter, sleep
+ * briefly while still holding the lock, then write back counter+1.
+ * If mutual exclusion were broken the read/sleep/write window would
+ * let concurrent items lose increments. */
+static void
+splat_mutex_test2_work(void *priv)
+{
+	mutex_priv_t *mp = (mutex_priv_t *)priv;
+	int rc;
+
+	ASSERT(mp->mp_magic == SPLAT_MUTEX_TEST_MAGIC);
+
+	/* Read the value before sleeping and write it after we wake up to
+	 * maximize the chance of a race if mutexes are not working properly */
+	mutex_enter(&mp->mp_mtx);
+	rc = mp->mp_rc;
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(HZ / 100); /* 1/100 of a second */
+	mp->mp_rc = rc + 1;
+	mutex_exit(&mp->mp_mtx);
+}
+#endif
+
+/* Race many work items (one workqueue thread per CPU) through a shared
+ * mutex-protected counter.  Success means the counter equals the number
+ * of items queued, i.e. no increment was lost to a race.  Returns 0 on
+ * success or a negative errno; compiled to a no-op without the
+ * 3-argument INIT_WORK() API. */
+static int
+splat_mutex_test2(struct file *file, void *arg)
+{
+	int rc = 0;
+#ifdef HAVE_3ARGS_INIT_WORK
+	struct workqueue_struct *wq;
+	mutex_priv_t *mp;
+	int i;
+
+	mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+	if (mp == NULL)
+		return -ENOMEM;
+
+	/* Create a thread per CPU items on queue will race */
+	wq = create_workqueue(SPLAT_MUTEX_TEST_WORKQ);
+	if (wq == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	mutex_init(&(mp->mp_mtx), SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+	mp->mp_magic = SPLAT_MUTEX_TEST_MAGIC;
+	mp->mp_file = file;
+	mp->mp_rc = 0;
+
+	/* Schedule N work items to the work queue each of which enters the
+	 * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
+	 * box these work items will be handled by all available CPUs.  The
+	 * mutex is instrumented such that if any two processors are in the
+	 * critical region at the same time the system will panic.  If the
+	 * mutex is implemented right this will never happen, that's a pass. */
+	for (i = 0; i < SPLAT_MUTEX_TEST_COUNT; i++) {
+		INIT_WORK(&(mp->mp_work[i]), splat_mutex_test2_work, mp);
+
+		if (!queue_work(wq, &(mp->mp_work[i]))) {
+			splat_vprint(file, SPLAT_MUTEX_TEST2_NAME,
+			           "Failed to queue work id %d\n", i);
+			rc = -EINVAL;
+		}
+	}
+
+	flush_workqueue(wq);
+
+	if (mp->mp_rc == SPLAT_MUTEX_TEST_COUNT) {
+		splat_vprint(file, SPLAT_MUTEX_TEST2_NAME, "%d racing threads "
+		           "correctly entered/exited the mutex %d times\n",
+		           num_online_cpus(), mp->mp_rc);
+	} else {
+		splat_vprint(file, SPLAT_MUTEX_TEST2_NAME, "%d racing threads "
+		           "only processed %d/%d mutex work items\n",
+		           num_online_cpus(), mp->mp_rc, SPLAT_MUTEX_TEST_COUNT);
+		rc = -EINVAL;
+	}
+
+	mutex_destroy(&(mp->mp_mtx));
+	destroy_workqueue(wq);
+out:
+	kfree(mp);
+#endif
+	return rc;
+}
+
+/* Validate mutex_owned(): it must report true while the current task
+ * holds the mutex and false once the mutex has been dropped.  Returns
+ * 0 on success or -EINVAL on a mismatch. */
+static int
+splat_mutex_test3(struct file *file, void *arg)
+{
+	kmutex_t mtx;
+	int rc = 0;
+
+	mutex_init(&mtx, SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+	mutex_enter(&mtx);
+
+	/* Mutex should be owned by current */
+	if (!mutex_owned(&mtx)) {
+		splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "Mutex should "
+		           "be owned by pid %d but is owned by pid %d\n",
+		           current->pid, mtx.km_owner ? mtx.km_owner->pid : -1);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	mutex_exit(&mtx);
+
+	/* Mutex should not be owned by any task */
+	if (mutex_owned(&mtx)) {
+		splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "Mutex should "
+		           "not be owned but is owned by pid %d\n",
+		           mtx.km_owner ? mtx.km_owner->pid : -1);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "%s",
+	           "Correct mutex_owned() behavior\n");
+out:
+	mutex_destroy(&mtx);
+
+	return rc;
+}
+
+/* Validate mutex_owner(): it must return the current task while the
+ * mutex is held and NULL once it has been dropped.  Returns 0 on
+ * success or -EINVAL on a mismatch.
+ *
+ * Fix: the log messages previously used SPLAT_MUTEX_TEST3_NAME (a
+ * copy-paste from test3), mis-attributing this test's output; they
+ * now use SPLAT_MUTEX_TEST4_NAME. */
+static int
+splat_mutex_test4(struct file *file, void *arg)
+{
+	kmutex_t mtx;
+	kthread_t *owner;
+	int rc = 0;
+
+	mutex_init(&mtx, SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+	mutex_enter(&mtx);
+
+	/* Mutex should be owned by current */
+	owner = mutex_owner(&mtx);
+	if (current != owner) {
+		splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "Mutex should "
+		           "be owned by pid %d but is owned by pid %d\n",
+		           current->pid, owner ? owner->pid : -1);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	mutex_exit(&mtx);
+
+	/* Mutex should not be owned by any task */
+	owner = mutex_owner(&mtx);
+	if (owner) {
+		splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "Mutex should not "
+		           "be owned but is owned by pid %d\n", owner->pid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "%s",
+	           "Correct mutex_owner() behavior\n");
+out:
+	mutex_destroy(&mtx);
+
+	return rc;
+}
+
+/* Allocate and register the "mutex" test subsystem with its four
+ * tests.  Returns the new subsystem or NULL on allocation failure;
+ * the caller releases it with splat_mutex_fini(). */
+splat_subsystem_t *
+splat_mutex_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_MUTEX_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_MUTEX_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_MUTEX;
+
+	SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST1_NAME, SPLAT_MUTEX_TEST1_DESC,
+	              SPLAT_MUTEX_TEST1_ID, splat_mutex_test1);
+	SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST2_NAME, SPLAT_MUTEX_TEST2_DESC,
+	              SPLAT_MUTEX_TEST2_ID, splat_mutex_test2);
+	SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST3_NAME, SPLAT_MUTEX_TEST3_DESC,
+	              SPLAT_MUTEX_TEST3_ID, splat_mutex_test3);
+	SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST4_NAME, SPLAT_MUTEX_TEST4_DESC,
+	              SPLAT_MUTEX_TEST4_ID, splat_mutex_test4);
+
+	return sub;
+}
+
+/* Tear down the "mutex" subsystem: unregister the tests in reverse
+ * order, then free the subsystem allocated by splat_mutex_init(). */
+void
+splat_mutex_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+	SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST4_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST3_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST1_ID);
+
+	kfree(sub);
+}
+
+/* Return the numeric subsystem id for the mutex tests. */
+int
+splat_mutex_id(void) {
+	return SPLAT_SUBSYSTEM_MUTEX;
+}
diff --git a/module/splat/splat-random.c b/module/splat/splat-random.c
new file mode 100644
index 000000000..c96dd480c
--- /dev/null
+++ b/module/splat/splat-random.c
@@ -0,0 +1,129 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_KRNG 0x0300
+#define SPLAT_KRNG_NAME "krng"
+#define SPLAT_KRNG_DESC "Kernel Random Number Generator Tests"
+
+#define SPLAT_KRNG_TEST1_ID 0x0301
+#define SPLAT_KRNG_TEST1_NAME "freq"
+#define SPLAT_KRNG_TEST1_DESC "Frequency Test"
+
+#define KRNG_NUM_BITS 1048576
+#define KRNG_NUM_BYTES (KRNG_NUM_BITS >> 3)
+#define KRNG_NUM_BITS_DIV2 (KRNG_NUM_BITS >> 1)
+#define KRNG_ERROR_RANGE 2097
+
+/* Random Number Generator Tests
+   There can be many more tests on the quality of the
+   random number generator. For now we are only
+   testing the frequency of particular bits.
+   We could also test consecutive sequences,
+   randomness within a particular block, etc.
+   but that is probably not necessary for our purposes */
+
+/* Monobit frequency test: draw KRNG_NUM_BYTES of pseudo-random data
+ * and count the set bits.  For a uniform source the count should sit
+ * near KRNG_NUM_BITS_DIV2; a deviation beyond KRNG_ERROR_RANGE
+ * (presumably chosen as a multi-sigma bound -- TODO confirm the
+ * derivation) fails with -ERANGE.  Returns 0 on success, -ENOMEM if
+ * the sample buffer cannot be allocated. */
+static int
+splat_krng_test1(struct file *file, void *arg)
+{
+	uint8_t *buf;
+	int i, j, diff, num = 0, rc = 0;
+
+	buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
+	if (buf == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
+
+	/* Always succeeds */
+	random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
+
+	/* Count the set bits across the whole sample */
+	for (i = 0; i < KRNG_NUM_BYTES; i++) {
+		uint8_t tmp = buf[i];
+		for (j = 0; j < 8; j++) {
+			uint8_t tmp2 = ((tmp >> j) & 0x01);
+			if (tmp2 == 1) {
+				num++;
+			}
+		}
+	}
+
+	kfree(buf);
+
+	/* Absolute deviation from the expected 50% ones density */
+	diff = KRNG_NUM_BITS_DIV2 - num;
+	if (diff < 0)
+		diff *= -1;
+
+	splat_print(file, "Test 1 Number of ones: %d\n", num);
+	splat_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
+	          diff, KRNG_ERROR_RANGE);
+
+	if (diff > KRNG_ERROR_RANGE)
+		rc = -ERANGE;
+out:
+	return rc;
+}
+
+/* Allocate and register the "krng" test subsystem with its single
+ * frequency test.  Returns the new subsystem or NULL on allocation
+ * failure; the caller releases it with splat_krng_fini(). */
+splat_subsystem_t *
+splat_krng_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_KRNG_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_KRNG_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_KRNG;
+
+	SPLAT_TEST_INIT(sub, SPLAT_KRNG_TEST1_NAME, SPLAT_KRNG_TEST1_DESC,
+	              SPLAT_KRNG_TEST1_ID, splat_krng_test1);
+
+	return sub;
+}
+
+/* Tear down the "krng" subsystem: unregister its test and free the
+ * subsystem allocated by splat_krng_init(). */
+void
+splat_krng_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+
+	SPLAT_TEST_FINI(sub, SPLAT_KRNG_TEST1_ID);
+
+	kfree(sub);
+}
+
+/* Return the numeric subsystem id for the krng tests. */
+int
+splat_krng_id(void) {
+	return SPLAT_SUBSYSTEM_KRNG;
+}
diff --git a/module/splat/splat-rwlock.c b/module/splat/splat-rwlock.c
new file mode 100644
index 000000000..b1db12ea8
--- /dev/null
+++ b/module/splat/splat-rwlock.c
@@ -0,0 +1,786 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_RWLOCK 0x0700
+#define SPLAT_RWLOCK_NAME "rwlock"
+#define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests"
+
+#define SPLAT_RWLOCK_TEST1_ID 0x0701
+#define SPLAT_RWLOCK_TEST1_NAME "rwtest1"
+#define SPLAT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"
+
+#define SPLAT_RWLOCK_TEST2_ID 0x0702
+#define SPLAT_RWLOCK_TEST2_NAME "rwtest2"
+#define SPLAT_RWLOCK_TEST2_DESC "Multiple Writers"
+
+#define SPLAT_RWLOCK_TEST3_ID 0x0703
+#define SPLAT_RWLOCK_TEST3_NAME "rwtest3"
+#define SPLAT_RWLOCK_TEST3_DESC "Owner Verification"
+
+#define SPLAT_RWLOCK_TEST4_ID 0x0704
+#define SPLAT_RWLOCK_TEST4_NAME "rwtest4"
+#define SPLAT_RWLOCK_TEST4_DESC "Trylock Test"
+
+#define SPLAT_RWLOCK_TEST5_ID 0x0705
+#define SPLAT_RWLOCK_TEST5_NAME "rwtest5"
+#define SPLAT_RWLOCK_TEST5_DESC "Write Downgrade Test"
+
+#define SPLAT_RWLOCK_TEST6_ID 0x0706
+#define SPLAT_RWLOCK_TEST6_NAME "rwtest6"
+#define SPLAT_RWLOCK_TEST6_DESC "Read Upgrade Test"
+
+#define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL
+#define SPLAT_RWLOCK_TEST_NAME "rwlock_test"
+#define SPLAT_RWLOCK_TEST_COUNT 8
+
+#define SPLAT_RWLOCK_RELEASE_INIT 0
+#define SPLAT_RWLOCK_RELEASE_WRITERS 1
+#define SPLAT_RWLOCK_RELEASE_READERS 2
+
+/* State shared between the control thread and the rwlock test threads;
+ * rw_priv_lock serializes the counter updates and log output. */
+typedef struct rw_priv {
+	unsigned long rw_magic;		/* SPLAT_RWLOCK_TEST_MAGIC sanity tag */
+	struct file *rw_file;		/* splat control file used for logging */
+	krwlock_t rwl;			/* the rwlock under test */
+	spinlock_t rw_priv_lock;	/* protects counters/logging below */
+	wait_queue_head_t rw_waitq;	/* test threads block here for events */
+	atomic_t rw_completed;		/* threads done with the lock */
+	atomic_t rw_acquired;		/* threads currently holding the lock */
+	atomic_t rw_waiters;		/* threads waiting on the lock */
+	atomic_t rw_release;		/* SPLAT_RWLOCK_RELEASE_* control state */
+} rw_priv_t;
+
+/* Per-thread argument/result block handed to each test thread. */
+typedef struct rw_thr {
+	int rwt_id;			/* thread index */
+	const char *rwt_name;		/* test name used in log messages */
+	rw_priv_t *rwt_rwp;		/* shared test state */
+	int rwt_rc;			/* per-thread result */
+} rw_thr_t;
+
+/* Sleep interruptibly for 'delay' jiffies. */
+static inline void
+splat_rwlock_sleep(signed long delay)
+{
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(delay);
+}
+
+/* Evaluate 'test' while holding 'lock'; yields 1 if the condition held
+ * under the lock, 0 otherwise.  Used as a wait_event predicate. */
+#define splat_rwlock_lock_and_test(lock,test)	\
+({						\
+	int ret = 0;				\
+						\
+	spin_lock(lock);			\
+	ret = (test) ? 1 : 0;			\
+	spin_unlock(lock);			\
+	ret;					\
+})
+
+/* Initialize the shared rwlock test state: magic tag, logging file,
+ * spinlock, wait queue, zeroed counters, and the rwlock itself.
+ * The caller must later release the lock with rw_destroy(). */
+void splat_init_rw_priv(rw_priv_t *rwv, struct file *file)
+{
+	rwv->rw_magic = SPLAT_RWLOCK_TEST_MAGIC;
+	rwv->rw_file = file;
+	spin_lock_init(&rwv->rw_priv_lock);
+	init_waitqueue_head(&rwv->rw_waitq);
+	atomic_set(&rwv->rw_completed, 0);
+	atomic_set(&rwv->rw_acquired, 0);
+	atomic_set(&rwv->rw_waiters, 0);
+	atomic_set(&rwv->rw_release, SPLAT_RWLOCK_RELEASE_INIT);
+
+	/* Initialize the read/write lock */
+	rw_init(&rwv->rwl, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
+}
+
+/* Test1 writer thread: after a small random delay, take the rwlock as
+ * a writer, then hold it until the control thread sets rw_release to
+ * SPLAT_RWLOCK_RELEASE_WRITERS.  Counter updates bracket each state
+ * change so the control thread can observe progress. */
+int
+splat_rwlock_test1_writer_thread(void *arg)
+{
+	rw_thr_t *rwt = (rw_thr_t *)arg;
+	rw_priv_t *rwv = rwt->rwt_rwp;
+	uint8_t rnd = 0;
+	char name[16];
+
+	ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
+	snprintf(name, sizeof(name), "%s%d",
+	         SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
+	daemonize(name);
+	get_random_bytes((void *)&rnd, 1);
+	splat_rwlock_sleep(rnd * HZ / 1000);
+
+	spin_lock(&rwv->rw_priv_lock);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread trying to acquire rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	atomic_inc(&rwv->rw_waiters);
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the rwlock for writing,
+	 * release it when we are told to */
+	rw_enter(&rwv->rwl, RW_WRITER);
+
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_dec(&rwv->rw_waiters);
+	atomic_inc(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread acquired rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Wait here until the control thread
+	 * says we can release the write lock */
+	wait_event_interruptible(rwv->rw_waitq,
+	                         splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
+	                         atomic_read(&rwv->rw_release) ==
+	                         SPLAT_RWLOCK_RELEASE_WRITERS));
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_inc(&rwv->rw_completed);
+	atomic_dec(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread dropped rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Release the rwlock */
+	rw_exit(&rwv->rwl);
+	return 0;
+}
+
+/* Test1 reader thread: wait until some thread (the writer) holds the
+ * lock, then take it as a reader and hold it until the control thread
+ * sets rw_release to SPLAT_RWLOCK_RELEASE_READERS. */
+int
+splat_rwlock_test1_reader_thread(void *arg)
+{
+	rw_thr_t *rwt = (rw_thr_t *)arg;
+	rw_priv_t *rwv = rwt->rwt_rwp;
+	uint8_t rnd = 0;
+	char name[16];
+
+	ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
+	snprintf(name, sizeof(name), "%s%d",
+	         SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
+	daemonize(name);
+	get_random_bytes((void *)&rnd, 1);
+	splat_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the rwlock until
+	 * someone else has already acquired it */
+	wait_event_interruptible(rwv->rw_waitq,
+	                         splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
+	                         atomic_read(&rwv->rw_acquired) > 0));
+
+	spin_lock(&rwv->rw_priv_lock);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s reader thread trying to acquire rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	atomic_inc(&rwv->rw_waiters);
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the rwlock for reading,
+	 * release it when we are told to */
+	rw_enter(&rwv->rwl, RW_READER);
+
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_dec(&rwv->rw_waiters);
+	atomic_inc(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s reader thread acquired rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Wait here until the control thread
+	 * says we can release the read lock */
+	wait_event_interruptible(rwv->rw_waitq,
+	                         splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
+	                         atomic_read(&rwv->rw_release) ==
+	                         SPLAT_RWLOCK_RELEASE_READERS));
+
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_inc(&rwv->rw_completed);
+	atomic_dec(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s reader thread dropped rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Release the rwlock */
+	rw_exit(&rwv->rwl);
+	return 0;
+}
+
+/* Multiple-readers/one-writer test: spawn one writer and N-1 readers,
+ * verify the writer holds the lock exclusively while all readers wait,
+ * release the writer, then verify the readers hold it concurrently.
+ * NOTE(review): 'pids'/'count' are collected but never checked, and rc
+ * is always 0; thread-creation failures would simply hang the polls
+ * below -- worth confirming/fixing upstream. */
+static int
+splat_rwlock_test1(struct file *file, void *arg)
+{
+	int i, count = 0, rc = 0;
+	long pids[SPLAT_RWLOCK_TEST_COUNT];
+	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
+	rw_priv_t rwv;
+
+	/* Initialize private data including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Create some threads, the exact number isn't important just as
+	 * long as we know how many we managed to create and should expect. */
+	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
+		rwt[i].rwt_rwp = &rwv;
+		rwt[i].rwt_id = i;
+		rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;
+		rwt[i].rwt_rc = 0;
+
+		/* The first thread will be a writer */
+		if (i == 0) {
+			pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
+			                        &rwt[i], 0);
+		} else {
+			pids[i] = kernel_thread(splat_rwlock_test1_reader_thread,
+			                        &rwt[i], 0);
+		}
+
+		if (pids[i] >= 0) {
+			count++;
+		}
+	}
+
+	/* Once the writer has the lock, release the readers */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Ensure that there is only 1 writer and all readers are waiting */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	                                  atomic_read(&rwv.rw_acquired) != 1 ||
+	                                  atomic_read(&rwv.rw_waiters) !=
+	                                  SPLAT_RWLOCK_TEST_COUNT - 1)) {
+
+		splat_rwlock_sleep(1 * HZ);
+	}
+	/* Release the writer */
+	spin_lock(&rwv.rw_priv_lock);
+	atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
+	spin_unlock(&rwv.rw_priv_lock);
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Now ensure that there are multiple reader threads holding the lock */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	                                  atomic_read(&rwv.rw_acquired) <= 1)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+	/* Release the readers */
+	spin_lock(&rwv.rw_priv_lock);
+	atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_READERS);
+	spin_unlock(&rwv.rw_priv_lock);
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Wait for the test to complete */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	                                  atomic_read(&rwv.rw_acquired) != 0 ||
+	                                  atomic_read(&rwv.rw_waiters) != 0)) {
+		splat_rwlock_sleep(1 * HZ);
+
+	}
+
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Test2 writer thread: register as a waiter, block until the control
+ * thread releases all writers at once, then take the write lock, sleep
+ * while holding it, and record a non-zero rwt_rc if any other thread
+ * was observed inside the critical section at the same time. */
+int
+splat_rwlock_test2_writer_thread(void *arg)
+{
+	rw_thr_t *rwt = (rw_thr_t *)arg;
+	rw_priv_t *rwv = rwt->rwt_rwp;
+	uint8_t rnd = 0;
+	char name[16];
+
+	ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
+	snprintf(name, sizeof(name), "%s%d",
+	         SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
+	daemonize(name);
+	get_random_bytes((void *)&rnd, 1);
+	splat_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Here just increment the waiters count even if we are not
+	 * exactly about to call rw_enter().  Not really a big deal
+	 * since more than likely will be true when we simulate work
+	 * later on */
+	spin_lock(&rwv->rw_priv_lock);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread trying to acquire rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	atomic_inc(&rwv->rw_waiters);
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Wait here until the control thread
+	 * says we can acquire the write lock */
+	wait_event_interruptible(rwv->rw_waitq,
+	                         splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
+	                         atomic_read(&rwv->rw_release) ==
+	                         SPLAT_RWLOCK_RELEASE_WRITERS));
+
+	/* Take the rwlock for writing */
+	rw_enter(&rwv->rwl, RW_WRITER);
+
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_dec(&rwv->rw_waiters);
+	atomic_inc(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread acquired rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Give up the processor for a bit to simulate
+	 * doing some work while taking the write lock */
+	splat_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Ensure that we are the only one writing */
+	if (atomic_read(&rwv->rw_acquired) > 1) {
+		rwt->rwt_rc = 1;
+	} else {
+		rwt->rwt_rc = 0;
+	}
+
+	spin_lock(&rwv->rw_priv_lock);
+	atomic_inc(&rwv->rw_completed);
+	atomic_dec(&rwv->rw_acquired);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s writer thread dropped rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	rw_exit(&rwv->rwl);
+
+	return 0;
+}
+
+/* Multiple-writers test: spawn N writer threads, release them all at
+ * once, and fail (positive count returned) if any writer ever observed
+ * a second thread inside the write-locked critical section. */
+static int
+splat_rwlock_test2(struct file *file, void *arg)
+{
+	int i, count = 0, rc = 0;
+	long pids[SPLAT_RWLOCK_TEST_COUNT];
+	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
+	rw_priv_t rwv;
+
+	/* Initialize private data including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Create some threads, the exact number isn't important just as
+	 * long as we know how many we managed to create and should expect. */
+	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
+		rwt[i].rwt_rwp = &rwv;
+		rwt[i].rwt_id = i;
+		rwt[i].rwt_name = SPLAT_RWLOCK_TEST2_NAME;
+		rwt[i].rwt_rc = 0;
+
+		/* Every thread is a writer in this test */
+		pids[i] = kernel_thread(splat_rwlock_test2_writer_thread,
+		                        &rwt[i], 0);
+
+		if (pids[i] >= 0) {
+			count++;
+		}
+	}
+
+	/* Wait for writers to get queued up */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	       atomic_read(&rwv.rw_waiters) < SPLAT_RWLOCK_TEST_COUNT)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+	/* Release the writers */
+	spin_lock(&rwv.rw_priv_lock);
+	atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
+	spin_unlock(&rwv.rw_priv_lock);
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Wait for the test to complete */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	       atomic_read(&rwv.rw_acquired) != 0 ||
+	       atomic_read(&rwv.rw_waiters) != 0)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+
+	/* If any of the write threads ever acquired the lock
+	 * while another thread had it, make sure we return
+	 * an error */
+	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
+		if (rwt[i].rwt_rc) {
+			rc++;
+		}
+	}
+
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Validate rw_owner(): the current task must be reported as owner while
+ * holding the lock as a writer, and no owner must be reported when the
+ * lock is free or held as a reader.  Returns 0 on success, -EINVAL on
+ * a mismatch.
+ *
+ * Fix: the first error path previously jumped to out while still
+ * holding the write lock, then called rw_destroy() on a held lock;
+ * the lock is now released before bailing out. */
+static int
+splat_rwlock_test3(struct file *file, void *arg)
+{
+	kthread_t *owner;
+	rw_priv_t rwv;
+	int rc = 0;
+
+	/* Initialize private data
+	 * including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Take the rwlock for writing */
+	rw_enter(&rwv.rwl, RW_WRITER);
+	owner = rw_owner(&rwv.rwl);
+	if (current != owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should "
+		             "be owned by pid %d but is owned by pid %d\n",
+		             current->pid, owner ? owner->pid : -1);
+		/* Drop the write lock before destroying it below */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Release the rwlock */
+	rw_exit(&rwv.rwl);
+	owner = rw_owner(&rwv.rwl);
+	if (owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
+		             "be owned but is owned by pid %d\n", owner->pid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Take the rwlock for reading.
+	 * Should not have an owner */
+	rw_enter(&rwv.rwl, RW_READER);
+	owner = rw_owner(&rwv.rwl);
+	if (owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not "
+		             "be owned but is owned by pid %d\n", owner->pid);
+		/* Release the rwlock */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Release the rwlock */
+	rw_exit(&rwv.rwl);
+
+out:
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Test4 reader thread: once the writer holds the lock, attempt a
+ * non-blocking rw_tryenter(RW_READER).  A success (rwt_rc == 1) is a
+ * failure of the test since the writer should exclude readers; the
+ * expected outcome is a failed tryenter, recorded via rw_completed. */
+int
+splat_rwlock_test4_reader_thread(void *arg)
+{
+	rw_thr_t *rwt = (rw_thr_t *)arg;
+	rw_priv_t *rwv = rwt->rwt_rwp;
+	uint8_t rnd = 0;
+	char name[16];
+
+	ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
+	snprintf(name, sizeof(name), "%s%d",
+	         SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id);
+	daemonize(name);
+	get_random_bytes((void *)&rnd, 1);
+	splat_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the rwlock until
+	 * someone else has already acquired it */
+	wait_event_interruptible(rwv->rw_waitq,
+	                         splat_rwlock_lock_and_test(&rwv->rw_priv_lock,
+	                         atomic_read(&rwv->rw_acquired) > 0));
+
+	spin_lock(&rwv->rw_priv_lock);
+	splat_vprint(rwv->rw_file, rwt->rwt_name,
+	             "%s reader thread trying to acquire rwlock with "
+	             "%d holding lock and %d waiting\n",
+	             name, atomic_read(&rwv->rw_acquired),
+	             atomic_read(&rwv->rw_waiters));
+	spin_unlock(&rwv->rw_priv_lock);
+
+	/* Attempt to take the rwlock for reading
+	 * without blocking; record the result */
+	rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
+
+	/* Here we acquired the lock this is a
+	 * failure since the writer should be
+	 * holding the lock */
+	if (rwt->rwt_rc == 1) {
+		spin_lock(&rwv->rw_priv_lock);
+		atomic_inc(&rwv->rw_acquired);
+		splat_vprint(rwv->rw_file, rwt->rwt_name,
+		             "%s reader thread acquired rwlock with "
+		             "%d holding lock and %d waiting\n",
+		             name, atomic_read(&rwv->rw_acquired),
+		             atomic_read(&rwv->rw_waiters));
+		spin_unlock(&rwv->rw_priv_lock);
+
+		spin_lock(&rwv->rw_priv_lock);
+		atomic_dec(&rwv->rw_acquired);
+		splat_vprint(rwv->rw_file, rwt->rwt_name,
+		             "%s reader thread dropped rwlock with "
+		             "%d holding lock and %d waiting\n",
+		             name, atomic_read(&rwv->rw_acquired),
+		             atomic_read(&rwv->rw_waiters));
+		spin_unlock(&rwv->rw_priv_lock);
+
+		/* Release the rwlock */
+		rw_exit(&rwv->rwl);
+	}
+	/* Here we know we didn't block and didn't
+	 * acquire the rwlock for reading */
+	else {
+		spin_lock(&rwv->rw_priv_lock);
+		atomic_inc(&rwv->rw_completed);
+		splat_vprint(rwv->rw_file, rwt->rwt_name,
+		             "%s reader thread could not acquire rwlock with "
+		             "%d holding lock and %d waiting\n",
+		             name, atomic_read(&rwv->rw_acquired),
+		             atomic_read(&rwv->rw_waiters));
+		spin_unlock(&rwv->rw_priv_lock);
+	}
+
+	return 0;
+}
+
+/* Trylock test: one writer (reusing the test1 writer thread) takes the
+ * lock, then N-1 readers each attempt rw_tryenter(RW_READER).  Every
+ * tryenter must fail while the writer holds the lock; any success
+ * increments the returned error count. */
+static int
+splat_rwlock_test4(struct file *file, void *arg)
+{
+	int i, count = 0, rc = 0;
+	long pids[SPLAT_RWLOCK_TEST_COUNT];
+	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
+	rw_priv_t rwv;
+
+	/* Initialize private data
+	 * including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Create some threads, the exact number isn't important just as
+	 * long as we know how many we managed to create and should expect. */
+	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
+		rwt[i].rwt_rwp = &rwv;
+		rwt[i].rwt_id = i;
+		rwt[i].rwt_name = SPLAT_RWLOCK_TEST4_NAME;
+		rwt[i].rwt_rc = 0;
+
+		/* The first thread will be a writer */
+		if (i == 0) {
+			/* We can reuse the test1 writer thread here */
+			pids[i] = kernel_thread(splat_rwlock_test1_writer_thread,
+			                        &rwt[i], 0);
+		} else {
+			pids[i] = kernel_thread(splat_rwlock_test4_reader_thread,
+			                        &rwt[i], 0);
+		}
+
+		if (pids[i] >= 0) {
+			count++;
+		}
+	}
+
+	/* Once the writer has the lock, release the readers */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	       atomic_read(&rwv.rw_acquired) <= 0)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Make sure that the reader threads complete */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	       atomic_read(&rwv.rw_completed) != SPLAT_RWLOCK_TEST_COUNT - 1)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+	/* Release the writer */
+	spin_lock(&rwv.rw_priv_lock);
+	atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS);
+	spin_unlock(&rwv.rw_priv_lock);
+	wake_up_interruptible(&rwv.rw_waitq);
+
+	/* Wait for the test to complete */
+	while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock,
+	       atomic_read(&rwv.rw_acquired) != 0 ||
+	       atomic_read(&rwv.rw_waiters) != 0)) {
+		splat_rwlock_sleep(1 * HZ);
+	}
+
+	/* If any of the reader threads ever acquired the lock
+	 * while another thread had it, make sure we return
+	 * an error since the rw_tryenter() should have failed */
+	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
+		if (rwt[i].rwt_rc) {
+			rc++;
+		}
+	}
+
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Write-downgrade test: take the lock as a writer (owner must be
+ * current), rw_downgrade() to a reader (owner must become NULL), then
+ * release.  Returns 0 on success, -EINVAL on a mismatch.
+ *
+ * Fix: the first error path previously jumped to out while still
+ * holding the write lock, then called rw_destroy() on a held lock;
+ * the lock is now released before bailing out. */
+static int
+splat_rwlock_test5(struct file *file, void *arg)
+{
+	kthread_t *owner;
+	rw_priv_t rwv;
+	int rc = 0;
+
+	/* Initialize private data
+	 * including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Take the rwlock for writing */
+	rw_enter(&rwv.rwl, RW_WRITER);
+	owner = rw_owner(&rwv.rwl);
+	if (current != owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should "
+		             "be owned by pid %d but is owned by pid %d\n",
+		             current->pid, owner ? owner->pid : -1);
+		/* Drop the write lock before destroying it below */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Make sure that the downgrade
+	 * worked properly */
+	rw_downgrade(&rwv.rwl);
+
+	owner = rw_owner(&rwv.rwl);
+	if (owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should not "
+		             "be owned but is owned by pid %d\n", owner->pid);
+		/* Release the rwlock */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Release the rwlock */
+	rw_exit(&rwv.rwl);
+
+out:
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Read-upgrade test: take the lock as a reader (no owner reported),
+ * rw_tryupgrade() to a writer (owner must become current), then
+ * release.  Returns 0 on success, -EINVAL on a mismatch.
+ *
+ * Fix: both error paths previously jumped to out while the lock was
+ * still held (as a reader in the first case; as a reader or writer in
+ * the second, depending on whether the upgrade succeeded) and then
+ * called rw_destroy() on a held lock; the lock is now released before
+ * bailing out. */
+static int
+splat_rwlock_test6(struct file *file, void *arg)
+{
+	kthread_t *owner;
+	rw_priv_t rwv;
+	int rc = 0;
+
+	/* Initialize private data
+	 * including the rwlock */
+	splat_init_rw_priv(&rwv, file);
+
+	/* Take the rwlock for reading */
+	rw_enter(&rwv.rwl, RW_READER);
+	owner = rw_owner(&rwv.rwl);
+	if (owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should not "
+		             "be owned but is owned by pid %d\n", owner->pid);
+		/* Drop the read lock before destroying it below */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Make sure that the upgrade
+	 * worked properly */
+	rc = !rw_tryupgrade(&rwv.rwl);
+
+	owner = rw_owner(&rwv.rwl);
+	if (rc || current != owner) {
+		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
+		             "be owned by pid %d but is owned by pid %d "
+		             "trylock rc %d\n",
+		             current->pid, owner ? owner->pid : -1, rc);
+		/* Still held (read or write) either way; drop it */
+		rw_exit(&rwv.rwl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Release the rwlock */
+	rw_exit(&rwv.rwl);
+
+out:
+	rw_destroy(&rwv.rwl);
+	return rc;
+}
+
+/* Allocate and register the "rwlock" test subsystem with its six
+ * tests.  Returns the new subsystem or NULL on allocation failure;
+ * the caller releases it with splat_rwlock_fini(). */
+splat_subsystem_t *
+splat_rwlock_init(void)
+{
+	splat_subsystem_t *sub;
+
+	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+	if (sub == NULL)
+		return NULL;
+
+	memset(sub, 0, sizeof(*sub));
+	strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE);
+	strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE);
+	INIT_LIST_HEAD(&sub->subsystem_list);
+	INIT_LIST_HEAD(&sub->test_list);
+	spin_lock_init(&sub->test_lock);
+	sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK;
+
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC,
+	              SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1);
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC,
+	              SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2);
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC,
+	              SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3);
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC,
+	              SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4);
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC,
+	              SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5);
+	SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC,
+	              SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6);
+
+	return sub;
+}
+
+/* Tear down the "rwlock" subsystem: unregister tests in reverse order,
+ * then free the subsystem allocated by splat_rwlock_init(). */
+void
+splat_rwlock_fini(splat_subsystem_t *sub)
+{
+	ASSERT(sub);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID);
+	kfree(sub);
+}
+
+/* Return the numeric subsystem id for the rwlock tests. */
+int
+splat_rwlock_id(void) {
+	return SPLAT_SUBSYSTEM_RWLOCK;
+}
diff --git a/module/splat/splat-taskq.c b/module/splat/splat-taskq.c
new file mode 100644
index 000000000..3cc09bcb9
--- /dev/null
+++ b/module/splat/splat-taskq.c
@@ -0,0 +1,310 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_TASKQ 0x0200
+#define SPLAT_TASKQ_NAME "taskq"
+#define SPLAT_TASKQ_DESC "Kernel Task Queue Tests"
+
+#define SPLAT_TASKQ_TEST1_ID 0x0201
+#define SPLAT_TASKQ_TEST1_NAME "single"
+#define SPLAT_TASKQ_TEST1_DESC "Single task queue, single task"
+
+#define SPLAT_TASKQ_TEST2_ID 0x0202
+#define SPLAT_TASKQ_TEST2_NAME "multiple"
+#define SPLAT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
+
+#define SPLAT_TASKQ_TEST3_ID 0x0203
+#define SPLAT_TASKQ_TEST3_NAME "system"
+#define SPLAT_TASKQ_TEST3_DESC "System task queue, multiple tasks"
+
+/* Per-dispatch argument handed to the taskq test callbacks. */
+typedef struct splat_taskq_arg {
+	int flag;		/* Updated by the callback to prove it ran */
+	int id;			/* Index of the owning taskq (test 2) */
+	struct file *file;	/* Control file handle for splat_vprint() */
+	const char *name;	/* Test name used in log messages */
+} splat_taskq_arg_t;
+
+/* Validation Test 1 - Create a taskq, queue a task, wait until
+ * task completes, ensure task ran properly, cleanup taskq,
+ */
+/* Callback shared by tests 1 and 3: log that we ran, then set
+ * tq_arg->flag = 1 so the dispatcher can verify execution. */
+static void
+splat_taskq_test13_func(void *arg)
+{
+	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
+
+	ASSERT(tq_arg);
+	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST1_NAME,
+	           "Taskq '%s' function '%s' setting flag\n",
+	           tq_arg->name, sym2str(splat_taskq_test13_func));
+	tq_arg->flag = 1;
+}
+
+/* Test 1 entry point: create a private taskq, dispatch a single task,
+ * wait for it, destroy the queue, and return 0 only if the callback
+ * set tq_arg.flag.  Returns a negative errno value on failure. */
+static int
+splat_taskq_test1(struct file *file, void *arg)
+{
+	taskq_t *tq;
+	taskqid_t id;
+	splat_taskq_arg_t tq_arg;
+
+	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
+	           SPLAT_TASKQ_TEST1_NAME);
+	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
+			       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
+		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
+		           "Taskq '%s' create failed\n",
+		           SPLAT_TASKQ_TEST1_NAME);
+		return -EINVAL;
+	}
+
+	tq_arg.flag = 0;
+	tq_arg.id   = 0;
+	tq_arg.file = file;
+	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;
+
+	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
+	           "Taskq '%s' function '%s' dispatching\n",
+	           tq_arg.name, sym2str(splat_taskq_test13_func));
+	/* A taskqid of 0 indicates the dispatch failed */
+	if ((id = taskq_dispatch(tq, splat_taskq_test13_func,
+				 &tq_arg, TQ_SLEEP)) == 0) {
+		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
+		           "Taskq '%s' function '%s' dispatch failed\n",
+		           tq_arg.name, sym2str(splat_taskq_test13_func));
+		taskq_destroy(tq);
+		return -EINVAL;
+	}
+
+	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
+	           tq_arg.name);
+	taskq_wait(tq);
+	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
+	           tq_arg.name);
+	taskq_destroy(tq);
+
+	return (tq_arg.flag) ? 0 : -EINVAL;
+}
+
+/* Validation Test 2 - Create multiple taskq's, each with multiple tasks,
+ * wait until all tasks complete, ensure all tasks ran properly and in
+ * the correct order, cleanup taskq's
+ */
+/* First ordered callback for test 2: doubles tq_arg->flag.  Run
+ * before func2, the pair maps the seed i to i * 2 + 1. */
+static void
+splat_taskq_test2_func1(void *arg)
+{
+	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
+
+	ASSERT(tq_arg);
+	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
+	           "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
+	           tq_arg->name, tq_arg->id,
+	           sym2str(splat_taskq_test2_func1),
+	           tq_arg->flag * 2, tq_arg->flag);
+	tq_arg->flag *= 2;
+}
+
+/* Second ordered callback for test 2: increments tq_arg->flag.
+ * Must run after func1 for the i * 2 + 1 check to hold. */
+static void
+splat_taskq_test2_func2(void *arg)
+{
+	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
+
+	ASSERT(tq_arg);
+	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
+	           "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
+	           tq_arg->name, tq_arg->id,
+	           sym2str(splat_taskq_test2_func2),
+	           tq_arg->flag + 1, tq_arg->flag);
+	tq_arg->flag += 1;
+}
+
+#define TEST2_TASKQS 8
+#define TEST2_THREADS_PER_TASKQ 4
+
+/* Test 2 entry point: create TEST2_TASKQS task queues with
+ * TEST2_THREADS_PER_TASKQ threads each, dispatch two dependent tasks
+ * to every queue (func1 doubles the flag, func2 increments it), then
+ * wait for and destroy every queue while verifying the tasks ran in
+ * dispatch order (final flag == i * 2 + 1).  Returns 0 on success or
+ * a negative errno value recording the first failure. */
+static int
+splat_taskq_test2(struct file *file, void *arg) {
+	taskq_t *tq[TEST2_TASKQS] = { NULL };
+	taskqid_t id;
+	splat_taskq_arg_t tq_args[TEST2_TASKQS];
+	int i, rc = 0;
+
+	for (i = 0; i < TEST2_TASKQS; i++) {
+
+		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
+		           "creating\n", SPLAT_TASKQ_TEST2_NAME, i);
+		if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
+			                  TEST2_THREADS_PER_TASKQ,
+				          maxclsyspri, 50, INT_MAX,
+				          TASKQ_PREPOPULATE)) == NULL) {
+			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+			           "Taskq '%s/%d' create failed\n",
+				   SPLAT_TASKQ_TEST2_NAME, i);
+			rc = -EINVAL;
+			break;
+		}
+
+		/* Seed the flag with the queue index; the callback pair
+		 * transforms it to i * 2 + 1 when run in order. */
+		tq_args[i].flag = i;
+		tq_args[i].id   = i;
+		tq_args[i].file = file;
+		tq_args[i].name = SPLAT_TASKQ_TEST2_NAME;
+
+		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+		           "Taskq '%s/%d' function '%s' dispatching\n",
+			   tq_args[i].name, tq_args[i].id,
+		           sym2str(splat_taskq_test2_func1));
+		if ((id = taskq_dispatch(
+		     tq[i], splat_taskq_test2_func1,
+		     &tq_args[i], TQ_SLEEP)) == 0) {
+			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+			           "Taskq '%s/%d' function '%s' dispatch "
+				   "failed\n", tq_args[i].name, tq_args[i].id,
+			           sym2str(splat_taskq_test2_func1));
+			rc = -EINVAL;
+			break;
+		}
+
+		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+		           "Taskq '%s/%d' function '%s' dispatching\n",
+			   tq_args[i].name, tq_args[i].id,
+		           sym2str(splat_taskq_test2_func2));
+		if ((id = taskq_dispatch(
+		     tq[i], splat_taskq_test2_func2,
+		     &tq_args[i], TQ_SLEEP)) == 0) {
+			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+			           "Taskq '%s/%d' function '%s' dispatch failed\n",
+				   tq_args[i].name, tq_args[i].id,
+			           sym2str(splat_taskq_test2_func2));
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+	/* When rc is set we're effectively just doing cleanup here, so
+	 * ignore new errors in that case.  They just cause noise. */
+	for (i = 0; i < TEST2_TASKQS; i++) {
+		if (tq[i] != NULL) {
+			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+			           "Taskq '%s/%d' waiting\n",
+			           tq_args[i].name, tq_args[i].id);
+			taskq_wait(tq[i]);
+			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+				   "Taskq '%s/%d' destroying\n",
+			           tq_args[i].name, tq_args[i].id);
+			taskq_destroy(tq[i]);
+
+			if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
+				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+				           "Taskq '%s/%d' processed tasks "
+					   "out of order; %d != %d\n",
+				           tq_args[i].name, tq_args[i].id,
+				           tq_args[i].flag, i * 2 + 1);
+				rc = -EINVAL;
+			} else {
+				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
+				           "Taskq '%s/%d' processed tasks "
+					   "in the correct order; %d == %d\n",
+				           tq_args[i].name, tq_args[i].id,
+				           tq_args[i].flag, i * 2 + 1);
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* Validation Test 3 - Use the global system task queue with a single
+ * task, wait until task completes, ensure task ran properly.
+ */
+/* Test 3 entry point: dispatch a single task to the global
+ * system_taskq (no private queue is created), wait for completion,
+ * and return 0 only if the callback set tq_arg.flag. */
+static int
+splat_taskq_test3(struct file *file, void *arg)
+{
+	taskqid_t id;
+	splat_taskq_arg_t tq_arg;
+
+	tq_arg.flag = 0;
+	tq_arg.id   = 0;
+	tq_arg.file = file;
+	tq_arg.name = SPLAT_TASKQ_TEST3_NAME;
+
+	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
+	           "Taskq '%s' function '%s' dispatching\n",
+	           tq_arg.name, sym2str(splat_taskq_test13_func));
+	/* A taskqid of 0 indicates the dispatch failed */
+	if ((id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
+				 &tq_arg, TQ_SLEEP)) == 0) {
+		splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
+		           "Taskq '%s' function '%s' dispatch failed\n",
+		           tq_arg.name, sym2str(splat_taskq_test13_func));
+		return -EINVAL;
+	}
+
+	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
+	           tq_arg.name);
+	taskq_wait(system_taskq);
+
+	return (tq_arg.flag) ? 0 : -EINVAL;
+}
+
+/* Allocate and register the taskq test subsystem descriptor and
+ * hook up taskq tests 1-3.  Returns NULL on allocation failure;
+ * the caller releases the result via splat_taskq_fini(). */
+splat_subsystem_t *
+splat_taskq_init(void)
+{
+        splat_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, SPLAT_TASKQ_NAME, SPLAT_NAME_SIZE);
+        strncpy(sub->desc.desc, SPLAT_TASKQ_DESC, SPLAT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = SPLAT_SUBSYSTEM_TASKQ;
+
+        SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST1_NAME, SPLAT_TASKQ_TEST1_DESC,
+	              SPLAT_TASKQ_TEST1_ID, splat_taskq_test1);
+        SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST2_NAME, SPLAT_TASKQ_TEST2_DESC,
+	              SPLAT_TASKQ_TEST2_ID, splat_taskq_test2);
+        SPLAT_TEST_INIT(sub, SPLAT_TASKQ_TEST3_NAME, SPLAT_TASKQ_TEST3_DESC,
+	              SPLAT_TASKQ_TEST3_ID, splat_taskq_test3);
+
+        return sub;
+}
+
+/* Unregister the taskq tests (in reverse registration order) and
+ * free the subsystem descriptor allocated by splat_taskq_init(). */
+void
+splat_taskq_fini(splat_subsystem_t *sub)
+{
+        ASSERT(sub);
+        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST3_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST2_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_TASKQ_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the taskq test subsystem. */
+int
+splat_taskq_id(void) {
+        return SPLAT_SUBSYSTEM_TASKQ;
+}
diff --git a/module/splat/splat-thread.c b/module/splat/splat-thread.c
new file mode 100644
index 000000000..ca6c46ac3
--- /dev/null
+++ b/module/splat/splat-thread.c
@@ -0,0 +1,203 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_THREAD 0x0600
+#define SPLAT_THREAD_NAME "thread"
+#define SPLAT_THREAD_DESC "Kernel Thread Tests"
+
+#define SPLAT_THREAD_TEST1_ID 0x0601
+#define SPLAT_THREAD_TEST1_NAME "create"
+#define SPLAT_THREAD_TEST1_DESC "Validate thread creation"
+
+#define SPLAT_THREAD_TEST2_ID 0x0602
+#define SPLAT_THREAD_TEST2_NAME "exit"
+#define SPLAT_THREAD_TEST2_DESC "Validate thread exit"
+
+#define SPLAT_THREAD_TEST_MAGIC 0x4488CC00UL
+
+/* State shared between a thread test and its worker thread. */
+typedef struct thread_priv {
+	unsigned long tp_magic;		/* SPLAT_THREAD_TEST_MAGIC sanity tag */
+	struct file *tp_file;		/* Control file handle for logging */
+	spinlock_t tp_lock;		/* Protects tp_rc */
+	wait_queue_head_t tp_waitq;	/* Test sleeps here for tp_rc updates */
+	int tp_rc;			/* Progress flag set by the worker */
+} thread_priv_t;
+
+/* Locked compare of tp->tp_rc against rc; used as the condition
+ * for wait_event()/wait_event_timeout() in the tests below. */
+static int
+splat_thread_rc(thread_priv_t *tp, int rc)
+{
+	int ret;
+
+	spin_lock(&tp->tp_lock);
+	ret = (tp->tp_rc == rc);
+	spin_unlock(&tp->tp_lock);
+
+	return ret;
+}
+
+/* Worker for test 1: set tp_rc = 1 under the lock, wake the waiting
+ * test, then terminate via thread_exit(). */
+static void
+splat_thread_work1(void *priv)
+{
+	thread_priv_t *tp = (thread_priv_t *)priv;
+
+	spin_lock(&tp->tp_lock);
+	ASSERT(tp->tp_magic == SPLAT_THREAD_TEST_MAGIC);
+	tp->tp_rc = 1;
+	spin_unlock(&tp->tp_lock);
+
+	wake_up(&tp->tp_waitq);
+	thread_exit();
+}
+
+/* Test 1 entry point: spawn a thread with thread_create() and block
+ * until the worker signals that it ran (tp_rc == 1).  Returns 0 on
+ * success or -ESRCH if the thread could not be created. */
+static int
+splat_thread_test1(struct file *file, void *arg)
+{
+	thread_priv_t tp;
+	kthread_t *thr;
+
+	tp.tp_magic = SPLAT_THREAD_TEST_MAGIC;
+	tp.tp_file = file;
+        spin_lock_init(&tp.tp_lock);
+	init_waitqueue_head(&tp.tp_waitq);
+	tp.tp_rc = 0;
+
+	thr = (kthread_t *)thread_create(NULL, 0, splat_thread_work1, &tp, 0,
+			                 &p0, TS_RUN, minclsyspri);
+	/* Must never fail under Solaris, but we check anyway since this
+	 * can happen in the linux SPL, we may want to change this behavior */
+	if (thr == NULL)
+		return  -ESRCH;
+
+	/* Sleep until the thread sets tp.tp_rc == 1 */
+	wait_event(tp.tp_waitq, splat_thread_rc(&tp, 1));
+
+        splat_vprint(file, SPLAT_THREAD_TEST1_NAME, "%s",
+	           "Thread successfully started properly\n");
+	return 0;
+}
+
+/* Worker for test 2: signal tp_rc = 1, call thread_exit(), then
+ * attempt to signal tp_rc = 2.  The second signal must never happen
+ * if thread_exit() terminates the thread as required. */
+static void
+splat_thread_work2(void *priv)
+{
+	thread_priv_t *tp = (thread_priv_t *)priv;
+
+	spin_lock(&tp->tp_lock);
+	ASSERT(tp->tp_magic == SPLAT_THREAD_TEST_MAGIC);
+	tp->tp_rc = 1;
+	spin_unlock(&tp->tp_lock);
+
+	wake_up(&tp->tp_waitq);
+	thread_exit();
+
+	/* The following code is unreachable when thread_exit() is
+	 * working properly, which is exactly what we're testing */
+	spin_lock(&tp->tp_lock);
+	tp->tp_rc = 2;
+	spin_unlock(&tp->tp_lock);
+
+	wake_up(&tp->tp_waitq);
+}
+
+/* Test 2 entry point: spawn a worker that calls thread_exit(), wait
+ * for its first signal, then verify via a timed wait that the code
+ * after thread_exit() never runs.  Returns 0 on success, -ESRCH if
+ * the thread could not be created, or -EINVAL if the worker survived
+ * past thread_exit(). */
+static int
+splat_thread_test2(struct file *file, void *arg)
+{
+	thread_priv_t tp;
+	kthread_t *thr;
+	int rc = 0;
+
+	tp.tp_magic = SPLAT_THREAD_TEST_MAGIC;
+	tp.tp_file = file;
+        spin_lock_init(&tp.tp_lock);
+	init_waitqueue_head(&tp.tp_waitq);
+	tp.tp_rc = 0;
+
+	thr = (kthread_t *)thread_create(NULL, 0, splat_thread_work2, &tp, 0,
+			                 &p0, TS_RUN, minclsyspri);
+	/* Must never fail under Solaris, but we check anyway since this
+	 * can happen in the linux SPL, we may want to change this behavior */
+	if (thr == NULL)
+		return -ESRCH;
+
+	/* Sleep until the thread sets tp.tp_rc == 1 */
+	wait_event(tp.tp_waitq, splat_thread_rc(&tp, 1));
+
+	/* Sleep until the thread sets tp.tp_rc == 2, or until we hit
+	 * the timeout.  If thread exit is working properly we should
+	 * hit the timeout and never see tp.tp_rc == 2. */
+	rc = wait_event_timeout(tp.tp_waitq, splat_thread_rc(&tp, 2), HZ / 10);
+	if (rc > 0) {
+		rc = -EINVAL;
+	        splat_vprint(file, SPLAT_THREAD_TEST2_NAME, "%s",
+		           "Thread did not exit properly at thread_exit()\n");
+	} else {
+	        splat_vprint(file, SPLAT_THREAD_TEST2_NAME, "%s",
+		           "Thread successfully exited at thread_exit()\n");
+	}
+
+	return rc;
+}
+
+/* Allocate and register the thread test subsystem descriptor and
+ * hook up thread tests 1-2.  Returns NULL on allocation failure;
+ * the caller releases the result via splat_thread_fini(). */
+splat_subsystem_t *
+splat_thread_init(void)
+{
+        splat_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, SPLAT_THREAD_NAME, SPLAT_NAME_SIZE);
+        strncpy(sub->desc.desc, SPLAT_THREAD_DESC, SPLAT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = SPLAT_SUBSYSTEM_THREAD;
+
+        SPLAT_TEST_INIT(sub, SPLAT_THREAD_TEST1_NAME, SPLAT_THREAD_TEST1_DESC,
+	              SPLAT_THREAD_TEST1_ID, splat_thread_test1);
+        SPLAT_TEST_INIT(sub, SPLAT_THREAD_TEST2_NAME, SPLAT_THREAD_TEST2_DESC,
+	              SPLAT_THREAD_TEST2_ID, splat_thread_test2);
+
+        return sub;
+}
+
+/* Unregister the thread tests (in reverse registration order) and
+ * free the subsystem descriptor allocated by splat_thread_init(). */
+void
+splat_thread_fini(splat_subsystem_t *sub)
+{
+        ASSERT(sub);
+        SPLAT_TEST_FINI(sub, SPLAT_THREAD_TEST2_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_THREAD_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the thread test subsystem. */
+int
+splat_thread_id(void) {
+        return SPLAT_SUBSYSTEM_THREAD;
+}
diff --git a/module/splat/splat-time.c b/module/splat/splat-time.c
new file mode 100644
index 000000000..1aa13e520
--- /dev/null
+++ b/module/splat/splat-time.c
@@ -0,0 +1,117 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_TIME 0x0800
+#define SPLAT_TIME_NAME "time"
+#define SPLAT_TIME_DESC "Kernel Time Tests"
+
+#define SPLAT_TIME_TEST1_ID 0x0801
+#define SPLAT_TIME_TEST1_NAME "time1"
+#define SPLAT_TIME_TEST1_DESC "HZ Test"
+
+#define SPLAT_TIME_TEST2_ID 0x0802
+#define SPLAT_TIME_TEST2_NAME "time2"
+#define SPLAT_TIME_TEST2_DESC "Monotonic Test"
+
+/* Test 1 entry point: log the SPL's hz value.  Always returns 0;
+ * the output is meant for manual inspection. */
+static int
+splat_time_test1(struct file *file, void *arg)
+{
+	int myhz = hz;
+	splat_vprint(file, SPLAT_TIME_TEST1_NAME, "hz is %d\n", myhz);
+        return 0;
+}
+
+/* Test 2 entry point: sample gethrtime() 100 times, sleeping ~10
+ * ticks between samples, and fail if any sample is smaller than its
+ * predecessor (i.e. the clock went backwards). */
+static int
+splat_time_test2(struct file *file, void *arg)
+{
+	hrtime_t tm1, tm2;
+	int i;
+
+	tm1 = gethrtime();
+	splat_vprint(file, SPLAT_TIME_TEST2_NAME, "time is %lld\n", tm1);
+
+	for(i = 0; i < 100; i++) {
+		tm2 = gethrtime();
+		splat_vprint(file, SPLAT_TIME_TEST2_NAME, "time is %lld\n", tm2);
+
+		if(tm1 > tm2) {
+			splat_print(file, "%s: gethrtime() is not giving "
+				    "monotonically increasing values\n",
+				    SPLAT_TIME_TEST2_NAME);
+			/* NOTE(review): returns positive 1 on failure while
+			 * every other test in this suite returns a negative
+			 * errno value -- confirm callers only test nonzero */
+			return 1;
+		}
+		tm1 = tm2;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(10);
+	}
+
+	return 0;
+}
+
+/* Allocate and register the time test subsystem descriptor and
+ * hook up time tests 1-2.  Returns NULL on allocation failure;
+ * the caller releases the result via splat_time_fini(). */
+splat_subsystem_t *
+splat_time_init(void)
+{
+        splat_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, SPLAT_TIME_NAME, SPLAT_NAME_SIZE);
+        strncpy(sub->desc.desc, SPLAT_TIME_DESC, SPLAT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = SPLAT_SUBSYSTEM_TIME;
+
+        SPLAT_TEST_INIT(sub, SPLAT_TIME_TEST1_NAME, SPLAT_TIME_TEST1_DESC,
+                        SPLAT_TIME_TEST1_ID, splat_time_test1);
+        SPLAT_TEST_INIT(sub, SPLAT_TIME_TEST2_NAME, SPLAT_TIME_TEST2_DESC,
+                        SPLAT_TIME_TEST2_ID, splat_time_test2);
+
+        return sub;
+}
+
+/* Unregister the time tests (in reverse registration order) and
+ * free the subsystem descriptor allocated by splat_time_init(). */
+void
+splat_time_fini(splat_subsystem_t *sub)
+{
+        ASSERT(sub);
+
+        SPLAT_TEST_FINI(sub, SPLAT_TIME_TEST2_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_TIME_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the time test subsystem. */
+int
+splat_time_id(void)
+{
+        return SPLAT_SUBSYSTEM_TIME;
+}
diff --git a/module/splat/splat-vnode.c b/module/splat/splat-vnode.c
new file mode 100644
index 000000000..413651dac
--- /dev/null
+++ b/module/splat/splat-vnode.c
@@ -0,0 +1,532 @@
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory
+ * Written by:
+ * Brian Behlendorf <[email protected]>,
+ * Herb Wartens <[email protected]>,
+ * Jim Garlick <[email protected]>
+ * UCRL-CODE-235197
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "splat-internal.h"
+#include <linux/rcupdate.h>
+
+#define SPLAT_SUBSYSTEM_VNODE 0x0900
+#define SPLAT_VNODE_NAME "vnode"
+#define SPLAT_VNODE_DESC "Kernel Vnode Tests"
+
+#define SPLAT_VNODE_TEST1_ID 0x0901
+#define SPLAT_VNODE_TEST1_NAME "vn_open"
+#define SPLAT_VNODE_TEST1_DESC "Vn_open Test"
+
+#define SPLAT_VNODE_TEST2_ID 0x0902
+#define SPLAT_VNODE_TEST2_NAME "vn_openat"
+#define SPLAT_VNODE_TEST2_DESC "Vn_openat Test"
+
+#define SPLAT_VNODE_TEST3_ID 0x0903
+#define SPLAT_VNODE_TEST3_NAME "vn_rdwr"
+#define SPLAT_VNODE_TEST3_DESC "Vn_rdwrt Test"
+
+#define SPLAT_VNODE_TEST4_ID 0x0904
+#define SPLAT_VNODE_TEST4_NAME "vn_rename"
+#define SPLAT_VNODE_TEST4_DESC "Vn_rename Test"
+
+#define SPLAT_VNODE_TEST5_ID 0x0905
+#define SPLAT_VNODE_TEST5_NAME "vn_getattr"
+#define SPLAT_VNODE_TEST5_DESC "Vn_getattr Test"
+
+#define SPLAT_VNODE_TEST6_ID 0x0906
+#define SPLAT_VNODE_TEST6_NAME "vn_sync"
+#define SPLAT_VNODE_TEST6_DESC "Vn_sync Test"
+
+#define SPLAT_VNODE_TEST7_ID 0x0907
+#define SPLAT_VNODE_TEST7_NAME "vn_getf"
+#define SPLAT_VNODE_TEST7_DESC "vn_getf/vn_releasef Test"
+
+#define SPLAT_VNODE_TEST_FILE "/etc/fstab"
+#define SPLAT_VNODE_TEST_FILE_AT "etc/fstab"
+#define SPLAT_VNODE_TEST_FILE_RW "/tmp/spl.vnode.tmp"
+#define SPLAT_VNODE_TEST_FILE_RW1 "/tmp/spl.vnode.tmp.1"
+#define SPLAT_VNODE_TEST_FILE_RW2 "/tmp/spl.vnode.tmp.2"
+
+/* Test 1 entry point: vn_open() an existing file (/etc/fstab) read
+ * only, close and release it, and return 0 on success or the error
+ * from the failing call. */
+static int
+splat_vnode_test1(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	int rc;
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE, UIO_SYSSPACE,
+			  FREAD, 0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST1_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		return rc;
+	}
+
+        rc = VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+
+	if (rc) {
+		splat_vprint(file, SPLAT_VNODE_TEST1_NAME,
+			     "Failed to vn_close test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		return rc;
+	}
+
+	splat_vprint(file, SPLAT_VNODE_TEST1_NAME, "Successfully vn_open'ed "
+		     "and vn_closed test file: %s\n", SPLAT_VNODE_TEST_FILE);
+
+        return rc;
+} /* splat_vnode_test1() */
+
+/* Test 2 entry point: vn_openat() a path relative to rootdir, close
+ * and release it, and return 0 on success or the error from the
+ * failing call.
+ * NOTE(review): the log messages print SPLAT_VNODE_TEST_FILE (the
+ * absolute path) rather than SPLAT_VNODE_TEST_FILE_AT which was
+ * actually opened -- presumably intentional since they name the same
+ * file, but confirm. */
+static int
+splat_vnode_test2(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	int rc;
+
+	if ((rc = vn_openat(SPLAT_VNODE_TEST_FILE_AT, UIO_SYSSPACE,
+			    FREAD, 0644, &vp, 0, 0, rootdir, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST2_NAME,
+			     "Failed to vn_openat test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		return rc;
+	}
+
+        rc = VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+
+	if (rc) {
+		splat_vprint(file, SPLAT_VNODE_TEST2_NAME,
+			     "Failed to vn_close test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		return rc;
+	}
+
+	splat_vprint(file, SPLAT_VNODE_TEST2_NAME, "Successfully vn_openat'ed "
+		     "and vn_closed test file: %s\n", SPLAT_VNODE_TEST_FILE);
+
+        return rc;
+} /* splat_vnode_test2() */
+
+/* Test 3 entry point: create a scratch file, vn_rdwr() a pattern to
+ * it, read it back at offset 0 and verify the bytes match.  The file
+ * is always closed, released and removed before returning.  Returns 0
+ * on success or a negative errno value. */
+static int
+splat_vnode_test3(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	char buf1[32] = "SPL VNode Interface Test File\n";
+	char buf2[32] = "";
+	int rc;
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW, UIO_SYSSPACE,
+			  FWRITE | FREAD | FCREAT | FEXCL,
+			  0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST3_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		return rc;
+	}
+
+        rc = vn_rdwr(UIO_WRITE, vp, buf1, strlen(buf1), 0,
+                     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST3_NAME,
+			     "Failed vn_rdwr write of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+        rc = vn_rdwr(UIO_READ, vp, buf2, strlen(buf1), 0,
+                     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST3_NAME,
+			     "Failed vn_rdwr read of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+	if (strncmp(buf1, buf2, strlen(buf1))) {
+		rc = -EINVAL;
+		splat_vprint(file, SPLAT_VNODE_TEST3_NAME,
+			     "Failed strncmp data written does not match "
+			     "data read\nWrote: %sRead:  %s\n", buf1, buf2);
+		goto out;
+	}
+
+	rc = 0;
+	splat_vprint(file, SPLAT_VNODE_TEST3_NAME, "Wrote:  %s", buf1);
+	splat_vprint(file, SPLAT_VNODE_TEST3_NAME, "Read:   %s", buf2);
+	splat_vprint(file, SPLAT_VNODE_TEST3_NAME, "Successfully wrote and "
+		     "read expected data pattern to test file: %s\n",
+		     SPLAT_VNODE_TEST_FILE_RW);
+
+out:
+        VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+	vn_remove(SPLAT_VNODE_TEST_FILE_RW, UIO_SYSSPACE, RMFILE);
+
+        return rc;
+} /* splat_vnode_test3() */
+
+/* Test 4 entry point: write a pattern to RW1, close it, vn_rename()
+ * RW1 -> RW2, reopen RW2 read-only and verify the pattern survived
+ * the rename.  Both scratch paths are removed on every exit path.
+ * Returns 0 on success or a negative errno value. */
+static int
+splat_vnode_test4(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	char buf1[32] = "SPL VNode Interface Test File\n";
+	char buf2[32] = "";
+	int rc;
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW1, UIO_SYSSPACE,
+			  FWRITE | FREAD | FCREAT | FEXCL, 0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW1, rc);
+		goto out;
+	}
+
+        rc = vn_rdwr(UIO_WRITE, vp, buf1, strlen(buf1), 0,
+                     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
+			     "Failed vn_rdwr write of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW1, rc);
+		goto out2;
+	}
+
+        VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+
+	rc = vn_rename(SPLAT_VNODE_TEST_FILE_RW1,SPLAT_VNODE_TEST_FILE_RW2,0);
+	if (rc) {
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Failed vn_rename "
+			     "%s -> %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW1,
+			     SPLAT_VNODE_TEST_FILE_RW2, rc);
+		goto out;
+	}
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW2, UIO_SYSSPACE,
+			  FREAD | FEXCL, 0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW2, rc);
+		goto out;
+	}
+
+        rc = vn_rdwr(UIO_READ, vp, buf2, strlen(buf1), 0,
+                     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
+			     "Failed vn_rdwr read of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW2, rc);
+		goto out2;
+	}
+
+	if (strncmp(buf1, buf2, strlen(buf1))) {
+		/* Negative errno for consistency with every other error
+		 * path in this file; was the positive constant EINVAL */
+		rc = -EINVAL;
+		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
+			     "Failed strncmp data written does not match "
+			     "data read\nWrote: %sRead:  %s\n", buf1, buf2);
+		goto out2;
+	}
+
+	rc = 0;
+	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Wrote to %s:  %s",
+		     SPLAT_VNODE_TEST_FILE_RW1, buf1);
+	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Read from %s: %s",
+		     SPLAT_VNODE_TEST_FILE_RW2, buf2);
+	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Successfully renamed "
+		     "test file %s -> %s and verified data pattern\n",
+		     SPLAT_VNODE_TEST_FILE_RW1, SPLAT_VNODE_TEST_FILE_RW2);
+out2:
+        VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+out:
+	vn_remove(SPLAT_VNODE_TEST_FILE_RW1, UIO_SYSSPACE, RMFILE);
+	vn_remove(SPLAT_VNODE_TEST_FILE_RW2, UIO_SYSSPACE, RMFILE);
+
+        return rc;
+} /* splat_vnode_test4() */
+
+/* Test 5 entry point: vn_open() an existing file, VOP_GETATTR() it,
+ * and verify the reported type is VREG.  The vnode is always closed
+ * and released.  Returns 0 on success or a negative errno value. */
+static int
+splat_vnode_test5(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	vattr_t vap;
+	int rc;
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE, UIO_SYSSPACE,
+			  FREAD, 0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST5_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		return rc;
+	}
+
+	rc = VOP_GETATTR(vp, &vap, 0, 0, NULL);
+	if (rc) {
+		splat_vprint(file, SPLAT_VNODE_TEST5_NAME,
+			     "Failed to vn_getattr test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE, rc);
+		goto out;
+	}
+
+	if (vap.va_type != VREG) {
+		rc = -EINVAL;
+		splat_vprint(file, SPLAT_VNODE_TEST5_NAME,
+			     "Failed expected regular file type "
+			     "(%d != VREG): %s (%d)\n", vap.va_type,
+			     SPLAT_VNODE_TEST_FILE, rc);
+		goto out;
+	}
+
+	/* Was mistakenly tagged SPLAT_VNODE_TEST1_NAME (copy-paste) */
+	splat_vprint(file, SPLAT_VNODE_TEST5_NAME, "Successfully "
+		     "vn_getattr'ed test file: %s\n", SPLAT_VNODE_TEST_FILE);
+
+out:
+        VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+
+        return rc;
+} /* splat_vnode_test5() */
+
+/* Test 6 entry point: create a scratch file, write a pattern, and
+ * verify vn_fsync() succeeds on it.  The file is always closed,
+ * released and removed.  Returns 0 on success or a negative errno. */
+static int
+splat_vnode_test6(struct file *file, void *arg)
+{
+	vnode_t *vp;
+	char buf[32] = "SPL VNode Interface Test File\n";
+	int rc;
+
+	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW, UIO_SYSSPACE,
+			  FWRITE | FCREAT | FEXCL, 0644, &vp, 0, 0))) {
+		splat_vprint(file, SPLAT_VNODE_TEST6_NAME,
+			     "Failed to vn_open test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		return rc;
+	}
+
+        rc = vn_rdwr(UIO_WRITE, vp, buf, strlen(buf), 0,
+                     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST6_NAME,
+			     "Failed vn_rdwr write of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+	rc = vn_fsync(vp, 0, 0, 0);
+	if (rc) {
+		splat_vprint(file, SPLAT_VNODE_TEST6_NAME,
+			     "Failed vn_fsync of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+	rc = 0;
+	splat_vprint(file, SPLAT_VNODE_TEST6_NAME, "Successfully "
+		     "fsync'ed test file %s\n", SPLAT_VNODE_TEST_FILE_RW);
+out:
+        VOP_CLOSE(vp, 0, 0, 0, 0, 0);
+        VN_RELE(vp);
+	vn_remove(SPLAT_VNODE_TEST_FILE_RW, UIO_SYSSPACE, RMFILE);
+
+        return rc;
+} /* splat_vnode_test6() */
+
+/* Basically a slightly modified version of sys_close() */
+/* Basically a slightly modified version of sys_close(): detach fd
+ * from the current process's fd table without dropping the struct
+ * file reference (the caller still holds lfp and closes it itself).
+ * Returns 0 on success or -EBADF if fd is out of range or unused.
+ * NOTE(review): the two #ifdef arms handle kernels with and without
+ * the RCU-protected struct fdtable -- presumably matched to the
+ * HAVE_FILES_FDTABLE configure check; verify against the SPL build. */
+static int
+fd_uninstall(int fd)
+{
+        struct file *fp;
+        struct files_struct *files = current->files;
+#ifdef HAVE_FILES_FDTABLE
+        struct fdtable *fdt;
+
+        spin_lock(&files->file_lock);
+        fdt = files_fdtable(files);
+
+        if (fd >= fdt->max_fds)
+                goto out_unlock;
+
+        fp = fdt->fd[fd];
+        if (!fp)
+                goto out_unlock;
+
+        rcu_assign_pointer(fdt->fd[fd], NULL);
+        FD_CLR(fd, fdt->close_on_exec);
+#else
+        spin_lock(&files->file_lock);
+        if (fd >= files->max_fds)
+                goto out_unlock;
+
+        fp = files->fd[fd];
+        if (!fp)
+                goto out_unlock;
+
+        files->fd[fd] = NULL;
+        FD_CLR(fd, files->close_on_exec);
+#endif
+	/* Dropping the lock here exposes a minor race but it allows me
+	 * to use the existing kernel interfaces for this, and for a test
+	 * case I think that's reasonable. */
+        spin_unlock(&files->file_lock);
+        put_unused_fd(fd);
+        return 0;
+
+out_unlock:
+        spin_unlock(&files->file_lock);
+        return -EBADF;
+} /* fd_uninstall() */
+
+/* Test 7 entry point: exercise vn_getf()/vn_releasef().  Reserve a
+ * process fd, filp_open() a scratch file, fd_install() the pair, then
+ * look the file up via vn_getf(), write and read back a pattern
+ * through fp->f_vnode, and verify the bytes match.  The fd is
+ * released/uninstalled and the scratch file closed and removed on all
+ * paths.  Returns 0 on success or a negative errno value. */
+static int
+splat_vnode_test7(struct file *file, void *arg)
+{
+	char buf1[32] = "SPL VNode Interface Test File\n";
+	char buf2[32] = "";
+	struct file *lfp;
+	file_t *fp;
+	int rc, fd;
+
+	/* Prep work needed to test getf/releasef */
+	fd = get_unused_fd();
+	if (fd < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed to get unused fd (%d)\n", fd);
+		return fd;
+	}
+
+        lfp = filp_open(SPLAT_VNODE_TEST_FILE_RW, O_RDWR|O_CREAT|O_EXCL, 0644);
+	if (IS_ERR(lfp)) {
+		put_unused_fd(fd);
+		rc = PTR_ERR(lfp);
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed to filp_open: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		return rc;
+	}
+
+	/* Pair up the new fd and lfp in the current context, this allows
+	 * getf to lookup the file struct simply by the known open fd */
+	fd_install(fd, lfp);
+
+	/* Actual getf()/releasef() test */
+	fp = vn_getf(fd);
+	if (fp == NULL) {
+		rc = -EINVAL;
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed to getf fd %d: (%d)\n", fd, rc);
+		goto out;
+	}
+
+	rc = vn_rdwr(UIO_WRITE, fp->f_vnode, buf1, strlen(buf1), 0,
+		     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed vn_rdwr write of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+	rc = vn_rdwr(UIO_READ, fp->f_vnode, buf2, strlen(buf1), 0,
+		     UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+	if (rc < 0) {
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed vn_rdwr read of test file: %s (%d)\n",
+			     SPLAT_VNODE_TEST_FILE_RW, rc);
+		goto out;
+	}
+
+	if (strncmp(buf1, buf2, strlen(buf1))) {
+		rc = -EINVAL;
+		splat_vprint(file, SPLAT_VNODE_TEST7_NAME,
+			     "Failed strncmp data written does not match "
+			     "data read\nWrote: %sRead:  %s\n", buf1, buf2);
+		goto out;
+	}
+
+	rc = 0;
+	/* These three messages were mistagged SPLAT_VNODE_TEST3_NAME */
+	splat_vprint(file, SPLAT_VNODE_TEST7_NAME, "Wrote:  %s", buf1);
+	splat_vprint(file, SPLAT_VNODE_TEST7_NAME, "Read:   %s", buf2);
+	splat_vprint(file, SPLAT_VNODE_TEST7_NAME, "Successfully wrote and "
+		     "read expected data pattern to test file: %s\n",
+		     SPLAT_VNODE_TEST_FILE_RW);
+out:
+	vn_releasef(fd);
+	fd_uninstall(fd);
+        filp_close(lfp, 0);
+	vn_remove(SPLAT_VNODE_TEST_FILE_RW, UIO_SYSSPACE, RMFILE);
+
+	return rc;
+} /* splat_vnode_test7() */
+
+/* Allocate and register the vnode test subsystem descriptor and
+ * hook up vnode tests 1-7.  Returns NULL on allocation failure;
+ * the caller releases the result via splat_vnode_fini(). */
+splat_subsystem_t *
+splat_vnode_init(void)
+{
+        splat_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, SPLAT_VNODE_NAME, SPLAT_NAME_SIZE);
+        strncpy(sub->desc.desc, SPLAT_VNODE_DESC, SPLAT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = SPLAT_SUBSYSTEM_VNODE;
+
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST1_NAME, SPLAT_VNODE_TEST1_DESC,
+	                SPLAT_VNODE_TEST1_ID, splat_vnode_test1);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST2_NAME, SPLAT_VNODE_TEST2_DESC,
+	                SPLAT_VNODE_TEST2_ID, splat_vnode_test2);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST3_NAME, SPLAT_VNODE_TEST3_DESC,
+	                SPLAT_VNODE_TEST3_ID, splat_vnode_test3);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST4_NAME, SPLAT_VNODE_TEST4_DESC,
+	                SPLAT_VNODE_TEST4_ID, splat_vnode_test4);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST5_NAME, SPLAT_VNODE_TEST5_DESC,
+	                SPLAT_VNODE_TEST5_ID, splat_vnode_test5);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST6_NAME, SPLAT_VNODE_TEST6_DESC,
+	                SPLAT_VNODE_TEST6_ID, splat_vnode_test6);
+        SPLAT_TEST_INIT(sub, SPLAT_VNODE_TEST7_NAME, SPLAT_VNODE_TEST7_DESC,
+	                SPLAT_VNODE_TEST7_ID, splat_vnode_test7);
+
+        return sub;
+} /* splat_vnode_init() */
+
+/* Unregister the vnode tests (in reverse registration order) and
+ * free the subsystem descriptor allocated by splat_vnode_init(). */
+void
+splat_vnode_fini(splat_subsystem_t *sub)
+{
+        ASSERT(sub);
+
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST7_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST6_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST5_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST4_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST3_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST2_ID);
+        SPLAT_TEST_FINI(sub, SPLAT_VNODE_TEST1_ID);
+
+        kfree(sub);
+} /* splat_vnode_fini() */
+
+/* Return the numeric id of the vnode test subsystem. */
+int
+splat_vnode_id(void)
+{
+        return SPLAT_SUBSYSTEM_VNODE;
+} /* splat_vnode_id() */