Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am             1
-rw-r--r--  src/cmd/Makefile.am         3
-rw-r--r--  src/cmd/splat.c           821
-rw-r--r--  src/spl/Makefile.in        50
-rw-r--r--  src/spl/linux-kmem.c      249
-rw-r--r--  src/spl/linux-rwlock.c     41
-rw-r--r--  src/spl/linux-taskq.c      78
-rw-r--r--  src/spl/linux-thread.c    113
-rw-r--r--  src/splat/Makefile.in      57
-rw-r--r--  src/splat/splat-condvar.c 454
-rw-r--r--  src/splat/splat-ctl.c     684
-rw-r--r--  src/splat/splat-kmem.c    365
-rw-r--r--  src/splat/splat-mutex.c   324
-rw-r--r--  src/splat/splat-random.c  104
-rw-r--r--  src/splat/splat-rwlock.c  764
-rw-r--r--  src/splat/splat-taskq.c   238
-rw-r--r--  src/splat/splat-thread.c  116
-rw-r--r--  src/splat/splat-time.c     90
18 files changed, 4552 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 000000000..86f519112
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = cmd spl splat
diff --git a/src/cmd/Makefile.am b/src/cmd/Makefile.am
new file mode 100644
index 000000000..2ab0a497c
--- /dev/null
+++ b/src/cmd/Makefile.am
@@ -0,0 +1,3 @@
+AM_CFLAGS = @EXTRA_CFLAGS@ -g -O2 -W -Wall -Wstrict-prototypes -Wshadow
+sbin_PROGRAMS = splat
+splat_SOURCES = splat.c
diff --git a/src/cmd/splat.c b/src/cmd/splat.c
new file mode 100644
index 000000000..0ad65490c
--- /dev/null
+++ b/src/cmd/splat.c
@@ -0,0 +1,821 @@
+/* Kernel ZFS Test (KZT) user space command interface */
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <getopt.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <libuutil.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "splat.h"
+
+#undef ioctl
+
+static const char shortOpts[] = "hvlat:xc";
+static const struct option longOpts[] = {
+ { "help", no_argument, 0, 'h' },
+ { "verbose", no_argument, 0, 'v' },
+ { "list", no_argument, 0, 'l' },
+ { "all", no_argument, 0, 'a' },
+ { "test", required_argument, 0, 't' },
+ { "exit", no_argument, 0, 'x' },
+ { "nocolor", no_argument, 0, 'c' },
+ { 0, 0, 0, 0 }
+};
+
+static uu_list_t *subsystems; /* Subsystem/tests */
+static uu_list_pool_t *subsystem_pool; /* Subsystem pool */
+static uu_list_pool_t *test_pool; /* Test pool */
+static int kztctl_fd; /* Control file descriptor */
+static char kzt_version[KZT_VERSION_SIZE]; /* Kernel version string */
+static char *kzt_buffer = NULL; /* Scratch space area */
+static int kzt_buffer_size = 0; /* Scratch space size */
+
+
+static void test_list(uu_list_t *, int);
+static int dev_clear(void);
+
+
+static int usage(void)
+{
+ fprintf(stderr, "usage: kzt [-hvlaxc] [-t <subsystem:test>]\n");
+ fprintf(stderr,
+ " --help -h This help\n"
+ " --verbose -v Increase verbosity\n"
+ " --list -l List all tests in all subsystems\n"
+ " --all -a Run all tests in all subsystems\n"
+ " --test -t <sub:test> Run 'test' in subsystem 'sub'\n"
+ " --exit -x Exit on first test error\n"
+ " --nocolor -c Do not colorize output\n");
+ fprintf(stderr, "\n"
+ "Examples:\n"
+ " kzt -t kmem:all # Runs all kmem tests\n"
+ " kzt -t taskq:0x201 # Run taskq test 0x201\n");
+
+ return 0;
+}
+
+static subsystem_t *subsystem_init(kzt_user_t *desc)
+{
+ subsystem_t *sub;
+
+ sub = (subsystem_t *)malloc(sizeof(*sub));
+ if (sub == NULL)
+ return NULL;
+
+ memcpy(&sub->sub_desc, desc, sizeof(*desc));
+ uu_list_node_init(sub, &sub->sub_node, subsystem_pool);
+
+ sub->sub_tests = uu_list_create(test_pool, NULL, 0);
+ if (sub->sub_tests == NULL) {
+ free(sub);
+ return NULL;
+ }
+
+ return sub;
+}
+
+static void subsystem_fini(subsystem_t *sub)
+{
+ assert(sub != NULL);
+
+ uu_list_node_fini(sub, &sub->sub_node, subsystem_pool);
+ free(sub);
+}
+
+static int subsystem_setup(void)
+{
+ kzt_cfg_t *cfg;
+ int i, rc, size, cfg_size;
+ subsystem_t *sub;
+ kzt_user_t *desc;
+
+ /* Acquire the number of registered subsystems */
+ cfg_size = sizeof(*cfg);
+ cfg = (kzt_cfg_t *)malloc(cfg_size);
+ if (cfg == NULL)
+ return -ENOMEM;
+
+ memset(cfg, 0, cfg_size);
+ cfg->cfg_magic = KZT_CFG_MAGIC;
+ cfg->cfg_cmd = KZT_CFG_SUBSYSTEM_COUNT;
+
+ rc = ioctl(kztctl_fd, KZT_CFG, cfg);
+ if (rc) {
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg->cfg_cmd, errno);
+ free(cfg);
+ return rc;
+ }
+
+ size = cfg->cfg_rc1;
+ free(cfg);
+
+ /* Based on the newly acquired number of subsystems allocate enough
+ * memory to get the descriptive information for them all. */
+ cfg_size = sizeof(*cfg) + size * sizeof(kzt_user_t);
+ cfg = (kzt_cfg_t *)malloc(cfg_size);
+ if (cfg == NULL)
+ return -ENOMEM;
+
+ memset(cfg, 0, cfg_size);
+ cfg->cfg_magic = KZT_CFG_MAGIC;
+ cfg->cfg_cmd = KZT_CFG_SUBSYSTEM_LIST;
+ cfg->cfg_data.kzt_subsystems.size = size;
+
+ rc = ioctl(kztctl_fd, KZT_CFG, cfg);
+ if (rc) {
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg->cfg_cmd, errno);
+ free(cfg);
+ return rc;
+ }
+
+ /* Add the new subsystems into the global list */
+ size = cfg->cfg_rc1;
+ for (i = 0; i < size; i++) {
+ desc = &(cfg->cfg_data.kzt_subsystems.descs[i]);
+
+ sub = subsystem_init(desc);
+ if (sub == NULL) {
+ fprintf(stderr, "Error initializing subsystem: %s\n",
+ desc->name);
+ free(cfg);
+ return -ENOMEM;
+ }
+
+ uu_list_insert(subsystems, sub, 0);
+ }
+
+ free(cfg);
+ return 0;
+}
+
+static int subsystem_compare(const void *l_arg, const void *r_arg, void *private)
+{
+ const subsystem_t *l = l_arg;
+ const subsystem_t *r = r_arg;
+
+ if (l->sub_desc.id > r->sub_desc.id)
+ return 1;
+
+ if (l->sub_desc.id < r->sub_desc.id)
+ return -1;
+
+ return 0;
+}
+
+static void subsystem_list(uu_list_t *list, int indent)
+{
+ subsystem_t *sub;
+
+ fprintf(stdout,
+ "------------------------------- "
+ "Available KZT Tests "
+ "-------------------------------\n");
+
+ for (sub = uu_list_first(list); sub != NULL;
+ sub = uu_list_next(list, sub)) {
+ fprintf(stdout, "%*s0x%0*x %-*s ---- %s ----\n",
+ indent, "",
+ 4, sub->sub_desc.id,
+ KZT_NAME_SIZE + 7, sub->sub_desc.name,
+ sub->sub_desc.desc);
+ test_list(sub->sub_tests, indent + 7);
+ }
+}
+
+static test_t *test_init(subsystem_t *sub, kzt_user_t *desc)
+{
+ test_t *test;
+
+ test = (test_t *)malloc(sizeof(*test));
+ if (test == NULL)
+ return NULL;
+
+ test->test_sub = sub;
+ memcpy(&test->test_desc, desc, sizeof(*desc));
+ uu_list_node_init(test, &test->test_node, test_pool);
+
+ return test;
+}
+
+static void test_fini(test_t *test)
+{
+ assert(test != NULL);
+
+ uu_list_node_fini(test, &test->test_node, test_pool);
+ free(test);
+}
+
+static int test_setup(subsystem_t *sub)
+{
+ kzt_cfg_t *cfg;
+ int i, rc, size;
+ test_t *test;
+ kzt_user_t *desc;
+
+ /* Acquire the number of registered tests for the given subsystem */
+ cfg = (kzt_cfg_t *)malloc(sizeof(*cfg));
+ if (cfg == NULL)
+ return -ENOMEM;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->cfg_magic = KZT_CFG_MAGIC;
+ cfg->cfg_cmd = KZT_CFG_TEST_COUNT;
+ cfg->cfg_arg1 = sub->sub_desc.id; /* Subsystem of interest */
+
+ rc = ioctl(kztctl_fd, KZT_CFG, cfg);
+ if (rc) {
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg->cfg_cmd, errno);
+ free(cfg);
+ return rc;
+ }
+
+ size = cfg->cfg_rc1;
+ free(cfg);
+
+ /* Based on the newly acquired number of tests allocate enough
+ * memory to get the descriptive information for them all. */
+ cfg = (kzt_cfg_t *)malloc(sizeof(*cfg) + size * sizeof(kzt_user_t));
+ if (cfg == NULL)
+ return -ENOMEM;
+
+ memset(cfg, 0, sizeof(*cfg) + size * sizeof(kzt_user_t));
+ cfg->cfg_magic = KZT_CFG_MAGIC;
+ cfg->cfg_cmd = KZT_CFG_TEST_LIST;
+ cfg->cfg_arg1 = sub->sub_desc.id; /* Subsystem of interest */
+ cfg->cfg_data.kzt_tests.size = size;
+
+ rc = ioctl(kztctl_fd, KZT_CFG, cfg);
+ if (rc) {
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg->cfg_cmd, errno);
+ free(cfg);
+ return rc;
+ }
+
+ /* Add the new tests into the relevant subsystems */
+ size = cfg->cfg_rc1;
+ for (i = 0; i < size; i++) {
+ desc = &(cfg->cfg_data.kzt_tests.descs[i]);
+
+ test = test_init(sub, desc);
+ if (test == NULL) {
+ fprintf(stderr, "Error initializing test: %s\n",
+ desc->name);
+ free(cfg);
+ return -ENOMEM;
+ }
+
+ uu_list_insert(sub->sub_tests, test, 0);
+ }
+
+ free(cfg);
+ return 0;
+}
+
+static int test_compare(const void *l_arg, const void *r_arg, void *private)
+{
+ const test_t *l = l_arg;
+ const test_t *r = r_arg;
+
+ if (l->test_desc.id > r->test_desc.id)
+ return 1;
+
+ if (l->test_desc.id < r->test_desc.id)
+ return -1;
+
+ return 0;
+}
+
+static test_t *test_copy(test_t *test)
+{
+ return test_init(test->test_sub, &test->test_desc);
+}
+
+static void test_list(uu_list_t *list, int indent)
+{
+ test_t *test;
+
+ for (test = uu_list_first(list); test != NULL;
+ test = uu_list_next(list, test))
+ fprintf(stdout, "%*s0x%0*x %-*s %-*s\n",
+ indent, "",
+ 4, test->test_desc.id,
+ KZT_NAME_SIZE, test->test_desc.name,
+ KZT_DESC_SIZE, test->test_desc.desc);
+}
+
+static test_t *test_find(char *sub_str, char *test_str)
+{
+ subsystem_t *sub;
+ test_t *test;
+ int sub_num, test_num;
+
+ /* No error checking here because the string may legitimately not
+ * be a number; since the values are only used for comparison
+ * below this is perfectly safe.
+ */
+ sub_num = strtol(sub_str, NULL, 0);
+ test_num = strtol(test_str, NULL, 0);
+
+ for (sub = uu_list_first(subsystems); sub != NULL;
+ sub = uu_list_next(subsystems, sub)) {
+
+ if (strncmp(sub->sub_desc.name, sub_str, KZT_NAME_SIZE) &&
+ sub->sub_desc.id != sub_num)
+ continue;
+
+ for (test = uu_list_first(sub->sub_tests); test != NULL;
+ test = uu_list_next(sub->sub_tests, test)) {
+
+ if (!strncmp(test->test_desc.name, test_str,
+ KZT_NAME_SIZE) || test->test_desc.id == test_num)
+ return test;
+ }
+ }
+
+ return NULL;
+}
+
+static int test_add(cmd_args_t *args, test_t *test)
+{
+ test_t *tmp;
+
+ tmp = test_copy(test);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ uu_list_insert(args->args_tests, tmp, 0);
+ return 0;
+}
+
+static int test_add_all(cmd_args_t *args)
+{
+ subsystem_t *sub;
+ test_t *test;
+ int rc;
+
+ for (sub = uu_list_first(subsystems); sub != NULL;
+ sub = uu_list_next(subsystems, sub)) {
+
+ for (test = uu_list_first(sub->sub_tests); test != NULL;
+ test = uu_list_next(sub->sub_tests, test)) {
+
+ if ((rc = test_add(args, test)))
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int test_run(cmd_args_t *args, test_t *test)
+{
+ subsystem_t *sub = test->test_sub;
+ kzt_cmd_t *cmd;
+ int rc, cmd_size;
+
+ dev_clear();
+
+ cmd_size = sizeof(*cmd);
+ cmd = (kzt_cmd_t *)malloc(cmd_size);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ memset(cmd, 0, cmd_size);
+ cmd->cmd_magic = KZT_CMD_MAGIC;
+ cmd->cmd_subsystem = sub->sub_desc.id;
+ cmd->cmd_test = test->test_desc.id;
+ cmd->cmd_data_size = 0; /* Unused feature */
+
+ fprintf(stdout, "%*s:%-*s ",
+ KZT_NAME_SIZE, sub->sub_desc.name,
+ KZT_NAME_SIZE, test->test_desc.name);
+ fflush(stdout);
+ rc = ioctl(kztctl_fd, KZT_CMD, cmd);
+ if (args->args_do_color) {
+ fprintf(stdout, "%s %s\n", rc ?
+ COLOR_RED "Fail" COLOR_RESET :
+ COLOR_GREEN "Pass" COLOR_RESET,
+ rc ? strerror(errno) : "");
+ } else {
+ fprintf(stdout, "%s %s\n", rc ?
+ "Fail" : "Pass",
+ rc ? strerror(errno) : "");
+ }
+ fflush(stdout);
+ free(cmd);
+
+ if (args->args_verbose) {
+ if (read(kztctl_fd, kzt_buffer, kzt_buffer_size - 1) < 0) {
+ fprintf(stdout, "Error reading results: %d\n", errno);
+ } else {
+ fprintf(stdout, "\n%s\n", kzt_buffer);
+ fflush(stdout);
+ }
+ }
+
+ return rc;
+}
+
+static int tests_run(cmd_args_t *args)
+{
+ test_t *test;
+ int rc;
+
+ fprintf(stdout,
+ "------------------------------- "
+ "Running KZT Tests "
+ "-------------------------------\n");
+
+ for (test = uu_list_first(args->args_tests); test != NULL;
+ test = uu_list_next(args->args_tests, test)) {
+
+ rc = test_run(args, test);
+ if (rc && args->args_exit_on_error)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int args_parse_test(cmd_args_t *args, char *str)
+{
+ subsystem_t *s;
+ test_t *t;
+ char *sub_str, *test_str;
+ int sub_num, test_num;
+ int sub_all = 0, test_all = 0;
+ int rc, flag = 0;
+
+ test_str = strchr(str, ':');
+ if (test_str == NULL) {
+ fprintf(stderr, "Test must be of the "
+ "form <subsystem:test>\n");
+ return -EINVAL;
+ }
+
+ sub_str = str;
+ test_str[0] = '\0';
+ test_str = test_str + 1;
+
+ sub_num = strtol(sub_str, NULL, 0);
+ test_num = strtol(test_str, NULL, 0);
+
+ if (!strncasecmp(sub_str, "all", strlen(sub_str)) || (sub_num == -1))
+ sub_all = 1;
+
+ if (!strncasecmp(test_str, "all", strlen(test_str)) || (test_num == -1))
+ test_all = 1;
+
+ if (sub_all) {
+ if (test_all) {
+ /* Add all tests from all subsystems */
+ for (s = uu_list_first(subsystems); s != NULL;
+ s = uu_list_next(subsystems, s))
+ for (t = uu_list_first(s->sub_tests); t != NULL;
+ t = uu_list_next(s->sub_tests, t))
+ if ((rc = test_add(args, t)))
+ goto error_run;
+ } else {
+ /* Add a specific test from all subsystems */
+ for (s = uu_list_first(subsystems); s != NULL;
+ s = uu_list_next(subsystems, s)) {
+ if ((t = test_find(s->sub_desc.name, test_str))) {
+ if ((rc = test_add(args, t)))
+ goto error_run;
+
+ flag = 1;
+ }
+ }
+
+ if (!flag)
+ fprintf(stderr, "No tests '%s:%s' could be "
+ "found\n", sub_str, test_str);
+ }
+ } else {
+ if (test_all) {
+ /* Add all tests from a specific subsystem */
+ for (s = uu_list_first(subsystems); s != NULL;
+ s = uu_list_next(subsystems, s)) {
+ if (strncasecmp(sub_str, s->sub_desc.name,
+ strlen(sub_str)))
+ continue;
+
+ for (t = uu_list_first(s->sub_tests); t != NULL;
+ t = uu_list_next(s->sub_tests, t))
+ if ((rc = test_add(args, t)))
+ goto error_run;
+ }
+ } else {
+ /* Add a specific test from a specific subsystem */
+ if ((t = test_find(sub_str, test_str))) {
+ if ((rc = test_add(args, t)))
+ goto error_run;
+ } else {
+ fprintf(stderr, "Test '%s:%s' could not be "
+ "found\n", sub_str, test_str);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+error_run:
+ fprintf(stderr, "Test '%s:%s' not added to run list: %d\n",
+ sub_str, test_str, rc);
+ return rc;
+}
+
+static void args_fini(cmd_args_t *args)
+{
+ assert(args != NULL);
+
+ if (args->args_tests != NULL) {
+ uu_list_destroy(args->args_tests);
+ }
+
+ free(args);
+}
+
+static cmd_args_t *
+args_init(int argc, char **argv)
+{
+ cmd_args_t *args;
+ int c, rc;
+
+ if (argc == 1) {
+ usage();
+ return (cmd_args_t *) NULL;
+ }
+
+ /* Configure and populate the args structures */
+ args = malloc(sizeof(*args));
+ if (args == NULL)
+ return NULL;
+
+ memset(args, 0, sizeof(*args));
+ args->args_verbose = 0;
+ args->args_do_list = 0;
+ args->args_do_all = 0;
+ args->args_do_color = 1;
+ args->args_exit_on_error = 0;
+ args->args_tests = uu_list_create(test_pool, NULL, 0);
+ if (args->args_tests == NULL) {
+ args_fini(args);
+ return NULL;
+ }
+
+ while ((c = getopt_long(argc, argv, shortOpts, longOpts, NULL)) != -1) {
+ switch (c) {
+ case 'v': args->args_verbose++; break;
+ case 'l': args->args_do_list = 1; break;
+ case 'a': args->args_do_all = 1; break;
+ case 'c': args->args_do_color = 0; break;
+ case 'x': args->args_exit_on_error = 1; break;
+ case 't':
+ if (args->args_do_all) {
+ fprintf(stderr, "Option -t <subsystem:test> is "
+ "useless when used with -a\n");
+ args_fini(args);
+ return NULL;
+ }
+
+ rc = args_parse_test(args, optarg);
+ if (rc) {
+ args_fini(args);
+ return NULL;
+ }
+ break;
+ case 'h':
+ case '?':
+ usage();
+ args_fini(args);
+ return NULL;
+ default:
+ fprintf(stderr, "Unknown option '%s'\n",
+ argv[optind - 1]);
+ break;
+ }
+ }
+
+ return args;
+}
+
+static int
+dev_clear(void)
+{
+ kzt_cfg_t cfg;
+ int rc;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.cfg_magic = KZT_CFG_MAGIC;
+ cfg.cfg_cmd = KZT_CFG_BUFFER_CLEAR;
+ cfg.cfg_arg1 = 0;
+
+ rc = ioctl(kztctl_fd, KZT_CFG, &cfg);
+ if (rc)
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg.cfg_cmd, errno);
+
+ lseek(kztctl_fd, 0, SEEK_SET);
+
+ return rc;
+}
+
+static int
+dev_size(int size)
+{
+ kzt_cfg_t cfg;
+ int rc;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.cfg_magic = KZT_CFG_MAGIC;
+ cfg.cfg_cmd = KZT_CFG_BUFFER_SIZE;
+ cfg.cfg_arg1 = size;
+
+ rc = ioctl(kztctl_fd, KZT_CFG, &cfg);
+ if (rc) {
+ fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
+ (unsigned long) KZT_CFG, cfg.cfg_cmd, errno);
+ return rc;
+ }
+
+ return cfg.cfg_rc1;
+}
+
+static void
+dev_fini(void)
+{
+ if (kzt_buffer)
+ free(kzt_buffer);
+
+ if (kztctl_fd != -1) {
+ if (close(kztctl_fd) == -1) {
+ fprintf(stderr, "Unable to close %s: %d\n",
+ KZT_DEV, errno);
+ }
+ }
+}
+
+static int
+dev_init(void)
+{
+ subsystem_t *sub;
+ int rc;
+
+ kztctl_fd = open(KZT_DEV, O_RDONLY);
+ if (kztctl_fd == -1) {
+ fprintf(stderr, "Unable to open %s: %d\n"
+ "Is the kzt module loaded?\n", KZT_DEV, errno);
+ rc = errno;
+ goto error;
+ }
+
+ /* Determine kernel module version string */
+ memset(kzt_version, 0, KZT_VERSION_SIZE);
+ if ((rc = read(kztctl_fd, kzt_version, KZT_VERSION_SIZE - 1)) == -1)
+ goto error;
+
+ if ((rc = dev_clear()))
+ goto error;
+
+ if ((rc = dev_size(0)) < 0)
+ goto error;
+
+ kzt_buffer_size = rc;
+ kzt_buffer = (char *)malloc(kzt_buffer_size);
+ if (kzt_buffer == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ memset(kzt_buffer, 0, kzt_buffer_size);
+
+ /* Determine available subsystems */
+ if ((rc = subsystem_setup()) != 0)
+ goto error;
+
+ /* Determine available tests for all subsystems */
+ for (sub = uu_list_first(subsystems); sub != NULL;
+ sub = uu_list_next(subsystems, sub))
+ if ((rc = test_setup(sub)) != 0)
+ goto error;
+
+ return 0;
+
+error:
+ if (kztctl_fd != -1) {
+ if (close(kztctl_fd) == -1) {
+ fprintf(stderr, "Unable to close %s: %d\n",
+ KZT_DEV, errno);
+ }
+ }
+
+ return rc;
+}
+
+int
+init(void)
+{
+ int rc;
+
+ /* Configure the subsystem pool */
+ subsystem_pool = uu_list_pool_create("sub_pool", sizeof(subsystem_t),
+ offsetof(subsystem_t, sub_node),
+ subsystem_compare, 0);
+ if (subsystem_pool == NULL)
+ return -ENOMEM;
+
+ /* Configure the test pool */
+ test_pool = uu_list_pool_create("test_pool", sizeof(test_t),
+ offsetof(test_t, test_node),
+ test_compare, 0);
+ if (test_pool == NULL) {
+ uu_list_pool_destroy(subsystem_pool);
+ return -ENOMEM;
+ }
+
+ /* Allocate the subsystem list */
+ subsystems = uu_list_create(subsystem_pool, NULL, 0);
+ if (subsystems == NULL) {
+ uu_list_pool_destroy(test_pool);
+ uu_list_pool_destroy(subsystem_pool);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+fini(void)
+{
+ /* XXX - Cleanup destroy lists release memory */
+
+ /* XXX - Remove contents of list first */
+ uu_list_destroy(subsystems);
+}
+
+
+int
+main(int argc, char **argv)
+{
+ cmd_args_t *args = NULL;
+ int rc = 0;
+
+ /* General init */
+ if ((rc = init()))
+ return rc;
+
+ /* Device specific init */
+ if ((rc = dev_init()))
+ goto out;
+
+ /* Argument init and parsing */
+ if ((args = args_init(argc, argv)) == NULL) {
+ rc = -1;
+ goto out;
+ }
+
+ /* Generic kernel version string */
+ if (args->args_verbose)
+ fprintf(stdout, "%s", kzt_version);
+
+ /* Print the available test list and exit */
+ if (args->args_do_list) {
+ subsystem_list(subsystems, 0);
+ goto out;
+ }
+
+ /* Add all available tests to the list of tests to run */
+ if (args->args_do_all) {
+ if ((rc = test_add_all(args)))
+ goto out;
+ }
+
+ /* Run all the requested tests */
+ if ((rc = tests_run(args)))
+ goto out;
+
+out:
+ if (args != NULL)
+ args_fini(args);
+
+ dev_fini();
+ fini();
+ return rc;
+}
+
diff --git a/src/spl/Makefile.in b/src/spl/Makefile.in
new file mode 100644
index 000000000..d052fc370
--- /dev/null
+++ b/src/spl/Makefile.in
@@ -0,0 +1,50 @@
+# Makefile.in for spl kernel module
+
+MODULES := spl
+
+DISTFILES = Makefile.in \
+ linux-kmem.c linux-rwlock.c linux-taskq.c linux-thread.c
+
+# Removed '-std=gnu99' due to compile issues with i386 SPIN_LOCK_UNLOCKED
+# EXTRA_CFLAGS += -I$(src)
+# EXTRA_CFLAGS += -Wall -Wno-unknown-pragmas -Wno-missing-braces \
+# -Wno-sign-compare -Wno-parentheses -Wno-uninitialized \
+# -Wno-implicit-function-declaration -Wno-unused -Wno-trigraphs \
+# -Wno-char-subscripts -Wno-switch
+
+# Solaris porting layer module
+obj-m := spl.o
+
+spl-objs += linux-kmem.o
+spl-objs += linux-thread.o
+spl-objs += linux-taskq.o
+spl-objs += linux-rwlock.o
+
+splmodule := spl.ko
+splmoduledir := @kmoduledir@/kernel/lib/
+
+all: all-spec
+
+install: all
+ mkdir -p $(DESTDIR)$(splmoduledir)
+ $(INSTALL) -m 644 $(splmodule) $(DESTDIR)$(splmoduledir)/$(splmodule)
+ -/sbin/depmod -a
+
+uninstall:
+ rm -f $(DESTDIR)$(splmoduledir)/$(splmodule)
+ -/sbin/depmod -a
+
+clean:
+ -rm -f $(splmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+ rm -f Makefile
+ rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+ cp -p $(DISTFILES) $(distdir)
+
+all-spec:
+ $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
diff --git a/src/spl/linux-kmem.c b/src/spl/linux-kmem.c
new file mode 100644
index 000000000..7de2b211d
--- /dev/null
+++ b/src/spl/linux-kmem.c
@@ -0,0 +1,249 @@
+#include <sys/linux-kmem.h>
+
+/*
+ * Memory allocation interfaces
+ */
+#ifdef DEBUG_KMEM
+/* Shim layer memory accounting */
+atomic_t kmem_alloc_used;
+unsigned int kmem_alloc_max;
+#endif
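+
+/* A sketch of how this accounting might be consumed; the real
+ * kmem_alloc() wrapper lives in sys/linux-kmem.h rather than in this
+ * file, so the macro below is illustrative only:
+ *
+ * #define kmem_alloc(size, flags)                                     \
+ * ({      void *_ptr_ = kmalloc((size), (flags));                     \
+ *         if (_ptr_ != NULL) {                                        \
+ *                 atomic_add((size), &kmem_alloc_used);               \
+ *                 if (atomic_read(&kmem_alloc_used) > kmem_alloc_max) \
+ *                         kmem_alloc_max =                            \
+ *                                 atomic_read(&kmem_alloc_used);      \
+ *         }                                                           \
+ *         _ptr_;                                                      \
+ * })
+ */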
+
+/*
+ * Slab allocation interfaces
+ *
+ * While the Linux slab implementation was inspired by Solaris, the
+ * two have since diverged in ways which complicate this shim layer.
+ * For one thing the same symbol names are used with different
+ * arguments in the prototypes. To deal with this we must use the
+ * preprocessor to re-order arguments. Happily for us standard C says,
+ * "macros appearing in their own expansion are not re-expanded", so
+ * this does not result in infinite recursion. Additionally, the
+ * function pointers registered by Solaris differ from those used by
+ * Linux, so a lookup and mapping from a Linux-style callback to a
+ * Solaris-style callback is needed. There is some overhead in this
+ * operation which isn't horrible, but it needs to be kept in mind.
+ */
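+
+/* A minimal sketch of the macro re-ordering described above, assuming
+ * the shim header (sys/linux-kmem.h, not part of this diff) maps the
+ * Solaris-style prototype onto the helpers below; names and layout are
+ * illustrative only:
+ *
+ * #define kmem_cache_create(name, size, align, ctor, dtor,     \
+ *                           reclaim, priv, vmp, flags)         \
+ *         __kmem_cache_create((char *)(name), (size), (align), \
+ *                             (ctor), (dtor), (reclaim),       \
+ *                             (priv), (vmp), (flags))
+ *
+ * Because the expansion calls __kmem_cache_create() rather than
+ * kmem_cache_create() itself, no recursive expansion occurs.
+ */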
+typedef struct kmem_cache_cb {
+ struct list_head kcc_list;
+ kmem_cache_t * kcc_cache;
+ kmem_constructor_t kcc_constructor;
+ kmem_destructor_t kcc_destructor;
+ kmem_reclaim_t kcc_reclaim;
+ void * kcc_private;
+ void * kcc_vmp;
+} kmem_cache_cb_t;
+
+
+static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
+//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
+static LIST_HEAD(kmem_cache_cb_list);
+static struct shrinker *kmem_cache_shrinker;
+
+/* Function must be called while holding the kmem_cache_cb_lock.
+ * Because kmem_cache_t is an opaque datatype we're forced to
+ * match pointers to identify specific cache entries.
+ */
+static kmem_cache_cb_t *
+kmem_cache_find_cache_cb(kmem_cache_t *cache)
+{
+ kmem_cache_cb_t *kcc;
+
+ list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
+ if (cache == kcc->kcc_cache)
+ return kcc;
+
+ return NULL;
+}
+
+static kmem_cache_cb_t *
+kmem_cache_add_cache_cb(kmem_cache_t *cache,
+ kmem_constructor_t constructor,
+ kmem_destructor_t destructor,
+ kmem_reclaim_t reclaim,
+ void *priv, void *vmp)
+{
+ kmem_cache_cb_t *kcc;
+
+ kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
+ if (kcc) {
+ kcc->kcc_cache = cache;
+ kcc->kcc_constructor = constructor;
+ kcc->kcc_destructor = destructor;
+ kcc->kcc_reclaim = reclaim;
+ kcc->kcc_private = priv;
+ kcc->kcc_vmp = vmp;
+ spin_lock(&kmem_cache_cb_lock);
+ list_add(&kcc->kcc_list, &kmem_cache_cb_list);
+ spin_unlock(&kmem_cache_cb_lock);
+ }
+
+ return kcc;
+}
+
+static void
+kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
+{
+ if (kcc == NULL)
+ return;
+
+ spin_lock(&kmem_cache_cb_lock);
+ list_del(&kcc->kcc_list);
+ spin_unlock(&kmem_cache_cb_lock);
+
+ kfree(kcc);
+}
+
+static void
+kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+ kmem_cache_cb_t *kcc;
+
+ spin_lock(&kmem_cache_cb_lock);
+
+ /* Callback list must be in sync with linux slab caches */
+ kcc = kmem_cache_find_cache_cb(cache);
+ BUG_ON(!kcc);
+
+ kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
+ spin_unlock(&kmem_cache_cb_lock);
+ /* Linux constructor has no return code, silently eat it */
+}
+
+static void
+kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+ kmem_cache_cb_t *kcc;
+
+ spin_lock(&kmem_cache_cb_lock);
+
+ /* Callback list must be in sync with linux slab caches */
+ kcc = kmem_cache_find_cache_cb(cache);
+ BUG_ON(!kcc);
+
+ /* Solaris destructor takes no flags, silently eat them */
+ kcc->kcc_destructor(ptr, kcc->kcc_private);
+ spin_unlock(&kmem_cache_cb_lock);
+}
+
+/* XXX - Arguments are ignored */
+static int
+kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
+{
+ kmem_cache_cb_t *kcc;
+ int total = 0;
+
+ /* Under Linux a shrinker is not tightly coupled with a slab
+ * cache. In fact Linux always systematically tries calling all
+ * registered shrinker callbacks until its target reclamation level
+ * is reached. Because of this we only register one shrinker
+ * function in the shim layer for all slab caches. And we always
+ * attempt to shrink all caches when this generic shrinker is called.
+ */
+ spin_lock(&kmem_cache_cb_lock);
+
+ list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
+ /* Under Linux the desired number and gfp type of objects
+ * is passed to the reclaiming function as a suggested reclaim
+ * target. I do not pass these args on because reclaim
+ * policy is entirely up to the owner under Solaris. We only
+ * pass on the pre-registered private data.
+ */
+ if (kcc->kcc_reclaim)
+ kcc->kcc_reclaim(kcc->kcc_private);
+
+ total += 1;
+ }
+
+ /* Under Linux we should return the remaining number of entries in
+ * the cache. Unfortunately, I don't see an easy way to safely
+ * emulate this behavior so I'm returning one entry per cache which
+ * was registered with the generic shrinker. This should fake out
+ * the Linux VM when it attempts to shrink caches.
+ */
+ spin_unlock(&kmem_cache_cb_lock);
+ return total;
+}
+
+/* Ensure the kmem_cache_create/kmem_cache_destroy macros are
+ * removed here to prevent a recursive substitution; we want to call
+ * the native Linux versions below.
+ */
+#undef kmem_cache_create
+#undef kmem_cache_destroy
+
+kmem_cache_t *
+__kmem_cache_create(char *name, size_t size, size_t align,
+ int (*constructor)(void *, void *, int),
+ void (*destructor)(void *, void *),
+ void (*reclaim)(void *),
+ void *priv, void *vmp, int flags)
+{
+ kmem_cache_t *cache;
+ kmem_cache_cb_t *kcc;
+ int shrinker_flag = 0;
+
+ /* FIXME: The vmp option is currently unsupported by the shim layer */
+ BUG_ON(vmp);
+
+ cache = kmem_cache_create(name, size, align, flags,
+ kmem_cache_generic_constructor,
+ kmem_cache_generic_destructor);
+ if (cache == NULL)
+ return NULL;
+
+ /* Register shared shrinker function on initial cache create */
+ spin_lock(&kmem_cache_cb_lock);
+ if (list_empty(&kmem_cache_cb_list)) {
+ kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
+ kmem_cache_generic_shrinker);
+ if (kmem_cache_shrinker == NULL) {
+ kmem_cache_destroy(cache);
+ spin_unlock(&kmem_cache_cb_lock);
+ return NULL;
+ }
+
+ shrinker_flag = 1;
+ }
+ spin_unlock(&kmem_cache_cb_lock);
+
+ kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
+ reclaim, priv, vmp);
+ if (kcc == NULL) {
+ if (shrinker_flag) /* New shrinker registered must be removed */
+ remove_shrinker(kmem_cache_shrinker);
+
+ kmem_cache_destroy(cache);
+ return NULL;
+ }
+
+ return cache;
+}
+
+/* Return codes discarded because Solaris implementation has void return */
+void
+__kmem_cache_destroy(kmem_cache_t *cache)
+{
+ kmem_cache_cb_t *kcc;
+
+ spin_lock(&kmem_cache_cb_lock);
+ kcc = kmem_cache_find_cache_cb(cache);
+ spin_unlock(&kmem_cache_cb_lock);
+ if (kcc == NULL)
+ return;
+
+ kmem_cache_destroy(cache);
+ kmem_cache_remove_cache_cb(kcc);
+
+ /* Unregister generic shrinker on removal of all caches */
+ spin_lock(&kmem_cache_cb_lock);
+ if (list_empty(&kmem_cache_cb_list))
+ remove_shrinker(kmem_cache_shrinker);
+
+ spin_unlock(&kmem_cache_cb_lock);
+}
+
+void
+__kmem_reap(void)
+{
+ /* Since there's no easy hook into Linux to force all the registered
+ * shrinkers to run we just run the ones registered for this shim */
+ kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
+}
+
diff --git a/src/spl/linux-rwlock.c b/src/spl/linux-rwlock.c
new file mode 100644
index 000000000..e95ec1555
--- /dev/null
+++ b/src/spl/linux-rwlock.c
@@ -0,0 +1,41 @@
+#include <sys/linux-rwlock.h>
+
+int
+rw_lock_held(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+ if (rwlp->rw_sem.activity != 0) {
+#else
+ if (rwlp->rw_sem.count != 0) {
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rw_read_held(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ if (rw_lock_held(rwlp) && rwlp->rw_owner == NULL) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rw_write_held(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ if (rwlp->rw_owner == current) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/src/spl/linux-taskq.c b/src/spl/linux-taskq.c
new file mode 100644
index 000000000..0babd2395
--- /dev/null
+++ b/src/spl/linux-taskq.c
@@ -0,0 +1,78 @@
+#include <sys/linux-taskq.h>
+
+/*
+ * Task queue interface
+ *
+ * The taskq_work_wrapper functions are used to manage the work_structs
+ * which must be submitted to Linux. The shim layer allocates a wrapper
+ * structure for each item which contains a pointer to itself as well as
+ * the real work to be performed. When the work item runs, the generic
+ * handler is called, which invokes the real work function and then uses
+ * the self pointer to free the work_struct.
+typedef struct taskq_work_wrapper {
+ struct work_struct tww_work;
+ task_func_t tww_func;
+ void * tww_priv;
+} taskq_work_wrapper_t;
+
+static void
+taskq_work_handler(void *priv)
+{
+ taskq_work_wrapper_t *tww = priv;
+
+ BUG_ON(tww == NULL);
+ BUG_ON(tww->tww_func == NULL);
+
+ /* Call the real function and free the wrapper */
+ tww->tww_func(tww->tww_priv);
+ kfree(tww);
+}
+
+/* XXX - All flags currently ignored */
+taskqid_t
+__taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
+{
+ struct workqueue_struct *wq = tq;
+ taskq_work_wrapper_t *tww;
+ int rc;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(tq == NULL);
+ BUG_ON(func == NULL);
+
+ tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_KERNEL);
+ if (!tww)
+ return (taskqid_t)0;
+
+ INIT_WORK(&(tww->tww_work), taskq_work_handler, tww);
+ tww->tww_func = func;
+ tww->tww_priv = priv;
+
+ rc = queue_work(wq, &(tww->tww_work));
+ if (!rc) {
+ kfree(tww);
+ return (taskqid_t)0;
+ }
+
+ return (taskqid_t)wq;
+}
+
+/* XXX - Most args are ignored until we decide if it's worth the effort
+ * to emulate the Solaris notion of dynamic thread pools. For
+ * now we simply serialize everything through one thread which
+ * may come back to bite us as a performance issue.
+ * pri - Priority is ignored
+ * min - Ignored until this is a dynamic thread pool
+ * max - Ignored until this is a dynamic thread pool
+ * flags - Ignored until this is a dynamic thread pool
+ */
+taskq_t *
+__taskq_create(const char *name, int nthreads, pri_t pri,
+ int minalloc, int maxalloc, uint_t flags)
+{
+ /* NOTE: Linux workqueue names are limited to 10 chars */
+
+ return create_singlethread_workqueue(name);
+}
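+
+/* A usage sketch under the assumed Solaris-style wrappers; taskq_create()
+ * and taskq_dispatch() are presumed to map onto the __taskq_* helpers
+ * above via the shim header, and the names and constants used here are
+ * illustrative only:
+ *
+ * taskq_t *tq = taskq_create("mytq", 1, maxclsyspri, 50, INT_MAX, 0);
+ * taskqid_t id = taskq_dispatch(tq, my_func, my_data, TQ_SLEEP);
+ * if (id == 0)
+ * ... dispatch failed, no work was queued ...
+ */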
diff --git a/src/spl/linux-thread.c b/src/spl/linux-thread.c
new file mode 100644
index 000000000..ad036471a
--- /dev/null
+++ b/src/spl/linux-thread.c
@@ -0,0 +1,113 @@
+#include <sys/linux-thread.h>
+
+/*
+ * Thread interfaces
+ */
+typedef struct thread_priv_s {
+ unsigned long tp_magic; /* Magic */
+ void (*tp_func)(void *); /* Registered function */
+ void *tp_args; /* Args to be passed to function */
+ size_t tp_len; /* Len to be passed to function */
+ int tp_state; /* State to start thread at */
+ pri_t tp_pri; /* Priority to start thread at */
+ volatile kthread_t *tp_task; /* Task pointer for new thread */
+ spinlock_t tp_lock; /* Synchronization lock */
+ wait_queue_head_t tp_waitq; /* Synchronization wait queue */
+} thread_priv_t;
+
+int
+thread_generic_wrapper(void *arg)
+{
+ thread_priv_t *tp = (thread_priv_t *)arg;
+ void (*func)(void *);
+ void *args;
+ char name[16];
+
+ /* All threads share a generic name since the registered
+ * function's name is not available here */
+ snprintf(name, sizeof(name), "%s", "kthread");
+ daemonize(name);
+
+ spin_lock(&tp->tp_lock);
+ BUG_ON(tp->tp_magic != TP_MAGIC);
+ func = tp->tp_func;
+ args = tp->tp_args;
+ tp->tp_task = get_current();
+ set_current_state(tp->tp_state);
+ set_user_nice((kthread_t *)tp->tp_task, PRIO_TO_NICE(tp->tp_pri));
+
+ spin_unlock(&tp->tp_lock);
+ wake_up(&tp->tp_waitq);
+
+ /* DO NOT USE 'ARG' AFTER THIS POINT, EVER, EVER, EVER!
+ * Local variables are used here because after the calling thread
+ * has been woken up it will exit and this memory will no longer
+ * be safe to access since it was declared on the caller's stack. */
+ if (func)
+ func(args);
+
+ return 0;
+}
+
+void
+__thread_exit(void)
+{
+ return;
+}
+
+/* thread_create() may block forever if it cannot create a thread or
+ * allocate memory. This is preferable to returning a NULL which Solaris
+ * style callers likely never check for... since it can't fail. */
+kthread_t *
+__thread_create(caddr_t stk, size_t stksize, void (*proc)(void *),
+ void *args, size_t len, proc_t *pp, int state, pri_t pri)
+{
+ thread_priv_t tp;
+ DEFINE_WAIT(wait);
+ kthread_t *task;
+ long pid;
+
+ /* Option pp is simply ignored */
+ /* Variable stack size unsupported */
+ BUG_ON(stk != NULL);
+
+ /* Variable tp is located on the stack and not the heap because I want
+ * to minimize any chance of a failure, since the Solaris code is designed
+ * such that this function cannot fail. This is a little dangerous since
+ * we're passing a stack address to a new thread but correct locking was
+ * added to ensure the callee can use the data safely until wake_up(). */
+ tp.tp_magic = TP_MAGIC;
+ tp.tp_func = proc;
+ tp.tp_args = args;
+ tp.tp_len = len;
+ tp.tp_state = state;
+ tp.tp_pri = pri;
+ tp.tp_task = NULL;
+ spin_lock_init(&tp.tp_lock);
+ init_waitqueue_head(&tp.tp_waitq);
+
+ spin_lock(&tp.tp_lock);
+
+ /* Solaris says this must never fail so we try forever */
+ while ((pid = kernel_thread(thread_generic_wrapper, (void *)&tp, 0)) < 0)
+ printk(KERN_ERR "linux-thread: Unable to create thread; "
+ "pid = %ld\n", pid);
+
+ /* All signals are ignored due to sleeping TASK_UNINTERRUPTIBLE */
+ for (;;) {
+ prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (tp.tp_task != NULL)
+ break;
+
+ spin_unlock(&tp.tp_lock);
+ schedule();
+ spin_lock(&tp.tp_lock);
+ }
+
+ /* Verify the pid returned matches the pid in the task struct */
+ BUG_ON(pid != (tp.tp_task)->pid);
+
+ spin_unlock(&tp.tp_lock);
+
+ return (kthread_t *)tp.tp_task;
+}
diff --git a/src/splat/Makefile.in b/src/splat/Makefile.in
new file mode 100644
index 000000000..758c4be0a
--- /dev/null
+++ b/src/splat/Makefile.in
@@ -0,0 +1,57 @@
+# Makefile.in for splat kernel module
+
+MODULES := splat
+
+DISTFILES = Makefile.in \
+ splat-kmem.c splat-random.c splat-taskq.c \
+ splat-time.c splat-condvar.c splat-mutex.c \
+ splat-rwlock.c splat-thread.c splat-ctl.c
+
+# Removed '-std=gnu99' due to compile issues with i386 SPIN_LOCK_UNLOCKED
+# EXTRA_CFLAGS += -I$(src)
+# EXTRA_CFLAGS += -Wall -Wno-unknown-pragmas -Wno-missing-braces \
+# -Wno-sign-compare -Wno-parentheses -Wno-uninitialized \
+# -Wno-implicit-function-declaration -Wno-unused -Wno-trigraphs \
+# -Wno-char-subscripts -Wno-switch
+
+# Solaris porting layer aggressive tests
+obj-m := splat.o
+
+splat-objs += splat-ctl.o
+splat-objs += splat-kmem.o
+splat-objs += splat-taskq.o
+splat-objs += splat-random.o
+splat-objs += splat-mutex.o
+splat-objs += splat-condvar.o
+splat-objs += splat-thread.o
+splat-objs += splat-rwlock.o
+splat-objs += splat-time.o
+
+splatmodule := splat.ko
+splatmoduledir := @kmoduledir@/kernel/lib/
+
+all: all-spec
+
+install: all
+ mkdir -p $(DESTDIR)$(splatmoduledir)
+ $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+uninstall:
+ rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+clean:
+ -rm -f $(splatmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+ rm -f Makefile
+ rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+ cp -p $(DISTFILES) $(distdir)
+
+all-spec:
+ $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
diff --git a/src/splat/splat-condvar.c b/src/splat/splat-condvar.c
new file mode 100644
index 000000000..eaab2ac0a
--- /dev/null
+++ b/src/splat/splat-condvar.c
@@ -0,0 +1,454 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_CONDVAR 0x0500
+#define KZT_CONDVAR_NAME "condvar"
+#define KZT_CONDVAR_DESC "Kernel Condition Variable Tests"
+
+#define KZT_CONDVAR_TEST1_ID 0x0501
+#define KZT_CONDVAR_TEST1_NAME "signal1"
+#define KZT_CONDVAR_TEST1_DESC "Wake a single thread, cv_wait()/cv_signal()"
+
+#define KZT_CONDVAR_TEST2_ID 0x0502
+#define KZT_CONDVAR_TEST2_NAME "broadcast1"
+#define KZT_CONDVAR_TEST2_DESC "Wake all threads, cv_wait()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST3_ID 0x0503
+#define KZT_CONDVAR_TEST3_NAME "signal2"
+#define KZT_CONDVAR_TEST3_DESC "Wake a single thread, cv_timedwait()/cv_signal()"
+
+#define KZT_CONDVAR_TEST4_ID 0x0504
+#define KZT_CONDVAR_TEST4_NAME "broadcast2"
+#define KZT_CONDVAR_TEST4_DESC "Wake all threads, cv_timedwait()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST5_ID 0x0505
+#define KZT_CONDVAR_TEST5_NAME "timeout"
+#define KZT_CONDVAR_TEST5_DESC "Timeout thread, cv_timedwait()"
+
+#define KZT_CONDVAR_TEST_MAGIC 0x115599DDUL
+#define KZT_CONDVAR_TEST_NAME "condvar_test"
+#define KZT_CONDVAR_TEST_COUNT 8
+
+typedef struct condvar_priv {
+ unsigned long cv_magic;
+ struct file *cv_file;
+ kcondvar_t cv_condvar;
+ kmutex_t cv_mtx;
+} condvar_priv_t;
+
+typedef struct condvar_thr {
+ int ct_id;
+ const char *ct_name;
+ condvar_priv_t *ct_cvp;
+ int ct_rc;
+} condvar_thr_t;
+
+int
+kzt_condvar_test12_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+
+ ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ cv_wait(&cv->cv_condvar, &cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+kzt_condvar_test1(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST1_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior: 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+ kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Attempted to "
+ "wake %d thread but work %d threads woke\n",
+ 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ if (!rc)
+ kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+ /* Wait until the last thread has dropped the mutex */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test2(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST2_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake all threads waiting on the condition variable */
+ cv_broadcast(&cv.cv_condvar);
+
+ /* Wait until all threads have exited */
+ while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ kzt_vprint(file, KZT_CONDVAR_TEST2_NAME, "Correctly woke all "
+ "%d sleeping threads at once\n", count);
+
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+int
+kzt_condvar_test34_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+ clock_t rc;
+
+ ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+
+ /* Sleep no longer than 3 seconds, for this test we should
+ * actually never sleep that long without being woken up. */
+ rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
+ if (rc == -1) {
+ ct->ct_rc = -ETIMEDOUT;
+ kzt_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
+ "should have been woken\n", name);
+ } else {
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ }
+
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+kzt_condvar_test3(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior: 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+ kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
+ "wake %d thread but work %d threads woke\n",
+ 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+ kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+ /* Wait until the last thread has dropped the mutex */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test4(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST4_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake all threads waiting on the condition variable */
+ cv_broadcast(&cv.cv_condvar);
+
+ /* Wait until all threads have exited and dropped the mutex */
+ while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) ||
+ mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+ kzt_vprint(file, KZT_CONDVAR_TEST4_NAME, "Correctly woke all "
+ "%d sleeping threads at once\n", count);
+
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test5(struct file *file, void *arg)
+{
+ kcondvar_t condvar;
+ kmutex_t mtx;
+ clock_t time_left, time_before, time_after, time_delta;
+ int64_t whole_delta;
+ int32_t remain_delta;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
+ "%d second and expecting to be woken by timeout\n", 1);
+
+ /* Allow a 1 second timeout, plenty long to validate correctness. */
+ time_before = lbolt;
+ mutex_enter(&mtx);
+ time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
+ mutex_exit(&mtx);
+ time_after = lbolt;
+ time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
+ whole_delta = time_delta;
+ remain_delta = do_div(whole_delta, HZ);
+
+ if (time_left == -1) {
+ if (time_delta >= HZ) {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out and was asleep "
+ "for %d.%d seconds (%d second min)\n",
+ (int)whole_delta, remain_delta, 1);
+ } else {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out but was only "
+ "asleep for %d.%d seconds (%d second "
+ "min)\n", (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+ } else {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread exited after only %d.%d seconds, it "
+ "did not hit the %d second timeout\n",
+ (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+
+ cv_destroy(&condvar);
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_condvar_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_CONDVAR_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_CONDVAR_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_CONDVAR;
+
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST1_NAME, KZT_CONDVAR_TEST1_DESC,
+ KZT_CONDVAR_TEST1_ID, kzt_condvar_test1);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST2_NAME, KZT_CONDVAR_TEST2_DESC,
+ KZT_CONDVAR_TEST2_ID, kzt_condvar_test2);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST3_NAME, KZT_CONDVAR_TEST3_DESC,
+ KZT_CONDVAR_TEST3_ID, kzt_condvar_test3);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST4_NAME, KZT_CONDVAR_TEST4_DESC,
+ KZT_CONDVAR_TEST4_ID, kzt_condvar_test4);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST5_NAME, KZT_CONDVAR_TEST5_DESC,
+ KZT_CONDVAR_TEST5_ID, kzt_condvar_test5);
+
+ return sub;
+}
+
+void
+kzt_condvar_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST5_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_condvar_id(void)
+{
+ return KZT_SUBSYSTEM_CONDVAR;
+}
diff --git a/src/splat/splat-ctl.c b/src/splat/splat-ctl.c
new file mode 100644
index 000000000..5292b0e60
--- /dev/null
+++ b/src/splat/splat-ctl.c
@@ -0,0 +1,684 @@
+/*
+ * My intent is to create a loadable kzt (kernel ZFS test) module
+ * which can be used as an access point to run in-kernel ZFS regression
+ * tests. Why do we need this when we have ztest? Well, ztest.c only
+ * exercises the ZFS code proper; it cannot be used to validate the
+ * Linux kernel shim primitives. This also provides a nice hook for
+ * any other in-kernel regression tests we wish to run, such as direct
+ * in-kernel tests against the DMU.
+ *
+ * The basic design of the kzt module is that it is constructed of
+ * various kzt_* source files, each of which contains regression tests.
+ * For example the kzt_linux_kmem.c file contains tests for validating
+ * kmem correctness. When the kzt module is loaded kzt_*_init()
+ * will be called for each subsystem's tests, and similarly kzt_*_fini()
+ * is called when the kzt module is removed. Each test can then be
+ * run by making an ioctl() call from a userspace control application
+ * to pick the subsystem and test to be run.
+ *
+ * Author: Brian Behlendorf
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/device.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#include <linux/devfs_fs_kernel.h>
+#endif
+
+#include <linux/cdev.h>
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+static struct class_simple *kzt_class;
+#else
+static struct class *kzt_class;
+#endif
+static struct list_head kzt_module_list;
+static spinlock_t kzt_module_lock;
+
+static int
+kzt_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ kzt_info_t *info;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&info->info_lock);
+ info->info_size = KZT_INFO_BUFFER_SIZE;
+ info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
+ if (info->info_buffer == NULL) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->info_head = info->info_buffer;
+ file->private_data = (void *)info;
+
+ kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
+
+ return 0;
+}
+
+static int
+kzt_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ vfree(info->info_buffer);
+ kfree(info);
+
+ return 0;
+}
+
+static int
+kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ memset(info->info_buffer, 0, info->info_size);
+ info->info_head = info->info_buffer;
+ spin_unlock(&info->info_lock);
+
+ return 0;
+}
+
+static int
+kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ char *buf;
+ int min, size, rc = 0;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ if (kcfg->cfg_arg1 > 0) {
+
+ size = kcfg->cfg_arg1;
+ buf = (char *)vmalloc(size);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Zero fill and truncate contents when copying the buffer */
+ min = ((size < info->info_size) ? size : info->info_size);
+ memset(buf, 0, size);
+ memcpy(buf, info->info_buffer, min);
+ vfree(info->info_buffer);
+ info->info_size = size;
+ info->info_buffer = buf;
+ info->info_head = info->info_buffer;
+ }
+
+ kcfg->cfg_rc1 = info->info_size;
+
+ if (copy_to_user((kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ rc = -EFAULT;
+out:
+ spin_unlock(&info->info_lock);
+
+ return rc;
+}
+
+
+static kzt_subsystem_t *
+kzt_subsystem_find(int id)
+{
+ kzt_subsystem_t *sub;
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+ if (id == sub->desc.id) {
+ spin_unlock(&kzt_module_lock);
+ return sub;
+ }
+ }
+ spin_unlock(&kzt_module_lock);
+
+ return NULL;
+}
+
+static int
+kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ int i = 0;
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list)
+ i++;
+
+ spin_unlock(&kzt_module_lock);
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_cfg_t *tmp;
+ int size, i = 0;
+
+ /* The structure is sized large enough for the N subsystem entries
+ * requested by the caller. On exit the number of entries filled
+ * in with valid subsystems is stored in cfg_rc1. If the caller
+ * does not provide enough entries for all subsystems the list is
+ * truncated to avoid an overrun.
+ */
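+ /* The sizing arithmetic below assumes kzt_cfg_t ends in a
+ * flexible array of descriptors, roughly (assumed layout; the
+ * real struct is defined in sys/splat-ctl.h, not in this diff):
+ *
+ * typedef struct kzt_cfg {
+ * ...
+ * union {
+ * struct {
+ * __u32 size;
+ * kzt_user_t descs[0];
+ * } kzt_subsystems;
+ * ...
+ * } cfg_data;
+ * } kzt_cfg_t;
+ */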
+ size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
+ sizeof(kzt_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+ /* Truncate the list before we overrun the alloc'ed memory */
+ if (i == kcfg->cfg_data.kzt_subsystems.size)
+ break;
+
+ strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
+ sub->desc.name, KZT_NAME_SIZE);
+ strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
+ sub->desc.desc, KZT_DESC_SIZE);
+ tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
+ i++;
+ }
+ spin_unlock(&kzt_module_lock);
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((kzt_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
+
+static int
+kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_test_t *test;
+ int i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = kzt_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list)
+ i++;
+
+ spin_unlock(&(sub->test_lock));
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_test_t *test;
+ kzt_cfg_t *tmp;
+ int size, i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = kzt_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+ /* The structure is sized large enough for the N test entries
+ * requested by the caller. On exit the number of entries filled
+ * in with valid tests is stored in cfg_rc1. If the caller does
+ * not provide enough entries for all tests the list is truncated
+ * to avoid an overrun.
+ */
+ size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&(sub->test_lock));
+	list_for_each_entry(test, &(sub->test_list), test_list) {
+		/* Stop before we overrun the caller-sized allocation */
+		if (i == kcfg->cfg_data.kzt_tests.size)
+			break;
+
+		strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
+			test->desc.name, KZT_NAME_SIZE);
+		strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
+			test->desc.desc, KZT_DESC_SIZE);
+		tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
+		i++;
+	}
+ spin_unlock(&(sub->test_lock));
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
+
+static int
+kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
+{
+ kzt_test_t *test;
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list) {
+ if (test->desc.id == cmd) {
+ spin_unlock(&(sub->test_lock));
+ return test->test(file, arg);
+ }
+ }
+ spin_unlock(&(sub->test_lock));
+
+ return -EINVAL;
+}
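+
+/* Note the test is invoked after sub->test_lock has been dropped; tests
+ * may sleep, so the spinlock cannot be held across the call.  This
+ * relies on the subsystem module remaining loaded while its test runs. */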
+
+static int
+kzt_ioctl_cfg(struct file *file, unsigned long arg)
+{
+ kzt_cfg_t kcfg;
+ int rc = 0;
+
+	if (copy_from_user(&kcfg, (kzt_cfg_t __user *)arg, sizeof(kcfg)))
+ return -EFAULT;
+
+ if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
+ kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
+ kcfg.cfg_magic, KZT_CFG_MAGIC);
+ return -EINVAL;
+ }
+
+ switch (kcfg.cfg_cmd) {
+ case KZT_CFG_BUFFER_CLEAR:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Unused
+ */
+ rc = kzt_buffer_clear(file, &kcfg, arg);
+ break;
+ case KZT_CFG_BUFFER_SIZE:
+ /* cfg_arg1 - 0 - query size; >0 resize
+ * cfg_rc1 - Set to current buffer size
+ */
+ rc = kzt_buffer_size(file, &kcfg, arg);
+ break;
+ case KZT_CFG_SUBSYSTEM_COUNT:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Set to number of subsystems
+ */
+ rc = kzt_subsystem_count(&kcfg, arg);
+ break;
+ case KZT_CFG_SUBSYSTEM_LIST:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Set to number of subsystems
+ * cfg_data.kzt_subsystems - Populated with subsystems
+ */
+ rc = kzt_subsystem_list(&kcfg, arg);
+ break;
+ case KZT_CFG_TEST_COUNT:
+ /* cfg_arg1 - Set to a target subsystem
+ * cfg_rc1 - Set to number of tests
+ */
+ rc = kzt_test_count(&kcfg, arg);
+ break;
+ case KZT_CFG_TEST_LIST:
+ /* cfg_arg1 - Set to a target subsystem
+ * cfg_rc1 - Set to number of tests
+		 * cfg_data.kzt_tests - Populated with tests
+ */
+ rc = kzt_test_list(&kcfg, arg);
+ break;
+ default:
+ kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+kzt_ioctl_cmd(struct file *file, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_cmd_t kcmd;
+ int rc = -EINVAL;
+ void *data = NULL;
+
+	if (copy_from_user(&kcmd, (kzt_cmd_t __user *)arg, sizeof(kcmd)))
+ return -EFAULT;
+
+ if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
+ kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
+			kcmd.cmd_magic, KZT_CMD_MAGIC);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for any opaque data the caller needed to pass on */
+ if (kcmd.cmd_data_size > 0) {
+ data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
+ cmd_data_str)), kcmd.cmd_data_size)) {
+ kfree(data);
+ return -EFAULT;
+ }
+ }
+
+ sub = kzt_subsystem_find(kcmd.cmd_subsystem);
+ if (sub != NULL)
+ rc = kzt_validate(file, sub, kcmd.cmd_test, data);
+ else
+ rc = -EINVAL;
+
+ if (data != NULL)
+ kfree(data);
+
+ return rc;
+}
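+
+/* A minimal user-space sketch of driving KZT_CMD (for illustration only;
+ * 'fd' is an assumed open kztctl descriptor):
+ *
+ *	kzt_cmd_t cmd = { 0 };
+ *	cmd.cmd_magic = KZT_CMD_MAGIC;
+ *	cmd.cmd_subsystem = 0x0100;	(the kmem subsystem)
+ *	cmd.cmd_test = 0x0101;		(the kmem_alloc test)
+ *	cmd.cmd_data_size = 0;		(no opaque test data)
+ *	ioctl(fd, KZT_CMD, &cmd);	(returns the test's result)
+ */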
+
+static int
+kzt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+	unsigned int minor = iminor(inode);
+	int rc = 0;
+
+ /* Ignore tty ioctls */
+ if ((cmd & 0xffffff00) == ((int)'T') << 8)
+ return -ENOTTY;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ switch (cmd) {
+ case KZT_CFG:
+ rc = kzt_ioctl_cfg(file, arg);
+ break;
+ case KZT_CMD:
+ rc = kzt_ioctl_cmd(file, arg);
+ break;
+ default:
+ kzt_print(file, "Bad ioctl command %d\n", cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/* I'm not sure why you would want to write into this buffer from
+ * user space since its principal use is to pass test status info
+ * back to user space, but I don't see any reason to prevent it.
+ */
+static ssize_t kzt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = 0;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ /* Write beyond EOF */
+ if (*ppos >= info->info_size) {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ /* Resize count if beyond EOF */
+ if (*ppos + count > info->info_size)
+ count = info->info_size - *ppos;
+
+	if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ *ppos += count;
+ rc = count;
+out:
+ spin_unlock(&info->info_lock);
+ return rc;
+}
+
+static ssize_t kzt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = 0;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ /* Read beyond EOF */
+ if (*ppos >= info->info_size)
+ goto out;
+
+ /* Resize count if beyond EOF */
+ if (*ppos + count > info->info_size)
+ count = info->info_size - *ppos;
+
+ if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ *ppos += count;
+ rc = count;
+out:
+ spin_unlock(&info->info_lock);
+ return rc;
+}
+
+static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = -EINVAL;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ switch (origin) {
+ case 0: /* SEEK_SET - No-op just do it */
+ break;
+ case 1: /* SEEK_CUR - Seek from current */
+ offset = file->f_pos + offset;
+ break;
+ case 2: /* SEEK_END - Seek from end */
+ offset = info->info_size + offset;
+ break;
+ }
+
+ if (offset >= 0) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ rc = offset;
+ }
+
+ spin_unlock(&info->info_lock);
+
+ return rc;
+}
+
+static struct file_operations kzt_fops = {
+ .owner = THIS_MODULE,
+ .open = kzt_open,
+ .release = kzt_release,
+ .ioctl = kzt_ioctl,
+ .read = kzt_read,
+ .write = kzt_write,
+ .llseek = kzt_seek,
+};
+
+static struct cdev kzt_cdev = {
+ .owner = THIS_MODULE,
+ .kobj = { .name = "kztctl", },
+};
+
+static int __init
+kzt_init(void)
+{
+ dev_t dev;
+	int rc;
+
+ spin_lock_init(&kzt_module_lock);
+ INIT_LIST_HEAD(&kzt_module_list);
+
+ KZT_SUBSYSTEM_INIT(kmem);
+ KZT_SUBSYSTEM_INIT(taskq);
+ KZT_SUBSYSTEM_INIT(krng);
+ KZT_SUBSYSTEM_INIT(mutex);
+ KZT_SUBSYSTEM_INIT(condvar);
+ KZT_SUBSYSTEM_INIT(thread);
+ KZT_SUBSYSTEM_INIT(rwlock);
+ KZT_SUBSYSTEM_INIT(time);
+
+ dev = MKDEV(KZT_MAJOR, 0);
+	if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
+ goto error;
+
+ /* Support for registering a character driver */
+ cdev_init(&kzt_cdev, &kzt_fops);
+ if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
+ printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
+ kobject_put(&kzt_cdev.kobj);
+ unregister_chrdev_region(dev, KZT_MINORS);
+ goto error;
+ }
+
+	/* Support for udev: make driver info available in sysfs */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ kzt_class = class_simple_create(THIS_MODULE, "kzt");
+#else
+ kzt_class = class_create(THIS_MODULE, "kzt");
+#endif
+ if (IS_ERR(kzt_class)) {
+ rc = PTR_ERR(kzt_class);
+ printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
+ cdev_del(&kzt_cdev);
+ unregister_chrdev_region(dev, KZT_MINORS);
+ goto error;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
+ NULL, "kztctl");
+#else
+ class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
+ NULL, "kztctl");
+#endif
+
+ printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
+ return 0;
+error:
+ printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
+ return rc;
+}
+
+static void
+kzt_fini(void)
+{
+ dev_t dev = MKDEV(KZT_MAJOR, 0);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ class_simple_device_remove(dev);
+ class_simple_destroy(kzt_class);
+ devfs_remove("kzt/kztctl");
+ devfs_remove("kzt");
+#else
+ class_device_destroy(kzt_class, dev);
+ class_destroy(kzt_class);
+#endif
+ cdev_del(&kzt_cdev);
+ unregister_chrdev_region(dev, KZT_MINORS);
+
+ KZT_SUBSYSTEM_FINI(time);
+ KZT_SUBSYSTEM_FINI(rwlock);
+ KZT_SUBSYSTEM_FINI(thread);
+ KZT_SUBSYSTEM_FINI(condvar);
+ KZT_SUBSYSTEM_FINI(mutex);
+ KZT_SUBSYSTEM_FINI(krng);
+ KZT_SUBSYSTEM_FINI(taskq);
+ KZT_SUBSYSTEM_FINI(kmem);
+
+ ASSERT(list_empty(&kzt_module_list));
+ printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
+}
+
+module_init(kzt_init);
+module_exit(kzt_fini);
+
+MODULE_AUTHOR("Lawrence Livermore National Labs");
+MODULE_DESCRIPTION("Kernel ZFS Test");
+MODULE_LICENSE("GPL");
+
diff --git a/src/splat/splat-kmem.c b/src/splat/splat-kmem.c
new file mode 100644
index 000000000..fb40819b5
--- /dev/null
+++ b/src/splat/splat-kmem.c
@@ -0,0 +1,365 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KMEM 0x0100
+#define KZT_KMEM_NAME "kmem"
+#define KZT_KMEM_DESC "Kernel Malloc/Slab Tests"
+
+#define KZT_KMEM_TEST1_ID 0x0101
+#define KZT_KMEM_TEST1_NAME "kmem_alloc"
+#define KZT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
+
+#define KZT_KMEM_TEST2_ID 0x0102
+#define KZT_KMEM_TEST2_NAME "kmem_zalloc"
+#define KZT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
+
+#define KZT_KMEM_TEST3_ID 0x0103
+#define KZT_KMEM_TEST3_NAME "slab_alloc"
+#define KZT_KMEM_TEST3_DESC "Slab constructor/destructor test"
+
+#define KZT_KMEM_TEST4_ID 0x0104
+#define KZT_KMEM_TEST4_NAME "slab_reap"
+#define KZT_KMEM_TEST4_DESC "Slab reaping test"
+
+#define KZT_KMEM_ALLOC_COUNT 10
+/* XXX - This test may fail under tight memory conditions */
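+/* Exercises power-of-two allocation sizes from PAGE_SIZE through
+ * 8*PAGE_SIZE; the size doubles after each pass and the loop exits
+ * before reaching 16*PAGE_SIZE. */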
+static int
+kzt_kmem_test1(struct file *file, void *arg)
+{
+ void *ptr[KZT_KMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, count, rc = 0;
+
+ while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ count = 0;
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ ptr[i] = kmem_alloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ kmem_free(ptr[i], size);
+
+ kzt_vprint(file, KZT_KMEM_TEST1_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, KZT_KMEM_ALLOC_COUNT);
+ if (count != KZT_KMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ return rc;
+}
+
+static int
+kzt_kmem_test2(struct file *file, void *arg)
+{
+ void *ptr[KZT_KMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, j, count, rc = 0;
+
+ while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ count = 0;
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ ptr[i] = kmem_zalloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ /* Ensure buffer has been zero filled */
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ for (j = 0; j < size; j++) {
+ if (((char *)ptr[i])[j] != '\0') {
+ kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+ "%d-byte allocation was "
+ "not zeroed\n", size);
+ rc = -EFAULT;
+ }
+ }
+ }
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ kmem_free(ptr[i], size);
+
+ kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, KZT_KMEM_ALLOC_COUNT);
+ if (count != KZT_KMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ return rc;
+}
+
+#define KZT_KMEM_TEST_MAGIC 0x004488CCUL
+#define KZT_KMEM_CACHE_NAME "kmem_test"
+#define KZT_KMEM_CACHE_SIZE 256
+#define KZT_KMEM_OBJ_COUNT 128
+#define KZT_KMEM_OBJ_RECLAIM 64
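+
+/* With KZT_KMEM_OBJ_COUNT = 128 objects allocated and KZT_KMEM_OBJ_RECLAIM
+ * = 64 freed by the reclaim callback, the expected post-reap count in
+ * test 4 below is (128 - 64) * 100 / 128 = 50%, checked against a
+ * 40%-60% acceptance band. */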
+
+typedef struct kmem_cache_data {
+ char kcd_buf[KZT_KMEM_CACHE_SIZE];
+ unsigned long kcd_magic;
+ int kcd_flag;
+} kmem_cache_data_t;
+
+typedef struct kmem_cache_priv {
+ unsigned long kcp_magic;
+ struct file *kcp_file;
+ kmem_cache_t *kcp_cache;
+ kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
+ int kcp_count;
+ int kcp_rc;
+} kmem_cache_priv_t;
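+
+/* kcp_count tracks constructed-but-not-destroyed objects: the constructor
+ * below increments it and the destructor decrements it, so a non-zero
+ * count after kmem_cache_destroy() indicates a missed destructor call. */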
+
+static int
+kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
+{
+ kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+ if (kcd) {
+ memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
+ kcd->kcd_flag = 1;
+
+ if (kcp) {
+ kcd->kcd_magic = kcp->kcp_magic;
+ kcp->kcp_count++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+kzt_kmem_test34_destructor(void *ptr, void *priv)
+{
+ kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+ if (kcd) {
+ memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
+ kcd->kcd_flag = 0;
+
+ if (kcp)
+ kcp->kcp_count--;
+ }
+
+ return;
+}
+
+static int
+kzt_kmem_test3(struct file *file, void *arg)
+{
+ kmem_cache_t *cache = NULL;
+ kmem_cache_data_t *kcd = NULL;
+ kmem_cache_priv_t kcp;
+ int rc = 0, max;
+
+ kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+ kcp.kcp_file = file;
+ kcp.kcp_count = 0;
+ kcp.kcp_rc = 0;
+
+ cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
+ kzt_kmem_test34_constructor,
+ kzt_kmem_test34_destructor,
+ NULL, &kcp, NULL, 0);
+ if (!cache) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+ return -ENOMEM;
+ }
+
+ kcd = kmem_cache_alloc(cache, 0);
+ if (!kcd) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Unable to allocate from '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (!kcd->kcd_flag) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+			   "Failed to run constructor for '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (kcd->kcd_magic != kcp.kcp_magic) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Failed to pass private data to constructor "
+ "for '%s'\n", KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ max = kcp.kcp_count;
+
+	/* Destructors run lazily so it is hard to check correctness here.
+	 * We assume that if it doesn't crash the free worked properly. */
+ kmem_cache_free(cache, kcd);
+
+	/* Destroy the entire cache, which forces the destructors to run,
+	 * so we can verify one was called for every object. */
+ kmem_cache_destroy(cache);
+ if (kcp.kcp_count) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Failed to run destructor on all slab objects "
+ "for '%s'\n", KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ }
+
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "%d allocated/destroyed objects for '%s'\n",
+ max, KZT_KMEM_CACHE_NAME);
+
+ return rc;
+
+out_free:
+ if (kcd)
+ kmem_cache_free(cache, kcd);
+ kmem_cache_destroy(cache);
+ return rc;
+}
+
+static void
+kzt_kmem_test4_reclaim(void *priv)
+{
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+ int i;
+
+ kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
+ "Reaping %d objects from '%s'\n",
+ KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
+ for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
+ if (kcp->kcp_kcd[i]) {
+ kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
+ kcp->kcp_kcd[i] = NULL;
+ }
+ }
+
+ return;
+}
+
+static int
+kzt_kmem_test4(struct file *file, void *arg)
+{
+ kmem_cache_t *cache;
+ kmem_cache_priv_t kcp;
+ int i, rc = 0, max, reclaim_percent, target_percent;
+
+ kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+ kcp.kcp_file = file;
+ kcp.kcp_count = 0;
+ kcp.kcp_rc = 0;
+
+ cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
+ sizeof(kmem_cache_data_t), 0,
+ kzt_kmem_test34_constructor,
+ kzt_kmem_test34_destructor,
+ kzt_kmem_test4_reclaim, &kcp, NULL, 0);
+ if (!cache) {
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+ return -ENOMEM;
+ }
+
+ kcp.kcp_cache = cache;
+
+ for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
+		/* Not every allocation needs to succeed */
+ kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
+ if (!kcp.kcp_kcd[i]) {
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "Unable to allocate from '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ }
+ }
+
+	max = kcp.kcp_count;
+
+	/* Guard against a divide-by-zero below if every allocation failed */
+	if (max == 0) {
+		kmem_cache_destroy(cache);
+		return -ENOMEM;
+	}
+
+ /* Force shrinker to run */
+ kmem_reap();
+
+	/* Reap the freed objects now; this ensures the destructors are run */
+ kmem_cache_reap_now(cache);
+
+ reclaim_percent = ((kcp.kcp_count * 100) / max);
+ target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
+ KZT_KMEM_OBJ_COUNT);
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "%d%% (%d/%d) of previous size, target of "
+ "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
+ max, target_percent - 10, target_percent + 10,
+ KZT_KMEM_CACHE_NAME);
+ if ((reclaim_percent < target_percent - 10) ||
+ (reclaim_percent > target_percent + 10))
+ rc = -EINVAL;
+
+ /* Cleanup our mess */
+ for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
+ if (kcp.kcp_kcd[i])
+ kmem_cache_free(cache, kcp.kcp_kcd[i]);
+
+ kmem_cache_destroy(cache);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_kmem_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_KMEM;
+
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
+ KZT_KMEM_TEST1_ID, kzt_kmem_test1);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
+ KZT_KMEM_TEST2_ID, kzt_kmem_test2);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
+ KZT_KMEM_TEST3_ID, kzt_kmem_test3);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
+ KZT_KMEM_TEST4_ID, kzt_kmem_test4);
+
+ return sub;
+}
+
+void
+kzt_kmem_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_kmem_id(void) {
+ return KZT_SUBSYSTEM_KMEM;
+}
diff --git a/src/splat/splat-mutex.c b/src/splat/splat-mutex.c
new file mode 100644
index 000000000..254a40de2
--- /dev/null
+++ b/src/splat/splat-mutex.c
@@ -0,0 +1,324 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_MUTEX 0x0400
+#define KZT_MUTEX_NAME "mutex"
+#define KZT_MUTEX_DESC "Kernel Mutex Tests"
+
+#define KZT_MUTEX_TEST1_ID 0x0401
+#define KZT_MUTEX_TEST1_NAME "tryenter"
+#define KZT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
+
+#define KZT_MUTEX_TEST2_ID 0x0402
+#define KZT_MUTEX_TEST2_NAME "race"
+#define KZT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
+
+#define KZT_MUTEX_TEST3_ID 0x0403
+#define KZT_MUTEX_TEST3_NAME "owned"
+#define KZT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
+
+#define KZT_MUTEX_TEST4_ID 0x0404
+#define KZT_MUTEX_TEST4_NAME "owner"
+#define KZT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
+
+#define KZT_MUTEX_TEST_MAGIC 0x115599DDUL
+#define KZT_MUTEX_TEST_NAME "mutex_test"
+#define KZT_MUTEX_TEST_WORKQ "mutex_wq"
+#define KZT_MUTEX_TEST_COUNT 128
+
+typedef struct mutex_priv {
+ unsigned long mp_magic;
+ struct file *mp_file;
+ struct work_struct mp_work[KZT_MUTEX_TEST_COUNT];
+ kmutex_t mp_mtx;
+ int mp_rc;
+} mutex_priv_t;
+
+
+static void
+kzt_mutex_test1_work(void *priv)
+{
+ mutex_priv_t *mp = (mutex_priv_t *)priv;
+
+ ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+ mp->mp_rc = 0;
+
+ if (!mutex_tryenter(&mp->mp_mtx))
+ mp->mp_rc = -EBUSY;
+}
+
+static int
+kzt_mutex_test1(struct file *file, void *arg)
+{
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ mutex_priv_t *mp;
+ int rc = 0;
+
+ mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp == NULL)
+ return -ENOMEM;
+
+ wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
+ if (wq == NULL) {
+ rc = -ENOMEM;
+ goto out2;
+ }
+
+ mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+ mutex_enter(&(mp->mp_mtx));
+
+ mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+ mp->mp_file = file;
+ INIT_WORK(&work, kzt_mutex_test1_work, mp);
+
+	/* Schedule a work item which will try to acquire the mutex via
+	 * mutex_tryenter() while it is held.  This should fail, and the
+	 * work item will indicate this status in the passed private data. */
+ if (!queue_work(wq, &work)) {
+ mutex_exit(&(mp->mp_mtx));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ flush_workqueue(wq);
+ mutex_exit(&(mp->mp_mtx));
+
+	/* Work item successfully acquired mutex, very bad! */
+ if (mp->mp_rc != -EBUSY) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+	kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+	       "mutex_tryenter() correctly failed when mutex held\n");
+
+	/* Schedule a work item which will try to acquire the mutex via
+	 * mutex_tryenter() while it is not held.  This should succeed, and
+	 * the work item will indicate this status in the private data. */
+ if (!queue_work(wq, &work)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ flush_workqueue(wq);
+
+	/* Work item failed to acquire mutex, very bad! */
+ if (mp->mp_rc != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+	kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+	       "mutex_tryenter() correctly succeeded when mutex not held\n");
+out:
+ mutex_destroy(&(mp->mp_mtx));
+ destroy_workqueue(wq);
+out2:
+ kfree(mp);
+
+ return rc;
+}
+
+static void
+kzt_mutex_test2_work(void *priv)
+{
+ mutex_priv_t *mp = (mutex_priv_t *)priv;
+ int rc;
+
+ ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+
+	/* Read the value before sleeping and write it after we wake up to
+	 * maximize the chance of a race if mutexes are not working properly */
+ mutex_enter(&mp->mp_mtx);
+ rc = mp->mp_rc;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 100); /* 1/100 of a second */
+ mp->mp_rc = rc + 1;
+ mutex_exit(&mp->mp_mtx);
+}
+
+static int
+kzt_mutex_test2(struct file *file, void *arg)
+{
+ struct workqueue_struct *wq;
+ mutex_priv_t *mp;
+ int i, rc = 0;
+
+ mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp == NULL)
+ return -ENOMEM;
+
+	/* Create a thread per CPU; items on the queue will race */
+ wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
+ if (wq == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+ mp->mp_file = file;
+ mp->mp_rc = 0;
+
+	/* Schedule N work items to the work queue, each of which enters the
+	 * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
+	 * box these work items will be handled by all available CPUs.  The
+	 * mutex is instrumented such that if any two processors are in the
+	 * critical region at the same time the system will panic.  If the
+	 * mutex is implemented correctly this will never happen; that's a pass. */
+ for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
+ INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
+
+ if (!queue_work(wq, &(mp->mp_work[i]))) {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
+ "Failed to queue work id %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ flush_workqueue(wq);
+
+ if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+ "correctly entered/exited the mutex %d times\n",
+ num_online_cpus(), mp->mp_rc);
+ } else {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+ "only processed %d/%d mutex work items\n",
+ num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
+ rc = -EINVAL;
+ }
+
+ mutex_destroy(&(mp->mp_mtx));
+ destroy_workqueue(wq);
+out:
+ kfree(mp);
+
+ return rc;
+}
+
+static int
+kzt_mutex_test3(struct file *file, void *arg)
+{
+ kmutex_t mtx;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mutex_enter(&mtx);
+
+ /* Mutex should be owned by current */
+ if (!mutex_owned(&mtx)) {
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, mtx.km_owner ? mtx.km_owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ mutex_exit(&mtx);
+
+ /* Mutex should not be owned by any task */
+ if (mutex_owned(&mtx)) {
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+ "not be owned but is owned by pid %d\n",
+ mtx.km_owner ? mtx.km_owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
+ "Correct mutex_owned() behavior\n");
+out:
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+static int
+kzt_mutex_test4(struct file *file, void *arg)
+{
+ kmutex_t mtx;
+ kthread_t *owner;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mutex_enter(&mtx);
+
+ /* Mutex should be owned by current */
+ owner = mutex_owner(&mtx);
+ if (current != owner) {
+		kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ mutex_exit(&mtx);
+
+ /* Mutex should not be owned by any task */
+ owner = mutex_owner(&mtx);
+ if (owner) {
+		kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+	kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "%s",
+ "Correct mutex_owner() behavior\n");
+out:
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_mutex_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_MUTEX;
+
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
+ KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
+ KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
+ KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
+ KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
+
+ return sub;
+}
+
+void
+kzt_mutex_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_mutex_id(void) {
+ return KZT_SUBSYSTEM_MUTEX;
+}
diff --git a/src/splat/splat-random.c b/src/splat/splat-random.c
new file mode 100644
index 000000000..abb654063
--- /dev/null
+++ b/src/splat/splat-random.c
@@ -0,0 +1,104 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KRNG 0x0300
+#define KZT_KRNG_NAME "krng"
+#define KZT_KRNG_DESC "Kernel Random Number Generator Tests"
+
+#define KZT_KRNG_TEST1_ID 0x0301
+#define KZT_KRNG_TEST1_NAME "freq"
+#define KZT_KRNG_TEST1_DESC "Frequency Test"
+
+#define KRNG_NUM_BITS 1048576
+#define KRNG_NUM_BYTES (KRNG_NUM_BITS >> 3)
+#define KRNG_NUM_BITS_DIV2 (KRNG_NUM_BITS >> 1)
+#define KRNG_ERROR_RANGE 2097
+
+/* Random Number Generator Tests
+   There can be many more tests on the quality of the
+   random number generator.  For now we are only
+   testing the frequency of particular bits.
+   We could also test consecutive sequences,
+   randomness within a particular block, etc.,
+   but that is probably not necessary for our purposes */
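+
+/* A back-of-the-envelope reading of KRNG_ERROR_RANGE (our interpretation,
+ * not taken from the original source): for 2^20 fair bits the number of
+ * ones is binomial with mean 2^19 = 524288 and standard deviation
+ * sqrt(2^20)/2 = 512, so the allowed deviation of 2097 is roughly four
+ * standard deviations (4 * 512 = 2048). */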
+
+static int
+kzt_krng_test1(struct file *file, void *arg)
+{
+ uint8_t *buf;
+ int i, j, diff, num = 0, rc = 0;
+
+ buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
+
+ /* Always succeeds */
+ random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
+
+ for (i = 0; i < KRNG_NUM_BYTES; i++) {
+ uint8_t tmp = buf[i];
+ for (j = 0; j < 8; j++) {
+ uint8_t tmp2 = ((tmp >> j) & 0x01);
+ if (tmp2 == 1) {
+ num++;
+ }
+ }
+ }
+
+ kfree(buf);
+
+ diff = KRNG_NUM_BITS_DIV2 - num;
+ if (diff < 0)
+ diff *= -1;
+
+ kzt_print(file, "Test 1 Number of ones: %d\n", num);
+ kzt_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
+ diff, KRNG_ERROR_RANGE);
+
+ if (diff > KRNG_ERROR_RANGE)
+ rc = -ERANGE;
+out:
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_krng_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_KRNG_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_KRNG_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_KRNG;
+
+ KZT_TEST_INIT(sub, KZT_KRNG_TEST1_NAME, KZT_KRNG_TEST1_DESC,
+ KZT_KRNG_TEST1_ID, kzt_krng_test1);
+
+ return sub;
+}
+
+void
+kzt_krng_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+
+ KZT_TEST_FINI(sub, KZT_KRNG_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_krng_id(void) {
+ return KZT_SUBSYSTEM_KRNG;
+}
diff --git a/src/splat/splat-rwlock.c b/src/splat/splat-rwlock.c
new file mode 100644
index 000000000..9820937c3
--- /dev/null
+++ b/src/splat/splat-rwlock.c
@@ -0,0 +1,764 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_RWLOCK 0x0700
+#define KZT_RWLOCK_NAME "rwlock"
+#define KZT_RWLOCK_DESC "Kernel RW Lock Tests"
+
+#define KZT_RWLOCK_TEST1_ID 0x0701
+#define KZT_RWLOCK_TEST1_NAME "rwtest1"
+#define KZT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"
+
+#define KZT_RWLOCK_TEST2_ID 0x0702
+#define KZT_RWLOCK_TEST2_NAME "rwtest2"
+#define KZT_RWLOCK_TEST2_DESC "Multiple Writers"
+
+#define KZT_RWLOCK_TEST3_ID 0x0703
+#define KZT_RWLOCK_TEST3_NAME "rwtest3"
+#define KZT_RWLOCK_TEST3_DESC "Owner Verification"
+
+#define KZT_RWLOCK_TEST4_ID 0x0704
+#define KZT_RWLOCK_TEST4_NAME "rwtest4"
+#define KZT_RWLOCK_TEST4_DESC "Trylock Test"
+
+#define KZT_RWLOCK_TEST5_ID 0x0705
+#define KZT_RWLOCK_TEST5_NAME "rwtest5"
+#define KZT_RWLOCK_TEST5_DESC "Write Downgrade Test"
+
+#define KZT_RWLOCK_TEST6_ID 0x0706
+#define KZT_RWLOCK_TEST6_NAME "rwtest6"
+#define KZT_RWLOCK_TEST6_DESC "Read Upgrade Test"
+
+#define KZT_RWLOCK_TEST_MAGIC 0x115599DDUL
+#define KZT_RWLOCK_TEST_NAME "rwlock_test"
+#define KZT_RWLOCK_TEST_COUNT 8
+
+#define KZT_RWLOCK_RELEASE_INIT 0
+#define KZT_RWLOCK_RELEASE_WRITERS 1
+#define KZT_RWLOCK_RELEASE_READERS 2
+
+typedef struct rw_priv {
+ unsigned long rw_magic;
+ struct file *rw_file;
+ krwlock_t rwl;
+ spinlock_t rw_priv_lock;
+ wait_queue_head_t rw_waitq;
+ atomic_t rw_completed;
+ atomic_t rw_acquired;
+ atomic_t rw_waiters;
+ atomic_t rw_release;
+} rw_priv_t;
+
+typedef struct rw_thr {
+ int rwt_id;
+ const char *rwt_name;
+ rw_priv_t *rwt_rwp;
+ int rwt_rc;
+} rw_thr_t;
+
+static inline void
+kzt_rwlock_sleep(signed long delay)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay);
+}
+
+#define kzt_rwlock_lock_and_test(lock,test) \
+({ \
+ int ret = 0; \
+ \
+ spin_lock(lock); \
+ ret = (test) ? 1 : 0; \
+ spin_unlock(lock); \
+ ret; \
+})
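+
+/* The macro samples the shared counters under rw_priv_lock so each check
+ * sees a consistent snapshot; the lock is dropped before the result is
+ * returned, so the value is only a hint, which is fine for the polling
+ * loops and wait_event conditions below. */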
+
+void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
+{
+ rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
+ rwv->rw_file = file;
+ spin_lock_init(&rwv->rw_priv_lock);
+ init_waitqueue_head(&rwv->rw_waitq);
+ atomic_set(&rwv->rw_completed, 0);
+ atomic_set(&rwv->rw_acquired, 0);
+ atomic_set(&rwv->rw_waiters, 0);
+ atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);
+
+ /* Initialize the read/write lock */
+ rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
+}
+
+int
+kzt_rwlock_test1_writer_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the semaphore for writing;
+	 * release it when we are told to */
+ rw_enter(&rwv->rwl, RW_WRITER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can release the write lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_WRITERS));
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ return 0;
+}
+
+int
+kzt_rwlock_test1_reader_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the semaphore until
+	 * someone else has already acquired it */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_acquired) > 0));
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the semaphore for reading;
+	 * release it when we are told to */
+ rw_enter(&rwv->rwl, RW_READER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can release the read lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_READERS));
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ return 0;
+}
+
+static int
+kzt_rwlock_test1(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
+ rwt[i].rwt_rc = 0;
+
+ /* The first thread will be a writer */
+ if (i == 0) {
+ pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+ &rwt[i], 0);
+ } else {
+ pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
+ &rwt[i], 0);
+ }
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Once the writer has the lock, release the readers */
+	while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+				atomic_read(&rwv.rw_acquired) <= 0)) {
+		kzt_rwlock_sleep(1 * HZ);
+	}
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Ensure that there is only 1 writer and all readers are waiting */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 1 ||
+ atomic_read(&rwv.rw_waiters) !=
+ KZT_RWLOCK_TEST_COUNT - 1)) {
+
+ kzt_rwlock_sleep(1 * HZ);
+ }
+	/* Release the writer */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Now ensure that there are multiple reader threads holding the lock */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) <= 1)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ /* Release the readers */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+		kzt_rwlock_sleep(1 * HZ);
+	}
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+int
+kzt_rwlock_test2_writer_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Increment the waiters count even though we are not quite about
+	 * to call rw_enter().  This is harmless since it will more than
+	 * likely be true by the time we simulate work later on */
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can acquire the write lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_WRITERS));
+
+ /* Take the semaphore for writing */
+ rw_enter(&rwv->rwl, RW_WRITER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Give up the processor for a bit to simulate
+ * doing some work while taking the write lock */
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+ /* Ensure that we are the only one writing */
+ if (atomic_read(&rwv->rw_acquired) > 1) {
+ rwt->rwt_rc = 1;
+ } else {
+ rwt->rwt_rc = 0;
+ }
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ rw_exit(&rwv->rwl);
+
+
+ return 0;
+}
+
+static int
+kzt_rwlock_test2(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
+ rwt[i].rwt_rc = 0;
+
+ /* The first thread will be a writer */
+ pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
+ &rwt[i], 0);
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Wait for writers to get queued up */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+	/* Release the writers */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+
+ /* If any of the write threads ever acquired the lock
+ * while another thread had it, make sure we return
+ * an error */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ if (rwt[i].rwt_rc) {
+ rc++;
+ }
+ }
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test3(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for writing */
+ rw_enter(&rwv.rwl, RW_WRITER);
+ owner = rw_owner(&rwv.rwl);
+ if (current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Take the rwlock for reading.
+ * Should not have an owner */
+ rw_enter(&rwv.rwl, RW_READER);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+int
+kzt_rwlock_test4_reader_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the semaphore until
+	 * someone else has already acquired it */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_acquired) > 0));
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Try to take the semaphore for reading;
+	 * it should fail while the writer holds it */
+ rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
+
+	/* If we acquired the lock here it is a
+	 * failure, since the writer should be
+	 * holding the lock */
+ if (rwt->rwt_rc == 1) {
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ }
+ /* Here we know we didn't block and didn't
+ * acquire the rwlock for reading */
+ else {
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread could not acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+ }
+
+ return 0;
+}
+
+static int
+kzt_rwlock_test4(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
+ rwt[i].rwt_rc = 0;
+
+ /* The first thread will be a writer */
+ if (i == 0) {
+ /* We can reuse the test1 writer thread here */
+ pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+ &rwt[i], 0);
+ } else {
+ pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
+ &rwt[i], 0);
+ }
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Once the writer has the lock, release the readers */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) <= 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Make sure that the reader threads complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_completed) != KZT_RWLOCK_TEST_COUNT - 1)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ /* Release the writer */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+
+ /* If any of the reader threads ever acquired the lock
+ * while another thread had it, make sure we return
+ * an error since the rw_tryenter() should have failed */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ if (rwt[i].rwt_rc) {
+ rc++;
+ }
+ }
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test5(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for writing */
+ rw_enter(&rwv.rwl, RW_WRITER);
+ owner = rw_owner(&rwv.rwl);
+ if (current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure that the downgrade
+ * worked properly */
+ rw_downgrade(&rwv.rwl);
+
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test6(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for reading */
+ rw_enter(&rwv.rwl, RW_READER);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure that the upgrade
+ * worked properly */
+ rc = !rw_tryupgrade(&rwv.rwl);
+
+ owner = rw_owner(&rwv.rwl);
+ if (rc || current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d "
+ "trylock rc %d\n",
+ current->pid, owner ? owner->pid : -1, rc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_rwlock_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_RWLOCK;
+
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
+ KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
+ KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
+ KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
+ KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
+ KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
+ KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);
+
+ return sub;
+}
+
+void
+kzt_rwlock_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
+ kfree(sub);
+}
+
+int
+kzt_rwlock_id(void) {
+ return KZT_SUBSYSTEM_RWLOCK;
+}
diff --git a/src/splat/splat-taskq.c b/src/splat/splat-taskq.c
new file mode 100644
index 000000000..614e7136c
--- /dev/null
+++ b/src/splat/splat-taskq.c
@@ -0,0 +1,238 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_TASKQ 0x0200
+#define KZT_TASKQ_NAME "taskq"
+#define KZT_TASKQ_DESC "Kernel Task Queue Tests"
+
+#define KZT_TASKQ_TEST1_ID 0x0201
+#define KZT_TASKQ_TEST1_NAME "single"
+#define KZT_TASKQ_TEST1_DESC "Single task queue, single task"
+
+#define KZT_TASKQ_TEST2_ID 0x0202
+#define KZT_TASKQ_TEST2_NAME "multiple"
+#define KZT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
+
+typedef struct kzt_taskq_arg {
+ int flag;
+ int id;
+ struct file *file;
+ const char *name;
+} kzt_taskq_arg_t;
+
+/* Validation Test 1 - Create a taskq, queue a task, wait until the
+ * task completes, ensure the task ran properly, then clean up the taskq.
+ */
+static void
+kzt_taskq_test1_func(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' setting flag\n",
+ tq_arg->name, sym2str(kzt_taskq_test1_func));
+ tq_arg->flag = 1;
+}
+
+static int
+kzt_taskq_test1(struct file *file, void *arg)
+{
+ taskq_t *tq;
+ taskqid_t id;
+ kzt_taskq_arg_t tq_arg;
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
+ KZT_TASKQ_TEST1_NAME);
+ if ((tq = taskq_create(KZT_TASKQ_TEST1_NAME, 1, 0, 0, 0, 0)) == NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' create failed\n",
+ KZT_TASKQ_TEST1_NAME);
+ return -EINVAL;
+ }
+
+ tq_arg.flag = 0;
+ tq_arg.id = 0;
+ tq_arg.file = file;
+ tq_arg.name = KZT_TASKQ_TEST1_NAME;
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' dispatching\n",
+ tq_arg.name, sym2str(kzt_taskq_test1_func));
+ if ((id = taskq_dispatch(tq, kzt_taskq_test1_func, &tq_arg, 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' dispatch failed\n",
+ tq_arg.name, sym2str(kzt_taskq_test1_func));
+ taskq_destory(tq);
+ return -EINVAL;
+ }
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
+ tq_arg.name);
+ taskq_wait(tq);
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
+ tq_arg.name);
+ taskq_destory(tq);
+
+ return (tq_arg.flag) ? 0 : -EINVAL;
+}
+
+/* Validation Test 2 - Create multiple taskq's, each with multiple tasks,
+ * wait until all tasks complete, ensure all tasks ran properly and in
+ * the correct order, then clean up the taskq's.
+ */
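+/* Ordering check: each queue starts with flag = i; func1 doubles it and
+ * func2 then adds one, so in-order execution yields 2*i + 1 while the
+ * reverse order would yield (i + 1) * 2 = 2*i + 2. */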
+static void
+kzt_taskq_test2_func1(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
+ tq_arg->name, tq_arg->id,
+ sym2str(kzt_taskq_test2_func1),
+ tq_arg->flag * 2, tq_arg->flag);
+ tq_arg->flag *= 2;
+}
+
+static void
+kzt_taskq_test2_func2(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
+ tq_arg->name, tq_arg->id,
+ sym2str(kzt_taskq_test2_func2),
+ tq_arg->flag + 1, tq_arg->flag);
+ tq_arg->flag += 1;
+}
+
+#define TEST2_TASKQS 8
+static int
+kzt_taskq_test2(struct file *file, void *arg) {
+ taskq_t *tq[TEST2_TASKQS] = { NULL };
+ taskqid_t id;
+ kzt_taskq_arg_t tq_args[TEST2_TASKQS];
+ int i, rc = 0;
+
+ for (i = 0; i < TEST2_TASKQS; i++) {
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
+ "creating\n", KZT_TASKQ_TEST2_NAME, i);
+ if ((tq[i] = taskq_create(KZT_TASKQ_TEST2_NAME,
+ 1, 0, 0, 0, 0)) == NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' create failed\n",
+ KZT_TASKQ_TEST2_NAME, i);
+ rc = -EINVAL;
+ break;
+ }
+
+ tq_args[i].flag = i;
+ tq_args[i].id = i;
+ tq_args[i].file = file;
+ tq_args[i].name = KZT_TASKQ_TEST2_NAME;
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatching\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func1));
+ if ((id = taskq_dispatch(
+ tq[i], kzt_taskq_test2_func1, &tq_args[i], 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatch "
+ "failed\n", tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func1));
+ rc = -EINVAL;
+ break;
+ }
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatching\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func2));
+ if ((id = taskq_dispatch(
+ tq[i], kzt_taskq_test2_func2, &tq_args[i], 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatch failed\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func2));
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+ /* When rc is set we're effectively just doing cleanup here, so
+ * ignore new errors in that case. They just cause noise. */
+ for (i = 0; i < TEST2_TASKQS; i++) {
+ if (tq[i] != NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' waiting\n",
+ tq_args[i].name, tq_args[i].id);
+ taskq_wait(tq[i]);
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+				   "Taskq '%s/%d' destroying\n",
+ tq_args[i].name, tq_args[i].id);
+ taskq_destory(tq[i]);
+
+ if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' processed tasks "
+ "out of order; %d != %d\n",
+ tq_args[i].name, tq_args[i].id,
+ tq_args[i].flag, i * 2 + 1);
+ rc = -EINVAL;
+ } else {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' processed tasks "
+ "in the correct order; %d == %d\n",
+ tq_args[i].name, tq_args[i].id,
+ tq_args[i].flag, i * 2 + 1);
+ }
+ }
+ }
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_taskq_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_TASKQ_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_TASKQ_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_TASKQ;
+
+ KZT_TEST_INIT(sub, KZT_TASKQ_TEST1_NAME, KZT_TASKQ_TEST1_DESC,
+ KZT_TASKQ_TEST1_ID, kzt_taskq_test1);
+ KZT_TEST_INIT(sub, KZT_TASKQ_TEST2_NAME, KZT_TASKQ_TEST2_DESC,
+ KZT_TASKQ_TEST2_ID, kzt_taskq_test2);
+
+ return sub;
+}
+
+void
+kzt_taskq_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_TASKQ_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_TASKQ_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_taskq_id(void)
+{
+ return KZT_SUBSYSTEM_TASKQ;
+}
diff --git a/src/splat/splat-thread.c b/src/splat/splat-thread.c
new file mode 100644
index 000000000..0741db1fa
--- /dev/null
+++ b/src/splat/splat-thread.c
@@ -0,0 +1,116 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_THREAD 0x0600
+#define KZT_THREAD_NAME "thread"
+#define KZT_THREAD_DESC "Kernel Thread Tests"
+
+#define KZT_THREAD_TEST1_ID 0x0601
+#define KZT_THREAD_TEST1_NAME "create"
+#define KZT_THREAD_TEST1_DESC "Validate thread creation and destruction"
+
+#define KZT_THREAD_TEST_MAGIC 0x4488CC00UL
+
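+/* Descriptive comment (added): private state shared between the test and
+ * its worker thread; the magic value lets the worker sanity check the
+ * pointer it receives. */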
+typedef struct thread_priv {
+ unsigned long tp_magic;
+ struct file *tp_file;
+ spinlock_t tp_lock;
+ wait_queue_head_t tp_waitq;
+ int tp_rc;
+} thread_priv_t;
+
+
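+/* Descriptive comment (added): worker thread entry point.  Flag
+ * completion under the lock, wake the waiter, then terminate via
+ * thread_exit() rather than returning. */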
+static void
+kzt_thread_work(void *priv)
+{
+ thread_priv_t *tp = (thread_priv_t *)priv;
+
+ spin_lock(&tp->tp_lock);
+ ASSERT(tp->tp_magic == KZT_THREAD_TEST_MAGIC);
+ tp->tp_rc = 1;
+
+ spin_unlock(&tp->tp_lock);
+ wake_up(&tp->tp_waitq);
+
+ thread_exit();
+}
+
+static int
+kzt_thread_test1(struct file *file, void *arg)
+{
+ thread_priv_t tp;
+ DEFINE_WAIT(wait);
+ kthread_t *thr;
+ int rc = 0;
+
+ tp.tp_magic = KZT_THREAD_TEST_MAGIC;
+ tp.tp_file = file;
+ spin_lock_init(&tp.tp_lock);
+ init_waitqueue_head(&tp.tp_waitq);
+ tp.tp_rc = 0;
+
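+	/* Descriptive comment (added): take the lock before creating the
+	 * thread so the worker cannot set tp_rc and issue its wakeup
+	 * before we are ready to wait. */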
+ spin_lock(&tp.tp_lock);
+
+ thr = (kthread_t *)thread_create(NULL, 0, kzt_thread_work, &tp, 0,
+ (proc_t *) &p0, TS_RUN, minclsyspri);
+	/* thread_create() must never fail under Solaris semantics, but
+	 * we check anyway so an error can be reported if the impossible
+	 * ever happens. */
+ if (thr == NULL) {
+ rc = -ESRCH;
+ goto out;
+ }
+
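+	/* Descriptive comment (added): an open-coded wait_event().  The
+	 * lock is dropped only across schedule() so checking tp_rc and
+	 * going to sleep remain race free. */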
+ for (;;) {
+ prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (tp.tp_rc)
+ break;
+
+ spin_unlock(&tp.tp_lock);
+ schedule();
+ spin_lock(&tp.tp_lock);
+ }
+
+ kzt_vprint(file, KZT_THREAD_TEST1_NAME, "%s",
+ "Thread successfully started and exited cleanly\n");
+out:
+ spin_unlock(&tp.tp_lock);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_thread_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_THREAD_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_THREAD_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_THREAD;
+
+ KZT_TEST_INIT(sub, KZT_THREAD_TEST1_NAME, KZT_THREAD_TEST1_DESC,
+ KZT_THREAD_TEST1_ID, kzt_thread_test1);
+
+ return sub;
+}
+
+void
+kzt_thread_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_THREAD_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_thread_id(void)
+{
+ return KZT_SUBSYSTEM_THREAD;
+}
diff --git a/src/splat/splat-time.c b/src/splat/splat-time.c
new file mode 100644
index 000000000..b7d4ce86e
--- /dev/null
+++ b/src/splat/splat-time.c
@@ -0,0 +1,90 @@
+#include <sys/zfs_context.h>
+#include <sys/splat-ctl.h>
+
+#define KZT_SUBSYSTEM_TIME 0x0800
+#define KZT_TIME_NAME "time"
+#define KZT_TIME_DESC "Kernel Time Tests"
+
+#define KZT_TIME_TEST1_ID 0x0801
+#define KZT_TIME_TEST1_NAME "time1"
+#define KZT_TIME_TEST1_DESC "HZ Test"
+
+#define KZT_TIME_TEST2_ID 0x0802
+#define KZT_TIME_TEST2_NAME "time2"
+#define KZT_TIME_TEST2_DESC "Monotonic Test"
+
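+/* Descriptive comment (added): trivial sanity check that reports the
+ * kernel tick rate (hz). */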
+static int
+kzt_time_test1(struct file *file, void *arg)
+{
+ int myhz = hz;
+ kzt_vprint(file, KZT_TIME_TEST1_NAME, "hz is %d\n", myhz);
+ return 0;
+}
+
+static int
+kzt_time_test2(struct file *file, void *arg)
+{
+ hrtime_t tm1, tm2;
+ int i;
+
+ tm1 = gethrtime();
+ kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm1);
+
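+	/* Descriptive comment (added): sample gethrtime() 100 times,
+	 * sleeping roughly 10 ticks between samples, and fail if time
+	 * ever goes backwards. */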
+	for (i = 0; i < 100; i++) {
+		tm2 = gethrtime();
+		kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm2);
+
+		if (tm1 > tm2) {
+			kzt_print(file, "%s: gethrtime() is not giving "
+				  "monotonically increasing values\n",
+				  KZT_TIME_TEST2_NAME);
+			return -EINVAL;
+		}
+		tm1 = tm2;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(10);
+	}
+
+ return 0;
+}
+
+kzt_subsystem_t *
+kzt_time_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_TIME_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_TIME_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_TIME;
+
+ KZT_TEST_INIT(sub, KZT_TIME_TEST1_NAME, KZT_TIME_TEST1_DESC,
+ KZT_TIME_TEST1_ID, kzt_time_test1);
+ KZT_TEST_INIT(sub, KZT_TIME_TEST2_NAME, KZT_TIME_TEST2_DESC,
+ KZT_TIME_TEST2_ID, kzt_time_test2);
+
+ return sub;
+}
+
+void
+kzt_time_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+
+ KZT_TEST_FINI(sub, KZT_TIME_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_TIME_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_time_id(void)
+{
+ return KZT_SUBSYSTEM_TIME;
+}