aboutsummaryrefslogtreecommitdiffstats
path: root/include/sys
diff options
context:
space:
mode:
Diffstat (limited to 'include/sys')
-rw-r--r--include/sys/Makefile.am5
-rw-r--r--include/sys/callb.h46
-rw-r--r--include/sys/cmn_err.h4
-rw-r--r--include/sys/condvar.h202
-rw-r--r--include/sys/cred.h41
-rw-r--r--include/sys/debug.h4
-rw-r--r--include/sys/generic.h74
-rw-r--r--include/sys/kmem.h176
-rw-r--r--include/sys/kstat.h138
-rw-r--r--include/sys/mutex.h121
-rw-r--r--include/sys/param.h4
-rw-r--r--include/sys/random.h38
-rw-r--r--include/sys/rwlock.h224
-rw-r--r--include/sys/spl.h19
-rw-r--r--include/sys/taskq.h87
-rw-r--r--include/sys/thread.h50
-rw-r--r--include/sys/time.h63
-rw-r--r--include/sys/timer.h22
-rw-r--r--include/sys/types.h27
19 files changed, 1345 insertions, 0 deletions
diff --git a/include/sys/Makefile.am b/include/sys/Makefile.am
new file mode 100644
index 000000000..c44748e02
--- /dev/null
+++ b/include/sys/Makefile.am
@@ -0,0 +1,5 @@
+EXTRA_DIST = callb.h cmn_err.h condvar.h cred.h
+EXTRA_DIST += debug.h generic.h kmem.h kstat.h
+EXTRA_DIST += mutex.h param.h random.h rwlock.h
+EXTRA_DIST += spl.h taskq.h thread.h time.h
+EXTRA_DIST += timer.h types.h
diff --git a/include/sys/callb.h b/include/sys/callb.h
new file mode 100644
index 000000000..053ddf500
--- /dev/null
+++ b/include/sys/callb.h
@@ -0,0 +1,46 @@
+#ifndef _SPL_CALLB_H
+#define _SPL_CALLB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/mutex.h>
+
+#define DEBUG_CALLB
+
+#ifdef DEBUG_CALLB	/* assert only when callb debugging is enabled */
+#define CALLB_CPR_ASSERT(cp) BUG_ON(!(MUTEX_HELD((cp)->cc_lockp)));
+#else
+#define CALLB_CPR_ASSERT(cp)
+#endif
+
+
+typedef struct callb_cpr {
+ kmutex_t *cc_lockp;
+} callb_cpr_t;
+
+#define CALLB_CPR_INIT(cp, lockp, func, name) { \
+ (cp)->cc_lockp = lockp; \
+}
+
+#define CALLB_CPR_SAFE_BEGIN(cp) { \
+ CALLB_CPR_ASSERT(cp); \
+}
+
+#define CALLB_CPR_SAFE_END(cp, lockp) { \
+ CALLB_CPR_ASSERT(cp); \
+}
+
+#define CALLB_CPR_EXIT(cp) { \
+ ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
+ mutex_exit((cp)->cc_lockp); \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CALLB_H */
+
diff --git a/include/sys/cmn_err.h b/include/sys/cmn_err.h
new file mode 100644
index 000000000..44ccefc9f
--- /dev/null
+++ b/include/sys/cmn_err.h
@@ -0,0 +1,4 @@
+#ifndef _SPL_CMN_ERR_H
+#define _SPL_CMN_ERR_H
+
+#endif /* _SPL_CMN_ERR_H */
diff --git a/include/sys/condvar.h b/include/sys/condvar.h
new file mode 100644
index 000000000..6a2060fd2
--- /dev/null
+++ b/include/sys/condvar.h
@@ -0,0 +1,202 @@
+#ifndef _SPL_CONDVAR_H
+#define _SPL_CONDVAR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/wait.h>
+
+/* The kcondvar_t struct is protected by mutex taken externally before
+ * calling any of the wait/signal funs, and passed into the wait funs.
+ */
+#define CV_MAGIC 0x346545f4
+#define CV_POISON 0x95
+
+typedef struct {
+ int cv_magic;
+ char *cv_name;
+ wait_queue_head_t cv_event;
+ atomic_t cv_waiters;
+ kmutex_t *cv_mutex; /* only for verification purposes */
+} kcondvar_t;
+
+typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
+
+static __inline__ void
+cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(type != CV_DEFAULT);
+ BUG_ON(arg != NULL);
+
+ cvp->cv_magic = CV_MAGIC;
+ init_waitqueue_head(&cvp->cv_event);
+ atomic_set(&cvp->cv_waiters, 0);
+ cvp->cv_mutex = NULL;
+ cvp->cv_name = NULL;
+
+ if (name) {
+ cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (cvp->cv_name)
+ strcpy(cvp->cv_name, name);
+ }
+}
+
+static __inline__ void
+cv_destroy(kcondvar_t *cvp)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
+ BUG_ON(waitqueue_active(&cvp->cv_event));
+
+ if (cvp->cv_name)
+ kfree(cvp->cv_name);
+
+ memset(cvp, CV_POISON, sizeof(*cvp));
+}
+
+static __inline__ void
+cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
+{
+ DEFINE_WAIT(wait);
+ int flag = 1;
+
+ BUG_ON(cvp == NULL || mtx == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(!mutex_owned(mtx));
+
+ if (cvp->cv_mutex == NULL)
+ cvp->cv_mutex = mtx;
+
+ /* Ensure the same mutex is used by all callers */
+ BUG_ON(cvp->cv_mutex != mtx);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&cvp->cv_event, &wait,
+ TASK_INTERRUPTIBLE);
+ /* Must occur after we are added to the list but only once */
+ if (flag) {
+ atomic_inc(&cvp->cv_waiters);
+ flag = 0;
+ }
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ /* Mutex should be dropped after prepare_to_wait() this
+ * ensures we're linked in to the waiters list and avoids the
+ * race where 'cvp->cv_waiters > 0' but the list is empty. */
+ mutex_exit(mtx);
+ schedule();
+ mutex_enter(mtx);
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ continue;
+
+ break;
+ }
+
+ atomic_dec(&cvp->cv_waiters);
+ finish_wait(&cvp->cv_event, &wait);
+}
+
+/* 'expire_time' argument is an absolute wall clock time in jiffies.
+ * Return value is time left (expire_time - now) or -1 if timeout occurred.
+ */
+static __inline__ clock_t
+cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
+{
+ DEFINE_WAIT(wait);
+ clock_t time_left;
+ int flag = 1;
+
+ BUG_ON(cvp == NULL || mtx == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(!mutex_owned(mtx));
+
+ if (cvp->cv_mutex == NULL)
+ cvp->cv_mutex = mtx;
+
+ /* XXX - Does not handle jiffie wrap properly */
+ time_left = expire_time - jiffies;
+ if (time_left <= 0)
+ return -1;
+
+ /* Ensure the same mutex is used by all callers */
+ BUG_ON(cvp->cv_mutex != mtx);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&cvp->cv_event, &wait,
+ TASK_INTERRUPTIBLE);
+ if (flag) {
+ atomic_inc(&cvp->cv_waiters);
+ flag = 0;
+ }
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ /* Mutex should be dropped after prepare_to_wait() this
+ * ensures we're linked in to the waiters list and avoids the
+ * race where 'cvp->cv_waiters > 0' but the list is empty. */
+ mutex_exit(mtx);
+ time_left = schedule_timeout(time_left);
+ mutex_enter(mtx);
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current)) {
+ if (time_left > 0)
+ continue;
+
+ flush_signals(current);
+ }
+
+ break;
+ }
+
+ atomic_dec(&cvp->cv_waiters);
+ finish_wait(&cvp->cv_event, &wait);
+
+ return (time_left > 0 ? time_left : -1);
+}
+
+static __inline__ void
+cv_signal(kcondvar_t *cvp)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+
+ /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
+ * waiter will be set runable with each call to wake_up().
+ * Additionally wake_up() holds a spin_lock assoicated with
+ * the wait queue to ensure we don't race waking up processes. */
+ if (atomic_read(&cvp->cv_waiters) > 0)
+ wake_up(&cvp->cv_event);
+}
+
+static __inline__ void
+cv_broadcast(kcondvar_t *cvp)
+{
+	BUG_ON(cvp == NULL);
+	BUG_ON(cvp->cv_magic != CV_MAGIC);
+
+	/* Wake_up_all() will wake up all waiters even those which
+	 * have the WQ_FLAG_EXCLUSIVE flag set. */
+	if (atomic_read(&cvp->cv_waiters) > 0)
+		wake_up_all(&cvp->cv_event);
+}
+
+/* Close the extern "C" block opened at the top of this header */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CONDVAR_H */
diff --git a/include/sys/cred.h b/include/sys/cred.h
new file mode 100644
index 000000000..0935a19fa
--- /dev/null
+++ b/include/sys/cred.h
@@ -0,0 +1,41 @@
+#ifndef _SPL_CRED_H
+#define _SPL_CRED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+/* XXX - Portions commented out because we really just want to have the type
+ * defined and the contents aren't nearly so important at the moment. */
+typedef struct cred {
+ uint_t cr_ref; /* reference count */
+ uid_t cr_uid; /* effective user id */
+ gid_t cr_gid; /* effective group id */
+ uid_t cr_ruid; /* real user id */
+ gid_t cr_rgid; /* real group id */
+ uid_t cr_suid; /* "saved" user id (from exec) */
+ gid_t cr_sgid; /* "saved" group id (from exec) */
+ uint_t cr_ngroups; /* number of groups returned by */
+ /* crgroups() */
+#if 0
+ cred_priv_t cr_priv; /* privileges */
+ projid_t cr_projid; /* project */
+ struct zone *cr_zone; /* pointer to per-zone structure */
+ struct ts_label_s *cr_label; /* pointer to the effective label */
+ credsid_t *cr_ksid; /* pointer to SIDs */
+#endif
+ gid_t cr_groups[1]; /* cr_groups size not fixed */
+ /* audit info is defined dynamically */
+ /* and valid only when audit enabled */
+ /* auditinfo_addr_t cr_auinfo; audit info */
+} cred_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CRED_H */
+
diff --git a/include/sys/debug.h b/include/sys/debug.h
new file mode 100644
index 000000000..02f64c2cf
--- /dev/null
+++ b/include/sys/debug.h
@@ -0,0 +1,4 @@
+#ifndef _SPL_DEBUG_H
+#define _SPL_DEBUG_H
+
+#endif /* _SPL_DEBUG_H */
diff --git a/include/sys/generic.h b/include/sys/generic.h
new file mode 100644
index 000000000..2d0989233
--- /dev/null
+++ b/include/sys/generic.h
@@ -0,0 +1,74 @@
+#ifndef _SPL_GENERIC_H
+#define _SPL_GENERIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+
+/* Missing defines.
+ */
+#define INT32_MAX INT_MAX
+#define UINT64_MAX (~0ULL)
+#define NBBY 8
+#define ENOTSUP ENOTSUPP
+#define MAXNAMELEN 256
+#define MAXPATHLEN PATH_MAX
+#define __va_list va_list
+#define _KERNEL __KERNEL__
+#define max_ncpus 64
+
+/* 0..MAX_PRIO-1: Process priority
+ * 0..MAX_RT_PRIO-1: RT priority tasks
+ * MAX_RT_PRIO..MAX_PRIO-1: SCHED_NORMAL tasks
+ *
+ * Treat shim tasks as SCHED_NORMAL tasks
+ */
+#define minclsyspri (MAX_RT_PRIO)
+#define maxclsyspri (MAX_PRIO-1)
+
+#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
+
+#define kred NULL
+
+#define FREAD 1
+#define FWRITE 2
+#define FCREAT O_CREAT
+#define FTRUNC O_TRUNC
+#define FOFFMAX O_LARGEFILE
+#define FSYNC O_SYNC
+#define FDSYNC O_DSYNC
+#define FRSYNC O_RSYNC
+#define FEXCL O_EXCL
+
+#define FNODSYNC 0x10000 /* fsync pseudo flag */
+#define FNOFOLLOW 0x20000 /* don't follow symlinks */
+
+/* Missing macros
+ */
+#define PAGESIZE PAGE_SIZE
+
+/* from Solaris sys/byteorder.h */
+#define BSWAP_8(x)	((x) & 0xff)
+#define BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
+#define BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
+/* Widen before shifting: BSWAP_32() yields an int, and shifting an
+ * int left by 32 is undefined behavior. */
+#define BSWAP_64(x)	(((uint64_t)BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
+
+/* Map some simple functions.
+ */
+#define bzero(ptr,size) memset(ptr,0,size)
+#define bcopy(src,dest,size) memcpy(dest,src,size)
+#define ASSERT(x) BUG_ON(!(x))
+#define ASSERT3U(left,OP,right) BUG_ON(!((left) OP (right)))
+
+/* Missing globals
+ */
+extern int p0;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_GENERIC_H */
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
new file mode 100644
index 000000000..89367c069
--- /dev/null
+++ b/include/sys/kmem.h
@@ -0,0 +1,176 @@
+#ifndef _SPL_KMEM_H
+#define _SPL_KMEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#undef DEBUG_KMEM
+#undef DEBUG_KMEM_UNIMPLEMENTED
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+/*
+ * Memory allocation interfaces
+ */
+#define KM_SLEEP	GFP_KERNEL
+#define KM_NOSLEEP	GFP_ATOMIC
+#undef  KM_PANIC		/* No linux analog */
+/* The kernel flag is __GFP_HIGH; plain GFP_HIGH does not exist */
+#define KM_PUSHPAGE	(GFP_KERNEL | __GFP_HIGH)
+#define KM_VMFLAGS	GFP_LEVEL_MASK
+#define KM_FLAGS	__GFP_BITS_MASK
+
+#ifdef DEBUG_KMEM
+/* Shim layer memory accounting */
+extern atomic_t kmem_alloc_used;
+extern unsigned int kmem_alloc_max;
+#endif
+
+#ifdef DEBUG_KMEM
+#define __kmem_alloc(size, flags, allocator)                                  \
+({	void *_ptr_;                                                          \
+                                                                              \
+	/* Marked unlikely because we should never be doing this */           \
+	if (unlikely((size) > (PAGE_SIZE * 2)))                               \
+		printk("Warning: kmem_alloc(%d, 0x%x) large alloc at %s:%d "  \
+		       "(%d/%d)\n", (int)(size), (int)(flags),                \
+		       __FILE__, __LINE__,                                    \
+		       atomic_read(&kmem_alloc_used), kmem_alloc_max);        \
+                                                                              \
+	_ptr_ = (void *)allocator((size), (flags));                           \
+	if (_ptr_ == NULL) {                                                  \
+		printk("Warning: kmem_alloc(%d, 0x%x) failed at %s:%d "       \
+		       "(%d/%d)\n", (int)(size), (int)(flags),                \
+		       __FILE__, __LINE__,                                    \
+		       atomic_read(&kmem_alloc_used), kmem_alloc_max);        \
+	} else {                                                              \
+		/* Account for the allocation only on success; previously     \
+		 * the accounting sat in the failure branch above. */         \
+		atomic_add((size), &kmem_alloc_used);                         \
+		if (unlikely(atomic_read(&kmem_alloc_used) > kmem_alloc_max)) \
+			kmem_alloc_max = atomic_read(&kmem_alloc_used);       \
+	}                                                                     \
+                                                                              \
+	_ptr_;                                                                \
+})
+
+#define kmem_alloc(size, flags) __kmem_alloc(size, flags, kmalloc)
+#define kmem_zalloc(size, flags) __kmem_alloc(size, flags, kzalloc)
+
+#define kmem_free(ptr, size) \
+({ \
+ BUG_ON(!ptr); \
+ atomic_sub((size), &kmem_alloc_used); \
+ memset(ptr, 0x5a, (size)); /* Poison */ \
+ kfree(ptr); \
+ (ptr) = (void *)0xdeadbeef; \
+})
+
+
+#else
+
+#define kmem_alloc(size, flags) kmalloc(size, flags)
+#define kmem_zalloc(size, flags) kzalloc(size, flags)
+#define kmem_free(ptr, size) kfree(ptr)
+
+#endif /* DEBUG_KMEM */
+
+
+#ifdef DEBUG_KMEM_UNIMPLEMENTED
+static __inline__ void *
+kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
+{
+#error "kmem_alloc_tryhard() not implemented"
+}
+#endif /* DEBUG_KMEM_UNIMPLEMENTED */
+
+/*
+ * Slab allocation interfaces
+ */
+#undef KMC_NOTOUCH /* No linux analog */
+#define KMC_NODEBUG 0x00000000 /* Default beahvior */
+#define KMC_NOMAGAZINE /* No linux analog */
+#define KMC_NOHASH /* No linux analog */
+#define KMC_QCACHE /* No linux analog */
+
+#define KMC_REAP_CHUNK 256
+#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
+
+/* Defined by linux slab.h
+ * typedef struct kmem_cache_s kmem_cache_t;
+ */
+
+/* No linux analog
+ * extern int kmem_ready;
+ * extern pgcnt_t kmem_reapahead;
+ */
+
+#ifdef DEBUG_KMEM_UNIMPLEMENTED
+static __inline__ void kmem_init(void) {
+#error "kmem_init() not implemented"
+}
+
+static __inline__ void kmem_thread_init(void) {
+#error "kmem_thread_init() not implemented"
+}
+
+static __inline__ void kmem_mp_init(void) {
+#error "kmem_mp_init() not implemented"
+}
+
+static __inline__ void kmem_reap_idspace(void) {
+#error "kmem_reap_idspace() not implemented"
+}
+
+static __inline__ size_t kmem_avail(void) {
+#error "kmem_avail() not implemented"
+}
+
+static __inline__ size_t kmem_maxavail(void) {
+#error "kmem_maxavail() not implemented"
+}
+
+static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
+#error "kmem_cache_stat() not implemented"
+}
+#endif /* DEBUG_KMEM_UNIMPLEMENTED */
+
+/* XXX - Used by arc.c to adjust its memory footprint. We may want
+ * to use this hook in the future to adjust behavior based on
+ * debug levels. For now it's safe to always return 0.
+ */
+static __inline__ int
+kmem_debugging(void)
+{
+ return 0;
+}
+
+typedef int (*kmem_constructor_t)(void *, void *, int);
+typedef void (*kmem_destructor_t)(void *, void *);
+typedef void (*kmem_reclaim_t)(void *);
+
+extern kmem_cache_t *
+__kmem_cache_create(char *name, size_t size, size_t align,
+                    kmem_constructor_t constructor,
+                    kmem_destructor_t destructor,
+                    kmem_reclaim_t reclaim,
+                    void *priv, void *vmp, int flags);
+
+/* Storage-class specifier must come first; "void extern" is the
+ * obsolescent ordering and some compilers reject it. */
+extern void __kmem_cache_destroy(kmem_cache_t *cache);
+
+extern void __kmem_reap(void);
+
+#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
+ __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
+#define kmem_cache_destroy(cache) __kmem_cache_destroy(cache)
+#define kmem_cache_alloc(cache, flags) kmem_cache_alloc(cache, flags)
+#define kmem_cache_free(cache, ptr) kmem_cache_free(cache, ptr)
+#define kmem_cache_reap_now(cache) kmem_cache_shrink(cache)
+#define kmem_reap() __kmem_reap()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_KMEM_H */
diff --git a/include/sys/kstat.h b/include/sys/kstat.h
new file mode 100644
index 000000000..0b79a41c0
--- /dev/null
+++ b/include/sys/kstat.h
@@ -0,0 +1,138 @@
+#ifndef _SPL_KSTAT_H
+#define _SPL_KSTAT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+/* XXX - The minimum functionality here is stubbed out but nothing works. */
+
+#define KSTAT_STRLEN 31 /* 30 chars + NULL; must be 16 * n - 1 */
+
+#define KSTAT_TYPE_RAW 0 /* can be anything */
+ /* ks_ndata >= 1 */
+#define KSTAT_TYPE_NAMED 1 /* name/value pair */
+ /* ks_ndata >= 1 */
+#define KSTAT_TYPE_INTR 2 /* interrupt statistics */
+ /* ks_ndata == 1 */
+#define KSTAT_TYPE_IO 3 /* I/O statistics */
+ /* ks_ndata == 1 */
+#define KSTAT_TYPE_TIMER 4 /* event timer */
+ /* ks_ndata >= 1 */
+
+#define KSTAT_NUM_TYPES 5
+
+
+#define KSTAT_DATA_CHAR 0
+#define KSTAT_DATA_INT32 1
+#define KSTAT_DATA_UINT32 2
+#define KSTAT_DATA_INT64 3
+#define KSTAT_DATA_UINT64 4
+
+
+#define KSTAT_FLAG_VIRTUAL 0x01
+#define KSTAT_FLAG_VAR_SIZE 0x02
+#define KSTAT_FLAG_WRITABLE 0x04
+#define KSTAT_FLAG_PERSISTENT 0x08
+#define KSTAT_FLAG_DORMANT 0x10
+#define KSTAT_FLAG_INVALID 0x2
+
+
+typedef int kid_t; /* unique kstat id */
+
+typedef struct kstat_s {
+	/*
+	 * Fields relevant to both kernel and user
+	 */
+	hrtime_t	ks_crtime;	/* creation time (from gethrtime()) */
+	struct kstat_s	*ks_next;	/* kstat chain linkage */
+	kid_t		ks_kid;		/* unique kstat ID */
+	char		ks_module[KSTAT_STRLEN]; /* provider module name */
+	uchar_t		ks_resv;	/* reserved, currently just padding */
+	int		ks_instance;	/* provider module's instance */
+	char		ks_name[KSTAT_STRLEN]; /* kstat name */
+	uchar_t		ks_type;	/* kstat data type */
+	char		ks_class[KSTAT_STRLEN]; /* kstat class */
+	uchar_t		ks_flags;	/* kstat flags */
+	void		*ks_data;	/* kstat type-specific data */
+	uint_t		ks_ndata;	/* # of type-specific data records */
+	size_t		ks_data_size;	/* total size of kstat data section */
+	hrtime_t	ks_snaptime;	/* time of last data shapshot */
+	/*
+	 * Fields relevant to kernel only.  Note the struct tag is
+	 * kstat_s; "struct kstat" would silently declare a distinct,
+	 * incomplete type.
+	 */
+	int (*ks_update)(struct kstat_s *, int); /* dynamic update */
+	void		*ks_private;	/* arbitrary provider-private data */
+	int (*ks_snapshot)(struct kstat_s *, void *, int);
+	void		*ks_lock;	/* protects this kstat's data */
+} kstat_t;
+
+typedef struct kstat_named_s {
+ char name[KSTAT_STRLEN]; /* name of counter */
+ uchar_t data_type; /* data type */
+ union {
+ char c[16]; /* enough for 128-bit ints */
+ int32_t i32;
+ uint32_t ui32;
+ struct {
+ union {
+ char *ptr; /* NULL-term string */
+ char __pad[8]; /* 64-bit padding */
+ } addr;
+ uint32_t len; /* # bytes for strlen + '\0' */
+ } str;
+/*
+ * The int64_t and uint64_t types are not valid for a maximally conformant
+ * 32-bit compilation environment (cc -Xc) using compilers prior to the
+ * introduction of C99 conforming compiler (reference ISO/IEC 9899:1990).
+ * In these cases, the visibility of i64 and ui64 is only permitted for
+ * 64-bit compilation environments or 32-bit non-maximally conformant
+ * C89 or C90 ANSI C compilation environments (cc -Xt and cc -Xa). In the
+ * C99 ANSI C compilation environment, the long long type is supported.
+ * The _INT64_TYPE is defined by the implementation (see sys/int_types.h).
+ */
+ int64_t i64;
+ uint64_t ui64;
+ long l;
+ ulong_t ul;
+
+ /* These structure members are obsolete */
+
+ longlong_t ll;
+ u_longlong_t ull;
+ float f;
+ double d;
+ } value; /* value of counter */
+} kstat_named_t;
+
+
+static __inline__ kstat_t *
+kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
+ const char *ks_class, uchar_t ks_type, uint_t ks_ndata,
+ uchar_t ks_flags)
+{
+ return NULL;
+}
+
+static __inline__ void
+kstat_install(kstat_t *ksp)
+{
+ return;
+}
+
+static __inline__ void
+kstat_delete(kstat_t *ksp)
+{
+ return;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_KSTAT_H */
+
diff --git a/include/sys/mutex.h b/include/sys/mutex.h
new file mode 100644
index 000000000..2db4a7f96
--- /dev/null
+++ b/include/sys/mutex.h
@@ -0,0 +1,121 @@
+#ifndef _SPL_MUTEX_H
+#define _SPL_MUTEX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/types.h>
+
+/* See the "Big Theory Statement" in solaris mutex.c.
+ *
+ * Spin mutexes apparently aren't needed by zfs so we assert
+ * if ibc is non-zero.
+ *
+ * Our impementation of adaptive mutexes aren't really adaptive.
+ * They go to sleep every time.
+ */
+
+#define MUTEX_DEFAULT 0
+#define MUTEX_HELD(x) (mutex_owned(x))
+
+#define KM_MAGIC 0x42424242
+#define KM_POISON 0x84
+
+typedef struct {
+ int km_magic;
+ char *km_name;
+ struct task_struct *km_owner;
+ struct semaphore km_sem;
+} kmutex_t;
+
+#undef mutex_init
+static __inline__ void
+mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
+{
+ BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
+ BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
+
+ mp->km_magic = KM_MAGIC;
+ sema_init(&mp->km_sem, 1);
+ mp->km_owner = NULL;
+ mp->km_name = NULL;
+
+ if (name) {
+ mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (mp->km_name)
+ strcpy(mp->km_name, name);
+ }
+}
+
+#undef mutex_destroy
+static __inline__ void
+mutex_destroy(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+
+ if (mp->km_name)
+ kfree(mp->km_name);
+
+ memset(mp, KM_POISON, sizeof(*mp));
+}
+
+static __inline__ void
+mutex_enter(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ down(&mp->km_sem); /* Will check in_atomic() for us */
+ BUG_ON(mp->km_owner != NULL);
+ mp->km_owner = current;
+}
+
+/* Return 1 if we acquired the mutex, else zero.
+ */
+static __inline__ int
+mutex_tryenter(kmutex_t *mp)
+{
+ int result;
+
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
+ if (result == 0) {
+ BUG_ON(mp->km_owner != NULL);
+ mp->km_owner = current;
+ return 1;
+ }
+ return 0;
+}
+
+static __inline__ void
+mutex_exit(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ BUG_ON(mp->km_owner != current);
+ mp->km_owner = NULL;
+ up(&mp->km_sem);
+}
+
+/* Return 1 if mutex is held by current process, else zero.
+ */
+static __inline__ int
+mutex_owned(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ return (mp->km_owner == current);
+}
+
+/* Return owner if mutex is owned, else NULL.
+ */
+static __inline__ kthread_t *
+mutex_owner(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ return mp->km_owner;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_MUTEX_H */
diff --git a/include/sys/param.h b/include/sys/param.h
new file mode 100644
index 000000000..f924006ef
--- /dev/null
+++ b/include/sys/param.h
@@ -0,0 +1,4 @@
+#ifndef _SPL_PARAM_H
+#define _SPL_PARAM_H
+
+#endif /* _SPL_PARAM_H */
diff --git a/include/sys/random.h b/include/sys/random.h
new file mode 100644
index 000000000..b7f83ce1a
--- /dev/null
+++ b/include/sys/random.h
@@ -0,0 +1,38 @@
+#ifndef _SPL_RANDOM_H
+#define _SPL_RANDOM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/random.h>
+
+/* FIXME:
+ * Should add support for blocking in the future to
+ * ensure that proper entopy is collected. ZFS doesn't
+ * use it at the moment so this is good enough for now.
+ * Always will succeed by returning 0.
+ */
+static __inline__ int
+random_get_bytes(uint8_t *ptr, size_t len)
+{
+	/* len is a size_t and can never be negative, so the old
+	 * BUG_ON(len < 0) was a tautology; validate the pointer,
+	 * which is the argument that can actually be invalid. */
+	BUG_ON(ptr == NULL);
+	get_random_bytes((void *)ptr, (int)len);
+	return 0;
+}
+
+ /* Always will succeed by returning 0. */
+static __inline__ int
+random_get_pseudo_bytes(uint8_t *ptr, size_t len)
+{
+	BUG_ON(ptr == NULL);
+	get_random_bytes((void *)ptr, (int)len);
+	return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_RANDOM_H */
diff --git a/include/sys/rwlock.h b/include/sys/rwlock.h
new file mode 100644
index 000000000..6c55ced98
--- /dev/null
+++ b/include/sys/rwlock.h
@@ -0,0 +1,224 @@
+#ifndef _SPL_RWLOCK_H
+#define _SPL_RWLOCK_H
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <asm/current.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ RW_DRIVER = 2, /* driver (DDI) rwlock */
+ RW_DEFAULT = 4 /* kernel default rwlock */
+} krw_type_t;
+
+typedef enum {
+ RW_WRITER,
+ RW_READER
+} krw_t;
+
+#define RW_READ_HELD(x) (rw_read_held((x)))
+#define RW_WRITE_HELD(x) (rw_write_held((x)))
+#define RW_LOCK_HELD(x) (rw_lock_held((x)))
+#define RW_ISWRITER(x) (rw_iswriter(x))
+
+#define RW_MAGIC 0x3423645a
+#define RW_POISON 0xa6
+
+typedef struct {
+ int rw_magic;
+ char *rw_name;
+ struct rw_semaphore rw_sem;
+ struct task_struct *rw_owner; /* holder of the write lock */
+} krwlock_t;
+
+static __inline__ void
+rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
+{
+ BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */
+ BUG_ON(arg != NULL); /* XXX no irq handler use */
+ rwlp->rw_magic = RW_MAGIC;
+ rwlp->rw_owner = NULL; /* no one holds the write lock yet */
+ init_rwsem(&rwlp->rw_sem);
+ rwlp->rw_name = NULL;
+
+ if (name) {
+ rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (rwlp->rw_name)
+ strcpy(rwlp->rw_name, name);
+ }
+}
+
+static __inline__ void
+rw_destroy(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp == NULL);
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ BUG_ON(rwlp->rw_owner != NULL);
+ spin_lock(&rwlp->rw_sem.wait_lock);
+ BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+
+ if (rwlp->rw_name)
+ kfree(rwlp->rw_name);
+
+ memset(rwlp, RW_POISON, sizeof(krwlock_t));
+}
+
+/* Return 0 if the lock could not be obtained without blocking.
+ */
+static __inline__ int
+rw_tryenter(krwlock_t *rwlp, krw_t rw)
+{
+	int result;
+
+	BUG_ON(rwlp->rw_magic != RW_MAGIC);
+	switch (rw) {
+		/* these functions return 1 if success, 0 if contention */
+		case RW_READER:
+			/* Here the Solaris code would return 0
+			 * if there were any write waiters.  Specifically
+			 * thinking about the case where readers may have
+			 * the lock and we would also allow this thread
+			 * to grab the read lock with a writer waiting in the
+			 * queue.  This doesn't seem like a correctness
+			 * issue, so just call down_read_trylock()
+			 * for the test.  We may have to revisit this if
+			 * it becomes an issue */
+			result = down_read_trylock(&rwlp->rw_sem);
+			break;
+		case RW_WRITER:
+			result = down_write_trylock(&rwlp->rw_sem);
+			if (result) {
+				/* there better not be anyone else
+				 * holding the write lock here */
+				BUG_ON(rwlp->rw_owner != NULL);
+				rwlp->rw_owner = current;
+			}
+			break;
+		default:
+			/* Without this, an invalid krw_t would return
+			 * an uninitialized 'result' (undefined behavior) */
+			BUG();
+			result = 0;
+			break;
+	}
+
+	return result;
+}
+
+static __inline__ void
+rw_enter(krwlock_t *rwlp, krw_t rw)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ switch (rw) {
+ case RW_READER:
+ /* Here the Solaris code would block
+ * if there were any write waiters. Specifically
+ * thinking about the case where readers may have
+ * the lock and we would also allow this thread
+ * to grab the read lock with a writer waiting in the
+ * queue. This doesn't seem like a correctness
+ * issue, so just call down_read()
+ * for the test. We may have to revisit this if
+ * it becomes an issue */
+ down_read(&rwlp->rw_sem);
+ break;
+ case RW_WRITER:
+ down_write(&rwlp->rw_sem);
+
+ /* there better not be anyone else
+ * holding the write lock here */
+ BUG_ON(rwlp->rw_owner != NULL);
+ rwlp->rw_owner = current;
+ break;
+ }
+}
+
+static __inline__ void
+rw_exit(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ /* rw_owner is held by current
+ * thread iff it is a writer */
+ if (rwlp->rw_owner == current) {
+ rwlp->rw_owner = NULL;
+ up_write(&rwlp->rw_sem);
+ } else {
+ up_read(&rwlp->rw_sem);
+ }
+}
+
+static __inline__ void
+rw_downgrade(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ BUG_ON(rwlp->rw_owner != current);
+ rwlp->rw_owner = NULL;
+ downgrade_write(&rwlp->rw_sem);
+}
+
+/* Return 0 if unable to perform the upgrade.
+ * Might be wise to fix the caller
+ * to acquire the write lock first?
+ */
+static __inline__ int
+rw_tryupgrade(krwlock_t *rwlp)
+{
+ int result;
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ spin_lock(&rwlp->rw_sem.wait_lock);
+
+ /* Check if there is anyone waiting for the
+ * lock. If there is, then we know we should
+ * not try to upgrade the lock */
+ if (!list_empty(&rwlp->rw_sem.wait_list)) {
+ printk(KERN_WARNING "There are threads waiting\n");
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 0;
+ }
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+ /* Note that activity is protected by
+ * the wait_lock. Don't try to upgrade
+ * if there are multiple readers currently
+ * holding the lock */
+ if (rwlp->rw_sem.activity > 1) {
+#else
+ /* Don't try to upgrade
+ * if there are multiple readers currently
+ * holding the lock */
+ if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
+#endif
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 0;
+ }
+
+ /* Here it should be safe to drop the
+ * read lock and reacquire it for writing since
+ * we know there are no waiters */
+ up_read(&rwlp->rw_sem);
+
+ /* returns 1 if success, 0 if contention */
+ result = down_write_trylock(&rwlp->rw_sem);
+
+ /* Check if upgrade failed. Should not ever happen
+ * if we got to this point */
+ BUG_ON(!result);
+ BUG_ON(rwlp->rw_owner != NULL);
+ rwlp->rw_owner = current;
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 1;
+}
+
+static __inline__ kthread_t *
+rw_owner(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ return rwlp->rw_owner;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_RWLOCK_H */
diff --git a/include/sys/spl.h b/include/sys/spl.h
new file mode 100644
index 000000000..7cc1cab58
--- /dev/null
+++ b/include/sys/spl.h
@@ -0,0 +1,19 @@
+#ifndef _SPL_H
+#define _SPL_H
+
+#include <sys/callb.h>
+#include <sys/condvar.h>
+#include <sys/cred.h>
+#include <sys/generic.h>
+#include <sys/kmem.h>
+#include <sys/kstat.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/rwlock.h>
+#include <sys/taskq.h>
+#include <sys/thread.h>
+#include <sys/time.h>
+#include <sys/timer.h>
+#include <sys/types.h>
+
+#endif /* _SPL_H */
diff --git a/include/sys/taskq.h b/include/sys/taskq.h
new file mode 100644
index 000000000..2d7583daf
--- /dev/null
+++ b/include/sys/taskq.h
@@ -0,0 +1,87 @@
+#ifndef _SPL_TASKQ_H
+#define _SPL_TASKQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Task Queues - As of linux 2.6.x task queues have been replaced by a
+ * similar construct called work queues. The big difference on the linux
+ * side is that functions called from work queues run in process context
+ * and not interrupt context.
+ *
+ * One nice feature of Solaris which does not exist in linux work
+ * queues in the notion of a dynamic work queue. Rather than implementing
+ * this in the shim layer I'm hardcoding one-thread per work queue.
+ *
+ * XXX - This may end up being a significant performance penalty which
+ * forces us to implement dynamic workqueues. Which is all very doable
+ * with a little effort.
+ */
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <sys/types.h>
+
+#undef DEBUG_TASKQ_UNIMPLEMENTED
+
+#define TASKQ_NAMELEN 31
+#define taskq_t workq_t
+
+typedef struct workqueue_struct workq_t;
+typedef unsigned long taskqid_t;
+typedef void (*task_func_t)(void *);
+
+/*
+ * Public flags for taskq_create(): bit range 0-15
+ */
+#define TASKQ_PREPOPULATE 0x0000 /* XXX - Workqueues fully populate */
+#define TASKQ_CPR_SAFE 0x0000 /* XXX - No analog */
+#define TASKQ_DYNAMIC 0x0000 /* XXX - Worksqueues not dynamic */
+
+/*
+ * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
+ * KM_SLEEP/KM_NOSLEEP.
+ */
+#define TQ_SLEEP 0x00 /* XXX - Workqueues don't support */
+#define TQ_NOSLEEP 0x00 /* these sorts of flags. They */
+#define TQ_NOQUEUE 0x00 /* always run in application */
+#define TQ_NOALLOC 0x00 /* context and can sleep. */
+
+
+#ifdef DEBUG_TASKQ_UNIMPLEMENTED
+static __inline__ void taskq_init(void) {
+#error "taskq_init() not implemented"
+}
+
+static __inline__ taskq_t *
+taskq_create_instance(const char *, int, int, pri_t, int, int, uint_t) {
+#error "taskq_create_instance() not implemented"
+}
+
+extern void nulltask(void *);
+extern void taskq_suspend(taskq_t *);
+extern int taskq_suspended(taskq_t *);
+extern void taskq_resume(taskq_t *);
+
+#endif /* DEBUG_TASKQ_UNIMPLEMENTED */
+
+extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
+extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
+
+#define taskq_create(name, thr, pri, min, max, flags) \
+	__taskq_create(name, thr, pri, min, max, flags)
+#define taskq_dispatch(tq, func, priv, flags) \
+	__taskq_dispatch(tq, func, priv, flags)
+/* The Solaris API callers use is taskq_destroy(); the previous
+ * "taskq_destory" spelling meant this macro never matched. */
+#define taskq_destroy(tq) destroy_workqueue(tq)
+#define taskq_wait(tq) flush_workqueue(tq)
+#define taskq_member(tq, kthr) 1 /* XXX -Just be true */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TASKQ_H */
diff --git a/include/sys/thread.h b/include/sys/thread.h
new file mode 100644
index 000000000..e7f99c96d
--- /dev/null
+++ b/include/sys/thread.h
@@ -0,0 +1,50 @@
+#ifndef _SPL_THREAD_H
+#define _SPL_THREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <sys/types.h>
+#include <sys/generic.h>
+
+/*
+ * Thread interfaces: map the Solaris kernel thread API onto Linux
+ * kernel threads.  thread_create()/thread_exit() forward to the SPL
+ * implementations __thread_create()/__thread_exit() declared below.
+ */
+#define TP_MAGIC 0x53535353
+
+/* Solaris thread states expressed as the closest Linux task states. */
+#define TS_SLEEP TASK_INTERRUPTIBLE
+#define TS_RUN TASK_RUNNING
+#define TS_ZOMB EXIT_ZOMBIE
+#define TS_STOPPED TASK_STOPPED
+#if 0
+#define TS_FREE 0x00 /* No clean linux mapping */
+#define TS_ONPROC 0x04 /* No clean linux mapping */
+#define TS_WAIT 0x20 /* No clean linux mapping */
+#endif
+
+#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+	__thread_create(stk, stksize, func, arg, len, pp, state, pri)
+#define thread_exit() __thread_exit()
+#define curthread get_current()
+
+/* We just need a valid type to pass around, it's unused */
+typedef struct proc_s {
+	int foo;	/* placeholder member; never referenced */
+} proc_t;
+
+/* Implemented by the SPL module.  NOTE(review): stk/stksize/len/pp look
+ * like Solaris-compatibility placeholders -- confirm how much of the
+ * signature the Linux implementation actually honors. */
+extern kthread_t *__thread_create(caddr_t stk, size_t stksize,
+                                  void (*proc)(void *), void *args,
+                                  size_t len, proc_t *pp, int state,
+                                  pri_t pri);
+extern void __thread_exit(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_THREAD_H */
+
diff --git a/include/sys/time.h b/include/sys/time.h
new file mode 100644
index 000000000..726bd5f8a
--- /dev/null
+++ b/include/sys/time.h
@@ -0,0 +1,63 @@
+#ifndef _SPL_TIME_H
+#define _SPL_TIME_H
+
+/*
+ * Structure returned by gettimeofday(2) system call,
+ * and used in other calls.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <sys/types.h>
+
+/* Nanoseconds since kernel initialization; implemented by the SPL. */
+extern unsigned long long monotonic_clock(void);
+typedef struct timespec timestruc_t; /* definition per SVr4 */
+typedef longlong_t hrtime_t;
+
+#define TIME32_MAX INT32_MAX
+#define TIME32_MIN INT32_MIN
+
+/* Unit conversion factors relative to one second. */
+#define SEC 1
+#define MILLISEC 1000
+#define MICROSEC 1000000
+#define NANOSEC 1000000000
+
+/*
+ * hz is a GCC statement expression so that every use also sanity-checks
+ * that the kernel's HZ lies in the range Solaris-derived code expects.
+ */
+#define hz \
+({ \
+	BUG_ON(HZ < 100 || HZ > MICROSEC); \
+	HZ; \
+})
+
+/* Fill *ts with the current wall-clock time (Solaris gethrestime()). */
+#define gethrestime(ts) getnstimeofday((ts))
+
+/*
+ * Return a Solaris-style high-resolution timestamp: nanoseconds since
+ * kernel initialization, as reported by monotonic_clock().
+ */
+static __inline__ hrtime_t
+gethrtime(void)
+{
+	/*
+	 * monotonic_clock() yields an unsigned long long while hrtime_t
+	 * is signed; the value stays within the signed range for roughly
+	 * 292 years of uptime, so the conversion is safe in practice.
+	 */
+	return (hrtime_t)monotonic_clock();
+}
+
+/*
+ * Current wall-clock time truncated to whole seconds, per the Solaris
+ * gethrestime_sec() contract.
+ */
+static __inline__ time_t
+gethrestime_sec(void)
+{
+	timestruc_t ts;
+
+	gethrestime(&ts);
+	return (ts.tv_sec);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TIME_H */
diff --git a/include/sys/timer.h b/include/sys/timer.h
new file mode 100644
index 000000000..237195d76
--- /dev/null
+++ b/include/sys/timer.h
@@ -0,0 +1,22 @@
+#ifndef _SPL_TIMER_H
+#define _SPL_TIMER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/* Solaris lbolt: clock ticks since boot, expressed as Linux jiffies. */
+#define lbolt ((clock_t)jiffies)
+#define lbolt64 ((int64_t)get_jiffies_64())
+
+/*
+ * Sleep for the given number of clock ticks.  The previous definition
+ * expanded to schedule_timeout((long timeout)(ticks)); "(long timeout)"
+ * is not a valid cast, so any use of delay() failed to compile.  The
+ * intended cast is simply (long).  Note that the caller must set the
+ * task state (e.g. TASK_UNINTERRUPTIBLE) before calling delay(), or
+ * schedule_timeout() returns immediately.
+ */
+#define delay(ticks) schedule_timeout((long)(ticks))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TIMER_H */
+
diff --git a/include/sys/types.h b/include/sys/types.h
new file mode 100644
index 000000000..b72b6c9e0
--- /dev/null
+++ b/include/sys/types.h
@@ -0,0 +1,27 @@
+#ifndef _SPL_TYPES_H
+#define _SPL_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Minimal set of Solaris primitive types for kernel code ported to
+ * Linux.  long/unsigned long are used for the pointer-width types.
+ */
+typedef enum { B_FALSE=0, B_TRUE=1 } boolean_t;
+typedef unsigned long uintptr_t;
+/* intptr_t must be SIGNED (C99 7.18.1.4); it was previously defined as
+ * unsigned long, which silently broke sign comparisons and
+ * pointer-difference arithmetic. */
+typedef long intptr_t;
+typedef unsigned long ulong_t;
+typedef unsigned int uint_t;
+typedef unsigned char uchar_t;
+typedef unsigned long long u_longlong_t;
+typedef unsigned long long u_offset_t;
+typedef unsigned long long rlim64_t;
+typedef long long longlong_t;
+typedef long long offset_t;
+/* Opaque handles: kthread_t aliases Linux's task_struct; vmem_t is an
+ * empty stub kept only so pointers to it can be passed around. */
+typedef struct task_struct kthread_t;
+typedef struct vmem { } vmem_t;
+typedef short pri_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TYPES_H */