Diffstat (limited to 'patches')
-rw-r--r--  patches/README                          17
-rw-r--r--  patches/lztest-lzdb.patch               40
-rw-r--r--  patches/no-debug-userspace.patch       184
-rw-r--r--  patches/no-events.patch                 44
-rw-r--r--  patches/port-no-zmod.patch             112
-rw-r--r--  patches/port-pragma-init.patch          52
-rw-r--r--  patches/pthreads.patch                 924
-rw-r--r--  patches/zap-cursor-move-to-key.patch   115
8 files changed, 1488 insertions, 0 deletions
diff --git a/patches/README b/patches/README
new file mode 100644
index 000000000..31af59708
--- /dev/null
+++ b/patches/README
@@ -0,0 +1,17 @@
+#
+# Mostly patches for a userspace build. For now I'm leaving them all
+# out until we go through the code base and sort out the userspace
+# portion of the build system. We may find we do not want or need
+# many of these patches anymore. -Brian
+#
+zap-cursor-move-to-key.patch # Add a ZAP API to move a ZAP cursor to a
+given key.
+spa-force-readonly.patch # Add API to discard all writes
+no-debug-userspace.patch # Disable debug code on userspace
+no-events.patch # Define away spa_event_notify() in userspace
+pthreads.patch # Use POSIX threads in userspace.
+port-no-zmod.patch # Do not use zmod.h in userspace.
+port-pragma-init.patch # Use constructor attribute on non-Solaris
+platforms.
+lztest-lzdb.patch # Make lztest call lzdb from PATH.
+zpool-force.patch # Change -f to -F in zpool command
diff --git a/patches/lztest-lzdb.patch b/patches/lztest-lzdb.patch
new file mode 100644
index 000000000..37870b749
--- /dev/null
+++ b/patches/lztest-lzdb.patch
@@ -0,0 +1,40 @@
+Make lztest call lzdb from PATH.
+
+Index: zfs+chaos4/cmd/lztest/ztest.c
+===================================================================
+--- zfs+chaos4.orig/cmd/lztest/ztest.c
++++ zfs+chaos4/cmd/lztest/ztest.c
+@@ -3043,30 +3043,17 @@ ztest_verify_blocks(char *pool)
+ char zbuf[1024];
+ char *bin;
+ char *ztest;
+- char *isa;
+- int isalen;
+ FILE *fp;
+
+- (void) realpath(getexecname(), zdb);
+-
+- /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
+- bin = strstr(zdb, "/usr/bin/");
+- ztest = strstr(bin, "/ztest");
+- isa = bin + 8;
+- isalen = ztest - isa;
+- isa = strdup(isa);
+ /* LINTED */
+- (void) sprintf(bin,
+- "/usr/sbin%.*s/zdb -bc%s%s -U /tmp/zpool.cache -O %s %s",
+- isalen,
+- isa,
++ (void) sprintf(zdb,
++ "lzdb -bc%s%s -U /tmp/zpool.cache -O %s %s",
+ zopt_verbose >= 3 ? "s" : "",
+ zopt_verbose >= 4 ? "v" : "",
+ ztest_random(2) == 0 ? "pre" : "post", pool);
+- free(isa);
+
+ if (zopt_verbose >= 5)
+- (void) printf("Executing %s\n", strstr(zdb, "zdb "));
++ (void) printf("Executing %s\n", strstr(zdb, "lzdb "));
+
+ fp = popen(zdb, "r");
+
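The hunk above drops the getexecname()/realpath() logic that located zdb under /usr/sbin and instead builds a plain "lzdb ..." command line, so popen(3) resolves the binary through $PATH. A minimal, self-contained sketch of that pattern follows; the command name and flags are placeholders for illustration, not the actual ztest code:

    #include <stdio.h>

    /*
     * Build a command line for a helper found via $PATH and stream its
     * output back; "lzdb" and the flags here are illustrative placeholders.
     */
    int
    main(int argc, char **argv)
    {
        char cmd[1024];
        char line[1024];
        FILE *fp;
        const char *pool = (argc > 1) ? argv[1] : "tank";

        (void) snprintf(cmd, sizeof (cmd),
            "lzdb -bc -U /tmp/zpool.cache %s", pool);

        if ((fp = popen(cmd, "r")) == NULL) {
            perror("popen");
            return (1);
        }

        while (fgets(line, sizeof (line), fp) != NULL)
            (void) fputs(line, stdout);

        return (pclose(fp) == 0 ? 0 : 1);
    }

Resolving the helper through $PATH is what lets the userspace build install lzdb anywhere without baking paths into the test binary.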
diff --git a/patches/no-debug-userspace.patch b/patches/no-debug-userspace.patch
new file mode 100644
index 000000000..616a69153
--- /dev/null
+++ b/patches/no-debug-userspace.patch
@@ -0,0 +1,184 @@
+Disable debug code on userspace
+
+Index: zfs+chaos4/lib/libzfscommon/include/sys/arc.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/arc.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/arc.h
+@@ -82,7 +82,7 @@ int arc_released(arc_buf_t *buf);
+ int arc_has_callback(arc_buf_t *buf);
+ void arc_buf_freeze(arc_buf_t *buf);
+ void arc_buf_thaw(arc_buf_t *buf);
+-#ifdef ZFS_DEBUG
++#if defined(ZFS_DEBUG) || (!defined(_KERNEL) && !defined(NDEBUG))
+ int arc_referenced(arc_buf_t *buf);
+ #endif
+
+Index: zfs+chaos4/lib/libzfscommon/include/sys/refcount.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/refcount.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/refcount.h
+@@ -43,7 +43,7 @@ extern "C" {
+ */
+ #define FTAG ((char *)__func__)
+
+-#if defined(DEBUG) || !defined(_KERNEL)
++#if defined(DEBUG)
+ typedef struct reference {
+ list_node_t ref_link;
+ void *ref_holder;
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zfs_context_user.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+@@ -96,6 +96,8 @@ extern "C" {
+
+ #ifdef ZFS_DEBUG
+ extern void dprintf_setup(int *argc, char **argv);
++#else
++#define dprintf_setup(ac,av) ((void) 0)
+ #endif /* ZFS_DEBUG */
+
+ extern void cmn_err(int, const char *, ...);
+@@ -105,21 +107,26 @@ extern void vpanic(const char *, __va_li
+
+ #define fm_panic panic
+
++#ifndef zp_verify
+ /* This definition is copied from assert.h. */
+ #if defined(__STDC__)
+ #if __STDC_VERSION__ - 0 >= 199901L
+-#define verify(EX) (void)((EX) || \
++#define zp_verify(EX) (void)((EX) || \
+ (__assert_c99(#EX, __FILE__, __LINE__, __func__), 0))
+ #else
+-#define verify(EX) (void)((EX) || (__assert(#EX, __FILE__, __LINE__), 0))
++#define zp_verify(EX) (void)((EX) || (__assert(#EX, __FILE__, __LINE__), 0))
+ #endif /* __STDC_VERSION__ - 0 >= 199901L */
+ #else
+-#define verify(EX) (void)((EX) || (_assert("EX", __FILE__, __LINE__), 0))
++#define zp_verify(EX) (void)((EX) || (_assert("EX", __FILE__, __LINE__), 0))
+ #endif /* __STDC__ */
++#endif
+
+-
+-#define VERIFY verify
++#ifndef VERIFY
++#define VERIFY zp_verify
++#endif
++#ifndef ASSERT
+ #define ASSERT assert
++#endif
+
+ extern void __assert(const char *, const char *, int);
+
+@@ -332,6 +339,7 @@ extern int taskq_member(taskq_t *, void
+ typedef struct vnode {
+ uint64_t v_size;
+ int v_fd;
++ mode_t v_mode;
+ char *v_path;
+ } vnode_t;
+
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zfs_debug.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zfs_debug.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zfs_debug.h
+@@ -44,7 +44,7 @@ extern "C" {
+ * ZFS debugging
+ */
+
+-#if defined(DEBUG) || !defined(_KERNEL)
++#if defined(DEBUG)
+ #define ZFS_DEBUG
+ #endif
+
+Index: zfs+chaos4/lib/libzpool/arc.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/arc.c
++++ zfs+chaos4/lib/libzpool/arc.c
+@@ -1802,7 +1802,7 @@ arc_reclaim_needed(void)
+ return (1);
+ #endif
+
+-#else
++#elif defined(ZFS_DEBUG)
+ if (spa_get_random(100) == 0)
+ return (1);
+ #endif
+@@ -2881,7 +2881,7 @@ arc_has_callback(arc_buf_t *buf)
+ return (buf->b_efunc != NULL);
+ }
+
+-#ifdef ZFS_DEBUG
++#if defined(ZFS_DEBUG) || (!defined(_KERNEL) && !defined(NDEBUG))
+ int
+ arc_referenced(arc_buf_t *buf)
+ {
+Index: zfs+chaos4/lib/libzpool/kernel.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/kernel.c
++++ zfs+chaos4/lib/libzpool/kernel.c
+@@ -384,6 +384,7 @@ vn_open(char *path, int x1, int flags, i
+
+ vp->v_fd = fd;
+ vp->v_size = st.st_size;
++ vp->v_mode = st.st_mode;
+ vp->v_path = spa_strdup(path);
+
+ return (0);
+@@ -422,10 +423,17 @@ vn_rdwr(int uio, vnode_t *vp, void *addr
+ * To simulate partial disk writes, we split writes into two
+ * system calls so that the process can be killed in between.
+ */
+- split = (len > 0 ? rand() % len : 0);
+- iolen = pwrite64(vp->v_fd, addr, split, offset);
+- iolen += pwrite64(vp->v_fd, (char *)addr + split,
+- len - split, offset + split);
++#ifdef ZFS_DEBUG
++ if (!S_ISBLK(vp->v_mode) && !S_ISCHR(vp->v_mode)) {
++ split = (len > 0 ? rand() % len : 0);
++ iolen = pwrite64(vp->v_fd, addr, split, offset);
++ iolen += pwrite64(vp->v_fd, (char *)addr + split,
++ len - split, offset + split);
++ } else
++ iolen = pwrite64(vp->v_fd, addr, len, offset);
++#else
++ iolen = pwrite64(vp->v_fd, addr, len, offset);
++#endif
+ }
+
+ if (iolen < 0)
+Index: zfs+chaos4/lib/libzpool/refcount.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/refcount.c
++++ zfs+chaos4/lib/libzpool/refcount.c
+@@ -28,7 +28,7 @@
+ #include <sys/zfs_context.h>
+ #include <sys/refcount.h>
+
+-#if defined(DEBUG) || !defined(_KERNEL)
++#if defined(DEBUG)
+
+ #ifdef _KERNEL
+ int reference_tracking_enable = FALSE; /* runs out of memory too easily */
+Index: zfs+chaos4/lib/libzpool/spa_misc.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/spa_misc.c
++++ zfs+chaos4/lib/libzpool/spa_misc.c
+@@ -178,11 +178,15 @@ kmem_cache_t *spa_buffer_pool;
+ int spa_mode;
+
+ #ifdef ZFS_DEBUG
++#ifdef _KERNEL
+ /* Everything except dprintf is on by default in debug builds */
+ int zfs_flags = ~ZFS_DEBUG_DPRINTF;
+ #else
++int zfs_flags = ~0;
++#endif /* _KERNEL */
++#else
+ int zfs_flags = 0;
+-#endif
++#endif /* ZFS_DEBUG */
+
+ /*
+ * zfs_recover can be set to nonzero to attempt to recover from
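The common theme of the patch above is narrowing the "#if defined(DEBUG) || !defined(_KERNEL)" guards so that userspace builds only carry debug code when it is explicitly requested. A stand-alone sketch of that style of guard, using a made-up dbg_printf() macro rather than the real ZFS ones:

    #include <stdio.h>

    /*
     * Debug output compiles away completely unless ZFS_DEBUG is defined
     * (build with -DZFS_DEBUG to enable it).  dbg_printf() is an
     * illustrative name, not a real ZFS macro.
     */
    #ifdef ZFS_DEBUG
    #define dbg_printf(...)    (void) fprintf(stderr, __VA_ARGS__)
    #else
    #define dbg_printf(...)    ((void) 0)
    #endif

    int
    main(void)
    {
        dbg_printf("simulating a partial write of %d bytes\n", 512);
        return (0);
    }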
diff --git a/patches/no-events.patch b/patches/no-events.patch
new file mode 100644
index 000000000..054b7ae17
--- /dev/null
+++ b/patches/no-events.patch
@@ -0,0 +1,44 @@
+Define away spa_event_notify() in userspace - not necessary and breaks compilation in older Solaris builds.
+
+Index: zfs+chaos4/lib/libzfscommon/include/sys/spa.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/spa.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/spa.h
+@@ -516,7 +516,11 @@ extern int spa_prop_get(spa_t *spa, nvli
+ extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
+
+ /* asynchronous event notification */
++#ifdef _KERNEL
+ extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name);
++#else
++#define spa_event_notify(s,v,n) ((void) 0)
++#endif
+
+ #ifdef ZFS_DEBUG
+ #define dprintf_bp(bp, fmt, ...) do { \
+Index: zfs+chaos4/lib/libzpool/spa.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/spa.c
++++ zfs+chaos4/lib/libzpool/spa.c
+@@ -4449,10 +4449,10 @@ spa_has_spare(spa_t *spa, uint64_t guid)
+ * in the userland libzpool, as we don't want consumers to misinterpret ztest
+ * or zdb as real changes.
+ */
++#ifdef _KERNEL
+ void
+ spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
+ {
+-#ifdef _KERNEL
+ sysevent_t *ev;
+ sysevent_attr_list_t *attr = NULL;
+ sysevent_value_t value;
+@@ -4497,8 +4497,8 @@ done:
+ if (attr)
+ sysevent_free_attr(attr);
+ sysevent_free(ev);
+-#endif
+ }
++#endif
+
+ void
+ spa_discard_io(spa_t *spa)
diff --git a/patches/port-no-zmod.patch b/patches/port-no-zmod.patch
new file mode 100644
index 000000000..34cabd1fd
--- /dev/null
+++ b/patches/port-no-zmod.patch
@@ -0,0 +1,112 @@
+Do not use zmod.h in userspace.
+
+Index: zfs+chaos4/lib/libzpool/gzip.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/gzip.c
++++ zfs+chaos4/lib/libzpool/gzip.c
+@@ -28,22 +28,35 @@
+
+ #include <sys/debug.h>
+ #include <sys/types.h>
+-#include <sys/zmod.h>
+
+ #ifdef _KERNEL
++
+ #include <sys/systm.h>
+-#else
++#include <sys/zmod.h>
++
++typedef size_t zlen_t;
++#define compress_func z_compress_level
++#define uncompress_func z_uncompress
++
++#else /* _KERNEL */
++
+ #include <strings.h>
++#include <zlib.h>
++
++typedef uLongf zlen_t;
++#define compress_func compress2
++#define uncompress_func uncompress
++
+ #endif
+
+ size_t
+ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
+ {
+- size_t dstlen = d_len;
++ zlen_t dstlen = d_len;
+
+ ASSERT(d_len <= s_len);
+
+- if (z_compress_level(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
++ if (compress_func(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
+ if (d_len != s_len)
+ return (s_len);
+
+@@ -51,18 +64,18 @@ gzip_compress(void *s_start, void *d_sta
+ return (s_len);
+ }
+
+- return (dstlen);
++ return ((size_t) dstlen);
+ }
+
+ /*ARGSUSED*/
+ int
+ gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
+ {
+- size_t dstlen = d_len;
++ zlen_t dstlen = d_len;
+
+ ASSERT(d_len >= s_len);
+
+- if (z_uncompress(d_start, &dstlen, s_start, s_len) != Z_OK)
++ if (uncompress_func(d_start, &dstlen, s_start, s_len) != Z_OK)
+ return (-1);
+
+ return (0);
+Index: zfs+chaos4/lib/libzpool/kernel.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/kernel.c
++++ zfs+chaos4/lib/libzpool/kernel.c
+@@ -36,7 +36,6 @@
+ #include <sys/stat.h>
+ #include <sys/processor.h>
+ #include <sys/zfs_context.h>
+-#include <sys/zmod.h>
+ #include <sys/utsname.h>
+ #include <sys/time.h>
+
+@@ -876,31 +875,6 @@ kernel_fini(void)
+ urandom_fd = -1;
+ }
+
+-int
+-z_uncompress(void *dst, size_t *dstlen, const void *src, size_t srclen)
+-{
+- int ret;
+- uLongf len = *dstlen;
+-
+- if ((ret = uncompress(dst, &len, src, srclen)) == Z_OK)
+- *dstlen = (size_t)len;
+-
+- return (ret);
+-}
+-
+-int
+-z_compress_level(void *dst, size_t *dstlen, const void *src, size_t srclen,
+- int level)
+-{
+- int ret;
+- uLongf len = *dstlen;
+-
+- if ((ret = compress2(dst, &len, src, srclen, level)) == Z_OK)
+- *dstlen = (size_t)len;
+-
+- return (ret);
+-}
+-
+ /*ARGSUSED*/
+ size_t u8_textprep_str(char *i, size_t *il, char *o, size_t *ol, int nf,
+ size_t vers, int *err)
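In userspace the gzip code above now calls stock zlib directly, where the destination length argument is a uLongf rather than a size_t, which is why the patch introduces the zlen_t typedef. A small round-trip through the same zlib entry points, compress2() and uncompress(), with arbitrary buffer sizes chosen for the sketch (build with -lz):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /*
     * Round-trip a buffer through compress2()/uncompress(); note the
     * uLongf length arguments that motivate zlen_t in the patch.
     */
    int
    main(void)
    {
        static const char src[] = "a compressible, repetitive, repetitive payload";
        Bytef packed[256];
        Bytef unpacked[256];
        uLongf packed_len = sizeof (packed);
        uLongf unpacked_len = sizeof (unpacked);

        if (compress2(packed, &packed_len, (const Bytef *)src,
            sizeof (src), Z_BEST_SPEED) != Z_OK)
            return (1);

        if (uncompress(unpacked, &unpacked_len, packed, packed_len) != Z_OK)
            return (1);

        (void) printf("%lu -> %lu -> %lu bytes\n",
            (unsigned long) sizeof (src), (unsigned long) packed_len,
            (unsigned long) unpacked_len);

        return (memcmp(src, unpacked, sizeof (src)) ? 1 : 0);
    }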
diff --git a/patches/port-pragma-init.patch b/patches/port-pragma-init.patch
new file mode 100644
index 000000000..665aa9b62
--- /dev/null
+++ b/patches/port-pragma-init.patch
@@ -0,0 +1,52 @@
+Use constructor attribute on non-Solaris platforms.
+
+Index: zfs+chaos4/lib/libuutil/uu_misc.c
+===================================================================
+--- zfs+chaos4.orig/lib/libuutil/uu_misc.c
++++ zfs+chaos4/lib/libuutil/uu_misc.c
+@@ -251,7 +251,13 @@ uu_release_child(void)
+ uu_release();
+ }
+
++#ifdef __GNUC__
++static void
++uu_init(void) __attribute__((constructor));
++#else
+ #pragma init(uu_init)
++#endif
++
+ static void
+ uu_init(void)
+ {
+Index: zfs+chaos4/lib/libzfs/libzfs_mount.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzfs/libzfs_mount.c
++++ zfs+chaos4/lib/libzfs/libzfs_mount.c
+@@ -128,7 +128,13 @@ zfs_share_proto_t share_all_proto[] = {
+ PROTO_END
+ };
+
++#ifdef __GNUC__
++static void
++zfs_iscsi_init(void) __attribute__((constructor));
++#else
+ #pragma init(zfs_iscsi_init)
++#endif
++
+ static void
+ zfs_iscsi_init(void)
+ {
+@@ -548,8 +554,12 @@ static void (*_sa_update_sharetab_ts)(sa
+ * values to be used later. This is triggered by the runtime loader.
+ * Make sure the correct ISA version is loaded.
+ */
+-
++#ifdef __GNUC__
++static void
++_zfs_init_libshare(void) __attribute__((constructor));
++#else
+ #pragma init(_zfs_init_libshare)
++#endif
+ static void
+ _zfs_init_libshare(void)
+ {
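#pragma init(func) is a Sun Studio directive; the GCC equivalent used above is to declare the init routine with __attribute__((constructor)) so the runtime loader runs it before main(). A stand-alone sketch of the same portability pattern, with a hypothetical my_lib_init() in place of the library's real initializers:

    #include <stdio.h>

    /*
     * Run an initializer before main(): Sun Studio via #pragma init,
     * GCC-compatible compilers via the constructor attribute.
     * my_lib_init() is a placeholder name for illustration.
     */
    #ifdef __GNUC__
    static void my_lib_init(void) __attribute__((constructor));
    #else
    #pragma init(my_lib_init)
    #endif

    static void
    my_lib_init(void)
    {
        (void) fprintf(stderr, "initialized by the runtime loader\n");
    }

    int
    main(void)
    {
        (void) printf("main() runs after the constructor\n");
        return (0);
    }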
diff --git a/patches/pthreads.patch b/patches/pthreads.patch
new file mode 100644
index 000000000..ec29eb915
--- /dev/null
+++ b/patches/pthreads.patch
@@ -0,0 +1,924 @@
+Use POSIX threads in userspace.
+
+Index: zfs+chaos4/cmd/lztest/ztest.c
+===================================================================
+--- zfs+chaos4.orig/cmd/lztest/ztest.c
++++ zfs+chaos4/cmd/lztest/ztest.c
+@@ -141,7 +141,7 @@ typedef struct ztest_args {
+ spa_t *za_spa;
+ objset_t *za_os;
+ zilog_t *za_zilog;
+- thread_t za_thread;
++ pthread_t za_thread;
+ uint64_t za_instance;
+ uint64_t za_random;
+ uint64_t za_diroff;
+@@ -224,17 +224,17 @@ ztest_info_t ztest_info[] = {
+ * Stuff we need to share writably between parent and child.
+ */
+ typedef struct ztest_shared {
+- mutex_t zs_vdev_lock;
+- rwlock_t zs_name_lock;
+- uint64_t zs_vdev_primaries;
+- uint64_t zs_enospc_count;
+- hrtime_t zs_start_time;
+- hrtime_t zs_stop_time;
+- uint64_t zs_alloc;
+- uint64_t zs_space;
+- ztest_info_t zs_info[ZTEST_FUNCS];
+- mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
+- uint64_t zs_seq[ZTEST_SYNC_LOCKS];
++ pthread_mutex_t zs_vdev_lock;
++ pthread_rwlock_t zs_name_lock;
++ uint64_t zs_vdev_primaries;
++ uint64_t zs_enospc_count;
++ hrtime_t zs_start_time;
++ hrtime_t zs_stop_time;
++ uint64_t zs_alloc;
++ uint64_t zs_space;
++ ztest_info_t zs_info[ZTEST_FUNCS];
++ pthread_mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
++ uint64_t zs_seq[ZTEST_SYNC_LOCKS];
+ } ztest_shared_t;
+
+ static char ztest_dev_template[] = "%s/%s.%llua";
+@@ -818,7 +818,7 @@ ztest_spa_create_destroy(ztest_args_t *z
+ * Attempt to create an existing pool. It shouldn't matter
+ * what's in the nvroot; we should fail with EEXIST.
+ */
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ nvroot = make_vdev_root(0, 0, 0, 0, 1);
+ error = spa_create(za->za_pool, nvroot, NULL, NULL);
+ nvlist_free(nvroot);
+@@ -834,7 +834,7 @@ ztest_spa_create_destroy(ztest_args_t *z
+ fatal(0, "spa_destroy() = %d", error);
+
+ spa_close(spa, FTAG);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ /*
+@@ -851,7 +851,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
+ if (zopt_verbose >= 6)
+ (void) printf("adding vdev\n");
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ spa_config_enter(spa, RW_READER, FTAG);
+
+@@ -869,7 +869,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
+ error = spa_vdev_add(spa, nvroot);
+ nvlist_free(nvroot);
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+
+ if (error == ENOSPC)
+ ztest_record_enospc("spa_vdev_add");
+@@ -927,7 +927,7 @@ ztest_vdev_attach_detach(ztest_args_t *z
+ int error, expected_error;
+ int fd;
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ spa_config_enter(spa, RW_READER, FTAG);
+
+@@ -1054,7 +1054,7 @@ ztest_vdev_attach_detach(ztest_args_t *z
+ oldpath, newpath, replacing, error, expected_error);
+ }
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+ }
+
+ /*
+@@ -1071,7 +1071,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
+ size_t fsize;
+ int fd;
+
+- (void) mutex_lock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+
+ /*
+ * Pick a random leaf vdev.
+@@ -1102,7 +1102,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
+ (void) close(fd);
+ }
+
+- (void) mutex_unlock(&ztest_shared->zs_vdev_lock);
++ (void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+ }
+
+ /* ARGSUSED */
+@@ -1198,7 +1198,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ uint64_t objects;
+ ztest_replay_t zr;
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ (void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
+ (u_longlong_t)za->za_instance);
+
+@@ -1242,7 +1242,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ if (error) {
+ if (error == ENOSPC) {
+ ztest_record_enospc("dmu_objset_create");
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ return;
+ }
+ fatal(0, "dmu_objset_create(%s) = %d", name, error);
+@@ -1321,7 +1321,7 @@ ztest_dmu_objset_create_destroy(ztest_ar
+ if (error)
+ fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ /*
+@@ -1335,7 +1335,7 @@ ztest_dmu_snapshot_create_destroy(ztest_
+ char snapname[100];
+ char osname[MAXNAMELEN];
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ dmu_objset_name(os, osname);
+ (void) snprintf(snapname, 100, "%s@%llu", osname,
+ (u_longlong_t)za->za_instance);
+@@ -1348,7 +1348,7 @@ ztest_dmu_snapshot_create_destroy(ztest_
+ ztest_record_enospc("dmu_take_snapshot");
+ else if (error != 0 && error != EEXIST)
+ fatal(0, "dmu_take_snapshot() = %d", error);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ #define ZTEST_TRAVERSE_BLOCKS 1000
+@@ -1992,7 +1992,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ int bs = ZTEST_DIROBJ_BLOCKSIZE;
+ int do_free = 0;
+ uint64_t off, txg_how;
+- mutex_t *lp;
++ pthread_mutex_t *lp;
+ char osname[MAXNAMELEN];
+ char iobuf[SPA_MAXBLOCKSIZE];
+ blkptr_t blk = { 0 };
+@@ -2041,7 +2041,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ }
+
+ lp = &ztest_shared->zs_sync_lock[b];
+- (void) mutex_lock(lp);
++ (void) pthread_mutex_lock(lp);
+
+ wbt->bt_objset = dmu_objset_id(os);
+ wbt->bt_object = ZTEST_DIROBJ;
+@@ -2087,7 +2087,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ dmu_write(os, ZTEST_DIROBJ, off, btsize, wbt, tx);
+ }
+
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+
+ if (ztest_random(1000) == 0)
+ (void) poll(NULL, 0, 1); /* open dn_notxholds window */
+@@ -2106,7 +2106,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ /*
+ * dmu_sync() the block we just wrote.
+ */
+- (void) mutex_lock(lp);
++ (void) pthread_mutex_lock(lp);
+
+ blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
+ error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
+@@ -2114,7 +2114,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ if (error) {
+ dprintf("dmu_buf_hold(%s, %d, %llx) = %d\n",
+ osname, ZTEST_DIROBJ, blkoff, error);
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+ return;
+ }
+ blkoff = off - blkoff;
+@@ -2122,7 +2122,7 @@ ztest_dmu_write_parallel(ztest_args_t *z
+ dmu_buf_rele(db, FTAG);
+ za->za_dbuf = NULL;
+
+- (void) mutex_unlock(lp);
++ (void) pthread_mutex_unlock(lp);
+
+ if (error) {
+ dprintf("dmu_sync(%s, %d, %llx) = %d\n",
+@@ -2502,7 +2502,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
+ char osname[MAXNAMELEN];
+ int error;
+
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+
+ dmu_objset_name(os, osname);
+
+@@ -2541,7 +2541,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
+ }
+ }
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ static void
+@@ -2693,7 +2693,7 @@ ztest_spa_rename(ztest_args_t *za)
+ int error;
+ spa_t *spa;
+
+- (void) rw_wrlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_wrlock(&ztest_shared->zs_name_lock);
+
+ oldname = za->za_pool;
+ newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
+@@ -2745,7 +2745,7 @@ ztest_spa_rename(ztest_args_t *za)
+
+ umem_free(newname, strlen(newname) + 1);
+
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+
+@@ -3090,13 +3090,13 @@ ztest_run(char *pool)
+ ztest_args_t *za;
+ spa_t *spa;
+ char name[100];
+- thread_t tid;
++ pthread_t tid;
+
+- (void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
+- (void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
++ (void) pthread_mutex_init(&zs->zs_vdev_lock, NULL);
++ (void) pthread_rwlock_init(&zs->zs_name_lock, NULL);
+
+ for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
+- (void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
++ (void) pthread_mutex_init(&zs->zs_sync_lock[t], NULL);
+
+ /*
+ * Destroy one disk before we even start.
+@@ -3153,7 +3153,7 @@ ztest_run(char *pool)
+ * start the thread before setting the zio_io_fail_shift, which
+ * will indicate our failure rate.
+ */
+- error = thr_create(0, 0, ztest_suspend_monitor, NULL, THR_BOUND, &tid);
++ error = pthread_create(&tid, NULL, ztest_suspend_monitor, NULL);
+ if (error) {
+ fatal(0, "can't create suspend monitor thread: error %d",
+ t, error);
+@@ -3217,7 +3217,7 @@ ztest_run(char *pool)
+ if (t < zopt_datasets) {
+ ztest_replay_t zr;
+ int test_future = FALSE;
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
+ error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
+ ztest_create_cb, NULL);
+@@ -3225,7 +3225,7 @@ ztest_run(char *pool)
+ test_future = TRUE;
+ } else if (error == ENOSPC) {
+ zs->zs_enospc_count++;
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ break;
+ } else if (error != 0) {
+ fatal(0, "dmu_objset_create(%s) = %d",
+@@ -3236,7 +3236,7 @@ ztest_run(char *pool)
+ if (error)
+ fatal(0, "dmu_objset_open('%s') = %d",
+ name, error);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ if (test_future)
+ ztest_dmu_check_future_leak(&za[t]);
+ zr.zr_os = za[d].za_os;
+@@ -3245,15 +3245,15 @@ ztest_run(char *pool)
+ za[d].za_zilog = zil_open(za[d].za_os, NULL);
+ }
+
+- error = thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
+- &za[t].za_thread);
++ error = pthread_create(&za[t].za_thread, NULL, ztest_thread,
++ &za[t]);
+ if (error)
+ fatal(0, "can't create thread %d: error %d",
+ t, error);
+ }
+
+ while (--t >= 0) {
+- error = thr_join(za[t].za_thread, NULL, NULL);
++ error = pthread_join(za[t].za_thread, NULL);
+ if (error)
+ fatal(0, "thr_join(%d) = %d", t, error);
+ if (za[t].za_th)
+@@ -3276,14 +3276,14 @@ ztest_run(char *pool)
+ * If we had out-of-space errors, destroy a random objset.
+ */
+ if (zs->zs_enospc_count != 0) {
+- (void) rw_rdlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+ d = (int)ztest_random(zopt_datasets);
+ (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
+ if (zopt_verbose >= 3)
+ (void) printf("Destroying %s to free up space\n", name);
+ (void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
+ DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
+- (void) rw_unlock(&ztest_shared->zs_name_lock);
++ (void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+ }
+
+ txg_wait_synced(spa_get_dsl(spa), 0);
+@@ -3301,7 +3301,7 @@ ztest_run(char *pool)
+ mutex_enter(&spa->spa_zio_lock);
+ cv_broadcast(&spa->spa_zio_cv);
+ mutex_exit(&spa->spa_zio_lock);
+- error = thr_join(tid, NULL, NULL);
++ error = pthread_join(tid, NULL);
+ if (error)
+ fatal(0, "thr_join(%d) = %d", tid, error);
+
+Index: zfs+chaos4/lib/libuutil/uu_misc.c
+===================================================================
+--- zfs+chaos4.orig/lib/libuutil/uu_misc.c
++++ zfs+chaos4/lib/libuutil/uu_misc.c
+@@ -37,7 +37,6 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/debug.h>
+-#include <thread.h>
+ #include <unistd.h>
+
+ #if !defined(TEXT_DOMAIN)
+@@ -70,11 +69,12 @@ static va_list uu_panic_args;
+ static pthread_t uu_panic_thread;
+
+ static uint32_t _uu_main_error;
++static __thread int _uu_main_thread = 0;
+
+ void
+ uu_set_error(uint_t code)
+ {
+- if (thr_main() != 0) {
++ if (_uu_main_thread) {
+ _uu_main_error = code;
+ return;
+ }
+@@ -103,7 +103,7 @@ uu_set_error(uint_t code)
+ uint32_t
+ uu_error(void)
+ {
+- if (thr_main() != 0)
++ if (_uu_main_thread)
+ return (_uu_main_error);
+
+ if (uu_error_key_setup < 0) /* can't happen? */
+@@ -255,5 +255,6 @@ uu_release_child(void)
+ static void
+ uu_init(void)
+ {
++ _uu_main_thread = 1;
+ (void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
+ }
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zfs_context_user.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zfs_context_user.h
+@@ -52,8 +52,7 @@ extern "C" {
+ #include <errno.h>
+ #include <string.h>
+ #include <strings.h>
+-#include <synch.h>
+-#include <thread.h>
++#include <pthread.h>
+ #include <assert.h>
+ #include <alloca.h>
+ #include <umem.h>
+@@ -191,13 +190,15 @@ _NOTE(CONSTCOND) } while (0)
+ /*
+ * Threads
+ */
+-#define curthread ((void *)(uintptr_t)thr_self())
++
++/* XXX: not portable */
++#define curthread ((void *)(uintptr_t)pthread_self())
+
+ typedef struct kthread kthread_t;
+
+ #define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+ zk_thread_create(func, arg)
+-#define thread_exit() thr_exit(NULL)
++#define thread_exit() pthread_exit(NULL)
+
+ extern kthread_t *zk_thread_create(void (*func)(), void *arg);
+
+@@ -207,28 +208,18 @@ extern kthread_t *zk_thread_create(void
+ /*
+ * Mutexes
+ */
++#define MTX_MAGIC 0x9522f51362a6e326ull
+ typedef struct kmutex {
+ void *m_owner;
+- boolean_t initialized;
+- mutex_t m_lock;
++ uint64_t m_magic;
++ pthread_mutex_t m_lock;
+ } kmutex_t;
+
+-#define MUTEX_DEFAULT USYNC_THREAD
+-#undef MUTEX_HELD
+-#define MUTEX_HELD(m) _mutex_held(&(m)->m_lock)
+-
+-/*
+- * Argh -- we have to get cheesy here because the kernel and userland
+- * have different signatures for the same routine.
+- */
+-extern int _mutex_init(mutex_t *mp, int type, void *arg);
+-extern int _mutex_destroy(mutex_t *mp);
+-
+-#define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp))
+-#define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp))
++#define MUTEX_DEFAULT 0
++#define MUTEX_HELD(m) ((m)->m_owner == curthread)
+
+-extern void zmutex_init(kmutex_t *mp);
+-extern void zmutex_destroy(kmutex_t *mp);
++extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
++extern void mutex_destroy(kmutex_t *mp);
+ extern void mutex_enter(kmutex_t *mp);
+ extern void mutex_exit(kmutex_t *mp);
+ extern int mutex_tryenter(kmutex_t *mp);
+@@ -237,23 +228,24 @@ extern void *mutex_owner(kmutex_t *mp);
+ /*
+ * RW locks
+ */
++#define RW_MAGIC 0x4d31fb123648e78aull
+ typedef struct krwlock {
+- void *rw_owner;
+- boolean_t initialized;
+- rwlock_t rw_lock;
++ void *rw_owner;
++ void *rw_wr_owner;
++ uint64_t rw_magic;
++ pthread_rwlock_t rw_lock;
++ uint_t rw_readers;
+ } krwlock_t;
+
+ typedef int krw_t;
+
+ #define RW_READER 0
+ #define RW_WRITER 1
+-#define RW_DEFAULT USYNC_THREAD
+-
+-#undef RW_READ_HELD
+-#define RW_READ_HELD(x) _rw_read_held(&(x)->rw_lock)
++#define RW_DEFAULT 0
+
+-#undef RW_WRITE_HELD
+-#define RW_WRITE_HELD(x) _rw_write_held(&(x)->rw_lock)
++#define RW_READ_HELD(x) ((x)->rw_readers > 0)
++#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
++#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))
+
+ extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
+ extern void rw_destroy(krwlock_t *rwlp);
+@@ -271,9 +263,13 @@ extern gid_t *crgetgroups(cred_t *cr);
+ /*
+ * Condition variables
+ */
+-typedef cond_t kcondvar_t;
++#define CV_MAGIC 0xd31ea9a83b1b30c4ull
++typedef struct kcondvar {
++ uint64_t cv_magic;
++ pthread_cond_t cv;
++} kcondvar_t;
+
+-#define CV_DEFAULT USYNC_THREAD
++#define CV_DEFAULT 0
+
+ extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
+ extern void cv_destroy(kcondvar_t *cv);
+@@ -444,7 +440,8 @@ extern void delay(clock_t ticks);
+ #define minclsyspri 60
+ #define maxclsyspri 99
+
+-#define CPU_SEQID (thr_self() & (max_ncpus - 1))
++/* XXX: not portable */
++#define CPU_SEQID (pthread_self() & (max_ncpus - 1))
+
+ #define kcred NULL
+ #define CRED() NULL
+Index: zfs+chaos4/lib/libzpool/kernel.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/kernel.c
++++ zfs+chaos4/lib/libzpool/kernel.c
+@@ -38,6 +38,7 @@
+ #include <sys/zfs_context.h>
+ #include <sys/zmod.h>
+ #include <sys/utsname.h>
++#include <sys/time.h>
+
+ /*
+ * Emulation of kernel services in userland.
+@@ -60,11 +61,15 @@ struct utsname utsname = {
+ kthread_t *
+ zk_thread_create(void (*func)(), void *arg)
+ {
+- thread_t tid;
++ pthread_t tid;
+
+- VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
+- &tid) == 0);
++ pthread_attr_t attr;
++ VERIFY(pthread_attr_init(&attr) == 0);
++ VERIFY(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+
++ VERIFY(pthread_create(&tid, &attr, (void *(*)(void *))func, arg) == 0);
++
++ /* XXX: not portable */
+ return ((void *)(uintptr_t)tid);
+ }
+
+@@ -97,30 +102,37 @@ kstat_delete(kstat_t *ksp)
+ * =========================================================================
+ */
+ void
+-zmutex_init(kmutex_t *mp)
++mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
+ {
++ ASSERT(type == MUTEX_DEFAULT);
++ ASSERT(cookie == NULL);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(mp->m_magic != MTX_MAGIC);
++#endif
++
+ mp->m_owner = NULL;
+- mp->initialized = B_TRUE;
+- (void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
++ mp->m_magic = MTX_MAGIC;
++ VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
+ }
+
+ void
+-zmutex_destroy(kmutex_t *mp)
++mutex_destroy(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner == NULL);
+- (void) _mutex_destroy(&(mp)->m_lock);
++ VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
+ mp->m_owner = (void *)-1UL;
+- mp->initialized = B_FALSE;
++ mp->m_magic = 0;
+ }
+
+ void
+ mutex_enter(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner != (void *)-1UL);
+ ASSERT(mp->m_owner != curthread);
+- VERIFY(mutex_lock(&mp->m_lock) == 0);
++ VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
+ ASSERT(mp->m_owner == NULL);
+ mp->m_owner = curthread;
+ }
+@@ -128,9 +140,9 @@ mutex_enter(kmutex_t *mp)
+ int
+ mutex_tryenter(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mp->m_owner != (void *)-1UL);
+- if (0 == mutex_trylock(&mp->m_lock)) {
++ if (0 == pthread_mutex_trylock(&mp->m_lock)) {
+ ASSERT(mp->m_owner == NULL);
+ mp->m_owner = curthread;
+ return (1);
+@@ -142,16 +154,16 @@ mutex_tryenter(kmutex_t *mp)
+ void
+ mutex_exit(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- VERIFY(mutex_unlock(&mp->m_lock) == 0);
++ VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
+ }
+
+ void *
+ mutex_owner(kmutex_t *mp)
+ {
+- ASSERT(mp->initialized == B_TRUE);
++ ASSERT(mp->m_magic == MTX_MAGIC);
+ return (mp->m_owner);
+ }
+
+@@ -164,31 +176,48 @@ mutex_owner(kmutex_t *mp)
+ void
+ rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
+ {
+- rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
++ ASSERT(type == RW_DEFAULT);
++ ASSERT(arg == NULL);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(rwlp->rw_magic != RW_MAGIC);
++#endif
++
++ VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
+ rwlp->rw_owner = NULL;
+- rwlp->initialized = B_TRUE;
++ rwlp->rw_wr_owner = NULL;
++ rwlp->rw_readers = 0;
++ rwlp->rw_magic = RW_MAGIC;
+ }
+
+ void
+ rw_destroy(krwlock_t *rwlp)
+ {
+- rwlock_destroy(&rwlp->rw_lock);
+- rwlp->rw_owner = (void *)-1UL;
+- rwlp->initialized = B_FALSE;
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
++
++ VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
++ rwlp->rw_magic = 0;
+ }
+
+ void
+ rw_enter(krwlock_t *rwlp, krw_t rw)
+ {
+- ASSERT(!RW_LOCK_HELD(rwlp));
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ ASSERT(rwlp->rw_owner != curthread);
++ ASSERT(rwlp->rw_wr_owner != curthread);
+
+- if (rw == RW_READER)
+- (void) rw_rdlock(&rwlp->rw_lock);
+- else
+- (void) rw_wrlock(&rwlp->rw_lock);
++ if (rw == RW_READER) {
++ VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
++ ASSERT(rwlp->rw_wr_owner == NULL);
++
++ atomic_inc_uint(&rwlp->rw_readers);
++ } else {
++ VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
++ ASSERT(rwlp->rw_wr_owner == NULL);
++ ASSERT3U(rwlp->rw_readers, ==, 0);
++
++ rwlp->rw_wr_owner = curthread;
++ }
+
+ rwlp->rw_owner = curthread;
+ }
+@@ -196,11 +225,16 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
+ void
+ rw_exit(krwlock_t *rwlp)
+ {
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
++ ASSERT(RW_LOCK_HELD(rwlp));
++
++ if (RW_READ_HELD(rwlp))
++ atomic_dec_uint(&rwlp->rw_readers);
++ else
++ rwlp->rw_wr_owner = NULL;
+
+ rwlp->rw_owner = NULL;
+- (void) rw_unlock(&rwlp->rw_lock);
++ VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
+ }
+
+ int
+@@ -208,19 +242,29 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
+ {
+ int rv;
+
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+ if (rw == RW_READER)
+- rv = rw_tryrdlock(&rwlp->rw_lock);
++ rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
+ else
+- rv = rw_trywrlock(&rwlp->rw_lock);
++ rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
+
+ if (rv == 0) {
++ ASSERT(rwlp->rw_wr_owner == NULL);
++
++ if (rw == RW_READER)
++ atomic_inc_uint(&rwlp->rw_readers);
++ else {
++ ASSERT3U(rwlp->rw_readers, ==, 0);
++ rwlp->rw_wr_owner = curthread;
++ }
++
+ rwlp->rw_owner = curthread;
+ return (1);
+ }
+
++ VERIFY3S(rv, ==, EBUSY);
++
+ return (0);
+ }
+
+@@ -228,8 +272,7 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
+ int
+ rw_tryupgrade(krwlock_t *rwlp)
+ {
+- ASSERT(rwlp->initialized == B_TRUE);
+- ASSERT(rwlp->rw_owner != (void *)-1UL);
++ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+ return (0);
+ }
+@@ -243,22 +286,34 @@ rw_tryupgrade(krwlock_t *rwlp)
+ void
+ cv_init(kcondvar_t *cv, char *name, int type, void *arg)
+ {
+- VERIFY(cond_init(cv, type, NULL) == 0);
++ ASSERT(type == CV_DEFAULT);
++
++#ifdef IM_FEELING_LUCKY
++ ASSERT(cv->cv_magic != CV_MAGIC);
++#endif
++
++ cv->cv_magic = CV_MAGIC;
++
++ VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
+ }
+
+ void
+ cv_destroy(kcondvar_t *cv)
+ {
+- VERIFY(cond_destroy(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
++ cv->cv_magic = 0;
+ }
+
+ void
+ cv_wait(kcondvar_t *cv, kmutex_t *mp)
+ {
++ ASSERT(cv->cv_magic == CV_MAGIC);
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- int ret = cond_wait(cv, &mp->m_lock);
+- VERIFY(ret == 0 || ret == EINTR);
++ int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
++ if (ret != 0)
++ VERIFY3S(ret, ==, EINTR);
+ mp->m_owner = curthread;
+ }
+
+@@ -266,29 +321,38 @@ clock_t
+ cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
+ {
+ int error;
++ struct timeval tv;
+ timestruc_t ts;
+ clock_t delta;
+
++ ASSERT(cv->cv_magic == CV_MAGIC);
++
+ top:
+ delta = abstime - lbolt;
+ if (delta <= 0)
+ return (-1);
+
+- ts.tv_sec = delta / hz;
+- ts.tv_nsec = (delta % hz) * (NANOSEC / hz);
++ VERIFY(gettimeofday(&tv, NULL) == 0);
++
++ ts.tv_sec = tv.tv_sec + delta / hz;
++ ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
++ if (ts.tv_nsec >= NANOSEC) {
++ ts.tv_sec++;
++ ts.tv_nsec -= NANOSEC;
++ }
+
+ ASSERT(mutex_owner(mp) == curthread);
+ mp->m_owner = NULL;
+- error = cond_reltimedwait(cv, &mp->m_lock, &ts);
++ error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
+ mp->m_owner = curthread;
+
+- if (error == ETIME)
++ if (error == ETIMEDOUT)
+ return (-1);
+
+ if (error == EINTR)
+ goto top;
+
+- ASSERT(error == 0);
++ VERIFY3S(error, ==, 0);
+
+ return (1);
+ }
+@@ -296,13 +360,15 @@ top:
+ void
+ cv_signal(kcondvar_t *cv)
+ {
+- VERIFY(cond_signal(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
+ }
+
+ void
+ cv_broadcast(kcondvar_t *cv)
+ {
+- VERIFY(cond_broadcast(cv) == 0);
++ ASSERT(cv->cv_magic == CV_MAGIC);
++ VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
+ }
+
+ /*
+@@ -549,11 +615,11 @@ __dprintf(const char *file, const char *
+ dprintf_find_string(func)) {
+ /* Print out just the function name if requested */
+ flockfile(stdout);
+- /* XXX: the following printf may not be portable */
++ /* XXX: the following 2 printfs may not be portable */
+ if (dprintf_find_string("pid"))
+ (void) printf("%llu ", (u_longlong_t) getpid());
+ if (dprintf_find_string("tid"))
+- (void) printf("%u ", (uint_t) thr_self());
++ (void) printf("%u ", (uint_t) pthread_self());
+ if (dprintf_find_string("cpu"))
+ (void) printf("%u ", getcpuid());
+ if (dprintf_find_string("time"))
+Index: zfs+chaos4/lib/libzpool/taskq.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/taskq.c
++++ zfs+chaos4/lib/libzpool/taskq.c
+@@ -43,7 +43,7 @@ struct taskq {
+ krwlock_t tq_threadlock;
+ kcondvar_t tq_dispatch_cv;
+ kcondvar_t tq_wait_cv;
+- thread_t *tq_threadlist;
++ pthread_t *tq_threadlist;
+ int tq_flags;
+ int tq_active;
+ int tq_nthreads;
+@@ -186,7 +186,7 @@ taskq_create(const char *name, int nthre
+ tq->tq_maxalloc = maxalloc;
+ tq->tq_task.task_next = &tq->tq_task;
+ tq->tq_task.task_prev = &tq->tq_task;
+- tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
++ tq->tq_threadlist = kmem_alloc(nthreads * sizeof (pthread_t), KM_SLEEP);
+
+ if (flags & TASKQ_PREPOPULATE) {
+ mutex_enter(&tq->tq_lock);
+@@ -196,8 +196,8 @@ taskq_create(const char *name, int nthre
+ }
+
+ for (t = 0; t < nthreads; t++)
+- VERIFY(thr_create(0, 0, taskq_thread,
+- tq, THR_BOUND, &tq->tq_threadlist[t]) == 0);
++ VERIFY(pthread_create(&tq->tq_threadlist[t],
++ NULL, taskq_thread, tq) == 0);
+
+ return (tq);
+ }
+@@ -227,9 +227,9 @@ taskq_destroy(taskq_t *tq)
+ mutex_exit(&tq->tq_lock);
+
+ for (t = 0; t < nthreads; t++)
+- VERIFY(thr_join(tq->tq_threadlist[t], NULL, NULL) == 0);
++ VERIFY(pthread_join(tq->tq_threadlist[t], NULL) == 0);
+
+- kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
++ kmem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));
+
+ rw_destroy(&tq->tq_threadlock);
+ mutex_destroy(&tq->tq_lock);
+@@ -248,7 +248,7 @@ taskq_member(taskq_t *tq, void *t)
+ return (1);
+
+ for (i = 0; i < tq->tq_nthreads; i++)
+- if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
++ if (tq->tq_threadlist[i] == (pthread_t)(uintptr_t)t)
+ return (1);
+
+ return (0);
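Most of the patch above re-implements the kernel-style primitives (kmutex_t, krwlock_t, kcondvar_t) on top of pthreads, tracking lock ownership by hand because POSIX offers no portable owner query for MUTEX_HELD()-style assertions. A reduced sketch of the mutex half of that wrapper, with simplified names and without the magic-number checks (build with -pthread):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Kernel-style mutex wrapper over pthreads.  The owner bookkeeping
     * exists only so MUTEX_HELD()-style assertions work in userspace,
     * mirroring the m_owner field kept by the patch; names here are
     * simplified stand-ins, not the ZFS definitions.
     */
    typedef struct kmutex {
        pthread_t       m_owner;
        int             m_owned;
        pthread_mutex_t m_lock;
    } kmutex_t;

    #define MUTEX_HELD(mp) \
        ((mp)->m_owned && pthread_equal((mp)->m_owner, pthread_self()))

    static void
    mutex_enter(kmutex_t *mp)
    {
        assert(!MUTEX_HELD(mp));    /* no recursive entry, as in the kernel */
        if (pthread_mutex_lock(&mp->m_lock) != 0)
            abort();
        mp->m_owner = pthread_self();
        mp->m_owned = 1;
    }

    static void
    mutex_exit(kmutex_t *mp)
    {
        assert(MUTEX_HELD(mp));
        mp->m_owned = 0;
        if (pthread_mutex_unlock(&mp->m_lock) != 0)
            abort();
    }

    int
    main(void)
    {
        kmutex_t m;

        m.m_owned = 0;
        if (pthread_mutex_init(&m.m_lock, NULL) != 0)
            return (1);

        mutex_enter(&m);
        (void) printf("held by caller: %d\n", MUTEX_HELD(&m) ? 1 : 0);
        mutex_exit(&m);

        (void) pthread_mutex_destroy(&m.m_lock);
        return (0);
    }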
diff --git a/patches/zap-cursor-move-to-key.patch b/patches/zap-cursor-move-to-key.patch
new file mode 100644
index 000000000..ad5bbb93d
--- /dev/null
+++ b/patches/zap-cursor-move-to-key.patch
@@ -0,0 +1,115 @@
+Add a ZAP API to move a ZAP cursor to a given key.
+
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zap.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zap.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zap.h
+@@ -302,6 +302,11 @@ void zap_cursor_advance(zap_cursor_t *zc
+ uint64_t zap_cursor_serialize(zap_cursor_t *zc);
+
+ /*
++ * Advance the cursor to the attribute having the key.
++ */
++int zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt);
++
++/*
+ * Initialize a zap cursor pointing to the position recorded by
+ * zap_cursor_serialize (in the "serialized" argument). You can also
+ * use a "serialized" argument of 0 to start at the beginning of the
+Index: zfs+chaos4/lib/libzfscommon/include/sys/zap_impl.h
+===================================================================
+--- zfs+chaos4.orig/lib/libzfscommon/include/sys/zap_impl.h
++++ zfs+chaos4/lib/libzfscommon/include/sys/zap_impl.h
+@@ -210,6 +210,7 @@ int fzap_add_cd(zap_name_t *zn,
+ uint64_t integer_size, uint64_t num_integers,
+ const void *val, uint32_t cd, dmu_tx_t *tx);
+ void fzap_upgrade(zap_t *zap, dmu_tx_t *tx);
++int fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn);
+
+ #ifdef __cplusplus
+ }
+Index: zfs+chaos4/lib/libzpool/zap.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/zap.c
++++ zfs+chaos4/lib/libzpool/zap.c
+@@ -1029,6 +1029,30 @@ zap_stats_ptrtbl(zap_t *zap, uint64_t *t
+ }
+ }
+
++int fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn)
++{
++ int err;
++ zap_leaf_t *l;
++ zap_entry_handle_t zeh;
++ uint64_t hash;
++
++ if (zn->zn_name_orij && strlen(zn->zn_name_orij) > ZAP_MAXNAMELEN)
++ return (E2BIG);
++
++ err = zap_deref_leaf(zc->zc_zap, zn->zn_hash, NULL, RW_READER, &l);
++ if (err != 0)
++ return (err);
++
++ err = zap_leaf_lookup(l, zn, &zeh);
++ if (err != 0)
++ return (err);
++
++ zc->zc_leaf = l;
++ zc->zc_hash = zeh.zeh_hash;
++ zc->zc_cd = zeh.zeh_cd;
++ return 0;
++}
++
+ void
+ fzap_get_stats(zap_t *zap, zap_stats_t *zs)
+ {
+Index: zfs+chaos4/lib/libzpool/zap_micro.c
+===================================================================
+--- zfs+chaos4.orig/lib/libzpool/zap_micro.c
++++ zfs+chaos4/lib/libzpool/zap_micro.c
+@@ -1045,6 +1045,45 @@ zap_cursor_advance(zap_cursor_t *zc)
+ }
+ }
+
++int zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
++{
++ int err = 0;
++ mzap_ent_t *mze;
++ zap_name_t *zn;
++
++ if (zc->zc_zap == NULL) {
++ err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
++ RW_READER, TRUE, FALSE, &zc->zc_zap);
++ if (err)
++ return (err);
++ } else {
++ rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
++ }
++
++ zn = zap_name_alloc(zc->zc_zap, name, mt);
++ if (zn == NULL) {
++ rw_exit(&zc->zc_zap->zap_rwlock);
++ return (ENOTSUP);
++ }
++
++ if (!zc->zc_zap->zap_ismicro) {
++ err = fzap_cursor_move_to_key(zc, zn);
++ } else {
++ mze = mze_find(zn);
++ if (mze == NULL) {
++ err = (ENOENT);
++ goto out;
++ }
++ zc->zc_hash = mze->mze_hash;
++ zc->zc_cd = mze->mze_phys.mze_cd;
++ }
++
++out:
++ zap_name_free(zn);
++ rw_exit(&zc->zc_zap->zap_rwlock);
++ return (err);
++}
++
+ int
+ zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
+ {
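A hypothetical caller of the new API would position a cursor on a known key and then iterate forward with the existing cursor calls. The sketch below is an illustration only, compiled against the libzpool headers: zap_cursor_init/retrieve/advance/fini and MT_EXACT are the pre-existing ZAP cursor API, while the surrounding function and names are assumptions, not part of this patch:

    #include <sys/zap.h>

    /*
     * Illustration only: start iterating a ZAP object at a given key
     * using the new zap_cursor_move_to_key().  "os", "zapobj" and
     * "start_key" are supplied by the (hypothetical) caller.
     */
    static int
    walk_from_key(objset_t *os, uint64_t zapobj, const char *start_key)
    {
        zap_cursor_t zc;
        zap_attribute_t za;
        int err;

        zap_cursor_init(&zc, os, zapobj);

        err = zap_cursor_move_to_key(&zc, start_key, MT_EXACT);
        if (err == 0) {
            while (zap_cursor_retrieve(&zc, &za) == 0) {
                /* za.za_name now walks forward from start_key */
                zap_cursor_advance(&zc);
            }
        }

        zap_cursor_fini(&zc);
        return (err);
    }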